diff --git a/.coveragerc b/.coveragerc index 27db64e09..61ff66749 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,7 +1,4 @@ [run] -omit = +omit = # standlonetemplate is read dynamically and tested by test_genscript *standalonetemplate.py - # oldinterpret could be removed, as it is no longer used in py26+ - *oldinterpret.py - vendored_packages diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ad3fea61e..23a9f8c56 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,15 +1,14 @@ Thanks for submitting a PR, your contribution is really appreciated! -Here's a quick checklist that should be present in PRs: +Here's a quick checklist that should be present in PRs (you can delete this text from the final description, this is +just a guideline): -- [ ] Add a new news fragment into the changelog folder - * name it `$issue_id.$type` for example (588.bug) - * if you don't have an issue_id change it to the pr id after creating the pr - * ensure type is one of `removal`, `feature`, `bugfix`, `vendor`, `doc` or `trivial` - * Make sure to use full sentences with correct case and punctuation, for example: "Fix issue with non-ascii contents in doctest text files." -- [ ] Target: for `bugfix`, `vendor`, `doc` or `trivial` fixes, target `master`; for removals or features target `features`; -- [ ] Make sure to include reasonable tests for your change if necessary +- [ ] Create a new changelog file in the `changelog` folder, with a name like `..rst`. See [changelog/README.rst](/changelog/README.rst) for details. +- [ ] Target the `master` branch for bug fixes, documentation updates and trivial changes. +- [ ] Target the `features` branch for new features and removals/deprecations. +- [ ] Include documentation when adding new features. +- [ ] Include new tests or update existing tests when applicable. 
-Unless your change is a trivial or a documentation fix (e.g., a typo or reword of a small section) please: +Unless your change is trivial or a small documentation fix (e.g., a typo or reword of a small section) please: -- [ ] Add yourself to `AUTHORS`; +- [ ] Add yourself to `AUTHORS` in alphabetical order; diff --git a/.gitignore b/.gitignore index 3b7ec9fac..99c4c7bad 100644 --- a/.gitignore +++ b/.gitignore @@ -33,6 +33,7 @@ env/ 3rdparty/ .tox .cache +.pytest_cache .coverage .ropeproject .idea diff --git a/.travis.yml b/.travis.yml index 0a71e7dc1..b0ed7bf29 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,42 +1,58 @@ sudo: false language: python python: - - '3.5' -# command to install dependencies -install: "pip install -U tox" -# # command to run tests + - '3.6' +install: + - pip install --upgrade --pre tox env: matrix: # coveralls is not listed in tox's envlist, but should run in travis - TOXENV=coveralls # note: please use "tox --listenvs" to populate the build matrix below - TOXENV=linting - - TOXENV=py26 - TOXENV=py27 - - TOXENV=py33 - TOXENV=py34 - - TOXENV=py35 - - TOXENV=pypy + - TOXENV=py36 - TOXENV=py27-pexpect - TOXENV=py27-xdist - TOXENV=py27-trial - - TOXENV=py35-pexpect - - TOXENV=py35-xdist - - TOXENV=py35-trial + - TOXENV=py27-numpy + - TOXENV=py27-pluggymaster + - TOXENV=py36-pexpect + - TOXENV=py36-xdist + - TOXENV=py36-trial + - TOXENV=py36-numpy + - TOXENV=py36-pluggymaster - TOXENV=py27-nobyte - TOXENV=doctesting - - TOXENV=freeze - TOXENV=docs -matrix: +jobs: include: - - env: TOXENV=py36 + - env: TOXENV=pypy + python: 'pypy-5.4' + - env: TOXENV=py35 + python: '3.5' + - env: TOXENV=py35-freeze + python: '3.5' + - env: TOXENV=py37 + python: 'nightly' + + - stage: deploy python: '3.6' - - env: TOXENV=py37 - python: 'nightly' - allow_failures: - - env: TOXENV=py37 - python: 'nightly' + env: + install: pip install -U setuptools setuptools_scm + script: skip + deploy: + provider: pypi + user: nicoddemus + distributions: sdist bdist_wheel + skip_upload_docs: true + password: + secure: xanTgTUu6XDQVqB/0bwJQXoDMnU5tkwZc5koz6mBkkqZhKdNOi2CLoC1XhiSZ+ah24l4V1E0GAqY5kBBcy9d7NVe4WNg4tD095LsHw+CRU6/HCVIFfyk2IZ+FPAlguesCcUiJSXOrlBF+Wj68wEvLoK7EoRFbJeiZ/f91Ww1sbtDlqXABWGHrmhPJL5Wva7o7+wG7JwJowqdZg1pbQExsCc7b53w4v2RBu3D6TJaTAzHiVsW+nUSI67vKI/uf+cR/OixsTfy37wlHgSwihYmrYLFls3V0bSpahCim3bCgMaFZx8S8xrdgJ++PzBCof2HeflFKvW+VCkoYzGEG4NrTWJoNz6ni4red9GdvfjGH3YCjAKS56h9x58zp2E5rpsb/kVq5/45xzV+dq6JRuhQ1nJWjBC6fSKAc/bfwnuFK3EBxNLkvBssLHvsNjj5XG++cB8DdS9wVGUqjpoK4puaXUWFqy4q3S9F86HEsKNgExtieA9qNx+pCIZVs6JCXZNjr0I5eVNzqJIyggNgJG6RyravsU35t9Zd9doL5g4Y7UKmAGTn1Sz24HQ4sMQgXdm2SyD8gEK5je4tlhUvfGtDvMSlstq71kIn9nRpFnqB6MFlbYSEAZmo8dGbCquoUc++6Rum208wcVbrzzVtGlXB/Ow9AbFMYeAGA0+N/K1e59c= + on: + tags: true + repo: pytest-dev/pytest script: tox --recreate diff --git a/AUTHORS b/AUTHORS index ca282870f..e9869630b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -3,19 +3,25 @@ merlinux GmbH, Germany, office at merlinux eu Contributors include:: +Aaron Coleman Abdeali JK Abhijeet Kasurde Ahn Ki-Wook +Alan Velasco Alexander Johnson Alexei Kozlenok Anatoly Bubenkoff +Anders Hovmöller +Andras Tim Andreas Zeidler Andrzej Ostrowski Andy Freeland Anthon van der Neut +Anthony Shaw Anthony Sottile Antony Lee Armin Rigo +Aron Coyle Aron Curzon Aviv Palivoda Barney Gale @@ -24,11 +30,14 @@ Benjamin Peterson Bernard Pratz Bob Ippolito Brian Dorsey +Brian Maissy Brian Okken Brianna Laugher Bruno Oliveira Cal Leeming Carl Friedrich Bolz +Carlos Jenkins +Ceridwen Charles Cloud Charnjit SiNGH (CCSJ) Chris Lamb @@ -36,6 +45,7 @@ 
Christian Boelsen Christian Theunert Christian Tismer Christopher Gilling +Cyrus Maden Daniel Grana Daniel Hahler Daniel Nuri @@ -45,6 +55,7 @@ Dave Hunt David Díaz-Barquero David Mohr David Vierra +Daw-Ran Liou Denis Kirisov Diego Russo Dmitry Dygalo @@ -63,6 +74,7 @@ Feng Ma Florian Bruhin Floris Bruynooghe Gabriel Reis +George Kussumoto Georgy Dyuldin Graham Horler Greg Price @@ -70,33 +82,45 @@ Grig Gheorghiu Grigorii Eremeev (budulianin) Guido Wesdorp Harald Armin Massa +Henk-Jaap Wagenaar +Hugo van Kemenade Hui Wang (coldnight) Ian Bicking +Ian Lesperance Jaap Broekhuizen Jan Balster Janne Vanhala Jason R. Coombs Javier Domingo Cansino Javier Romero +Jeff Rackauckas Jeff Widman +John Eddie Ayson John Towler Jon Sonesen Jonas Obrist Jordan Guymon +Jordan Moldow +Jordan Speicher Joshua Bronson Jurko Gospodnetić Justyna Janczyszyn Kale Kundert Katarzyna Jachim +Katerina Koukiou Kevin Cox Kodi B. Arfer +Kostis Anagnostopoulos +Lawrence Mitchell Lee Kamentsky Lev Maximov +Llandy Riveron Del Risco Loic Esteve Lukas Bednar Luke Murphy Maciek Fijalkowski Maho +Maik Figura Mandeep Bhutani Manuel Krebber Marc Schlaich @@ -104,6 +128,7 @@ Marcin Bachry Mark Abramowitz Markus Unterwaditzer Martijn Faassen +Martin Altmayer Martin K. Scherer Martin Prusse Mathieu Clabaut @@ -111,28 +136,34 @@ Matt Bachmann Matt Duck Matt Williams Matthias Hafner +Maxim Filipenko mbyt Michael Aquilina Michael Birtwell Michael Droettboom Michael Seifert Michal Wajszczuk +Mihai Capotă Mike Lundy +Nathaniel Waisbrot Ned Batchelder Neven Mundar Nicolas Delaby Oleg Pidsadnyi +Oleg Sushchenko Oliver Bestwalter Omar Kohl Omer Hadari Patrick Hayes Paweł Adamczak +Pedro Algarvio Pieter Mulder Piotr Banaszkiewicz Punyashloka Biswal Quentin Pradet Ralf Schmitt Ran Benita +Raphael Castaneda Raphael Pierzina Raquel Alegre Ravi Chandra @@ -143,25 +174,37 @@ Ronny Pfannschmidt Ross Lawley Russel Winder Ryan Wooden +Samuel Dion-Girardeau Samuele Pedroni Segev Finer Simon Gomizelj Skylar Downes +Srinivas Reddy Thatiparthy Stefan Farmbauer Stefan Zimmermann Stefano Taschini Steffen Allner Stephan Obermann +Tarcisio Fischer Tareq Alayan Ted Xiao Thomas Grainger +Thomas Hisch +Tim Strazny +Tom Dalton Tom Viner Trevor Bekolay Tyler Goodlet +Tzu-ping Chung Vasily Kuznetsov Victor Uriarte Vidar T. Fauske Vitaly Lashmanov Vlad Dragos +William Lee Wouter van Ackooy +Xuan Luong Xuecong Liao +Zoltán Máté +Roland Puntaier +Allan Feldman diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 87af5d72a..1896f550b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,4 +1,4 @@ -.. +.. You should *NOT* be adding new change log entries to this file, this file is managed by towncrier. You *may* edit previous change logs to fix problems like typo corrections or such. @@ -8,6 +8,1033 @@ .. towncrier release notes start +Pytest 3.5.0 (2018-03-21) +========================= + +Deprecations and Removals +------------------------- + +- ``record_xml_property`` fixture is now deprecated in favor of the more + generic ``record_property``. (`#2770 + `_) + +- Defining ``pytest_plugins`` is now deprecated in non-top-level conftest.py + files, because they "leak" to the entire directory tree. (`#3084 + `_) + + +Features +-------- + +- New ``--show-capture`` command-line option that allows to specify how to + display captured output when tests fail: ``no``, ``stdout``, ``stderr``, + ``log`` or ``all`` (the default). (`#1478 + `_) + +- New ``--rootdir`` command-line option to override the rules for discovering + the root directory. 
See `customize + `_ in the documentation for + details. (`#1642 `_) + +- Fixtures are now instantiated based on their scopes, with higher-scoped + fixtures (such as ``session``) being instantiated first than lower-scoped + fixtures (such as ``function``). The relative order of fixtures of the same + scope is kept unchanged, based in their declaration order and their + dependencies. (`#2405 `_) + +- ``record_xml_property`` renamed to ``record_property`` and is now compatible + with xdist, markers and any reporter. ``record_xml_property`` name is now + deprecated. (`#2770 `_) + +- New ``--nf``, ``--new-first`` options: run new tests first followed by the + rest of the tests, in both cases tests are also sorted by the file modified + time, with more recent files coming first. (`#3034 + `_) + +- New ``--last-failed-no-failures`` command-line option that allows to specify + the behavior of the cache plugin's ```--last-failed`` feature when no tests + failed in the last run (or no cache was found): ``none`` or ``all`` (the + default). (`#3139 `_) + +- New ``--doctest-continue-on-failure`` command-line option to enable doctests + to show multiple failures for each snippet, instead of stopping at the first + failure. (`#3149 `_) + +- Captured log messages are added to the ```` tag in the generated + junit xml file if the ``junit_logging`` ini option is set to ``system-out``. + If the value of this ini option is ``system-err`, the logs are written to + ````. The default value for ``junit_logging`` is ``no``, meaning + captured logs are not written to the output file. (`#3156 + `_) + +- Allow the logging plugin to handle ``pytest_runtest_logstart`` and + ``pytest_runtest_logfinish`` hooks when live logs are enabled. (`#3189 + `_) + +- Passing `--log-cli-level` in the command-line now automatically activates + live logging. (`#3190 `_) + +- Add command line option ``--deselect`` to allow deselection of individual + tests at collection time. (`#3198 + `_) + +- Captured logs are printed before entering pdb. (`#3204 + `_) + +- Deselected item count is now shown before tests are run, e.g. ``collected X + items / Y deselected``. (`#3213 + `_) + +- The builtin module ``platform`` is now available for use in expressions in + ``pytest.mark``. (`#3236 + `_) + +- The *short test summary info* section now is displayed after tracebacks and + warnings in the terminal. (`#3255 + `_) + +- New ``--verbosity`` flag to set verbosity level explicitly. (`#3296 + `_) + +- ``pytest.approx`` now accepts comparing a numpy array with a scalar. (`#3312 + `_) + + +Bug Fixes +--------- + +- Suppress ``IOError`` when closing the temporary file used for capturing + streams in Python 2.7. (`#2370 + `_) + +- Fixed ``clear()`` method on ``caplog`` fixture which cleared ``records``, but + not the ``text`` property. (`#3297 + `_) + +- During test collection, when stdin is not allowed to be read, the + ``DontReadFromStdin`` object still allow itself to be iterable and resolved + to an iterator without crashing. (`#3314 + `_) + + +Improved Documentation +---------------------- + +- Added a `reference `_ page + to the docs. (`#1713 `_) + + +Trivial/Internal Changes +------------------------ + +- Change minimum requirement of ``attrs`` to ``17.4.0``. (`#3228 + `_) + +- Renamed example directories so all tests pass when ran from the base + directory. (`#3245 `_) + +- Internal ``mark.py`` module has been turned into a package. (`#3250 + `_) + +- ``pytest`` now depends on the `more-itertools + `_ package. 
(`#3265 + `_) + +- Added warning when ``[pytest]`` section is used in a ``.cfg`` file passed + with ``-c`` (`#3268 `_) + +- ``nodeids`` can now be passed explicitly to ``FSCollector`` and ``Node`` + constructors. (`#3291 `_) + +- Internal refactoring of ``FormattedExcinfo`` to use ``attrs`` facilities and + remove old support code for legacy Python versions. (`#3292 + `_) + +- Refactoring to unify how verbosity is handled internally. (`#3296 + `_) + +- Internal refactoring to better integrate with argparse. (`#3304 + `_) + +- Fix a python example when calling a fixture in doc/en/usage.rst (`#3308 + `_) + + +Pytest 3.4.2 (2018-03-04) +========================= + +Bug Fixes +--------- + +- Removed progress information when capture option is ``no``. (`#3203 + `_) + +- Refactor check of bindir from ``exists`` to ``isdir``. (`#3241 + `_) + +- Fix ``TypeError`` issue when using ``approx`` with a ``Decimal`` value. + (`#3247 `_) + +- Fix reference cycle generated when using the ``request`` fixture. (`#3249 + `_) + +- ``[tool:pytest]`` sections in ``*.cfg`` files passed by the ``-c`` option are + now properly recognized. (`#3260 + `_) + + +Improved Documentation +---------------------- + +- Add logging plugin to plugins list. (`#3209 + `_) + + +Trivial/Internal Changes +------------------------ + +- Fix minor typo in fixture.rst (`#3259 + `_) + + +Pytest 3.4.1 (2018-02-20) +========================= + +Bug Fixes +--------- + +- Move import of ``doctest.UnexpectedException`` to top-level to avoid possible + errors when using ``--pdb``. (`#1810 + `_) + +- Added printing of captured stdout/stderr before entering pdb, and improved a + test which was giving false negatives about output capturing. (`#3052 + `_) + +- Fix ordering of tests using parametrized fixtures which can lead to fixtures + being created more than necessary. (`#3161 + `_) + +- Fix bug where logging happening at hooks outside of "test run" hooks would + cause an internal error. (`#3184 + `_) + +- Detect arguments injected by ``unittest.mock.patch`` decorator correctly when + pypi ``mock.patch`` is installed and imported. (`#3206 + `_) + +- Errors shown when a ``pytest.raises()`` with ``match=`` fails are now cleaner + on what happened: When no exception was raised, the "matching '...'" part got + removed as it falsely implies that an exception was raised but it didn't + match. When a wrong exception was raised, it's now thrown (like + ``pytest.raised()`` without ``match=`` would) instead of complaining about + the unmatched text. (`#3222 + `_) + +- Fixed output capture handling in doctests on macOS. (`#985 + `_) + + +Improved Documentation +---------------------- + +- Add Sphinx parameter docs for ``match`` and ``message`` args to + ``pytest.raises``. (`#3202 + `_) + + +Trivial/Internal Changes +------------------------ + +- pytest has changed the publication procedure and is now being published to + PyPI directly from Travis. (`#3060 + `_) + +- Rename ``ParameterSet._for_parameterize()`` to ``_for_parametrize()`` in + order to comply with the naming convention. (`#3166 + `_) + +- Skip failing pdb/doctest test on mac. (`#985 + `_) + + +Pytest 3.4.0 (2018-01-30) +========================= + +Deprecations and Removals +------------------------- + +- All pytest classes now subclass ``object`` for better Python 2/3 compatibility. + This should not affect user code except in very rare edge cases. 
(`#2147 + `_) + + +Features +-------- + +- Introduce ``empty_parameter_set_mark`` ini option to select which mark to + apply when ``@pytest.mark.parametrize`` is given an empty set of parameters. + Valid options are ``skip`` (default) and ``xfail``. Note that it is planned + to change the default to ``xfail`` in future releases as this is considered + less error prone. (`#2527 + `_) + +- **Incompatible change**: after community feedback the `logging + `_ functionality has + undergone some changes. Please consult the `logging documentation + `_ + for details. (`#3013 `_) + +- Console output falls back to "classic" mode when capturing is disabled (``-s``), + otherwise the output gets garbled to the point of being useless. (`#3038 + `_) + +- New `pytest_runtest_logfinish + `_ + hook which is called when a test item has finished executing, analogous to + `pytest_runtest_logstart + `_. + (`#3101 `_) + +- Improve performance when collecting tests using many fixtures. (`#3107 + `_) + +- New ``caplog.get_records(when)`` method which provides access to the captured + records for the ``"setup"``, ``"call"`` and ``"teardown"`` + testing stages. (`#3117 `_) + +- New fixture ``record_xml_attribute`` that allows modifying and inserting + attributes on the ```` xml node in JUnit reports. (`#3130 + `_) + +- The default cache directory has been renamed from ``.cache`` to + ``.pytest_cache`` after community feedback that the name ``.cache`` did not + make it clear that it was used by pytest. (`#3138 + `_) + +- Colorize the levelname column in the live-log output. (`#3142 + `_) + + +Bug Fixes +--------- + +- Fix hanging pexpect test on MacOS by using flush() instead of wait(). + (`#2022 `_) + +- Fix restoring Python state after in-process pytest runs with the + ``pytester`` plugin; this may break tests using multiple inprocess + pytest runs if later ones depend on earlier ones leaking global interpreter + changes. (`#3016 `_) + +- Fix skipping plugin reporting hook when test aborted before plugin setup + hook. (`#3074 `_) + +- Fix progress percentage reported when tests fail during teardown. (`#3088 + `_) + +- **Incompatible change**: ``-o/--override`` option no longer eats all the + remaining options, which can lead to surprising behavior: for example, + ``pytest -o foo=1 /path/to/test.py`` would fail because ``/path/to/test.py`` + would be considered as part of the ``-o`` command-line argument. One + consequence of this is that now multiple configuration overrides need + multiple ``-o`` flags: ``pytest -o foo=1 -o bar=2``. (`#3103 + `_) + + +Improved Documentation +---------------------- + +- Document hooks (defined with ``historic=True``) which cannot be used with + ``hookwrapper=True``. (`#2423 + `_) + +- Clarify that warning capturing doesn't change the warning filter by default. + (`#2457 `_) + +- Clarify a possible confusion when using pytest_fixture_setup with fixture + functions that return None. (`#2698 + `_) + +- Fix the wording of a sentence on doctest flags used in pytest. (`#3076 + `_) + +- Prefer ``https://*.readthedocs.io`` over ``http://*.rtfd.org`` for links in + the documentation. (`#3092 + `_) + +- Improve readability (wording, grammar) of Getting Started guide (`#3131 + `_) + +- Added note that calling pytest.main multiple times from the same process is + not recommended because of import caching. 
(`#3143 + `_) + + +Trivial/Internal Changes +------------------------ + +- Show a simple and easy error when keyword expressions trigger a syntax error + (for example, ``"-k foo and import"`` will show an error that you can not use + the ``import`` keyword in expressions). (`#2953 + `_) + +- Change parametrized automatic test id generation to use the ``__name__`` + attribute of functions instead of the fallback argument name plus counter. + (`#2976 `_) + +- Replace py.std with stdlib imports. (`#3067 + `_) + +- Corrected 'you' to 'your' in logging docs. (`#3129 + `_) + + +Pytest 3.3.2 (2017-12-25) +========================= + +Bug Fixes +--------- + +- pytester: ignore files used to obtain current user metadata in the fd leak + detector. (`#2784 `_) + +- Fix **memory leak** where objects returned by fixtures were never destructed + by the garbage collector. (`#2981 + `_) + +- Fix conversion of pyargs to filename to not convert symlinks on Python 2. (`#2985 + `_) + +- ``PYTEST_DONT_REWRITE`` is now checked for plugins too rather than only for + test modules. (`#2995 `_) + + +Improved Documentation +---------------------- + +- Add clarifying note about behavior of multiple parametrized arguments (`#3001 + `_) + + +Trivial/Internal Changes +------------------------ + +- Code cleanup. (`#3015 `_, + `#3021 `_) + +- Clean up code by replacing imports and references of `_ast` to `ast`. (`#3018 + `_) + + +Pytest 3.3.1 (2017-12-05) +========================= + +Bug Fixes +--------- + +- Fix issue about ``-p no:`` having no effect. (`#2920 + `_) + +- Fix regression with warnings that contained non-strings in their arguments in + Python 2. (`#2956 `_) + +- Always escape null bytes when setting ``PYTEST_CURRENT_TEST``. (`#2957 + `_) + +- Fix ``ZeroDivisionError`` when using the ``testmon`` plugin when no tests + were actually collected. (`#2971 + `_) + +- Bring back ``TerminalReporter.writer`` as an alias to + ``TerminalReporter._tw``. This alias was removed by accident in the ``3.3.0`` + release. (`#2984 `_) + +- The ``pytest-capturelog`` plugin is now also blacklisted, avoiding errors when + running pytest with it still installed. (`#3004 + `_) + + +Improved Documentation +---------------------- + +- Fix broken link to plugin ``pytest-localserver``. (`#2963 + `_) + + +Trivial/Internal Changes +------------------------ + +- Update github "bugs" link in ``CONTRIBUTING.rst`` (`#2949 + `_) + + +Pytest 3.3.0 (2017-11-23) +========================= + +Deprecations and Removals +------------------------- + +- Pytest no longer supports Python **2.6** and **3.3**. Those Python versions + are EOL for some time now and incur maintenance and compatibility costs on + the pytest core team, and following up with the rest of the community we + decided that they will no longer be supported starting on this version. Users + which still require those versions should pin pytest to ``<3.3``. (`#2812 + `_) + +- Remove internal ``_preloadplugins()`` function. This removal is part of the + ``pytest_namespace()`` hook deprecation. (`#2636 + `_) + +- Internally change ``CallSpec2`` to have a list of marks instead of a broken + mapping of keywords. This removes the keywords attribute of the internal + ``CallSpec2`` class. (`#2672 + `_) + +- Remove ParameterSet.deprecated_arg_dict - its not a public api and the lack + of the underscore was a naming error. (`#2675 + `_) + +- Remove the internal multi-typed attribute ``Node._evalskip`` and replace it + with the boolean ``Node._skipped_by_mark``. 
(`#2767 + `_) + +- The ``params`` list passed to ``pytest.fixture`` is now for + all effects considered immutable and frozen at the moment of the ``pytest.fixture`` + call. Previously the list could be changed before the first invocation of the fixture + allowing for a form of dynamic parametrization (for example, updated from command-line options), + but this was an unwanted implementation detail which complicated the internals and prevented + some internal cleanup. See issue `#2959 `_ + for details and a recommended workaround. + +Features +-------- + +- ``pytest_fixture_post_finalizer`` hook can now receive a ``request`` + argument. (`#2124 `_) + +- Replace the old introspection code in compat.py that determines the available + arguments of fixtures with inspect.signature on Python 3 and + funcsigs.signature on Python 2. This should respect ``__signature__`` + declarations on functions. (`#2267 + `_) + +- Report tests with global ``pytestmark`` variable only once. (`#2549 + `_) + +- Now pytest displays the total progress percentage while running tests. The + previous output style can be set by configuring the ``console_output_style`` + setting to ``classic``. (`#2657 `_) + +- Match ``warns`` signature to ``raises`` by adding ``match`` keyword. (`#2708 + `_) + +- Pytest now captures and displays output from the standard ``logging`` module. + The user can control the logging level to be captured by specifying options + in ``pytest.ini``, the command line and also during individual tests using + markers. Also, a ``caplog`` fixture is available that enables users to test + the captured log during specific tests (similar to ``capsys`` for example). + For more information, please see the `logging docs + `_. This feature was + introduced by merging the popular `pytest-catchlog + `_ plugin, thanks to `Thomas Hisch + `_. Be advised that during the merging the + backward compatibility interface with the defunct ``pytest-capturelog`` has + been dropped. (`#2794 `_) + +- Add ``allow_module_level`` kwarg to ``pytest.skip()``, enabling to skip the + whole module. (`#2808 `_) + +- Allow setting ``file_or_dir``, ``-c``, and ``-o`` in PYTEST_ADDOPTS. (`#2824 + `_) + +- Return stdout/stderr capture results as a ``namedtuple``, so ``out`` and + ``err`` can be accessed by attribute. (`#2879 + `_) + +- Add ``capfdbinary``, a version of ``capfd`` which returns bytes from + ``readouterr()``. (`#2923 + `_) + +- Add ``capsysbinary`` a version of ``capsys`` which returns bytes from + ``readouterr()``. (`#2934 + `_) + +- Implement feature to skip ``setup.py`` files when run with + ``--doctest-modules``. (`#502 + `_) + + +Bug Fixes +--------- + +- Resume output capturing after ``capsys/capfd.disabled()`` context manager. + (`#1993 `_) + +- ``pytest_fixture_setup`` and ``pytest_fixture_post_finalizer`` hooks are now + called for all ``conftest.py`` files. (`#2124 + `_) + +- If an exception happens while loading a plugin, pytest no longer hides the + original traceback. In Python 2 it will show the original traceback with a new + message that explains in which plugin. In Python 3 it will show 2 canonized + exceptions, the original exception while loading the plugin in addition to an + exception that pytest throws about loading a plugin. (`#2491 + `_) + +- ``capsys`` and ``capfd`` can now be used by other fixtures. (`#2709 + `_) + +- Internal ``pytester`` plugin properly encodes ``bytes`` arguments to + ``utf-8``. 
(`#2738 `_) + +- ``testdir`` now uses use the same method used by ``tmpdir`` to create its + temporary directory. This changes the final structure of the ``testdir`` + directory slightly, but should not affect usage in normal scenarios and + avoids a number of potential problems. (`#2751 + `_) + +- Pytest no longer complains about warnings with unicode messages being + non-ascii compatible even for ascii-compatible messages. As a result of this, + warnings with unicode messages are converted first to an ascii representation + for safety. (`#2809 `_) + +- Change return value of pytest command when ``--maxfail`` is reached from + ``2`` (interrupted) to ``1`` (failed). (`#2845 + `_) + +- Fix issue in assertion rewriting which could lead it to rewrite modules which + should not be rewritten. (`#2939 + `_) + +- Handle marks without description in ``pytest.ini``. (`#2942 + `_) + + +Trivial/Internal Changes +------------------------ + +- pytest now depends on `attrs `_ for internal + structures to ease code maintainability. (`#2641 + `_) + +- Refactored internal Python 2/3 compatibility code to use ``six``. (`#2642 + `_) + +- Stop vendoring ``pluggy`` - we're missing out on its latest changes for not + much benefit (`#2719 `_) + +- Internal refactor: simplify ascii string escaping by using the + backslashreplace error handler in newer Python 3 versions. (`#2734 + `_) + +- Remove unnecessary mark evaluator in unittest plugin (`#2767 + `_) + +- Calls to ``Metafunc.addcall`` now emit a deprecation warning. This function + is scheduled to be removed in ``pytest-4.0``. (`#2876 + `_) + +- Internal move of the parameterset extraction to a more maintainable place. + (`#2877 `_) + +- Internal refactoring to simplify scope node lookup. (`#2910 + `_) + +- Configure ``pytest`` to prevent pip from installing pytest in unsupported + Python versions. (`#2922 + `_) + + +Pytest 3.2.5 (2017-11-15) +========================= + +Bug Fixes +--------- + +- Remove ``py<1.5`` restriction from ``pytest`` as this can cause version + conflicts in some installations. (`#2926 + `_) + + +Pytest 3.2.4 (2017-11-13) +========================= + +Bug Fixes +--------- + +- Fix the bug where running with ``--pyargs`` will result in items with + empty ``parent.nodeid`` if run from a different root directory. (`#2775 + `_) + +- Fix issue with ``@pytest.parametrize`` if argnames was specified as keyword arguments. + (`#2819 `_) + +- Strip whitespace from marker names when reading them from INI config. (`#2856 + `_) + +- Show full context of doctest source in the pytest output, if the line number of + failed example in the docstring is < 9. (`#2882 + `_) + +- Match fixture paths against actual path segments in order to avoid matching folders which share a prefix. + (`#2836 `_) + +Improved Documentation +---------------------- + +- Introduce a dedicated section about conftest.py. (`#1505 + `_) + +- Explicitly mention ``xpass`` in the documentation of ``xfail``. (`#1997 + `_) + +- Append example for pytest.param in the example/parametrize document. (`#2658 + `_) + +- Clarify language of proposal for fixtures parameters (`#2893 + `_) + +- List python 3.6 in the documented supported versions in the getting started + document. (`#2903 `_) + +- Clarify the documentation of available fixture scopes. (`#538 + `_) + +- Add documentation about the ``python -m pytest`` invocation adding the + current directory to sys.path. 
(`#911 + `_) + + +Pytest 3.2.3 (2017-10-03) +========================= + +Bug Fixes +--------- + +- Fix crash in tab completion when no prefix is given. (`#2748 + `_) + +- The equality checking function (``__eq__``) of ``MarkDecorator`` returns + ``False`` if one object is not an instance of ``MarkDecorator``. (`#2758 + `_) + +- When running ``pytest --fixtures-per-test``: don't crash if an item has no + _fixtureinfo attribute (e.g. doctests) (`#2788 + `_) + + +Improved Documentation +---------------------- + +- In help text of ``-k`` option, add example of using ``not`` to not select + certain tests whose names match the provided expression. (`#1442 + `_) + +- Add note in ``parametrize.rst`` about calling ``metafunc.parametrize`` + multiple times. (`#1548 `_) + + +Trivial/Internal Changes +------------------------ + +- Set ``xfail_strict=True`` in pytest's own test suite to catch expected + failures as soon as they start to pass. (`#2722 + `_) + +- Fix typo in example of passing a callable to markers (in example/markers.rst) + (`#2765 `_) + + +Pytest 3.2.2 (2017-09-06) +========================= + +Bug Fixes +--------- + +- Calling the deprecated `request.getfuncargvalue()` now shows the source of + the call. (`#2681 `_) + +- Allow tests declared as ``@staticmethod`` to use fixtures. (`#2699 + `_) + +- Fixed edge-case during collection: attributes which raised ``pytest.fail`` + when accessed would abort the entire collection. (`#2707 + `_) + +- Fix ``ReprFuncArgs`` with mixed unicode and UTF-8 args. (`#2731 + `_) + + +Improved Documentation +---------------------- + +- In examples on working with custom markers, add examples demonstrating the + usage of ``pytest.mark.MARKER_NAME.with_args`` in comparison with + ``pytest.mark.MARKER_NAME.__call__`` (`#2604 + `_) + +- In one of the simple examples, use `pytest_collection_modifyitems()` to skip + tests based on a command-line option, allowing its sharing while preventing a + user error when acessing `pytest.config` before the argument parsing. (`#2653 + `_) + + +Trivial/Internal Changes +------------------------ + +- Fixed minor error in 'Good Practices/Manual Integration' code snippet. + (`#2691 `_) + +- Fixed typo in goodpractices.rst. (`#2721 + `_) + +- Improve user guidance regarding ``--resultlog`` deprecation. (`#2739 + `_) + + +Pytest 3.2.1 (2017-08-08) +========================= + +Bug Fixes +--------- + +- Fixed small terminal glitch when collecting a single test item. (`#2579 + `_) + +- Correctly consider ``/`` as the file separator to automatically mark plugin + files for rewrite on Windows. (`#2591 `_) + +- Properly escape test names when setting ``PYTEST_CURRENT_TEST`` environment + variable. (`#2644 `_) + +- Fix error on Windows and Python 3.6+ when ``sys.stdout`` has been replaced + with a stream-like object which does not implement the full ``io`` module + buffer protocol. In particular this affects ``pytest-xdist`` users on the + aforementioned platform. (`#2666 `_) + + +Improved Documentation +---------------------- + +- Explicitly document which pytest features work with ``unittest``. (`#2626 + `_) + + +Pytest 3.2.0 (2017-07-30) +========================= + +Deprecations and Removals +------------------------- + +- ``pytest.approx`` no longer supports ``>``, ``>=``, ``<`` and ``<=`` + operators to avoid surprising/inconsistent behavior. See `the approx docs + `_ for more + information. 
(`#2003 `_) + +- All old-style specific behavior in current classes in the pytest's API is + considered deprecated at this point and will be removed in a future release. + This affects Python 2 users only and in rare situations. (`#2147 + `_) + +- A deprecation warning is now raised when using marks for parameters + in ``pytest.mark.parametrize``. Use ``pytest.param`` to apply marks to + parameters instead. (`#2427 `_) + + +Features +-------- + +- Add support for numpy arrays (and dicts) to approx. (`#1994 + `_) + +- Now test function objects have a ``pytestmark`` attribute containing a list + of marks applied directly to the test function, as opposed to marks inherited + from parent classes or modules. (`#2516 `_) + +- Collection ignores local virtualenvs by default; `--collect-in-virtualenv` + overrides this behavior. (`#2518 `_) + +- Allow class methods decorated as ``@staticmethod`` to be candidates for + collection as a test function. (Only for Python 2.7 and above. Python 2.6 + will still ignore static methods.) (`#2528 `_) + +- Introduce ``mark.with_args`` in order to allow passing functions/classes as + sole argument to marks. (`#2540 `_) + +- New ``cache_dir`` ini option: sets the directory where the contents of the + cache plugin are stored. Directory may be relative or absolute path: if relative path, then + directory is created relative to ``rootdir``, otherwise it is used as is. + Additionally path may contain environment variables which are expanded during + runtime. (`#2543 `_) + +- Introduce the ``PYTEST_CURRENT_TEST`` environment variable that is set with + the ``nodeid`` and stage (``setup``, ``call`` and ``teardown``) of the test + being currently executed. See the `documentation + `_ for more info. (`#2583 `_) + +- Introduced ``@pytest.mark.filterwarnings`` mark which allows overwriting the + warnings filter on a per test, class or module level. See the `docs + `_ for more information. (`#2598 `_) + +- ``--last-failed`` now remembers forever when a test has failed and only + forgets it if it passes again. This makes it easy to fix a test suite by + selectively running files and fixing tests incrementally. (`#2621 + `_) + +- New ``pytest_report_collectionfinish`` hook which allows plugins to add + messages to the terminal reporting after collection has been finished + successfully. (`#2622 `_) + +- Added support for `PEP-415's `_ + ``Exception.__suppress_context__``. Now if a ``raise exception from None`` is + caught by pytest, pytest will no longer chain the context in the test report. + The behavior now matches Python's traceback behavior. (`#2631 + `_) + +- Exceptions raised by ``pytest.fail``, ``pytest.skip`` and ``pytest.xfail`` + now subclass BaseException, making them harder to be caught unintentionally + by normal code. (`#580 `_) + + +Bug Fixes +--------- + +- Set ``stdin`` to a closed ``PIPE`` in ``pytester.py.Testdir.popen()`` for + avoid unwanted interactive ``pdb`` (`#2023 `_) + +- Add missing ``encoding`` attribute to ``sys.std*`` streams when using + ``capsys`` capture mode. (`#2375 `_) + +- Fix terminal color changing to black on Windows if ``colorama`` is imported + in a ``conftest.py`` file. (`#2510 `_) + +- Fix line number when reporting summary of skipped tests. (`#2548 + `_) + +- capture: ensure that EncodedFile.name is a string. (`#2555 + `_) + +- The options ``--fixtures`` and ``--fixtures-per-test`` will now keep + indentation within docstrings. (`#2574 `_) + +- doctests line numbers are now reported correctly, fixing `pytest-sugar#122 + `_. 
(`#2610 + `_) + +- Fix non-determinism in order of fixture collection. Adds new dependency + (ordereddict) for Python 2.6. (`#920 `_) + + +Improved Documentation +---------------------- + +- Clarify ``pytest_configure`` hook call order. (`#2539 + `_) + +- Extend documentation for testing plugin code with the ``pytester`` plugin. + (`#971 `_) + + +Trivial/Internal Changes +------------------------ + +- Update help message for ``--strict`` to make it clear it only deals with + unregistered markers, not warnings. (`#2444 `_) + +- Internal code move: move code for pytest.approx/pytest.raises to own files in + order to cut down the size of python.py (`#2489 `_) + +- Renamed the utility function ``_pytest.compat._escape_strings`` to + ``_ascii_escaped`` to better communicate the function's purpose. (`#2533 + `_) + +- Improve error message for CollectError with skip/skipif. (`#2546 + `_) + +- Emit warning about ``yield`` tests being deprecated only once per generator. + (`#2562 `_) + +- Ensure final collected line doesn't include artifacts of previous write. + (`#2571 `_) + +- Fixed all flake8 errors and warnings. (`#2581 `_) + +- Added ``fix-lint`` tox environment to run automatic pep8 fixes on the code. + (`#2582 `_) + +- Turn warnings into errors in pytest's own test suite in order to catch + regressions due to deprecations more promptly. (`#2588 + `_) + +- Show multiple issue links in CHANGELOG entries. (`#2620 + `_) + + +Pytest 3.1.3 (2017-07-03) +========================= + +Bug Fixes +--------- + +- Fix decode error in Python 2 for doctests in docstrings. (`#2434 + `_) + +- Exceptions raised during teardown by finalizers are now suppressed until all + finalizers are called, with the initial exception reraised. (`#2440 + `_) + +- Fix incorrect "collected items" report when specifying tests on the command- + line. (`#2464 `_) + +- ``deprecated_call`` in context-manager form now captures deprecation warnings + even if the same warning has already been raised. Also, ``deprecated_call`` + will always produce the same error message (previously it would produce + different messages in context-manager vs. function-call mode). (`#2469 + `_) + +- Fix issue where paths collected by pytest could have triple leading ``/`` + characters. (`#2475 `_) + +- Fix internal error when trying to detect the start of a recursive traceback. + (`#2486 `_) + + +Improved Documentation +---------------------- + +- Explicitly state for which hooks the calls stop after the first non-None + result. (`#2493 `_) + + +Trivial/Internal Changes +------------------------ + +- Create invoke tasks for updating the vendored packages. (`#2474 + `_) + +- Update copyright dates in LICENSE, README.rst and in the documentation. + (`#2499 `_) + + Pytest 3.1.2 (2017-06-08) ========================= @@ -1690,7 +2717,7 @@ time or change existing behaviors in order to make them less surprising/more use - fix issue655: work around different ways that cause python2/3 to leak sys.exc_info into fixtures/tests causing failures in 3rd party code -- fix issue615: assertion re-writing did not correctly escape % signs +- fix issue615: assertion rewriting did not correctly escape % signs when formatting boolean operations, which tripped over mixing booleans with modulo operators. Thanks to Tom Viner for the report, triaging and fix. 
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index edf71dad7..79be79fa6 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -34,13 +34,13 @@ If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting, - specifically Python interpreter version, - installed libraries and pytest version. + specifically the Python interpreter version, installed libraries, and pytest + version. * Detailed steps to reproduce the bug. -If you can write a demonstration test that currently fails but should pass (xfail), -that is a very useful commit to make as well, even if you can't find how -to fix the bug yet. +If you can write a demonstration test that currently fails but should pass +(xfail), that is a very useful commit to make as well, even if you cannot +fix the bug itself. .. _fixbugs: @@ -49,7 +49,7 @@ Fix bugs -------- Look through the GitHub issues for bugs. Here is a filter you can use: -https://github.com/pytest-dev/pytest/labels/bug +https://github.com/pytest-dev/pytest/labels/type%3A%20bug :ref:`Talk ` to developers to find out how you can fix specific bugs. @@ -120,7 +120,7 @@ the following: - PyPI presence with a ``setup.py`` that contains a license, ``pytest-`` prefixed name, version number, authors, short and long description. -- a ``tox.ini`` for running tests using `tox `_. +- a ``tox.ini`` for running tests using `tox `_. - a ``README.txt`` describing how to use the plugin and on which platforms it runs. @@ -158,19 +158,41 @@ As stated, the objective is to share maintenance and avoid "plugin-abandon". .. _`pull requests`: .. _pull-requests: -Preparing Pull Requests on GitHub ---------------------------------- +Preparing Pull Requests +----------------------- -.. note:: - What is a "pull request"? It informs project's core developers about the - changes you want to review and merge. Pull requests are stored on - `GitHub servers `_. - Once you send a pull request, we can discuss its potential modifications and - even add more commits to it later on. +Short version +~~~~~~~~~~~~~ -There's an excellent tutorial on how Pull Requests work in the -`GitHub Help Center `_, -but here is a simple overview: +#. Fork the repository; +#. Target ``master`` for bugfixes and doc changes; +#. Target ``features`` for new features or functionality changes. +#. Follow **PEP-8**. There's a ``tox`` command to help fixing it: ``tox -e fix-lint``. +#. Tests are run using ``tox``:: + + tox -e linting,py27,py36 + + The test environments above are usually enough to cover most cases locally. + +#. Write a ``changelog`` entry: ``changelog/2574.bugfix``, use issue id number + and one of ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or + ``trivial`` for the issue type. +#. Unless your change is a trivial or a documentation fix (e.g., a typo or reword of a small section) please + add yourself to the ``AUTHORS`` file, in alphabetical order; + + +Long version +~~~~~~~~~~~~ + +What is a "pull request"? It informs the project's core developers about the +changes you want to review and merge. Pull requests are stored on +`GitHub servers `_. +Once you send a pull request, we can discuss its potential modifications and +even add more commits to it later on. There's an excellent tutorial on how Pull +Requests work in the +`GitHub Help Center `_. + +Here is a simple overview, with pytest-specific bits: #. Fork the `pytest GitHub repository `__. 
It's @@ -214,12 +236,18 @@ but here is a simple overview: This command will run tests via the "tox" tool against Python 2.7 and 3.6 and also perform "lint" coding-style checks. -#. You can now edit your local working copy. +#. You can now edit your local working copy. Please follow PEP-8. You can now make the changes you want and run the tests again as necessary. - To run tests on Python 2.7 and pass options to pytest (e.g. enter pdb on - failure) to pytest you can do:: + If you have too much linting errors, try running:: + + $ tox -e fix-lint + + To fix pep8 related errors. + + You can pass different options to ``tox``. For example, to run tests on Python 2.7 and pass options to pytest + (e.g. enter pdb on failure) to pytest you can do:: $ tox -e py27 -- --pdb @@ -232,9 +260,11 @@ but here is a simple overview: $ git commit -a -m "" $ git push -u - Make sure you add a message to ``CHANGELOG.rst`` and add yourself to - ``AUTHORS``. If you are unsure about either of these steps, submit your - pull request and we'll help you fix it up. +#. Create a new changelog entry in ``changelog``. The file should be named ``.``, + where *issueid* is the number of the issue related to the change and *type* is one of + ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or ``trivial``. + +#. Add yourself to ``AUTHORS`` file if not there yet, in alphabetical order. #. Finally, submit a pull request through the GitHub website using this data:: @@ -246,3 +276,15 @@ but here is a simple overview: base: features # if it's a feature +Joining the Development Team +---------------------------- + +Anyone who has successfully seen through a pull request which did not +require any extra work from the development team to merge will +themselves gain commit access if they so wish (if we forget to ask please send a friendly +reminder). This does not mean your workflow to contribute changes, +everyone goes through the same pull-request-and-review process and +no-one merges their own pull requests unless already approved. It does however mean you can +participate in the development process more fully since you can merge +pull requests from other contributors yourself after having reviewed +them. diff --git a/HOWTORELEASE.rst b/HOWTORELEASE.rst index f094e369a..97bddf720 100644 --- a/HOWTORELEASE.rst +++ b/HOWTORELEASE.rst @@ -1,5 +1,9 @@ -How to release pytest --------------------------------------------- +Release Procedure +----------------- + +Our current policy for releasing is to aim for a bugfix every few weeks and a minor release every 2-3 months. The idea +is to get fixes and new features out instead of trying to cram a ton of features into a release and by consequence +taking a lot of time to make a new one. .. important:: @@ -8,7 +12,7 @@ How to release pytest #. Install development dependencies in a virtual environment with:: - pip3 install -r tasks/requirements.txt + pip3 install -U -r tasks/requirements.txt #. Create a branch ``release-X.Y.Z`` with the version for the release. @@ -18,44 +22,28 @@ How to release pytest Ensure your are in a clean work tree. -#. Generate docs, changelog, announcements and upload a package to - your ``devpi`` staging server:: +#. Generate docs, changelog, announcements and a **local** tag:: - invoke generate.pre_release --password - - If ``--password`` is not given, it is assumed the user is already logged in ``devpi``. - If you don't have an account, please ask for one. + invoke generate.pre-release #. Open a PR for this branch targeting ``master``. -#. Test the package +#. 
After all tests pass and the PR has been approved, publish to PyPI by pushing the tag:: - * **Manual method** + git push git@github.com:pytest-dev/pytest.git - Run from multiple machines:: + Wait for the deploy to complete, then make sure it is `available on PyPI `_. - devpi use https://devpi.net/USER/dev - devpi test pytest==VERSION +#. Send an email announcement with the contents from:: - Check that tests pass for relevant combinations with:: + doc/en/announce/release-.rst - devpi list pytest + To the following mailing lists: - * **CI servers** + * pytest-dev@python.org (all releases) + * python-announce-list@python.org (all releases) + * testing-in-python@lists.idyll.org (only major/minor releases) - Configure a repository as per-instructions on - devpi-cloud-test_ to test the package on Travis_ and AppVeyor_. - All test environments should pass. + And announce it on `Twitter `_ with the ``#pytest`` hashtag. -#. Publish to PyPI:: - - invoke generate.publish_release - - where PYPI_NAME is the name of pypi.python.org as configured in your ``~/.pypirc`` - file `for devpi `_. - -#. After a minor/major release, merge ``features`` into ``master`` and push (or open a PR). - -.. _devpi-cloud-test: https://github.com/obestwalter/devpi-cloud-test -.. _AppVeyor: https://www.appveyor.com/ -.. _Travis: https://travis-ci.org +#. After a minor/major release, merge ``release-X.Y.Z`` into ``master`` and push (or open a PR). diff --git a/README.rst b/README.rst index cf2304ed8..b2ed1e140 100644 --- a/README.rst +++ b/README.rst @@ -23,6 +23,9 @@ .. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true :target: https://ci.appveyor.com/project/pytestbot/pytest +.. image:: https://www.codetriage.com/pytest-dev/pytest/badges/users.svg + :target: https://www.codetriage.com/pytest-dev/pytest + The ``pytest`` framework makes it easy to write small tests, yet scales to support complex functional testing for applications and libraries. @@ -76,9 +79,9 @@ Features - Can run `unittest `_ (or trial), `nose `_ test suites out of the box; -- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested); +- Python 2.7, Python 3.4+, PyPy 2.3, Jython 2.5 (untested); -- Rich plugin architecture, with over 150+ `external plugins `_ and thriving community; +- Rich plugin architecture, with over 315+ `external plugins `_ and thriving community; Documentation diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py index 8c93e4c92..ea8c98c7f 100644 --- a/_pytest/_argcomplete.py +++ b/_pytest/_argcomplete.py @@ -4,9 +4,6 @@ needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail to find the magic string, so _ARGCOMPLETE env. var is never set, and this does not need special code. -argcomplete does not support python 2.5 (although the changes for that -are minor). - Function try_argcomplete(parser) should be called directly before the call to ArgumentParser.parse_args(). @@ -62,21 +59,24 @@ import sys import os from glob import glob -class FastFilesCompleter: + +class FastFilesCompleter(object): 'Fast file completer class' + def __init__(self, directories=True): self.directories = directories def __call__(self, prefix, **kwargs): """only called on non option completions""" - if os.path.sep in prefix[1:]: # + if os.path.sep in prefix[1:]: prefix_dir = len(os.path.dirname(prefix) + os.path.sep) else: prefix_dir = 0 completion = [] globbed = [] if '*' not in prefix and '?' 
not in prefix: - if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + # we are on unix, otherwise no bash + if not prefix or prefix[-1] == os.path.sep: globbed.extend(glob(prefix + '.*')) prefix += '*' globbed.extend(glob(prefix)) @@ -96,7 +96,8 @@ if os.environ.get('_ARGCOMPLETE'): filescompleter = FastFilesCompleter() def try_argcomplete(parser): - argcomplete.autocomplete(parser) + argcomplete.autocomplete(parser, always_complete_options=False) else: - def try_argcomplete(parser): pass + def try_argcomplete(parser): + pass filescompleter = None diff --git a/_pytest/_code/_py2traceback.py b/_pytest/_code/_py2traceback.py index d45ee01fa..5aacf0a42 100644 --- a/_pytest/_code/_py2traceback.py +++ b/_pytest/_code/_py2traceback.py @@ -5,6 +5,7 @@ from __future__ import absolute_import, division, print_function import types + def format_exception_only(etype, value): """Format the exception part of a traceback. @@ -30,7 +31,7 @@ def format_exception_only(etype, value): # would throw another exception and mask the original problem. if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or - etype is None or type(etype) is str): + etype is None or type(etype) is str): return [_format_final_exc_line(etype, value)] stype = etype.__name__ @@ -62,6 +63,7 @@ def format_exception_only(etype, value): lines.append(_format_final_exc_line(stype, value)) return lines + def _format_final_exc_line(etype, value): """Return a list of a single line -- normal case for format_exception_only""" valuestr = _some_str(value) @@ -71,6 +73,7 @@ def _format_final_exc_line(etype, value): line = "%s: %s\n" % (etype, valuestr) return line + def _some_str(value): try: return unicode(value) diff --git a/_pytest/_code/code.py b/_pytest/_code/code.py index 5b7cc4191..76e143774 100644 --- a/_pytest/_code/code.py +++ b/_pytest/_code/code.py @@ -1,6 +1,10 @@ from __future__ import absolute_import, division, print_function +import inspect import sys +import traceback from inspect import CO_VARARGS, CO_VARKEYWORDS + +import attr import re from weakref import ref from _pytest.compat import _PY2, _PY3, PY35, safe_str @@ -8,8 +12,6 @@ from _pytest.compat import _PY2, _PY3, PY35, safe_str import py builtin_repr = repr -reprlib = py.builtin._tryimport('repr', 'reprlib') - if _PY3: from traceback import format_exception_only else: @@ -18,6 +20,7 @@ else: class Code(object): """ wrapper around Python code objects """ + def __init__(self, rawcode): if not hasattr(rawcode, "co_filename"): rawcode = getrawcode(rawcode) @@ -26,7 +29,7 @@ class Code(object): self.firstlineno = rawcode.co_firstlineno - 1 self.name = rawcode.co_name except AttributeError: - raise TypeError("not a code object: %r" %(rawcode,)) + raise TypeError("not a code object: %r" % (rawcode,)) self.raw = rawcode def __eq__(self, other): @@ -82,6 +85,7 @@ class Code(object): argcount += raw.co_flags & CO_VARKEYWORDS return raw.co_varnames[:argcount] + class Frame(object): """Wrapper around a Python frame holding f_locals and f_globals in which expressions can be evaluated.""" @@ -119,7 +123,7 @@ class Frame(object): """ f_locals = self.f_locals.copy() f_locals.update(vars) - py.builtin.exec_(code, self.f_globals, f_locals ) + py.builtin.exec_(code, self.f_globals, f_locals) def repr(self, object): """ return a 'safe' (non-recursive, one-line) string repr for 'object' @@ -143,6 +147,7 @@ class Frame(object): pass # this can occur when using Psyco return retval + class TracebackEntry(object): """ a single entry in a traceback """ @@ -168,7 
+173,7 @@ class TracebackEntry(object): return self.lineno - self.frame.code.firstlineno def __repr__(self): - return "" %(self.frame.code.path, self.lineno+1) + return "" % (self.frame.code.path, self.lineno + 1) @property def statement(self): @@ -232,7 +237,7 @@ class TracebackEntry(object): except KeyError: return False - if py.builtin.callable(tbh): + if callable(tbh): return tbh(None if self._excinfo is None else self._excinfo()) else: return tbh @@ -247,19 +252,21 @@ class TracebackEntry(object): line = str(self.statement).lstrip() except KeyboardInterrupt: raise - except: + except: # noqa line = "???" - return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line) def name(self): return self.frame.code.raw.co_name name = property(name, None, None, "co_name of underlaying code") + class Traceback(list): """ Traceback objects encapsulate and offer higher level access to Traceback entries. """ Entry = TracebackEntry + def __init__(self, tb, excinfo=None): """ initialize from given python traceback object and ExceptionInfo """ self._excinfo = excinfo @@ -289,7 +296,7 @@ class Traceback(list): (excludepath is None or not hasattr(codepath, 'relto') or not codepath.relto(excludepath)) and (lineno is None or x.lineno == lineno) and - (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): return Traceback(x._rawentry, self._excinfo) return self @@ -315,7 +322,7 @@ class Traceback(list): """ return last non-hidden traceback entry that lead to the exception of a traceback. """ - for i in range(-1, -len(self)-1, -1): + for i in range(-1, -len(self) - 1, -1): entry = self[i] if not entry.ishidden(): return entry @@ -330,25 +337,26 @@ class Traceback(list): # id for the code.raw is needed to work around # the strange metaprogramming in the decorator lib from pypi # which generates code objects that have hash/value equality - #XXX needs a test + # XXX needs a test key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno - #print "checking for recursion at", key - l = cache.setdefault(key, []) - if l: + # print "checking for recursion at", key + values = cache.setdefault(key, []) + if values: f = entry.frame loc = f.f_locals - for otherloc in l: + for otherloc in values: if f.is_true(f.eval(co_equal, - __recursioncache_locals_1=loc, - __recursioncache_locals_2=otherloc)): + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): return i - l.append(entry.frame.f_locals) + values.append(entry.frame.f_locals) return None co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', '?', 'eval') + class ExceptionInfo(object): """ wraps sys.exc_info() objects and offers help for navigating the traceback. @@ -405,10 +413,10 @@ class ExceptionInfo(object): exconly = self.exconly(tryshort=True) entry = self.traceback.getcrashentry() path, lineno = entry.frame.code.raw.co_filename, entry.lineno - return ReprFileLocation(path, lineno+1, exconly) + return ReprFileLocation(path, lineno + 1, exconly) def getrepr(self, showlocals=False, style="long", - abspath=False, tbfilter=True, funcargs=False): + abspath=False, tbfilter=True, funcargs=False): """ return str()able representation of this exception info. 
showlocals: show locals per traceback entry style: long|short|no|native traceback style @@ -418,14 +426,14 @@ class ExceptionInfo(object): """ if style == 'native': return ReprExceptionInfo(ReprTracebackNative( - py.std.traceback.format_exception( + traceback.format_exception( self.type, self.value, self.traceback[0]._rawentry, )), self._getreprcrash()) fmt = FormattedExcinfo(showlocals=showlocals, style=style, - abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) return fmt.repr_excinfo(self) def __str__(self): @@ -452,32 +460,32 @@ class ExceptionInfo(object): return True +@attr.s class FormattedExcinfo(object): """ presenting information about failing Functions and Generators. """ # for traceback entries flow_marker = ">" fail_marker = "E" - def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False): - self.showlocals = showlocals - self.style = style - self.tbfilter = tbfilter - self.funcargs = funcargs - self.abspath = abspath - self.astcache = {} + showlocals = attr.ib(default=False) + style = attr.ib(default="long") + abspath = attr.ib(default=True) + tbfilter = attr.ib(default=True) + funcargs = attr.ib(default=False) + astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False) def _getindent(self, source): # figure out indent for given source try: - s = str(source.getstatement(len(source)-1)) + s = str(source.getstatement(len(source) - 1)) except KeyboardInterrupt: raise - except: + except: # noqa try: s = str(source[-1]) except KeyboardInterrupt: raise - except: + except: # noqa return 0 return 4 + (len(s) - len(s.lstrip())) @@ -513,7 +521,7 @@ class FormattedExcinfo(object): for line in source.lines[:line_index]: lines.append(space_prefix + line) lines.append(self.flow_marker + " " + source.lines[line_index]) - for line in source.lines[line_index+1:]: + for line in source.lines[line_index + 1:]: lines.append(space_prefix + line) if excinfo is not None: indent = 4 if short else self._getindent(source) @@ -546,13 +554,13 @@ class FormattedExcinfo(object): # _repr() function, which is only reprlib.Repr in # disguise, so is very configurable. 
str_repr = self._saferepr(value) - #if len(str_repr) < 70 or not isinstance(value, + # if len(str_repr) < 70 or not isinstance(value, # (list, tuple, dict)): - lines.append("%-10s = %s" %(name, str_repr)) - #else: + lines.append("%-10s = %s" % (name, str_repr)) + # else: # self._line("%-10s =\\" % (name,)) # # XXX - # py.std.pprint.pprint(value, stream=self.excinfowriter) + # pprint.pprint(value, stream=self.excinfowriter) return ReprLocals(lines) def repr_traceback_entry(self, entry, excinfo=None): @@ -575,14 +583,14 @@ class FormattedExcinfo(object): s = self.get_source(source, line_index, excinfo, short=short) lines.extend(s) if short: - message = "in %s" %(entry.name) + message = "in %s" % (entry.name) else: message = excinfo and excinfo.typename or "" path = self._makepath(entry.path) - filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + filelocrepr = ReprFileLocation(path, entry.lineno + 1, message) localsrepr = None if not short: - localsrepr = self.repr_locals(entry.locals) + localsrepr = self.repr_locals(entry.locals) return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) if excinfo: lines.extend(self.get_exconly(excinfo, indent=4)) @@ -645,7 +653,7 @@ class FormattedExcinfo(object): traceback = traceback[:recursionindex + 1] else: extraline = None - + return traceback, extraline def repr_excinfo(self, excinfo): @@ -665,7 +673,7 @@ class FormattedExcinfo(object): else: # fallback to native repr if the exception doesn't have a traceback: # ExceptionInfo objects require a full traceback to work - reprtraceback = ReprTracebackNative(py.std.traceback.format_exception(type(e), e, None)) + reprtraceback = ReprTracebackNative(traceback.format_exception(type(e), e, None)) reprcrash = None repr_chain += [(reprtraceback, reprcrash, descr)] @@ -673,7 +681,7 @@ class FormattedExcinfo(object): e = e.__cause__ excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None descr = 'The above exception was the direct cause of the following exception:' - elif e.__context__ is not None: + elif (e.__context__ is not None and not e.__suppress_context__): e = e.__context__ excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None descr = 'During handling of the above exception, another exception occurred:' @@ -699,7 +707,7 @@ class TerminalRepr(object): return io.getvalue().strip() def __repr__(self): - return "<%s instance at %0x>" %(self.__class__, id(self)) + return "<%s instance at %0x>" % (self.__class__, id(self)) class ExceptionRepr(TerminalRepr): @@ -743,6 +751,7 @@ class ReprExceptionInfo(ExceptionRepr): self.reprtraceback.toterminal(tw) super(ReprExceptionInfo, self).toterminal(tw) + class ReprTraceback(TerminalRepr): entrysep = "_ " @@ -758,7 +767,7 @@ class ReprTraceback(TerminalRepr): tw.line("") entry.toterminal(tw) if i < len(self.reprentries) - 1: - next_entry = self.reprentries[i+1] + next_entry = self.reprentries[i + 1] if entry.style == "long" or \ entry.style == "short" and next_entry.style == "long": tw.sep(self.entrysep) @@ -766,12 +775,14 @@ class ReprTraceback(TerminalRepr): if self.extraline: tw.line(self.extraline) + class ReprTracebackNative(ReprTraceback): def __init__(self, tblines): self.style = "native" self.reprentries = [ReprEntryNative(tblines)] self.extraline = None + class ReprEntryNative(TerminalRepr): style = "native" @@ -781,6 +792,7 @@ class ReprEntryNative(TerminalRepr): def toterminal(self, tw): tw.write("".join(self.lines)) + class ReprEntry(TerminalRepr): localssep = "_ " @@ -797,7 +809,7 
@@ class ReprEntry(TerminalRepr): for line in self.lines: red = line.startswith("E ") tw.line(line, bold=True, red=red) - #tw.line("") + # tw.line("") return if self.reprfuncargs: self.reprfuncargs.toterminal(tw) @@ -805,7 +817,7 @@ class ReprEntry(TerminalRepr): red = line.startswith("E ") tw.line(line, bold=True, red=red) if self.reprlocals: - #tw.sep(self.localssep, "Locals") + # tw.sep(self.localssep, "Locals") tw.line("") self.reprlocals.toterminal(tw) if self.reprfileloc: @@ -818,6 +830,7 @@ class ReprEntry(TerminalRepr): self.reprlocals, self.reprfileloc) + class ReprFileLocation(TerminalRepr): def __init__(self, path, lineno, message): self.path = str(path) @@ -834,6 +847,7 @@ class ReprFileLocation(TerminalRepr): tw.write(self.path, bold=True, red=True) tw.line(":%s: %s" % (self.lineno, msg)) + class ReprLocals(TerminalRepr): def __init__(self, lines): self.lines = lines @@ -842,6 +856,7 @@ class ReprLocals(TerminalRepr): for line in self.lines: tw.line(line) + class ReprFuncArgs(TerminalRepr): def __init__(self, args): self.args = args @@ -850,11 +865,11 @@ class ReprFuncArgs(TerminalRepr): if self.args: linesofar = "" for name, value in self.args: - ns = "%s = %s" %(name, value) + ns = "%s = %s" % (safe_str(name), safe_str(value)) if len(ns) + len(linesofar) + 2 > tw.fullwidth: if linesofar: tw.line(linesofar) - linesofar = ns + linesofar = ns else: if linesofar: linesofar += ", " + ns @@ -875,7 +890,7 @@ def getrawcode(obj, trycall=True): obj = getattr(obj, 'f_code', obj) obj = getattr(obj, '__code__', obj) if trycall and not hasattr(obj, 'co_firstlineno'): - if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj): + if hasattr(obj, '__call__') and not inspect.isclass(obj): x = getrawcode(obj.__call__, trycall=False) if hasattr(x, 'co_firstlineno'): return x diff --git a/_pytest/_code/source.py b/_pytest/_code/source.py index 8e6148410..cb5e13f05 100644 --- a/_pytest/_code/source.py +++ b/_pytest/_code/source.py @@ -1,17 +1,16 @@ from __future__ import absolute_import, division, generators, print_function +import ast +from ast import PyCF_ONLY_AST as _AST_FLAG from bisect import bisect_right +import linecache import sys -import inspect, tokenize +import six +import inspect +import tokenize import py -cpy_compile = compile -try: - import _ast - from _ast import PyCF_ONLY_AST as _AST_FLAG -except ImportError: - _AST_FLAG = 0 - _ast = None +cpy_compile = compile class Source(object): @@ -19,6 +18,7 @@ class Source(object): possibly deindenting it. 
""" _compilecounter = 0 + def __init__(self, *parts, **kwargs): self.lines = lines = [] de = kwargs.get('deindent', True) @@ -26,11 +26,11 @@ class Source(object): for part in parts: if not part: partlines = [] - if isinstance(part, Source): + elif isinstance(part, Source): partlines = part.lines elif isinstance(part, (tuple, list)): partlines = [x.rstrip("\n") for x in part] - elif isinstance(part, py.builtin._basestring): + elif isinstance(part, six.string_types): partlines = part.split('\n') if rstrip: while partlines: @@ -73,7 +73,7 @@ class Source(object): start, end = 0, len(self) while start < end and not self.lines[start].strip(): start += 1 - while end > start and not self.lines[end-1].strip(): + while end > start and not self.lines[end - 1].strip(): end -= 1 source = Source() source.lines[:] = self.lines[start:end] @@ -86,8 +86,8 @@ class Source(object): before = Source(before) after = Source(after) newsource = Source() - lines = [ (indent + line) for line in self.lines] - newsource.lines = before.lines + lines + after.lines + lines = [(indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines return newsource def indent(self, indent=' ' * 4): @@ -95,17 +95,17 @@ class Source(object): all lines indented by the given indent-string. """ newsource = Source() - newsource.lines = [(indent+line) for line in self.lines] + newsource.lines = [(indent + line) for line in self.lines] return newsource - def getstatement(self, lineno, assertion=False): + def getstatement(self, lineno): """ return Source statement which contains the given linenumber (counted from 0). """ - start, end = self.getstatementrange(lineno, assertion) + start, end = self.getstatementrange(lineno) return self[start:end] - def getstatementrange(self, lineno, assertion=False): + def getstatementrange(self, lineno): """ return (start, end) tuple which spans the minimal statement region which containing the given lineno. """ @@ -131,20 +131,15 @@ class Source(object): """ return True if source is parseable, heuristically deindenting it by default. 
""" - try: - import parser - except ImportError: - syntax_checker = lambda x: compile(x, 'asd', 'exec') - else: - syntax_checker = parser.suite + from parser import suite as syntax_checker if deindent: source = str(self.deindent()) else: source = str(self) try: - #compile(source+'\n', "x", "exec") - syntax_checker(source+'\n') + # compile(source+'\n', "x", "exec") + syntax_checker(source + '\n') except KeyboardInterrupt: raise except Exception: @@ -164,8 +159,8 @@ class Source(object): """ if not filename or py.path.local(filename).check(file=0): if _genframe is None: - _genframe = sys._getframe(1) # the caller - fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno + _genframe = sys._getframe(1) # the caller + fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno base = "<%d-codegen " % self._compilecounter self.__class__._compilecounter += 1 if not filename: @@ -180,7 +175,7 @@ class Source(object): # re-represent syntax errors from parsing python strings msglines = self.lines[:ex.lineno] if ex.offset: - msglines.append(" "*ex.offset + '^') + msglines.append(" " * ex.offset + '^') msglines.append("(code was compiled probably from here: %s)" % filename) newex = SyntaxError('\n'.join(msglines)) newex.offset = ex.offset @@ -191,24 +186,24 @@ class Source(object): if flag & _AST_FLAG: return co lines = [(x + "\n") for x in self.lines] - py.std.linecache.cache[filename] = (1, None, lines, filename) + linecache.cache[filename] = (1, None, lines, filename) return co # # public API shortcut functions # -def compile_(source, filename=None, mode='exec', flags= - generators.compiler_flag, dont_inherit=0): + +def compile_(source, filename=None, mode='exec', flags=generators.compiler_flag, dont_inherit=0): """ compile the given source to a raw code object, and maintain an internal cache which allows later retrieval of the source code for the code object and any recursively created code objects. """ - if _ast is not None and isinstance(source, _ast.AST): + if isinstance(source, ast.AST): # XXX should Source support having AST? return cpy_compile(source, filename, mode, flags, dont_inherit) - _genframe = sys._getframe(1) # the caller + _genframe = sys._getframe(1) # the caller s = Source(source) co = s.compile(filename, mode, flags, _genframe=_genframe) return co @@ -218,13 +213,12 @@ def getfslineno(obj): """ Return source location (path, lineno) for the given object. 
If the source cannot be determined return ("", -1) """ - import _pytest._code + from .code import Code try: - code = _pytest._code.Code(obj) + code = Code(obj) except TypeError: try: - fn = (py.std.inspect.getsourcefile(obj) or - py.std.inspect.getfile(obj)) + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) except TypeError: return "", -1 @@ -245,12 +239,13 @@ def getfslineno(obj): # helper functions # + def findsource(obj): try: - sourcelines, lineno = py.std.inspect.findsource(obj) + sourcelines, lineno = inspect.findsource(obj) except py.builtin._sysex: raise - except: + except: # noqa return None, -1 source = Source() source.lines = [line.rstrip() for line in sourcelines] @@ -258,8 +253,8 @@ def findsource(obj): def getsource(obj, **kwargs): - import _pytest._code - obj = _pytest._code.getrawcode(obj) + from .code import getrawcode + obj = getrawcode(obj) try: strsrc = inspect.getsource(obj) except IndentationError: @@ -274,7 +269,7 @@ def deindent(lines, offset=None): line = line.expandtabs() s = line.lstrip() if s: - offset = len(line)-len(s) + offset = len(line) - len(s) break else: offset = 0 @@ -285,19 +280,17 @@ def deindent(lines, offset=None): def readline_generator(lines): for line in lines: yield line + '\n' - while True: - yield '' it = readline_generator(lines) try: for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)): if sline > len(lines): - break # End of input reached + break # End of input reached if sline > len(newlines): line = lines[sline - 1].expandtabs() if line.lstrip() and line[:offset].isspace(): - line = line[offset:] # Deindent + line = line[offset:] # Deindent newlines.append(line) for i in range(sline, eline): @@ -315,35 +308,30 @@ def get_statement_startend2(lineno, node): import ast # flatten all statements and except handlers into one lineno-list # AST's line numbers start indexing at 1 - l = [] + values = [] for x in ast.walk(node): - if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler): - l.append(x.lineno - 1) - for name in "finalbody", "orelse": + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): val = getattr(x, name, None) if val: # treat the finally/orelse part as its own statement - l.append(val[0].lineno - 1 - 1) - l.sort() - insert_index = bisect_right(l, lineno) - start = l[insert_index - 1] - if insert_index >= len(l): + values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): end = None else: - end = l[insert_index] + end = values[insert_index] return start, end def getstatementrange_ast(lineno, source, assertion=False, astnode=None): if astnode is None: content = str(source) - if sys.version_info < (2,7): - content += "\n" - try: - astnode = compile(content, "source", "exec", 1024) # 1024 for AST - except ValueError: - start, end = getstatementrange_old(lineno, source, assertion) - return None, start, end + astnode = compile(content, "source", "exec", 1024) # 1024 for AST + start, end = get_statement_startend2(lineno, astnode) # we need to correct the end: # - ast-parsing strips comments @@ -375,40 +363,3 @@ def getstatementrange_ast(lineno, source, assertion=False, astnode=None): else: break return astnode, start, end - - -def getstatementrange_old(lineno, source, assertion=False): - """ return (start, end) tuple which spans the minimal - statement region which containing the given lineno. 
- raise an IndexError if no such statementrange can be found. - """ - # XXX this logic is only used on python2.4 and below - # 1. find the start of the statement - from codeop import compile_command - for start in range(lineno, -1, -1): - if assertion: - line = source.lines[start] - # the following lines are not fully tested, change with care - if 'super' in line and 'self' in line and '__init__' in line: - raise IndexError("likely a subclass") - if "assert" not in line and "raise" not in line: - continue - trylines = source.lines[start:lineno+1] - # quick hack to prepare parsing an indented line with - # compile_command() (which errors on "return" outside defs) - trylines.insert(0, 'def xxx():') - trysource = '\n '.join(trylines) - # ^ space here - try: - compile_command(trysource) - except (SyntaxError, OverflowError, ValueError): - continue - - # 2. find the end of the statement - for end in range(lineno+1, len(source)+1): - trysource = source[start:end] - if trysource.isparseable(): - return start, end - raise SyntaxError("no valid source range around line %d " % (lineno,)) - - diff --git a/_pytest/_pluggy.py b/_pytest/_pluggy.py deleted file mode 100644 index 6cc1d3d54..000000000 --- a/_pytest/_pluggy.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -imports symbols from vendored "pluggy" if available, otherwise -falls back to importing "pluggy" from the default namespace. -""" -from __future__ import absolute_import, division, print_function -try: - from _pytest.vendored_packages.pluggy import * # noqa - from _pytest.vendored_packages.pluggy import __version__ # noqa -except ImportError: - from pluggy import * # noqa - from pluggy import __version__ # noqa diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py index acb034d86..39c57c5f3 100644 --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -2,8 +2,8 @@ support for presenting detailed information in failing assertions. """ from __future__ import absolute_import, division, print_function -import py import sys +import six from _pytest.assertion import util from _pytest.assertion import rewrite @@ -25,7 +25,6 @@ def pytest_addoption(parser): expression information.""") - def register_assert_rewrite(*names): """Register one or more module names to be rewritten on import. @@ -57,7 +56,7 @@ class DummyRewriteHook(object): pass -class AssertionState: +class AssertionState(object): """State for the assertion plugin.""" def __init__(self, config, mode): @@ -68,10 +67,8 @@ class AssertionState: def install_importhook(config): """Try to install the rewrite hook, raise SystemError if it fails.""" - # Both Jython and CPython 2.6.0 have AST bugs that make the - # assertion rewriting hook malfunction. - if (sys.platform.startswith('java') or - sys.version_info[:3] == (2, 6, 0)): + # Jython has an AST bug that make the assertion rewriting hook malfunction. 
+ if (sys.platform.startswith('java')): raise SystemError('rewrite not supported') config._assertstate = AssertionState(config, 'rewrite') @@ -127,7 +124,7 @@ def pytest_runtest_setup(item): if new_expl: new_expl = truncate.truncate_if_required(new_expl, item) new_expl = [line.replace("\n", "\\n") for line in new_expl] - res = py.builtin._totext("\n~").join(new_expl) + res = six.text_type("\n~").join(new_expl) if item.config.getvalue("assertmode") == "rewrite": res = res.replace("%", "%%") return res diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py index 6ec54d7e7..0499a792f 100644 --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -1,18 +1,20 @@ """Rewrite assertion AST to produce nice error messages""" from __future__ import absolute_import, division, print_function import ast -import _ast import errno import itertools import imp import marshal import os import re +import six import struct import sys import types +import atomicwrites import py + from _pytest.assertion import util @@ -33,13 +35,13 @@ else: PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." + PYTEST_TAG + PYC_EXT -REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 -if sys.version_info >= (3,5): +if sys.version_info >= (3, 5): ast_Call = ast.Call else: - ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None) + def ast_Call(a, b, c): + return ast.Call(a, b, c, None, None) class AssertionRewritingHook(object): @@ -140,7 +142,7 @@ class AssertionRewritingHook(object): # Probably a SyntaxError in the test. return None if write: - _make_rewritten_pyc(state, source_stat, pyc, co) + _write_pyc(state, co, source_stat, pyc) else: state.trace("found cached rewritten pyc for %r" % (fn,)) self.modules[name] = co, pyc @@ -167,29 +169,31 @@ class AssertionRewritingHook(object): return True for marked in self._must_rewrite: - if name.startswith(marked): + if name == marked or name.startswith(marked + '.'): state.trace("matched marked file %r (from %r)" % (name, marked)) return True return False def mark_rewrite(self, *names): - """Mark import names as needing to be re-written. + """Mark import names as needing to be rewritten. The named module or package as well as any nested modules will - be re-written on import. + be rewritten on import. 
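[Editor's note, not part of the diff] The mark_rewrite() docstring above describes the mechanism behind pytest.register_assert_rewrite(); a hedged usage sketch with a hypothetical module name:

    # conftest.py of a hypothetical plugin distribution
    import pytest

    pytest.register_assert_rewrite("myplugin.helpers")
    # Later imports of myplugin.helpers (and its submodules) are matched by the
    # name/prefix check above and go through the rewriting hook, so bare asserts
    # in the helper module produce detailed failure messages.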
""" - already_imported = set(names).intersection(set(sys.modules)) - if already_imported: - for name in already_imported: - if name not in self._rewritten_names: - self._warn_already_imported(name) + already_imported = (set(names) + .intersection(sys.modules) + .difference(self._rewritten_names)) + for name in already_imported: + if not AssertionRewriter.is_rewrite_disabled( + sys.modules[name].__doc__ or ""): + self._warn_already_imported(name) self._must_rewrite.update(names) def _warn_already_imported(self, name): self.config.warn( 'P1', - 'Module already imported so can not be re-written: %s' % name) + 'Module already imported so cannot be rewritten: %s' % name) def load_module(self, name): # If there is an existing module object named 'fullname' in @@ -209,14 +213,12 @@ class AssertionRewritingHook(object): mod.__cached__ = pyc mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) - except: + except: # noqa if name in sys.modules: del sys.modules[name] raise return sys.modules[name] - - def is_package(self, name): try: fd, fn, desc = imp.find_module(name) @@ -258,22 +260,21 @@ def _write_pyc(state, co, source_stat, pyc): # sometime to be able to use imp.load_compiled to load them. (See # the comment in load_module above.) try: - fp = open(pyc, "wb") - except IOError: - err = sys.exc_info()[1].errno - state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp: + fp.write(imp.get_magic()) + mtime = int(source_stat.mtime) + size = source_stat.size & 0xFFFFFFFF + fp.write(struct.pack(" 0 or item.module != "__future__"): lineno = item.lineno break pos += 1 + else: + lineno = item.lineno imports = [ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases] mod.body[pos:pos] = imports @@ -631,6 +624,10 @@ class AssertionRewriter(ast.NodeVisitor): not isinstance(field, ast.expr)): nodes.append(field) + @staticmethod + def is_rewrite_disabled(docstring): + return "PYTEST_DONT_REWRITE" in docstring + def variable(self): """Get a new variable.""" # Use a character invalid in python identifiers to avoid clashing. @@ -714,7 +711,7 @@ class AssertionRewriter(ast.NodeVisitor): def visit_Assert(self, assert_): """Return the AST statements to replace the ast.Assert instance. - This re-writes the test of an assertion to provide + This rewrites the test of an assertion to provide intermediate values and replace it with an if statement which raises an assertion error with a detailed explanation in case the expression is false. 
@@ -723,7 +720,7 @@ class AssertionRewriter(ast.NodeVisitor): if isinstance(assert_.test, ast.Tuple) and self.config is not None: fslocation = (self.module_path, assert_.lineno) self.config.warn('R1', 'assertion is always true, perhaps ' - 'remove parentheses?', fslocation=fslocation) + 'remove parentheses?', fslocation=fslocation) self.statements = [] self.variables = [] self.variable_counter = itertools.count() @@ -787,7 +784,7 @@ class AssertionRewriter(ast.NodeVisitor): if i: fail_inner = [] # cond is set in a prior loop iteration below - self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -839,7 +836,7 @@ class AssertionRewriter(ast.NodeVisitor): new_kwargs.append(ast.keyword(keyword.arg, res)) if keyword.arg: arg_expls.append(keyword.arg + "=" + expl) - else: ## **args have `arg` keywords with an .arg of None + else: # **args have `arg` keywords with an .arg of None arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) @@ -893,7 +890,6 @@ class AssertionRewriter(ast.NodeVisitor): else: visit_Call = visit_Call_legacy - def visit_Attribute(self, attr): if not isinstance(attr.ctx, ast.Load): return self.generic_visit(attr) @@ -907,7 +903,7 @@ class AssertionRewriter(ast.NodeVisitor): def visit_Compare(self, comp): self.push_format_context() left_res, left_expl = self.visit(comp.left) - if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)): + if isinstance(comp.left, (ast.Compare, ast.BoolOp)): left_expl = "({0})".format(left_expl) res_variables = [self.variable() for i in range(len(comp.ops))] load_names = [ast.Name(v, ast.Load()) for v in res_variables] @@ -918,7 +914,7 @@ class AssertionRewriter(ast.NodeVisitor): results = [left_res] for i, op, next_operand in it: next_res, next_expl = self.visit(next_operand) - if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)): + if isinstance(next_operand, (ast.Compare, ast.BoolOp)): next_expl = "({0})".format(next_expl) results.append(next_res) sym = binop_map[op.__class__] diff --git a/_pytest/assertion/truncate.py b/_pytest/assertion/truncate.py index 1e1306356..2ed12e2e5 100644 --- a/_pytest/assertion/truncate.py +++ b/_pytest/assertion/truncate.py @@ -7,7 +7,7 @@ Current default behaviour is to truncate assertion explanations at from __future__ import absolute_import, division, print_function import os -import py +import six DEFAULT_MAX_LINES = 8 @@ -74,8 +74,8 @@ def _truncate_explanation(input_lines, max_lines=None, max_chars=None): msg += ' ({0} lines hidden)'.format(truncated_line_count) msg += ", {0}" .format(USAGE_MSG) truncated_explanation.extend([ - py.builtin._totext(""), - py.builtin._totext(msg), + six.text_type(""), + six.text_type(msg), ]) return truncated_explanation diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py index 06eda8d91..06d60a6fc 100644 --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -4,13 +4,10 @@ import pprint import _pytest._code import py -try: - from collections import Sequence -except ImportError: - Sequence = list +import six +from ..compat import Sequence - -u = py.builtin._totext +u = six.text_type # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -53,11 +50,11 @@ def _split_explanation(explanation): """ raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] - for l in 
raw_lines[1:]: - if l and l[0] in ['{', '}', '~', '>']: - lines.append(l) + for values in raw_lines[1:]: + if values and values[0] in ['{', '}', '~', '>']: + lines.append(values) else: - lines[-1] += '\\n' + l + lines[-1] += '\\n' + values return lines @@ -82,7 +79,7 @@ def _format_lines(lines): stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ') * (len(stack) - 1) + s + line[1:]) elif line.startswith('}'): stack.pop() stackcnt.pop() @@ -91,7 +88,7 @@ def _format_lines(lines): assert line[0] in ['~', '>'] stack[-1] += 1 indent = len(stack) if line.startswith('~') else len(stack) - 1 - result.append(u(' ')*indent + line[1:]) + result.append(u(' ') * indent + line[1:]) assert len(stack) == 1 return result @@ -106,16 +103,22 @@ except NameError: def assertrepr_compare(config, op, left, right): """Return specialised explanations for some operators/operands""" width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op - left_repr = py.io.saferepr(left, maxsize=int(width//2)) - right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) + left_repr = py.io.saferepr(left, maxsize=int(width // 2)) + right_repr = py.io.saferepr(right, maxsize=width - len(left_repr)) summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr)) - issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and - not isinstance(x, basestring)) - istext = lambda x: isinstance(x, basestring) - isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, (set, frozenset)) + def issequence(x): + return isinstance(x, Sequence) and not isinstance(x, basestring) + + def istext(x): + return isinstance(x, basestring) + + def isdict(x): + return isinstance(x, dict) + + def isset(x): + return isinstance(x, (set, frozenset)) def isiterable(obj): try: @@ -168,9 +171,9 @@ def _diff_text(left, right, verbose=False): """ from difflib import ndiff explanation = [] - if isinstance(left, py.builtin.bytes): + if isinstance(left, six.binary_type): left = u(repr(left)[1:-1]).replace(r'\n', '\n') - if isinstance(right, py.builtin.bytes): + if isinstance(right, six.binary_type): right = u(repr(right)[1:-1]).replace(r'\n', '\n') if not verbose: i = 0 # just in case left or right has zero length @@ -285,7 +288,7 @@ def _compare_eq_dict(left, right, verbose=False): def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] - tail = text[index+len(term):] + tail = text[index + len(term):] correct_text = head + tail diff = _diff_text(correct_text, text, verbose) newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py index 7fc08fff3..717c061d4 100755 --- a/_pytest/cacheprovider.py +++ b/_pytest/cacheprovider.py @@ -5,23 +5,39 @@ the name cache was not chosen to ensure pluggy automatically ignores the external pytest-cache """ from __future__ import absolute_import, division, print_function + +from collections import OrderedDict + import py +import six + import pytest import json +import os from os.path import sep as _sep, altsep as _altsep class Cache(object): def __init__(self, config): self.config = config - self._cachedir = config.rootdir.join(".cache") + self._cachedir = Cache.cache_dir_from_config(config) self.trace = config.trace.root.get("cache") - if config.getvalue("cacheclear"): + if config.getoption("cacheclear"): self.trace("clearing cachedir") if self._cachedir.check(): 
self._cachedir.remove() self._cachedir.mkdir() + @staticmethod + def cache_dir_from_config(config): + cache_dir = config.getini("cache_dir") + cache_dir = os.path.expanduser(cache_dir) + cache_dir = os.path.expandvars(cache_dir) + if os.path.isabs(cache_dir): + return py.path.local(cache_dir) + else: + return config.rootdir.join(cache_dir) + def makedir(self, name): """ return a directory path object with the given name. If the directory does not yet exist, it will be created. You can use it @@ -87,33 +103,35 @@ class Cache(object): json.dump(value, f, indent=2, sort_keys=True) -class LFPlugin: +class LFPlugin(object): """ Plugin which implements the --lf (run last-failing) option """ + def __init__(self, config): self.config = config active_keys = 'lf', 'failedfirst' - self.active = any(config.getvalue(key) for key in active_keys) - if self.active: - self.lastfailed = config.cache.get("cache/lastfailed", {}) - else: - self.lastfailed = {} + self.active = any(config.getoption(key) for key in active_keys) + self.lastfailed = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count = None + self._no_failures_behavior = self.config.getoption('last_failed_no_failures') - def pytest_report_header(self): + def pytest_report_collectionfinish(self): if self.active: - if not self.lastfailed: - mode = "run all (no recorded failures)" + if not self._previously_failed_count: + mode = "run {} (no recorded failures)".format(self._no_failures_behavior) else: - mode = "rerun last %d failures%s" % ( - len(self.lastfailed), - " first" if self.config.getvalue("failedfirst") else "") + noun = 'failure' if self._previously_failed_count == 1 else 'failures' + suffix = " first" if self.config.getoption( + "failedfirst") else "" + mode = "rerun previous {count} {noun}{suffix}".format( + count=self._previously_failed_count, suffix=suffix, noun=noun + ) return "run-last-failure: %s" % mode def pytest_runtest_logreport(self, report): - if report.failed and "xfail" not in report.keywords: + if (report.when == 'call' and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: self.lastfailed[report.nodeid] = True - elif not report.failed: - if report.when == "call": - self.lastfailed.pop(report.nodeid, None) def pytest_collectreport(self, report): passed = report.outcome in ('passed', 'skipped') @@ -127,33 +145,72 @@ class LFPlugin: self.lastfailed[report.nodeid] = True def pytest_collection_modifyitems(self, session, config, items): - if self.active and self.lastfailed: - previously_failed = [] - previously_passed = [] - for item in items: - if item.nodeid in self.lastfailed: - previously_failed.append(item) + if self.active: + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + if not previously_failed: + # running a subset of all tests with recorded failures outside + # of the set of tests currently executing + return + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) else: - previously_passed.append(item) - if not previously_failed and previously_passed: - # running a subset of all tests with recorded failures outside - # of the set of tests currently executing - pass - elif self.config.getvalue("lf"): - items[:] = previously_failed - 
config.hook.pytest_deselected(items=previously_passed) - else: - items[:] = previously_failed + previously_passed + items[:] = previously_failed + previously_passed + elif self._no_failures_behavior == 'none': + config.hook.pytest_deselected(items=items) + items[:] = [] def pytest_sessionfinish(self, session): config = self.config - if config.getvalue("cacheshow") or hasattr(config, "slaveinput"): + if config.getoption("cacheshow") or hasattr(config, "slaveinput"): return - prev_failed = config.cache.get("cache/lastfailed", None) is not None - if (session.testscollected and prev_failed) or self.lastfailed: + + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: config.cache.set("cache/lastfailed", self.lastfailed) +class NFPlugin(object): + """ Plugin which implements the --nf (run new-first) option """ + + def __init__(self, config): + self.config = config + self.active = config.option.newfirst + self.cached_nodeids = config.cache.get("cache/nodeids", []) + + def pytest_collection_modifyitems(self, session, config, items): + if self.active: + new_items = OrderedDict() + other_items = OrderedDict() + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order(six.itervalues(new_items)) + \ + self._get_increasing_order(six.itervalues(other_items)) + self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)] + + def _get_increasing_order(self, items): + return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) + + def pytest_sessionfinish(self, session): + config = self.config + if config.getoption("cacheshow") or hasattr(config, "slaveinput"): + return + + config.cache.set("cache/nodeids", self.cached_nodeids) + + def pytest_addoption(parser): group = parser.getgroup("general") group.addoption( @@ -165,12 +222,25 @@ def pytest_addoption(parser): help="run all tests but run the last failures first. 
" "This may re-order tests and thus lead to " "repeated fixture setup/teardown") + group.addoption( + '--nf', '--new-first', action='store_true', dest="newfirst", + help="run tests from new files first, then the rest of the tests " + "sorted by file mtime") group.addoption( '--cache-show', action='store_true', dest="cacheshow", help="show cache contents, don't perform collection or tests") group.addoption( '--cache-clear', action='store_true', dest="cacheclear", help="remove all cache contents at start of test run.") + parser.addini( + "cache_dir", default='.pytest_cache', + help="cache directory path.") + group.addoption( + '--lfnf', '--last-failed-no-failures', action='store', + dest='last_failed_no_failures', choices=('all', 'none'), default='all', + help='change the behavior when no test failed in the last run or no ' + 'information about the last failures was found in the cache' + ) def pytest_cmdline_main(config): @@ -179,11 +249,11 @@ def pytest_cmdline_main(config): return wrap_session(config, cacheshow) - @pytest.hookimpl(tryfirst=True) def pytest_configure(config): config.cache = Cache(config) config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") @pytest.fixture @@ -224,7 +294,7 @@ def cacheshow(config, session): val = config.cache.get(key, dummy) if val is dummy: tw.line("%s contains unreadable content, " - "will be ignored" % key) + "will be ignored" % key) else: tw.line("%s contains:" % key) stream = py.io.TextIO() @@ -236,7 +306,7 @@ def cacheshow(config, session): if ddir.isdir() and ddir.listdir(): tw.sep("-", "cache directories") for p in sorted(basedir.join("d").visit()): - #if p.check(dir=1): + # if p.check(dir=1): # print("%s/" % p.relto(basedir)) if p.isfile(): key = p.relto(basedir) diff --git a/_pytest/capture.py b/_pytest/capture.py index 3661f2691..9f4f41c41 100644 --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -4,6 +4,7 @@ per-test stdout/stderr capturing mechanism. 
""" from __future__ import absolute_import, division, print_function +import collections import contextlib import sys import os @@ -11,11 +12,10 @@ import io from io import UnsupportedOperation from tempfile import TemporaryFile -import py +import six import pytest from _pytest.compat import CaptureIO -unicode = py.builtin.text patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} @@ -36,14 +36,15 @@ def pytest_addoption(parser): def pytest_load_initial_conftests(early_config, parser, args): ns = early_config.known_args_namespace if ns.capture == "fd": - _py36_windowsconsoleio_workaround() + _py36_windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() _readline_workaround() pluginmanager = early_config.pluginmanager capman = CaptureManager(ns.capture) pluginmanager.register(capman, "capturemanager") # make sure that capturemanager is properly reset at final shutdown - early_config.add_cleanup(capman.reset_capturings) + early_config.add_cleanup(capman.stop_global_capturing) # make sure logging does not raise exceptions at the end def silence_logging_at_shutdown(): @@ -52,17 +53,30 @@ def pytest_load_initial_conftests(early_config, parser, args): early_config.add_cleanup(silence_logging_at_shutdown) # finally trigger conftest loading but while capturing (issue93) - capman.init_capturings() + capman.start_global_capturing() outcome = yield - out, err = capman.suspendcapture() + out, err = capman.suspend_global_capture() if outcome.excinfo is not None: sys.stdout.write(out) sys.stderr.write(err) -class CaptureManager: +class CaptureManager(object): + """ + Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each + test phase (setup, call, teardown). After each of those points, the captured output is obtained and + attached to the collection/runtest report. + + There are two levels of capture: + * global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled + during collection and each test phase. + * fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this + case special handling is needed to ensure the fixtures take precedence over the global capture. 
+ """ + def __init__(self, method): self._method = method + self._global_capturing = None def _getcapture(self, method): if method == "fd": @@ -74,23 +88,24 @@ class CaptureManager: else: raise ValueError("unknown capturing method: %r" % method) - def init_capturings(self): - assert not hasattr(self, "_capturing") - self._capturing = self._getcapture(self._method) - self._capturing.start_capturing() + def start_global_capturing(self): + assert self._global_capturing is None + self._global_capturing = self._getcapture(self._method) + self._global_capturing.start_capturing() - def reset_capturings(self): - cap = self.__dict__.pop("_capturing", None) - if cap is not None: - cap.pop_outerr_to_orig() - cap.stop_capturing() + def stop_global_capturing(self): + if self._global_capturing is not None: + self._global_capturing.pop_outerr_to_orig() + self._global_capturing.stop_capturing() + self._global_capturing = None - def resumecapture(self): - self._capturing.resume_capturing() + def resume_global_capture(self): + self._global_capturing.resume_capturing() - def suspendcapture(self, in_=False): - self.deactivate_funcargs() - cap = getattr(self, "_capturing", None) + def suspend_global_capture(self, item=None, in_=False): + if item is not None: + self.deactivate_fixture(item) + cap = getattr(self, "_global_capturing", None) if cap is not None: try: outerr = cap.readouterr() @@ -98,23 +113,26 @@ class CaptureManager: cap.suspend_capturing(in_=in_) return outerr - def activate_funcargs(self, pyfuncitem): - capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None) - if capfuncarg is not None: - capfuncarg._start() - self._capfuncarg = capfuncarg + def activate_fixture(self, item): + """If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over + the global capture. 
+ """ + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture._start() - def deactivate_funcargs(self): - capfuncarg = self.__dict__.pop("_capfuncarg", None) - if capfuncarg is not None: - capfuncarg.close() + def deactivate_fixture(self, item): + """Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any.""" + fixture = getattr(item, "_capture_fixture", None) + if fixture is not None: + fixture.close() @pytest.hookimpl(hookwrapper=True) def pytest_make_collect_report(self, collector): if isinstance(collector, pytest.File): - self.resumecapture() + self.resume_global_capture() outcome = yield - out, err = self.suspendcapture() + out, err = self.suspend_global_capture() rep = outcome.get_result() if out: rep.sections.append(("Captured stdout", out)) @@ -125,67 +143,139 @@ class CaptureManager: @pytest.hookimpl(hookwrapper=True) def pytest_runtest_setup(self, item): - self.resumecapture() + self.resume_global_capture() + # no need to activate a capture fixture because they activate themselves during creation; this + # only makes sense when a fixture uses a capture fixture, otherwise the capture fixture will + # be activated during pytest_runtest_call yield - self.suspendcapture_item(item, "setup") + self.suspend_capture_item(item, "setup") @pytest.hookimpl(hookwrapper=True) def pytest_runtest_call(self, item): - self.resumecapture() - self.activate_funcargs(item) + self.resume_global_capture() + # it is important to activate this fixture during the call phase so it overwrites the "global" + # capture + self.activate_fixture(item) yield - #self.deactivate_funcargs() called from suspendcapture() - self.suspendcapture_item(item, "call") + self.suspend_capture_item(item, "call") @pytest.hookimpl(hookwrapper=True) def pytest_runtest_teardown(self, item): - self.resumecapture() + self.resume_global_capture() + self.activate_fixture(item) yield - self.suspendcapture_item(item, "teardown") + self.suspend_capture_item(item, "teardown") @pytest.hookimpl(tryfirst=True) def pytest_keyboard_interrupt(self, excinfo): - self.reset_capturings() + self.stop_global_capturing() @pytest.hookimpl(tryfirst=True) def pytest_internalerror(self, excinfo): - self.reset_capturings() + self.stop_global_capturing() - def suspendcapture_item(self, item, when, in_=False): - out, err = self.suspendcapture(in_=in_) + def suspend_capture_item(self, item, when, in_=False): + out, err = self.suspend_global_capture(item, in_=in_) item.add_report_section(when, "stdout", out) item.add_report_section(when, "stderr", err) -error_capsysfderror = "cannot use capsys and capfd at the same time" +capture_fixtures = {'capfd', 'capfdbinary', 'capsys', 'capsysbinary'} + + +def _ensure_only_one_capture_fixture(request, name): + fixtures = set(request.fixturenames) & capture_fixtures - set((name,)) + if fixtures: + fixtures = sorted(fixtures) + fixtures = fixtures[0] if len(fixtures) == 1 else fixtures + raise request.raiseerror( + "cannot use {0} and {1} at the same time".format( + fixtures, name, + ), + ) @pytest.fixture def capsys(request): - """Enable capturing of writes to sys.stdout/sys.stderr and make + """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make captured output available via ``capsys.readouterr()`` method calls - which return a ``(out, err)`` tuple. + which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text`` + objects. 
""" - if "capfd" in request.fixturenames: - raise request.raiseerror(error_capsysfderror) - request.node._capfuncarg = c = CaptureFixture(SysCapture, request) - return c + _ensure_only_one_capture_fixture(request, 'capsys') + with _install_capture_fixture_on_item(request, SysCapture) as fixture: + yield fixture + + +@pytest.fixture +def capsysbinary(request): + """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes`` + objects. + """ + _ensure_only_one_capture_fixture(request, 'capsysbinary') + # Currently, the implementation uses the python3 specific `.buffer` + # property of CaptureIO. + if sys.version_info < (3,): + raise request.raiseerror('capsysbinary is only supported on python 3') + with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture: + yield fixture + @pytest.fixture def capfd(request): - """Enable capturing of writes to file descriptors 1 and 2 and make + """Enable capturing of writes to file descriptors ``1`` and ``2`` and make captured output available via ``capfd.readouterr()`` method calls - which return a ``(out, err)`` tuple. + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text`` + objects. """ - if "capsys" in request.fixturenames: - request.raiseerror(error_capsysfderror) + _ensure_only_one_capture_fixture(request, 'capfd') if not hasattr(os, 'dup'): - pytest.skip("capfd funcarg needs os.dup") - request.node._capfuncarg = c = CaptureFixture(FDCapture, request) - return c + pytest.skip("capfd fixture needs os.dup function which is not available in this system") + with _install_capture_fixture_on_item(request, FDCapture) as fixture: + yield fixture -class CaptureFixture: +@pytest.fixture +def capfdbinary(request): + """Enable capturing of write to file descriptors 1 and 2 and make + captured output available via ``capfdbinary.readouterr`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be + ``bytes`` objects. + """ + _ensure_only_one_capture_fixture(request, 'capfdbinary') + if not hasattr(os, 'dup'): + pytest.skip("capfdbinary fixture needs os.dup function which is not available in this system") + with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture: + yield fixture + + +@contextlib.contextmanager +def _install_capture_fixture_on_item(request, capture_class): + """ + Context manager which creates a ``CaptureFixture`` instance and "installs" it on + the item/node of the given request. Used by ``capsys`` and ``capfd``. + + The CaptureFixture is added as attribute of the item because it needs to accessed + by ``CaptureManager`` during its ``pytest_runtest_*`` hooks. + """ + request.node._capture_fixture = fixture = CaptureFixture(capture_class, request) + capmanager = request.config.pluginmanager.getplugin('capturemanager') + # need to active this fixture right away in case it is being used by another fixture (setup phase) + # if this fixture is being used only by a test function (call phase), then we wouldn't need this + # activation, but it doesn't hurt + capmanager.activate_fixture(request.node) + yield fixture + fixture.close() + del request.node._capture_fixture + + +class CaptureFixture(object): + """ + Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary` + fixtures. 
+ """ def __init__(self, captureclass, request): self.captureclass = captureclass self.request = request @@ -202,6 +292,10 @@ class CaptureFixture: cap.stop_capturing() def readouterr(self): + """Read and return the captured output so far, resetting the internal buffer. + + :return: captured content as a namedtuple with ``out`` and ``err`` string attributes + """ try: return self._capture.readouterr() except AttributeError: @@ -209,12 +303,15 @@ class CaptureFixture: @contextlib.contextmanager def disabled(self): + """Temporarily disables capture while inside the 'with' block.""" + self._capture.suspend_capturing() capmanager = self.request.config.pluginmanager.getplugin('capturemanager') - capmanager.suspendcapture_item(self.request.node, "call", in_=True) + capmanager.suspend_global_capture(item=None, in_=False) try: yield finally: - capmanager.resumecapture() + capmanager.resume_global_capture() + self._capture.resume_capturing() def safe_text_dupfile(f, mode, default_encoding="UTF8"): @@ -238,12 +335,13 @@ def safe_text_dupfile(f, mode, default_encoding="UTF8"): class EncodedFile(object): errors = "strict" # possibly needed by py3 code (issue555) + def __init__(self, buffer, encoding): self.buffer = buffer self.encoding = encoding def write(self, obj): - if isinstance(obj, unicode): + if isinstance(obj, six.text_type): obj = obj.encode(self.encoding, "replace") self.buffer.write(obj) @@ -251,10 +349,18 @@ class EncodedFile(object): data = ''.join(linelist) self.write(data) + @property + def name(self): + """Ensure that file.name is a string.""" + return repr(self.buffer) + def __getattr__(self, name): return getattr(object.__getattribute__(self, "buffer"), name) +CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"]) + + class MultiCapture(object): out = err = in_ = None @@ -315,14 +421,19 @@ class MultiCapture(object): def readouterr(self): """ return snapshot unicode value of stdout/stderr capturings. """ - return (self.out.snap() if self.out is not None else "", - self.err.snap() if self.err is not None else "") + return CaptureResult(self.out.snap() if self.out is not None else "", + self.err.snap() if self.err is not None else "") -class NoCapture: + +class NoCapture(object): __init__ = start = done = suspend = resume = lambda *args: None -class FDCapture: - """ Capture IO to/from a given os-level filedescriptor. """ + +class FDCaptureBinary(object): + """Capture IO to/from a given os-level filedescriptor. + + snap() produces `bytes` + """ def __init__(self, targetfd, tmpfile=None): self.targetfd = targetfd @@ -361,17 +472,11 @@ class FDCapture: self.syscapture.start() def snap(self): - f = self.tmpfile - f.seek(0) - res = f.read() - if res: - enc = getattr(f, "encoding", None) - if enc and isinstance(res, bytes): - res = py.builtin._totext(res, enc, "replace") - f.truncate(0) - f.seek(0) - return res - return '' + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res def done(self): """ stop capturing, restore streams, return original capture file, @@ -380,7 +485,7 @@ class FDCapture: os.dup2(targetfd_save, self.targetfd) os.close(targetfd_save) self.syscapture.done() - self.tmpfile.close() + _attempt_to_close_capture_file(self.tmpfile) def suspend(self): self.syscapture.suspend() @@ -392,12 +497,25 @@ class FDCapture: def writeorg(self, data): """ write to original file descriptor. 
""" - if py.builtin._istext(data): - data = data.encode("utf8") # XXX use encoding of original stream + if isinstance(data, six.text_type): + data = data.encode("utf8") # XXX use encoding of original stream os.write(self.targetfd_save, data) -class SysCapture: +class FDCapture(FDCaptureBinary): + """Capture IO to/from a given os-level filedescriptor. + + snap() produces text + """ + def snap(self): + res = FDCaptureBinary.snap(self) + enc = getattr(self.tmpfile, "encoding", None) + if enc and isinstance(res, bytes): + res = six.text_type(res, enc, "replace") + return res + + +class SysCapture(object): def __init__(self, fd, tmpfile=None): name = patchsysdict[fd] self._old = getattr(sys, name) @@ -413,16 +531,15 @@ class SysCapture: setattr(sys, self.name, self.tmpfile) def snap(self): - f = self.tmpfile - res = f.getvalue() - f.truncate(0) - f.seek(0) + res = self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() return res def done(self): setattr(sys, self.name, self._old) del self._old - self.tmpfile.close() + _attempt_to_close_capture_file(self.tmpfile) def suspend(self): setattr(sys, self.name, self._old) @@ -435,7 +552,15 @@ class SysCapture: self._old.flush() -class DontReadFromInput: +class SysCaptureBinary(SysCapture): + def snap(self): + res = self.tmpfile.buffer.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + +class DontReadFromInput(six.Iterator): """Temporary stub class. Ideally when stdin is accessed, the capturing should be turned off, with possibly all data captured so far sent to the screen. This should be configurable, though, @@ -449,7 +574,10 @@ class DontReadFromInput: raise IOError("reading from stdin while output is captured") readline = read readlines = read - __iter__ = read + __next__ = read + + def __iter__(self): + return self def fileno(self): raise UnsupportedOperation("redirected stdin is pseudofile, " @@ -463,12 +591,30 @@ class DontReadFromInput: @property def buffer(self): - if sys.version_info >= (3,0): + if sys.version_info >= (3, 0): return self else: raise AttributeError('redirected stdin has no attribute buffer') +def _colorama_workaround(): + """ + Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. + """ + + if not sys.platform.startswith('win32'): + return + try: + import colorama # noqa + except ImportError: + pass + + def _readline_workaround(): """ Ensure readline is imported so that it attaches to the correct stdio @@ -496,7 +642,7 @@ def _readline_workaround(): pass -def _py36_windowsconsoleio_workaround(): +def _py36_windowsconsoleio_workaround(stream): """ Python 3.6 implemented unicode console handling for Windows. This works by reading/writing to the raw console handle using @@ -513,13 +659,20 @@ def _py36_windowsconsoleio_workaround(): also means a different handle by replicating the logic in "Py_lifecycle.c:initstdio/create_stdio". + :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. 
+ See https://github.com/pytest-dev/py/issues/103 """ if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6): return - buffered = hasattr(sys.stdout.buffer, 'raw') - raw_stdout = sys.stdout.buffer.raw if buffered else sys.stdout.buffer + # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666) + if not hasattr(stream, 'buffer'): + return + + buffered = hasattr(stream.buffer, 'raw') + raw_stdout = stream.buffer.raw if buffered else stream.buffer if not isinstance(raw_stdout, io._WindowsConsoleIO): return @@ -540,3 +693,14 @@ def _py36_windowsconsoleio_workaround(): sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, 'rb') sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, 'wb') sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, 'wb') + + +def _attempt_to_close_capture_file(f): + """Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)""" + if six.PY2: + try: + f.close() + except IOError: + pass + else: + f.close() diff --git a/_pytest/compat.py b/_pytest/compat.py index 8c200af5f..bcb31cb88 100644 --- a/_pytest/compat.py +++ b/_pytest/compat.py @@ -2,17 +2,17 @@ python version compatibility code """ from __future__ import absolute_import, division, print_function -import sys -import inspect -import types -import re + +import codecs import functools +import inspect +import re +import sys import py -import _pytest - - +import _pytest +from _pytest.outcomes import TEST_OUTCOME try: import enum @@ -25,6 +25,12 @@ _PY3 = sys.version_info > (3, 0) _PY2 = not _PY3 +if _PY3: + from inspect import signature, Parameter as Parameter +else: + from funcsigs import signature, Parameter as Parameter + + NoneType = type(None) NOTSET = object() @@ -32,12 +38,18 @@ PY35 = sys.version_info[:2] >= (3, 5) PY36 = sys.version_info[:2] >= (3, 6) MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError' -if hasattr(inspect, 'signature'): - def _format_args(func): - return str(inspect.signature(func)) +if _PY3: + from collections.abc import MutableMapping as MappingMixin # noqa + from collections.abc import Sequence # noqa else: - def _format_args(func): - return inspect.formatargspec(*inspect.getargspec(func)) + # those raise DeprecationWarnings in Python >=3.7 + from collections import MutableMapping as MappingMixin # noqa + from collections import Sequence # noqa + + +def _format_args(func): + return str(signature(func)) + isfunction = inspect.isfunction isclass = inspect.isclass @@ -59,16 +71,15 @@ def iscoroutinefunction(func): which in turns also initializes the "logging" module as side-effect (see issue #8). 
""" return (getattr(func, '_is_coroutine', False) or - (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func))) + (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func))) def getlocation(function, curdir): - import inspect fn = py.path.local(inspect.getfile(function)) lineno = py.builtin._getcode(function).co_firstlineno if fn.relto(curdir): fn = fn.relto(curdir) - return "%s:%d" %(fn, lineno+1) + return "%s:%d" % (fn, lineno + 1) def num_mock_patch_args(function): @@ -76,59 +87,72 @@ def num_mock_patch_args(function): patchings = getattr(function, "patchings", None) if not patchings: return 0 - mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None)) - if mock is not None: + mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")] + if any(mock_modules): + sentinels = [m.DEFAULT for m in mock_modules if m is not None] return len([p for p in patchings - if not p.attribute_name and p.new is mock.DEFAULT]) + if not p.attribute_name and p.new in sentinels]) return len(patchings) -def getfuncargnames(function, startindex=None): - # XXX merge with main.py's varnames - #assert not isclass(function) - realfunction = function - while hasattr(realfunction, "__wrapped__"): - realfunction = realfunction.__wrapped__ - if startindex is None: - startindex = inspect.ismethod(function) and 1 or 0 - if realfunction != function: - startindex += num_mock_patch_args(function) - function = realfunction - if isinstance(function, functools.partial): - argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0] - partial = function - argnames = argnames[len(partial.args):] - if partial.keywords: - for kw in partial.keywords: - argnames.remove(kw) - else: - argnames = inspect.getargs(_pytest._code.getrawcode(function))[0] - defaults = getattr(function, 'func_defaults', - getattr(function, '__defaults__', None)) or () - numdefaults = len(defaults) - if numdefaults: - return tuple(argnames[startindex:-numdefaults]) - return tuple(argnames[startindex:]) +def getfuncargnames(function, is_method=False, cls=None): + """Returns the names of a function's mandatory arguments. + This should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + The is_method and cls arguments indicate that the function should + be treated as a bound method even though it's not unless, only in + the case of cls, the function is a static method. -if sys.version_info[:2] == (2, 6): - def isclass(object): - """ Return true if the object is a class. Overrides inspect.isclass for - python 2.6 because it will return True for objects which always return - something on __getattr__ calls (see #1035). - Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc - """ - return isinstance(object, (type, types.ClassType)) + @RonnyPfannschmidt: This function should be refactored when we + revisit fixtures. The fixture mechanism should ask the node for + the fixture names, and not try to obtain directly from the + function object well after collection has occurred. + + """ + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. 
+ arg_names = tuple(p.name for p in signature(function).parameters.values() + if (p.kind is Parameter.POSITIONAL_OR_KEYWORD or + p.kind is Parameter.KEYWORD_ONLY) and + p.default is Parameter.empty) + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. + if (is_method or + (cls and not isinstance(cls.__dict__.get(function.__name__, None), + staticmethod))): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function):] + return arg_names if _PY3: - import codecs - imap = map STRING_TYPES = bytes, str UNICODE_TYPES = str, - def _escape_strings(val): + if PY35: + def _bytes_to_ascii(val): + return val.decode('ascii', 'backslashreplace') + else: + def _bytes_to_ascii(val): + if val: + # source: http://goo.gl/bGsnwC + encoded_bytes, _ = codecs.escape_encode(val) + return encoded_bytes.decode('ascii') + else: + # empty bytes crashes codecs.escape_encode (#1087) + return '' + + def ascii_escaped(val): """If val is pure ascii, returns it as a str(). Otherwise, escapes bytes objects into a sequence of escaped bytes: @@ -147,22 +171,14 @@ if _PY3: """ if isinstance(val, bytes): - if val: - # source: http://goo.gl/bGsnwC - encoded_bytes, _ = codecs.escape_encode(val) - return encoded_bytes.decode('ascii') - else: - # empty bytes crashes codecs.escape_encode (#1087) - return '' + return _bytes_to_ascii(val) else: return val.encode('unicode_escape').decode('ascii') else: STRING_TYPES = bytes, str, unicode UNICODE_TYPES = unicode, - from itertools import imap # NOQA - - def _escape_strings(val): + def ascii_escaped(val): """In py2 bytes and str are the same type, so return if it's a bytes object, return it unchanged if it is a full ascii string, otherwise escape it into its binary form. @@ -215,21 +231,20 @@ def getimfunc(func): try: return func.__func__ except AttributeError: - try: - return func.im_func - except AttributeError: - return func + return func def safe_getattr(object, name, default): - """ Like getattr but return default upon any Exception. + """ Like getattr but return default upon any Exception or any OutcomeException. Attribute access can potentially fail for 'evil' Python objects. See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException + instead of Exception (for more details check #2707) """ try: return getattr(object, name, default) - except Exception: + except TEST_OUTCOME: return default @@ -283,7 +298,15 @@ def _setup_collect_fakemodule(): if _PY2: - from py.io import TextIO as CaptureIO + # Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO. + from py.io import TextIO + + class CaptureIO(TextIO): + + @property + def encoding(self): + return getattr(self, '_encoding', 'UTF-8') + else: import io @@ -297,6 +320,7 @@ else: def getvalue(self): return self.buffer.getvalue().decode('UTF-8') + class FuncargnamesCompatAttr(object): """ helper class so that Metafunc, Function and FixtureRequest don't need to each define the "funcargnames" compatibility attribute. 
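The rewritten getfuncargnames above derives the mandatory argument names from inspect.signature instead of the old getargs/__defaults__ bookkeeping. A minimal standalone sketch of that selection logic, assuming Python 3 (the names mandatory_arg_names, check and TestThing are illustrative only, not part of this patch):

from inspect import signature, Parameter

def mandatory_arg_names(function, is_method=False):
    # keep positional-or-keyword and keyword-only parameters that have no default
    names = tuple(
        p.name
        for p in signature(function).parameters.values()
        if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
        and p.default is Parameter.empty
    )
    # drop the implicit first argument when the function is really a bound method
    return names[1:] if is_method else names

def check(tmpdir, records, maxlines=10):
    pass

class TestThing(object):
    def test_it(self, monkeypatch, capsys):
        pass

print(mandatory_arg_names(check))                             # ('tmpdir', 'records')
print(mandatory_arg_names(TestThing.test_it, is_method=True))  # ('monkeypatch', 'capsys')

The real helper additionally strips arguments bound by functools.partial and arguments replaced by mock patchings, as its docstring above describes.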
diff --git a/_pytest/config.py b/_pytest/config.py index dadd5ca9d..eb9c2a1f2 100644 --- a/_pytest/config.py +++ b/_pytest/config.py @@ -5,15 +5,18 @@ import shlex import traceback import types import warnings - +import copy +import six import py # DON't import pytest here because it causes import cycle troubles import sys import os +from _pytest.outcomes import Skipped + import _pytest._code import _pytest.hookspec # the extension point definitions import _pytest.assertion -from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker +from pluggy import PluginManager, HookimplMarker, HookspecMarker from _pytest.compat import safe_str hookimpl = HookimplMarker("pytest") @@ -51,7 +54,7 @@ def main(args=None, plugins=None): tw = py.io.TerminalWriter(sys.stderr) for line in traceback.format_exception(*e.excinfo): tw.line(line.rstrip(), red=True) - tw.line("ERROR: could not load %s\n" % (e.path), red=True) + tw.line("ERROR: could not load %s\n" % (e.path,), red=True) return 4 else: try: @@ -59,11 +62,13 @@ def main(args=None, plugins=None): finally: config._ensure_unconfigure() except UsageError as e: + tw = py.io.TerminalWriter(sys.stderr) for msg in e.args: - sys.stderr.write("ERROR: %s\n" %(msg,)) + tw.line("ERROR: {}\n".format(msg), red=True) return 4 -class cmdline: # compatibility namespace + +class cmdline(object): # NOQA compatibility namespace main = staticmethod(main) @@ -99,26 +104,18 @@ def directory_arg(path, optname): return path -_preinit = [] - default_plugins = ( - "mark main terminal runner python fixtures debugging unittest capture skipping " - "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion " - "junitxml resultlog doctest cacheprovider freeze_support " - "setuponly setupplan warnings").split() + "mark main terminal runner python fixtures debugging unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion " + "junitxml resultlog doctest cacheprovider freeze_support " + "setuponly setupplan warnings logging").split() builtin_plugins = set(default_plugins) builtin_plugins.add("pytester") -def _preloadplugins(): - assert not _preinit - _preinit.append(get_config()) - def get_config(): - if _preinit: - return _preinit.pop(0) # subsequent calls to main will create a fresh instance pluginmanager = PytestPluginManager() config = Config(pluginmanager) @@ -126,6 +123,7 @@ def get_config(): pluginmanager.import_plugin(spec) return config + def get_plugin_manager(): """ Obtain a new instance of the @@ -137,6 +135,7 @@ def get_plugin_manager(): """ return get_config().pluginmanager + def _prepareconfig(args=None, plugins=None): warning = None if args is None: @@ -154,14 +153,14 @@ def _prepareconfig(args=None, plugins=None): try: if plugins: for plugin in plugins: - if isinstance(plugin, py.builtin._basestring): + if isinstance(plugin, six.string_types): pluginmanager.consider_pluginarg(plugin) else: pluginmanager.register(plugin) if warning: config.warn('C1', warning) return pluginmanager.hook.pytest_cmdline_parse( - pluginmanager=pluginmanager, args=args) + pluginmanager=pluginmanager, args=args) except BaseException: config._ensure_unconfigure() raise @@ -169,13 +168,14 @@ def _prepareconfig(args=None, plugins=None): class PytestPluginManager(PluginManager): """ - Overwrites :py:class:`pluggy.PluginManager <_pytest.vendored_packages.pluggy.PluginManager>` to add pytest-specific + Overwrites :py:class:`pluggy.PluginManager ` to add pytest-specific functionality: - * loading plugins from the command line, ``PYTEST_PLUGIN`` 
env variable and + * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and ``pytest_plugins`` global variables found in plugins being loaded; * ``conftest.py`` loading during start-up; """ + def __init__(self): super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_") self._conftest_plugins = set() @@ -201,12 +201,15 @@ class PytestPluginManager(PluginManager): # Config._consider_importhook will set a real object if required. self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage + self._configured = False def addhooks(self, module_or_class): """ .. deprecated:: 2.8 - Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>` instead. + Use :py:meth:`pluggy.PluginManager.add_hookspecs ` + instead. """ warning = dict(code="I2", fslocation=_pytest._code.getfslineno(sys._getframe(1)), @@ -235,7 +238,7 @@ class PytestPluginManager(PluginManager): def parse_hookspec_opts(self, module_or_class, name): opts = super(PytestPluginManager, self).parse_hookspec_opts( - module_or_class, name) + module_or_class, name) if opts is None: method = getattr(module_or_class, name) if name.startswith("pytest_"): @@ -243,22 +246,16 @@ class PytestPluginManager(PluginManager): "historic": hasattr(method, "historic")} return opts - def _verify_hook(self, hook, hookmethod): - super(PytestPluginManager, self)._verify_hook(hook, hookmethod) - if "__multicall__" in hookmethod.argnames: - fslineno = _pytest._code.getfslineno(hookmethod.function) - warning = dict(code="I1", - fslocation=fslineno, - nodeid=None, - message="%r hook uses deprecated __multicall__ " - "argument" % (hook.name)) - self._warn(warning) - def register(self, plugin, name=None): + if name in ['pytest_catchlog', 'pytest_capturelog']: + self._warn('{0} plugin has been merged into the core, ' + 'please remove it from your requirements.'.format( + name.replace('_', '-'))) + return ret = super(PytestPluginManager, self).register(plugin, name) if ret: self.hook.pytest_plugin_registered.call_historic( - kwargs=dict(plugin=plugin, manager=self)) + kwargs=dict(plugin=plugin, manager=self)) if isinstance(plugin, types.ModuleType): self.consider_module(plugin) @@ -276,11 +273,12 @@ class PytestPluginManager(PluginManager): # XXX now that the pluginmanager exposes hookimpl(tryfirst...) 
# we should remove tryfirst/trylast as markers config.addinivalue_line("markers", - "tryfirst: mark a hook implementation function such that the " - "plugin machinery will try to call it first/as early as possible.") + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.") config.addinivalue_line("markers", - "trylast: mark a hook implementation function such that the " - "plugin machinery will try to call it last/as late as possible.") + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + self._configured = True def _warn(self, message): kwargs = message if isinstance(message, dict) else { @@ -304,7 +302,7 @@ class PytestPluginManager(PluginManager): """ current = py.path.local() self._confcutdir = current.join(namespace.confcutdir, abs=True) \ - if namespace.confcutdir else None + if namespace.confcutdir else None self._noconftest = namespace.noconftest testpaths = namespace.file_or_dir foundanchor = False @@ -315,7 +313,7 @@ class PytestPluginManager(PluginManager): if i != -1: path = path[:i] anchor = current.join(path, abs=1) - if exists(anchor): # we found some file object + if exists(anchor): # we found some file object self._try_load_conftest(anchor) foundanchor = True if not foundanchor: @@ -371,6 +369,9 @@ class PytestPluginManager(PluginManager): _ensure_removed_sysmodule(conftestpath.purebasename) try: mod = conftestpath.pyimport() + if hasattr(mod, 'pytest_plugins') and self._configured: + from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST + warnings.warn(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST) except Exception: raise ConftestImportFailure(conftestpath, sys.exc_info()) @@ -382,7 +383,7 @@ class PytestPluginManager(PluginManager): if path and path.relto(dirpath) or path == dirpath: assert mod not in mods mods.append(mod) - self.trace("loaded conftestmodule %r" %(mod)) + self.trace("loaded conftestmodule %r" % (mod)) self.consider_conftest(mod) return mod @@ -392,7 +393,7 @@ class PytestPluginManager(PluginManager): # def consider_preparse(self, args): - for opt1,opt2 in zip(args, args[1:]): + for opt1, opt2 in zip(args, args[1:]): if opt1 == "-p": self.consider_pluginarg(opt2) @@ -424,9 +425,9 @@ class PytestPluginManager(PluginManager): # "terminal" or "capture". Those plugins are registered under their # basename for historic purposes but must be imported with the # _pytest prefix. - assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname + assert isinstance(modname, (six.text_type, str)), "module name as text required, got %r" % modname modname = str(modname) - if self.get_plugin(modname) is not None: + if self.is_blocked(modname) or self.get_plugin(modname) is not None: return if modname in builtin_plugins: importspec = "_pytest." 
+ modname @@ -436,17 +437,14 @@ class PytestPluginManager(PluginManager): try: __import__(importspec) except ImportError as e: - new_exc = ImportError('Error importing plugin "%s": %s' % (modname, safe_str(e.args[0]))) - # copy over name and path attributes - for attr in ('name', 'path'): - if hasattr(e, attr): - setattr(new_exc, attr, getattr(e, attr)) - raise new_exc - except Exception as e: - import pytest - if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception): - raise - self._warn("skipped plugin %r: %s" %((modname, e.msg))) + new_exc_type = ImportError + new_exc_message = 'Error importing plugin "%s": %s' % (modname, safe_str(e.args[0])) + new_exc = new_exc_type(new_exc_message) + + six.reraise(new_exc_type, new_exc, sys.exc_info()[2]) + + except Skipped as e: + self._warn("skipped plugin %r: %s" % ((modname, e.msg))) else: mod = sys.modules[importspec] self.register(mod, modname) @@ -470,7 +468,7 @@ def _get_plugin_specs_as_list(specs): return [] -class Parser: +class Parser(object): """ Parser for command line arguments and ini-file values. :ivar extra_info: dict of generic param -> value to display in case @@ -511,7 +509,7 @@ class Parser: for i, grp in enumerate(self._groups): if grp.name == after: break - self._groups.insert(i+1, group) + self._groups.insert(i + 1, group) return group def addoption(self, *opts, **attrs): @@ -549,7 +547,7 @@ class Parser: a = option.attrs() arggroup.add_argument(*n, **a) # bash like autocompletion for dirs (appending '/') - optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter + optparser.add_argument(FILE_OR_DIR, nargs='*').completer = filescompleter return optparser def parse_setoption(self, args, option, namespace=None): @@ -605,7 +603,7 @@ class ArgumentError(Exception): return self.msg -class Argument: +class Argument(object): """class that mimics the necessary behaviour of optparse.Option its currently a least effort implementation @@ -637,7 +635,7 @@ class Argument: pass else: # this might raise a keyerror as well, don't want to catch that - if isinstance(typ, py.builtin._basestring): + if isinstance(typ, six.string_types): if typ == 'choice': warnings.warn( 'type argument to addoption() is a string %r.' 
@@ -693,7 +691,7 @@ class Argument: if self._attrs.get('help'): a = self._attrs['help'] a = a.replace('%default', '%(default)s') - #a = a.replace('%prog', '%(prog)s') + # a = a.replace('%prog', '%(prog)s') self._attrs['help'] = a return self._attrs @@ -735,7 +733,7 @@ class Argument: return 'Argument({0})'.format(', '.join(args)) -class OptionGroup: +class OptionGroup(object): def __init__(self, name, description="", parser=None): self.name = name self.description = description @@ -777,7 +775,7 @@ class MyOptionParser(argparse.ArgumentParser): extra_info = {} self._parser = parser argparse.ArgumentParser.__init__(self, usage=parser._usage, - add_help=False, formatter_class=DropShorterLongHelpFormatter) + add_help=False, formatter_class=DropShorterLongHelpFormatter) # extra_info is a dict of (param -> value) to display if there's # an usage error to provide more contextual information to the user self.extra_info = extra_info @@ -805,9 +803,10 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): - shortcut if there are only two options and one of them is a short one - cache result on action object as this is called at least 2 times """ + def _format_action_invocation(self, action): orgstr = argparse.HelpFormatter._format_action_invocation(self, action) - if orgstr and orgstr[0] != '-': # only optional arguments + if orgstr and orgstr[0] != '-': # only optional arguments return orgstr res = getattr(action, '_formatted_action_invocation', None) if res: @@ -818,7 +817,7 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): action._formatted_action_invocation = orgstr return orgstr return_list = [] - option_map = getattr(action, 'map_long_option', {}) + option_map = getattr(action, 'map_long_option', {}) if option_map is None: option_map = {} short_long = {} @@ -836,7 +835,7 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): short_long[shortened] = xxoption # now short_long has been filled out to the longest with dashes # **and** we keep the right option ordering from add_argument - for option in options: # + for option in options: if len(option) == 2 or option[2] == ' ': return_list.append(option) if option[2:] == short_long.get(option.replace('-', '')): @@ -845,23 +844,14 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): return action._formatted_action_invocation - def _ensure_removed_sysmodule(modname): try: del sys.modules[modname] except KeyError: pass -class CmdOptions(object): - """ holds cmdline options as attributes.""" - def __init__(self, values=()): - self.__dict__.update(values) - def __repr__(self): - return "" %(self.__dict__,) - def copy(self): - return CmdOptions(self.__dict__) -class Notset: +class Notset(object): def __repr__(self): return "" @@ -870,13 +860,25 @@ notset = Notset() FILE_OR_DIR = 'file_or_dir' +def _iter_rewritable_modules(package_files): + for fn in package_files: + is_simple_module = '/' not in fn and fn.endswith('.py') + is_package = fn.count('/') == 1 and fn.endswith('__init__.py') + if is_simple_module: + module_name, _ = os.path.splitext(fn) + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + yield package_name + + class Config(object): """ access to configuration values, pluginmanager and plugin hooks. """ def __init__(self, pluginmanager): #: access to command line option as attributes. 
#: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead - self.option = CmdOptions() + self.option = argparse.Namespace() _a = FILE_OR_DIR self._parser = Parser( usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a), @@ -940,14 +942,14 @@ class Config(object): else: style = "native" excrepr = excinfo.getrepr(funcargs=True, - showlocals=getattr(option, 'showlocals', False), - style=style, - ) + showlocals=getattr(option, 'showlocals', False), + style=style, + ) res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) - if not py.builtin.any(res): + if not any(res): for line in str(excrepr).split("\n"): - sys.stderr.write("INTERNALERROR> %s\n" %line) + sys.stderr.write("INTERNALERROR> %s\n" % line) sys.stderr.flush() def cwd_relative_nodeid(self, nodeid): @@ -980,8 +982,9 @@ class Config(object): self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) def _initini(self, args): - ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy()) - r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn) + ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=copy.copy(self.option)) + r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn, + rootdir_cmd_arg=ns.rootdir or None) self.rootdir, self.inifile, self.inicfg = r self._parser.extra_info['rootdir'] = self.rootdir self._parser.extra_info['inifile'] = self.inifile @@ -991,10 +994,10 @@ class Config(object): self._override_ini = ns.override_ini or () def _consider_importhook(self, args): - """Install the PEP 302 import hook if using assertion re-writing. + """Install the PEP 302 import hook if using assertion rewriting. Needs to parse the --assert= option from the commandline - and find all the installed plugins to mark them for re-writing + and find all the installed plugins to mark them for rewriting by the importhook. """ ns, unknown_args = self._parser.parse_known_and_unknown_args(args) @@ -1006,7 +1009,7 @@ class Config(object): mode = 'plain' else: self._mark_plugins_for_rewrite(hook) - self._warn_about_missing_assertion(mode) + _warn_about_missing_assertion(mode) def _mark_plugins_for_rewrite(self, hook): """ @@ -1030,51 +1033,28 @@ class Config(object): for entry in entrypoint.dist._get_metadata(metadata) ) - for fn in package_files: - is_simple_module = os.sep not in fn and fn.endswith('.py') - is_package = fn.count(os.sep) == 1 and fn.endswith('__init__.py') - if is_simple_module: - module_name, ext = os.path.splitext(fn) - hook.mark_rewrite(module_name) - elif is_package: - package_name = os.path.dirname(fn) - hook.mark_rewrite(package_name) - - def _warn_about_missing_assertion(self, mode): - try: - assert False - except AssertionError: - pass - else: - if mode == 'plain': - sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED" - " and FAILING TESTS WILL PASS. 
Are you" - " using python -O?") - else: - sys.stderr.write("WARNING: assertions not in test modules or" - " plugins will be ignored" - " because assert statements are not executed " - "by the underlying Python interpreter " - "(are you using python -O?)\n") + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) def _preparse(self, args, addopts=True): - self._initini(args) if addopts: args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args + self._initini(args) + if addopts: args[:] = self.getini("addopts") + args self._checkversion() self._consider_importhook(args) self.pluginmanager.consider_preparse(args) self.pluginmanager.load_setuptools_entrypoints('pytest11') self.pluginmanager.consider_env() - self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy()) - confcutdir = self.known_args_namespace.confcutdir + self.known_args_namespace = ns = self._parser.parse_known_args( + args, namespace=copy.copy(self.option)) if self.known_args_namespace.confcutdir is None and self.inifile: confcutdir = py.path.local(self.inifile).dirname self.known_args_namespace.confcutdir = confcutdir try: self.hook.pytest_load_initial_conftests(early_config=self, - args=args, parser=self._parser) + args=args, parser=self._parser) except ConftestImportFailure: e = sys.exc_info()[1] if ns.help or ns.version: @@ -1092,17 +1072,17 @@ class Config(object): myver = pytest.__version__.split(".") if myver < ver: raise pytest.UsageError( - "%s:%d: requires pytest-%s, actual pytest-%s'" %( - self.inicfg.config.path, self.inicfg.lineof('minversion'), - minver, pytest.__version__)) + "%s:%d: requires pytest-%s, actual pytest-%s'" % ( + self.inicfg.config.path, self.inicfg.lineof('minversion'), + minver, pytest.__version__)) def parse(self, args, addopts=True): # parse given cmdline arguments into this config object. assert not hasattr(self, 'args'), ( - "can only parse cmdline args at most once per Config object") + "can only parse cmdline args at most once per Config object") self._origargs = args self.hook.pytest_addhooks.call_historic( - kwargs=dict(pluginmanager=self.pluginmanager)) + kwargs=dict(pluginmanager=self.pluginmanager)) self._preparse(args, addopts=addopts) # XXX deprecated hook: self.hook.pytest_cmdline_preparse(config=self, args=args) @@ -1125,7 +1105,7 @@ class Config(object): the first line in its value. """ x = self.getini(name) assert isinstance(x, list) - x.append(line) # modifies the cached list inline + x.append(line) # modifies the cached list inline def getini(self, name): """ return configuration value from an :ref:`ini file `. 
If the @@ -1142,7 +1122,7 @@ class Config(object): try: description, type, default = self._parser._inidict[name] except KeyError: - raise ValueError("unknown configuration value: %r" %(name,)) + raise ValueError("unknown configuration value: %r" % (name,)) value = self._get_override_ini_value(name) if value is None: try: @@ -1155,10 +1135,10 @@ class Config(object): return [] if type == "pathlist": dp = py.path.local(self.inicfg.config.path).dirpath() - l = [] + values = [] for relpath in shlex.split(value): - l.append(dp.join(relpath, abs=True)) - return l + values.append(dp.join(relpath, abs=True)) + return values elif type == "args": return shlex.split(value) elif type == "linelist": @@ -1175,26 +1155,25 @@ class Config(object): except KeyError: return None modpath = py.path.local(mod.__file__).dirpath() - l = [] + values = [] for relroot in relroots: if not isinstance(relroot, py.path.local): relroot = relroot.replace("/", py.path.local.sep) relroot = modpath.join(relroot, abs=True) - l.append(relroot) - return l + values.append(relroot) + return values def _get_override_ini_value(self, name): value = None - # override_ini is a list of list, to support both -o foo1=bar1 foo2=bar2 and - # and -o foo1=bar1 -o foo2=bar2 options - # always use the last item if multiple value set for same ini-name, + # override_ini is a list of "ini=value" options + # always use the last item if multiple values are set for same ini-name, # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2 - for ini_config_list in self._override_ini: - for ini_config in ini_config_list: - try: - (key, user_ini_value) = ini_config.split("=", 1) - except ValueError: - raise UsageError("-o/--override-ini expects option=value style.") + for ini_config in self._override_ini: + try: + key, user_ini_value = ini_config.split("=", 1) + except ValueError: + raise UsageError("-o/--override-ini expects option=value style.") + else: if key == name: value = user_ini_value return value @@ -1219,7 +1198,7 @@ class Config(object): return default if skip: import pytest - pytest.skip("no %r option found" %(name,)) + pytest.skip("no %r option found" % (name,)) raise ValueError("no option named %r" % (name,)) def getvalue(self, name, path=None): @@ -1230,12 +1209,37 @@ class Config(object): """ (deprecated, use getoption(skip=True)) """ return self.getoption(name, skip=True) + +def _assertion_supported(): + try: + assert False + except AssertionError: + return True + else: + return False + + +def _warn_about_missing_assertion(mode): + if not _assertion_supported(): + if mode == 'plain': + sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED" + " and FAILING TESTS WILL PASS. Are you" + " using python -O?") + else: + sys.stderr.write("WARNING: assertions not in test modules or" + " plugins will be ignored" + " because assert statements are not executed " + "by the underlying Python interpreter " + "(are you using python -O?)\n") + + def exists(path, ignore=EnvironmentError): try: return path.check() except ignore: return False + def getcfg(args, warnfunc=None): """ Search the list of arguments for a valid ini-file for pytest, @@ -1246,7 +1250,7 @@ def getcfg(args, warnfunc=None): This parameter should be removed when pytest adopts standard deprecation warnings (#1804). 
""" - from _pytest.deprecated import SETUP_CFG_PYTEST + from _pytest.deprecated import CFG_PYTEST_SECTION inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"] args = [x for x in args if not str(x).startswith("-")] if not args: @@ -1260,7 +1264,7 @@ def getcfg(args, warnfunc=None): iniconfig = py.iniconfig.IniConfig(p) if 'pytest' in iniconfig.sections: if inibasename == 'setup.cfg' and warnfunc: - warnfunc('C1', SETUP_CFG_PYTEST) + warnfunc('C1', CFG_PYTEST_SECTION.format(filename=inibasename)) return base, p, iniconfig['pytest'] if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections: return base, p, iniconfig['tool:pytest'] @@ -1319,14 +1323,22 @@ def get_dirs_from_args(args): ] -def determine_setup(inifile, args, warnfunc=None): +def determine_setup(inifile, args, warnfunc=None, rootdir_cmd_arg=None): dirs = get_dirs_from_args(args) if inifile: iniconfig = py.iniconfig.IniConfig(inifile) - try: - inicfg = iniconfig["pytest"] - except KeyError: - inicfg = None + is_cfg_file = str(inifile).endswith('.cfg') + # TODO: [pytest] section in *.cfg files is depricated. Need refactoring. + sections = ['tool:pytest', 'pytest'] if is_cfg_file else ['pytest'] + for section in sections: + try: + inicfg = iniconfig[section] + if is_cfg_file and section == 'pytest' and warnfunc: + from _pytest.deprecated import CFG_PYTEST_SECTION + warnfunc('C1', CFG_PYTEST_SECTION.format(filename=str(inifile))) + break + except KeyError: + inicfg = None rootdir = get_common_ancestor(dirs) else: ancestor = get_common_ancestor(dirs) @@ -1339,9 +1351,14 @@ def determine_setup(inifile, args, warnfunc=None): rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc) if rootdir is None: rootdir = get_common_ancestor([py.path.local(), ancestor]) - is_fs_root = os.path.splitdrive(str(rootdir))[1] == os.sep + is_fs_root = os.path.splitdrive(str(rootdir))[1] == '/' if is_fs_root: rootdir = ancestor + if rootdir_cmd_arg: + rootdir_abs_path = py.path.local(os.path.expandvars(rootdir_cmd_arg)) + if not os.path.isdir(str(rootdir_abs_path)): + raise UsageError("Directory '{}' not found. 
Check your '--rootdir' option.".format(rootdir_abs_path)) + rootdir = rootdir_abs_path return rootdir, inifile, inicfg or {} @@ -1361,7 +1378,7 @@ def setns(obj, dic): else: setattr(obj, name, value) obj.__all__.append(name) - #if obj != pytest: + # if obj != pytest: # pytest.__all__.append(name) setattr(pytest, name, value) diff --git a/_pytest/debugging.py b/_pytest/debugging.py index 73a0a2ef5..97a625369 100644 --- a/_pytest/debugging.py +++ b/_pytest/debugging.py @@ -2,7 +2,14 @@ from __future__ import absolute_import, division, print_function import pdb import sys +import os +from doctest import UnexpectedException +try: + from builtins import breakpoint # noqa + SUPPORTS_BREAKPOINT_BUILTIN = True +except ImportError: + SUPPORTS_BREAKPOINT_BUILTIN = False def pytest_addoption(parser): @@ -27,12 +34,20 @@ def pytest_configure(config): if config.getvalue("usepdb"): config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') + # Use custom Pdb class set_trace instead of default Pdb on breakpoint() call + if SUPPORTS_BREAKPOINT_BUILTIN: + _environ_pythonbreakpoint = os.environ.get('PYTHONBREAKPOINT', '') + if _environ_pythonbreakpoint == '': + sys.breakpointhook = pytestPDB.set_trace + old = (pdb.set_trace, pytestPDB._pluginmanager) def fin(): pdb.set_trace, pytestPDB._pluginmanager = old pytestPDB._config = None pytestPDB._pdb_cls = pdb.Pdb + if SUPPORTS_BREAKPOINT_BUILTIN: + sys.breakpointhook = sys.__breakpointhook__ pdb.set_trace = pytestPDB.set_trace pytestPDB._pluginmanager = config.pluginmanager @@ -40,7 +55,8 @@ def pytest_configure(config): pytestPDB._pdb_cls = pdb_cls config._cleanup.append(fin) -class pytestPDB: + +class pytestPDB(object): """ Pseudo PDB that defers to the real pdb. """ _pluginmanager = None _config = None @@ -54,7 +70,7 @@ class pytestPDB: if cls._pluginmanager is not None: capman = cls._pluginmanager.getplugin("capturemanager") if capman: - capman.suspendcapture(in_=True) + capman.suspend_global_capture(in_=True) tw = _pytest.config.create_terminal_writer(cls._config) tw.line() tw.sep(">", "PDB set_trace (IO-capturing turned off)") @@ -62,11 +78,11 @@ class pytestPDB: cls._pdb_cls().set_trace(frame) -class PdbInvoke: +class PdbInvoke(object): def pytest_exception_interact(self, node, call, report): capman = node.config.pluginmanager.getplugin("capturemanager") if capman: - out, err = capman.suspendcapture(in_=True) + out, err = capman.suspend_global_capture(in_=True) sys.stdout.write(out) sys.stdout.write(err) _enter_pdb(node, call.excinfo, report) @@ -85,6 +101,18 @@ def _enter_pdb(node, excinfo, rep): # for not completely clear reasons. tw = node.config.pluginmanager.getplugin("terminalreporter")._tw tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in (('stdout', rep.capstdout), + ('stderr', rep.capstderr), + ('log', rep.caplog)): + if showcapture in (sectionname, 'all') and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + tw.sep(">", "traceback") rep.toterminal(tw) tw.sep(">", "entering PDB") @@ -95,10 +123,9 @@ def _enter_pdb(node, excinfo, rep): def _postmortem_traceback(excinfo): - # A doctest.UnexpectedException is not useful for post_mortem. - # Use the underlying exception instead: - from doctest import UnexpectedException if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. 
+ # Use the underlying exception instead: return excinfo.value.exc_info[2] else: return excinfo._excinfo[2] diff --git a/_pytest/deprecated.py b/_pytest/deprecated.py index 1eeb74918..2be6b7300 100644 --- a/_pytest/deprecated.py +++ b/_pytest/deprecated.py @@ -13,7 +13,7 @@ class RemovedInPytest4Warning(DeprecationWarning): MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \ - 'pass a list of arguments instead.' + 'pass a list of arguments instead.' YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0' @@ -22,18 +22,44 @@ FUNCARG_PREFIX = ( 'and scheduled to be removed in pytest 4.0. ' 'Please remove the prefix and use the @pytest.fixture decorator instead.') -SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool:pytest] instead.' +CFG_PYTEST_SECTION = '[pytest] section in {filename} files is deprecated, use [tool:pytest] instead.' GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue" -RESULT_LOG = '--result-log is deprecated and scheduled for removal in pytest 4.0' +RESULT_LOG = ( + '--result-log is deprecated and scheduled for removal in pytest 4.0.\n' + 'See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information.' +) MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning( - "MarkInfo objects are deprecated as they contain the merged marks" + "MarkInfo objects are deprecated as they contain the merged marks.\n" + "Please use node.iter_markers to iterate over markers correctly" ) MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning( "Applying marks directly to parameters is deprecated," " please use pytest.param(..., marks=...) instead.\n" "For more details, see: https://docs.pytest.org/en/latest/parametrize.html" -) \ No newline at end of file +) + +RECORD_XML_PROPERTY = ( + 'Fixture renamed from "record_xml_property" to "record_property" as user ' + 'properties are now available to all reporters.\n' + '"record_xml_property" is now deprecated.' +) + +COLLECTOR_MAKEITEM = RemovedInPytest4Warning( + "pycollector makeitem was removed " + "as it is an accidentally leaked internal api" +) + +METAFUNC_ADD_CALL = ( + "Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.\n" + "Please use Metafunc.parametrize instead." +) + +PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning( + "Defining pytest_plugins in a non-top-level conftest is deprecated, " + "because it affects the entire directory tree in a non-explicit way.\n" + "Please move it to the top level conftest file instead." 
+) diff --git a/_pytest/doctest.py b/_pytest/doctest.py index fde6dd71d..131109cba 100644 --- a/_pytest/doctest.py +++ b/_pytest/doctest.py @@ -2,6 +2,8 @@ from __future__ import absolute_import, division, print_function import traceback +import sys +import platform import pytest from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr @@ -22,39 +24,54 @@ DOCTEST_REPORT_CHOICES = ( DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, ) +# Lazy definition of runner class +RUNNER_CLASS = None + + def pytest_addoption(parser): parser.addini('doctest_optionflags', 'option flags for doctests', - type="args", default=["ELLIPSIS"]) + type="args", default=["ELLIPSIS"]) parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8") group = parser.getgroup("collect") group.addoption("--doctest-modules", - action="store_true", default=False, - help="run doctests in all .py modules", - dest="doctestmodules") + action="store_true", default=False, + help="run doctests in all .py modules", + dest="doctestmodules") group.addoption("--doctest-report", - type=str.lower, default="udiff", - help="choose another output format for diffs on doctest failure", - choices=DOCTEST_REPORT_CHOICES, - dest="doctestreport") + type=str.lower, default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport") group.addoption("--doctest-glob", - action="append", default=[], metavar="pat", - help="doctests file matching pattern, default: test*.txt", - dest="doctestglob") + action="append", default=[], metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob") group.addoption("--doctest-ignore-import-errors", - action="store_true", default=False, - help="ignore doctest ImportErrors", - dest="doctest_ignore_import_errors") + action="store_true", default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors") + group.addoption("--doctest-continue-on-failure", + action="store_true", default=False, + help="for a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure") def pytest_collect_file(path, parent): config = parent.config if path.ext == ".py": - if config.option.doctestmodules: + if config.option.doctestmodules and not _is_setup_py(config, path, parent): return DoctestModule(path, parent) elif _is_doctest(config, path, parent): return DoctestTextfile(path, parent) +def _is_setup_py(config, path, parent): + if path.basename != "setup.py": + return False + contents = path.read() + return 'setuptools' in contents or 'distutils' in contents + + def _is_doctest(config, path, parent): if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path): return True @@ -67,14 +84,63 @@ class ReprFailDoctest(TerminalRepr): - def __init__(self, reprlocation, lines): - self.reprlocation = reprlocation - self.lines = lines + def __init__(self, reprlocation_lines): + # List of (reprlocation, lines) tuples + self.reprlocation_lines = reprlocation_lines def toterminal(self, tw): - for line in self.lines: - tw.line(line) - self.reprlocation.toterminal(tw) + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures): + super(MultipleDoctestFailures, self).__init__() + self.failures = failures + + +def _init_runner_class(): + import doctest + + class 
PytestDoctestRunner(doctest.DebugRunner): + """ + Runner to collect failures. Note that the out variable in this case is + a list instead of a stdout-like object + """ + def __init__(self, checker=None, verbose=None, optionflags=0, + continue_on_failure=True): + doctest.DebugRunner.__init__( + self, checker=checker, verbose=verbose, optionflags=optionflags) + self.continue_on_failure = continue_on_failure + + def report_failure(self, out, test, example, got): + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception(self, out, test, example, exc_info): + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner(checker=None, verbose=None, optionflags=0, + continue_on_failure=True): + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + return RUNNER_CLASS( + checker=checker, verbose=verbose, optionflags=optionflags, + continue_on_failure=continue_on_failure) class DoctestItem(pytest.Item): @@ -95,51 +161,76 @@ class DoctestItem(pytest.Item): def runtest(self): _check_all_skipped(self.dtest) - self.runner.run(self.dtest) + self._disable_output_capturing_for_darwin() + failures = [] + self.runner.run(self.dtest, out=failures) + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self): + """ + Disable output capturing. Otherwise, stdout is lost to doctest (#985) + """ + if platform.system() != 'Darwin': + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + out, err = capman.suspend_global_capture(in_=True) + sys.stdout.write(out) + sys.stderr.write(err) def repr_failure(self, excinfo): import doctest + failures = None if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)): - doctestfailure = excinfo.value - example = doctestfailure.example - test = doctestfailure.test - filename = test.filename - if test.lineno is None: - lineno = None - else: - lineno = test.lineno + example.lineno + 1 - message = excinfo.type.__name__ - reprlocation = ReprFileLocation(filename, lineno, message) - checker = _get_checker() - report_choice = _get_report_choice(self.config.getoption("doctestreport")) - if lineno is not None: - lines = doctestfailure.test.docstring.splitlines(False) - # add line numbers to the left of the error message - lines = ["%03d %s" % (i + test.lineno + 1, x) - for (i, x) in enumerate(lines)] - # trim docstring error lines to 10 - lines = lines[example.lineno - 9:example.lineno + 1] - else: - lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example'] - indent = '>>>' - for line in example.source.splitlines(): - lines.append('??? %s %s' % (indent, line)) - indent = '...' 
- if excinfo.errisinstance(doctest.DocTestFailure): - lines += checker.output_difference(example, - doctestfailure.got, report_choice).split("\n") - else: - inner_excinfo = ExceptionInfo(excinfo.value.exc_info) - lines += ["UNEXPECTED EXCEPTION: %s" % - repr(inner_excinfo.value)] - lines += traceback.format_exception(*excinfo.value.exc_info) - return ReprFailDoctest(reprlocation, lines) + failures = [excinfo.value] + elif excinfo.errisinstance(MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is not None: + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + reprlocation = ReprFileLocation(filename, lineno, message) + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + lines = ["%03d %s" % (i + test.lineno + 1, x) + for (i, x) in enumerate(lines)] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0):example.lineno + 1] + else: + lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example'] + indent = '>>>' + for line in example.source.splitlines(): + lines.append('??? %s %s' % (indent, line)) + indent = '...' + if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference(example, + failure.got, + report_choice).split("\n") + else: + inner_excinfo = ExceptionInfo(failure.exc_info) + lines += ["UNEXPECTED EXCEPTION: %s" % + repr(inner_excinfo.value)] + lines += traceback.format_exception(*failure.exc_info) + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) else: return super(DoctestItem, self).repr_failure(excinfo) def reportinfo(self): - return self.fspath, None, "[doctest] %s" % self.name + return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name def _get_flag_lookup(): @@ -163,6 +254,17 @@ def get_optionflags(parent): flag_acc |= flag_lookup_table[flag] return flag_acc + +def _get_continue_on_failure(config): + continue_on_failure = config.getvalue('doctest_continue_on_failure') + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + class DoctestTextfile(pytest.Module): obj = None @@ -178,8 +280,11 @@ class DoctestTextfile(pytest.Module): globs = {'__name__': '__main__'} optionflags = get_optionflags(self) - runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, - checker=_get_checker()) + + runner = _get_runner( + verbose=0, optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config)) _fix_spoof_python2(runner, encoding) parser = doctest.DocTestParser() @@ -214,8 +319,10 @@ class DoctestModule(pytest.Module): # uses internal doctest module parsing mechanism finder = doctest.DocTestFinder() optionflags = get_optionflags(self) - runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, - checker=_get_checker()) + runner = _get_runner( + verbose=0, optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config)) for test in finder.find(module, module.__name__): if test.examples: # skip empty doctests 
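The PytestDoctestRunner introduced above appends each DocTestFailure or UnexpectedException to the list passed as out instead of raising on the first one, which is what the new --doctest-continue-on-failure option relies on. A small self-contained sketch of the same collecting behaviour using only the standard library (CollectingRunner and the sample text are illustrative, not pytest code):

import doctest

class CollectingRunner(doctest.DebugRunner):
    """Append every failure to the ``out`` list instead of stopping at the first one."""

    def __init__(self, continue_on_failure=True, **kwargs):
        doctest.DebugRunner.__init__(self, **kwargs)
        self.continue_on_failure = continue_on_failure

    def report_failure(self, out, test, example, got):
        failure = doctest.DocTestFailure(test, example, got)
        if self.continue_on_failure:
            out.append(failure)
        else:
            raise failure

    def report_unexpected_exception(self, out, test, example, exc_info):
        failure = doctest.UnexpectedException(test, example, exc_info)
        if self.continue_on_failure:
            out.append(failure)
        else:
            raise failure

text = ">>> 1 + 1\n3\n>>> 2 + 2\n5\n"
test = doctest.DocTestParser().get_doctest(text, {}, "example", "example.txt", 0)
failures = []
CollectingRunner(verbose=False).run(test, out=failures)
print(len(failures))  # 2 -- both failing examples were recorded

With a plain doctest.DebugRunner the first wrong example raises immediately; collecting the failures is what lets every failing example of a doctest be reported in one run.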
@@ -332,7 +439,7 @@ def _fix_spoof_python2(runner, encoding): should patch only doctests for text files because they don't have a way to declare their encoding. Doctests in docstrings from Python modules don't have the same problem given that Python already decoded the strings. - + This fixes the problem related in issue #2434. """ from _pytest.compat import _PY2 @@ -355,6 +462,6 @@ def _fix_spoof_python2(runner, encoding): @pytest.fixture(scope='session') def doctest_namespace(): """ - Inject names into the doctest namespace. + Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. """ return dict() diff --git a/_pytest/fixtures.py b/_pytest/fixtures.py index b9d1070ce..fa16fea64 100644 --- a/_pytest/fixtures.py +++ b/_pytest/fixtures.py @@ -1,13 +1,18 @@ from __future__ import absolute_import, division, print_function -import sys +import functools +import inspect +import sys +import warnings +from collections import OrderedDict, deque, defaultdict +from more_itertools import flatten + +import attr +import py from py._code.code import FormattedExcinfo -import py -import warnings - -import inspect import _pytest +from _pytest import nodes from _pytest._code.code import TerminalRepr from _pytest.compat import ( NOTSET, exc_clear, _format_args, @@ -15,16 +20,26 @@ from _pytest.compat import ( is_generator, isclass, getimfunc, getlocation, getfuncargnames, safe_getattr, + FuncargnamesCompatAttr, ) -from _pytest.runner import fail -from _pytest.compat import FuncargnamesCompatAttr +from _pytest.outcomes import fail, TEST_OUTCOME + + +@attr.s(frozen=True) +class PseudoFixtureDef(object): + cached_result = attr.ib() + scope = attr.ib() + def pytest_sessionstart(session): import _pytest.python + import _pytest.nodes + scopename2class.update({ 'class': _pytest.python.Class, 'module': _pytest.python.Module, - 'function': _pytest.main.Item, + 'function': _pytest.nodes.Item, + 'session': _pytest.main.Session, }) session._fixturemanager = FixtureManager(session) @@ -38,6 +53,7 @@ scope2props["class"] = scope2props["module"] + ("cls",) scope2props["instance"] = scope2props["class"] + ("instance", ) scope2props["function"] = scope2props["instance"] + ("function", "keywords") + def scopeproperty(name=None, doc=None): def decoratescope(func): scopename = name or func.__name__ @@ -55,8 +71,6 @@ def scopeproperty(name=None, doc=None): def get_scope_node(node, scope): cls = scopename2class.get(scope) if cls is None: - if scope == "session": - return node.session raise ValueError("unknown scope") return node.getparent(cls) @@ -69,7 +83,7 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): # XXX we can probably avoid this algorithm if we modify CallSpec2 # to directly care for creating the fixturedefs within its methods. 
if not metafunc._calls[0].funcargs: - return # this function call does not have direct parametrization + return # this function call does not have direct parametrization # collect funcargs of all callspecs into a list of values arg2params = {} arg2scope = {} @@ -105,28 +119,26 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): if node and argname in node._name2pseudofixturedef: arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] else: - fixturedef = FixtureDef(fixturemanager, '', argname, - get_direct_param_fixture_func, - arg2scope[argname], - valuelist, False, False) + fixturedef = FixtureDef(fixturemanager, '', argname, + get_direct_param_fixture_func, + arg2scope[argname], + valuelist, False, False) arg2fixturedefs[argname] = [fixturedef] if node is not None: node._name2pseudofixturedef[argname] = fixturedef - def getfixturemarker(obj): """ return fixturemarker or None if it doesn't exist or raised exceptions.""" try: return getattr(obj, "_pytestfixturefunction", None) - except Exception: + except TEST_OUTCOME: # some objects raise errors like request (from flask import request) # we don't expect them to be fixture functions return None - def get_parametrized_fixture_keys(item, scopenum): """ return list of keys for all parametrized arguments which match the specified scope. """ @@ -136,10 +148,10 @@ def get_parametrized_fixture_keys(item, scopenum): except AttributeError: pass else: - # cs.indictes.items() is random order of argnames but - # then again different functions (items) can change order of - # arguments so it doesn't matter much probably - for argname, param_index in cs.indices.items(): + # cs.indices.items() is random order of argnames. Need to + # sort this so that different calls to + # get_parametrized_fixture_keys will be deterministic. 
+ for argname, param_index in sorted(cs.indices.items()): if cs._arg2scopenum[argname] != scopenum: continue if scopenum == 0: # session @@ -158,61 +170,59 @@ def get_parametrized_fixture_keys(item, scopenum): def reorder_items(items): argkeys_cache = {} + items_by_argkey = {} for scopenum in range(0, scopenum_function): argkeys_cache[scopenum] = d = {} + items_by_argkey[scopenum] = item_d = defaultdict(deque) for item in items: - keys = set(get_parametrized_fixture_keys(item, scopenum)) + keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum)) if keys: d[item] = keys - return reorder_items_atscope(items, set(), argkeys_cache, 0) + for key in keys: + item_d[key].append(item) + items = OrderedDict.fromkeys(items) + return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0)) -def reorder_items_atscope(items, ignore, argkeys_cache, scopenum): + +def fix_cache_order(item, argkeys_cache, items_by_argkey): + for scopenum in range(0, scopenum_function): + for key in argkeys_cache[scopenum].get(item, []): + items_by_argkey[scopenum][key].appendleft(item) + + +def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum): if scopenum >= scopenum_function or len(items) < 3: return items - items_done = [] - while 1: - items_before, items_same, items_other, newignore = \ - slice_items(items, ignore, argkeys_cache[scopenum]) - items_before = reorder_items_atscope( - items_before, ignore, argkeys_cache,scopenum+1) - if items_same is None: - # nothing to reorder in this scope - assert items_other is None - return items_done + items_before - items_done.extend(items_before) - items = items_same + items_other - ignore = newignore - - -def slice_items(items, ignore, scoped_argkeys_cache): - # we pick the first item which uses a fixture instance in the - # requested scope and which we haven't seen yet. We slice the input - # items list into a list of items_nomatch, items_same and - # items_other - if scoped_argkeys_cache: # do we need to do work at all? 
- it = iter(items) - # first find a slicing key - for i, item in enumerate(it): - argkeys = scoped_argkeys_cache.get(item) - if argkeys is not None: - argkeys = argkeys.difference(ignore) - if argkeys: # found a slicing key - slicing_argkey = argkeys.pop() - items_before = items[:i] - items_same = [item] - items_other = [] - # now slice the remainder of the list - for item in it: - argkeys = scoped_argkeys_cache.get(item) - if argkeys and slicing_argkey in argkeys and \ - slicing_argkey not in ignore: - items_same.append(item) - else: - items_other.append(item) - newignore = ignore.copy() - newignore.add(slicing_argkey) - return (items_before, items_same, items_other, newignore) - return items, None, None, None + ignore = set() + items_deque = deque(items) + items_done = OrderedDict() + scoped_items_by_argkey = items_by_argkey[scopenum] + scoped_argkeys_cache = argkeys_cache[scopenum] + while items_deque: + no_argkey_group = OrderedDict() + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_group: + continue + argkeys = OrderedDict.fromkeys(k for k in scoped_argkeys_cache.get(item, []) if k not in ignore) + if not argkeys: + no_argkey_group[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # we don't have to remove relevant items from later in the deque because they'll just be ignored + matching_items = [i for i in scoped_items_by_argkey[slicing_argkey] if i in items] + for i in reversed(matching_items): + fix_cache_order(i, argkeys_cache, items_by_argkey) + items_deque.appendleft(i) + break + if no_argkey_group: + no_argkey_group = reorder_items_atscope( + no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1) + for item in no_argkey_group: + items_done[item] = None + ignore.add(slicing_argkey) + return items_done def fillfixtures(function): @@ -237,11 +247,11 @@ def fillfixtures(function): request._fillfixtures() - def get_direct_param_fixture_func(request): return request.param -class FuncFixtureInfo: + +class FuncFixtureInfo(object): def __init__(self, argnames, names_closure, name2fixturedefs): self.argnames = argnames self.names_closure = names_closure @@ -262,7 +272,6 @@ class FixtureRequest(FuncargnamesCompatAttr): self.fixturename = None #: Scope string, one of "function", "class", "module", "session" self.scope = "function" - self._fixture_values = {} # argname -> fixture value self._fixture_defs = {} # argname -> FixtureDef fixtureinfo = pyfuncitem._fixtureinfo self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() @@ -279,7 +288,6 @@ class FixtureRequest(FuncargnamesCompatAttr): """ underlying collection node (depends on current request scope)""" return self._getscopeitem(self.scope) - def _getnextfixturedef(self, argname): fixturedefs = self._arg2fixturedefs.get(argname, None) if fixturedefs is None: @@ -301,7 +309,6 @@ class FixtureRequest(FuncargnamesCompatAttr): """ the pytest config object associated with this request. """ return self._pyfuncitem.config - @scopeproperty() def function(self): """ test function object if the request has a per-function scope. """ @@ -365,10 +372,7 @@ class FixtureRequest(FuncargnamesCompatAttr): :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object created by a call to ``pytest.mark.NAME(...)``. """ - try: - self.node.keywords[marker.markname] = marker - except AttributeError: - raise ValueError(marker) + self.node.add_marker(marker) def raiseerror(self, msg): """ raise a FixtureLookupError with the given message. 
""" @@ -397,7 +401,7 @@ class FixtureRequest(FuncargnamesCompatAttr): :arg extrakey: added to internal caching key of (funcargname, scope). """ if not hasattr(self.config, '_setupcache'): - self.config._setupcache = {} # XXX weakref? + self.config._setupcache = {} # XXX weakref? cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) cache = self.config._setupcache try: @@ -428,7 +432,8 @@ class FixtureRequest(FuncargnamesCompatAttr): from _pytest import deprecated warnings.warn( deprecated.GETFUNCARGVALUE, - DeprecationWarning) + DeprecationWarning, + stacklevel=2) return self.getfixturevalue(argname) def _get_active_fixturedef(self, argname): @@ -439,30 +444,35 @@ class FixtureRequest(FuncargnamesCompatAttr): fixturedef = self._getnextfixturedef(argname) except FixtureLookupError: if argname == "request": - class PseudoFixtureDef: - cached_result = (self, [0], None) - scope = "function" - return PseudoFixtureDef + cached_result = (self, [0], None) + scope = "function" + return PseudoFixtureDef(cached_result, scope) raise # remove indent to prevent the python3 exception # from leaking into the call - result = self._getfixturevalue(fixturedef) - self._fixture_values[argname] = result + self._compute_fixture_value(fixturedef) self._fixture_defs[argname] = fixturedef return fixturedef def _get_fixturestack(self): current = self - l = [] + values = [] while 1: fixturedef = getattr(current, "_fixturedef", None) if fixturedef is None: - l.reverse() - return l - l.append(fixturedef) + values.reverse() + return values + values.append(fixturedef) current = current._parent_request - def _getfixturevalue(self, fixturedef): + def _compute_fixture_value(self, fixturedef): + """ + Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will + force the FixtureDef object to throw away any previous results and compute a new fixture value, which + will be stored into the FixtureDef object itself. 
+ + :param FixtureDef fixturedef: + """ # prepare a subrequest object before calling fixture function # (latter managed by fixturedef) argname = fixturedef.argname @@ -511,12 +521,11 @@ class FixtureRequest(FuncargnamesCompatAttr): exc_clear() try: # call the fixture function - val = fixturedef.execute(request=subrequest) + fixturedef.execute(request=subrequest) finally: # if fixture function failed it might have registered finalizers - self.session._setupstate.addfinalizer(fixturedef.finish, + self.session._setupstate.addfinalizer(functools.partial(fixturedef.finish, request=subrequest), subrequest.node) - return val def _check_scope(self, argname, invoking_scope, requested_scope): if argname == "request": @@ -527,8 +536,8 @@ class FixtureRequest(FuncargnamesCompatAttr): fail("ScopeMismatch: You tried to access the %r scoped " "fixture %r with a %r scoped request object, " "involved factories\n%s" % ( - (requested_scope, argname, invoking_scope, "\n".join(lines))), - pytrace=False) + (requested_scope, argname, invoking_scope, "\n".join(lines))), + pytrace=False) def _factorytraceback(self): lines = [] @@ -549,16 +558,17 @@ class FixtureRequest(FuncargnamesCompatAttr): if node is None and scope == "class": # fallback to function item itself node = self._pyfuncitem - assert node + assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(scope, self._pyfuncitem) return node def __repr__(self): - return "" %(self.node) + return "" % (self.node) class SubRequest(FixtureRequest): """ a sub request for handling getting a fixture from a test function/fixture. """ + def __init__(self, request, scope, param, param_index, fixturedef): self._parent_request = request self.fixturename = fixturedef.argname @@ -567,9 +577,7 @@ class SubRequest(FixtureRequest): self.param_index = param_index self.scope = scope self._fixturedef = fixturedef - self.addfinalizer = fixturedef.addfinalizer self._pyfuncitem = request._pyfuncitem - self._fixture_values = request._fixture_values self._fixture_defs = request._fixture_defs self._arg2fixturedefs = request._arg2fixturedefs self._arg2index = request._arg2index @@ -578,6 +586,9 @@ class SubRequest(FixtureRequest): def __repr__(self): return "" % (self.fixturename, self._pyfuncitem) + def addfinalizer(self, finalizer): + self._fixturedef.addfinalizer(finalizer) + class ScopeMismatchError(Exception): """ A fixture function tries to use a different fixture function which @@ -609,6 +620,7 @@ def scope2index(scope, descr, where=None): class FixtureLookupError(LookupError): """ could not return a requested Fixture (missing or invalid). 
""" + def __init__(self, argname, request, msg=None): self.argname = argname self.request = request @@ -631,9 +643,9 @@ class FixtureLookupError(LookupError): lines, _ = inspect.getsourcelines(get_real_func(function)) except (IOError, IndexError, TypeError): error_msg = "file %s, line %s: source code not available" - addline(error_msg % (fspath, lineno+1)) + addline(error_msg % (fspath, lineno + 1)) else: - addline("file %s, line %s" % (fspath, lineno+1)) + addline("file %s, line %s" % (fspath, lineno + 1)) for i, line in enumerate(lines): line = line.rstrip() addline(" " + line) @@ -649,7 +661,7 @@ class FixtureLookupError(LookupError): if faclist and name not in available: available.append(name) msg = "fixture %r not found" % (self.argname,) - msg += "\n available fixtures: %s" %(", ".join(sorted(available)),) + msg += "\n available fixtures: %s" % (", ".join(sorted(available)),) msg += "\n use 'pytest --fixtures [testpath]' for help on them." return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) @@ -675,12 +687,12 @@ class FixtureLookupErrorRepr(TerminalRepr): tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker, line.strip()), red=True) tw.line() - tw.line("%s:%d" % (self.filename, self.firstlineno+1)) + tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) def fail_fixturefunc(fixturefunc, msg): fs, lineno = getfslineno(fixturefunc) - location = "%s:%s" % (fs, lineno+1) + location = "%s:%s" % (fs, lineno + 1) source = _pytest._code.Source(fixturefunc) fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) @@ -699,7 +711,7 @@ def call_fixture_func(fixturefunc, request, kwargs): pass else: fail_fixturefunc(fixturefunc, - "yield_fixture function has more than one 'yield'") + "yield_fixture function has more than one 'yield'") request.addfinalizer(teardown) else: @@ -707,8 +719,9 @@ def call_fixture_func(fixturefunc, request, kwargs): return res -class FixtureDef: +class FixtureDef(object): """ A container for a factory definition. 
""" + def __init__(self, fixturemanager, baseid, argname, func, scope, params, unittest=False, ids=None): self._fixturemanager = fixturemanager @@ -723,23 +736,22 @@ class FixtureDef: where=baseid ) self.params = params - startindex = unittest and 1 or None - self.argnames = getfuncargnames(func, startindex=startindex) + self.argnames = getfuncargnames(func, is_method=unittest) self.unittest = unittest self.ids = ids - self._finalizer = [] + self._finalizers = [] def addfinalizer(self, finalizer): - self._finalizer.append(finalizer) + self._finalizers.append(finalizer) - def finish(self): + def finish(self, request): exceptions = [] try: - while self._finalizer: + while self._finalizers: try: - func = self._finalizer.pop() + func = self._finalizers.pop() func() - except: + except: # noqa exceptions.append(sys.exc_info()) if exceptions: e = exceptions[0] @@ -747,12 +759,15 @@ class FixtureDef: py.builtin._reraise(*e) finally: - ihook = self._fixturemanager.session.ihook - ihook.pytest_fixture_post_finalizer(fixturedef=self) + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) # even if finalization fails, we invalidate - # the cached fixture value + # the cached fixture value and remove + # all finalizers because they may be bound methods which will + # keep instances alive if hasattr(self, "cached_result"): del self.cached_result + self._finalizers = [] def execute(self, request): # get required arguments and register our own finish() @@ -760,7 +775,7 @@ class FixtureDef: for argname in self.argnames: fixturedef = request._get_active_fixturedef(argname) if argname != "request": - fixturedef.addfinalizer(self.finish) + fixturedef.addfinalizer(functools.partial(self.finish, request=request)) my_cache_key = request.param_index cached_result = getattr(self, "cached_result", None) @@ -773,16 +788,17 @@ class FixtureDef: return result # we have a previous but differently parametrized fixture instance # so we need to tear it down before creating a new one - self.finish() + self.finish(request) assert not hasattr(self, "cached_result") - ihook = self._fixturemanager.session.ihook - return ihook.pytest_fixture_setup(fixturedef=self, request=request) + hook = self._fixturemanager.session.gethookproxy(request.node.fspath) + return hook.pytest_fixture_setup(fixturedef=self, request=request) def __repr__(self): return ("" % (self.argname, self.scope, self.baseid)) + def pytest_fixture_setup(fixturedef, request): """ Execution of fixture setup. 
""" kwargs = {} @@ -808,25 +824,34 @@ def pytest_fixture_setup(fixturedef, request): my_cache_key = request.param_index try: result = call_fixture_func(fixturefunc, request, kwargs) - except Exception: + except TEST_OUTCOME: fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) raise fixturedef.cached_result = (result, my_cache_key, None) return result -class FixtureFunctionMarker: - def __init__(self, scope, params, autouse=False, ids=None, name=None): - self.scope = scope - self.params = params - self.autouse = autouse - self.ids = ids - self.name = name +def _ensure_immutable_ids(ids): + if ids is None: + return + if callable(ids): + return ids + return tuple(ids) + + +@attr.s(frozen=True) +class FixtureFunctionMarker(object): + scope = attr.ib() + params = attr.ib(converter=attr.converters.optional(tuple)) + autouse = attr.ib(default=False) + ids = attr.ib(default=None, converter=_ensure_immutable_ids) + name = attr.ib(default=None) def __call__(self, function): if isclass(function): raise ValueError( - "class fixtures not supported (may be in the future)") + "class fixtures not supported (may be in the future)") + if getattr(function, "_pytestfixturefunction", False): raise ValueError( "fixture is being applied more than once to the same function") @@ -835,9 +860,8 @@ class FixtureFunctionMarker: return function - def fixture(scope="function", params=None, autouse=False, ids=None, name=None): - """ (return a) decorator to mark a fixture factory function. + """Decorator to mark a fixture factory function. This decorator can be used (with or without parameters) to define a fixture function. The name of the fixture function can later be @@ -874,10 +898,10 @@ def fixture(scope="function", params=None, autouse=False, ids=None, name=None): instead of ``return``. In this case, the code block after the ``yield`` statement is executed as teardown code regardless of the test outcome. A fixture function must yield exactly once. """ - if callable(scope) and params is None and autouse == False: + if callable(scope) and params is None and autouse is False: # direct decoration return FixtureFunctionMarker( - "function", params, autouse, name=name)(scope) + "function", params, autouse, name=name)(scope) if params is not None and not isinstance(params, (list, tuple)): params = list(params) return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) @@ -892,7 +916,7 @@ def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=N if callable(scope) and params is None and not autouse: # direct decoration return FixtureFunctionMarker( - "function", params, autouse, ids=ids, name=name)(scope) + "function", params, autouse, ids=ids, name=name)(scope) else: return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) @@ -902,11 +926,19 @@ defaultfuncargprefixmarker = fixture() @fixture(scope="session") def pytestconfig(request): - """ the pytest config object with access to command line opts.""" + """Session-scoped fixture that returns the :class:`_pytest.config.Config` object. + + Example:: + + def test_foo(pytestconfig): + if pytestconfig.getoption("verbose"): + ... + + """ return request.config -class FixtureManager: +class FixtureManager(object): """ pytest fixtures definitions and information is stored and managed from this class. 
@@ -951,20 +983,14 @@ class FixtureManager: self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] session.config.pluginmanager.register(self, "funcmanage") - def getfixtureinfo(self, node, func, cls, funcargs=True): if funcargs and not hasattr(node, "nofuncargs"): - if cls is not None: - startindex = 1 - else: - startindex = None - argnames = getfuncargnames(func, startindex) + argnames = getfuncargnames(func, cls=cls) else: argnames = () - usefixtures = getattr(func, "usefixtures", None) + usefixtures = flatten(mark.args for mark in node.iter_markers() if mark.name == "usefixtures") initialnames = argnames - if usefixtures is not None: - initialnames = usefixtures.args + initialnames + initialnames = tuple(usefixtures) + initialnames fm = node.session._fixturemanager names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames, node) @@ -982,8 +1008,8 @@ class FixtureManager: # by their test id) if p.basename.startswith("conftest.py"): nodeid = p.dirpath().relto(self.config.rootdir) - if p.sep != "/": - nodeid = nodeid.replace(p.sep, "/") + if p.sep != nodes.SEP: + nodeid = nodeid.replace(p.sep, nodes.SEP) self.parsefactories(plugin, nodeid) def _getautousenames(self, nodeid): @@ -993,13 +1019,10 @@ class FixtureManager: if nodeid.startswith(baseid): if baseid: i = len(baseid) - nextchar = nodeid[i:i+1] + nextchar = nodeid[i:i + 1] if nextchar and nextchar not in ":/": continue autousenames.extend(basenames) - # make sure autousenames are sorted by scope, scopenum 0 is session - autousenames.sort( - key=lambda x: self._arg2fixturedefs[x][-1].scopenum) return autousenames def getfixtureclosure(self, fixturenames, parentnode): @@ -1030,6 +1053,16 @@ class FixtureManager: if fixturedefs: arg2fixturedefs[argname] = fixturedefs merge(fixturedefs[-1].argnames) + + def sort_by_scope(arg_name): + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return scopes.index('function') + else: + return fixturedefs[-1].scopenum + + fixturenames_closure.sort(key=sort_by_scope) return fixturenames_closure, arg2fixturedefs def pytest_generate_tests(self, metafunc): @@ -1038,9 +1071,16 @@ class FixtureManager: if faclist: fixturedef = faclist[-1] if fixturedef.params is not None: - func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]]) + parametrize_func = getattr(metafunc.function, 'parametrize', None) + if parametrize_func is not None: + parametrize_func = parametrize_func.combined + func_params = getattr(parametrize_func, 'args', [[None]]) + func_kwargs = getattr(parametrize_func, 'kwargs', {}) # skip directly parametrized arguments - argnames = func_params[0] + if "argnames" in func_kwargs: + argnames = parametrize_func.kwargs["argnames"] + else: + argnames = func_params[0] if not isinstance(argnames, (tuple, list)): argnames = [x.strip() for x in argnames.split(",") if x.strip()] if argname not in func_params and argname not in argnames: @@ -1128,6 +1168,5 @@ class FixtureManager: def _matchfactories(self, fixturedefs, nodeid): for fixturedef in fixturedefs: - if nodeid.startswith(fixturedef.baseid): + if nodes.ischildnode(fixturedef.baseid, nodeid): yield fixturedef - diff --git a/_pytest/freeze_support.py b/_pytest/freeze_support.py index 52f86087f..97147a882 100644 --- a/_pytest/freeze_support.py +++ b/_pytest/freeze_support.py @@ -5,7 +5,6 @@ pytest from __future__ import absolute_import, division, print_function - def freeze_includes(): """ Returns a list of module names used by py.test that should be diff --git 
a/_pytest/helpconfig.py b/_pytest/helpconfig.py index e3c6b6e99..5a81a5bd3 100644 --- a/_pytest/helpconfig.py +++ b/_pytest/helpconfig.py @@ -4,7 +4,8 @@ from __future__ import absolute_import, division, print_function import py import pytest from _pytest.config import PrintHelp -import os, sys +import os +import sys from argparse import Action @@ -41,24 +42,24 @@ class HelpAction(Action): def pytest_addoption(parser): group = parser.getgroup('debugconfig') group.addoption('--version', action="store_true", - help="display pytest lib version and import information.") + help="display pytest lib version and import information.") group._addoption("-h", "--help", action=HelpAction, dest="help", - help="show help message and configuration info") - group._addoption('-p', action="append", dest="plugins", default = [], - metavar="name", - help="early-load given plugin (multi-allowed). " - "To avoid loading of plugins, use the `no:` prefix, e.g. " - "`no:doctest`.") + help="show help message and configuration info") + group._addoption('-p', action="append", dest="plugins", default=[], + metavar="name", + help="early-load given plugin (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.") group.addoption('--traceconfig', '--trace-config', - action="store_true", default=False, - help="trace considerations of conftest.py files."), + action="store_true", default=False, + help="trace considerations of conftest.py files."), group.addoption('--debug', - action="store_true", dest="debug", default=False, - help="store internal tracing debug information in 'pytestdebug.log'.") + action="store_true", dest="debug", default=False, + help="store internal tracing debug information in 'pytestdebug.log'.") group._addoption( - '-o', '--override-ini', nargs='*', dest="override_ini", + '-o', '--override-ini', dest="override_ini", action="append", - help="override config option with option=value style, e.g. `-o xfail_strict=True`.") + help='override ini option with "option=value" style, e.g. 
`-o xfail_strict=True -o cache_dir=cache`.') @pytest.hookimpl(hookwrapper=True) @@ -69,10 +70,10 @@ def pytest_cmdline_parse(): path = os.path.abspath("pytestdebug.log") debugfile = open(path, 'w') debugfile.write("versions pytest-%s, py-%s, " - "python-%s\ncwd=%s\nargs=%s\n\n" %( - pytest.__version__, py.__version__, - ".".join(map(str, sys.version_info)), - os.getcwd(), config._origargs)) + "python-%s\ncwd=%s\nargs=%s\n\n" % ( + pytest.__version__, py.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), config._origargs)) config.trace.root.setwriter(debugfile.write) undo_tracing = config.pluginmanager.enable_tracing() sys.stderr.write("writing pytestdebug information to %s\n" % path) @@ -86,11 +87,12 @@ def pytest_cmdline_parse(): config.add_cleanup(unset_tracing) + def pytest_cmdline_main(config): if config.option.version: p = py.path.local(pytest.__file__) sys.stderr.write("This is pytest version %s, imported from %s\n" % - (pytest.__version__, p)) + (pytest.__version__, p)) plugininfo = getpluginversioninfo(config) if plugininfo: for line in plugininfo: @@ -102,6 +104,7 @@ def pytest_cmdline_main(config): config._ensure_unconfigure() return 0 + def showhelp(config): reporter = config.pluginmanager.get_plugin('terminalreporter') tw = reporter._tw @@ -117,7 +120,7 @@ def showhelp(config): if type is None: type = "string" spec = "%s (%s)" % (name, type) - line = " %-24s %s" %(spec, help) + line = " %-24s %s" % (spec, help) tw.line(line[:tw.fullwidth]) tw.line() @@ -146,6 +149,7 @@ conftest_options = [ ('pytest_plugins', 'list of plugin names to load'), ] + def getpluginversioninfo(config): lines = [] plugininfo = config.pluginmanager.list_plugin_distinfo() @@ -157,11 +161,12 @@ def getpluginversioninfo(config): lines.append(" " + content) return lines + def pytest_report_header(config): lines = [] if config.option.debug or config.option.traceconfig: lines.append("using: pytest-%s pylib-%s" % - (pytest.__version__,py.__version__)) + (pytest.__version__, py.__version__)) verinfo = getpluginversioninfo(config) if verinfo: @@ -175,5 +180,5 @@ def pytest_report_header(config): r = plugin.__file__ else: r = repr(plugin) - lines.append(" %-20s: %s" %(name, r)) + lines.append(" %-20s: %s" % (name, r)) return lines diff --git a/_pytest/hookspec.py b/_pytest/hookspec.py index 2c9a66163..f5bdfabc5 100644 --- a/_pytest/hookspec.py +++ b/_pytest/hookspec.py @@ -1,6 +1,6 @@ """ hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ -from _pytest._pluggy import HookspecMarker +from pluggy import HookspecMarker hookspec = HookspecMarker("pytest") @@ -8,24 +8,44 @@ hookspec = HookspecMarker("pytest") # Initialization hooks called for every plugin # ------------------------------------------------------------------------- + @hookspec(historic=True) def pytest_addhooks(pluginmanager): """called at plugin registration time to allow adding new hooks via a call to - pluginmanager.add_hookspecs(module_or_class, prefix).""" + ``pluginmanager.add_hookspecs(module_or_class, prefix)``. + + + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager + + .. note:: + This hook is incompatible with ``hookwrapper=True``. 
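A sketch of how a plugin might use this hook; the hook name and container class below are invented for illustration, only ``add_hookspecs`` is the documented API::

    class MyProjectHooks(object):
        # new hook specification; other plugins may implement it
        def pytest_myproject_result_uploaded(self, report, url):
            """ called after a test report has been uploaded somewhere. """

    def pytest_addhooks(pluginmanager):
        pluginmanager.add_hookspecs(MyProjectHooks)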
+ """ @hookspec(historic=True) def pytest_namespace(): """ - DEPRECATED: this hook causes direct monkeypatching on pytest, its use is strongly discouraged + (**Deprecated**) this hook causes direct monkeypatching on pytest, its use is strongly discouraged return dict of name->object to be made globally available in - the pytest namespace. This hook is called at plugin registration - time. + the pytest namespace. + + This hook is called at plugin registration time. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. """ + @hookspec(historic=True) def pytest_plugin_registered(plugin, manager): - """ a new pytest plugin got registered. """ + """ a new pytest plugin got registered. + + :param plugin: the plugin module or instance + :param _pytest.config.PytestPluginManager manager: pytest plugin manager + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ @hookspec(historic=True) @@ -39,7 +59,7 @@ def pytest_addoption(parser): files situated at the tests root directory due to how pytest :ref:`discovers plugins during startup `. - :arg parser: To add command line options, call + :arg _pytest.config.Parser parser: To add command line options, call :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`. To add ini-file values call :py:func:`parser.addini(...) <_pytest.config.Parser.addini>`. @@ -54,42 +74,89 @@ def pytest_addoption(parser): a value read from an ini-style file. The config object is passed around on many internal objects via the ``.config`` - attribute or can be retrieved as the ``pytestconfig`` fixture or accessed - via (deprecated) ``pytest.config``. + attribute or can be retrieved as the ``pytestconfig`` fixture. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. """ + @hookspec(historic=True) def pytest_configure(config): - """ called after command line options have been parsed - and all plugins and initial conftest files been loaded. - This hook is called for every plugin. + """ + Allows plugins and conftest files to perform initial configuration. + + This hook is called for every plugin and initial conftest file + after command line options have been parsed. + + After that, the hook is called for other conftest files as they are + imported. + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + + :arg _pytest.config.Config config: pytest config object """ # ------------------------------------------------------------------------- # Bootstrapping hooks called for plugins registered early enough: -# internal and 3rd party plugins as well as directly -# discoverable conftest.py local plugins. +# internal and 3rd party plugins. # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_cmdline_parse(pluginmanager, args): """return initialized config object, parsing the specified args. - Stops at first non-None result, see :ref:`firstresult` """ + Stops at first non-None result, see :ref:`firstresult` + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager + :param list[str] args: list of arguments passed on the command line + """ + def pytest_cmdline_preparse(config, args): - """(deprecated) modify command line arguments before option parsing. """ + """(**Deprecated**) modify command line arguments before option parsing. + + This hook is considered deprecated and will be removed in a future pytest version. 
Consider + using :func:`pytest_load_initial_conftests` instead. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config config: pytest config object + :param list[str] args: list of arguments passed on the command line + """ + @hookspec(firstresult=True) def pytest_cmdline_main(config): """ called for performing the main command line action. The default implementation will invoke the configure hooks and runtest_mainloop. - Stops at first non-None result, see :ref:`firstresult` """ + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.config.Config config: pytest config object + """ + def pytest_load_initial_conftests(early_config, parser, args): """ implements the loading of initial conftest files ahead - of command line option parsing. """ + of command line option parsing. + + .. note:: + This hook will not be called for ``conftest.py`` files, only for setuptools plugins. + + :param _pytest.config.Config early_config: pytest config object + :param list[str] args: list of arguments passed on the command line + :param _pytest.config.Parser parser: to add command line options + """ # ------------------------------------------------------------------------- @@ -98,16 +165,30 @@ def pytest_load_initial_conftests(early_config, parser, args): @hookspec(firstresult=True) def pytest_collection(session): - """ perform the collection protocol for the given session. + """Perform the collection protocol for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + + :param _pytest.main.Session session: the pytest session object + """ - Stops at first non-None result, see :ref:`firstresult` """ def pytest_collection_modifyitems(session, config, items): """ called after collection has been performed, may filter or re-order - the items in-place.""" + the items in-place. + + :param _pytest.main.Session session: the pytest session object + :param _pytest.config.Config config: pytest config object + :param List[_pytest.nodes.Item] items: list of item objects + """ + def pytest_collection_finish(session): - """ called after collection has been performed and modified. """ + """ called after collection has been performed and modified. + + :param _pytest.main.Session session: the pytest session object + """ + @hookspec(firstresult=True) def pytest_ignore_collect(path, config): @@ -116,31 +197,48 @@ def pytest_ignore_collect(path, config): more specific hooks. Stops at first non-None result, see :ref:`firstresult` + + :param str path: the path to analyze + :param _pytest.config.Config config: pytest config object """ + @hookspec(firstresult=True) def pytest_collect_directory(path, parent): """ called before traversing a directory for collection files. - Stops at first non-None result, see :ref:`firstresult` """ + Stops at first non-None result, see :ref:`firstresult` + + :param str path: the path to analyze + """ + def pytest_collect_file(path, parent): """ return collection Node or None for the given path. Any new node - needs to have the specified ``parent`` as a parent.""" + needs to have the specified ``parent`` as a parent. + + :param str path: the path to collect + """ # logging hooks for collection + + def pytest_collectstart(collector): """ collector starts collecting. """ + def pytest_itemcollected(item): """ we just collected a test item. 
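For example, a ``conftest.py`` could implement the item-modifying hook documented above roughly like this (the sort key is an arbitrary illustration)::

    def pytest_collection_modifyitems(session, config, items):
        # run tests whose name mentions "smoke" first; reorder in place
        items.sort(key=lambda item: 0 if "smoke" in item.name else 1)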
""" + def pytest_collectreport(report): """ collector finished collecting. """ + def pytest_deselected(items): """ called for test items deselected by keyword. """ + @hookspec(firstresult=True) def pytest_make_collect_report(collector): """ perform ``collector.collect()`` and return a CollectReport. @@ -151,6 +249,7 @@ def pytest_make_collect_report(collector): # Python test function related hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_pycollect_makemodule(path, parent): """ return a Module collector or None for the given path. @@ -160,42 +259,57 @@ def pytest_pycollect_makemodule(path, parent): Stops at first non-None result, see :ref:`firstresult` """ + @hookspec(firstresult=True) def pytest_pycollect_makeitem(collector, name, obj): """ return custom item/collector for a python object in a module, or None. Stops at first non-None result, see :ref:`firstresult` """ + @hookspec(firstresult=True) def pytest_pyfunc_call(pyfuncitem): """ call underlying test function. Stops at first non-None result, see :ref:`firstresult` """ + def pytest_generate_tests(metafunc): """ generate (multiple) parametrized calls to a test function.""" + @hookspec(firstresult=True) def pytest_make_parametrize_id(config, val, argname): """Return a user-friendly string representation of the given ``val`` that will be used by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. The parameter name is available as ``argname``, if required. - Stops at first non-None result, see :ref:`firstresult` """ + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.config.Config config: pytest config object + :param val: the parametrized value + :param str argname: the automatic parameter name produced by pytest + """ # ------------------------------------------------------------------------- # generic runtest related hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_runtestloop(session): """ called for performing the main runtest loop (after collection finished). - Stops at first non-None result, see :ref:`firstresult` """ + Stops at first non-None result, see :ref:`firstresult` + + :param _pytest.main.Session session: the pytest session object + """ + def pytest_itemstart(item, node): - """ (deprecated, use pytest_runtest_logstart). """ + """(**Deprecated**) use pytest_runtest_logstart. """ + @hookspec(firstresult=True) def pytest_runtest_protocol(item, nextitem): @@ -214,15 +328,37 @@ def pytest_runtest_protocol(item, nextitem): Stops at first non-None result, see :ref:`firstresult` """ + def pytest_runtest_logstart(nodeid, location): - """ signal the start of running a single test item. """ + """ signal the start of running a single test item. + + This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and + :func:`pytest_runtest_teardown` hooks. + + :param str nodeid: full id of the item + :param location: a triple of ``(filename, linenum, testname)`` + """ + + +def pytest_runtest_logfinish(nodeid, location): + """ signal the complete finish of running a single test item. + + This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and + :func:`pytest_runtest_teardown` hooks. 
+ + :param str nodeid: full id of the item + :param location: a triple of ``(filename, linenum, testname)`` + """ + def pytest_runtest_setup(item): """ called before ``pytest_runtest_call(item)``. """ + def pytest_runtest_call(item): """ called to execute the test ``item``. """ + def pytest_runtest_teardown(item, nextitem): """ called after ``pytest_runtest_call``. @@ -232,6 +368,7 @@ def pytest_runtest_teardown(item, nextitem): so that nextitem only needs to call setup-functions. """ + @hookspec(firstresult=True) def pytest_runtest_makereport(item, call): """ return a :py:class:`_pytest.runner.TestReport` object @@ -240,6 +377,7 @@ def pytest_runtest_makereport(item, call): Stops at first non-None result, see :ref:`firstresult` """ + def pytest_runtest_logreport(report): """ process a test setup/call/teardown report relating to the respective phase of executing a test. """ @@ -248,13 +386,23 @@ def pytest_runtest_logreport(report): # Fixture related hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_fixture_setup(fixturedef, request): """ performs fixture setup execution. - Stops at first non-None result, see :ref:`firstresult` """ + :return: The return value of the call to the fixture function -def pytest_fixture_post_finalizer(fixturedef): + Stops at first non-None result, see :ref:`firstresult` + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + """ + + +def pytest_fixture_post_finalizer(fixturedef, request): """ called after fixture teardown, but before the cache is cleared so the fixture result cache ``fixturedef.cached_result`` can still be accessed.""" @@ -263,14 +411,28 @@ def pytest_fixture_post_finalizer(fixturedef): # test session related hooks # ------------------------------------------------------------------------- + def pytest_sessionstart(session): - """ before session.main() is called. """ + """ called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param _pytest.main.Session session: the pytest session object + """ + def pytest_sessionfinish(session, exitstatus): - """ whole test run finishes. """ + """ called after whole test run finished, right before returning the exit status to the system. + + :param _pytest.main.Session session: the pytest session object + :param int exitstatus: the status which pytest will return to the system + """ + def pytest_unconfigure(config): - """ called before test process is exited. """ + """ called before test process is exited. + + :param _pytest.config.Config config: pytest config object + """ # ------------------------------------------------------------------------- @@ -284,14 +446,20 @@ def pytest_assertrepr_compare(config, op, left, right): of strings. The strings will be joined by newlines but any newlines *in* a string will be escaped. Note that all but the first line will be indented slightly, the intention is for the first line to be a summary. + + :param _pytest.config.Config config: pytest config object """ # ------------------------------------------------------------------------- # hooks for influencing reporting (invoked from _pytest_terminal) # ------------------------------------------------------------------------- + def pytest_report_header(config, startdir): - """ return a string to be displayed as header info for terminal reporting. 
+ """ return a string or list of strings to be displayed as header info for terminal reporting. + + :param _pytest.config.Config config: pytest config object + :param startdir: py.path object with the starting dir .. note:: @@ -300,26 +468,54 @@ def pytest_report_header(config, startdir): :ref:`discovers plugins during startup `. """ + +def pytest_report_collectionfinish(config, startdir, items): + """ + .. versionadded:: 3.2 + + return a string or list of strings to be displayed after collection has finished successfully. + + This strings will be displayed after the standard "collected X items" message. + + :param _pytest.config.Config config: pytest config object + :param startdir: py.path object with the starting dir + :param items: list of pytest items that are going to be executed; this list should not be modified. + """ + + @hookspec(firstresult=True) def pytest_report_teststatus(report): """ return result-category, shortletter and verbose word for reporting. Stops at first non-None result, see :ref:`firstresult` """ + def pytest_terminal_summary(terminalreporter, exitstatus): - """ add additional section in terminal summary reporting. """ + """Add a section to terminal summary reporting. + + :param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object + :param int exitstatus: the exit status that will be reported back to the OS + + .. versionadded:: 3.5 + The ``config`` parameter. + """ @hookspec(historic=True) def pytest_logwarning(message, code, nodeid, fslocation): """ process a warning specified by a message, a code string, a nodeid and fslocation (both of which may be None - if the warning is not tied to a partilar node/location).""" + if the warning is not tied to a particular node/location). + + .. note:: + This hook is incompatible with ``hookwrapper=True``. + """ # ------------------------------------------------------------------------- # doctest hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_doctest_prepare_content(content): """ return processed content for a given doctest @@ -330,12 +526,15 @@ def pytest_doctest_prepare_content(content): # error handling and internal debugging hooks # ------------------------------------------------------------------------- + def pytest_internalerror(excrepr, excinfo): """ called for internal errors. """ + def pytest_keyboard_interrupt(excinfo): """ called for keyboard interrupt. """ + def pytest_exception_interact(node, call, report): """called when an exception was raised which can potentially be interactively handled. @@ -344,10 +543,10 @@ def pytest_exception_interact(node, call, report): that is not an internal exception like ``skip.Exception``. """ + def pytest_enter_pdb(config): """ called upon pdb.set_trace(), can be used by plugins to take special action just before the python debugger enters in interactive mode. 
- :arg config: pytest config object - :type config: _pytest.config.Config + :param _pytest.config.Config config: pytest config object """ diff --git a/_pytest/impl b/_pytest/impl deleted file mode 100644 index 889e37e5a..000000000 --- a/_pytest/impl +++ /dev/null @@ -1,254 +0,0 @@ -Sorting per-resource ------------------------------ - -for any given set of items: - -- collect items per session-scoped parametrized funcarg -- re-order until items no parametrizations are mixed - - examples: - - test() - test1(s1) - test1(s2) - test2() - test3(s1) - test3(s2) - - gets sorted to: - - test() - test2() - test1(s1) - test3(s1) - test1(s2) - test3(s2) - - -the new @setup functions --------------------------------------- - -Consider a given @setup-marked function:: - - @pytest.mark.setup(maxscope=SCOPE) - def mysetup(request, arg1, arg2, ...) - ... - request.addfinalizer(fin) - ... - -then FUNCARGSET denotes the set of (arg1, arg2, ...) funcargs and -all of its dependent funcargs. The mysetup function will execute -for any matching test item once per scope. - -The scope is determined as the minimum scope of all scopes of the args -in FUNCARGSET and the given "maxscope". - -If mysetup has been called and no finalizers have been called it is -called "active". - -Furthermore the following rules apply: - -- if an arg value in FUNCARGSET is about to be torn down, the - mysetup-registered finalizers will execute as well. - -- There will never be two active mysetup invocations. - -Example 1, session scope:: - - @pytest.mark.funcarg(scope="session", params=[1,2]) - def db(request): - request.addfinalizer(db_finalize) - - @pytest.mark.setup - def mysetup(request, db): - request.addfinalizer(mysetup_finalize) - ... - -And a given test module: - - def test_something(): - ... - def test_otherthing(): - pass - -Here is what happens:: - - db(request) executes with request.param == 1 - mysetup(request, db) executes - test_something() executes - test_otherthing() executes - mysetup_finalize() executes - db_finalize() executes - db(request) executes with request.param == 2 - mysetup(request, db) executes - test_something() executes - test_otherthing() executes - mysetup_finalize() executes - db_finalize() executes - -Example 2, session/function scope:: - - @pytest.mark.funcarg(scope="session", params=[1,2]) - def db(request): - request.addfinalizer(db_finalize) - - @pytest.mark.setup(scope="function") - def mysetup(request, db): - ... - request.addfinalizer(mysetup_finalize) - ... - -And a given test module: - - def test_something(): - ... - def test_otherthing(): - pass - -Here is what happens:: - - db(request) executes with request.param == 1 - mysetup(request, db) executes - test_something() executes - mysetup_finalize() executes - mysetup(request, db) executes - test_otherthing() executes - mysetup_finalize() executes - db_finalize() executes - db(request) executes with request.param == 2 - mysetup(request, db) executes - test_something() executes - mysetup_finalize() executes - mysetup(request, db) executes - test_otherthing() executes - mysetup_finalize() executes - db_finalize() executes - - -Example 3 - funcargs session-mix ----------------------------------------- - -Similar with funcargs, an example:: - - @pytest.mark.funcarg(scope="session", params=[1,2]) - def db(request): - request.addfinalizer(db_finalize) - - @pytest.mark.funcarg(scope="function") - def table(request, db): - ... - request.addfinalizer(table_finalize) - ... - -And a given test module: - - def test_something(table): - ... 
- def test_otherthing(table): - pass - def test_thirdthing(): - pass - -Here is what happens:: - - db(request) executes with param == 1 - table(request, db) - test_something(table) - table_finalize() - table(request, db) - test_otherthing(table) - table_finalize() - db_finalize - db(request) executes with param == 2 - table(request, db) - test_something(table) - table_finalize() - table(request, db) - test_otherthing(table) - table_finalize() - db_finalize - test_thirdthing() - -Data structures --------------------- - -pytest internally maintains a dict of active funcargs with cache, param, -finalizer, (scopeitem?) information: - - active_funcargs = dict() - -if a parametrized "db" is activated: - - active_funcargs["db"] = FuncargInfo(dbvalue, paramindex, - FuncargFinalize(...), scopeitem) - -if a test is torn down and the next test requires a differently -parametrized "db": - - for argname in item.callspec.params: - if argname in active_funcargs: - funcarginfo = active_funcargs[argname] - if funcarginfo.param != item.callspec.params[argname]: - funcarginfo.callfinalizer() - del node2funcarg[funcarginfo.scopeitem] - del active_funcargs[argname] - nodes_to_be_torn_down = ... - for node in nodes_to_be_torn_down: - if node in node2funcarg: - argname = node2funcarg[node] - active_funcargs[argname].callfinalizer() - del node2funcarg[node] - del active_funcargs[argname] - -if a test is setup requiring a "db" funcarg: - - if "db" in active_funcargs: - return active_funcargs["db"][0] - funcarginfo = setup_funcarg() - active_funcargs["db"] = funcarginfo - node2funcarg[funcarginfo.scopeitem] = "db" - -Implementation plan for resources ------------------------------------------- - -1. Revert FuncargRequest to the old form, unmerge item/request - (done) -2. make funcarg factories be discovered at collection time -3. Introduce funcarg marker -4. Introduce funcarg scope parameter -5. Introduce funcarg parametrize parameter -6. make setup functions be discovered at collection time -7. (Introduce a pytest_fixture_protocol/setup_funcargs hook) - -methods and data structures --------------------------------- - -A FuncarcManager holds all information about funcarg definitions -including parametrization and scope definitions. It implements -a pytest_generate_tests hook which performs parametrization as appropriate. - -as a simple example, let's consider a tree where a test function requires -a "abc" funcarg and its factory defines it as parametrized and scoped -for Modules. When collections hits the function item, it creates -the metafunc object, and calls funcargdb.pytest_generate_tests(metafunc) -which looks up available funcarg factories and their scope and parametrization. -This information is equivalent to what can be provided today directly -at the function site and it should thus be relatively straight forward -to implement the additional way of defining parametrization/scoping. - -conftest loading: - each funcarg-factory will populate the session.funcargmanager - -When a test item is collected, it grows a dictionary -(funcargname2factorycalllist). A factory lookup is performed -for each required funcarg. The resulting factory call is stored -with the item. If a function is parametrized multiple items are -created with respective factory calls. Else if a factory is parametrized -multiple items and calls to the factory function are created as well. - -At setup time, an item populates a funcargs mapping, mapping names -to values. 
If a value is funcarg factories are queried for a given item -test functions and setup functions are put in a class -which looks up required funcarg factories. - - diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py index 301633706..3a0e4a071 100644 --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -17,6 +17,7 @@ import re import sys import time import pytest +from _pytest import nodes from _pytest.config import filename_arg # Python 2.X and 3.X compatibility @@ -84,6 +85,9 @@ class _NodeReporter(object): def add_property(self, name, value): self.properties.append((str(name), bin_xml_escape(value))) + def add_attribute(self, name, value): + self.attrs[str(name)] = bin_xml_escape(value) + def make_properties_node(self): """Return a Junit node containing custom properties, if any. """ @@ -97,6 +101,7 @@ class _NodeReporter(object): def record_testreport(self, testreport): assert not self.testcase names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs classnames = names[:-1] if self.xml.prefix: classnames.insert(0, self.xml.prefix) @@ -110,6 +115,7 @@ class _NodeReporter(object): if hasattr(testreport, "url"): attrs["url"] = testreport.url self.attrs = attrs + self.attrs.update(existing_attrs) # restore any user-defined attributes def to_xml(self): testcase = Junit.testcase(time=self.duration, **self.attrs) @@ -124,10 +130,47 @@ class _NodeReporter(object): self.append(node) def write_captured_output(self, report): - for capname in ('out', 'err'): - content = getattr(report, 'capstd' + capname) + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + + if content_log or content_out: + if content_log and self.xml.logging == 'system-out': + if content_out: + # syncing stdout and the log-output is not done yet. It's + # probably not worth the effort. Therefore, first the captured + # stdout is shown and then the captured logs. + content = '\n'.join([ + ' Captured Stdout '.center(80, '-'), + content_out, + '', + ' Captured Log '.center(80, '-'), + content_log]) + else: + content = content_log + else: + content = content_out + if content: - tag = getattr(Junit, 'system-' + capname) + tag = getattr(Junit, 'system-out') + self.append(tag(bin_xml_escape(content))) + + if content_log or content_err: + if content_log and self.xml.logging == 'system-err': + if content_err: + content = '\n'.join([ + ' Captured Stderr '.center(80, '-'), + content_err, + '', + ' Captured Log '.center(80, '-'), + content_log]) + else: + content = content_log + else: + content = content_err + + if content: + tag = getattr(Junit, 'system-err') self.append(tag(bin_xml_escape(content))) def append_pass(self, report): @@ -190,24 +233,56 @@ class _NodeReporter(object): @pytest.fixture -def record_xml_property(request): - """Add extra xml properties to the tag for the calling test. +def record_property(request): + """Add an extra properties the calling test. + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. The fixture is callable with ``(name, value)``, with value being automatically xml-encoded. 
+ + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + def append_property(name, value): + request.node.user_properties.append((name, value)) + return append_property + + +@pytest.fixture +def record_xml_property(record_property): + """(Deprecated) use record_property.""" + import warnings + from _pytest import deprecated + warnings.warn( + deprecated.RECORD_XML_PROPERTY, + DeprecationWarning, + stacklevel=2 + ) + + return record_property + + +@pytest.fixture +def record_xml_attribute(request): + """Add extra xml attributes to the tag for the calling test. + The fixture is callable with ``(name, value)``, with value being + automatically xml-encoded """ request.node.warn( code='C3', - message='record_xml_property is an experimental feature', + message='record_xml_attribute is an experimental feature', ) xml = getattr(request.config, "_xml", None) if xml is not None: node_reporter = xml.node_reporter(request.node.nodeid) - return node_reporter.add_property + return node_reporter.add_attribute else: - def add_property_noop(name, value): + def add_attr_noop(name, value): pass - return add_property_noop + return add_attr_noop def pytest_addoption(parser): @@ -227,13 +302,18 @@ def pytest_addoption(parser): default=None, help="prepend prefix to classnames in junit-xml output") parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest") + parser.addini("junit_logging", "Write captured log messages to JUnit report: " + "one of no|system-out|system-err", + default="no") # choices=['no', 'stdout', 'stderr']) def pytest_configure(config): xmlpath = config.option.xmlpath # prevent opening xmllog on slave nodes (xdist) if xmlpath and not hasattr(config, 'slaveinput'): - config._xml = LogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name")) + config._xml = LogXML(xmlpath, config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging")) config.pluginmanager.register(config._xml) @@ -252,7 +332,7 @@ def mangle_test_address(address): except ValueError: pass # convert file path to dotted path - names[0] = names[0].replace("/", '.') + names[0] = names[0].replace(nodes.SEP, '.') names[0] = _py_ext_re.sub("", names[0]) # put any params back names[-1] += possible_open_bracket + params @@ -260,11 +340,12 @@ def mangle_test_address(address): class LogXML(object): - def __init__(self, logfile, prefix, suite_name="pytest"): + def __init__(self, logfile, prefix, suite_name="pytest", logging="no"): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) self.prefix = prefix self.suite_name = suite_name + self.logging = logging self.stats = dict.fromkeys([ 'error', 'passed', @@ -372,14 +453,18 @@ class LogXML(object): if report.when == "teardown": reporter = self._opentestcase(report) reporter.write_captured_output(report) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, propvalue) + self.finalize(report) report_wid = getattr(report, "worker_id", None) report_ii = getattr(report, "item_index", None) close_report = next( (rep for rep in self.open_reports if (rep.nodeid == report.nodeid and - getattr(rep, "item_index", None) == report_ii and - getattr(rep, "worker_id", None) == report_wid + getattr(rep, "item_index", None) == report_ii and + getattr(rep, "worker_id", None) == report_wid ) ), None) if close_report: @@ -444,9 +529,9 @@ class LogXML(object): """ if self.global_properties: return 
Junit.properties( - [ - Junit.property(name=name, value=value) - for name, value in self.global_properties - ] + [ + Junit.property(name=name, value=value) + for name, value in self.global_properties + ] ) return '' diff --git a/_pytest/logging.py b/_pytest/logging.py new file mode 100644 index 000000000..89d1c7242 --- /dev/null +++ b/_pytest/logging.py @@ -0,0 +1,522 @@ +""" Access and control log capturing. """ +from __future__ import absolute_import, division, print_function + +import logging +from contextlib import closing, contextmanager +import re +import six + +from _pytest.config import create_terminal_writer +import pytest +import py + + +DEFAULT_LOG_FORMAT = '%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s' +DEFAULT_LOG_DATE_FORMAT = '%H:%M:%S' + + +class ColoredLevelFormatter(logging.Formatter): + """ + Colorize the %(levelname)..s part of the log format passed to __init__. + """ + + LOGLEVEL_COLOROPTS = { + logging.CRITICAL: {'red'}, + logging.ERROR: {'red', 'bold'}, + logging.WARNING: {'yellow'}, + logging.WARN: {'yellow'}, + logging.INFO: {'green'}, + logging.DEBUG: {'purple'}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r'%\(levelname\)([+-]?\d*s)') + + def __init__(self, terminalwriter, *args, **kwargs): + super(ColoredLevelFormatter, self).__init__( + *args, **kwargs) + if six.PY2: + self._original_fmt = self._fmt + else: + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping = {} + + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + formatted_levelname = levelname_fmt % { + 'levelname': logging.getLevelName(level)} + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = terminalwriter.markup( + formatted_levelname, **color_kwargs) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, + self._fmt) + + def format(self, record): + fmt = self._level_to_fmt_mapping.get( + record.levelno, self._original_fmt) + if six.PY2: + self._fmt = fmt + else: + self._style._fmt = fmt + return super(ColoredLevelFormatter, self).format(record) + + +def get_option_ini(config, *names): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser): + """Add options to control log capturing.""" + group = parser.getgroup('logging') + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini(dest, default=default, type=type, + help='default value for ' + option) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + '--no-print-logs', + dest='log_print', action='store_const', const=False, default=True, + type='bool', + help='disable printing caught logs on failed tests.') + add_option_ini( + '--log-level', + dest='log_level', default=None, + help='logging level used by the logging module') + add_option_ini( + '--log-format', + dest='log_format', default=DEFAULT_LOG_FORMAT, + help='log format as used by the logging module.') + add_option_ini( + '--log-date-format', + dest='log_date_format', default=DEFAULT_LOG_DATE_FORMAT, + help='log date format as used by the logging module.') + parser.addini( + 'log_cli', default=False, type='bool', + help='enable log display during test run (also 
known as "live logging").') + add_option_ini( + '--log-cli-level', + dest='log_cli_level', default=None, + help='cli logging level.') + add_option_ini( + '--log-cli-format', + dest='log_cli_format', default=None, + help='log format as used by the logging module.') + add_option_ini( + '--log-cli-date-format', + dest='log_cli_date_format', default=None, + help='log date format as used by the logging module.') + add_option_ini( + '--log-file', + dest='log_file', default=None, + help='path to a file when logging will be written to.') + add_option_ini( + '--log-file-level', + dest='log_file_level', default=None, + help='log file logging level.') + add_option_ini( + '--log-file-format', + dest='log_file_format', default=DEFAULT_LOG_FORMAT, + help='log format as used by the logging module.') + add_option_ini( + '--log-file-date-format', + dest='log_file_date_format', default=DEFAULT_LOG_DATE_FORMAT, + help='log date format as used by the logging module.') + + +@contextmanager +def catching_logs(handler, formatter=None, level=None): + """Context manager that prepares the whole logging machinery properly.""" + root_logger = logging.getLogger() + + if formatter is not None: + handler.setFormatter(formatter) + if level is not None: + handler.setLevel(level) + + # Adding the same handler twice would confuse logging system. + # Just don't do that. + add_new_handler = handler not in root_logger.handlers + + if add_new_handler: + root_logger.addHandler(handler) + if level is not None: + orig_level = root_logger.level + root_logger.setLevel(min(orig_level, level)) + try: + yield handler + finally: + if level is not None: + root_logger.setLevel(orig_level) + if add_new_handler: + root_logger.removeHandler(handler) + + +class LogCaptureHandler(logging.StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self): + """Creates a new log handler.""" + logging.StreamHandler.__init__(self, py.io.TextIO()) + self.records = [] + + def emit(self, record): + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + logging.StreamHandler.emit(self, record) + + def reset(self): + self.records = [] + self.stream = py.io.TextIO() + + +class LogCaptureFixture(object): + """Provides access and control of log capturing.""" + + def __init__(self, item): + """Creates a new funcarg.""" + self._item = item + self._initial_log_levels = {} # type: Dict[str, int] # dict of log name -> log level + + def _finalize(self): + """Finalizes the fixture. + + This restores the log levels changed by :meth:`set_level`. + """ + # restore log levels + for logger_name, level in self._initial_log_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + + @property + def handler(self): + """ + :rtype: LogCaptureHandler + """ + return self._item.catch_log_handler + + def get_records(self, when): + """ + Get the logging records for one of the possible test phases. + + :param str when: + Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown". + + :rtype: List[logging.LogRecord] + :return: the list of captured records at the given stage + + .. 
versionadded:: 3.4 + """ + handler = self._item.catch_log_handlers.get(when) + if handler: + return handler.records + else: + return [] + + @property + def text(self): + """Returns the log text.""" + return self.handler.stream.getvalue() + + @property + def records(self): + """Returns the list of log records.""" + return self.handler.records + + @property + def record_tuples(self): + """Returns a list of stripped-down versions of the log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + def clear(self): + """Reset the list of log records and the captured log text.""" + self.handler.reset() + + def set_level(self, level, logger=None): + """Sets the level for capturing of logs. The level will be restored to its previous value at the end of + the test. + + :param int level: the level. + :param str logger: the logger whose level to update. If not given, the root logger level is updated. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be restored to their initial values at the + end of the test. + """ + logger_name = logger + logger = logging.getLogger(logger_name) + # save the original log-level to restore it during teardown + self._initial_log_levels.setdefault(logger_name, logger.level) + logger.setLevel(level) + + @contextmanager + def at_level(self, level, logger=None): + """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the + level is restored to its original value. + + :param int level: the level. + :param str logger: the logger whose level to update. If not given, the root logger level is updated. + """ + logger = logging.getLogger(logger) + orig_level = logger.level + logger.setLevel(level) + try: + yield + finally: + logger.setLevel(orig_level) + + +@pytest.fixture +def caplog(request): + """Access and control log capturing. + + Captured logs are available through the following properties/methods:: + + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node) + yield result + result._finalize() + + +def get_actual_log_level(config, *setting_names): + """Return the actual logging level.""" + + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return + + if isinstance(log_level, six.string_types): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError: + # Python logging does not recognise this as a logging level + raise pytest.UsageError( + "'{0}' is not recognized as a logging level name for " + "'{1}'. Please consider passing the " + "logging level num instead.".format( + log_level, + setting_name)) + + +def pytest_configure(config): + config.pluginmanager.register(LoggingPlugin(config), 'logging-plugin') + + +@contextmanager +def _dummy_context_manager(): + yield + + +class LoggingPlugin(object): + """Attaches to the logging module and captures log messages for each test. + """ + + def __init__(self, config): + """Creates a new plugin to capture log messages. 
+ + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # enable verbose output automatically if live logging is enabled + if self._log_cli_enabled() and not config.getoption('verbose'): + # sanity check: terminal reporter should not have been loaded at this point + assert self._config.pluginmanager.get_plugin('terminalreporter') is None + config.option.verbose = 1 + + self.print_logs = get_option_ini(config, 'log_print') + self.formatter = logging.Formatter(get_option_ini(config, 'log_format'), + get_option_ini(config, 'log_date_format')) + self.log_level = get_actual_log_level(config, 'log_level') + + log_file = get_option_ini(config, 'log_file') + if log_file: + self.log_file_level = get_actual_log_level(config, 'log_file_level') + + log_file_format = get_option_ini(config, 'log_file_format', 'log_format') + log_file_date_format = get_option_ini(config, 'log_file_date_format', 'log_date_format') + # Each pytest runtests session will write to a clean logfile + self.log_file_handler = logging.FileHandler(log_file, mode='w') + log_file_formatter = logging.Formatter(log_file_format, datefmt=log_file_date_format) + self.log_file_handler.setFormatter(log_file_formatter) + else: + self.log_file_handler = None + + # initialized during pytest_runtestloop + self.log_cli_handler = None + + def _log_cli_enabled(self): + """Return True if log_cli should be considered enabled, either explicitly + or because --log-cli-level was given in the command-line. + """ + return self._config.getoption('--log-cli-level') is not None or \ + self._config.getini('log_cli') + + @contextmanager + def _runtest_for(self, item, when): + """Implements the internals of pytest_runtest_xxx() hook.""" + with catching_logs(LogCaptureHandler(), + formatter=self.formatter, level=self.log_level) as log_handler: + if self.log_cli_handler: + self.log_cli_handler.set_when(when) + + if item is None: + yield # run the test + return + + if not hasattr(item, 'catch_log_handlers'): + item.catch_log_handlers = {} + item.catch_log_handlers[when] = log_handler + item.catch_log_handler = log_handler + try: + yield # run test + finally: + del item.catch_log_handler + if when == 'teardown': + del item.catch_log_handlers + + if self.print_logs: + # Add a captured log section to the report. 
+ log = log_handler.stream.getvalue().strip() + item.add_report_section(when, 'log', log) + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_setup(self, item): + with self._runtest_for(item, 'setup'): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(self, item): + with self._runtest_for(item, 'call'): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_teardown(self, item): + with self._runtest_for(item, 'teardown'): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logstart(self): + if self.log_cli_handler: + self.log_cli_handler.reset() + with self._runtest_for(None, 'start'): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_logfinish(self): + with self._runtest_for(None, 'finish'): + yield + + @pytest.hookimpl(hookwrapper=True) + def pytest_runtestloop(self, session): + """Runs all collected test items.""" + self._setup_cli_logging() + with self.live_logs_context: + if self.log_file_handler is not None: + with closing(self.log_file_handler): + with catching_logs(self.log_file_handler, + level=self.log_file_level): + yield # run all the tests + else: + yield # run all the tests + + def _setup_cli_logging(self): + """Sets up the handler and logger for the Live Logs feature, if enabled. + + This must be done right before starting the loop so we can access the terminal reporter plugin. + """ + terminal_reporter = self._config.pluginmanager.get_plugin('terminalreporter') + if self._log_cli_enabled() and terminal_reporter is not None: + capture_manager = self._config.pluginmanager.get_plugin('capturemanager') + log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + log_cli_format = get_option_ini(self._config, 'log_cli_format', 'log_format') + log_cli_date_format = get_option_ini(self._config, 'log_cli_date_format', 'log_date_format') + if self._config.option.color != 'no' and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format): + log_cli_formatter = ColoredLevelFormatter(create_terminal_writer(self._config), + log_cli_format, datefmt=log_cli_date_format) + else: + log_cli_formatter = logging.Formatter(log_cli_format, datefmt=log_cli_date_format) + log_cli_level = get_actual_log_level(self._config, 'log_cli_level', 'log_level') + self.log_cli_handler = log_cli_handler + self.live_logs_context = catching_logs(log_cli_handler, formatter=log_cli_formatter, level=log_cli_level) + else: + self.live_logs_context = _dummy_context_manager() + + +class _LiveLoggingStreamHandler(logging.StreamHandler): + """ + Custom StreamHandler used by the live logging feature: it will write a newline before the first log message + in each test. + + During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured + and won't appear in the terminal. 
+ """ + + def __init__(self, terminal_reporter, capture_manager): + """ + :param _pytest.terminal.TerminalReporter terminal_reporter: + :param _pytest.capture.CaptureManager capture_manager: + """ + logging.StreamHandler.__init__(self, stream=terminal_reporter) + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self): + """Reset the handler; should be called before the start of each test""" + self._first_record_emitted = False + + def set_when(self, when): + """Prepares for the given test phase (setup/call/teardown)""" + self._when = when + self._section_name_shown = False + if when == 'start': + self._test_outcome_written = False + + def emit(self, record): + if self.capture_manager is not None: + self.capture_manager.suspend_global_capture() + try: + if not self._first_record_emitted: + self.stream.write('\n') + self._first_record_emitted = True + elif self._when in ('teardown', 'finish'): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write('\n') + if not self._section_name_shown and self._when: + self.stream.section('live log ' + self._when, sep='-', bold=True) + self._section_name_shown = True + logging.StreamHandler.emit(self, record) + finally: + if self.capture_manager is not None: + self.capture_manager.resume_global_capture() diff --git a/_pytest/main.py b/_pytest/main.py index ec4ec2cc7..9b59e03a2 100644 --- a/_pytest/main.py +++ b/_pytest/main.py @@ -1,22 +1,22 @@ """ core implementation of testing process: init, session, runtest loop. """ from __future__ import absolute_import, division, print_function +import contextlib import functools import os +import pkgutil +import six import sys import _pytest +from _pytest import nodes import _pytest._code import py -try: - from collections import MutableMapping as MappingMixin -except ImportError: - from UserDict import DictMixin as MappingMixin from _pytest.config import directory_arg, UsageError, hookimpl -from _pytest.runner import collect_one_node, exit +from _pytest.outcomes import exit +from _pytest.runner import collect_one_node -tracebackcutdir = py.path.local(_pytest.__file__).dirpath() # exitcodes for the command line EXIT_OK = 0 @@ -29,66 +29,68 @@ EXIT_NOTESTSCOLLECTED = 5 def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", - type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv']) - parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.", - type="args", default=[]) - #parser.addini("dirpatterns", + type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv']) + parser.addini("testpaths", "directories to search for tests when no files or directories are given in the " + "command line.", + type="args", default=[]) + # parser.addini("dirpatterns", # "patterns specifying possible locations of test files", # type="linelist", default=["**/test_*.txt", # "**/test_*.py", "**/*_test.py"] - #) + # ) group = parser.getgroup("general", "running and selection options") group._addoption('-x', '--exitfirst', action="store_const", - dest="maxfail", const=1, - help="exit instantly on first error or failed test."), + dest="maxfail", const=1, + help="exit instantly on first error or failed test."), group._addoption('--maxfail', metavar="num", - action="store", type=int, dest="maxfail", default=0, - help="exit after first num failures or errors.") + 
action="store", type=int, dest="maxfail", default=0, + help="exit after first num failures or errors.") group._addoption('--strict', action="store_true", - help="run pytest in strict mode, warnings become errors.") + help="marks not registered in configuration file raise errors.") group._addoption("-c", metavar="file", type=str, dest="inifilename", - help="load configuration from `file` instead of trying to locate one of the implicit configuration files.") + help="load configuration from `file` instead of trying to locate one of the implicit " + "configuration files.") group._addoption("--continue-on-collection-errors", action="store_true", - default=False, dest="continue_on_collection_errors", - help="Force test execution even if collection errors occur.") + default=False, dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.") + group._addoption("--rootdir", action="store", + dest="rootdir", + help="Define root directory for tests. Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.") group = parser.getgroup("collect", "collection") group.addoption('--collectonly', '--collect-only', action="store_true", - help="only collect tests, don't execute them."), + help="only collect tests, don't execute them."), group.addoption('--pyargs', action="store_true", - help="try to interpret all arguments as python packages.") + help="try to interpret all arguments as python packages.") group.addoption("--ignore", action="append", metavar="path", - help="ignore path during collection (multi-allowed).") + help="ignore path during collection (multi-allowed).") + group.addoption("--deselect", action="append", metavar="nodeid_prefix", + help="deselect item during collection (multi-allowed).") # when changing this to --conf-cut-dir, config.py Conftest.setinitial # needs upgrading as well group.addoption('--confcutdir', dest="confcutdir", default=None, - metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"), - help="only load conftest.py's relative to specified dir.") + metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.") group.addoption('--noconftest', action="store_true", - dest="noconftest", default=False, - help="Don't load any conftest.py files.") + dest="noconftest", default=False, + help="Don't load any conftest.py files.") group.addoption('--keepduplicates', '--keep-duplicates', action="store_true", - dest="keepduplicates", default=False, - help="Keep duplicate tests.") + dest="keepduplicates", default=False, + help="Keep duplicate tests.") + group.addoption('--collect-in-virtualenv', action='store_true', + dest='collect_in_virtualenv', default=False, + help="Don't ignore tests in a local virtualenv directory") group = parser.getgroup("debugconfig", - "test session debugging and configuration") + "test session debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", - help="base temporary directory for this test run.") - - - -def pytest_namespace(): - """keeping this one works around a deeper startup issue in pytest - - i tried to find it for a while but the amount of time turned unsustainable, - so i put a hack in to revisit later - """ - return {} + help="base temporary directory for this test run.") def pytest_configure(config): - __import__('pytest').config = config # compatibiltiy + 
__import__('pytest').config = config # compatibiltiy def wrap_session(config, doit): @@ -105,6 +107,8 @@ def wrap_session(config, doit): session.exitstatus = doit(config, session) or 0 except UsageError: raise + except Failed: + session.exitstatus = EXIT_TESTSFAILED except KeyboardInterrupt: excinfo = _pytest._code.ExceptionInfo() if initstate < 2 and isinstance(excinfo.value, exit.Exception): @@ -112,7 +116,7 @@ def wrap_session(config, doit): excinfo.typename, excinfo.value.msg)) config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = EXIT_INTERRUPTED - except: + except: # noqa excinfo = _pytest._code.ExceptionInfo() config.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR @@ -160,22 +164,38 @@ def pytest_runtestloop(session): return True for i, item in enumerate(session.items): - nextitem = session.items[i+1] if i+1 < len(session.items) else None + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) if session.shouldstop: raise session.Interrupted(session.shouldstop) return True +def _in_venv(path): + """Attempts to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the appropriate activate script""" + bindir = path.join('Scripts' if sys.platform.startswith('win') else 'bin') + if not bindir.isdir(): + return False + activates = ('activate', 'activate.csh', 'activate.fish', + 'Activate', 'Activate.bat', 'Activate.ps1') + return any([fname.basename in activates for fname in bindir.listdir()]) + + def pytest_ignore_collect(path, config): - p = path.dirpath() - ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) + ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) ignore_paths = ignore_paths or [] excludeopt = config.getoption("ignore") if excludeopt: ignore_paths.extend([py.path.local(x) for x in excludeopt]) - if path in ignore_paths: + if py.path.local(path) in ignore_paths: + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if _in_venv(path) and not allow_in_venv: return True # Skip duplicate paths. @@ -190,7 +210,65 @@ def pytest_ignore_collect(path, config): return False -class FSHookProxy: +def pytest_collection_modifyitems(items, config): + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@contextlib.contextmanager +def _patched_find_module(): + """Patch bug in pkgutil.ImpImporter.find_module + + When using pkgutil.find_loader on python<3.4 it removes symlinks + from the path due to a call to os.path.realpath. This is not consistent + with actually doing the import (in these versions, pkgutil and __import__ + did not share the same underlying code). This can break conftest + discovery for pytest where symlinks are involved. + + The only supported python<3.4 by pytest is python 2.7. 
+ """ + if six.PY2: # python 3.4+ uses importlib instead + def find_module_patched(self, fullname, path=None): + # Note: we ignore 'path' argument since it is only used via meta_path + subname = fullname.split(".")[-1] + if subname != fullname and self.path is None: + return None + if self.path is None: + path = None + else: + # original: path = [os.path.realpath(self.path)] + path = [self.path] + try: + file, filename, etc = pkgutil.imp.find_module(subname, + path) + except ImportError: + return None + return pkgutil.ImpLoader(fullname, file, filename, etc) + + old_find_module = pkgutil.ImpImporter.find_module + pkgutil.ImpImporter.find_module = find_module_patched + try: + yield + finally: + pkgutil.ImpImporter.find_module = old_find_module + else: + yield + + +class FSHookProxy(object): def __init__(self, fspath, pm, remove_mods): self.fspath = fspath self.pm = pm @@ -201,373 +279,42 @@ class FSHookProxy: self.__dict__[name] = x return x -class _CompatProperty(object): - def __init__(self, name): - self.name = name - - def __get__(self, obj, owner): - if obj is None: - return self - - # TODO: reenable in the features branch - # warnings.warn( - # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( - # name=self.name, owner=type(owner).__name__), - # PendingDeprecationWarning, stacklevel=2) - return getattr(__import__('pytest'), self.name) - - - -class NodeKeywords(MappingMixin): - def __init__(self, node): - self.node = node - self.parent = node.parent - self._markers = {node.name: True} - - def __getitem__(self, key): - try: - return self._markers[key] - except KeyError: - if self.parent is None: - raise - return self.parent.keywords[key] - - def __setitem__(self, key, value): - self._markers[key] = value - - def __delitem__(self, key): - raise ValueError("cannot delete key in keywords dict") - - def __iter__(self): - seen = set(self._markers) - if self.parent is not None: - seen.update(self.parent.keywords) - return iter(seen) - - def __len__(self): - return len(self.__iter__()) - - def keys(self): - return list(self) - - def __repr__(self): - return "" % (self.node, ) - - -class Node(object): - """ base class for Collector and Item the test collection tree. - Collector subclasses have children, Items are terminal nodes.""" - - def __init__(self, name, parent=None, config=None, session=None): - #: a unique name within the scope of the parent node - self.name = name - - #: the parent collector node. 
- self.parent = parent - - #: the pytest config object - self.config = config or parent.config - - #: the session this node is part of - self.session = session or parent.session - - #: filesystem path where this node was collected from (can be None) - self.fspath = getattr(parent, 'fspath', None) - - #: keywords/markers collected from all scopes - self.keywords = NodeKeywords(self) - - #: allow adding of extra keywords to use for matching - self.extra_keyword_matches = set() - - # used for storing artificial fixturedefs for direct parametrization - self._name2pseudofixturedef = {} - - @property - def ihook(self): - """ fspath sensitive hook proxy used to call pytest hooks""" - return self.session.gethookproxy(self.fspath) - - Module = _CompatProperty("Module") - Class = _CompatProperty("Class") - Instance = _CompatProperty("Instance") - Function = _CompatProperty("Function") - File = _CompatProperty("File") - Item = _CompatProperty("Item") - - def _getcustomclass(self, name): - maybe_compatprop = getattr(type(self), name) - if isinstance(maybe_compatprop, _CompatProperty): - return getattr(__import__('pytest'), name) - else: - cls = getattr(self, name) - # TODO: reenable in the features branch - # warnings.warn("use of node.%s is deprecated, " - # "use pytest_pycollect_makeitem(...) to create custom " - # "collection nodes" % name, category=DeprecationWarning) - return cls - - def __repr__(self): - return "<%s %r>" %(self.__class__.__name__, - getattr(self, 'name', None)) - - def warn(self, code, message): - """ generate a warning with the given code and message for this - item. """ - assert isinstance(code, str) - fslocation = getattr(self, "location", None) - if fslocation is None: - fslocation = getattr(self, "fspath", None) - self.ihook.pytest_logwarning.call_historic(kwargs=dict( - code=code, message=message, - nodeid=self.nodeid, fslocation=fslocation)) - - # methods for ordering nodes - @property - def nodeid(self): - """ a ::-separated string denoting its collection tree address. """ - try: - return self._nodeid - except AttributeError: - self._nodeid = x = self._makeid() - return x - - def _makeid(self): - return self.parent.nodeid + "::" + self.name - - def __hash__(self): - return hash(self.nodeid) - - def setup(self): - pass - - def teardown(self): - pass - - def _memoizedcall(self, attrname, function): - exattrname = "_ex_" + attrname - failure = getattr(self, exattrname, None) - if failure is not None: - py.builtin._reraise(failure[0], failure[1], failure[2]) - if hasattr(self, attrname): - return getattr(self, attrname) - try: - res = function() - except py.builtin._sysex: - raise - except: - failure = sys.exc_info() - setattr(self, exattrname, failure) - raise - setattr(self, attrname, res) - return res - - def listchain(self): - """ return list of all parent collectors up to self, - starting from root of collection tree. """ - chain = [] - item = self - while item is not None: - chain.append(item) - item = item.parent - chain.reverse() - return chain - - def add_marker(self, marker): - """ dynamically add a marker object to the node. - - ``marker`` can be a string or pytest.mark.* instance. 
- """ - from _pytest.mark import MarkDecorator, MARK_GEN - if isinstance(marker, py.builtin._basestring): - marker = getattr(MARK_GEN, marker) - elif not isinstance(marker, MarkDecorator): - raise ValueError("is not a string or pytest.mark.* Marker") - self.keywords[marker.name] = marker - - def get_marker(self, name): - """ get a marker object from this node or None if - the node doesn't have a marker with that name. """ - val = self.keywords.get(name, None) - if val is not None: - from _pytest.mark import MarkInfo, MarkDecorator - if isinstance(val, (MarkDecorator, MarkInfo)): - return val - - def listextrakeywords(self): - """ Return a set of all extra keywords in self and any parents.""" - extra_keywords = set() - item = self - for item in self.listchain(): - extra_keywords.update(item.extra_keyword_matches) - return extra_keywords - - def listnames(self): - return [x.name for x in self.listchain()] - - def addfinalizer(self, fin): - """ register a function to be called when this node is finalized. - - This method can only be called when this node is active - in a setup chain, for example during self.setup(). - """ - self.session._setupstate.addfinalizer(fin, self) - - def getparent(self, cls): - """ get the next parent node (including ourself) - which is an instance of the given class""" - current = self - while current and not isinstance(current, cls): - current = current.parent - return current - - def _prunetraceback(self, excinfo): - pass - - def _repr_failure_py(self, excinfo, style=None): - fm = self.session._fixturemanager - if excinfo.errisinstance(fm.FixtureLookupError): - return excinfo.value.formatrepr() - tbfilter = True - if self.config.option.fulltrace: - style="long" - else: - tb = _pytest._code.Traceback([excinfo.traceback[-1]]) - self._prunetraceback(excinfo) - if len(excinfo.traceback) == 0: - excinfo.traceback = tb - tbfilter = False # prunetraceback already does it - if style == "auto": - style = "long" - # XXX should excinfo.getrepr record all data and toterminal() process it? - if style is None: - if self.config.option.tbstyle == "short": - style = "short" - else: - style = "long" - - try: - os.getcwd() - abspath = False - except OSError: - abspath = True - - return excinfo.getrepr(funcargs=True, abspath=abspath, - showlocals=self.config.option.showlocals, - style=style, tbfilter=tbfilter) - - repr_failure = _repr_failure_py - -class Collector(Node): - """ Collector instances create children through collect() - and thus iteratively build a tree. - """ - - class CollectError(Exception): - """ an error during collection, contains a custom message. """ - - def collect(self): - """ returns a list of children (items and collectors) - for this collection node. - """ - raise NotImplementedError("abstract") - - def repr_failure(self, excinfo): - """ represent a collection failure. """ - if excinfo.errisinstance(self.CollectError): - exc = excinfo.value - return str(exc.args[0]) - return self._repr_failure_py(excinfo, style="short") - - def _prunetraceback(self, excinfo): - if hasattr(self, 'fspath'): - traceback = excinfo.traceback - ntraceback = traceback.cut(path=self.fspath) - if ntraceback == traceback: - ntraceback = ntraceback.cut(excludepath=tracebackcutdir) - excinfo.traceback = ntraceback.filter() - -class FSCollector(Collector): - def __init__(self, fspath, parent=None, config=None, session=None): - fspath = py.path.local(fspath) # xxx only for test_resultlog.py? 
- name = fspath.basename - if parent is not None: - rel = fspath.relto(parent.fspath) - if rel: - name = rel - name = name.replace(os.sep, "/") - super(FSCollector, self).__init__(name, parent, config, session) - self.fspath = fspath - - def _makeid(self): - relpath = self.fspath.relto(self.config.rootdir) - if os.sep != "/": - relpath = relpath.replace(os.sep, "/") - return relpath - -class File(FSCollector): - """ base class for collecting tests from a file. """ - -class Item(Node): - """ a basic test invocation item. Note that for a single function - there might be multiple test invocation items. - """ - nextitem = None - - def __init__(self, name, parent=None, config=None, session=None): - super(Item, self).__init__(name, parent, config, session) - self._report_sections = [] - - def add_report_section(self, when, key, content): - if content: - self._report_sections.append((when, key, content)) - - def reportinfo(self): - return self.fspath, None, "" - - @property - def location(self): - try: - return self._location - except AttributeError: - location = self.reportinfo() - # bestrelpath is a quite slow function - cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) - try: - fspath = cache[location[0]] - except KeyError: - fspath = self.session.fspath.bestrelpath(location[0]) - cache[location[0]] = fspath - location = (fspath, location[1], str(location[2])) - self._location = location - return location class NoMatch(Exception): """ raised if matching cannot locate a matching names. """ + class Interrupted(KeyboardInterrupt): """ signals an interrupted test run. """ - __module__ = 'builtins' # for py3 + __module__ = 'builtins' # for py3 -class Session(FSCollector): + +class Failed(Exception): + """ signals an stop as failed test run. """ + + +class Session(nodes.FSCollector): Interrupted = Interrupted + Failed = Failed def __init__(self, config): - FSCollector.__init__(self, config.rootdir, parent=None, - config=config, session=self) + nodes.FSCollector.__init__( + self, config.rootdir, parent=None, + config=config, session=self, nodeid="") self.testsfailed = 0 self.testscollected = 0 self.shouldstop = False + self.shouldfail = False self.trace = config.trace.root.get("collection") self._norecursepatterns = config.getini("norecursedirs") self.startdir = py.path.local() - self.config.pluginmanager.register(self, name="session") - def _makeid(self): - return "" + self.config.pluginmanager.register(self, name="session") @hookimpl(tryfirst=True) def pytest_collectstart(self): + if self.shouldfail: + raise self.Failed(self.shouldfail) if self.shouldstop: raise self.Interrupted(self.shouldstop) @@ -577,7 +324,7 @@ class Session(FSCollector): self.testsfailed += 1 maxfail = self.config.getvalue("maxfail") if maxfail and self.testsfailed >= maxfail: - self.shouldstop = "stopping after %d failures" % ( + self.shouldfail = "stopping after %d failures" % ( self.testsfailed) pytest_collectreport = pytest_runtest_logreport @@ -604,7 +351,7 @@ class Session(FSCollector): items = self._perform_collect(args, genitems) self.config.pluginmanager.check_pending() hook.pytest_collection_modifyitems(session=self, - config=self.config, items=items) + config=self.config, items=items) finally: hook.pytest_collection_finish(session=self) self.testscollected = len(items) @@ -692,9 +439,10 @@ class Session(FSCollector): """Convert a dotted module name to path. 
""" - import pkgutil + try: - loader = pkgutil.find_loader(x) + with _patched_find_module(): + loader = pkgutil.find_loader(x) except ImportError: return x if loader is None: @@ -702,7 +450,8 @@ class Session(FSCollector): # This method is sometimes invoked when AssertionRewritingHook, which # does not define a get_filename method, is already in place: try: - path = loader.get_filename(x) + with _patched_find_module(): + path = loader.get_filename(x) except AttributeError: # Retrieve path from AssertionRewritingHook: path = loader.modules[x][0].co_filename @@ -746,11 +495,11 @@ class Session(FSCollector): nextnames = names[1:] resultnodes = [] for node in matching: - if isinstance(node, Item): + if isinstance(node, nodes.Item): if not names: resultnodes.append(node) continue - assert isinstance(node, Collector) + assert isinstance(node, nodes.Collector) rep = collect_one_node(node) if rep.passed: has_matched = False @@ -772,11 +521,11 @@ class Session(FSCollector): def genitems(self, node): self.trace("genitems", node) - if isinstance(node, Item): + if isinstance(node, nodes.Item): node.ihook.pytest_itemcollected(item=node) yield node else: - assert isinstance(node, Collector) + assert isinstance(node, nodes.Collector) rep = collect_one_node(node) if rep.passed: for subnode in rep.result: diff --git a/_pytest/mark/__init__.py b/_pytest/mark/__init__.py new file mode 100644 index 000000000..7c96116d1 --- /dev/null +++ b/_pytest/mark/__init__.py @@ -0,0 +1,157 @@ +""" generic mechanism for marking and selecting python functions. """ +from __future__ import absolute_import, division, print_function +from _pytest.config import UsageError +from .structures import ( + ParameterSet, EMPTY_PARAMETERSET_OPTION, MARK_GEN, + Mark, MarkInfo, MarkDecorator, MarkGenerator, + transfer_markers, get_empty_parameterset_mark +) +from .legacy import matchkeyword, matchmark + +__all__ = [ + 'Mark', 'MarkInfo', 'MarkDecorator', 'MarkGenerator', + 'transfer_markers', 'get_empty_parameterset_mark' +] + + +class MarkerError(Exception): + + """Error in use of a pytest marker/attribute.""" + + +def param(*values, **kw): + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize("test_input,expected", [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: variable args of the values of the parameter set, in order. + :keyword marks: a single mark or a list of marks to be applied to this parameter set. + :keyword str id: the id to attribute to this parameter set. + """ + return ParameterSet.param(*values, **kw) + + +def pytest_addoption(parser): + group = parser.getgroup("general") + group._addoption( + '-k', + action="store", dest="keyword", default='', metavar="EXPRESSION", + help="only run tests which match the given substring expression. " + "An expression is a python evaluatable expression " + "where all names are substring-matched against test names " + "and their parent classes. Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them." 
+ ) + + group._addoption( + "-m", + action="store", dest="markexpr", default="", metavar="MARKEXPR", + help="only run tests matching given mark expression. " + "example: -m 'mark1 and not mark2'." + ) + + group.addoption( + "--markers", action="store_true", + help="show markers (builtin, plugin and per-project ones)." + ) + + parser.addini("markers", "markers for test functions", 'linelist') + parser.addini( + EMPTY_PARAMETERSET_OPTION, + "default marker for empty parametersets") + + +def pytest_cmdline_main(config): + import _pytest.config + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else '' + tw.write("@pytest.mark.%s:" % name, bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + +pytest_cmdline_main.tryfirst = True + + +def deselect_by_keyword(items, config): + keywordexpr = config.option.keyword.lstrip() + if keywordexpr.startswith("-"): + keywordexpr = "not " + keywordexpr[1:] + selectuntil = False + if keywordexpr[-1:] == ":": + selectuntil = True + keywordexpr = keywordexpr[:-1] + + remaining = [] + deselected = [] + for colitem in items: + if keywordexpr and not matchkeyword(colitem, keywordexpr): + deselected.append(colitem) + else: + if selectuntil: + keywordexpr = None + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def deselect_by_mark(items, config): + matchexpr = config.option.markexpr + if not matchexpr: + return + + remaining = [] + deselected = [] + for item in items: + if matchmark(item, matchexpr): + remaining.append(item) + else: + deselected.append(item) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def pytest_collection_modifyitems(items, config): + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config): + config._old_mark_config = MARK_GEN._config + if config.option.strict: + MARK_GEN._config = config + + empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if empty_parameterset not in ('skip', 'xfail', None, ''): + raise UsageError( + "{!s} must be one of skip and xfail," + " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)) + + +def pytest_unconfigure(config): + MARK_GEN._config = getattr(config, '_old_mark_config', None) diff --git a/_pytest/mark/evaluate.py b/_pytest/mark/evaluate.py new file mode 100644 index 000000000..c89b4933a --- /dev/null +++ b/_pytest/mark/evaluate.py @@ -0,0 +1,118 @@ +import os +import six +import sys +import platform +import traceback + +from ..outcomes import fail, TEST_OUTCOME + + +def cached_eval(config, expr, d): + if not hasattr(config, '_evalcache'): + config._evalcache = {} + try: + return config._evalcache[expr] + except KeyError: + import _pytest._code + exprcode = _pytest._code.compile(expr, mode="eval") + config._evalcache[expr] = x = eval(exprcode, d) + return x + + +class MarkEvaluator(object): + def __init__(self, item, name): + self.item = item + self._marks = None + self._mark = None + self._mark_name = name + + def __bool__(self): + # dont cache here to prevent staleness + return bool(self._get_marks()) + __nonzero__ = __bool__ + + def wasvalid(self): + return not hasattr(self, 'exc') + + def _get_marks(self): + return [x for x in self.item.iter_markers() if x.name == self._mark_name] + + def 
invalidraise(self, exc): + raises = self.get('raises') + if not raises: + return + return not isinstance(exc, raises) + + def istrue(self): + try: + return self._istrue() + except TEST_OUTCOME: + self.exc = sys.exc_info() + if isinstance(self.exc[1], SyntaxError): + msg = [" " * (self.exc[1].offset + 4) + "^", ] + msg.append("SyntaxError: invalid syntax") + else: + msg = traceback.format_exception_only(*self.exc[:2]) + fail("Error evaluating %r expression\n" + " %s\n" + "%s" + % (self._mark_name, self.expr, "\n".join(msg)), + pytrace=False) + + def _getglobals(self): + d = {'os': os, 'sys': sys, 'platform': platform, 'config': self.item.config} + if hasattr(self.item, 'obj'): + d.update(self.item.obj.__globals__) + return d + + def _istrue(self): + if hasattr(self, 'result'): + return self.result + self._marks = self._get_marks() + + if self._marks: + self.result = False + for mark in self._marks: + self._mark = mark + if 'condition' in mark.kwargs: + args = (mark.kwargs['condition'],) + else: + args = mark.args + + for expr in args: + self.expr = expr + if isinstance(expr, six.string_types): + d = self._getglobals() + result = cached_eval(self.item.config, expr, d) + else: + if "reason" not in mark.kwargs: + # XXX better be checked at collection time + msg = "you need to specify reason=STRING " \ + "when using booleans as conditions." + fail(msg) + result = bool(expr) + if result: + self.result = True + self.reason = mark.kwargs.get('reason', None) + self.expr = expr + return self.result + + if not args: + self.result = True + self.reason = mark.kwargs.get('reason', None) + return self.result + return False + + def get(self, attr, default=None): + if self._mark is None: + return default + return self._mark.kwargs.get(attr, default) + + def getexplanation(self): + expl = getattr(self, 'reason', None) or self.get('reason', None) + if not expl: + if not hasattr(self, 'expr'): + return "" + else: + return "condition: " + str(self.expr) + return expl diff --git a/_pytest/mark/legacy.py b/_pytest/mark/legacy.py new file mode 100644 index 000000000..ec45f12af --- /dev/null +++ b/_pytest/mark/legacy.py @@ -0,0 +1,97 @@ +""" +this is a place where we put data structures used by legacy APIs +that we hope to remove +""" +import attr +import keyword + +from . import MarkInfo, MarkDecorator + +from _pytest.config import UsageError + + +@attr.s +class MarkMapping(object): + """Provides a local mapping for markers where item access + resolves to True if the marker is present. """ + + own_mark_names = attr.ib() + + @classmethod + def from_keywords(cls, keywords): + mark_names = set() + for key, value in keywords.items(): + if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator): + mark_names.add(key) + return cls(mark_names) + + def __getitem__(self, name): + return name in self.own_mark_names + + +class KeywordMapping(object): + """Provides a local mapping for keywords. + Given a list of names, map any substring of one of these names to True. 
+ """ + + def __init__(self, names): + self._names = names + + @classmethod + def from_item(cls, item): + mapped_names = set() + + # Add the names of the current item and any parent items + import pytest + for item in item.listchain(): + if not isinstance(item, pytest.Instance): + mapped_names.add(item.name) + + # Add the names added as extra keywords to current or parent items + for name in item.listextrakeywords(): + mapped_names.add(name) + + # Add the names attached to the current function through direct assignment + if hasattr(item, 'function'): + for name in item.function.__dict__: + mapped_names.add(name) + + return cls(mapped_names) + + def __getitem__(self, subname): + for name in self._names: + if subname in name: + return True + return False + + +python_keywords_allowed_list = ["or", "and", "not"] + + +def matchmark(colitem, markexpr): + """Tries to match on any marker names, attached to the given colitem.""" + return eval(markexpr, {}, MarkMapping.from_keywords(colitem.keywords)) + + +def matchkeyword(colitem, keywordexpr): + """Tries to match given keyword expression to given collector item. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + mapping = KeywordMapping.from_item(colitem) + if " " not in keywordexpr: + # special case to allow for simple "-k pass" and "-k 1.3" + return mapping[keywordexpr] + elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: + return not mapping[keywordexpr[4:]] + for kwd in keywordexpr.split(): + if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list: + raise UsageError("Python keyword '{}' not accepted in expressions passed to '-k'".format(kwd)) + try: + return eval(keywordexpr, {}, mapping) + except SyntaxError: + raise UsageError("Wrong expression passed to '-k': {}".format(keywordexpr)) diff --git a/_pytest/mark.py b/_pytest/mark/structures.py similarity index 54% rename from _pytest/mark.py rename to _pytest/mark/structures.py index 961c3c409..5b33f3abb 100644 --- a/_pytest/mark.py +++ b/_pytest/mark/structures.py @@ -1,12 +1,17 @@ -""" generic mechanism for marking and selecting python functions. 
""" -from __future__ import absolute_import, division, print_function - import inspect import warnings from collections import namedtuple from operator import attrgetter -from .compat import imap -from .deprecated import MARK_INFO_ATTRIBUTE, MARK_PARAMETERSET_UNPACKING + +import attr + +from ..deprecated import MARK_PARAMETERSET_UNPACKING, MARK_INFO_ATTRIBUTE +from ..compat import NOTSET, getfslineno, MappingMixin +from six.moves import map, reduce + + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + def alias(name, warning=None): getter = attrgetter(name) @@ -18,6 +23,25 @@ def alias(name, warning=None): return property(getter if warning is None else warned, doc='alias for ' + name) +def istestfunc(func): + return hasattr(func, "__call__") and \ + getattr(func, "__name__", "") != "" + + +def get_empty_parameterset_mark(config, argnames, func): + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ('', None, 'skip'): + mark = MARK_GEN.skip + elif requested_mark == 'xfail': + mark = MARK_GEN.xfail(run=False) + else: + raise LookupError(requested_mark) + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, func.__name__, fs, lineno) + return mark(reason=reason) + + class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): @classmethod def param(cls, *values, **kw): @@ -30,8 +54,8 @@ class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): def param_extract_id(id=None): return id - id = param_extract_id(**kw) - return cls(values, marks, id) + id_ = param_extract_id(**kw) + return cls(values, marks, id_) @classmethod def extract_from(cls, parameterset, legacy_force_tuple=False): @@ -66,221 +90,53 @@ class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): return cls(argval, marks=newmarks, id=None) - @property - def deprecated_arg_dict(self): - return dict((mark.name, mark) for mark in self.marks) - - -class MarkerError(Exception): - - """Error in use of a pytest marker/attribute.""" - - -def param(*values, **kw): - return ParameterSet.param(*values, **kw) - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption( - '-k', - action="store", dest="keyword", default='', metavar="EXPRESSION", - help="only run tests which match the given substring expression. " - "An expression is a python evaluatable expression " - "where all names are substring-matched against test names " - "and their parent classes. Example: -k 'test_method or test_" - "other' matches all test functions and classes whose name " - "contains 'test_method' or 'test_other'. " - "Additionally keywords are matched to classes and functions " - "containing extra names in their 'extra_keyword_matches' set, " - "as well as functions which have names assigned directly to them." - ) - - group._addoption( - "-m", - action="store", dest="markexpr", default="", metavar="MARKEXPR", - help="only run tests matching given mark expression. " - "example: -m 'mark1 and not mark2'." - ) - - group.addoption( - "--markers", action="store_true", - help="show markers (builtin, plugin and per-project ones)." 
- ) - - parser.addini("markers", "markers for test functions", 'linelist') - - -def pytest_cmdline_main(config): - import _pytest.config - if config.option.markers: - config._do_configure() - tw = _pytest.config.create_terminal_writer(config) - for line in config.getini("markers"): - name, rest = line.split(":", 1) - tw.write("@pytest.mark.%s:" % name, bold=True) - tw.line(rest) - tw.line() - config._ensure_unconfigure() - return 0 - - -pytest_cmdline_main.tryfirst = True - - -def pytest_collection_modifyitems(items, config): - keywordexpr = config.option.keyword.lstrip() - matchexpr = config.option.markexpr - if not keywordexpr and not matchexpr: - return - # pytest used to allow "-" for negating - # but today we just allow "-" at the beginning, use "not" instead - # we probably remove "-" altogether soon - if keywordexpr.startswith("-"): - keywordexpr = "not " + keywordexpr[1:] - selectuntil = False - if keywordexpr[-1:] == ":": - selectuntil = True - keywordexpr = keywordexpr[:-1] - - remaining = [] - deselected = [] - for colitem in items: - if keywordexpr and not matchkeyword(colitem, keywordexpr): - deselected.append(colitem) + @classmethod + def _for_parametrize(cls, argnames, argvalues, func, config): + if not isinstance(argnames, (tuple, list)): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 else: - if selectuntil: - keywordexpr = None - if matchexpr: - if not matchmark(colitem, matchexpr): - deselected.append(colitem) - continue - remaining.append(colitem) + force_tuple = False + parameters = [ + ParameterSet.extract_from(x, legacy_force_tuple=force_tuple) + for x in argvalues] + del argvalues - if deselected: - config.hook.pytest_deselected(items=deselected) - items[:] = remaining + if not parameters: + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append(ParameterSet( + values=(NOTSET,) * len(argnames), + marks=[mark], + id=None, + )) + return argnames, parameters -class MarkMapping: - """Provides a local mapping for markers where item access - resolves to True if the marker is present. """ - def __init__(self, keywords): - mymarks = set() - for key, value in keywords.items(): - if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator): - mymarks.add(key) - self._mymarks = mymarks +@attr.s(frozen=True) +class Mark(object): + #: name of the mark + name = attr.ib(type=str) + #: positional arguments of the mark decorator + args = attr.ib(type="List[object]") + #: keyword arguments of the mark decorator + kwargs = attr.ib(type="Dict[str, object]") - def __getitem__(self, name): - return name in self._mymarks + def combined_with(self, other): + """ + :param other: the mark to combine with + :type other: Mark + :rtype: Mark + + combines by appending aargs and merging the mappings + """ + assert self.name == other.name + return Mark( + self.name, self.args + other.args, + dict(self.kwargs, **other.kwargs)) -class KeywordMapping: - """Provides a local mapping for keywords. - Given a list of names, map any substring of one of these names to True. - """ - def __init__(self, names): - self._names = names - - def __getitem__(self, subname): - for name in self._names: - if subname in name: - return True - return False - - -def matchmark(colitem, markexpr): - """Tries to match on any marker names, attached to the given colitem.""" - return eval(markexpr, {}, MarkMapping(colitem.keywords)) - - -def matchkeyword(colitem, keywordexpr): - """Tries to match given keyword expression to given collector item. 
- - Will match on the name of colitem, including the names of its parents. - Only matches names of items which are either a :class:`Class` or a - :class:`Function`. - Additionally, matches on names in the 'extra_keyword_matches' set of - any item, as well as names directly assigned to test functions. - """ - mapped_names = set() - - # Add the names of the current item and any parent items - import pytest - for item in colitem.listchain(): - if not isinstance(item, pytest.Instance): - mapped_names.add(item.name) - - # Add the names added as extra keywords to current or parent items - for name in colitem.listextrakeywords(): - mapped_names.add(name) - - # Add the names attached to the current function through direct assignment - if hasattr(colitem, 'function'): - for name in colitem.function.__dict__: - mapped_names.add(name) - - mapping = KeywordMapping(mapped_names) - if " " not in keywordexpr: - # special case to allow for simple "-k pass" and "-k 1.3" - return mapping[keywordexpr] - elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: - return not mapping[keywordexpr[4:]] - return eval(keywordexpr, {}, mapping) - - -def pytest_configure(config): - config._old_mark_config = MARK_GEN._config - if config.option.strict: - MARK_GEN._config = config - - -def pytest_unconfigure(config): - MARK_GEN._config = getattr(config, '_old_mark_config', None) - - -class MarkGenerator: - """ Factory for :class:`MarkDecorator` objects - exposed as - a ``pytest.mark`` singleton instance. Example:: - - import pytest - @pytest.mark.slowtest - def test_function(): - pass - - will set a 'slowtest' :class:`MarkInfo` object - on the ``test_function`` object. """ - _config = None - - - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError("Marker name must NOT start with underscore") - if self._config is not None: - self._check(name) - return MarkDecorator(Mark(name, (), {})) - - def _check(self, name): - try: - if name in self._markers: - return - except AttributeError: - pass - self._markers = l = set() - for line in self._config.getini("markers"): - beginning = line.split(":", 1) - x = beginning[0].split("(", 1)[0] - l.add(x) - if name not in self._markers: - raise AttributeError("%r not a registered marker" % (name,)) - - -def istestfunc(func): - return hasattr(func, "__call__") and \ - getattr(func, "__name__", "") != "" - -class MarkDecorator: +@attr.s +class MarkDecorator(object): """ A decorator for test functions and test classes. When applied it will create :class:`MarkInfo` objects which may be :ref:`retrieved by hooks as item keywords `. @@ -313,9 +169,8 @@ class MarkDecorator: additional keyword or positional arguments. 
""" - def __init__(self, mark): - assert isinstance(mark, Mark), repr(mark) - self.mark = mark + + mark = attr.ib(validator=attr.validators.instance_of(Mark)) name = alias('mark.name') args = alias('mark.args') @@ -323,14 +178,25 @@ class MarkDecorator: @property def markname(self): - return self.name # for backward-compat (2.4.1 had this attr) + return self.name # for backward-compat (2.4.1 had this attr) def __eq__(self, other): - return self.mark == other.mark + return self.mark == other.mark if isinstance(other, MarkDecorator) else False def __repr__(self): return "" % (self.mark,) + def with_args(self, *args, **kwargs): + """ return a MarkDecorator with extra arguments added + + unlike call this can be used even if the sole argument is a callable/class + + :return: MarkDecorator + """ + + mark = Mark(self.name, args, kwargs) + return self.__class__(self.mark.combined_with(mark)) + def __call__(self, *args, **kwargs): """ if passed a single callable argument: decorate it with mark info. otherwise add *args/**kwargs in-place to mark information. """ @@ -344,9 +210,8 @@ class MarkDecorator: store_legacy_markinfo(func, self.mark) store_mark(func, self.mark) return func + return self.with_args(*args, **kwargs) - mark = Mark(self.name, args, kwargs) - return self.__class__(self.mark.combined_with(mark)) def get_unpacked_marks(obj): """ @@ -368,7 +233,7 @@ def store_mark(obj, mark): """ assert isinstance(mark, Mark), mark # always reassign name to avoid updating pytestmark - # in a referene that was only borrowed + # in a reference that was only borrowed obj.pytestmark = get_unpacked_marks(obj) + [mark] @@ -379,60 +244,12 @@ def store_legacy_markinfo(func, mark): raise TypeError("got {mark!r} instead of a Mark".format(mark=mark)) holder = getattr(func, mark.name, None) if holder is None: - holder = MarkInfo(mark) + holder = MarkInfo.for_mark(mark) setattr(func, mark.name, holder) else: holder.add_mark(mark) -class Mark(namedtuple('Mark', 'name, args, kwargs')): - - def combined_with(self, other): - assert self.name == other.name - return Mark( - self.name, self.args + other.args, - dict(self.kwargs, **other.kwargs)) - - -class MarkInfo(object): - """ Marking object created by :class:`MarkDecorator` instances. """ - def __init__(self, mark): - assert isinstance(mark, Mark), repr(mark) - self.combined = mark - self._marks = [mark] - - name = alias('combined.name', warning=MARK_INFO_ATTRIBUTE) - args = alias('combined.args', warning=MARK_INFO_ATTRIBUTE) - kwargs = alias('combined.kwargs', warning=MARK_INFO_ATTRIBUTE) - - def __repr__(self): - return "".format(self.combined) - - def add_mark(self, mark): - """ add a MarkInfo with the given args and kwargs. """ - self._marks.append(mark) - self.combined = self.combined.combined_with(mark) - - def __iter__(self): - """ yield MarkInfo objects each relating to a marking-call. """ - return imap(MarkInfo, self._marks) - - -MARK_GEN = MarkGenerator() - - -def _marked(func, mark): - """ Returns True if :func: is already marked with :mark:, False otherwise. - This can happen if marker is applied to class and the test file is - invoked more than once. 
- """ - try: - func_mark = getattr(func, mark.name) - except AttributeError: - return False - return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs - - def transfer_markers(funcobj, cls, mod): """ this function transfers class level markers and module level markers @@ -446,3 +263,152 @@ def transfer_markers(funcobj, cls, mod): for mark in get_unpacked_marks(obj): if not _marked(funcobj, mark): store_legacy_markinfo(funcobj, mark) + + +def _marked(func, mark): + """ Returns True if :func: is already marked with :mark:, False otherwise. + This can happen if marker is applied to class and the test file is + invoked more than once. + """ + try: + func_mark = getattr(func, getattr(mark, 'combined', mark).name) + except AttributeError: + return False + return any(mark == info.combined for info in func_mark) + + +@attr.s +class MarkInfo(object): + """ Marking object created by :class:`MarkDecorator` instances. """ + + _marks = attr.ib() + combined = attr.ib( + repr=False, + default=attr.Factory(lambda self: reduce(Mark.combined_with, self._marks), + takes_self=True)) + + name = alias('combined.name', warning=MARK_INFO_ATTRIBUTE) + args = alias('combined.args', warning=MARK_INFO_ATTRIBUTE) + kwargs = alias('combined.kwargs', warning=MARK_INFO_ATTRIBUTE) + + @classmethod + def for_mark(cls, mark): + return cls([mark]) + + def __repr__(self): + return "<MarkInfo {0!r}>".format(self.combined) + + def add_mark(self, mark): + """ add a MarkInfo with the given args and kwargs. """ + self._marks.append(mark) + self.combined = self.combined.combined_with(mark) + + def __iter__(self): + """ yield MarkInfo objects each relating to a marking-call. """ + return map(MarkInfo.for_mark, self._marks) + + +class MarkGenerator(object): + """ Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. Example:: + + import pytest + @pytest.mark.slowtest + def test_function(): + pass + + will set a 'slowtest' :class:`MarkInfo` object + on the ``test_function`` object. 
""" _config = None + + def __getattr__(self, name): + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + if self._config is not None: + self._check(name) + return MarkDecorator(Mark(name, (), {})) + + def _check(self, name): + try: + if name in self._markers: + return + except AttributeError: + pass + self._markers = values = set() + for line in self._config.getini("markers"): + marker = line.split(":", 1)[0] + marker = marker.rstrip() + x = marker.split("(", 1)[0] + values.add(x) + if name not in self._markers: + raise AttributeError("%r not a registered marker" % (name,)) + + +MARK_GEN = MarkGenerator() + + +class NodeKeywords(MappingMixin): + def __init__(self, node): + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key): + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key, value): + self._markers[key] = value + + def __delitem__(self, key): + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self): + seen = self._seen() + return iter(seen) + + def _seen(self): + seen = set(self._markers) + if self.parent is not None: + seen.update(self.parent.keywords) + return seen + + def __len__(self): + return len(self._seen()) + + def __repr__(self): + return "<NodeKeywords for node %s>" % (self.node, ) + + +@attr.s(cmp=False, hash=False) +class NodeMarkers(object): + """ + internal structure for storing marks belonging to a node + + .. warning:: + + unstable api + + """ + own_markers = attr.ib(default=attr.Factory(list)) + + def update(self, add_markers): + """update the own markers + """ + self.own_markers.extend(add_markers) + + def find(self, name): + """ + find markers in own nodes or parent nodes + needs a better place + """ + for mark in self.own_markers: + if mark.name == name: + yield mark + + def __iter__(self): + return iter(self.own_markers) diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py index a70b23dda..78db6064d 100644 --- a/_pytest/monkeypatch.py +++ b/_pytest/monkeypatch.py @@ -4,8 +4,9 @@ from __future__ import absolute_import, division, print_function import os import sys import re +from contextlib import contextmanager -from py.builtin import _basestring +import six from _pytest.fixtures import fixture RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$") @@ -71,15 +72,15 @@ def annotated_getattr(obj, name, ann): obj = getattr(obj, name) except AttributeError: raise AttributeError( - '%r object at %s has no attribute %r' % ( - type(obj).__name__, ann, name - ) + '%r object at %s has no attribute %r' % ( + type(obj).__name__, ann, name + ) ) return obj def derive_importpath(import_path, raising): - if not isinstance(import_path, _basestring) or "." not in import_path: + if not isinstance(import_path, six.string_types) or "." not in import_path: raise TypeError("must be absolute import path string, not %r" % (import_path,)) module, attr = import_path.rsplit('.', 1) @@ -89,7 +90,7 @@ def derive_importpath(import_path, raising): return attr, target -class Notset: +class Notset(object): def __repr__(self): return "<notset>" @@ -97,7 +98,7 @@ class Notset: notset = Notset() -class MonkeyPatch: +class MonkeyPatch(object): """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes. 
""" @@ -107,6 +108,29 @@ class MonkeyPatch: self._cwd = None self._savesyspath = None + @contextmanager + def context(self): + """ + Context manager that returns a new :class:`MonkeyPatch` object which + undoes any patching done inside the ``with`` block upon exit: + + .. code-block:: python + + import functools + def test_partial(monkeypatch): + with monkeypatch.context() as m: + m.setattr(functools, "partial", 3) + + Useful in situations where it is desired to undo some patches before the test ends, + such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples + of this see `#3290 `_. + """ + m = MonkeyPatch() + try: + yield m + finally: + m.undo() + def setattr(self, target, name, value=notset, raising=True): """ Set attribute value on target, memorizing the old value. By default raise AttributeError if the attribute did not exist. @@ -114,7 +138,7 @@ class MonkeyPatch: For convenience you can specify a string as ``target`` which will be interpreted as a dotted import path, with the last part being the attribute name. Example: - ``monkeypatch.setattr("os.getcwd", lambda x: "/")`` + ``monkeypatch.setattr("os.getcwd", lambda: "/")`` would set the ``getcwd`` function of the ``os`` module. The ``raising`` value determines if the setattr should fail @@ -125,7 +149,7 @@ class MonkeyPatch: import inspect if value is notset: - if not isinstance(target, _basestring): + if not isinstance(target, six.string_types): raise TypeError("use setattr(target, name, value) or " "setattr(target, value) with target being a dotted " "import string") @@ -155,7 +179,7 @@ class MonkeyPatch: """ __tracebackhide__ = True if name is notset: - if not isinstance(target, _basestring): + if not isinstance(target, six.string_types): raise TypeError("use delattr(target, name) or " "delattr(target) with target being a dotted " "import string") diff --git a/_pytest/nodes.py b/_pytest/nodes.py new file mode 100644 index 000000000..799ee078a --- /dev/null +++ b/_pytest/nodes.py @@ -0,0 +1,392 @@ +from __future__ import absolute_import, division, print_function +import os + +import six +import py +import attr + +import _pytest +import _pytest._code + +from _pytest.mark.structures import NodeKeywords, MarkInfo + +SEP = "/" + +tracebackcutdir = py.path.local(_pytest.__file__).dirpath() + + +def _splitnode(nodeid): + """Split a nodeid into constituent 'parts'. + + Node IDs are strings, and can be things like: + '' + 'testing/code' + 'testing/code/test_excinfo.py' + 'testing/code/test_excinfo.py::TestFormattedExcinfo::()' + + Return values are lists e.g. + [] + ['testing', 'code'] + ['testing', 'code', 'test_excinfo.py'] + ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()'] + """ + if nodeid == '': + # If there is no root node at all, return an empty list so the caller's logic can remain sane + return [] + parts = nodeid.split(SEP) + # Replace single last element 'test_foo.py::Bar::()' with multiple elements 'test_foo.py', 'Bar', '()' + parts[-1:] = parts[-1].split("::") + return parts + + +def ischildnode(baseid, nodeid): + """Return True if the nodeid is a child node of the baseid. + + E.g. 
'foo/bar::Baz::()' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp' + """ + base_parts = _splitnode(baseid) + node_parts = _splitnode(nodeid) + if len(node_parts) < len(base_parts): + return False + return node_parts[:len(base_parts)] == base_parts + + +@attr.s +class _CompatProperty(object): + name = attr.ib() + + def __get__(self, obj, owner): + if obj is None: + return self + + # TODO: reenable in the features branch + # warnings.warn( + # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( + # name=self.name, owner=type(owner).__name__), + # PendingDeprecationWarning, stacklevel=2) + return getattr(__import__('pytest'), self.name) + + +class Node(object): + """ base class for Collector and Item the test collection tree. + Collector subclasses have children, Items are terminal nodes.""" + + def __init__(self, name, parent=None, config=None, session=None, fspath=None, nodeid=None): + #: a unique name within the scope of the parent node + self.name = name + + #: the parent collector node. + self.parent = parent + + #: the pytest config object + self.config = config or parent.config + + #: the session this node is part of + self.session = session or parent.session + + #: filesystem path where this node was collected from (can be None) + self.fspath = fspath or getattr(parent, 'fspath', None) + + #: keywords/markers collected from all scopes + self.keywords = NodeKeywords(self) + + #: the marker objects belonging to this node + self.own_markers = [] + + #: allow adding of extra keywords to use for matching + self.extra_keyword_matches = set() + + # used for storing artificial fixturedefs for direct parametrization + self._name2pseudofixturedef = {} + + if nodeid is not None: + self._nodeid = nodeid + else: + assert parent is not None + self._nodeid = self.parent.nodeid + "::" + self.name + + @property + def ihook(self): + """ fspath sensitive hook proxy used to call pytest hooks""" + return self.session.gethookproxy(self.fspath) + + Module = _CompatProperty("Module") + Class = _CompatProperty("Class") + Instance = _CompatProperty("Instance") + Function = _CompatProperty("Function") + File = _CompatProperty("File") + Item = _CompatProperty("Item") + + def _getcustomclass(self, name): + maybe_compatprop = getattr(type(self), name) + if isinstance(maybe_compatprop, _CompatProperty): + return getattr(__import__('pytest'), name) + else: + cls = getattr(self, name) + # TODO: reenable in the features branch + # warnings.warn("use of node.%s is deprecated, " + # "use pytest_pycollect_makeitem(...) to create custom " + # "collection nodes" % name, category=DeprecationWarning) + return cls + + def __repr__(self): + return "<%s %r>" % (self.__class__.__name__, + getattr(self, 'name', None)) + + def warn(self, code, message): + """ generate a warning with the given code and message for this + item. """ + assert isinstance(code, str) + fslocation = getattr(self, "location", None) + if fslocation is None: + fslocation = getattr(self, "fspath", None) + self.ihook.pytest_logwarning.call_historic(kwargs=dict( + code=code, message=message, + nodeid=self.nodeid, fslocation=fslocation)) + + # methods for ordering nodes + @property + def nodeid(self): + """ a ::-separated string denoting its collection tree address. 
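# Standalone sketch (not part of the patch) mirroring the behaviour documented
# for ``_splitnode``/``ischildnode``: node ids are '/'-separated, with the last
# component further split on '::', and child-ness is a parts-prefix check.
SEP = "/"

def splitnode(nodeid):
    if nodeid == "":
        return []
    parts = nodeid.split(SEP)
    parts[-1:] = parts[-1].split("::")
    return parts

def ischildnode(baseid, nodeid):
    base, node = splitnode(baseid), splitnode(nodeid)
    return len(node) >= len(base) and node[:len(base)] == base

assert splitnode("testing/code/test_excinfo.py::TestFormattedExcinfo::()") == [
    "testing", "code", "test_excinfo.py", "TestFormattedExcinfo", "()"]
assert ischildnode("foo/bar", "foo/bar::Baz::()")
assert not ischildnode("foo/blorp", "foo/bar::Baz::()")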
""" + return self._nodeid + + def __hash__(self): + return hash(self.nodeid) + + def setup(self): + pass + + def teardown(self): + pass + + def listchain(self): + """ return list of all parent collectors up to self, + starting from root of collection tree. """ + chain = [] + item = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker(self, marker): + """ dynamically add a marker object to the node. + + ``marker`` can be a string or pytest.mark.* instance. + """ + from _pytest.mark import MarkDecorator, MARK_GEN + if isinstance(marker, six.string_types): + marker = getattr(MARK_GEN, marker) + elif not isinstance(marker, MarkDecorator): + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker.name] = marker + self.own_markers.append(marker) + + def iter_markers(self): + """ + iterate over all markers of the node + """ + return (x[1] for x in self.iter_markers_with_node()) + + def iter_markers_with_node(self): + """ + iterate over all markers of the node + returns sequence of tuples (node, mark) + """ + for node in reversed(self.listchain()): + for mark in node.own_markers: + yield node, mark + + def get_marker(self, name): + """ get a marker object from this node or None if + the node doesn't have a marker with that name. + + ..warning:: + + deprecated + """ + markers = [x for x in self.iter_markers() if x.name == name] + if markers: + return MarkInfo(markers) + + def listextrakeywords(self): + """ Return a set of all extra keywords in self and any parents.""" + extra_keywords = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self): + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin): + """ register a function to be called when this node is finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls): + """ get the next parent node (including ourself) + which is an instance of the given class""" + current = self + while current and not isinstance(current, cls): + current = current.parent + return current + + def _prunetraceback(self, excinfo): + pass + + def _repr_failure_py(self, excinfo, style=None): + fm = self.session._fixturemanager + if excinfo.errisinstance(fm.FixtureLookupError): + return excinfo.value.formatrepr() + tbfilter = True + if self.config.option.fulltrace: + style = "long" + else: + tb = _pytest._code.Traceback([excinfo.traceback[-1]]) + self._prunetraceback(excinfo) + if len(excinfo.traceback) == 0: + excinfo.traceback = tb + tbfilter = False # prunetraceback already does it + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? + if style is None: + if self.config.option.tbstyle == "short": + style = "short" + else: + style = "long" + + try: + os.getcwd() + abspath = False + except OSError: + abspath = True + + return excinfo.getrepr(funcargs=True, abspath=abspath, + showlocals=self.config.option.showlocals, + style=style, tbfilter=tbfilter) + + repr_failure = _repr_failure_py + + +class Collector(Node): + """ Collector instances create children through collect() + and thus iteratively build a tree. + """ + + class CollectError(Exception): + """ an error during collection, contains a custom message. 
""" + + def collect(self): + """ returns a list of children (items and collectors) + for this collection node. + """ + raise NotImplementedError("abstract") + + def repr_failure(self, excinfo): + """ represent a collection failure. """ + if excinfo.errisinstance(self.CollectError): + exc = excinfo.value + return str(exc.args[0]) + return self._repr_failure_py(excinfo, style="short") + + def _prunetraceback(self, excinfo): + if hasattr(self, 'fspath'): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.fspath) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + excinfo.traceback = ntraceback.filter() + + +def _check_initialpaths_for_relpath(session, fspath): + for initial_path in session._initialpaths: + if fspath.common(initial_path) == initial_path: + return fspath.relto(initial_path.dirname) + + +class FSCollector(Collector): + def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None): + fspath = py.path.local(fspath) # xxx only for test_resultlog.py? + name = fspath.basename + if parent is not None: + rel = fspath.relto(parent.fspath) + if rel: + name = rel + name = name.replace(os.sep, SEP) + self.fspath = fspath + + session = session or parent.session + + if nodeid is None: + nodeid = self.fspath.relto(session.config.rootdir) + + if not nodeid: + nodeid = _check_initialpaths_for_relpath(session, fspath) + if os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super(FSCollector, self).__init__(name, parent, config, session, nodeid=nodeid, fspath=fspath) + + +class File(FSCollector): + """ base class for collecting tests from a file. """ + + +class Item(Node): + """ a basic test invocation item. Note that for a single function + there might be multiple test invocation items. + """ + nextitem = None + + def __init__(self, name, parent=None, config=None, session=None, nodeid=None): + super(Item, self).__init__(name, parent, config, session, nodeid=nodeid) + self._report_sections = [] + + #: user properties is a list of tuples (name, value) that holds user + #: defined properties for this test. + self.user_properties = [] + + def add_report_section(self, when, key, content): + """ + Adds a new report section, similar to what's done internally to add stdout and + stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + + :param str content: + The full contents as a string. 
+ """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self): + return self.fspath, None, "" + + @property + def location(self): + try: + return self._location + except AttributeError: + location = self.reportinfo() + # bestrelpath is a quite slow function + cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) + try: + fspath = cache[location[0]] + except KeyError: + fspath = self.session.fspath.bestrelpath(location[0]) + cache[location[0]] = fspath + location = (fspath, location[1], str(location[2])) + self._location = location + return location diff --git a/_pytest/nose.py b/_pytest/nose.py index 9d4fc0b6e..c81542ead 100644 --- a/_pytest/nose.py +++ b/_pytest/nose.py @@ -3,7 +3,6 @@ from __future__ import absolute_import, division, print_function import sys -import py from _pytest import unittest, runner, python from _pytest.config import hookimpl @@ -38,14 +37,15 @@ def pytest_runtest_setup(item): if not call_optional(item.obj, 'setup'): # call module level setup if there is no object level one call_optional(item.parent.obj, 'setup') - #XXX this implies we only call teardown when setup worked + # XXX this implies we only call teardown when setup worked item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) + def teardown_nose(item): if is_potential_nosetest(item): if not call_optional(item.obj, 'teardown'): call_optional(item.parent.obj, 'teardown') - #if hasattr(item.parent, '_nosegensetup'): + # if hasattr(item.parent, '_nosegensetup'): # #call_optional(item._nosegensetup, 'teardown') # del item.parent._nosegensetup @@ -65,7 +65,7 @@ def is_potential_nosetest(item): def call_optional(obj, name): method = getattr(obj, name, None) isfixture = hasattr(method, "_pytestfixturefunction") - if method is not None and not isfixture and py.builtin.callable(method): + if method is not None and not isfixture and callable(method): # If there's any problems allow the exception to raise rather than # silently ignoring them method() diff --git a/_pytest/outcomes.py b/_pytest/outcomes.py new file mode 100644 index 000000000..7f0c18fa6 --- /dev/null +++ b/_pytest/outcomes.py @@ -0,0 +1,147 @@ +""" +exception classes and constants handling test outcomes +as well as functions creating them +""" +from __future__ import absolute_import, division, print_function +import py +import sys + + +class OutcomeException(BaseException): + """ OutcomeException and its subclass instances indicate and + contain info about test and collection outcomes. 
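# Illustrative conftest sketch (not part of the patch): attaching an extra
# report section from a hook wrapper via ``Item.add_report_section``; it shows
# up alongside the captured stdout/stderr sections of the report.  The section
# name "timing" is made up.
import time
import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    started = time.time()
    yield
    item.add_report_section("call", "timing",
                            "test body took %.3fs" % (time.time() - started))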
+ """ + def __init__(self, msg=None, pytrace=True): + BaseException.__init__(self, msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self): + if self.msg: + val = self.msg + if isinstance(val, bytes): + val = py._builtin._totext(val, errors='replace') + return val + return "<%s instance>" % (self.__class__.__name__,) + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = 'builtins' + + def __init__(self, msg=None, pytrace=True, allow_module_level=False): + OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + + +class Failed(OutcomeException): + """ raised from an explicit call to pytest.fail() """ + __module__ = 'builtins' + + +class Exit(KeyboardInterrupt): + """ raised for immediate program exits (no tracebacks/summaries)""" + def __init__(self, msg="unknown reason"): + self.msg = msg + KeyboardInterrupt.__init__(self, msg) + +# exposed helper methods + + +def exit(msg): + """ exit testing process as if KeyboardInterrupt was triggered. """ + __tracebackhide__ = True + raise Exit(msg) + + +exit.Exception = Exit + + +def skip(msg="", **kwargs): + """ skip an executing test with the given message. Note: it's usually + better to use the pytest.mark.skipif marker to declare a test to be + skipped under certain conditions like mismatching platforms or + dependencies. See the pytest_skipping plugin for details. + + :kwarg bool allow_module_level: allows this function to be called at + module level, skipping the rest of the module. Default to False. + """ + __tracebackhide__ = True + allow_module_level = kwargs.pop('allow_module_level', False) + if kwargs: + keys = [k for k in kwargs.keys()] + raise TypeError('unexpected keyword arguments: {0}'.format(keys)) + raise Skipped(msg=msg, allow_module_level=allow_module_level) + + +skip.Exception = Skipped + + +def fail(msg="", pytrace=True): + """ explicitly fail an currently-executing test with the given Message. + + :arg pytrace: if false the msg represents the full failure information + and no python traceback will be reported. + """ + __tracebackhide__ = True + raise Failed(msg=msg, pytrace=pytrace) + + +fail.Exception = Failed + + +class XFailed(fail.Exception): + """ raised from an explicit call to pytest.xfail() """ + + +def xfail(reason=""): + """ xfail an executing test or setup functions with the given reason.""" + __tracebackhide__ = True + raise XFailed(reason) + + +xfail.Exception = XFailed + + +def importorskip(modname, minversion=None): + """ return imported module if it has at least "minversion" as its + __version__ attribute. If no minversion is specified the a skip + is only triggered if the module can not be imported. 
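# Illustrative example (not part of the patch): the outcome helpers above as
# used from test code; each raises its matching exception class
# (``skip.Exception`` is ``Skipped``, ``fail.Exception`` is ``Failed``).
# The platform conditions are only there to give the calls something to do.
import sys
import pytest

if sys.platform == "win32":
    pytest.skip("POSIX-only example module", allow_module_level=True)

def test_outcome_helpers():
    if not hasattr(sys, "getrefcount"):
        pytest.xfail("interpreter does not expose reference counts")
    if sys.maxsize < 2 ** 32:
        pytest.fail("needs a 64-bit interpreter", pytrace=False)
    assert sys.maxsize >= 2 ** 32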
+ """ + import warnings + __tracebackhide__ = True + compile(modname, '', 'eval') # to catch syntaxerrors + should_skip = False + + with warnings.catch_warnings(): + # make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file + warnings.simplefilter('ignore') + try: + __import__(modname) + except ImportError: + # Do not raise chained exception here(#1485) + should_skip = True + if should_skip: + raise Skipped("could not import %r" % (modname,), allow_module_level=True) + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, '__version__', None) + if minversion is not None: + try: + from pkg_resources import parse_version as pv + except ImportError: + raise Skipped("we have a required version for %r but can not import " + "pkg_resources to parse version strings." % (modname,), + allow_module_level=True) + if verattr is None or pv(verattr) < pv(minversion): + raise Skipped("module %r has __version__ %r, required is: %r" % ( + modname, verattr, minversion), allow_module_level=True) + return mod diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py index 6f3ce8fed..b588b021b 100644 --- a/_pytest/pastebin.py +++ b/_pytest/pastebin.py @@ -2,6 +2,7 @@ from __future__ import absolute_import, division, print_function import pytest +import six import sys import tempfile @@ -9,14 +10,13 @@ import tempfile def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group._addoption('--pastebin', metavar="mode", - action='store', dest="pastebin", default=None, - choices=['failed', 'all'], - help="send failed|all info to bpaste.net pastebin service.") + action='store', dest="pastebin", default=None, + choices=['failed', 'all'], + help="send failed|all info to bpaste.net pastebin service.") @pytest.hookimpl(trylast=True) def pytest_configure(config): - import py if config.option.pastebin == "all": tr = config.pluginmanager.getplugin('terminalreporter') # if no terminal reporter plugin is present, nothing we can do here; @@ -29,7 +29,7 @@ def pytest_configure(config): def tee_write(s, **kwargs): oldwrite(s, **kwargs) - if py.builtin._istext(s): + if isinstance(s, six.text_type): s = s.encode('utf-8') config._pastebinfile.write(s) @@ -97,4 +97,4 @@ def pytest_terminal_summary(terminalreporter): s = tw.stringio.getvalue() assert len(s) pastebinurl = create_new_paste(s) - tr.write_line("%s --> %s" %(msg, pastebinurl)) + tr.write_line("%s --> %s" % (msg, pastebinurl)) diff --git a/_pytest/pytester.py b/_pytest/pytester.py index 901caa340..c14a34d7e 100644 --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -1,4 +1,4 @@ -""" (disabled by default) support for testing pytest and pytest plugins. 
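# Illustrative example (not part of the patch): ``importorskip`` at module
# level skips the whole module when the dependency is missing or older than
# ``minversion``; ``docutils`` is just a stand-in optional dependency.
import pytest

docutils = pytest.importorskip("docutils", minversion="0.12")

def test_optional_dependency_available():
    # only runs when docutils >= 0.12 could actually be imported
    assert hasattr(docutils, "__version__")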
""" +"""(disabled by default) support for testing pytest and pytest plugins.""" from __future__ import absolute_import, division, print_function import codecs @@ -7,6 +7,7 @@ import os import platform import re import subprocess +import six import sys import time import traceback @@ -22,27 +23,26 @@ from _pytest.main import Session, EXIT_OK from _pytest.assertion.rewrite import AssertionRewritingHook +PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace("$py.class", ".py") + + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + u'/var/lib/sss/mc/passwd' +] + + def pytest_addoption(parser): - # group = parser.getgroup("pytester", "pytester (self-tests) options") parser.addoption('--lsof', - action="store_true", dest="lsof", default=False, - help=("run FD checks if lsof is available")) + action="store_true", dest="lsof", default=False, + help=("run FD checks if lsof is available")) parser.addoption('--runpytest', default="inprocess", dest="runpytest", - choices=("inprocess", "subprocess", ), - help=("run pytest sub runs in tests using an 'inprocess' " - "or 'subprocess' (python -m main) method")) + choices=("inprocess", "subprocess"), + help=("run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method")) def pytest_configure(config): - # This might be called multiple times. Only take the first. - global _pytest_fullpath - try: - _pytest_fullpath - except NameError: - _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc")) - _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py") - if config.getvalue("lsof"): checker = LsofFdLeakChecker() if checker.matching_platform(): @@ -62,7 +62,7 @@ class LsofFdLeakChecker(object): def _parse_lsof_output(self, out): def isopen(line): return line.startswith('f') and ("deleted" not in line and - 'mem' not in line and "txt" not in line and 'cwd' not in line) + 'mem' not in line and "txt" not in line and 'cwd' not in line) open_files = [] @@ -71,6 +71,8 @@ class LsofFdLeakChecker(object): fields = line.split('\0') fd = fields[0][1:] filename = fields[1][1:] + if filename in IGNORE_PAM: + continue if filename.startswith('/'): open_files.append((fd, filename)) @@ -80,8 +82,8 @@ class LsofFdLeakChecker(object): try: py.process.cmdexec("lsof -v") except (py.process.cmdexec.Error, UnicodeDecodeError): - # cmdexec may raise UnicodeDecodeError on Windows systems - # with locale other than english: + # cmdexec may raise UnicodeDecodeError on Windows systems with + # locale other than English: # https://bitbucket.org/pytest-dev/py/issues/66 return False else: @@ -114,14 +116,12 @@ class LsofFdLeakChecker(object): # XXX copied from execnet's conftest.py - needs to be merged winpymap = { 'python2.7': r'C:\Python27\python.exe', - 'python2.6': r'C:\Python26\python.exe', - 'python3.1': r'C:\Python31\python.exe', - 'python3.2': r'C:\Python32\python.exe', - 'python3.3': r'C:\Python33\python.exe', 'python3.4': r'C:\Python34\python.exe', 'python3.5': r'C:\Python35\python.exe', + 'python3.6': r'C:\Python36\python.exe', } + def getexecutable(name, cache={}): try: return cache[name] @@ -130,21 +130,21 @@ def getexecutable(name, cache={}): if executable: import subprocess popen = subprocess.Popen([str(executable), "--version"], - universal_newlines=True, stderr=subprocess.PIPE) + universal_newlines=True, stderr=subprocess.PIPE) out, err = popen.communicate() if name == "jython": if not err or "2.5" not in err: executable = None if "2.5.2" in err: - executable = None # 
http://bugs.jython.org/issue1790 + executable = None # http://bugs.jython.org/issue1790 elif popen.returncode != 0: - # Handle pyenv's 127. + # handle pyenv's 127 executable = None cache[name] = executable return executable -@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4", - 'pypy', 'pypy3']) + +@pytest.fixture(params=['python2.7', 'python3.4', 'pypy', 'pypy3']) def anypython(request): name = request.param executable = getexecutable(name) @@ -159,15 +159,19 @@ def anypython(request): return executable # used at least by pytest-xdist plugin + + @pytest.fixture def _pytest(request): - """ Return a helper which offers a gethookrecorder(hook) - method which returns a HookRecorder instance which helps - to make assertions about called hooks. + """Return a helper which offers a gethookrecorder(hook) method which + returns a HookRecorder instance which helps to make assertions about called + hooks. + """ return PytestArg(request) -class PytestArg: + +class PytestArg(object): def __init__(self, request): self.request = request @@ -177,12 +181,12 @@ class PytestArg: return hookrecorder -def get_public_names(l): - """Only return names from iterator l without a leading underscore.""" - return [x for x in l if x[0] != "_"] +def get_public_names(values): + """Only return names from iterator values without a leading underscore.""" + return [x for x in values if x[0] != "_"] -class ParsedCall: +class ParsedCall(object): def __init__(self, name, kwargs): self.__dict__.update(kwargs) self._name = name @@ -190,14 +194,14 @@ class ParsedCall: def __repr__(self): d = self.__dict__.copy() del d['_name'] - return "" %(self._name, d) + return "" % (self._name, d) -class HookRecorder: +class HookRecorder(object): """Record all hooks called in a plugin manager. - This wraps all the hook calls in the plugin manager, recording - each call before propagating the normal calls. + This wraps all the hook calls in the plugin manager, recording each call + before propagating the normal calls. """ @@ -253,9 +257,9 @@ class HookRecorder: pytest.fail("\n".join(lines)) def getcall(self, name): - l = self.getcalls(name) - assert len(l) == 1, (name, l) - return l[0] + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] # functionality for test reports @@ -264,9 +268,9 @@ class HookRecorder: return [x.report for x in self.getcalls(names)] def matchreport(self, inamepart="", - names="pytest_runtest_logreport pytest_collectreport", when=None): - """ return a testreport whose dotted import path matches """ - l = [] + names="pytest_runtest_logreport pytest_collectreport", when=None): + """return a testreport whose dotted import path matches""" + values = [] for rep in self.getreports(names=names): try: if not when and rep.when != "call" and rep.passed: @@ -277,14 +281,14 @@ class HookRecorder: if when and getattr(rep, 'when', None) != when: continue if not inamepart or inamepart in rep.nodeid.split("::"): - l.append(rep) - if not l: + values.append(rep) + if not values: raise ValueError("could not find test report matching %r: " "no test reports at all!" 
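# Illustrative example (not part of the patch): a pytest self-test driving
# ``testdir.inline_run`` and inspecting the resulting HookRecorder with the
# helpers above (``assertoutcome``, ``getfailures``, ``matchreport``).
def test_hookrecorder_usage(testdir):
    testdir.makepyfile("""
        def test_ok():
            pass

        def test_broken():
            assert 0
    """)
    rec = testdir.inline_run()
    rec.assertoutcome(passed=1, failed=1)
    failures = rec.getfailures("pytest_runtest_logreport")
    assert [rep.nodeid.split("::")[-1] for rep in failures] == ["test_broken"]
    assert rec.matchreport("test_ok").passed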
% (inamepart,)) - if len(l) > 1: + if len(values) > 1: raise ValueError( - "found 2 or more testreports matching %r: %s" %(inamepart, l)) - return l[0] + "found 2 or more testreports matching %r: %s" % (inamepart, values)) + return values[0] def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'): @@ -298,7 +302,7 @@ class HookRecorder: skipped = [] failed = [] for rep in self.getreports( - "pytest_collectreport pytest_runtest_logreport"): + "pytest_collectreport pytest_runtest_logreport"): if rep.passed: if getattr(rep, "when", None) == "call": passed.append(rep) @@ -337,21 +341,24 @@ def testdir(request, tmpdir_factory): rex_outcome = re.compile(r"(\d+) ([\w-]+)") -class RunResult: + + +class RunResult(object): """The result of running a command. Attributes: - :ret: The return value. - :outlines: List of lines captured from stdout. - :errlines: List of lines captures from stderr. + :ret: the return value + :outlines: list of lines captured from stdout + :errlines: list of lines captures from stderr :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to - reconstruct stdout or the commonly used - ``stdout.fnmatch_lines()`` method. - :stderrr: :py:class:`LineMatcher` of stderr. - :duration: Duration in seconds. + reconstruct stdout or the commonly used ``stdout.fnmatch_lines()`` + method + :stderr: :py:class:`LineMatcher` of stderr + :duration: duration in seconds """ + def __init__(self, ret, outlines, errlines, duration): self.ret = ret self.outlines = outlines @@ -361,8 +368,10 @@ class RunResult: self.duration = duration def parseoutcomes(self): - """ Return a dictionary of outcomestring->num from parsing - the terminal output that the test process produced.""" + """Return a dictionary of outcomestring->num from parsing the terminal + output that the test process produced. + + """ for line in reversed(self.outlines): if 'seconds' in line: outcomes = rex_outcome.findall(line) @@ -373,54 +382,78 @@ class RunResult: return d raise ValueError("Pytest terminal report not found") - def assert_outcomes(self, passed=0, skipped=0, failed=0): - """ assert that the specified outcomes appear with the respective - numbers (0 means it didn't occur) in the text output from a test run.""" + def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0): + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run. 
+ + """ d = self.parseoutcomes() - assert passed == d.get("passed", 0) - assert skipped == d.get("skipped", 0) - assert failed == d.get("failed", 0) + obtained = { + 'passed': d.get('passed', 0), + 'skipped': d.get('skipped', 0), + 'failed': d.get('failed', 0), + 'error': d.get('error', 0), + } + assert obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error) +class CwdSnapshot(object): + def __init__(self): + self.__saved = os.getcwd() -class Testdir: + def restore(self): + os.chdir(self.__saved) + + +class SysModulesSnapshot(object): + def __init__(self, preserve=None): + self.__preserve = preserve + self.__saved = dict(sys.modules) + + def restore(self): + if self.__preserve: + self.__saved.update( + (k, m) for k, m in sys.modules.items() if self.__preserve(k)) + sys.modules.clear() + sys.modules.update(self.__saved) + + +class SysPathsSnapshot(object): + def __init__(self): + self.__saved = list(sys.path), list(sys.meta_path) + + def restore(self): + sys.path[:], sys.meta_path[:] = self.__saved + + +class Testdir(object): """Temporary test directory with tools to test/run pytest itself. - This is based on the ``tmpdir`` fixture but provides a number of - methods which aid with testing pytest itself. Unless - :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as - current working directory. + This is based on the ``tmpdir`` fixture but provides a number of methods + which aid with testing pytest itself. Unless :py:meth:`chdir` is used all + methods will use :py:attr:`tmpdir` as their current working directory. Attributes: - :tmpdir: The :py:class:`py.path.local` instance of the temporary - directory. + :tmpdir: The :py:class:`py.path.local` instance of the temporary directory. :plugins: A list of plugins to use with :py:meth:`parseconfig` and - :py:meth:`runpytest`. Initially this is an empty list but - plugins can be added to the list. The type of items to add to - the list depend on the method which uses them so refer to them - for details. + :py:meth:`runpytest`. Initially this is an empty list but plugins can + be added to the list. The type of items to add to the list depends on + the method using them so refer to them for details. """ def __init__(self, request, tmpdir_factory): self.request = request - self._mod_collections = WeakKeyDictionary() - # XXX remove duplication with tmpdir plugin - basetmp = tmpdir_factory.ensuretemp("testdir") + self._mod_collections = WeakKeyDictionary() name = request.function.__name__ - for i in range(100): - try: - tmpdir = basetmp.mkdir(name + str(i)) - except py.error.EEXIST: - continue - break - self.tmpdir = tmpdir + self.tmpdir = tmpdir_factory.mktemp(name, numbered=True) self.plugins = [] - self._savesyspath = (list(sys.path), list(sys.meta_path)) - self._savemodulekeys = set(sys.modules) - self.chdir() # always chdir + self._cwd_snapshot = CwdSnapshot() + self._sys_path_snapshot = SysPathsSnapshot() + self._sys_modules_snapshot = self.__take_sys_modules_snapshot() + self.chdir() self.request.addfinalizer(self.finalize) method = self.request.config.getoption("--runpytest") if method == "inprocess": @@ -434,29 +467,22 @@ class Testdir: def finalize(self): """Clean up global state artifacts. - Some methods modify the global interpreter state and this - tries to clean this up. It does not remove the temporary - directory however so it can be looked at after the test run - has finished. + Some methods modify the global interpreter state and this tries to + clean this up. 
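# Illustrative example (not part of the patch): the extended
# ``RunResult.assert_outcomes`` (now also checking ``error``) against a run
# that produces one outcome of each kind.
def test_assert_outcomes_usage(testdir):
    testdir.makepyfile("""
        import pytest

        @pytest.fixture
        def broken_fixture():
            raise RuntimeError("setup went wrong")

        def test_pass():
            pass

        def test_fail():
            assert 0

        @pytest.mark.skip(reason="not today")
        def test_skip():
            pass

        def test_error(broken_fixture):
            pass
    """)
    result = testdir.runpytest()
    assert result.ret != 0
    result.assert_outcomes(passed=1, failed=1, skipped=1, error=1)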
It does not remove the temporary directory however so + it can be looked at after the test run has finished. """ - sys.path[:], sys.meta_path[:] = self._savesyspath - if hasattr(self, '_olddir'): - self._olddir.chdir() - self.delete_loaded_modules() + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + self._cwd_snapshot.restore() - def delete_loaded_modules(self): - """Delete modules that have been loaded during a test. - - This allows the interpreter to catch module changes in case - the module is re-imported. - """ - for name in set(sys.modules).difference(self._savemodulekeys): - # some zope modules used by twisted-related tests keeps internal - # state and can't be deleted; we had some trouble in the past - # with zope.interface for example - if not name.startswith("zope"): - del sys.modules[name] + def __take_sys_modules_snapshot(self): + # some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example + def preserve_module(name): + return name.startswith("zope") + return SysModulesSnapshot(preserve=preserve_module) def make_hook_recorder(self, pluginmanager): """Create a new :py:class:`HookRecorder` for a PluginManager.""" @@ -471,33 +497,26 @@ class Testdir: This is done automatically upon instantiation. """ - old = self.tmpdir.chdir() - if not hasattr(self, '_olddir'): - self._olddir = old + self.tmpdir.chdir() - def _makefile(self, ext, args, kwargs, encoding="utf-8"): + def _makefile(self, ext, args, kwargs, encoding='utf-8'): items = list(kwargs.items()) + + def to_text(s): + return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s) + if args: - source = py.builtin._totext("\n").join( - map(py.builtin._totext, args)) + py.builtin._totext("\n") + source = u"\n".join(to_text(x) for x in args) basename = self.request.function.__name__ items.insert(0, (basename, source)) + ret = None - for name, value in items: - p = self.tmpdir.join(name).new(ext=ext) + for basename, value in items: + p = self.tmpdir.join(basename).new(ext=ext) p.dirpath().ensure_dir() source = Source(value) - - def my_totext(s, encoding="utf-8"): - if py.builtin._isbytes(s): - s = py.builtin._totext(s, encoding=encoding) - return s - - source_unicode = "\n".join([my_totext(line) for line in source.lines]) - source = py.builtin._totext(source_unicode) - content = source.strip().encode(encoding) # + "\n" - #content = content.rstrip() + "\n" - p.write(content, "wb") + source = u"\n".join(to_text(line) for line in source.lines) + p.write(source.strip().encode(encoding), "wb") if ret is None: ret = p return ret @@ -505,17 +524,15 @@ class Testdir: def makefile(self, ext, *args, **kwargs): """Create a new file in the testdir. - ext: The extension the file should use, including the dot. - E.g. ".py". + ext: The extension the file should use, including the dot, e.g. `.py`. - args: All args will be treated as strings and joined using - newlines. The result will be written as contents to the - file. The name of the file will be based on the test - function requesting this fixture. + args: All args will be treated as strings and joined using newlines. + The result will be written as contents to the file. The name of the + file will be based on the test function requesting this fixture. E.g. "testdir.makefile('.txt', 'line1', 'line2')" - kwargs: Each keyword is the name of a file, while the value of - it will be written as contents of the file. 
+ kwargs: Each keyword is the name of a file, while the value of it will + be written as contents of the file. E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')" """ @@ -545,14 +562,16 @@ class Testdir: def syspathinsert(self, path=None): """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. - This is undone automatically after the test. + This is undone automatically when this object dies at the end of each + test. + """ if path is None: path = self.tmpdir sys.path.insert(0, str(path)) - # a call to syspathinsert() usually means that the caller - # wants to import some dynamically created files. - # with python3 we thus invalidate import caches. + # a call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches self._possibly_invalidate_import_caches() def _possibly_invalidate_import_caches(self): @@ -572,8 +591,8 @@ class Testdir: def mkpydir(self, name): """Create a new python package. - This creates a (sub)directory with an empty ``__init__.py`` - file so that is recognised as a python package. + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a python package. """ p = self.mkdir(name) @@ -581,14 +600,15 @@ class Testdir: return p Session = Session + def getnode(self, config, arg): """Return the collection node of a file. :param config: :py:class:`_pytest.config.Config` instance, see - :py:meth:`parseconfig` and :py:meth:`parseconfigure` to - create the configuration. + :py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the + configuration - :param arg: A :py:class:`py.path.local` instance of the file. + :param arg: a :py:class:`py.path.local` instance of the file """ session = Session(config) @@ -602,11 +622,10 @@ class Testdir: def getpathnode(self, path): """Return the collection node of a file. - This is like :py:meth:`getnode` but uses - :py:meth:`parseconfigure` to create the (configured) pytest - Config instance. + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. - :param path: A :py:class:`py.path.local` instance of the file. + :param path: a :py:class:`py.path.local` instance of the file """ config = self.parseconfigure(path) @@ -620,8 +639,8 @@ class Testdir: def genitems(self, colitems): """Generate all test items from a collection node. - This recurses into the collection node and returns a list of - all the test items contained within. + This recurses into the collection node and returns a list of all the + test items contained within. """ session = colitems[0].session @@ -633,10 +652,10 @@ class Testdir: def runitem(self, source): """Run the "test_func" Item. - The calling test instance (the class which contains the test - method) must provide a ``.getrunner()`` method which should - return a runner which can run the test protocol for a single - item, like e.g. :py:func:`_pytest.runner.runtestprotocol`. + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + :py:func:`_pytest.runner.runtestprotocol`. """ # used from runner functional tests @@ -650,30 +669,26 @@ class Testdir: """Run a test module in process using ``pytest.main()``. This run writes "source" into a temporary file and runs - ``pytest.main()`` on it, returning a :py:class:`HookRecorder` - instance for the result. 
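# Illustrative example (not part of the patch): building a small project
# layout with the Testdir helpers documented above and importing it thanks to
# ``syspathinsert``.  The ``mylib`` package name is made up.
def test_testdir_layout_helpers(testdir):
    testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n")     # named file
    pkg = testdir.mkpydir("mylib")                                 # package with __init__.py
    pkg.join("calc.py").write("def double(x):\n    return 2 * x\n")
    testdir.syspathinsert()                                        # make ``mylib`` importable
    testdir.makepyfile("""
        from mylib.calc import double

        def test_double():
            assert double(21) == 42
    """)
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)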
+ ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. - :param source: The source code of the test module. + :param source: the source code of the test module - :param cmdlineargs: Any extra command line arguments to use. + :param cmdlineargs: any extra command line arguments to use - :return: :py:class:`HookRecorder` instance of the result. + :return: :py:class:`HookRecorder` instance of the result """ p = self.makepyfile(source) - l = list(cmdlineargs) + [p] - return self.inline_run(*l) + values = list(cmdlineargs) + [p] + return self.inline_run(*values) def inline_genitems(self, *args): """Run ``pytest.main(['--collectonly'])`` in-process. - Returns a tuple of the collected items and a - :py:class:`HookRecorder` instance. - - This runs the :py:func:`pytest.main` function to run all of - pytest inside the test process itself like - :py:meth:`inline_run`. However the return value is a tuple of - the collection items and a :py:class:`HookRecorder` instance. + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. """ rec = self.inline_run("--collect-only", *args) @@ -683,60 +698,78 @@ class Testdir: def inline_run(self, *args, **kwargs): """Run ``pytest.main()`` in-process, returning a HookRecorder. - This runs the :py:func:`pytest.main` function to run all of - pytest inside the test process itself. This means it can - return a :py:class:`HookRecorder` instance which gives more - detailed results from then run then can be done by matching - stdout/stderr from :py:meth:`runpytest`. + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. - :param args: Any command line arguments to pass to - :py:func:`pytest.main`. + :param args: command line arguments to pass to :py:func:`pytest.main` - :param plugin: (keyword-only) Extra plugin instances the - ``pytest.main()`` instance should use. + :param plugin: (keyword-only) extra plugin instances the + ``pytest.main()`` instance should use + + :return: a :py:class:`HookRecorder` instance - :return: A :py:class:`HookRecorder` instance. """ - # When running py.test inline any plugins active in the main - # test process are already imported. So this disables the - # warning which will trigger to say they can no longer be - # re-written, which is fine as they are already re-written. - orig_warn = AssertionRewritingHook._warn_already_imported + finalizers = [] + try: + # When running py.test inline any plugins active in the main test + # process are already imported. So this disables the warning which + # will trigger to say they can no longer be rewritten, which is + # fine as they have already been rewritten. 
+ orig_warn = AssertionRewritingHook._warn_already_imported - def revert(): - AssertionRewritingHook._warn_already_imported = orig_warn + def revert_warn_already_imported(): + AssertionRewritingHook._warn_already_imported = orig_warn + finalizers.append(revert_warn_already_imported) + AssertionRewritingHook._warn_already_imported = lambda *a: None - self.request.addfinalizer(revert) - AssertionRewritingHook._warn_already_imported = lambda *a: None + # Any sys.module or sys.path changes done while running py.test + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) - rec = [] + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically - class Collect: - def pytest_configure(x, config): - rec.append(self.make_hook_recorder(config.pluginmanager)) + rec = [] - plugins = kwargs.get("plugins") or [] - plugins.append(Collect()) - ret = pytest.main(list(args), plugins=plugins) - self.delete_loaded_modules() - if len(rec) == 1: - reprec = rec.pop() - else: - class reprec: - pass - reprec.ret = ret + class Collect(object): + def pytest_configure(x, config): + rec.append(self.make_hook_recorder(config.pluginmanager)) - # typically we reraise keyboard interrupts from the child run - # because it's our user requesting interruption of the testing - if ret == 2 and not kwargs.get("no_reraise_ctrlc"): - calls = reprec.getcalls("pytest_keyboard_interrupt") - if calls and calls[-1].excinfo.type == KeyboardInterrupt: - raise KeyboardInterrupt() - return reprec + plugins = kwargs.get("plugins") or [] + plugins.append(Collect()) + ret = pytest.main(list(args), plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + class reprec(object): + pass + reprec.ret = ret + + # typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing + if ret == 2 and not kwargs.get("no_reraise_ctrlc"): + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() def runpytest_inprocess(self, *args, **kwargs): - """ Return result of running pytest in-process, providing a similar - interface to what self.runpytest() provides. """ + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides. 
+ + """ if kwargs.get("syspathinsert"): self.syspathinsert() now = time.time() @@ -747,13 +780,13 @@ class Testdir: reprec = self.inline_run(*args, **kwargs) except SystemExit as e: - class reprec: + class reprec(object): ret = e.args[0] except Exception: traceback.print_exc() - class reprec: + class reprec(object): ret = 3 finally: out, err = capture.readouterr() @@ -763,12 +796,12 @@ class Testdir: res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), - time.time()-now) + time.time() - now) res.reprec = reprec return res def runpytest(self, *args, **kwargs): - """ Run pytest inline or in a subprocess, depending on the command line + """Run pytest inline or in a subprocess, depending on the command line option "--runpytest" and return a :py:class:`RunResult`. """ @@ -779,23 +812,23 @@ class Testdir: args = [str(x) for x in args] for x in args: if str(x).startswith('--basetemp'): - #print ("basedtemp exists: %s" %(args,)) + # print("basedtemp exists: %s" %(args,)) break else: args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp')) - #print ("added basetemp: %s" %(args,)) + # print("added basetemp: %s" %(args,)) return args def parseconfig(self, *args): """Return a new pytest Config instance from given commandline args. - This invokes the pytest bootstrapping code in _pytest.config - to create a new :py:class:`_pytest.core.PluginManager` and - call the pytest_cmdline_parse hook to create new + This invokes the pytest bootstrapping code in _pytest.config to create + a new :py:class:`_pytest.core.PluginManager` and call the + pytest_cmdline_parse hook to create a new :py:class:`_pytest.config.Config` instance. - If :py:attr:`plugins` has been populated they should be plugin - modules which will be registered with the PluginManager. + If :py:attr:`plugins` has been populated they should be plugin modules + to be registered with the PluginManager. """ args = self._ensure_basetemp(args) @@ -811,9 +844,8 @@ class Testdir: def parseconfigure(self, *args): """Return a new pytest configured Config instance. - This returns a new :py:class:`_pytest.config.Config` instance - like :py:meth:`parseconfig`, but also calls the - pytest_configure hook. + This returns a new :py:class:`_pytest.config.Config` instance like + :py:meth:`parseconfig`, but also calls the pytest_configure hook. """ config = self.parseconfig(*args) @@ -821,57 +853,56 @@ class Testdir: self.request.addfinalizer(config._ensure_unconfigure) return config - def getitem(self, source, funcname="test_func"): + def getitem(self, source, funcname="test_func"): """Return the test item for a test function. - This writes the source to a python file and runs pytest's - collection on the resulting module, returning the test item - for the requested function name. + This writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. - :param source: The module source. + :param source: the module source - :param funcname: The name of the test function for which the - Item must be returned. + :param funcname: the name of the test function for which to return a + test item """ items = self.getitems(source) for item in items: if item.name == funcname: return item - assert 0, "%r item not found in module:\n%s\nitems: %s" %( + assert 0, "%r item not found in module:\n%s\nitems: %s" % ( funcname, source, items) - def getitems(self, source): + def getitems(self, source): """Return all test items collected from the module. 
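# Illustrative example (not part of the patch): ``Testdir.getitem`` collects a
# module written from ``source`` and hands back the item for one function
# (``test_func`` by default), which is convenient for collection-level tests.
# The ``smoke`` marker name is made up.
def test_getitem_usage(testdir):
    item = testdir.getitem("""
        import pytest

        @pytest.mark.smoke
        def test_func():
            pass

        def test_other():
            pass
    """)
    assert item.name == "test_func"
    assert "smoke" in item.keywords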
- This writes the source to a python file and runs pytest's - collection on the resulting module, returning all test items - contained within. + This writes the source to a python file and runs pytest's collection on + the resulting module, returning all test items contained within. """ modcol = self.getmodulecol(source) return self.genitems([modcol]) - def getmodulecol(self, source, configargs=(), withinit=False): + def getmodulecol(self, source, configargs=(), withinit=False): """Return the module collection node for ``source``. - This writes ``source`` to a file using :py:meth:`makepyfile` - and then runs the pytest collection on it, returning the - collection node for the test module. + This writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. - :param source: The source code of the module to collect. + :param source: the source code of the module to collect - :param configargs: Any extra arguments to pass to - :py:meth:`parseconfigure`. + :param configargs: any extra arguments to pass to + :py:meth:`parseconfigure` - :param withinit: Whether to also write a ``__init__.py`` file - to the temporary directory to ensure it is a package. + :param withinit: whether to also write an ``__init__.py`` file to the + same directory to ensure it is a package """ kw = {self.request.function.__name__: Source(source).strip()} path = self.makepyfile(**kw) if withinit: - self.makepyfile(__init__ = "#") + self.makepyfile(__init__="#") self.config = config = self.parseconfigure(path, *configargs) node = self.getnode(config, path) @@ -880,13 +911,12 @@ class Testdir: def collect_by_name(self, modcol, name): """Return the collection node for name from the module collection. - This will search a module collection node for a collection - node matching the given name. + This will search a module collection node for a collection node + matching the given name. - :param modcol: A module collection node, see - :py:meth:`getmodulecol`. + :param modcol: a module collection node; see :py:meth:`getmodulecol` - :param name: The name of the node to return. + :param name: the name of the node to return """ if modcol not in self._mod_collections: @@ -898,8 +928,8 @@ class Testdir: def popen(self, cmdargs, stdout, stderr, **kw): """Invoke subprocess.Popen. - This calls subprocess.Popen making sure the current working - directory is the PYTHONPATH. + This calls subprocess.Popen making sure the current working directory + is in the PYTHONPATH. You probably want to use :py:meth:`run` instead. @@ -908,14 +938,16 @@ class Testdir: env['PYTHONPATH'] = os.pathsep.join(filter(None, [ str(os.getcwd()), env.get('PYTHONPATH', '')])) kw['env'] = env - return subprocess.Popen(cmdargs, - stdout=stdout, stderr=stderr, **kw) + + popen = subprocess.Popen(cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw) + popen.stdin.close() + + return popen def run(self, *cmdargs): """Run a command with arguments. - Run a process using subprocess.Popen saving the stdout and - stderr. + Run a process using subprocess.Popen saving the stdout and stderr. Returns a :py:class:`RunResult`. 
@@ -933,7 +965,7 @@ class Testdir: try: now = time.time() popen = self.popen(cmdargs, stdout=f1, stderr=f2, - close_fds=(sys.platform != "win32")) + close_fds=(sys.platform != "win32")) ret = popen.wait() finally: f1.close() @@ -948,7 +980,7 @@ class Testdir: f2.close() self._dump_lines(out, sys.stdout) self._dump_lines(err, sys.stderr) - return RunResult(ret, out, err, time.time()-now) + return RunResult(ret, out, err, time.time() - now) def _dump_lines(self, lines, fp): try: @@ -958,14 +990,15 @@ class Testdir: print("couldn't print to %s because of encoding" % (fp,)) def _getpytestargs(self): - # we cannot use "(sys.executable,script)" - # because on windows the script is e.g. a pytest.exe - return (sys.executable, _pytest_fullpath,) # noqa + # we cannot use `(sys.executable, script)` because on Windows the + # script is e.g. `pytest.exe` + return (sys.executable, PYTEST_FULLPATH) # noqa def runpython(self, script): """Run a python script using sys.executable as interpreter. Returns a :py:class:`RunResult`. + """ return self.run(sys.executable, script) @@ -976,25 +1009,18 @@ class Testdir: def runpytest_subprocess(self, *args, **kwargs): """Run pytest as a subprocess with given arguments. - Any plugins added to the :py:attr:`plugins` list will added - using the ``-p`` command line option. Addtionally - ``--basetemp`` is used put any temporary files and directories - in a numbered directory prefixed with "runpytest-" so they do - not conflict with the normal numberd pytest location for - temporary files and directories. + Any plugins added to the :py:attr:`plugins` list will added using the + ``-p`` command line option. Additionally ``--basetemp`` is used put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" so they do not conflict with the normal numbered + pytest location for temporary files and directories. Returns a :py:class:`RunResult`. """ p = py.path.local.make_numbered_dir(prefix="runpytest-", - keep=None, rootdir=self.tmpdir) - args = ('--basetemp=%s' % p, ) + args - #for x in args: - # if '--confcutdir' in str(x): - # break - #else: - # pass - # args = ('--confcutdir=.',) + args + keep=None, rootdir=self.tmpdir) + args = ('--basetemp=%s' % p,) + args plugins = [x for x in self.plugins if isinstance(x, str)] if plugins: args = ('-p', plugins[0]) + args @@ -1004,8 +1030,8 @@ class Testdir: def spawn_pytest(self, string, expect_timeout=10.0): """Run pytest using pexpect. - This makes sure to use the right pytest and sets up the - temporary directory locations. + This makes sure to use the right pytest and sets up the temporary + directory locations. The pexpect child is returned. @@ -1019,6 +1045,7 @@ class Testdir: """Run a command using pexpect. The pexpect child is returned. + """ pexpect = pytest.importorskip("pexpect", "3.0") if hasattr(sys, 'pypy_version_info') and '64' in platform.machine(): @@ -1031,21 +1058,24 @@ class Testdir: child.timeout = expect_timeout return child + def getdecoded(out): - try: - return out.decode("utf-8") - except UnicodeDecodeError: - return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( - py.io.saferepr(out),) + try: + return out.decode("utf-8") + except UnicodeDecodeError: + return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( + py.io.saferepr(out),) -class LineComp: +class LineComp(object): def __init__(self): self.stringio = py.io.TextIO() def assert_contains_lines(self, lines2): - """ assert that lines2 are contained (linearly) in lines1. - return a list of extralines found. 
+ """Assert that lines2 are contained (linearly) in lines1. + + Return a list of extralines found. + """ __tracebackhide__ = True val = self.stringio.getvalue() @@ -1055,18 +1085,18 @@ class LineComp: return LineMatcher(lines1).fnmatch_lines(lines2) -class LineMatcher: +class LineMatcher(object): """Flexible matching of text. This is a convenience class to test large texts like the output of commands. - The constructor takes a list of lines without their trailing - newlines, i.e. ``text.splitlines()``. + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. """ - def __init__(self, lines): + def __init__(self, lines): self.lines = lines self._log_output = [] @@ -1082,16 +1112,34 @@ class LineMatcher: return lines2 def fnmatch_lines_random(self, lines2): + """Check lines exist in the output using in any order. + + Lines are checked using ``fnmatch.fnmatch``. The argument is a list of + lines which have to occur in the output, in any order. + + """ + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2): + """Check lines exist in the output using ``re.match``, in any order. + + The argument is a list of lines which have to occur in the output, in + any order. + + """ + self._match_lines_random(lines2, lambda name, pat: re.match(pat, name)) + + def _match_lines_random(self, lines2, match_func): """Check lines exist in the output. - The argument is a list of lines which have to occur in the - output, in any order. Each line can contain glob whildcards. + The argument is a list of lines which have to occur in the output, in + any order. Each line can contain glob whildcards. """ lines2 = self._getlines(lines2) for line in lines2: for x in self.lines: - if line == x or fnmatch(x, line): + if line == x or match_func(x, line): self._log("matched: ", repr(line)) break else: @@ -1102,10 +1150,11 @@ class LineMatcher: """Return all lines following the given line in the text. The given line can contain glob wildcards. + """ for i, line in enumerate(self.lines): if fnline == line or fnmatch(line, fnline): - return self.lines[i+1:] + return self.lines[i + 1:] raise ValueError("line %r not found in output" % fnline) def _log(self, *args): @@ -1116,12 +1165,36 @@ class LineMatcher: return '\n'.join(self._log_output) def fnmatch_lines(self, lines2): - """Search the text for matching lines. + """Search captured text for matching lines using ``fnmatch.fnmatch``. - The argument is a list of lines which have to match and can - use glob wildcards. If they do not match an pytest.fail() is - called. The matches and non-matches are also printed on - stdout. + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also printed on stdout. + + """ + self._match_lines(lines2, fnmatch, 'fnmatch') + + def re_match_lines(self, lines2): + """Search captured text for matching lines using ``re.match``. + + The argument is a list of lines which have to match using ``re.match``. + If they do not match a pytest.fail() is called. + + The matches and non-matches are also printed on stdout. + + """ + self._match_lines(lines2, lambda name, pat: re.match(pat, name), 're.match') + + def _match_lines(self, lines2, match_func, match_nickname): + """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. + + :param list[str] lines2: list of string patterns to match. 
The actual + format depends on ``match_func`` + :param match_func: a callable ``match_func(line, pattern)`` where line + is the captured line from stdout/stderr and pattern is the matching + pattern + :param str match_nickname: the nickname for the match function that + will be logged to stdout when a match occurs """ lines2 = self._getlines(lines2) @@ -1136,8 +1209,8 @@ class LineMatcher: if line == nextline: self._log("exact match:", repr(line)) break - elif fnmatch(nextline, line): - self._log("fnmatch:", repr(line)) + elif match_func(nextline, line): + self._log("%s:" % match_nickname, repr(line)) self._log(" with:", repr(nextline)) break else: diff --git a/_pytest/python.py b/_pytest/python.py index e10282a8c..94f83a37d 100644 --- a/_pytest/python.py +++ b/_pytest/python.py @@ -6,28 +6,41 @@ import inspect import sys import os import collections +import warnings +from textwrap import dedent from itertools import count + import py +import six from _pytest.mark import MarkerError from _pytest.config import hookimpl import _pytest -import _pytest._pluggy as pluggy +import pluggy from _pytest import fixtures -from _pytest import main +from _pytest import nodes +from _pytest import deprecated from _pytest.compat import ( - isclass, isfunction, is_generator, _escape_strings, + isclass, isfunction, is_generator, ascii_escaped, REGEX_TYPE, STRING_TYPES, NoneType, NOTSET, get_real_func, getfslineno, safe_getattr, safe_str, getlocation, enum, ) -from _pytest.runner import fail -from _pytest.mark import transfer_markers +from _pytest.outcomes import fail +from _pytest.mark.structures import transfer_markers, get_unpacked_marks -cutdir1 = py.path.local(pluggy.__file__.rstrip("oc")) -cutdir2 = py.path.local(_pytest.__file__).dirpath() -cutdir3 = py.path.local(py.__file__).dirpath() + +# relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance +_pluggy_dir = py.path.local(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _pluggy_dir.basename == '__init__.py': + _pluggy_dir = _pluggy_dir.dirpath() +_pytest_dir = py.path.local(_pytest.__file__).dirpath() +_py_dir = py.path.local(py.__file__).dirpath() def filter_traceback(entry): @@ -42,11 +55,10 @@ def filter_traceback(entry): is_generated = '<' in raw_filename and '>' in raw_filename if is_generated: return False - # entry.path might point to an inexisting file, in which case it will - # alsso return a str object. see #1133 + # entry.path might point to an non-existing file, in which case it will + # also return a str object. 
see #1133 p = py.path.local(entry.path) - return p != cutdir1 and not p.relto(cutdir2) and not p.relto(cutdir3) - + return not p.relto(_pluggy_dir) and not p.relto(_pytest_dir) and not p.relto(_py_dir) def pyobj_property(name): @@ -62,8 +74,8 @@ def pyobj_property(name): def pytest_addoption(parser): group = parser.getgroup("general") group.addoption('--fixtures', '--funcargs', - action="store_true", dest="showfixtures", default=False, - help="show available fixtures, sorted by plugin appearance") + action="store_true", dest="showfixtures", default=False, + help="show available fixtures, sorted by plugin appearance") group.addoption( '--fixtures-per-test', action="store_true", @@ -72,20 +84,20 @@ def pytest_addoption(parser): help="show fixtures per test", ) parser.addini("usefixtures", type="args", default=[], - help="list of default fixtures to be used with this project") + help="list of default fixtures to be used with this project") parser.addini("python_files", type="args", - default=['test_*.py', '*_test.py'], - help="glob-style file patterns for Python test module discovery") - parser.addini("python_classes", type="args", default=["Test",], - help="prefixes or glob names for Python test class discovery") - parser.addini("python_functions", type="args", default=["test",], - help="prefixes or glob names for Python test function and " - "method discovery") + default=['test_*.py', '*_test.py'], + help="glob-style file patterns for Python test module discovery") + parser.addini("python_classes", type="args", default=["Test", ], + help="prefixes or glob names for Python test class discovery") + parser.addini("python_functions", type="args", default=["test", ], + help="prefixes or glob names for Python test function and " + "method discovery") group.addoption("--import-mode", default="prepend", - choices=["prepend", "append"], dest="importmode", - help="prepend/append to sys.path when importing test modules, " - "default is to prepend.") + choices=["prepend", "append"], dest="importmode", + help="prepend/append to sys.path when importing test modules, " + "default is to prepend.") def pytest_cmdline_main(config): @@ -105,28 +117,26 @@ def pytest_generate_tests(metafunc): if hasattr(metafunc.function, attr): msg = "{0} has '{1}', spelling should be 'parametrize'" raise MarkerError(msg.format(metafunc.function.__name__, attr)) - try: - markers = metafunc.function.parametrize - except AttributeError: - return - for marker in markers: - metafunc.parametrize(*marker.args, **marker.kwargs) + for marker in metafunc.definition.iter_markers(): + if marker.name == 'parametrize': + metafunc.parametrize(*marker.args, **marker.kwargs) + def pytest_configure(config): config.addinivalue_line("markers", - "parametrize(argnames, argvalues): call a test function multiple " - "times passing in different arguments in turn. argvalues generally " - "needs to be a list of values if argnames specifies only one name " - "or a list of tuples of values if argnames specifies multiple names. " - "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " - "decorated test function, one with arg1=1 and another with arg1=2." - "see http://pytest.org/latest/parametrize.html for more info and " - "examples." - ) + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. 
" + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see http://pytest.org/latest/parametrize.html for more info and " + "examples." + ) config.addinivalue_line("markers", - "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " - "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures " - ) + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures " + ) @hookimpl(trylast=True) @@ -151,13 +161,15 @@ def pytest_collect_file(path, parent): if path.fnmatch(pat): break else: - return + return ihook = parent.session.gethookproxy(path) return ihook.pytest_pycollect_makemodule(path=path, parent=parent) + def pytest_pycollect_makemodule(path, parent): return Module(path, parent) + @hookimpl(hookwrapper=True) def pytest_pycollect_makeitem(collector, name, obj): outcome = yield @@ -176,9 +188,8 @@ def pytest_pycollect_makeitem(collector, name, obj): # or a funtools.wrapped. # We musn't if it's been wrapped with mock.patch (python 2 only) if not (isfunction(obj) or isfunction(get_real_func(obj))): - collector.warn(code="C2", message= - "cannot collect %r because it is not a function." - % name, ) + collector.warn(code="C2", message="cannot collect %r because it is not a function." + % name, ) elif getattr(obj, "__test__", True): if is_generator(obj): res = Generator(name, parent=collector) @@ -186,22 +197,32 @@ def pytest_pycollect_makeitem(collector, name, obj): res = list(collector._genfunctions(name, obj)) outcome.force_result(res) + def pytest_make_parametrize_id(config, val, argname=None): return None - class PyobjContext(object): module = pyobj_property("Module") cls = pyobj_property("Class") instance = pyobj_property("Instance") + class PyobjMixin(PyobjContext): + _ALLOW_MARKERS = True + + def __init__(self, *k, **kw): + super(PyobjMixin, self).__init__(*k, **kw) + def obj(): def fget(self): obj = getattr(self, '_obj', None) if obj is None: self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Instance collector marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) return obj def fset(self, value): @@ -253,7 +274,8 @@ class PyobjMixin(PyobjContext): assert isinstance(lineno, int) return fspath, lineno, modpath -class PyCollector(PyobjMixin, main.Collector): + +class PyCollector(PyobjMixin, nodes.Collector): def funcnamefilter(self, name): return self._matches_prefix_or_glob_option('python_functions', name) @@ -271,10 +293,22 @@ class PyCollector(PyobjMixin, main.Collector): return self._matches_prefix_or_glob_option('python_classes', name) def istestfunction(self, obj, name): - return ( - (self.funcnamefilter(name) or self.isnosetest(obj)) and - safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None - ) + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, staticmethod): + # static methods need to be unwrapped + obj = safe_getattr(obj, '__func__', False) + if obj is False: + # Python 2.6 wraps in a different way that we won't try to handle + msg = "cannot collect static method %r because " \ + "it is not a function (always the case in Python 2.6)" + self.warn( + code="C2", message=msg % name) + return False + return ( + safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None + ) + else: + return False def istestclass(self, 
obj, name): return self.classnamefilter(name) or self.isnosetest(obj) @@ -305,23 +339,27 @@ class PyCollector(PyobjMixin, main.Collector): for basecls in inspect.getmro(self.obj.__class__): dicts.append(basecls.__dict__) seen = {} - l = [] + values = [] for dic in dicts: for name, obj in list(dic.items()): if name in seen: continue seen[name] = True - res = self.makeitem(name, obj) + res = self._makeitem(name, obj) if res is None: continue if not isinstance(res, list): res = [res] - l.extend(res) - l.sort(key=lambda item: item.reportinfo()[:2]) - return l + values.extend(res) + values.sort(key=lambda item: item.reportinfo()[:2]) + return values def makeitem(self, name, obj): - #assert self.ihook.fspath == self.fspath, self + warnings.warn(deprecated.COLLECTOR_MAKEITEM, stacklevel=2) + self._makeitem(name, obj) + + def _makeitem(self, name, obj): + # assert self.ihook.fspath == self.fspath, self return self.ihook.pytest_pycollect_makeitem( collector=self, name=name, obj=obj) @@ -331,9 +369,15 @@ class PyCollector(PyobjMixin, main.Collector): cls = clscol and clscol.obj or None transfer_markers(funcobj, cls, module) fm = self.session._fixturemanager - fixtureinfo = fm.getfixtureinfo(self, funcobj, cls) - metafunc = Metafunc(funcobj, fixtureinfo, self.config, - cls=cls, module=module) + + definition = FunctionDefinition( + name=name, + parent=self, + callobj=funcobj, + ) + fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls) + + metafunc = Metafunc(definition, fixtureinfo, self.config, cls=cls, module=module) methods = [] if hasattr(module, "pytest_generate_tests"): methods.append(module.pytest_generate_tests) @@ -357,12 +401,12 @@ class PyCollector(PyobjMixin, main.Collector): yield Function(name=subname, parent=self, callspec=callspec, callobj=funcobj, fixtureinfo=fixtureinfo, - keywords={callspec.id:True}, + keywords={callspec.id: True}, originalname=name, ) -class Module(main.File, PyCollector): +class Module(nodes.File, PyCollector): """ Collector for test classes and functions. """ def _getobj(self): @@ -390,7 +434,7 @@ class Module(main.File, PyCollector): " %s\n" "HINT: remove __pycache__ / .pyc files and/or use a " "unique basename for your test file modules" - % e.args + % e.args ) except ImportError: from _pytest._code.code import ExceptionInfo @@ -409,9 +453,10 @@ class Module(main.File, PyCollector): if e.allow_module_level: raise raise self.CollectError( - "Using pytest.skip outside of a test is not allowed. If you are " - "trying to decorate a test function, use the @pytest.mark.skip " - "or @pytest.mark.skipif decorators instead." + "Using pytest.skip outside of a test is not allowed. " + "To decorate a test function, use the @pytest.mark.skip " + "or @pytest.mark.skipif decorators instead, and to skip a " + "module use `pytestmark = pytest.mark.{skip,skipif}." ) self.config.pluginmanager.consider_module(mod) return mod @@ -462,12 +507,13 @@ def _get_xunit_func(obj, name): class Class(PyCollector): """ Collector for test methods. 
""" + def collect(self): if not safe_getattr(self.obj, "__test__", True): return [] if hasinit(self.obj): self.warn("C1", "cannot collect test class %r because it has a " - "__init__ constructor" % self.obj.__name__) + "__init__ constructor" % self.obj.__name__) return [] elif hasnew(self.obj): self.warn("C1", "cannot collect test class %r because it has a " @@ -488,7 +534,13 @@ class Class(PyCollector): fin_class = getattr(fin_class, '__func__', fin_class) self.addfinalizer(lambda: fin_class(self.obj)) + class Instance(PyCollector): + _ALLOW_MARKERS = False # hack, destroy later + # instances share the object with their parents in a way + # that duplicates markers instances if not taken out + # can be removed at node strucutre reorganization time + def _getobj(self): return self.parent.obj() @@ -500,6 +552,7 @@ class Instance(PyCollector): self.obj = self._getobj() return self.obj + class FunctionMixin(PyobjMixin): """ mixin for the code common to Function and Generator. """ @@ -535,7 +588,6 @@ class FunctionMixin(PyobjMixin): if ntraceback == traceback: ntraceback = ntraceback.cut(path=path) if ntraceback == traceback: - #ntraceback = ntraceback.cut(excludepath=cutdir2) ntraceback = ntraceback.filter(filter_traceback) if not ntraceback: ntraceback = traceback @@ -553,7 +605,7 @@ class FunctionMixin(PyobjMixin): if not excinfo.value.pytrace: return py._builtin._totext(excinfo.value) return super(FunctionMixin, self)._repr_failure_py(excinfo, - style=style) + style=style) def repr_failure(self, excinfo, outerr=None): assert outerr is None, "XXX outerr usage is deprecated" @@ -572,28 +624,28 @@ class Generator(FunctionMixin, PyCollector): self.session._setupstate.prepare(self) # see FunctionMixin.setup and test_setupstate_is_preserved_134 self._preservedparent = self.parent.obj - l = [] + values = [] seen = {} for i, x in enumerate(self.obj()): name, call, args = self.getcallargs(x) if not callable(call): - raise TypeError("%r yielded non callable test %r" %(self.obj, call,)) + raise TypeError("%r yielded non callable test %r" % (self.obj, call,)) if name is None: name = "[%d]" % i else: name = "['%s']" % name if name in seen: - raise ValueError("%r generated tests with non-unique name %r" %(self, name)) + raise ValueError("%r generated tests with non-unique name %r" % (self, name)) seen[name] = True - l.append(self.Function(name, self, args=args, callobj=call)) - self.config.warn('C1', deprecated.YIELD_TESTS, fslocation=self.fspath) - return l + values.append(self.Function(name, self, args=args, callobj=call)) + self.warn('C1', deprecated.YIELD_TESTS) + return values def getcallargs(self, obj): if not isinstance(obj, (tuple, list)): obj = (obj,) # explicit naming - if isinstance(obj[0], py.builtin._basestring): + if isinstance(obj[0], six.string_types): name = obj[0] obj = obj[1:] else: @@ -624,14 +676,14 @@ class CallSpec2(object): self._globalid_args = set() self._globalparam = NOTSET self._arg2scopenum = {} # used for sorting parametrized resources - self.keywords = {} + self.marks = [] self.indices = {} def copy(self, metafunc): cs = CallSpec2(self.metafunc) cs.funcargs.update(self.funcargs) cs.params.update(self.params) - cs.keywords.update(self.keywords) + cs.marks.extend(self.marks) cs.indices.update(self.indices) cs._arg2scopenum.update(self._arg2scopenum) cs._idlist = list(self._idlist) @@ -642,7 +694,7 @@ class CallSpec2(object): def _checkargnotcontained(self, arg): if arg in self.params or arg in self.funcargs: - raise ValueError("duplicate %r" %(arg,)) + raise 
ValueError("duplicate %r" % (arg,)) def getparam(self, name): try: @@ -656,16 +708,16 @@ class CallSpec2(object): def id(self): return "-".join(map(str, filter(None, self._idlist))) - def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum, - param_index): - for arg,val in zip(argnames, valset): + def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, + param_index): + for arg, val in zip(argnames, valset): self._checkargnotcontained(arg) valtype_for_arg = valtypes[arg] getattr(self, valtype_for_arg)[arg] = val self.indices[arg] = param_index self._arg2scopenum[arg] = scopenum self._idlist.append(id) - self.keywords.update(keywords) + self.marks.extend(marks) def setall(self, funcargs, id, param): for x in funcargs: @@ -682,20 +734,23 @@ class CallSpec2(object): class Metafunc(fixtures.FuncargnamesCompatAttr): """ - Metafunc objects are passed to the ``pytest_generate_tests`` hook. + Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook. They help to inspect a test function and to generate tests according to test configuration or values specified in the class or module where a test function is defined. """ - def __init__(self, function, fixtureinfo, config, cls=None, module=None): + + def __init__(self, definition, fixtureinfo, config, cls=None, module=None): #: access to the :class:`_pytest.config.Config` object for the test session + assert isinstance(definition, FunctionDefinition) or type(definition).__name__ == "DefinitionMock" + self.definition = definition self.config = config #: the module object where the test function is defined in. self.module = module #: underlying python test function - self.function = function + self.function = definition.obj #: set of fixture names required by the test function self.fixturenames = fixtureinfo.names_closure @@ -704,11 +759,11 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): self.cls = cls self._calls = [] - self._ids = py.builtin.set() + self._ids = set() self._arg2fixturedefs = fixtureinfo.name2fixturedefs def parametrize(self, argnames, argvalues, indirect=False, ids=None, - scope=None): + scope=None): """ Add new invocations to the underlying test function using the list of argvalues for the given argnames. Parametrization is performed during the collection phase. If you need to setup expensive resources @@ -747,30 +802,13 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): to set a dynamic scope using test context or configuration. 
""" from _pytest.fixtures import scope2index - from _pytest.mark import MARK_GEN, ParameterSet + from _pytest.mark import ParameterSet from py.io import saferepr - if not isinstance(argnames, (tuple, list)): - argnames = [x.strip() for x in argnames.split(",") if x.strip()] - force_tuple = len(argnames) == 1 - else: - force_tuple = False - parameters = [ - ParameterSet.extract_from(x, legacy_force_tuple=force_tuple) - for x in argvalues] + argnames, parameters = ParameterSet._for_parametrize( + argnames, argvalues, self.function, self.config) del argvalues - if not parameters: - fs, lineno = getfslineno(self.function) - reason = "got empty parameter set %r, function %s at %s:%d" % ( - argnames, self.function.__name__, fs, lineno) - mark = MARK_GEN.skip(reason=reason) - parameters.append(ParameterSet( - values=(NOTSET,) * len(argnames), - marks=[mark], - id=None, - )) - if scope is None: scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) @@ -784,7 +822,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): name = 'fixture' if indirect else 'argument' raise ValueError( "%r uses no %s %r" % ( - self.function, name, arg)) + self.function, name, arg)) if indirect is True: valtypes = dict.fromkeys(argnames, "params") @@ -806,7 +844,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): raise ValueError('%d tests specified with %d ids' % ( len(parameters), len(ids))) for id_value in ids: - if id_value is not None and not isinstance(id_value, py.builtin._basestring): + if id_value is not None and not isinstance(id_value, six.string_types): msg = 'ids must be list of strings, found: %s (type: %s)' raise ValueError(msg % (saferepr(id_value), type(id_value).__name__)) ids = idmaker(argnames, parameters, idfn, ids, self.config) @@ -820,15 +858,19 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): 'equal to the number of names ({1})'.format( param.values, argnames)) newcallspec = callspec.copy(self) - newcallspec.setmulti(valtypes, argnames, param.values, a_id, - param.deprecated_arg_dict, scopenum, param_index) + newcallspec.setmulti2(valtypes, argnames, param.values, a_id, + param.marks, scopenum, param_index) newcalls.append(newcallspec) self._calls = newcalls def addcall(self, funcargs=None, id=NOTSET, param=NOTSET): - """ (deprecated, use parametrize) Add a new call to the underlying - test function during the collection phase of a test run. Note that - request.addcall() is called during the test collection phase prior and + """ Add a new call to the underlying test function during the collection phase of a test run. + + .. deprecated:: 3.3 + + Use :meth:`parametrize` instead. + + Note that request.addcall() is called during the test collection phase prior and independently to actual test execution. You should only use addcall() if you need to specify multiple arguments of a test function. @@ -841,6 +883,8 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): :arg param: a parameter which will be exposed to a later fixture function invocation through the ``request.param`` attribute. 
""" + if self.config: + self.config.warn('C1', message=deprecated.METAFUNC_ADD_CALL, fslocation=None) assert funcargs is None or isinstance(funcargs, dict) if funcargs is not None: for name in funcargs: @@ -875,7 +919,7 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): from _pytest.fixtures import scopes indirect_as_list = isinstance(indirect, (list, tuple)) all_arguments_are_fixtures = indirect is True or \ - indirect_as_list and len(indirect) == argnames + indirect_as_list and len(indirect) == argnames if all_arguments_are_fixtures: fixturedefs = arg2fixturedefs or {} used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()] @@ -900,7 +944,7 @@ def _idval(val, argname, idx, idfn, config=None): msg += '\nUpdate your code as this will raise an error in pytest-4.0.' warnings.warn(msg, DeprecationWarning) if s: - return _escape_strings(s) + return ascii_escaped(s) if config: hook_id = config.hook.pytest_make_parametrize_id( @@ -909,16 +953,16 @@ def _idval(val, argname, idx, idfn, config=None): return hook_id if isinstance(val, STRING_TYPES): - return _escape_strings(val) + return ascii_escaped(val) elif isinstance(val, (float, int, bool, NoneType)): return str(val) elif isinstance(val, REGEX_TYPE): - return _escape_strings(val.pattern) + return ascii_escaped(val.pattern) elif enum is not None and isinstance(val, enum.Enum): return str(val) - elif isclass(val) and hasattr(val, '__name__'): + elif (isclass(val) or isfunction(val)) and hasattr(val, '__name__'): return val.__name__ - return str(argname)+str(idx) + return str(argname) + str(idx) def _idvalset(idx, parameterset, argnames, idfn, ids, config=None): @@ -929,7 +973,7 @@ def _idvalset(idx, parameterset, argnames, idfn, ids, config=None): for val, argname in zip(parameterset.values, argnames)] return "-".join(this_id) else: - return _escape_strings(ids[idx]) + return ascii_escaped(ids[idx]) def idmaker(argnames, parametersets, idfn=None, ids=None, config=None): @@ -958,52 +1002,48 @@ def _show_fixtures_per_test(config, session): tw = _pytest.config.create_terminal_writer(config) verbose = config.getvalue("verbose") - def get_best_rel(func): + def get_best_relpath(func): loc = getlocation(func, curdir) return curdir.bestrelpath(loc) def write_fixture(fixture_def): argname = fixture_def.argname - if verbose <= 0 and argname.startswith("_"): return if verbose > 0: - bestrel = get_best_rel(fixture_def.func) + bestrel = get_best_relpath(fixture_def.func) funcargspec = "{0} -- {1}".format(argname, bestrel) else: funcargspec = argname tw.line(funcargspec, green=True) - - INDENT = ' {0}' fixture_doc = fixture_def.func.__doc__ - if fixture_doc: - for line in fixture_doc.strip().split('\n'): - tw.line(INDENT.format(line.strip())) + write_docstring(tw, fixture_doc) else: - tw.line(INDENT.format('no docstring available'), red=True) + tw.line(' no docstring available', red=True) def write_item(item): - name2fixturedefs = item._fixtureinfo.name2fixturedefs - - if not name2fixturedefs: - # The given test item does not use any fixtures + try: + info = item._fixtureinfo + except AttributeError: + # doctests items have no _fixtureinfo attribute + return + if not info.name2fixturedefs: + # this test item does not use any fixtures return - bestrel = get_best_rel(item.function) - tw.line() tw.sep('-', 'fixtures used by {0}'.format(item.name)) - tw.sep('-', '({0})'.format(bestrel)) - for argname, fixture_defs in sorted(name2fixturedefs.items()): - assert fixture_defs is not None - if not fixture_defs: + tw.sep('-', 
'({0})'.format(get_best_relpath(item.function))) + # dict key not used in loop but needed for sorting + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: continue - # The last fixture def item in the list is expected - # to be the one used by the test item - write_fixture(fixture_defs[-1]) + # last item is expected to be the one used by the test item + write_fixture(fixturedefs[-1]) - for item in session.items: - write_item(item) + for session_item in session.items: + write_item(session_item) def showfixtures(config): @@ -1043,35 +1083,48 @@ def _showfixtures_main(config, session): if currentmodule != module: if not module.startswith("_pytest."): tw.line() - tw.sep("-", "fixtures defined from %s" %(module,)) + tw.sep("-", "fixtures defined from %s" % (module,)) currentmodule = module if verbose <= 0 and argname[0] == "_": continue if verbose > 0: - funcargspec = "%s -- %s" %(argname, bestrel,) + funcargspec = "%s -- %s" % (argname, bestrel,) else: funcargspec = argname tw.line(funcargspec, green=True) loc = getlocation(fixturedef.func, curdir) doc = fixturedef.func.__doc__ or "" if doc: - for line in doc.strip().split("\n"): - tw.line(" " + line.strip()) + write_docstring(tw, doc) else: - tw.line(" %s: no docstring available" %(loc,), - red=True) + tw.line(" %s: no docstring available" % (loc,), + red=True) +def write_docstring(tw, doc): + INDENT = " " + doc = doc.rstrip() + if "\n" in doc: + firstline, rest = doc.split("\n", 1) + else: + firstline, rest = doc, "" -# -# the basic pytest Function item -# + if firstline.strip(): + tw.line(INDENT + firstline.strip()) -class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr): + if rest: + for line in dedent(rest).split("\n"): + tw.write(INDENT + line + "\n") + + +class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): """ a Function Item is responsible for setting up and executing a Python test function. 
""" _genid = None + # disable since functions handle it themselfes + _ALLOW_MARKERS = False + def __init__(self, name, parent, args=None, config=None, callspec=None, callobj=NOTSET, keywords=None, session=None, fixtureinfo=None, originalname=None): @@ -1082,9 +1135,17 @@ class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr): self.obj = callobj self.keywords.update(self.obj.__dict__) + self.own_markers.extend(get_unpacked_marks(self.obj)) if callspec: self.callspec = callspec - self.keywords.update(callspec.keywords) + # this is total hostile and a mess + # keywords are broken by design by now + # this will be redeemed later + for mark in callspec.marks: + # feel free to cry, this was broken for years before + # and keywords cant fix it per design + self.keywords[mark.name] = mark + self.own_markers.extend(callspec.marks) if keywords: self.keywords.update(keywords) @@ -1123,7 +1184,7 @@ class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr): def _getobj(self): name = self.name - i = name.find("[") # parametrization + i = name.find("[") # parametrization if i != -1: name = name[:i] return getattr(self.parent.obj, name) @@ -1143,3 +1204,15 @@ class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr): def setup(self): super(Function, self).setup() fixtures.fillfixtures(self) + + +class FunctionDefinition(Function): + """ + internal hack until we get actual definition nodes instead of the + crappy metafunc hack + """ + + def runtest(self): + raise RuntimeError("function definitions are not supposed to be used") + + setup = runtest diff --git a/_pytest/python_api.py b/_pytest/python_api.py index 1b27ba327..8e09a4a6f 100644 --- a/_pytest/python_api.py +++ b/_pytest/python_api.py @@ -2,14 +2,278 @@ import math import sys import py +from six import binary_type, text_type +from six.moves import zip, filterfalse +from more_itertools.more import always_iterable from _pytest.compat import isclass -from _pytest.runner import fail +from _pytest.outcomes import fail import _pytest._code + + +def _cmp_raises_type_error(self, other): + """__cmp__ implementation which raises TypeError. Used + by Approx base classes to implement only == and != and raise a + TypeError for other comparisons. + + Needed in Python 2 only, Python 3 all it takes is not implementing the + other operators at all. + """ + __tracebackhide__ = True + raise TypeError('Comparison operators other than == and != not supported by approx objects') + + # builtin pytest.approx helper -class approx(object): +class ApproxBase(object): + """ + Provide shared utilities for making approximate comparisons between numbers + or sequences of numbers. + """ + + # Tell numpy to use our `__eq__` operator instead of its + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok=False): + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + + def __repr__(self): + raise NotImplementedError + + def __eq__(self, actual): + return all( + a == self._approx_scalar(x) + for a, x in self._yield_comparisons(actual)) + + __hash__ = None + + def __ne__(self, actual): + return not (actual == self) + + if sys.version_info[0] == 2: + __cmp__ = _cmp_raises_type_error + + def _approx_scalar(self, x): + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """ + Yield all the pairs of numbers to be compared. This is used to + implement the `__eq__` method. 
+ """ + raise NotImplementedError + + +class ApproxNumpy(ApproxBase): + """ + Perform approximate comparisons for numpy arrays. + """ + + def __repr__(self): + # It might be nice to rewrite this function to account for the + # shape of the array... + import numpy as np + + return "approx({0!r})".format(list( + self._approx_scalar(x) for x in np.asarray(self.expected))) + + if sys.version_info[0] == 2: + __cmp__ = _cmp_raises_type_error + + def __eq__(self, actual): + import numpy as np + + # self.expected is supposed to always be an array here + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except: # noqa + raise TypeError("cannot compare '{0}' to numpy.ndarray".format(actual)) + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. + + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, np.asscalar(self.expected[i]) + else: + for i in np.ndindex(self.expected.shape): + yield np.asscalar(actual[i]), np.asscalar(self.expected[i]) + + +class ApproxMapping(ApproxBase): + """ + Perform approximate comparisons for mappings where the values are numbers + (the keys can be anything). + """ + + def __repr__(self): + return "approx({0!r})".format(dict( + (k, self._approx_scalar(v)) + for k, v in self.expected.items())) + + def __eq__(self, actual): + if set(actual.keys()) != set(self.expected.keys()): + return False + + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + +class ApproxSequence(ApproxBase): + """ + Perform approximate comparisons for sequences of numbers. + """ + + def __repr__(self): + seq_type = type(self.expected) + if seq_type not in (tuple, list, set): + seq_type = list + return "approx({0!r})".format(seq_type( + self._approx_scalar(x) for x in self.expected)) + + def __eq__(self, actual): + if len(actual) != len(self.expected): + return False + return ApproxBase.__eq__(self, actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected) + + +class ApproxScalar(ApproxBase): + """ + Perform approximate comparisons for single numbers only. + """ + DEFAULT_ABSOLUTE_TOLERANCE = 1e-12 + DEFAULT_RELATIVE_TOLERANCE = 1e-6 + + def __repr__(self): + """ + Return a string communicating both the expected value and the tolerance + for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode + plus/minus symbol if this is python3 (it's too hard to get right for + python2). + """ + if isinstance(self.expected, complex): + return str(self.expected) + + # Infinities aren't compared using tolerances, so don't show a + # tolerance. + if math.isinf(self.expected): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = '{:.1e}'.format(self.tolerance) + except ValueError: + vetted_tolerance = '???' 
+ + if sys.version_info[0] == 2: + return '{0} +- {1}'.format(self.expected, vetted_tolerance) + else: + return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance) + + def __eq__(self, actual): + """ + Return true if the given value is equal to the expected value within + the pre-specified tolerance. + """ + if _is_numpy_array(actual): + return ApproxNumpy(actual, self.abs, self.rel, self.nan_ok) == self.expected + + # Short-circuit exact equality. + if actual == self.expected: + return True + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): + return self.nan_ok and math.isnan(abs(actual)) + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. + return abs(self.expected - actual) <= self.tolerance + + __hash__ = None + + @property + def tolerance(self): + """ + Return the tolerance for the comparison. This could be either an + absolute tolerance or a relative tolerance, depending on what the user + specified or which would be larger. + """ + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance)) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default(self.rel, self.DEFAULT_RELATIVE_TOLERANCE) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError("relative tolerance can't be negative: {}".format(absolute_tolerance)) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + from decimal import Decimal + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal('1e-12') + DEFAULT_RELATIVE_TOLERANCE = Decimal('1e-6') + + +def approx(expected, rel=None, abs=None, nan_ok=False): """ Assert that two numbers (or two sets of numbers) are equal to each other within some tolerance. 
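A small illustration of the tolerance rules implemented by ``ApproxScalar`` and the other ``ApproxBase`` subclasses above (a sketch of the intended behaviour, assuming the defaults ``rel=1e-6`` and ``abs=1e-12`` defined in this patch):

import pytest

def test_approx_tolerances():
    assert 1.0 + 1e-7 == pytest.approx(1.0)  # within the default relative tolerance
    assert 1e-13 == pytest.approx(0.0)  # near zero the absolute tolerance takes over
    assert {"a": 0.1 + 0.2} == pytest.approx({"a": 0.3})  # mappings are compared value by value
    assert float("nan") == pytest.approx(float("nan"), nan_ok=True)  # NaN matches only with nan_ok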
@@ -45,21 +309,42 @@ class approx(object): >>> 0.1 + 0.2 == approx(0.3) True - The same syntax also works on sequences of numbers:: + The same syntax also works for sequences of numbers:: >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) True + Dictionary *values*:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + By default, ``approx`` considers numbers within a relative tolerance of ``1e-6`` (i.e. one part in a million) of its expected value to be equal. This treatment would lead to surprising results if the expected value was ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. To handle this case less surprisingly, ``approx`` also considers numbers within an absolute tolerance of ``1e-12`` of its expected value to be - equal. Infinite numbers are another special case. They are only - considered equal to themselves, regardless of the relative tolerance. Both - the relative and absolute tolerances can be changed by passing arguments to - the ``approx`` constructor:: + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: >>> 1.0001 == approx(1) False @@ -121,140 +406,75 @@ class approx(object): is asymmetric and you can think of ``b`` as the reference value. In the special case that you explicitly specify an absolute tolerance but not a relative tolerance, only the absolute tolerance is considered. + + .. warning:: + + .. versionchanged:: 3.2 + + In order to avoid inconsistent behavior, ``TypeError`` is + raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons. + The example below illustrates the problem:: + + assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10) + assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10) + + In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)`` + to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to + comparison. This is because the call hierarchy of rich comparisons + follows a fixed behavior. `More information...`__ + + __ https://docs.python.org/3/reference/datamodel.html#object.__ge__ """ - def __init__(self, expected, rel=None, abs=None): - self.expected = expected - self.abs = abs - self.rel = rel + from collections import Mapping, Sequence + from _pytest.compat import STRING_TYPES as String + from decimal import Decimal - def __repr__(self): - return ', '.join(repr(x) for x in self.expected) + # Delegate the comparison to a class that knows how to deal with the type + # of the expected value (e.g. int, float, list, dict, numpy.array, etc). + # + # This architecture is really driven by the need to support numpy arrays. + # The only way to override `==` for arrays without requiring that approx be + # the left operand is to inherit the approx object from `numpy.ndarray`. 
+ # But that can't be a general solution, because it requires (1) numpy to be + # installed and (2) the expected value to be a numpy array. So the general + # solution is to delegate each type of expected value to a different class. + # + # This has the advantage that it made it easy to support mapping types + # (i.e. dict). The old code accepted mapping types, but would only compare + # their keys, which is probably not what most people would expect. - def __eq__(self, actual): - from collections import Iterable - if not isinstance(actual, Iterable): - actual = [actual] - if len(actual) != len(self.expected): - return False - return all(a == x for a, x in zip(actual, self.expected)) + if _is_numpy_array(expected): + cls = ApproxNumpy + elif isinstance(expected, Mapping): + cls = ApproxMapping + elif isinstance(expected, Sequence) and not isinstance(expected, String): + cls = ApproxSequence + elif isinstance(expected, Decimal): + cls = ApproxDecimal + else: + cls = ApproxScalar - __hash__ = None - - def __ne__(self, actual): - return not (actual == self) - - @property - def expected(self): - # Regardless of whether the user-specified expected value is a number - # or a sequence of numbers, return a list of ApproxNotIterable objects - # that can be compared against. - from collections import Iterable - approx_non_iter = lambda x: ApproxNonIterable(x, self.rel, self.abs) - if isinstance(self._expected, Iterable): - return [approx_non_iter(x) for x in self._expected] - else: - return [approx_non_iter(self._expected)] - - @expected.setter - def expected(self, expected): - self._expected = expected + return cls(expected, rel, abs, nan_ok) -class ApproxNonIterable(object): +def _is_numpy_array(obj): """ - Perform approximate comparisons for single numbers only. - - In other words, the ``expected`` attribute for objects of this class must - be some sort of number. This is in contrast to the ``approx`` class, where - the ``expected`` attribute can either be a number of a sequence of numbers. - This class is responsible for making comparisons, while ``approx`` is - responsible for abstracting the difference between numbers and sequences of - numbers. Although this class can stand on its own, it's only meant to be - used within ``approx``. + Return true if the given object is a numpy array. Make a special effort to + avoid importing numpy unless it's really necessary. """ + import inspect - def __init__(self, expected, rel=None, abs=None): - self.expected = expected - self.abs = abs - self.rel = rel + for cls in inspect.getmro(type(obj)): + if cls.__module__ == 'numpy': + try: + import numpy as np + return isinstance(obj, np.ndarray) + except ImportError: + pass - def __repr__(self): - if isinstance(self.expected, complex): - return str(self.expected) + return False - # Infinities aren't compared using tolerances, so don't show a - # tolerance. - if math.isinf(self.expected): - return str(self.expected) - - # If a sensible tolerance can't be calculated, self.tolerance will - # raise a ValueError. In this case, display '???'. - try: - vetted_tolerance = '{:.1e}'.format(self.tolerance) - except ValueError: - vetted_tolerance = '???' - - if sys.version_info[0] == 2: - return '{0} +- {1}'.format(self.expected, vetted_tolerance) - else: - return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance) - - def __eq__(self, actual): - # Short-circuit exact equality. 
- if actual == self.expected: - return True - - # Infinity shouldn't be approximately equal to anything but itself, but - # if there's a relative tolerance, it will be infinite and infinity - # will seem approximately equal to everything. The equal-to-itself - # case would have been short circuited above, so here we can just - # return false if the expected value is infinite. The abs() call is - # for compatibility with complex numbers. - if math.isinf(abs(self.expected)): - return False - - # Return true if the two numbers are within the tolerance. - return abs(self.expected - actual) <= self.tolerance - - __hash__ = None - - def __ne__(self, actual): - return not (actual == self) - - @property - def tolerance(self): - set_default = lambda x, default: x if x is not None else default - - # Figure out what the absolute tolerance should be. ``self.abs`` is - # either None or a value specified by the user. - absolute_tolerance = set_default(self.abs, 1e-12) - - if absolute_tolerance < 0: - raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance)) - if math.isnan(absolute_tolerance): - raise ValueError("absolute tolerance can't be NaN.") - - # If the user specified an absolute tolerance but not a relative one, - # just return the absolute tolerance. - if self.rel is None: - if self.abs is not None: - return absolute_tolerance - - # Figure out what the relative tolerance should be. ``self.rel`` is - # either None or a value specified by the user. This is done after - # we've made sure the user didn't ask for an absolute tolerance only, - # because we don't want to raise errors about the relative tolerance if - # we aren't even going to use it. - relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected) - - if relative_tolerance < 0: - raise ValueError("relative tolerance can't be negative: {}".format(absolute_tolerance)) - if math.isnan(relative_tolerance): - raise ValueError("relative tolerance can't be NaN.") - - # Return the larger of the relative and absolute tolerances. - return max(relative_tolerance, absolute_tolerance) # builtin pytest.raises helper @@ -263,10 +483,13 @@ def raises(expected_exception, *args, **kwargs): Assert that a code block/function call raises ``expected_exception`` and raise a failure exception otherwise. + :arg message: if specified, provides a custom failure message if the + exception is not raised + :arg match: if specified, asserts that the exception matches a text or regex + This helper produces a ``ExceptionInfo()`` object (see below). - If using Python 2.5 or above, you may use this function as a - context manager:: + You may use this function as a context manager:: >>> with raises(ZeroDivisionError): ... 1/0 @@ -282,7 +505,6 @@ def raises(expected_exception, *args, **kwargs): ... Failed: Expecting ZeroDivisionError - .. note:: When using ``pytest.raises`` as a context manager, it's worthwhile to @@ -306,7 +528,8 @@ def raises(expected_exception, *args, **kwargs): ... >>> assert exc_info.type == ValueError - Or you can use the keyword argument ``match`` to assert that the + + Since version ``3.1`` you can use the keyword argument ``match`` to assert that the exception matches a text or regex:: >>> with raises(ValueError, match='must be 0 or None'): @@ -315,8 +538,12 @@ def raises(expected_exception, *args, **kwargs): >>> with raises(ValueError, match=r'must be \d+$'): ... 
raise ValueError("value must be 42") + **Legacy forms** - Or you can specify a callable by passing a to-be-called lambda:: + The forms below are fully supported but are discouraged for new code because the + context manager form is regarded as more readable and less error-prone. + + It is possible to specify a callable by passing a to-be-called lambda:: >>> raises(ZeroDivisionError, lambda: 1/0) @@ -330,13 +557,17 @@ def raises(expected_exception, *args, **kwargs): >>> raises(ZeroDivisionError, f, x=0) - A third possibility is to use a string to be executed:: + It is also possible to pass a string to be evaluated at runtime:: >>> raises(ZeroDivisionError, "f(0)") - .. autoclass:: _pytest._code.ExceptionInfo - :members: + The string will be evaluated using the same ``locals()`` and ``globals()`` + at the moment of the ``raises`` call. + + .. currentmodule:: _pytest._code + + Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`. .. note:: Similar to caught exception objects in Python, explicitly clearing @@ -354,14 +585,11 @@ def raises(expected_exception, *args, **kwargs): """ __tracebackhide__ = True - msg = ("exceptions must be old-style classes or" - " derived from BaseException, not %s") - if isinstance(expected_exception, tuple): - for exc in expected_exception: - if not isclass(exc): - raise TypeError(msg % type(exc)) - elif not isclass(expected_exception): - raise TypeError(msg % type(expected_exception)) + base_type = (type, text_type, binary_type) + for exc in filterfalse(isclass, always_iterable(expected_exception, base_type)): + msg = ("exceptions must be old-style classes or" + " derived from BaseException, not %s") + raise TypeError(msg % type(exc)) message = "DID NOT RAISE {0}".format(expected_exception) match_expr = None @@ -371,7 +599,10 @@ def raises(expected_exception, *args, **kwargs): message = kwargs.pop("message") if "match" in kwargs: match_expr = kwargs.pop("match") - message += " matching '{0}'".format(match_expr) + if kwargs: + msg = 'Unexpected keyword arguments passed to pytest.raises: ' + msg += ', '.join(kwargs.keys()) + raise TypeError(msg) return RaisesContext(expected_exception, message, match_expr) elif isinstance(args[0], str): code, = args @@ -379,7 +610,7 @@ def raises(expected_exception, *args, **kwargs): frame = sys._getframe(1) loc = frame.f_locals.copy() loc.update(kwargs) - #print "raises frame scope: %r" % frame.f_locals + # print "raises frame scope: %r" % frame.f_locals try: code = _pytest._code.Source(code).compile() py.builtin.exec_(code, frame.f_globals, loc) @@ -414,17 +645,10 @@ class RaisesContext(object): __tracebackhide__ = True if tp[0] is None: fail(self.message) - if sys.version_info < (2, 7): - # py26: on __exit__() exc_value often does not contain the - # exception value. 
- # http://bugs.python.org/issue7853 - if not isinstance(tp[1], BaseException): - exc_type, value, traceback = tp - tp = exc_type, exc_type(value), traceback self.excinfo.__init__(tp) suppress_exception = issubclass(self.excinfo.type, self.expected_exception) if sys.version_info[0] == 2 and suppress_exception: sys.exc_clear() - if self.match_expr: + if self.match_expr and suppress_exception: self.excinfo.match(self.match_expr) return suppress_exception diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py index 9cc404a49..ab0f79c75 100644 --- a/_pytest/recwarn.py +++ b/_pytest/recwarn.py @@ -7,15 +7,16 @@ import _pytest._code import py import sys import warnings + +import re + from _pytest.fixtures import yield_fixture +from _pytest.outcomes import fail @yield_fixture def recwarn(): - """Return a WarningsRecorder instance that provides these methods: - - * ``pop(category=None)``: return last warning matching the category. - * ``clear()``: clear list of warnings + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. See http://docs.python.org/library/warnings.html for information on warning categories. @@ -84,11 +85,11 @@ class _DeprecatedCallContext(object): def warns(expected_warning, *args, **kwargs): """Assert that code raises a particular class of warning. - Specifically, the input @expected_warning can be a warning class or - tuple of warning classes, and the code must return that warning - (if a single class) or one of those warnings (if a tuple). + Specifically, the parameter ``expected_warning`` can be a warning class or + sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or + classes. - This helper produces a list of ``warnings.WarningMessage`` objects, + This helper produces a list of :class:`warnings.WarningMessage` objects, one for each warning raised. This function can be used as a context manager, or any of the other ways @@ -96,10 +97,28 @@ def warns(expected_warning, *args, **kwargs): >>> with warns(RuntimeWarning): ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the exception matches a text or regex:: + + >>> with warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... 
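To complement the ``warns(..., match=...)`` doctests above, a hedged sketch of the ``recwarn`` fixture mentioned earlier in this file (the warning text is illustrative):

import warnings

def test_deprecation_is_recorded(recwarn):
    warnings.warn("old api", DeprecationWarning)
    w = recwarn.pop(DeprecationWarning)
    assert "old api" in str(w.message)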
+ """ - wcheck = WarningsChecker(expected_warning) + match_expr = None if not args: - return wcheck + if "match" in kwargs: + match_expr = kwargs.pop("match") + return WarningsChecker(expected_warning, match_expr=match_expr) elif isinstance(args[0], str): code, = args assert isinstance(code, str) @@ -107,12 +126,12 @@ def warns(expected_warning, *args, **kwargs): loc = frame.f_locals.copy() loc.update(kwargs) - with wcheck: + with WarningsChecker(expected_warning, match_expr=match_expr): code = _pytest._code.Source(code).compile() py.builtin.exec_(code, frame.f_globals, loc) else: func = args[0] - with wcheck: + with WarningsChecker(expected_warning, match_expr=match_expr): return func(*args[1:], **kwargs) @@ -172,7 +191,7 @@ class WarningsRecorder(warnings.catch_warnings): class WarningsChecker(WarningsRecorder): - def __init__(self, expected_warning=None): + def __init__(self, expected_warning=None, match_expr=None): super(WarningsChecker, self).__init__() msg = ("exceptions must be old-style classes or " @@ -187,6 +206,7 @@ class WarningsChecker(WarningsRecorder): raise TypeError(msg % type(expected_warning)) self.expected_warning = expected_warning + self.match_expr = match_expr def __exit__(self, *exc_info): super(WarningsChecker, self).__exit__(*exc_info) @@ -197,8 +217,17 @@ class WarningsChecker(WarningsRecorder): if not any(issubclass(r.category, self.expected_warning) for r in self): __tracebackhide__ = True - from _pytest.runner import fail fail("DID NOT WARN. No warnings of type {0} was emitted. " "The list of emitted warnings is: {1}.".format( - self.expected_warning, - [each.message for each in self])) + self.expected_warning, + [each.message for each in self])) + elif self.match_expr is not None: + for r in self: + if issubclass(r.category, self.expected_warning): + if re.compile(self.match_expr).search(str(r.message)): + break + else: + fail("DID NOT WARN. No warnings of type {0} matching" + " ('{1}') was emitted. 
The list of emitted warnings" + " is: {2}.".format(self.expected_warning, self.match_expr, + [each.message for each in self])) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py index 3e4b00cf9..9f9c2d1f6 100644 --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -6,11 +6,13 @@ from __future__ import absolute_import, division, print_function import py import os + def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "resultlog plugin options") group.addoption('--resultlog', '--result-log', action="store", - metavar="path", default=None, - help="DEPRECATED path for machine-readable result log.") + metavar="path", default=None, + help="DEPRECATED path for machine-readable result log.") + def pytest_configure(config): resultlog = config.option.resultlog @@ -19,13 +21,14 @@ def pytest_configure(config): dirname = os.path.dirname(os.path.abspath(resultlog)) if not os.path.isdir(dirname): os.makedirs(dirname) - logfile = open(resultlog, 'w', 1) # line buffered + logfile = open(resultlog, 'w', 1) # line buffered config._resultlog = ResultLog(config, logfile) config.pluginmanager.register(config._resultlog) from _pytest.deprecated import RESULT_LOG config.warn('C1', RESULT_LOG) + def pytest_unconfigure(config): resultlog = getattr(config, '_resultlog', None) if resultlog: @@ -33,6 +36,7 @@ def pytest_unconfigure(config): del config._resultlog config.pluginmanager.unregister(resultlog) + def generic_path(item): chain = item.listchain() gpath = [chain[0].name] @@ -56,10 +60,11 @@ def generic_path(item): fspath = newfspath return ''.join(gpath) + class ResultLog(object): def __init__(self, config, logfile): self.config = config - self.logfile = logfile # preferably line buffered + self.logfile = logfile # preferably line buffered def write_log_entry(self, testpath, lettercode, longrepr): print("%s %s" % (lettercode, testpath), file=self.logfile) diff --git a/_pytest/runner.py b/_pytest/runner.py index fd0b549a9..6792387db 100644 --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -2,22 +2,24 @@ from __future__ import absolute_import, division, print_function import bdb +import os import sys from time import time import py from _pytest._code.code import TerminalRepr, ExceptionInfo - - +from _pytest.outcomes import skip, Skipped, TEST_OUTCOME # # pytest plugin hooks + def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") group.addoption('--durations', - action="store", type=int, default=None, metavar="N", - help="show N slowest setup/test durations (N=0 for all)."), + action="store", type=int, default=None, metavar="N", + help="show N slowest setup/test durations (N=0 for all)."), + def pytest_terminal_summary(terminalreporter): durations = terminalreporter.config.option.durations @@ -42,24 +44,28 @@ def pytest_terminal_summary(terminalreporter): for rep in dlist: nodeid = rep.nodeid.replace("::()::", "::") tr.write_line("%02.2fs %-8s %s" % - (rep.duration, rep.when, nodeid)) + (rep.duration, rep.when, nodeid)) + def pytest_sessionstart(session): session._setupstate = SetupState() + + def pytest_sessionfinish(session): session._setupstate.teardown_all() -class NodeInfo: - def __init__(self, location): - self.location = location def pytest_runtest_protocol(item, nextitem): item.ihook.pytest_runtest_logstart( nodeid=item.nodeid, location=item.location, ) runtestprotocol(item, nextitem=nextitem) + item.ihook.pytest_runtest_logfinish( + nodeid=item.nodeid, location=item.location, + ) return True + def runtestprotocol(item, 
log=True, nextitem=None): hasrequest = hasattr(item, "_request") if hasrequest and not item._request: @@ -72,7 +78,7 @@ def runtestprotocol(item, log=True, nextitem=None): if not item.config.option.setuponly: reports.append(call_and_report(item, "call", log)) reports.append(call_and_report(item, "teardown", log, - nextitem=nextitem)) + nextitem=nextitem)) # after all teardown hooks have been called # want funcargs and request info to go away if hasrequest: @@ -80,6 +86,7 @@ def runtestprotocol(item, log=True, nextitem=None): item.funcargs = None return reports + def show_test_item(item): """Show test function, parameters and the fixtures of the test item.""" tw = item.config.get_terminal_writer() @@ -90,10 +97,14 @@ def show_test_item(item): if used_fixtures: tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures))) + def pytest_runtest_setup(item): + _update_current_test_var(item, 'setup') item.session._setupstate.prepare(item) + def pytest_runtest_call(item): + _update_current_test_var(item, 'call') try: item.runtest() except Exception: @@ -106,8 +117,28 @@ def pytest_runtest_call(item): del tb # Get rid of it in this namespace raise + def pytest_runtest_teardown(item, nextitem): + _update_current_test_var(item, 'teardown') item.session._setupstate.teardown_exact(item, nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var(item, when): + """ + Update PYTEST_CURRENT_TEST to reflect the current item and stage. + + If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment. + """ + var_name = 'PYTEST_CURRENT_TEST' + if when: + value = '{0} ({1})'.format(item.nodeid, when) + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace('\x00', '(null)') + os.environ[var_name] = value + else: + os.environ.pop(var_name) + def pytest_report_teststatus(report): if report.when in ("setup", "teardown"): @@ -133,21 +164,25 @@ def call_and_report(item, when, log=True, **kwds): hook.pytest_exception_interact(node=item, call=call, report=report) return report + def check_interactive_exception(call, report): return call.excinfo and not ( - hasattr(report, "wasxfail") or - call.excinfo.errisinstance(skip.Exception) or - call.excinfo.errisinstance(bdb.BdbQuit)) + hasattr(report, "wasxfail") or + call.excinfo.errisinstance(skip.Exception) or + call.excinfo.errisinstance(bdb.BdbQuit)) + def call_runtest_hook(item, when, **kwds): hookname = "pytest_runtest_" + when ihook = getattr(item.ihook, hookname) return CallInfo(lambda: ihook(item=item, **kwds), when=when) -class CallInfo: + +class CallInfo(object): """ Result/Exception info a function invocation. """ #: None or ExceptionInfo object. excinfo = None + def __init__(self, func, when): #: context of invocation: one of "setup", "call", #: "teardown", "memocollect" @@ -158,7 +193,7 @@ class CallInfo: except KeyboardInterrupt: self.stop = time() raise - except: + except: # noqa self.excinfo = ExceptionInfo() self.stop = time() @@ -169,6 +204,7 @@ class CallInfo: status = "result: %r" % (self.result,) return "" % (self.when, status) + def getslaveinfoline(node): try: return node._slaveinfocache @@ -179,6 +215,7 @@ def getslaveinfoline(node): d['id'], d['sysplatform'], ver, d['executable']) return s + class BaseReport(object): def __init__(self, **kw): @@ -219,6 +256,14 @@ class BaseReport(object): exc = tw.stringio.getvalue() return exc.strip() + @property + def caplog(self): + """Return captured log lines, if log capturing is enabled + + .. 
versionadded:: 3.5 + """ + return '\n'.join(content for (prefix, content) in self.get_sections('Captured log')) + @property def capstdout(self): """Return captured text from stdout, if capturing is enabled @@ -243,10 +288,11 @@ class BaseReport(object): def fspath(self): return self.nodeid.split("::")[0] + def pytest_runtest_makereport(item, call): when = call.when - duration = call.stop-call.start - keywords = dict([(x,1) for x in item.keywords]) + duration = call.stop - call.start + keywords = dict([(x, 1) for x in item.keywords]) excinfo = call.excinfo sections = [] if not call.excinfo: @@ -264,21 +310,23 @@ def pytest_runtest_makereport(item, call): outcome = "failed" if call.when == "call": longrepr = item.repr_failure(excinfo) - else: # exception in setup or teardown + else: # exception in setup or teardown longrepr = item._repr_failure_py(excinfo, - style=item.config.option.tbstyle) + style=item.config.option.tbstyle) for rwhen, key, content in item._report_sections: - sections.append(("Captured %s %s" %(key, rwhen), content)) + sections.append(("Captured %s %s" % (key, rwhen), content)) return TestReport(item.nodeid, item.location, keywords, outcome, longrepr, when, - sections, duration) + sections, duration, user_properties=item.user_properties) + class TestReport(BaseReport): """ Basic test report object (also used for setup and teardown calls if they fail). """ + def __init__(self, nodeid, location, keywords, outcome, - longrepr, when, sections=(), duration=0, **extra): + longrepr, when, sections=(), duration=0, user_properties=(), **extra): #: normalized collection node id self.nodeid = nodeid @@ -300,6 +348,10 @@ class TestReport(BaseReport): #: one of 'setup', 'call', 'teardown' to indicate runtest phase. self.when = when + #: user properties is a list of tuples (name, value) that holds user + #: defined properties of the test + self.user_properties = user_properties + #: list of pairs ``(str, str)`` of extra information which needs to #: marshallable. Used by pytest to add captured text #: from ``stdout`` and ``stderr``, but may be used by other plugins @@ -315,14 +367,17 @@ class TestReport(BaseReport): return "" % ( self.nodeid, self.when, self.outcome) + class TeardownErrorReport(BaseReport): outcome = "failed" when = "teardown" + def __init__(self, longrepr, **extra): self.longrepr = longrepr self.sections = [] self.__dict__.update(extra) + def pytest_make_collect_report(collector): call = CallInfo( lambda: list(collector.collect()), @@ -344,7 +399,7 @@ def pytest_make_collect_report(collector): errorinfo = CollectErrorRepr(errorinfo) longrepr = errorinfo rep = CollectReport(collector.nodeid, outcome, longrepr, - getattr(call, 'result', None)) + getattr(call, 'result', None)) rep.call = call # see collect_one_node return rep @@ -365,16 +420,20 @@ class CollectReport(BaseReport): def __repr__(self): return "" % ( - self.nodeid, len(self.result), self.outcome) + self.nodeid, len(self.result), self.outcome) + class CollectErrorRepr(TerminalRepr): def __init__(self, msg): self.longrepr = msg + def toterminal(self, out): out.line(self.longrepr, red=True) + class SetupState(object): """ shared state for setting up/tearing down test items or collectors. """ + def __init__(self): self.stack = [] self._finalizers = {} @@ -385,8 +444,8 @@ class SetupState(object): is called at the end of teardown_all(). 
""" assert colitem and not isinstance(colitem, tuple) - assert py.builtin.callable(finalizer) - #assert colitem in self.stack # some unit tests don't setup stack :/ + assert callable(finalizer) + # assert colitem in self.stack # some unit tests don't setup stack :/ self._finalizers.setdefault(colitem, []).append(finalizer) def _pop_and_teardown(self): @@ -400,7 +459,7 @@ class SetupState(object): fin = finalizers.pop() try: fin() - except Exception: + except TEST_OUTCOME: # XXX Only first exception will be seen by user, # ideally all should be reported. if exc is None: @@ -414,7 +473,7 @@ class SetupState(object): colitem.teardown() for colitem in self._finalizers: assert colitem is None or colitem in self.stack \ - or isinstance(colitem, tuple) + or isinstance(colitem, tuple) def teardown_all(self): while self.stack: @@ -447,10 +506,11 @@ class SetupState(object): self.stack.append(col) try: col.setup() - except Exception: + except TEST_OUTCOME: col._prepare_exc = sys.exc_info() raise + def collect_one_node(collector): ihook = collector.ihook ihook.pytest_collectstart(collector=collector) @@ -459,122 +519,3 @@ def collect_one_node(collector): if call and check_interactive_exception(call, rep): ihook.pytest_exception_interact(node=collector, call=call, report=rep) return rep - - -# ============================================================= -# Test OutcomeExceptions and helpers for creating them. - - -class OutcomeException(Exception): - """ OutcomeException and its subclass instances indicate and - contain info about test and collection outcomes. - """ - def __init__(self, msg=None, pytrace=True): - Exception.__init__(self, msg) - self.msg = msg - self.pytrace = pytrace - - def __repr__(self): - if self.msg: - val = self.msg - if isinstance(val, bytes): - val = py._builtin._totext(val, errors='replace') - return val - return "<%s instance>" %(self.__class__.__name__,) - __str__ = __repr__ - -class Skipped(OutcomeException): - # XXX hackish: on 3k we fake to live in the builtins - # in order to have Skipped exception printing shorter/nicer - __module__ = 'builtins' - - def __init__(self, msg=None, pytrace=True, allow_module_level=False): - OutcomeException.__init__(self, msg=msg, pytrace=pytrace) - self.allow_module_level = allow_module_level - - -class Failed(OutcomeException): - """ raised from an explicit call to pytest.fail() """ - __module__ = 'builtins' - - -class Exit(KeyboardInterrupt): - """ raised for immediate program exits (no tracebacks/summaries)""" - def __init__(self, msg="unknown reason"): - self.msg = msg - KeyboardInterrupt.__init__(self, msg) - -# exposed helper methods - -def exit(msg): - """ exit testing process as if KeyboardInterrupt was triggered. """ - __tracebackhide__ = True - raise Exit(msg) - - -exit.Exception = Exit - - -def skip(msg=""): - """ skip an executing test with the given message. Note: it's usually - better to use the pytest.mark.skipif marker to declare a test to be - skipped under certain conditions like mismatching platforms or - dependencies. See the pytest_skipping plugin for details. - """ - __tracebackhide__ = True - raise Skipped(msg=msg) - - -skip.Exception = Skipped - - -def fail(msg="", pytrace=True): - """ explicitly fail an currently-executing test with the given Message. - - :arg pytrace: if false the msg represents the full failure information - and no python traceback will be reported. 
- """ - __tracebackhide__ = True - raise Failed(msg=msg, pytrace=pytrace) - - -fail.Exception = Failed - - -def importorskip(modname, minversion=None): - """ return imported module if it has at least "minversion" as its - __version__ attribute. If no minversion is specified the a skip - is only triggered if the module can not be imported. - """ - import warnings - __tracebackhide__ = True - compile(modname, '', 'eval') # to catch syntaxerrors - should_skip = False - - with warnings.catch_warnings(): - # make sure to ignore ImportWarnings that might happen because - # of existing directories with the same name we're trying to - # import but without a __init__.py file - warnings.simplefilter('ignore') - try: - __import__(modname) - except ImportError: - # Do not raise chained exception here(#1485) - should_skip = True - if should_skip: - raise Skipped("could not import %r" %(modname,), allow_module_level=True) - mod = sys.modules[modname] - if minversion is None: - return mod - verattr = getattr(mod, '__version__', None) - if minversion is not None: - try: - from pkg_resources import parse_version as pv - except ImportError: - raise Skipped("we have a required version for %r but can not import " - "pkg_resources to parse version strings." % (modname,), - allow_module_level=True) - if verattr is None or pv(verattr) < pv(minversion): - raise Skipped("module %r has __version__ %r, required is: %r" %( - modname, verattr, minversion), allow_module_level=True) - return mod diff --git a/_pytest/setuponly.py b/_pytest/setuponly.py index 15e195ad5..a1c7457d7 100644 --- a/_pytest/setuponly.py +++ b/_pytest/setuponly.py @@ -44,7 +44,7 @@ def _show_fixture_action(fixturedef, msg): config = fixturedef._fixturemanager.config capman = config.pluginmanager.getplugin('capturemanager') if capman: - out, err = capman.suspendcapture() + out, err = capman.suspend_global_capture() tw = config.get_terminal_writer() tw.line() @@ -63,7 +63,7 @@ def _show_fixture_action(fixturedef, msg): tw.write('[{0}]'.format(fixturedef.cached_param)) if capman: - capman.resumecapture() + capman.resume_global_capture() sys.stdout.write(out) sys.stderr.write(err) diff --git a/_pytest/skipping.py b/_pytest/skipping.py index 5af1ca404..f62edcf9a 100644 --- a/_pytest/skipping.py +++ b/_pytest/skipping.py @@ -1,26 +1,22 @@ """ support for skip/xfail functions and markers. 
""" from __future__ import absolute_import, division, print_function -import os -import sys -import traceback - -import py from _pytest.config import hookimpl -from _pytest.mark import MarkInfo, MarkDecorator -from _pytest.runner import fail, skip +from _pytest.mark.evaluate import MarkEvaluator +from _pytest.outcomes import fail, skip, xfail + def pytest_addoption(parser): group = parser.getgroup("general") group.addoption('--runxfail', - action="store_true", dest="runxfail", default=False, - help="run tests even if they are marked xfail") + action="store_true", dest="runxfail", default=False, + help="run tests even if they are marked xfail") - parser.addini("xfail_strict", "default for the strict parameter of xfail " - "markers when not given explicitly (default: " - "False)", - default=False, - type="bool") + parser.addini("xfail_strict", + "default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool") def pytest_configure(config): @@ -33,151 +29,45 @@ def pytest_configure(config): def nop(*args, **kwargs): pass - nop.Exception = XFailed + nop.Exception = xfail.Exception setattr(pytest, "xfail", nop) config.addinivalue_line("markers", - "skip(reason=None): skip the given test function with an optional reason. " - "Example: skip(reason=\"no way of currently testing this\") skips the " - "test." - ) + "skip(reason=None): skip the given test function with an optional reason. " + "Example: skip(reason=\"no way of currently testing this\") skips the " + "test." + ) config.addinivalue_line("markers", - "skipif(condition): skip the given test function if eval(condition) " - "results in a True value. Evaluation happens within the " - "module global context. Example: skipif('sys.platform == \"win32\"') " - "skips the test if we are on the win32 platform. see " - "http://pytest.org/latest/skipping.html" - ) + "skipif(condition): skip the given test function if eval(condition) " + "results in a True value. Evaluation happens within the " + "module global context. Example: skipif('sys.platform == \"win32\"') " + "skips the test if we are on the win32 platform. see " + "http://pytest.org/latest/skipping.html" + ) config.addinivalue_line("markers", - "xfail(condition, reason=None, run=True, raises=None, strict=False): " - "mark the test function as an expected failure if eval(condition) " - "has a True value. Optionally specify a reason for better reporting " - "and run=False if you don't even want to execute the test function. " - "If only specific exception(s) are expected, you can list them in " - "raises, and if the test fails in other ways, it will be reported as " - "a true failure. 
See http://pytest.org/latest/skipping.html" - ) - - -class XFailed(fail.Exception): - """ raised from an explicit call to pytest.xfail() """ - - -def xfail(reason=""): - """ xfail an executing test or setup functions with the given reason.""" - __tracebackhide__ = True - raise XFailed(reason) - - -xfail.Exception = XFailed - - -class MarkEvaluator: - def __init__(self, item, name): - self.item = item - self.name = name - - @property - def holder(self): - return self.item.keywords.get(self.name) - - def __bool__(self): - return bool(self.holder) - __nonzero__ = __bool__ - - def wasvalid(self): - return not hasattr(self, 'exc') - - def invalidraise(self, exc): - raises = self.get('raises') - if not raises: - return - return not isinstance(exc, raises) - - def istrue(self): - try: - return self._istrue() - except Exception: - self.exc = sys.exc_info() - if isinstance(self.exc[1], SyntaxError): - msg = [" " * (self.exc[1].offset + 4) + "^", ] - msg.append("SyntaxError: invalid syntax") - else: - msg = traceback.format_exception_only(*self.exc[:2]) - fail("Error evaluating %r expression\n" - " %s\n" - "%s" - % (self.name, self.expr, "\n".join(msg)), - pytrace=False) - - def _getglobals(self): - d = {'os': os, 'sys': sys, 'config': self.item.config} - if hasattr(self.item, 'obj'): - d.update(self.item.obj.__globals__) - return d - - def _istrue(self): - if hasattr(self, 'result'): - return self.result - if self.holder: - if self.holder.args or 'condition' in self.holder.kwargs: - self.result = False - # "holder" might be a MarkInfo or a MarkDecorator; only - # MarkInfo keeps track of all parameters it received in an - # _arglist attribute - marks = getattr(self.holder, '_marks', None) \ - or [self.holder.mark] - for _, args, kwargs in marks: - if 'condition' in kwargs: - args = (kwargs['condition'],) - for expr in args: - self.expr = expr - if isinstance(expr, py.builtin._basestring): - d = self._getglobals() - result = cached_eval(self.item.config, expr, d) - else: - if "reason" not in kwargs: - # XXX better be checked at collection time - msg = "you need to specify reason=STRING " \ - "when using booleans as conditions." - fail(msg) - result = bool(expr) - if result: - self.result = True - self.reason = kwargs.get('reason', None) - self.expr = expr - return self.result - else: - self.result = True - return getattr(self, 'result', False) - - def get(self, attr, default=None): - return self.holder.kwargs.get(attr, default) - - def getexplanation(self): - expl = getattr(self, 'reason', None) or self.get('reason', None) - if not expl: - if not hasattr(self, 'expr'): - return "" - else: - return "condition: " + str(self.expr) - return expl + "xfail(condition, reason=None, run=True, raises=None, strict=False): " + "mark the test function as an expected failure if eval(condition) " + "has a True value. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. 
See http://pytest.org/latest/skipping.html" + ) @hookimpl(tryfirst=True) def pytest_runtest_setup(item): # Check if skip or skipif are specified as pytest marks + item._skipped_by_mark = False + eval_skipif = MarkEvaluator(item, 'skipif') + if eval_skipif.istrue(): + item._skipped_by_mark = True + skip(eval_skipif.getexplanation()) - skipif_info = item.keywords.get('skipif') - if isinstance(skipif_info, (MarkInfo, MarkDecorator)): - eval_skipif = MarkEvaluator(item, 'skipif') - if eval_skipif.istrue(): - item._evalskip = eval_skipif - skip(eval_skipif.getexplanation()) - - skip_info = item.keywords.get('skip') - if isinstance(skip_info, (MarkInfo, MarkDecorator)): - item._evalskip = True + for skip_info in item.iter_markers(): + if skip_info.name != 'skip': + continue + item._skipped_by_mark = True if 'reason' in skip_info.kwargs: skip(skip_info.kwargs['reason']) elif skip_info.args: @@ -224,7 +114,6 @@ def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() evalxfail = getattr(item, '_evalxfail', None) - evalskip = getattr(item, '_evalskip', None) # unitttest special case, see setting of _unexpectedsuccess if hasattr(item, '_unexpectedsuccess') and rep.when == "call": from _pytest.compat import _is_unittest_unexpected_success_a_failure @@ -238,12 +127,12 @@ def pytest_runtest_makereport(item, call): rep.outcome = "passed" rep.wasxfail = rep.longrepr elif item.config.option.runxfail: - pass # don't interefere + pass # don't interefere elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = "skipped" elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \ - evalxfail.istrue(): + evalxfail.istrue(): if call.excinfo: if evalxfail.invalidraise(call.excinfo.value): rep.outcome = "failed" @@ -260,7 +149,7 @@ def pytest_runtest_makereport(item, call): else: rep.outcome = "passed" rep.wasxfail = explanation - elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple: + elif getattr(item, '_skipped_by_mark', False) and rep.skipped and type(rep.longrepr) is tuple: # skipped by mark.skipif; change the location of the failure # to point to the item definition, otherwise it will display # the location of where the skip exception was raised within pytest @@ -268,7 +157,10 @@ def pytest_runtest_makereport(item, call): filename, line = item.location[:2] rep.longrepr = filename, line, reason + # called by terminalreporter progress reporting + + def pytest_report_teststatus(report): if hasattr(report, "wasxfail"): if report.skipped: @@ -276,11 +168,14 @@ def pytest_report_teststatus(report): elif report.passed: return "xpassed", "X", ("XPASS", {'yellow': True}) + # called by the terminalreporter instance/plugin + + def pytest_terminal_summary(terminalreporter): tr = terminalreporter if not tr.reportchars: - #for name in "xfailed skipped failed xpassed": + # for name in "xfailed skipped failed xpassed": # if not tr.stats.get(name, 0): # tr.write_line("HINT: use '-r' option to see extra " # "summary info about tests") @@ -289,18 +184,8 @@ def pytest_terminal_summary(terminalreporter): lines = [] for char in tr.reportchars: - if char == "x": - show_xfailed(terminalreporter, lines) - elif char == "X": - show_xpassed(terminalreporter, lines) - elif char in "fF": - show_simple(terminalreporter, lines, 'failed', "FAIL %s") - elif char in "sS": - show_skipped(terminalreporter, lines) - elif char == "E": - show_simple(terminalreporter, lines, 'error', "ERROR %s") - elif char == 'p': - 
show_simple(terminalreporter, lines, 'passed', "PASSED %s") + action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None) + action(terminalreporter, lines) if lines: tr._tw.sep("=", "short test summary info") @@ -336,45 +221,65 @@ def show_xpassed(terminalreporter, lines): lines.append("XPASS %s %s" % (pos, reason)) -def cached_eval(config, expr, d): - if not hasattr(config, '_evalcache'): - config._evalcache = {} - try: - return config._evalcache[expr] - except KeyError: - import _pytest._code - exprcode = _pytest._code.compile(expr, mode="eval") - config._evalcache[expr] = x = eval(exprcode, d) - return x - - def folded_skips(skipped): d = {} for event in skipped: key = event.longrepr assert len(key) == 3, (event, key) + keywords = getattr(event, 'keywords', {}) + # folding reports with global pytestmark variable + # this is workaround, because for now we cannot identify the scope of a skip marker + # TODO: revisit after marks scope would be fixed + when = getattr(event, 'when', None) + if when == 'setup' and 'skip' in keywords and 'pytestmark' not in keywords: + key = (key[0], None, key[2]) d.setdefault(key, []).append(event) - l = [] + values = [] for key, events in d.items(): - l.append((len(events),) + key) - return l + values.append((len(events),) + key) + return values def show_skipped(terminalreporter, lines): tr = terminalreporter skipped = tr.stats.get('skipped', []) if skipped: - #if not tr.hasopt('skipped'): + # if not tr.hasopt('skipped'): # tr.write_line( # "%d skipped tests, specify -rs for more info" % # len(skipped)) # return fskips = folded_skips(skipped) if fskips: - #tr.write_sep("_", "skipped test summary") + # tr.write_sep("_", "skipped test summary") for num, fspath, lineno, reason in fskips: if reason.startswith("Skipped: "): reason = reason[9:] - lines.append( - "SKIP [%d] %s:%d: %s" % - (num, fspath, lineno, reason)) + if lineno is not None: + lines.append( + "SKIP [%d] %s:%d: %s" % + (num, fspath, lineno + 1, reason)) + else: + lines.append( + "SKIP [%d] %s: %s" % + (num, fspath, reason)) + + +def shower(stat, format): + def show_(terminalreporter, lines): + return show_simple(terminalreporter, lines, stat, format) + + return show_ + + +REPORTCHAR_ACTIONS = { + 'x': show_xfailed, + 'X': show_xpassed, + 'f': shower('failed', "FAIL %s"), + 'F': shower('failed', "FAIL %s"), + 's': show_skipped, + 'S': show_skipped, + 'p': shower('passed', "PASSED %s"), + 'E': shower('error', "ERROR %s") + +} diff --git a/_pytest/terminal.py b/_pytest/terminal.py index af89d0fc2..f8ad33c10 100644 --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -5,50 +5,96 @@ This is a good source for looking at the various reporting hooks. 
from __future__ import absolute_import, division, print_function import itertools -from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \ - EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED -import pytest -import py +import platform import sys import time -import platform -import _pytest._pluggy as pluggy +import pluggy +import py +import six +from more_itertools import collapse + +import pytest +from _pytest import nodes +from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \ + EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED + + +import argparse + + +class MoreQuietAction(argparse.Action): + """ + a modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time + + used to unify verbosity handling + """ + def __init__(self, + option_strings, + dest, + default=None, + required=False, + help=None): + super(MoreQuietAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, 'quiet', 0) + 1 def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") - group._addoption('-v', '--verbose', action="count", - dest="verbose", default=0, help="increase verbosity."), - group._addoption('-q', '--quiet', action="count", - dest="quiet", default=0, help="decrease verbosity."), + group._addoption('-v', '--verbose', action="count", default=0, + dest="verbose", help="increase verbosity."), + group._addoption('-q', '--quiet', action=MoreQuietAction, default=0, + dest="verbose", help="decrease verbosity."), + group._addoption("--verbosity", dest='verbose', type=int, default=0, + help="set verbosity") group._addoption('-r', - action="store", dest="reportchars", default='', metavar="chars", - help="show extra test summary info as specified by chars (f)ailed, " - "(E)error, (s)skipped, (x)failed, (X)passed, " - "(p)passed, (P)passed with output, (a)all except pP. " - "Warnings are displayed at all times except when " - "--disable-warnings is set") + action="store", dest="reportchars", default='', metavar="chars", + help="show extra test summary info as specified by chars (f)ailed, " + "(E)error, (s)skipped, (x)failed, (X)passed, " + "(p)passed, (P)passed with output, (a)all except pP. 
" + "Warnings are displayed at all times except when " + "--disable-warnings is set") group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False, dest='disable_warnings', action='store_true', help='disable warnings summary') group._addoption('-l', '--showlocals', - action="store_true", dest="showlocals", default=False, - help="show locals in tracebacks (disabled by default).") + action="store_true", dest="showlocals", default=False, + help="show locals in tracebacks (disabled by default).") group._addoption('--tb', metavar="style", - action="store", dest="tbstyle", default='auto', - choices=['auto', 'long', 'short', 'no', 'line', 'native'], - help="traceback print mode (auto/long/short/line/native/no).") + action="store", dest="tbstyle", default='auto', + choices=['auto', 'long', 'short', 'no', 'line', 'native'], + help="traceback print mode (auto/long/short/line/native/no).") + group._addoption('--show-capture', + action="store", dest="showcapture", + choices=['no', 'stdout', 'stderr', 'log', 'all'], default='all', + help="Controls how captured stdout/stderr/log is shown on failed tests. " + "Default is 'all'.") group._addoption('--fulltrace', '--full-trace', - action="store_true", default=False, - help="don't cut any tracebacks (default is to cut).") + action="store_true", default=False, + help="don't cut any tracebacks (default is to cut).") group._addoption('--color', metavar="color", - action="store", dest="color", default='auto', - choices=['yes', 'no', 'auto'], - help="color terminal output (yes/no/auto).") + action="store", dest="color", default='auto', + choices=['yes', 'no', 'auto'], + help="color terminal output (yes/no/auto).") + + parser.addini("console_output_style", + help="console output: classic or with additional progress information (classic|progress).", + default='progress') + def pytest_configure(config): - config.option.verbose -= config.option.quiet reporter = TerminalReporter(config, sys.stdout) config.pluginmanager.register(reporter, 'terminalreporter') if config.option.debug or config.option.traceconfig: @@ -57,6 +103,7 @@ def pytest_configure(config): reporter.write_line("[traceconfig] " + msg) config.trace.root.setprocessor("pytest:config", mywriter) + def getreportopt(config): reportopts = "" reportchars = config.option.reportchars @@ -72,6 +119,7 @@ def getreportopt(config): reportopts = 'fEsxXw' return reportopts + def pytest_report_teststatus(report): if report.passed: letter = "." @@ -84,10 +132,11 @@ def pytest_report_teststatus(report): return report.outcome, letter, report.outcome.upper() -class WarningReport: +class WarningReport(object): """ Simple structure to hold warnings information captured by ``pytest_logwarning``. 
""" + def __init__(self, code, message, nodeid=None, fslocation=None): """ :param code: unused @@ -118,7 +167,7 @@ class WarningReport: return None -class TerminalReporter: +class TerminalReporter(object): def __init__(self, config, file=None): import _pytest.config self.config = config @@ -127,17 +176,32 @@ class TerminalReporter: self.showfspath = self.verbosity >= 0 self.showlongtestinfo = self.verbosity > 0 self._numcollected = 0 + self._session = None self.stats = {} self.startdir = py.path.local() if file is None: file = sys.stdout - self._tw = self.writer = _pytest.config.create_terminal_writer(config, - file) + self._tw = _pytest.config.create_terminal_writer(config, file) + # self.writer will be deprecated in pytest-3.4 + self.writer = self._tw + self._screen_width = self._tw.fullwidth self.currentfspath = None self.reportchars = getreportopt(config) self.hasmarkup = self._tw.hasmarkup self.isatty = file.isatty() + self._progress_nodeids_reported = set() + self._show_progress_info = self._determine_show_progress_info() + + def _determine_show_progress_info(self): + """Return True if we should display progress information based on the current config""" + # do not show progress if we are not capturing output (#3038) + if self.config.getoption('capture') == 'no': + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption('setupshow'): + return False + return self.config.getini('console_output_style') == 'progress' def hasopt(self, char): char = {'xfailed': 'x', 'skipped': 's'}.get(char, char) @@ -146,6 +210,8 @@ class TerminalReporter: def write_fspath_result(self, nodeid, res): fspath = self.config.rootdir.join(nodeid.split("::")[0]) if fspath != self.currentfspath: + if self.currentfspath is not None: + self._write_progress_information_filling_space() self.currentfspath = fspath fspath = self.startdir.bestrelpath(fspath) self._tw.line() @@ -170,14 +236,28 @@ class TerminalReporter: self._tw.write(content, **markup) def write_line(self, line, **markup): - if not py.builtin._istext(line): - line = py.builtin.text(line, errors="replace") + if not isinstance(line, six.text_type): + line = six.text_type(line, errors="replace") self.ensure_newline() self._tw.line(line, **markup) def rewrite(self, line, **markup): + """ + Rewinds the terminal cursor to the beginning and writes the given line. + + :kwarg erase: if True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. 
+ """ + erase = markup.pop('erase', False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = ' ' * fill_count + else: + fill = '' line = str(line) - self._tw.write("\r" + line, **markup) + self._tw.write("\r" + line + fill, **markup) def write_sep(self, sep, title=None, **markup): self.ensure_newline() @@ -190,7 +270,7 @@ class TerminalReporter: self._tw.line(msg, **kw) def pytest_internalerror(self, excrepr): - for line in py.builtin.text(excrepr).split("\n"): + for line in six.text_type(excrepr).split("\n"): self.write_line("INTERNALERROR> " + line) return 1 @@ -225,38 +305,76 @@ class TerminalReporter: rep = report res = self.config.hook.pytest_report_teststatus(report=rep) cat, letter, word = res + if isinstance(word, tuple): + word, markup = word + else: + markup = None self.stats.setdefault(cat, []).append(rep) self._tests_ran = True if not letter and not word: # probably passed setup/teardown return + running_xdist = hasattr(rep, 'node') if self.verbosity <= 0: - if not hasattr(rep, 'node') and self.showfspath: + if not running_xdist and self.showfspath: self.write_fspath_result(rep.nodeid, letter) else: self._tw.write(letter) else: - if isinstance(word, tuple): - word, markup = word - else: + self._progress_nodeids_reported.add(rep.nodeid) + if markup is None: if rep.passed: - markup = {'green':True} + markup = {'green': True} elif rep.failed: - markup = {'red':True} + markup = {'red': True} elif rep.skipped: - markup = {'yellow':True} + markup = {'yellow': True} + else: + markup = {} line = self._locationline(rep.nodeid, *rep.location) - if not hasattr(rep, 'node'): + if not running_xdist: self.write_ensure_prefix(line, word, **markup) - #self._tw.write(word, **markup) + if self._show_progress_info: + self._write_progress_information_filling_space() else: self.ensure_newline() - if hasattr(rep, 'node'): - self._tw.write("[%s] " % rep.node.gateway.id) + self._tw.write("[%s]" % rep.node.gateway.id) + if self._show_progress_info: + self._tw.write(self._get_progress_information_message() + " ", cyan=True) + else: + self._tw.write(' ') self._tw.write(word, **markup) self._tw.write(" " + line) self.currentfspath = -2 + def pytest_runtest_logfinish(self, nodeid): + if self.verbosity <= 0 and self._show_progress_info: + self._progress_nodeids_reported.add(nodeid) + last_item = len(self._progress_nodeids_reported) == self._session.testscollected + if last_item: + self._write_progress_information_filling_space() + else: + past_edge = self._tw.chars_on_current_line + self._PROGRESS_LENGTH + 1 >= self._screen_width + if past_edge: + msg = self._get_progress_information_message() + self._tw.write(msg + '\n', cyan=True) + + _PROGRESS_LENGTH = len(' [100%]') + + def _get_progress_information_message(self): + if self.config.getoption('capture') == 'no': + return '' + collected = self._session.testscollected + if collected: + progress = len(self._progress_nodeids_reported) * 100 // collected + return ' [{:3d}%]'.format(progress) + return ' [100%]' + + def _write_progress_information_filling_space(self): + msg = self._get_progress_information_message() + fill = ' ' * (self._tw.fullwidth - self._tw.chars_on_current_line - len(msg) - 1) + self.write(fill + msg, cyan=True) + def pytest_collection(self): if not self.isatty and self.config.option.verbose >= 1: self.write("collecting ... 
", bold=True) @@ -269,7 +387,7 @@ class TerminalReporter: items = [x for x in report.result if isinstance(x, pytest.Item)] self._numcollected += len(items) if self.isatty: - #self.write_fspath_result(report.nodeid, 'E') + # self.write_fspath_result(report.nodeid, 'E') self.report_collect() def report_collect(self, final=False): @@ -278,6 +396,7 @@ class TerminalReporter: errors = len(self.stats.get('error', [])) skipped = len(self.stats.get('skipped', [])) + deselected = len(self.stats.get('deselected', [])) if final: line = "collected " else: @@ -285,20 +404,24 @@ class TerminalReporter: line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's') if errors: line += " / %d errors" % errors + if deselected: + line += " / %d deselected" % deselected if skipped: line += " / %d skipped" % skipped if self.isatty: + self.rewrite(line, bold=True, erase=True) if final: - line += " \n" - self.rewrite(line, bold=True) + self.write('\n') else: self.write_line(line) + @pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(self): self.report_collect(True) @pytest.hookimpl(trylast=True) def pytest_sessionstart(self, session): + self._session = session self._sessionstarttime = time.time() if not self.showheader: return @@ -316,8 +439,11 @@ class TerminalReporter: self.write_line(msg) lines = self.config.hook.pytest_report_header( config=self.config, startdir=self.startdir) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks(self, lines): lines.reverse() - for line in flatten(lines): + for line in collapse(lines): self.write_line(line) def pytest_report_header(self, config): @@ -342,10 +468,9 @@ class TerminalReporter: rep.toterminal(self._tw) return 1 return 0 - if not self.showheader: - return - #for i, testarg in enumerate(self.config.args): - # self.write_line("test path %d: %s" %(i+1, testarg)) + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, startdir=self.startdir, items=session.items) + self._write_report_lines_from_hooks(lines) def _printcollecteditems(self, items): # to print out items and their parent collectors @@ -368,14 +493,14 @@ class TerminalReporter: stack = [] indent = "" for item in items: - needed_collectors = item.listchain()[1:] # strip root node + needed_collectors = item.listchain()[1:] # strip root node while stack: if stack == needed_collectors[:len(stack)]: break stack.pop() for col in needed_collectors[len(stack):]: stack.append(col) - #if col.name == "()": + # if col.name == "()": # continue indent = (len(stack) - 1) * " " self._tw.line("%s%s" % (indent, col)) @@ -391,16 +516,19 @@ class TerminalReporter: if exitstatus in summary_exit_codes: self.config.hook.pytest_terminal_summary(terminalreporter=self, exitstatus=exitstatus) - self.summary_errors() - self.summary_failures() - self.summary_warnings() - self.summary_passes() if exitstatus == EXIT_INTERRUPTED: self._report_keyboardinterrupt() del self._keyboardinterrupt_memo - self.summary_deselected() self.summary_stats() + @pytest.hookimpl(hookwrapper=True) + def pytest_terminal_summary(self): + self.summary_errors() + self.summary_failures() + yield + self.summary_warnings() + self.summary_passes() + def pytest_keyboard_interrupt(self, excinfo): self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) @@ -424,15 +552,15 @@ class TerminalReporter: line = self.config.cwd_relative_nodeid(nodeid) if domain and line.endswith(domain): line = line[:-len(domain)] - l = domain.split("[") - l[0] = l[0].replace('.', '::') # don't 
replace '.' in params - line += "[".join(l) + values = domain.split("[") + values[0] = values[0].replace('.', '::') # don't replace '.' in params + line += "[".join(values) return line # collect_fspath comes from testid which has a "/"-normalized path if fspath: res = mkrel(nodeid).replace("::()", "") # parens-normalization - if nodeid.split("::")[0] != fspath.replace("\\", "/"): + if nodeid.split("::")[0] != fspath.replace("\\", nodes.SEP): res += " <- " + self.startdir.bestrelpath(fspath) else: res = "[location]" @@ -443,7 +571,7 @@ class TerminalReporter: fspath, lineno, domain = rep.location return domain else: - return "test session" # XXX? + return "test session" # XXX? def _getcrashline(self, rep): try: @@ -458,11 +586,11 @@ class TerminalReporter: # summaries for sessionfinish # def getreports(self, name): - l = [] + values = [] for x in self.stats.get(name, []): if not hasattr(x, '_pdbshown'): - l.append(x) - return l + values.append(x) + return values def summary_warnings(self): if self.hasopt("w"): @@ -473,9 +601,9 @@ class TerminalReporter: grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config)) self.write_sep("=", "warnings summary", yellow=True, bold=False) - for location, warnings in grouped: + for location, warning_records in grouped: self._tw.line(str(location) or '') - for w in warnings: + for w in warning_records: lines = w.message.splitlines() indented = '\n'.join(' ' + x for x in lines) self._tw.line(indented) @@ -502,7 +630,6 @@ class TerminalReporter: content = content[:-1] self._tw.line(content) - def summary_failures(self): if self.config.option.tbstyle != "no": reports = self.getreports('failed') @@ -542,7 +669,12 @@ class TerminalReporter: def _outrep_summary(self, rep): rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == 'no': + return for secname, content in rep.sections: + if showcapture != 'all' and showcapture not in secname: + continue self._tw.sep("-", secname) if content[-1:] == "\n": content = content[:-1] @@ -559,10 +691,6 @@ class TerminalReporter: if self.verbosity == -1: self.write_line(msg, **markup) - def summary_deselected(self): - if 'deselected' in self.stats: - self.write_sep("=", "%d tests deselected" % ( - len(self.stats['deselected'])), bold=True) def repr_pythonversion(v=None): if v is None: @@ -572,13 +700,6 @@ def repr_pythonversion(v=None): except (TypeError, ValueError): return str(v) -def flatten(l): - for x in l: - if isinstance(x, (list, tuple)): - for y in flatten(x): - yield y - else: - yield x def build_summary_stats_line(stats): keys = ("failed passed skipped deselected " @@ -586,7 +707,7 @@ def build_summary_stats_line(stats): unknown_key_seen = False for key in stats.keys(): if key not in keys: - if key: # setup/teardown reports have an empty key, ignore them + if key: # setup/teardown reports have an empty key, ignore them keys.append(key) unknown_key_seen = True parts = [] @@ -613,7 +734,7 @@ def build_summary_stats_line(stats): def _plugin_nameversions(plugininfo): - l = [] + values = [] for plugin, dist in plugininfo: # gets us name and version! 
name = '{dist.project_name}-{dist.version}'.format(dist=dist) @@ -622,6 +743,6 @@ def _plugin_nameversions(plugininfo): name = name[7:] # we decided to print python package names # they can have more than one plugin - if name not in l: - l.append(name) - return l + if name not in values: + values.append(name) + return values diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py index 596014059..315ead302 100644 --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -8,7 +8,7 @@ import py from _pytest.monkeypatch import MonkeyPatch -class TempdirFactory: +class TempdirFactory(object): """Factory for temporary directories under the common base temp directory. The base directory can be configured using the ``--basetemp`` option. @@ -25,7 +25,7 @@ class TempdirFactory: provides an empty unique-per-test-invocation directory and is guaranteed to be empty. """ - #py.log._apiwarn(">1.1", "use tmpdir function argument") + # py.log._apiwarn(">1.1", "use tmpdir function argument") return self.getbasetemp().ensure(string, dir=dir) def mktemp(self, basename, numbered=True): @@ -38,7 +38,7 @@ class TempdirFactory: p = basetemp.mkdir(basename) else: p = py.path.local.make_numbered_dir(prefix=basename, - keep=0, rootdir=basetemp, lock_timeout=None) + keep=0, rootdir=basetemp, lock_timeout=None) self.trace("mktemp", p) return p @@ -116,6 +116,8 @@ def tmpdir(request, tmpdir_factory): created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. + + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html """ name = request.node.name name = re.sub(r"[\W]", "_", name) diff --git a/_pytest/unittest.py b/_pytest/unittest.py index 0cf0f1726..3ddb39495 100644 --- a/_pytest/unittest.py +++ b/_pytest/unittest.py @@ -7,9 +7,8 @@ import traceback # for transferring markers import _pytest._code from _pytest.config import hookimpl -from _pytest.runner import fail, skip +from _pytest.outcomes import fail, skip, xfail from _pytest.python import transfer_markers, Class, Module, Function -from _pytest.skipping import MarkEvaluator, xfail def pytest_pycollect_makeitem(collector, name, obj): @@ -109,13 +108,13 @@ class TestCaseFunction(Function): except TypeError: try: try: - l = traceback.format_exception(*rawexcinfo) - l.insert(0, "NOTE: Incompatible Exception Representation, " - "displaying natively:\n\n") - fail("".join(l), pytrace=False) + values = traceback.format_exception(*rawexcinfo) + values.insert(0, "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n") + fail("".join(values), pytrace=False) except (fail.Exception, KeyboardInterrupt): raise - except: + except: # noqa fail("ERROR: Unknown Incompatible Exception " "representation:\n%r" % (rawexcinfo,), pytrace=False) except KeyboardInterrupt: @@ -134,8 +133,7 @@ class TestCaseFunction(Function): try: skip(reason) except skip.Exception: - self._evalskip = MarkEvaluator(self, 'SkipTest') - self._evalskip.result = True + self._skipped_by_mark = True self._addexcinfo(sys.exc_info()) def addExpectedFailure(self, testcase, rawexcinfo, reason=""): @@ -158,7 +156,7 @@ class TestCaseFunction(Function): # analog to pythons Lib/unittest/case.py:run testMethod = getattr(self._testcase, self._testcase._testMethodName) if (getattr(self._testcase.__class__, "__unittest_skip__", False) or - getattr(testMethod, "__unittest_skip__", False)): + getattr(testMethod, "__unittest_skip__", False)): # If the class or method was skipped. 
skip_why = (getattr(self._testcase.__class__, '__unittest_skip_why__', '') or getattr(testMethod, '__unittest_skip_why__', '')) @@ -210,7 +208,7 @@ def pytest_runtest_protocol(item): check_testcase_implements_trial_reporter() def excstore(self, exc_value=None, exc_type=None, exc_tb=None, - captureVars=None): + captureVars=None): if exc_value is None: self._rawexcinfo = sys.exc_info() else: @@ -219,7 +217,7 @@ def pytest_runtest_protocol(item): self._rawexcinfo = (exc_type, exc_value, exc_tb) try: Failure__init__(self, exc_value, exc_type, exc_tb, - captureVars=captureVars) + captureVars=captureVars) except TypeError: Failure__init__(self, exc_value, exc_type, exc_tb) diff --git a/_pytest/vendored_packages/README.md b/_pytest/vendored_packages/README.md deleted file mode 100644 index b5fe6febb..000000000 --- a/_pytest/vendored_packages/README.md +++ /dev/null @@ -1,13 +0,0 @@ -This directory vendors the `pluggy` module. - -For a more detailed discussion for the reasons to vendoring this -package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944). - -To update the current version, execute: - -``` -$ pip install -U pluggy== --no-compile --target=_pytest/vendored_packages -``` - -And commit the modified files. The `pluggy-.dist-info` directory -created by `pip` should be added as well. diff --git a/_pytest/vendored_packages/__init__.py b/_pytest/vendored_packages/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst b/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst deleted file mode 100644 index da0e7a6ed..000000000 --- a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,11 +0,0 @@ - -Plugin registration and hook calling for Python -=============================================== - -This is the plugin manager as used by pytest but stripped -of pytest specific details. - -During the 0.x series this plugin does not have much documentation -except extensive docstrings in the pluggy.py module. - - diff --git a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER b/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e38..000000000 --- a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt b/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt deleted file mode 100644 index 121017d08..000000000 --- a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA b/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA deleted file mode 100644 index bd88517c9..000000000 --- a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA +++ /dev/null @@ -1,40 +0,0 @@ -Metadata-Version: 2.0 -Name: pluggy -Version: 0.4.0 -Summary: plugin and hook calling mechanisms for python -Home-page: https://github.com/pytest-dev/pluggy -Author: Holger Krekel -Author-email: holger at merlinux.eu -License: MIT license -Platform: unix -Platform: linux -Platform: osx -Platform: win32 -Classifier: Development Status :: 4 - Beta -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: POSIX -Classifier: Operating System :: Microsoft :: Windows -Classifier: Operating System :: MacOS :: MacOS X -Classifier: Topic :: Software Development :: Testing -Classifier: Topic :: Software Development :: Libraries -Classifier: Topic :: Utilities -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 - - -Plugin registration and hook calling for Python -=============================================== - -This is the plugin manager as used by pytest but stripped -of pytest specific details. - -During the 0.x series this plugin does not have much documentation -except extensive docstrings in the pluggy.py module. 
- - diff --git a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD b/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD deleted file mode 100644 index 3003a3bf2..000000000 --- a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD +++ /dev/null @@ -1,9 +0,0 @@ -pluggy.py,sha256=u0oG9cv-oLOkNvEBlwnnu8pp1AyxpoERgUO00S3rvpQ,31543 -pluggy-0.4.0.dist-info/DESCRIPTION.rst,sha256=ltvjkFd40LW_xShthp6RRVM6OB_uACYDFR3kTpKw7o4,307 -pluggy-0.4.0.dist-info/LICENSE.txt,sha256=ruwhUOyV1HgE9F35JVL9BCZ9vMSALx369I4xq9rhpkM,1134 -pluggy-0.4.0.dist-info/METADATA,sha256=pe2hbsqKFaLHC6wAQPpFPn0KlpcPfLBe_BnS4O70bfk,1364 -pluggy-0.4.0.dist-info/RECORD,, -pluggy-0.4.0.dist-info/WHEEL,sha256=9Z5Xm-eel1bTS7e6ogYiKz0zmPEqDwIypurdHN1hR40,116 -pluggy-0.4.0.dist-info/metadata.json,sha256=T3go5L2qOa_-H-HpCZi3EoVKb8sZ3R-fOssbkWo2nvM,1119 -pluggy-0.4.0.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7 -pluggy-0.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 diff --git a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL b/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL deleted file mode 100644 index 8b6dd1b5a..000000000 --- a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json b/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json deleted file mode 100644 index cde22aff0..000000000 --- a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.details": {"contacts": [{"email": "holger at merlinux.eu", "name": "Holger Krekel", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/pytest-dev/pluggy"}}}, "generator": "bdist_wheel (0.29.0)", "license": "MIT license", "metadata_version": "2.0", "name": "pluggy", "platform": "unix", "summary": "plugin and hook calling mechanisms for python", "version": "0.4.0"} \ No newline at end of file diff --git a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt b/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt deleted file mode 100644 index 11bdb5c1f..000000000 --- a/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -pluggy diff --git a/_pytest/vendored_packages/pluggy.py b/_pytest/vendored_packages/pluggy.py deleted file mode 100644 index aebddad01..000000000 --- a/_pytest/vendored_packages/pluggy.py +++ /dev/null @@ -1,802 +0,0 @@ -""" -PluginManager, basic initialization and tracing. - -pluggy is the cristallized core of plugin management as used -by some 150 plugins for pytest. - -Pluggy uses semantic versioning. 
Breaking changes are only foreseen for -Major releases (incremented X in "X.Y.Z"). If you want to use pluggy in -your project you should thus use a dependency restriction like -"pluggy>=0.1.0,<1.0" to avoid surprises. - -pluggy is concerned with hook specification, hook implementations and hook -calling. For any given hook specification a hook call invokes up to N implementations. -A hook implementation can influence its position and type of execution: -if attributed "tryfirst" or "trylast" it will be tried to execute -first or last. However, if attributed "hookwrapper" an implementation -can wrap all calls to non-hookwrapper implementations. A hookwrapper -can thus execute some code ahead and after the execution of other hooks. - -Hook specification is done by way of a regular python function where -both the function name and the names of all its arguments are significant. -Each hook implementation function is verified against the original specification -function, including the names of all its arguments. To allow for hook specifications -to evolve over the livetime of a project, hook implementations can -accept less arguments. One can thus add new arguments and semantics to -a hook specification by adding another argument typically without breaking -existing hook implementations. - -The chosen approach is meant to let a hook designer think carefuly about -which objects are needed by an extension writer. By contrast, subclass-based -extension mechanisms often expose a lot more state and behaviour than needed, -thus restricting future developments. - -Pluggy currently consists of functionality for: - -- a way to register new hook specifications. Without a hook - specification no hook calling can be performed. - -- a registry of plugins which contain hook implementation functions. It - is possible to register plugins for which a hook specification is not yet - known and validate all hooks when the system is in a more referentially - consistent state. Setting an "optionalhook" attribution to a hook - implementation will avoid PluginValidationError's if a specification - is missing. This allows to have optional integration between plugins. - -- a "hook" relay object from which you can launch 1:N calls to - registered hook implementation functions - -- a mechanism for ordering hook implementation functions - -- mechanisms for two different type of 1:N calls: "firstresult" for when - the call should stop when the first implementation returns a non-None result. - And the other (default) way of guaranteeing that all hook implementations - will be called and their non-None result collected. - -- mechanisms for "historic" extension points such that all newly - registered functions will receive all hook calls that happened - before their registration. - -- a mechanism for discovering plugin objects which are based on - setuptools based entry points. - -- a simple tracing mechanism, including tracing of plugin calls and - their arguments. - -""" -import sys -import inspect - -__version__ = '0.4.0' - -__all__ = ["PluginManager", "PluginValidationError", "HookCallError", - "HookspecMarker", "HookimplMarker"] - -_py3 = sys.version_info > (3, 0) - - -class HookspecMarker: - """ Decorator helper class for marking functions as hook specifications. - - You can instantiate it with a project_name to get a decorator. - Calling PluginManager.add_hookspecs later will discover all marked functions - if the PluginManager uses the same project_name. 
- """ - - def __init__(self, project_name): - self.project_name = project_name - - def __call__(self, function=None, firstresult=False, historic=False): - """ if passed a function, directly sets attributes on the function - which will make it discoverable to add_hookspecs(). If passed no - function, returns a decorator which can be applied to a function - later using the attributes supplied. - - If firstresult is True the 1:N hook call (N being the number of registered - hook implementation functions) will stop at I<=N when the I'th function - returns a non-None result. - - If historic is True calls to a hook will be memorized and replayed - on later registered plugins. - - """ - def setattr_hookspec_opts(func): - if historic and firstresult: - raise ValueError("cannot have a historic firstresult hook") - setattr(func, self.project_name + "_spec", - dict(firstresult=firstresult, historic=historic)) - return func - - if function is not None: - return setattr_hookspec_opts(function) - else: - return setattr_hookspec_opts - - -class HookimplMarker: - """ Decorator helper class for marking functions as hook implementations. - - You can instantiate with a project_name to get a decorator. - Calling PluginManager.register later will discover all marked functions - if the PluginManager uses the same project_name. - """ - def __init__(self, project_name): - self.project_name = project_name - - def __call__(self, function=None, hookwrapper=False, optionalhook=False, - tryfirst=False, trylast=False): - - """ if passed a function, directly sets attributes on the function - which will make it discoverable to register(). If passed no function, - returns a decorator which can be applied to a function later using - the attributes supplied. - - If optionalhook is True a missing matching hook specification will not result - in an error (by default it is an error if no matching spec is found). - - If tryfirst is True this hook implementation will run as early as possible - in the chain of N hook implementations for a specfication. - - If trylast is True this hook implementation will run as late as possible - in the chain of N hook implementations. - - If hookwrapper is True the hook implementations needs to execute exactly - one "yield". The code before the yield is run early before any non-hookwrapper - function is run. The code after the yield is run after all non-hookwrapper - function have run. The yield receives an ``_CallOutcome`` object representing - the exception or result outcome of the inner calls (including other hookwrapper - calls). 
- - """ - def setattr_hookimpl_opts(func): - setattr(func, self.project_name + "_impl", - dict(hookwrapper=hookwrapper, optionalhook=optionalhook, - tryfirst=tryfirst, trylast=trylast)) - return func - - if function is None: - return setattr_hookimpl_opts - else: - return setattr_hookimpl_opts(function) - - -def normalize_hookimpl_opts(opts): - opts.setdefault("tryfirst", False) - opts.setdefault("trylast", False) - opts.setdefault("hookwrapper", False) - opts.setdefault("optionalhook", False) - - -class _TagTracer: - def __init__(self): - self._tag2proc = {} - self.writer = None - self.indent = 0 - - def get(self, name): - return _TagTracerSub(self, (name,)) - - def format_message(self, tags, args): - if isinstance(args[-1], dict): - extra = args[-1] - args = args[:-1] - else: - extra = {} - - content = " ".join(map(str, args)) - indent = " " * self.indent - - lines = [ - "%s%s [%s]\n" % (indent, content, ":".join(tags)) - ] - - for name, value in extra.items(): - lines.append("%s %s: %s\n" % (indent, name, value)) - return lines - - def processmessage(self, tags, args): - if self.writer is not None and args: - lines = self.format_message(tags, args) - self.writer(''.join(lines)) - try: - self._tag2proc[tags](tags, args) - except KeyError: - pass - - def setwriter(self, writer): - self.writer = writer - - def setprocessor(self, tags, processor): - if isinstance(tags, str): - tags = tuple(tags.split(":")) - else: - assert isinstance(tags, tuple) - self._tag2proc[tags] = processor - - -class _TagTracerSub: - def __init__(self, root, tags): - self.root = root - self.tags = tags - - def __call__(self, *args): - self.root.processmessage(self.tags, args) - - def setmyprocessor(self, processor): - self.root.setprocessor(self.tags, processor) - - def get(self, name): - return self.__class__(self.root, self.tags + (name,)) - - -def _raise_wrapfail(wrap_controller, msg): - co = wrap_controller.gi_code - raise RuntimeError("wrap_controller at %r %s:%d %s" % - (co.co_name, co.co_filename, co.co_firstlineno, msg)) - - -def _wrapped_call(wrap_controller, func): - """ Wrap calling to a function with a generator which needs to yield - exactly once. The yield point will trigger calling the wrapped function - and return its _CallOutcome to the yield point. The generator then needs - to finish (raise StopIteration) in order for the wrapped call to complete. - """ - try: - next(wrap_controller) # first yield - except StopIteration: - _raise_wrapfail(wrap_controller, "did not yield") - call_outcome = _CallOutcome(func) - try: - wrap_controller.send(call_outcome) - _raise_wrapfail(wrap_controller, "has second yield") - except StopIteration: - pass - return call_outcome.get_result() - - -class _CallOutcome: - """ Outcome of a function call, either an exception or a proper result. - Calling the ``get_result`` method will return the result or reraise - the exception raised when the function was called. 
""" - excinfo = None - - def __init__(self, func): - try: - self.result = func() - except BaseException: - self.excinfo = sys.exc_info() - - def force_result(self, result): - self.result = result - self.excinfo = None - - def get_result(self): - if self.excinfo is None: - return self.result - else: - ex = self.excinfo - if _py3: - raise ex[1].with_traceback(ex[2]) - _reraise(*ex) # noqa - -if not _py3: - exec(""" -def _reraise(cls, val, tb): - raise cls, val, tb -""") - - -class _TracedHookExecution: - def __init__(self, pluginmanager, before, after): - self.pluginmanager = pluginmanager - self.before = before - self.after = after - self.oldcall = pluginmanager._inner_hookexec - assert not isinstance(self.oldcall, _TracedHookExecution) - self.pluginmanager._inner_hookexec = self - - def __call__(self, hook, hook_impls, kwargs): - self.before(hook.name, hook_impls, kwargs) - outcome = _CallOutcome(lambda: self.oldcall(hook, hook_impls, kwargs)) - self.after(outcome, hook.name, hook_impls, kwargs) - return outcome.get_result() - - def undo(self): - self.pluginmanager._inner_hookexec = self.oldcall - - -class PluginManager(object): - """ Core Pluginmanager class which manages registration - of plugin objects and 1:N hook calling. - - You can register new hooks by calling ``add_hookspec(module_or_class)``. - You can register plugin objects (which contain hooks) by calling - ``register(plugin)``. The Pluginmanager is initialized with a - prefix that is searched for in the names of the dict of registered - plugin objects. An optional excludefunc allows to blacklist names which - are not considered as hooks despite a matching prefix. - - For debugging purposes you can call ``enable_tracing()`` - which will subsequently send debug information to the trace helper. - """ - - def __init__(self, project_name, implprefix=None): - """ if implprefix is given implementation functions - will be recognized if their name matches the implprefix. """ - self.project_name = project_name - self._name2plugin = {} - self._plugin2hookcallers = {} - self._plugin_distinfo = [] - self.trace = _TagTracer().get("pluginmanage") - self.hook = _HookRelay(self.trace.root.get("hook")) - self._implprefix = implprefix - self._inner_hookexec = lambda hook, methods, kwargs: \ - _MultiCall(methods, kwargs, hook.spec_opts).execute() - - def _hookexec(self, hook, methods, kwargs): - # called from all hookcaller instances. - # enable_tracing will set its own wrapping function at self._inner_hookexec - return self._inner_hookexec(hook, methods, kwargs) - - def register(self, plugin, name=None): - """ Register a plugin and return its canonical name or None if the name - is blocked from registering. Raise a ValueError if the plugin is already - registered. 
""" - plugin_name = name or self.get_canonical_name(plugin) - - if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers: - if self._name2plugin.get(plugin_name, -1) is None: - return # blocked plugin, return None to indicate no registration - raise ValueError("Plugin already registered: %s=%s\n%s" % - (plugin_name, plugin, self._name2plugin)) - - # XXX if an error happens we should make sure no state has been - # changed at point of return - self._name2plugin[plugin_name] = plugin - - # register matching hook implementations of the plugin - self._plugin2hookcallers[plugin] = hookcallers = [] - for name in dir(plugin): - hookimpl_opts = self.parse_hookimpl_opts(plugin, name) - if hookimpl_opts is not None: - normalize_hookimpl_opts(hookimpl_opts) - method = getattr(plugin, name) - hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts) - hook = getattr(self.hook, name, None) - if hook is None: - hook = _HookCaller(name, self._hookexec) - setattr(self.hook, name, hook) - elif hook.has_spec(): - self._verify_hook(hook, hookimpl) - hook._maybe_apply_history(hookimpl) - hook._add_hookimpl(hookimpl) - hookcallers.append(hook) - return plugin_name - - def parse_hookimpl_opts(self, plugin, name): - method = getattr(plugin, name) - try: - res = getattr(method, self.project_name + "_impl", None) - except Exception: - res = {} - if res is not None and not isinstance(res, dict): - # false positive - res = None - elif res is None and self._implprefix and name.startswith(self._implprefix): - res = {} - return res - - def unregister(self, plugin=None, name=None): - """ unregister a plugin object and all its contained hook implementations - from internal data structures. """ - if name is None: - assert plugin is not None, "one of name or plugin needs to be specified" - name = self.get_name(plugin) - - if plugin is None: - plugin = self.get_plugin(name) - - # if self._name2plugin[name] == None registration was blocked: ignore - if self._name2plugin.get(name): - del self._name2plugin[name] - - for hookcaller in self._plugin2hookcallers.pop(plugin, []): - hookcaller._remove_plugin(plugin) - - return plugin - - def set_blocked(self, name): - """ block registrations of the given name, unregister if already registered. """ - self.unregister(name=name) - self._name2plugin[name] = None - - def is_blocked(self, name): - """ return True if the name blogs registering plugins of that name. """ - return name in self._name2plugin and self._name2plugin[name] is None - - def add_hookspecs(self, module_or_class): - """ add new hook specifications defined in the given module_or_class. - Functions are recognized if they have been decorated accordingly. 
""" - names = [] - for name in dir(module_or_class): - spec_opts = self.parse_hookspec_opts(module_or_class, name) - if spec_opts is not None: - hc = getattr(self.hook, name, None) - if hc is None: - hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts) - setattr(self.hook, name, hc) - else: - # plugins registered this hook without knowing the spec - hc.set_specification(module_or_class, spec_opts) - for hookfunction in (hc._wrappers + hc._nonwrappers): - self._verify_hook(hc, hookfunction) - names.append(name) - - if not names: - raise ValueError("did not find any %r hooks in %r" % - (self.project_name, module_or_class)) - - def parse_hookspec_opts(self, module_or_class, name): - method = getattr(module_or_class, name) - return getattr(method, self.project_name + "_spec", None) - - def get_plugins(self): - """ return the set of registered plugins. """ - return set(self._plugin2hookcallers) - - def is_registered(self, plugin): - """ Return True if the plugin is already registered. """ - return plugin in self._plugin2hookcallers - - def get_canonical_name(self, plugin): - """ Return canonical name for a plugin object. Note that a plugin - may be registered under a different name which was specified - by the caller of register(plugin, name). To obtain the name - of an registered plugin use ``get_name(plugin)`` instead.""" - return getattr(plugin, "__name__", None) or str(id(plugin)) - - def get_plugin(self, name): - """ Return a plugin or None for the given name. """ - return self._name2plugin.get(name) - - def has_plugin(self, name): - """ Return True if a plugin with the given name is registered. """ - return self.get_plugin(name) is not None - - def get_name(self, plugin): - """ Return name for registered plugin or None if not registered. """ - for name, val in self._name2plugin.items(): - if plugin == val: - return name - - def _verify_hook(self, hook, hookimpl): - if hook.is_historic() and hookimpl.hookwrapper: - raise PluginValidationError( - "Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" % - (hookimpl.plugin_name, hook.name)) - - for arg in hookimpl.argnames: - if arg not in hook.argnames: - raise PluginValidationError( - "Plugin %r\nhook %r\nargument %r not available\n" - "plugin definition: %s\n" - "available hookargs: %s" % - (hookimpl.plugin_name, hook.name, arg, - _formatdef(hookimpl.function), ", ".join(hook.argnames))) - - def check_pending(self): - """ Verify that all hooks which have not been verified against - a hook specification are optional, otherwise raise PluginValidationError""" - for name in self.hook.__dict__: - if name[0] != "_": - hook = getattr(self.hook, name) - if not hook.has_spec(): - for hookimpl in (hook._wrappers + hook._nonwrappers): - if not hookimpl.optionalhook: - raise PluginValidationError( - "unknown hook %r in plugin %r" % - (name, hookimpl.plugin)) - - def load_setuptools_entrypoints(self, entrypoint_name): - """ Load modules from querying the specified setuptools entrypoint name. - Return the number of loaded plugins. """ - from pkg_resources import (iter_entry_points, DistributionNotFound, - VersionConflict) - for ep in iter_entry_points(entrypoint_name): - # is the plugin registered or blocked? - if self.get_plugin(ep.name) or self.is_blocked(ep.name): - continue - try: - plugin = ep.load() - except DistributionNotFound: - continue - except VersionConflict as e: - raise PluginValidationError( - "Plugin %r could not be loaded: %s!" 
% (ep.name, e)) - self.register(plugin, name=ep.name) - self._plugin_distinfo.append((plugin, ep.dist)) - return len(self._plugin_distinfo) - - def list_plugin_distinfo(self): - """ return list of distinfo/plugin tuples for all setuptools registered - plugins. """ - return list(self._plugin_distinfo) - - def list_name_plugin(self): - """ return list of name/plugin pairs. """ - return list(self._name2plugin.items()) - - def get_hookcallers(self, plugin): - """ get all hook callers for the specified plugin. """ - return self._plugin2hookcallers.get(plugin) - - def add_hookcall_monitoring(self, before, after): - """ add before/after tracing functions for all hooks - and return an undo function which, when called, - will remove the added tracers. - - ``before(hook_name, hook_impls, kwargs)`` will be called ahead - of all hook calls and receive a hookcaller instance, a list - of HookImpl instances and the keyword arguments for the hook call. - - ``after(outcome, hook_name, hook_impls, kwargs)`` receives the - same arguments as ``before`` but also a :py:class:`_CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` object - which represents the result of the overall hook call. - """ - return _TracedHookExecution(self, before, after).undo - - def enable_tracing(self): - """ enable tracing of hook calls and return an undo function. """ - hooktrace = self.hook._trace - - def before(hook_name, methods, kwargs): - hooktrace.root.indent += 1 - hooktrace(hook_name, kwargs) - - def after(outcome, hook_name, methods, kwargs): - if outcome.excinfo is None: - hooktrace("finish", hook_name, "-->", outcome.result) - hooktrace.root.indent -= 1 - - return self.add_hookcall_monitoring(before, after) - - def subset_hook_caller(self, name, remove_plugins): - """ Return a new _HookCaller instance for the named method - which manages calls to all registered plugins except the - ones from remove_plugins. """ - orig = getattr(self.hook, name) - plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)] - if plugins_to_remove: - hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class, - orig.spec_opts) - for hookimpl in (orig._wrappers + orig._nonwrappers): - plugin = hookimpl.plugin - if plugin not in plugins_to_remove: - hc._add_hookimpl(hookimpl) - # we also keep track of this hook caller so it - # gets properly removed on plugin unregistration - self._plugin2hookcallers.setdefault(plugin, []).append(hc) - return hc - return orig - - -class _MultiCall: - """ execute a call into multiple python functions/methods. """ - - # XXX note that the __multicall__ argument is supported only - # for pytest compatibility reasons. It was never officially - # supported there and is explicitely deprecated since 2.8 - # so we can remove it soon, allowing to avoid the below recursion - # in execute() and simplify/speed up the execute loop. 
- - def __init__(self, hook_impls, kwargs, specopts={}): - self.hook_impls = hook_impls - self.kwargs = kwargs - self.kwargs["__multicall__"] = self - self.specopts = specopts - - def execute(self): - all_kwargs = self.kwargs - self.results = results = [] - firstresult = self.specopts.get("firstresult") - - while self.hook_impls: - hook_impl = self.hook_impls.pop() - try: - args = [all_kwargs[argname] for argname in hook_impl.argnames] - except KeyError: - for argname in hook_impl.argnames: - if argname not in all_kwargs: - raise HookCallError( - "hook call must provide argument %r" % (argname,)) - if hook_impl.hookwrapper: - return _wrapped_call(hook_impl.function(*args), self.execute) - res = hook_impl.function(*args) - if res is not None: - if firstresult: - return res - results.append(res) - - if not firstresult: - return results - - def __repr__(self): - status = "%d meths" % (len(self.hook_impls),) - if hasattr(self, "results"): - status = ("%d results, " % len(self.results)) + status - return "<_MultiCall %s, kwargs=%r>" % (status, self.kwargs) - - -def varnames(func, startindex=None): - """ return argument name tuple for a function, method, class or callable. - - In case of a class, its "__init__" method is considered. - For methods the "self" parameter is not included unless you are passing - an unbound method with Python3 (which has no supports for unbound methods) - """ - cache = getattr(func, "__dict__", {}) - try: - return cache["_varnames"] - except KeyError: - pass - if inspect.isclass(func): - try: - func = func.__init__ - except AttributeError: - return () - startindex = 1 - else: - if not inspect.isfunction(func) and not inspect.ismethod(func): - try: - func = getattr(func, '__call__', func) - except Exception: - return () - if startindex is None: - startindex = int(inspect.ismethod(func)) - - try: - rawcode = func.__code__ - except AttributeError: - return () - try: - x = rawcode.co_varnames[startindex:rawcode.co_argcount] - except AttributeError: - x = () - else: - defaults = func.__defaults__ - if defaults: - x = x[:-len(defaults)] - try: - cache["_varnames"] = x - except TypeError: - pass - return x - - -class _HookRelay: - """ hook holder object for performing 1:N hook calls where N is the number - of registered plugins. 
- - """ - - def __init__(self, trace): - self._trace = trace - - -class _HookCaller(object): - def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None): - self.name = name - self._wrappers = [] - self._nonwrappers = [] - self._hookexec = hook_execute - if specmodule_or_class is not None: - assert spec_opts is not None - self.set_specification(specmodule_or_class, spec_opts) - - def has_spec(self): - return hasattr(self, "_specmodule_or_class") - - def set_specification(self, specmodule_or_class, spec_opts): - assert not self.has_spec() - self._specmodule_or_class = specmodule_or_class - specfunc = getattr(specmodule_or_class, self.name) - argnames = varnames(specfunc, startindex=inspect.isclass(specmodule_or_class)) - assert "self" not in argnames # sanity check - self.argnames = ["__multicall__"] + list(argnames) - self.spec_opts = spec_opts - if spec_opts.get("historic"): - self._call_history = [] - - def is_historic(self): - return hasattr(self, "_call_history") - - def _remove_plugin(self, plugin): - def remove(wrappers): - for i, method in enumerate(wrappers): - if method.plugin == plugin: - del wrappers[i] - return True - if remove(self._wrappers) is None: - if remove(self._nonwrappers) is None: - raise ValueError("plugin %r not found" % (plugin,)) - - def _add_hookimpl(self, hookimpl): - if hookimpl.hookwrapper: - methods = self._wrappers - else: - methods = self._nonwrappers - - if hookimpl.trylast: - methods.insert(0, hookimpl) - elif hookimpl.tryfirst: - methods.append(hookimpl) - else: - # find last non-tryfirst method - i = len(methods) - 1 - while i >= 0 and methods[i].tryfirst: - i -= 1 - methods.insert(i + 1, hookimpl) - - def __repr__(self): - return "<_HookCaller %r>" % (self.name,) - - def __call__(self, **kwargs): - assert not self.is_historic() - return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs) - - def call_historic(self, proc=None, kwargs=None): - self._call_history.append((kwargs or {}, proc)) - # historizing hooks don't return results - self._hookexec(self, self._nonwrappers + self._wrappers, kwargs) - - def call_extra(self, methods, kwargs): - """ Call the hook with some additional temporarily participating - methods using the specified kwargs as call parameters. """ - old = list(self._nonwrappers), list(self._wrappers) - for method in methods: - opts = dict(hookwrapper=False, trylast=False, tryfirst=False) - hookimpl = HookImpl(None, "", method, opts) - self._add_hookimpl(hookimpl) - try: - return self(**kwargs) - finally: - self._nonwrappers, self._wrappers = old - - def _maybe_apply_history(self, method): - if self.is_historic(): - for kwargs, proc in self._call_history: - res = self._hookexec(self, [method], kwargs) - if res and proc is not None: - proc(res[0]) - - -class HookImpl: - def __init__(self, plugin, plugin_name, function, hook_impl_opts): - self.function = function - self.argnames = varnames(self.function) - self.plugin = plugin - self.opts = hook_impl_opts - self.plugin_name = plugin_name - self.__dict__.update(hook_impl_opts) - - -class PluginValidationError(Exception): - """ plugin failed validation. """ - - -class HookCallError(Exception): - """ Hook was called wrongly. 
""" - - -if hasattr(inspect, 'signature'): - def _formatdef(func): - return "%s%s" % ( - func.__name__, - str(inspect.signature(func)) - ) -else: - def _formatdef(func): - return "%s%s" % ( - func.__name__, - inspect.formatargspec(*inspect.getargspec(func)) - ) diff --git a/_pytest/warnings.py b/_pytest/warnings.py index 4fe28bd31..d8b9fc460 100644 --- a/_pytest/warnings.py +++ b/_pytest/warnings.py @@ -39,8 +39,9 @@ def pytest_addoption(parser): '-W', '--pythonwarnings', action='append', help="set which warnings to report, see -W option of python itself.") parser.addini("filterwarnings", type="linelist", - help="Each line specifies warning filter pattern which would be passed" - "to warnings.filterwarnings. Process after -W and --pythonwarnings.") + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W and --pythonwarnings.") @contextmanager @@ -59,6 +60,11 @@ def catch_warnings_for_item(item): for arg in inifilters: _setoption(warnings, arg) + for mark in item.iter_markers(): + if mark.name == 'filterwarnings': + for arg in mark.args: + warnings._setoption(arg) + yield for warning in log: @@ -66,8 +72,10 @@ def catch_warnings_for_item(item): unicode_warning = False if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): - new_args = [compat.safe_str(m) for m in warn_msg.args] - unicode_warning = warn_msg.args != new_args + new_args = [] + for m in warn_msg.args: + new_args.append(compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m) + unicode_warning = list(warn_msg.args) != new_args warn_msg.args = new_args msg = warnings.formatwarning( @@ -78,7 +86,7 @@ def catch_warnings_for_item(item): if unicode_warning: warnings.warn( "Warning is using unicode non convertible to ascii, " - "converting to a safe representation:\n %s" % msg, + "converting to a safe representation:\n %s" % msg, UnicodeWarning) diff --git a/appveyor.yml b/appveyor.yml index cc72b4b70..4f4afe15c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -10,9 +10,7 @@ environment: - TOXENV: "coveralls" # note: please use "tox --listenvs" to populate the build matrix below - TOXENV: "linting" - - TOXENV: "py26" - TOXENV: "py27" - - TOXENV: "py33" - TOXENV: "py34" - TOXENV: "py35" - TOXENV: "py36" @@ -20,12 +18,16 @@ environment: - TOXENV: "py27-pexpect" - TOXENV: "py27-xdist" - TOXENV: "py27-trial" - - TOXENV: "py35-pexpect" - - TOXENV: "py35-xdist" - - TOXENV: "py35-trial" + - TOXENV: "py27-numpy" + - TOXENV: "py27-pluggymaster" + - TOXENV: "py36-pexpect" + - TOXENV: "py36-xdist" + - TOXENV: "py36-trial" + - TOXENV: "py36-numpy" + - TOXENV: "py36-pluggymaster" - TOXENV: "py27-nobyte" - TOXENV: "doctesting" - - TOXENV: "freeze" + - TOXENV: "py35-freeze" - TOXENV: "docs" install: @@ -34,7 +36,7 @@ install: - if "%TOXENV%" == "pypy" call scripts\install-pypy.bat - - C:\Python35\python -m pip install tox + - C:\Python36\python -m pip install --upgrade --pre tox build: false # Not a C# project, build stuff at the test step instead. diff --git a/changelog/2147.removal b/changelog/2147.removal deleted file mode 100644 index d5f80a108..000000000 --- a/changelog/2147.removal +++ /dev/null @@ -1 +0,0 @@ -All old-style specific behavior in current classes in the pytest's API is considered deprecated at this point and will be removed in a future release. This affects Python 2 users only and in rare situations. 
diff --git a/changelog/2427.removal b/changelog/2427.removal deleted file mode 100644 index c7ed8e17a..000000000 --- a/changelog/2427.removal +++ /dev/null @@ -1 +0,0 @@ -introduce deprecation warnings for legacy marks based parametersets diff --git a/changelog/2434.bugfix b/changelog/2434.bugfix deleted file mode 100644 index 172a992c4..000000000 --- a/changelog/2434.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix decode error in Python 2 for doctests in docstrings. diff --git a/changelog/2440.bugfix b/changelog/2440.bugfix deleted file mode 100644 index 7f1f7d504..000000000 --- a/changelog/2440.bugfix +++ /dev/null @@ -1 +0,0 @@ -Exceptions raised during teardown by finalizers are now suppressed until all finalizers are called, with the initial exception reraised. diff --git a/changelog/2464.bugfix b/changelog/2464.bugfix deleted file mode 100644 index 12062fd9e..000000000 --- a/changelog/2464.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix incorrect "collected items" report when specifying tests on the command-line. diff --git a/changelog/2469.bugfix b/changelog/2469.bugfix deleted file mode 100644 index 492c62e08..000000000 --- a/changelog/2469.bugfix +++ /dev/null @@ -1,4 +0,0 @@ -``deprecated_call`` in context-manager form now captures deprecation warnings even if -the same warning has already been raised. Also, ``deprecated_call`` will always produce -the same error message (previously it would produce different messages in context-manager vs. -function-call mode). diff --git a/changelog/2474.trivial b/changelog/2474.trivial deleted file mode 100644 index 9ea3fb651..000000000 --- a/changelog/2474.trivial +++ /dev/null @@ -1 +0,0 @@ -Create invoke tasks for updating the vendored packages. \ No newline at end of file diff --git a/changelog/2486.bugfix b/changelog/2486.bugfix deleted file mode 100644 index 97917197c..000000000 --- a/changelog/2486.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix internal error when trying to detect the start of a recursive traceback. diff --git a/changelog/2489.trivial b/changelog/2489.trivial deleted file mode 100644 index c997d7e1e..000000000 --- a/changelog/2489.trivial +++ /dev/null @@ -1 +0,0 @@ -Internal code move: move code for pytest.approx/pytest.raises to own files in order to cut down the size of python.py \ No newline at end of file diff --git a/changelog/2493.doc b/changelog/2493.doc deleted file mode 100644 index 619963041..000000000 --- a/changelog/2493.doc +++ /dev/null @@ -1 +0,0 @@ -Explicitly state for which hooks the calls stop after the first non-None result. \ No newline at end of file diff --git a/changelog/2499.trivial b/changelog/2499.trivial deleted file mode 100644 index 1b4341725..000000000 --- a/changelog/2499.trivial +++ /dev/null @@ -1 +0,0 @@ -Update copyright dates in LICENSE, README.rst and in the documentation. diff --git a/changelog/2516.feature b/changelog/2516.feature deleted file mode 100644 index 6436de16a..000000000 --- a/changelog/2516.feature +++ /dev/null @@ -1 +0,0 @@ -Now test function objects have a ``pytestmark`` attribute containing a list of marks applied directly to the test function, as opposed to marks inherited from parent classes or modules. \ No newline at end of file diff --git a/changelog/3008.bugfix.rst b/changelog/3008.bugfix.rst new file mode 100644 index 000000000..780c54773 --- /dev/null +++ b/changelog/3008.bugfix.rst @@ -0,0 +1 @@ +A rare race-condition which might result in corrupted ``.pyc`` files on Windows has been hopefully solved. 
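The vendored ``pluggy.py`` deleted above spends most of its docstrings explaining the ``HookspecMarker`` / ``HookimplMarker`` pattern. Since pytest now relies on the standalone ``pluggy`` distribution rather than the vendored copy, the same pattern can be sketched against the external package roughly as follows (an illustration with made-up names, not code from this diff)::

    import pluggy

    # "myproject" is an arbitrary project name used to pair specs with impls
    hookspec = pluggy.HookspecMarker("myproject")
    hookimpl = pluggy.HookimplMarker("myproject")


    class MySpec(object):
        @hookspec
        def myhook(self, arg):
            """Specification: only the function name and argument names matter."""


    class MyPlugin(object):
        @hookimpl
        def myhook(self, arg):
            return arg + 1


    pm = pluggy.PluginManager("myproject")
    pm.add_hookspecs(MySpec)
    pm.register(MyPlugin())
    # a 1:N hook call: every registered implementation runs and
    # all non-None results are collected into a list
    assert pm.hook.myhook(arg=41) == [42]

As the removed docstrings note, a spec declared with ``firstresult=True`` would instead stop at the first non-None result, and ``hookwrapper`` implementations wrap around all the other implementations.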
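Similarly, the ``_pytest/warnings.py`` hunk earlier in this diff loops over ``item.iter_markers()`` so that ``filterwarnings`` marks are applied per test item, after the ``filterwarnings`` ini filters. From the test author's side the feature looks roughly like this sketch (illustrative only, not part of the patch)::

    import warnings

    import pytest


    def emit_deprecation():
        warnings.warn("old API", DeprecationWarning)
        return 1


    @pytest.mark.filterwarnings("ignore:old API")
    def test_deprecation_is_ignored():
        # the filter installed by the mark applies only to this test item
        assert emit_deprecation() == 1


    @pytest.mark.filterwarnings("error")
    def test_deprecation_raises():
        # "error" turns warnings raised inside this test into exceptions
        with pytest.raises(DeprecationWarning):
            emit_deprecation()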
diff --git a/changelog/3008.trivial.rst b/changelog/3008.trivial.rst new file mode 100644 index 000000000..74231da09 --- /dev/null +++ b/changelog/3008.trivial.rst @@ -0,0 +1 @@ +``pytest`` now depends on the `python-atomicwrites `_ library. diff --git a/changelog/3180.feature.rst b/changelog/3180.feature.rst new file mode 100644 index 000000000..31db646f4 --- /dev/null +++ b/changelog/3180.feature.rst @@ -0,0 +1 @@ +Support for Python 3.7's builtin ``breakpoint()`` function, see `Using the builtin breakpoint function `_ for details. diff --git a/changelog/3290.feature b/changelog/3290.feature new file mode 100644 index 000000000..a40afcb1a --- /dev/null +++ b/changelog/3290.feature @@ -0,0 +1,2 @@ +``monkeypatch`` now supports a ``context()`` function which acts as a context manager, undoing all patching done +within the ``with`` block. diff --git a/changelog/3307.feature.rst b/changelog/3307.feature.rst new file mode 100644 index 000000000..dde449066 --- /dev/null +++ b/changelog/3307.feature.rst @@ -0,0 +1,3 @@ +pytest no longer changes the log level of the root logger when the +``log-level`` parameter has a greater numeric value than that of the level of +the root logger, which makes it play better with custom logging configuration in user code. diff --git a/changelog/3317.feature b/changelog/3317.feature new file mode 100644 index 000000000..be4625c6e --- /dev/null +++ b/changelog/3317.feature @@ -0,0 +1 @@ +Introduce correct per-node mark handling and deprecate the always-incorrect existing mark handling. \ No newline at end of file diff --git a/changelog/3330.trivial.rst b/changelog/3330.trivial.rst new file mode 100644 index 000000000..ce5ec5882 --- /dev/null +++ b/changelog/3330.trivial.rst @@ -0,0 +1 @@ +Remove internal ``_pytest.terminal.flatten`` function in favor of ``more_itertools.collapse``. diff --git a/changelog/3339.trivial b/changelog/3339.trivial new file mode 100644 index 000000000..20196e144 --- /dev/null +++ b/changelog/3339.trivial @@ -0,0 +1 @@ +Import some modules from ``collections.abc`` instead of ``collections``, as importing abstract base classes from the latter triggers ``DeprecationWarning`` in Python 3.7. diff --git a/changelog/3348.bugfix.rst b/changelog/3348.bugfix.rst new file mode 100644 index 000000000..7cf13ab2c --- /dev/null +++ b/changelog/3348.bugfix.rst @@ -0,0 +1 @@ +``pytest.raises`` now raises ``TypeError`` when receiving an unknown keyword argument. diff --git a/changelog/3360.trivial b/changelog/3360.trivial new file mode 100644 index 000000000..3b0e89e1f --- /dev/null +++ b/changelog/3360.trivial @@ -0,0 +1,2 @@ +``record_property`` is no longer experimental; removing the warnings about this had previously been forgotten. + diff --git a/changelog/3372.bugfix.rst b/changelog/3372.bugfix.rst new file mode 100644 index 000000000..722bdab1e --- /dev/null +++ b/changelog/3372.bugfix.rst @@ -0,0 +1 @@ +``pytest.raises`` now works with exception classes that look like iterables. diff --git a/changelog/README.rst b/changelog/README.rst new file mode 100644 index 000000000..35d3a40ed --- /dev/null +++ b/changelog/README.rst @@ -0,0 +1,32 @@ +This directory contains "newsfragments" which are short files that contain a small **ReST**-formatted +text that will be added to the next ``CHANGELOG``. + +The ``CHANGELOG`` will be read by users, so this description should be aimed at pytest users +instead of describing internal changes which are only relevant to the developers.
+ +Make sure to use full sentences with correct case and punctuation, for example:: + + Fix issue with non-ascii messages from the ``warnings`` module. + +Each file should be named like ``<ISSUE>.<TYPE>.rst``, where +``<ISSUE>`` is an issue number, and ``<TYPE>`` is one of: + +* ``feature``: new user facing features, like new command-line options and new behavior. +* ``bugfix``: fixes a reported bug. +* ``doc``: documentation improvement, like rewording an entire section or adding missing docs. +* ``removal``: feature deprecation or removal. +* ``vendor``: changes in packages vendored in pytest. +* ``trivial``: fixing a small typo or internal change that might be noteworthy. + +So for example: ``123.feature.rst``, ``456.bugfix.rst``. + +If your PR fixes an issue, use that number here. If there is no issue, +then after you submit the PR and get the PR number you can add a +changelog using that instead. + +If you are not sure what issue type to use, don't hesitate to ask in your PR. + +Note that the ``towncrier`` tool will automatically +reflow your text, so it will work best if you stick to a single paragraph, but multiple sentences and links are OK +and encouraged. You can install ``towncrier`` and then run ``towncrier --draft`` +if you want to get a preview of how your change will look in the final release notes. diff --git a/changelog/_template.rst b/changelog/_template.rst index 66c850ffd..a898abc15 100644 --- a/changelog/_template.rst +++ b/changelog/_template.rst @@ -13,7 +13,8 @@ {% if definitions[category]['showcontent'] %} {% for text, values in sections[section][category]|dictsort(by='value') %} -- {{ text }}{% if category != 'vendor' %} (`{{ values[0] }} `_){% endif %} +{% set issue_joiner = joiner(', ') %} +- {{ text }}{% if category != 'vendor' %} ({% for value in values|sort %}{{ issue_joiner() }}`{{ value }} `_{% endfor %}){% endif %} {% endfor %} diff --git a/doc/en/Makefile b/doc/en/Makefile index 286bbd8e7..fa8e8266a 100644 --- a/doc/en/Makefile +++ b/doc/en/Makefile @@ -13,8 +13,6 @@ PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . REGENDOC_ARGS := \ - --normalize "/={8,} (.*) ={8,}/======= \1 ========/" \ - --normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \ --normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \ --normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \ --normalize "@pytest-(\d+)\\.[^ ,]+@pytest-\1.x.y@" \ diff --git a/doc/en/_templates/globaltoc.html b/doc/en/_templates/globaltoc.html index af427198a..0e088d67e 100644 --- a/doc/en/_templates/globaltoc.html +++ b/doc/en/_templates/globaltoc.html @@ -2,14 +2,16 @@ {%- if display_toc %} diff --git a/doc/en/_templates/links.html b/doc/en/_templates/links.html index d855a013f..91157dfb7 100644 --- a/doc/en/_templates/links.html +++ b/doc/en/_templates/links.html @@ -1,7 +1,5 @@

[The body of this hunk was an HTML list that did not survive extraction; only the rendered sidebar text remains. The hunk removes two entries from the "Useful Links" sidebar (7 lines become 5); the visible link texts are: The pytest Website, Contribution Guide, pytest @ PyPI, pytest @ GitHub, and 3rd party plugins.]
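Of the changelog fragments added earlier, ``3290.feature`` (``monkeypatch.context()``) is the easiest to picture with a snippet. Assuming the behaviour described in that fragment, usage would look roughly like this (a sketch, not code from the patch)::

    import functools


    def test_patching_is_scoped(monkeypatch):
        with monkeypatch.context() as m:
            m.setattr(functools, "partial", 3)
            assert functools.partial == 3
        # leaving the ``with`` block undoes the patching done inside it
        assert callable(functools.partial)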
  • diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index dbb4e24d6..b03e0f79d 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -6,6 +6,20 @@ Release announcements :maxdepth: 2 + release-3.5.0 + release-3.4.2 + release-3.4.1 + release-3.4.0 + release-3.3.2 + release-3.3.1 + release-3.3.0 + release-3.2.5 + release-3.2.4 + release-3.2.3 + release-3.2.2 + release-3.2.1 + release-3.2.0 + release-3.1.3 release-3.1.2 release-3.1.1 release-3.1.0 diff --git a/doc/en/announce/release-2.7.0.rst b/doc/en/announce/release-2.7.0.rst index 07ae44ca1..4e317ff8f 100644 --- a/doc/en/announce/release-2.7.0.rst +++ b/doc/en/announce/release-2.7.0.rst @@ -62,7 +62,7 @@ holger krekel - fix issue655: work around different ways that cause python2/3 to leak sys.exc_info into fixtures/tests causing failures in 3rd party code -- fix issue615: assertion re-writing did not correctly escape % signs +- fix issue615: assertion rewriting did not correctly escape % signs when formatting boolean operations, which tripped over mixing booleans with modulo operators. Thanks to Tom Viner for the report, triaging and fix. diff --git a/doc/en/announce/release-3.1.3.rst b/doc/en/announce/release-3.1.3.rst new file mode 100644 index 000000000..a55280626 --- /dev/null +++ b/doc/en/announce/release-3.1.3.rst @@ -0,0 +1,23 @@ +pytest-3.1.3 +======================================= + +pytest 3.1.3 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Antoine Legrand +* Bruno Oliveira +* Max Moroz +* Raphael Pierzina +* Ronny Pfannschmidt +* Ryan Fitzpatrick + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.2.0.rst b/doc/en/announce/release-3.2.0.rst new file mode 100644 index 000000000..4d2830edd --- /dev/null +++ b/doc/en/announce/release-3.2.0.rst @@ -0,0 +1,48 @@ +pytest-3.2.0 +======================================= + +The pytest team is proud to announce the 3.2.0 release! + +pytest is a mature Python testing tool with more than a 1600 tests +against itself, passing on many different interpreters and platforms. + +This release contains a number of bugs fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + + http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Alex Hartoto +* Andras Tim +* Bruno Oliveira +* Daniel Hahler +* Florian Bruhin +* Floris Bruynooghe +* John Still +* Jordan Moldow +* Kale Kundert +* Lawrence Mitchell +* Llandy Riveron Del Risco +* Maik Figura +* Martin Altmayer +* Mihai Capotă +* Nathaniel Waisbrot +* Nguyễn Hồng Quân +* Pauli Virtanen +* Raphael Pierzina +* Ronny Pfannschmidt +* Segev Finer +* V.Kuznetsov + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/announce/release-3.2.1.rst b/doc/en/announce/release-3.2.1.rst new file mode 100644 index 000000000..899ffcd4b --- /dev/null +++ b/doc/en/announce/release-3.2.1.rst @@ -0,0 +1,22 @@ +pytest-3.2.1 +======================================= + +pytest 3.2.1 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. 
To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Alex Gaynor +* Bruno Oliveira +* Florian Bruhin +* Ronny Pfannschmidt +* Srinivas Reddy Thatiparthy + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.2.2.rst b/doc/en/announce/release-3.2.2.rst new file mode 100644 index 000000000..599bf8727 --- /dev/null +++ b/doc/en/announce/release-3.2.2.rst @@ -0,0 +1,28 @@ +pytest-3.2.2 +======================================= + +pytest 3.2.2 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Andreas Pelme +* Antonio Hidalgo +* Bruno Oliveira +* Felipe Dau +* Fernando Macedo +* Jesús Espino +* Joan Massich +* Joe Talbott +* Kirill Pinchuk +* Ronny Pfannschmidt +* Xuan Luong + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.2.3.rst b/doc/en/announce/release-3.2.3.rst new file mode 100644 index 000000000..589374974 --- /dev/null +++ b/doc/en/announce/release-3.2.3.rst @@ -0,0 +1,23 @@ +pytest-3.2.3 +======================================= + +pytest 3.2.3 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Bruno Oliveira +* Evan +* Joe Hamman +* Oliver Bestwalter +* Ronny Pfannschmidt +* Xuan Luong + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.2.4.rst b/doc/en/announce/release-3.2.4.rst new file mode 100644 index 000000000..44bfcc27e --- /dev/null +++ b/doc/en/announce/release-3.2.4.rst @@ -0,0 +1,36 @@ +pytest-3.2.4 +======================================= + +pytest 3.2.4 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Bruno Oliveira +* Christian Boelsen +* Christoph Buchner +* Daw-Ran Liou +* Florian Bruhin +* Franck Michea +* Leonard Lausen +* Matty G +* Owen Tuz +* Pavel Karateev +* Pierre GIRAUD +* Ronny Pfannschmidt +* Stephen Finucane +* Sviatoslav Abakumov +* Thomas Hisch +* Tom Dalton +* Xuan Luong +* Yorgos Pagles +* Семён Марьясин + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.2.5.rst b/doc/en/announce/release-3.2.5.rst new file mode 100644 index 000000000..a520ce2b3 --- /dev/null +++ b/doc/en/announce/release-3.2.5.rst @@ -0,0 +1,18 @@ +pytest-3.2.5 +======================================= + +pytest 3.2.5 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. 
+ +Thanks to all who contributed to this release, among them: + +* Bruno Oliveira + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.3.0.rst b/doc/en/announce/release-3.3.0.rst new file mode 100644 index 000000000..e0740e7d5 --- /dev/null +++ b/doc/en/announce/release-3.3.0.rst @@ -0,0 +1,50 @@ +pytest-3.3.0 +======================================= + +The pytest team is proud to announce the 3.3.0 release! + +pytest is a mature Python testing tool with more than a 1600 tests +against itself, passing on many different interpreters and platforms. + +This release contains a number of bugs fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + + http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Anthony Sottile +* Bruno Oliveira +* Ceridwen +* Daniel Hahler +* Dirk Thomas +* Dmitry Malinovsky +* Florian Bruhin +* George Y. Kussumoto +* Hugo +* Jesús Espino +* Joan Massich +* Ofir +* OfirOshir +* Ronny Pfannschmidt +* Samuel Dion-Girardeau +* Srinivas Reddy Thatiparthy +* Sviatoslav Abakumov +* Tarcisio Fischer +* Thomas Hisch +* Tyler Goodlet +* hugovk +* je +* prokaktus + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/announce/release-3.3.1.rst b/doc/en/announce/release-3.3.1.rst new file mode 100644 index 000000000..074c3d5ac --- /dev/null +++ b/doc/en/announce/release-3.3.1.rst @@ -0,0 +1,25 @@ +pytest-3.3.1 +======================================= + +pytest 3.3.1 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Bruno Oliveira +* Daniel Hahler +* Eugene Prikazchikov +* Florian Bruhin +* Roland Puntaier +* Ronny Pfannschmidt +* Sebastian Rahlf +* Tom Viner + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.3.2.rst b/doc/en/announce/release-3.3.2.rst new file mode 100644 index 000000000..a994aff25 --- /dev/null +++ b/doc/en/announce/release-3.3.2.rst @@ -0,0 +1,28 @@ +pytest-3.3.2 +======================================= + +pytest 3.3.2 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Anthony Sottile +* Antony Lee +* Austin +* Bruno Oliveira +* Florian Bruhin +* Floris Bruynooghe +* Henk-Jaap Wagenaar +* Jurko Gospodnetić +* Ronny Pfannschmidt +* Srinivas Reddy Thatiparthy +* Thomas Hisch + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.4.0.rst b/doc/en/announce/release-3.4.0.rst new file mode 100644 index 000000000..df1e004f1 --- /dev/null +++ b/doc/en/announce/release-3.4.0.rst @@ -0,0 +1,52 @@ +pytest-3.4.0 +======================================= + +The pytest team is proud to announce the 3.4.0 release! + +pytest is a mature Python testing tool with more than a 1600 tests +against itself, passing on many different interpreters and platforms. 
+ +This release contains a number of bugs fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + + http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Aaron +* Alan Velasco +* Anders Hovmöller +* Andrew Toolan +* Anthony Sottile +* Aron Coyle +* Brian Maissy +* Bruno Oliveira +* Cyrus Maden +* Florian Bruhin +* Henk-Jaap Wagenaar +* Ian Lesperance +* Jon Dufresne +* Jurko Gospodnetić +* Kate +* Kimberly +* Per A. Brodtkorb +* Pierre-Alexandre Fonta +* Raphael Castaneda +* Ronny Pfannschmidt +* ST John +* Segev Finer +* Thomas Hisch +* Tzu-ping Chung +* feuillemorte + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/announce/release-3.4.1.rst b/doc/en/announce/release-3.4.1.rst new file mode 100644 index 000000000..0c5932e62 --- /dev/null +++ b/doc/en/announce/release-3.4.1.rst @@ -0,0 +1,27 @@ +pytest-3.4.1 +======================================= + +pytest 3.4.1 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Aaron +* Alan Velasco +* Andy Freeland +* Brian Maissy +* Bruno Oliveira +* Florian Bruhin +* Jason R. Coombs +* Marcin Bachry +* Pedro Algarvio +* Ronny Pfannschmidt + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.4.2.rst b/doc/en/announce/release-3.4.2.rst new file mode 100644 index 000000000..59bbf6191 --- /dev/null +++ b/doc/en/announce/release-3.4.2.rst @@ -0,0 +1,28 @@ +pytest-3.4.2 +======================================= + +pytest 3.4.2 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Allan Feldman +* Bruno Oliveira +* Florian Bruhin +* Jason R. Coombs +* Kyle Altendorf +* Maik Figura +* Ronny Pfannschmidt +* codetriage-readme-bot +* feuillemorte +* joshm91 +* mike + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.5.0.rst b/doc/en/announce/release-3.5.0.rst new file mode 100644 index 000000000..54a05cea2 --- /dev/null +++ b/doc/en/announce/release-3.5.0.rst @@ -0,0 +1,51 @@ +pytest-3.5.0 +======================================= + +The pytest team is proud to announce the 3.5.0 release! + +pytest is a mature Python testing tool with more than a 1600 tests +against itself, passing on many different interpreters and platforms. + +This release contains a number of bugs fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + + http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Allan Feldman +* Brian Maissy +* Bruno Oliveira +* Carlos Jenkins +* Daniel Hahler +* Florian Bruhin +* Jason R. 
Coombs +* Jeffrey Rackauckas +* Jordan Speicher +* Julien Palard +* Kale Kundert +* Kostis Anagnostopoulos +* Kyle Altendorf +* Maik Figura +* Pedro Algarvio +* Ronny Pfannschmidt +* Tadeu Manoel +* Tareq Alayan +* Thomas Hisch +* William Lee +* codetriage-readme-bot +* feuillemorte +* joshm91 +* mike + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/assert.rst b/doc/en/assert.rst index d3d06804e..4a852978e 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -25,15 +25,15 @@ to assert that your function returns a certain value. If this assertion fails you will see the return value of the function call:: $ pytest test_assert1.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - test_assert1.py F + test_assert1.py F [100%] - ======= FAILURES ======== - _______ test_function ________ + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ def test_function(): > assert f() == 4 @@ -41,7 +41,7 @@ you will see the return value of the function call:: E + where 3 = f() test_assert1.py:5: AssertionError - ======= 1 failed in 0.12 seconds ======== + ========================= 1 failed in 0.12 seconds ========================= ``pytest`` has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary @@ -119,9 +119,9 @@ exceptions your own code is deliberately raising, whereas using like documenting unfixed bugs (where the test describes what "should" happen) or bugs in dependencies. -If you want to test that a regular expression matches on the string -representation of an exception (like the ``TestCase.assertRaisesRegexp`` method -from ``unittest``) you can use the ``ExceptionInfo.match`` method:: +Also, the context manager form accepts a ``match`` keyword parameter to test +that a regular expression matches on the string representation of an exception +(like the ``TestCase.assertRaisesRegexp`` method from ``unittest``):: import pytest @@ -129,12 +129,11 @@ from ``unittest``) you can use the ``ExceptionInfo.match`` method:: raise ValueError("Exception 123 raised") def test_match(): - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match=r'.* 123 .*'): myfunc() - excinfo.match(r'.* 123 .*') The regexp parameter of the ``match`` method is matched with the ``re.search`` -function. So in the above example ``excinfo.match('123')`` would have worked as +function. So in the above example ``match='123'`` would have worked as well. @@ -169,15 +168,15 @@ when it encounters comparisons. 
For example:: if you run this module:: $ pytest test_assert2.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - test_assert2.py F + test_assert2.py F [100%] - ======= FAILURES ======== - _______ test_set_comparison ________ + ================================= FAILURES ================================= + ___________________________ test_set_comparison ____________________________ def test_set_comparison(): set1 = set("1308") @@ -191,7 +190,7 @@ if you run this module:: E Use -v to get the full diff test_assert2.py:5: AssertionError - ======= 1 failed in 0.12 seconds ======== + ========================= 1 failed in 0.12 seconds ========================= Special comparisons are done for a number of cases: @@ -210,8 +209,8 @@ the ``pytest_assertrepr_compare`` hook. .. autofunction:: _pytest.hookspec.pytest_assertrepr_compare :noindex: -As an example consider adding the following hook in a conftest.py which -provides an alternative explanation for ``Foo`` objects:: +As an example consider adding the following hook in a :ref:`conftest.py ` +file which provides an alternative explanation for ``Foo`` objects:: # content of conftest.py from test_foocompare import Foo @@ -239,9 +238,9 @@ you can run the test module and get the custom output defined in the conftest file:: $ pytest -q test_foocompare.py - F - ======= FAILURES ======== - _______ test_compare ________ + F [100%] + ================================= FAILURES ================================= + _______________________________ test_compare _______________________________ def test_compare(): f1 = Foo(1) diff --git a/doc/en/backwards-compatibility.rst b/doc/en/backwards-compatibility.rst index 8ceada52d..55506e7c3 100644 --- a/doc/en/backwards-compatibility.rst +++ b/doc/en/backwards-compatibility.rst @@ -10,3 +10,11 @@ With the pytest 3.0 release we introduced a clear communication scheme for when To communicate changes we are already issuing deprecation warnings, but they are not displayed by default. In pytest 3.0 we changed the default setting so that pytest deprecation warnings are displayed if not explicitly silenced (with ``--disable-pytest-warnings``). We will only remove deprecated functionality in major releases (e.g. if we deprecate something in 3.0 we will remove it in 4.0), and keep it around for at least two minor releases (e.g. if we deprecate something in 3.9 and 4.0 is the next release, we will not remove it in 4.0 but in 5.0). + + +Deprecation Roadmap +------------------- + +We track deprecation and removal of features using milestones and the `deprecation `_ and `removal `_ labels on GitHub. + +Following our deprecation policy, after starting issuing deprecation warnings we keep features for *at least* two minor versions before considering removal. diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index 26dbd44cb..7a71827e9 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -1,81 +1,18 @@ +:orphan: .. _`pytest helpers`: Pytest API and builtin fixtures ================================================ -This is a list of ``pytest.*`` API functions and fixtures. + +Most of the information of this page has been moved over to :ref:`reference`. For information on plugin hooks and objects, see :ref:`plugins`. For information on the ``pytest.mark`` mechanism, see :ref:`mark`. 
-For the below objects, you can also interactively ask for help, e.g. by -typing on the Python interactive prompt something like:: - - import pytest - help(pytest) - -.. currentmodule:: pytest - -Invoking pytest interactively ---------------------------------------------------- - -.. autofunction:: main - -More examples at :ref:`pytest.main-usage` - - -Helpers for assertions about Exceptions/Warnings --------------------------------------------------------- - -.. autofunction:: raises - -Examples at :ref:`assertraises`. - -.. autofunction:: deprecated_call - -Comparing floating point numbers --------------------------------- - -.. autoclass:: approx - -Raising a specific test outcome --------------------------------------- - -You can use the following functions in your test, fixture or setup -functions to force a certain test outcome. Note that most often -you can rather use declarative marks, see :ref:`skipping`. - -.. autofunction:: _pytest.runner.fail -.. autofunction:: _pytest.runner.skip -.. autofunction:: _pytest.runner.importorskip -.. autofunction:: _pytest.skipping.xfail -.. autofunction:: _pytest.runner.exit - -Fixtures and requests ------------------------------------------------------ - -To mark a fixture function: - -.. autofunction:: _pytest.fixtures.fixture - -Tutorial at :ref:`fixtures`. - -The ``request`` object that can be used from fixture functions. - -.. autoclass:: _pytest.fixtures.FixtureRequest() - :members: - - -.. _builtinfixtures: -.. _builtinfuncargs: - -Builtin fixtures/function arguments ------------------------------------------ - -You can ask for available builtin or project-custom -:ref:`fixtures ` by typing:: +For information about fixtures, see :ref:`fixtures`. To see a complete list of available fixtures, type:: $ pytest -q --fixtures cache @@ -89,43 +26,80 @@ You can ask for available builtin or project-custom Values can be any object handled by the json stdlib module. capsys - Enable capturing of writes to sys.stdout/sys.stderr and make + Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make captured output available via ``capsys.readouterr()`` method calls - which return a ``(out, err)`` tuple. + which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text`` + objects. + capsysbinary + Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make + captured output available via ``capsys.readouterr()`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes`` + objects. capfd - Enable capturing of writes to file descriptors 1 and 2 and make + Enable capturing of writes to file descriptors ``1`` and ``2`` and make captured output available via ``capfd.readouterr()`` method calls - which return a ``(out, err)`` tuple. + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text`` + objects. + capfdbinary + Enable capturing of write to file descriptors 1 and 2 and make + captured output available via ``capfdbinary.readouterr`` method calls + which return a ``(out, err)`` tuple. ``out`` and ``err`` will be + ``bytes`` objects. doctest_namespace - Inject names into the doctest namespace. + Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. pytestconfig - the pytest config object with access to command line opts. - record_xml_property - Add extra xml properties to the tag for the calling test. + Session-scoped fixture that returns the :class:`_pytest.config.Config` object. 
+ + Example:: + + def test_foo(pytestconfig): + if pytestconfig.getoption("verbose"): + ... + record_property + Add an extra properties the calling test. + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. The fixture is callable with ``(name, value)``, with value being automatically xml-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + record_xml_property + (Deprecated) use record_property. + record_xml_attribute + Add extra xml attributes to the tag for the calling test. + The fixture is callable with ``(name, value)``, with value being + automatically xml-encoded + caplog + Access and control log capturing. + + Captured logs are available through the following methods:: + + * caplog.text() -> string containing formatted log output + * caplog.records() -> list of logging.LogRecord instances + * caplog.record_tuples() -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string monkeypatch The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: - monkeypatch.setattr(obj, name, value, raising=True) - monkeypatch.delattr(obj, name, raising=True) - monkeypatch.setitem(mapping, name, value) - monkeypatch.delitem(obj, name, raising=True) - monkeypatch.setenv(name, value, prepend=False) - monkeypatch.delenv(name, value, raising=True) - monkeypatch.syspath_prepend(path) - monkeypatch.chdir(path) + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=False) + monkeypatch.delenv(name, value, raising=True) + monkeypatch.syspath_prepend(path) + monkeypatch.chdir(path) All modifications will be undone after the requesting test function or fixture has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. recwarn - Return a WarningsRecorder instance that provides these methods: - - * ``pop(category=None)``: return last warning matching the category. - * ``clear()``: clear list of warnings + Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. See http://docs.python.org/library/warnings.html for information on warning categories. @@ -137,5 +111,13 @@ You can ask for available builtin or project-custom created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. + + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html no tests ran in 0.12 seconds + +You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:: + + import pytest + help(pytest) + diff --git a/doc/en/cache.rst b/doc/en/cache.rst index 688b6dd04..10543ef3b 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -1,16 +1,12 @@ +.. _`cache_provider`: +.. _cache: + + Cache: working with cross-testrun state ======================================= .. versionadded:: 2.8 -.. warning:: - - The functionality of this core plugin was previously distributed - as a third party plugin named ``pytest-cache``. The core plugin - is compatible regarding command line options and API usage except that you - can only store/receive data between test runs that is json-serializable. 
- - Usage --------- @@ -50,9 +46,9 @@ First, let's create 50 test invocation of which only 2 fail:: If you run this for the first time you will see two failures:: $ pytest -q - .................F.......F........................ - ======= FAILURES ======== - _______ test_num[17] ________ + .................F.......F........................ [100%] + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ i = 17 @@ -63,7 +59,7 @@ If you run this for the first time you will see two failures:: E Failed: bad luck test_50.py:6: Failed - _______ test_num[25] ________ + _______________________________ test_num[25] _______________________________ i = 25 @@ -79,16 +75,16 @@ If you run this for the first time you will see two failures:: If you then run it with ``--lf``:: $ pytest --lf - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y - run-last-failure: rerun last 2 failures rootdir: $REGENDOC_TMPDIR, inifile: - collected 50 items + collected 50 items / 48 deselected + run-last-failure: rerun previous 2 failures - test_50.py FF + test_50.py FF [100%] - ======= FAILURES ======== - _______ test_num[17] ________ + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ i = 17 @@ -99,7 +95,7 @@ If you then run it with ``--lf``:: E Failed: bad luck test_50.py:6: Failed - _______ test_num[25] ________ + _______________________________ test_num[25] _______________________________ i = 25 @@ -110,8 +106,7 @@ If you then run it with ``--lf``:: E Failed: bad luck test_50.py:6: Failed - ======= 48 tests deselected ======== - ======= 2 failed, 48 deselected in 0.12 seconds ======== + ================= 2 failed, 48 deselected in 0.12 seconds ================== You have run only the two failing test from the last run, while 48 tests have not been run ("deselected"). @@ -121,16 +116,16 @@ previous failures will be executed first (as can be seen from the series of ``FF`` and dots):: $ pytest --ff - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y - run-last-failure: rerun last 2 failures first rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items + run-last-failure: rerun previous 2 failures first - test_50.py FF................................................ + test_50.py FF................................................ [100%] - ======= FAILURES ======== - _______ test_num[17] ________ + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ i = 17 @@ -141,7 +136,7 @@ of ``FF`` and dots):: E Failed: bad luck test_50.py:6: Failed - _______ test_num[25] ________ + _______________________________ test_num[25] _______________________________ i = 25 @@ -152,10 +147,24 @@ of ``FF`` and dots):: E Failed: bad luck test_50.py:6: Failed - ======= 2 failed, 48 passed in 0.12 seconds ======== + =================== 2 failed, 48 passed in 0.12 seconds ==================== .. 
_`config.cache`: +New ``--nf``, ``--new-first`` options: run new tests first followed by the rest +of the tests, in both cases tests are also sorted by the file modified time, +with more recent files coming first. + +Behavior when no tests failed in the last run +--------------------------------------------- + +When no tests failed in the last run, or when no cached ``lastfailed`` data was +found, ``pytest`` can be configured either to run all of the tests or no tests, +using the ``--last-failed-no-failures`` option, which takes one of the following values:: + + pytest --last-failed-no-failures all # run all tests (default behavior) + pytest --last-failed-no-failures none # run no tests and exit + The new config.cache object -------------------------------- @@ -186,9 +195,9 @@ If you run this command once, it will take a while because of the sleep:: $ pytest -q - F - ======= FAILURES ======== - _______ test_function ________ + F [100%] + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ mydata = 42 @@ -203,9 +212,9 @@ If you run it a second time the value will be retrieved from the cache and this will be quick:: $ pytest -q - F - ======= FAILURES ======== - _______ test_function ________ + F [100%] + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ mydata = 42 @@ -216,7 +225,7 @@ the cache and this will be quick:: test_caching.py:14: AssertionError 1 failed in 0.12 seconds -See the `cache-api`_ for more details. +See the :ref:`cache-api` for more details. Inspecting Cache content @@ -226,17 +235,19 @@ You can always peek at the content of the cache using the ``--cache-show`` command line option:: $ py.test --cache-show - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - cachedir: $REGENDOC_TMPDIR/.cache + cachedir: $REGENDOC_TMPDIR/.pytest_cache ------------------------------- cache values ------------------------------- cache/lastfailed contains: {'test_caching.py::test_function': True} + cache/nodeids contains: + ['test_caching.py::test_function'] example/value contains: 42 - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= Clearing Cache content ------------------------------- @@ -251,22 +262,3 @@ servers where isolation and correctness is more important than speed. -.. _`cache-api`: - -config.cache API ------------------- - -The ``config.cache`` object allows other plugins, -including ``conftest.py`` files, -to safely and flexibly store and retrieve values across -test runs because the ``config`` object is available -in many places. - -Under the hood, the cache plugin uses the simple -dumps/loads API of the json stdlib module - -.. currentmodule:: _pytest.cacheprovider - -.. automethod:: Cache.get -.. automethod:: Cache.set -.. automethod:: Cache.makedir diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 58ebdf840..901def602 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -9,7 +9,8 @@ Default stdout/stderr/stdin capturing behaviour During test execution any output sent to ``stdout`` and ``stderr`` is captured. 
If a test or a setup method fails its according captured -output will usually be shown along with the failure traceback. +output will usually be shown along with the failure traceback. (this +behavior can be configured by the ``--show-capture`` command-line option). In addition, ``stdin`` is set to a "null" object which will fail on attempts to read from it because it is rarely desired @@ -63,15 +64,15 @@ and running this module will show you precisely the output of the failing function and hide the other one:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - test_module.py .F + test_module.py .F [100%] - ======= FAILURES ======== - _______ test_func2 ________ + ================================= FAILURES ================================= + ________________________________ test_func2 ________________________________ def test_func2(): > assert False @@ -80,26 +81,26 @@ of the failing function and hide the other one:: test_module.py:9: AssertionError -------------------------- Captured stdout setup --------------------------- setting up - ======= 1 failed, 1 passed in 0.12 seconds ======== + ==================== 1 failed, 1 passed in 0.12 seconds ==================== Accessing captured output from a test function --------------------------------------------------- -The ``capsys`` and ``capfd`` fixtures allow to access stdout/stderr -output created during test execution. Here is an example test function -that performs some output related checks: +The ``capsys``, ``capsysbinary``, ``capfd``, and ``capfdbinary`` fixtures +allow access to stdout/stderr output created during test execution. Here is +an example test function that performs some output related checks: .. code-block:: python def test_myoutput(capsys): # or use "capfd" for fd-level - print ("hello") + print("hello") sys.stderr.write("world\n") - out, err = capsys.readouterr() - assert out == "hello\n" - assert err == "world\n" - print ("next") - out, err = capsys.readouterr() - assert out == "next\n" + captured = capsys.readouterr() + assert captured.out == "hello\n" + assert captured.err == "world\n" + print("next") + captured = capsys.readouterr() + assert captured.out == "next\n" The ``readouterr()`` call snapshots the output so far - and capturing will be continued. After the test @@ -110,11 +111,30 @@ output streams and also interacts well with pytest's own per-test capturing. If you want to capture on filedescriptor level you can use -the ``capfd`` function argument which offers the exact +the ``capfd`` fixture which offers the exact same interface but allows to also capture output from libraries or subprocesses that directly write to operating system level output streams (FD1 and FD2). +.. versionadded:: 3.3 + +The return value from ``readouterr`` changed to a ``namedtuple`` with two attributes, ``out`` and ``err``. + +.. versionadded:: 3.3 + +If the code under test writes non-textual data, you can capture this using +the ``capsysbinary`` fixture which instead returns ``bytes`` from +the ``readouterr`` method. The ``capfsysbinary`` fixture is currently only +available in python 3. + + +.. versionadded:: 3.3 + +If the code under test writes non-textual data, you can capture this using +the ``capfdbinary`` fixture which instead returns ``bytes`` from +the ``readouterr`` method. 
The ``capfdbinary`` fixture operates on the +filedescriptor level. + .. versionadded:: 3.0 diff --git a/doc/en/conf.py b/doc/en/conf.py index 40f1e4165..f5c17404b 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -18,8 +18,11 @@ # The full version, including alpha/beta/rc tags. # The short X.Y version. -import os, sys +import os +import sys + from _pytest import __version__ as version + release = ".".join(version.split(".")[:2]) # If extensions (or modules to document with autodoc) are in another directory, @@ -38,7 +41,7 @@ todo_include_todos = 1 # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.autosummary', - 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode'] + 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinxcontrib_trio'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -310,9 +313,7 @@ texinfo_documents = [ # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'python': ('http://docs.python.org/', None), -# 'lib': ("http://docs.python.org/2.7library/", None), - } +intersphinx_mapping = {'python': ('http://docs.python.org/3', None)} def setup(app): diff --git a/doc/en/contact.rst b/doc/en/contact.rst index d4a1a03de..83d496640 100644 --- a/doc/en/contact.rst +++ b/doc/en/contact.rst @@ -19,9 +19,9 @@ Contact channels - `pytest-commit at python.org (mailing list)`_: for commits and new issues - :doc:`contribution guide ` for help on submitting pull - requests to bitbucket (including using git via gitifyhg). + requests to GitHub. -- #pylib on irc.freenode.net IRC channel for random questions. +- ``#pylib`` on irc.freenode.net IRC channel for random questions. - private mail to Holger.Krekel at gmail com if you want to communicate sensitive issues @@ -46,6 +46,5 @@ Contact channels .. _`py-dev`: .. _`development mailing list`: .. _`pytest-dev at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-dev -.. _`py-svn`: .. _`pytest-commit at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-commit diff --git a/doc/en/contents.rst b/doc/en/contents.rst index 9f4a9a1be..79c4fce3e 100644 --- a/doc/en/contents.rst +++ b/doc/en/contents.rst @@ -14,7 +14,6 @@ Full pytest documentation usage existingtestsuite assert - builtin fixture monkeypatch tmpdir @@ -30,15 +29,20 @@ Full pytest documentation xunit_setup plugins writing_plugins + logging + reference - example/index goodpractices + pythonpath customize + example/index bash-completion backwards-compatibility + historical-notes license contributing + development_guide talks projects faq diff --git a/doc/en/customize.rst b/doc/en/customize.rst index ce0a36c11..e89cdc002 100644 --- a/doc/en/customize.rst +++ b/doc/en/customize.rst @@ -1,5 +1,5 @@ -Basic test configuration -=================================== +Configuration +============= Command line options and configuration file settings ----------------------------------------------------------------- @@ -15,17 +15,35 @@ which were registered by installed plugins. .. _rootdir: .. _inifiles: -initialization: determining rootdir and inifile +Initialization: determining rootdir and inifile ----------------------------------------------- .. 
versionadded:: 2.7 -pytest determines a "rootdir" for each test run which depends on +pytest determines a ``rootdir`` for each test run which depends on the command line arguments (specified test files, paths) and on -the existence of inifiles. The determined rootdir and ini-file are -printed as part of the pytest header. The rootdir is used for constructing -"nodeids" during collection and may also be used by plugins to store -project/testrun-specific information. +the existence of *ini-files*. The determined ``rootdir`` and *ini-file* are +printed as part of the pytest header during startup. + +Here's a summary what ``pytest`` uses ``rootdir`` for: + +* Construct *nodeids* during collection; each test is assigned + a unique *nodeid* which is rooted at the ``rootdir`` and takes in account full path, + class name, function name and parametrization (if any). + +* Is used by plugins as a stable location to store project/test run specific information; + for example, the internal :ref:`cache ` plugin creates a ``.cache`` subdirectory + in ``rootdir`` to store its cross-test run state. + +Important to emphasize that ``rootdir`` is **NOT** used to modify ``sys.path``/``PYTHONPATH`` or +influence how modules are imported. See :ref:`pythonpath` for more details. + +``--rootdir=path`` command-line option can be used to force a specific directory. +The directory passed may contain environment variables when it is used in conjunction +with ``addopts`` in a ``pytest.ini`` file. + +Finding the ``rootdir`` +~~~~~~~~~~~~~~~~~~~~~~~ Here is the algorithm which finds the rootdir from ``args``: @@ -112,153 +130,30 @@ progress output, you can write it into a configuration file: # content of pytest.ini # (or tox.ini or setup.cfg) [pytest] - addopts = -rsxX -q + addopts = -ra -q -Alternatively, you can set a PYTEST_ADDOPTS environment variable to add command +Alternatively, you can set a ``PYTEST_ADDOPTS`` environment variable to add command line options while the environment is in use:: - export PYTEST_ADDOPTS="-rsxX -q" + export PYTEST_ADDOPTS="-v" -From now on, running ``pytest`` will add the specified options. +Here's how the command-line is built in the presence of ``addopts`` or the environment variable:: + $PYTEST_ADDOTPS + +So if the user executes in the command-line:: + + pytest -m slow + +The actual command line executed is:: + + pytest -ra -q -v -m slow + +Note that as usual for other command-line applications, in case of conflicting options the last one wins, so the example +above will show verbose output because ``-v`` overwrites ``-q``. Builtin configuration file options ---------------------------------------------- -.. confval:: minversion - - Specifies a minimal pytest version required for running tests. - - minversion = 2.1 # will fail if we run with pytest-2.0 - -.. confval:: addopts - - Add the specified ``OPTS`` to the set of command line arguments as if they - had been specified by the user. Example: if you have this ini file content: - - .. code-block:: ini - - [pytest] - addopts = --maxfail=2 -rf # exit after 2 failures, report fail info - - issuing ``pytest test_hello.py`` actually means:: - - pytest --maxfail=2 -rf test_hello.py - - Default is to add no options. - -.. confval:: norecursedirs - - Set the directory basename patterns to avoid when recursing - for test discovery. The individual (fnmatch-style) patterns are - applied to the basename of a directory to decide if to recurse into it. - Pattern matching characters:: - - * matches everything - ? 
matches any single character - [seq] matches any character in seq - [!seq] matches any char not in seq - - Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'``. - Setting a ``norecursedirs`` replaces the default. Here is an example of - how to avoid certain directories: - - .. code-block:: ini - - # content of pytest.ini - [pytest] - norecursedirs = .svn _build tmp* - - This would tell ``pytest`` to not look into typical subversion or - sphinx-build directories or into any ``tmp`` prefixed directory. - -.. confval:: testpaths - - .. versionadded:: 2.8 - - Sets list of directories that should be searched for tests when - no specific directories, files or test ids are given in the command line when - executing pytest from the :ref:`rootdir ` directory. - Useful when all project tests are in a known location to speed up - test collection and to avoid picking up undesired tests by accident. - - .. code-block:: ini - - # content of pytest.ini - [pytest] - testpaths = testing doc - - This tells pytest to only look for tests in ``testing`` and ``doc`` - directories when executing from the root directory. - -.. confval:: python_files - - One or more Glob-style file patterns determining which python files - are considered as test modules. - -.. confval:: python_classes - - One or more name prefixes or glob-style patterns determining which classes - are considered for test collection. Here is an example of how to collect - tests from classes that end in ``Suite``: - - .. code-block:: ini - - # content of pytest.ini - [pytest] - python_classes = *Suite - - Note that ``unittest.TestCase`` derived classes are always collected - regardless of this option, as ``unittest``'s own collection framework is used - to collect those tests. - -.. confval:: python_functions - - One or more name prefixes or glob-patterns determining which test functions - and methods are considered tests. Here is an example of how - to collect test functions and methods that end in ``_test``: - - .. code-block:: ini - - # content of pytest.ini - [pytest] - python_functions = *_test - - Note that this has no effect on methods that live on a ``unittest - .TestCase`` derived class, as ``unittest``'s own collection framework is used - to collect those tests. - - See :ref:`change naming conventions` for more detailed examples. - -.. confval:: doctest_optionflags - - One or more doctest flag names from the standard ``doctest`` module. - :doc:`See how pytest handles doctests `. - -.. confval:: confcutdir - - Sets a directory where search upwards for ``conftest.py`` files stops. - By default, pytest will stop searching for ``conftest.py`` files upwards - from ``pytest.ini``/``tox.ini``/``setup.cfg`` of the project if any, - or up to the file-system root. - - -.. confval:: filterwarnings - - .. versionadded:: 3.1 - - Sets a list of filters and actions that should be taken for matched - warnings. By default all warnings emitted during the test session - will be displayed in a summary at the end of the test session. - - .. code-block:: ini - - # content of pytest.ini - [pytest] - filterwarnings = - error - ignore::DeprecationWarning - - This tells pytest to ignore deprecation warnings and turn all other warnings - into errors. For more information please refer to :ref:`warnings`. +For the full list of options consult the :ref:`reference documentation `. 
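
As a rough sketch of how a plugin or ``conftest.py`` can use the ``rootdir``-anchored cache mentioned above as a stable store between test runs (the fixture name and cache key below are invented for illustration and are not part of the documented examples)::

    # content of conftest.py  (illustrative sketch only)
    import pytest

    @pytest.fixture
    def run_counter(pytestconfig):
        # config.cache persists JSON-serializable values under the rootdir
        # (in the cache plugin's directory), so they survive between test runs
        count = pytestconfig.cache.get("example/run_counter", 0) + 1
        pytestconfig.cache.set("example/run_counter", count)
        return count

A test requesting ``run_counter`` would then see ``1`` on the first run, ``2`` on the second, and so on.
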
diff --git a/doc/en/development_guide.rst b/doc/en/development_guide.rst new file mode 100644 index 000000000..2dac82880 --- /dev/null +++ b/doc/en/development_guide.rst @@ -0,0 +1,55 @@ +================= +Development Guide +================= + +Some general guidelines regarding development in pytest for maintainers and contributors. Nothing here +is set in stone and can't be changed, feel free to suggest improvements or changes in the workflow. + + +Code Style +---------- + +* `PEP-8 `_ +* `flake8 `_ for quality checks +* `invoke `_ to automate development tasks + + +Branches +-------- + +We have two long term branches: + +* ``master``: contains the code for the next bugfix release. +* ``features``: contains the code with new features for the next minor release. + +The official repository usually does not contain topic branches, developers and contributors should create topic +branches in their own forks. + +Exceptions can be made for cases where more than one contributor is working on the same +topic or where it makes sense to use some automatic capability of the main repository, such as automatic docs from +`readthedocs `_ for a branch dealing with documentation refactoring. + +Issues +------ + +Any question, feature, bug or proposal is welcome as an issue. Users are encouraged to use them whenever they need. + +GitHub issues should use labels to categorize them. Labels should be created sporadically, to fill a niche; we should +avoid creating labels just for the sake of creating them. + +Each label should include a description in the GitHub's interface stating its purpose. + +Temporary labels +~~~~~~~~~~~~~~~~ + +To classify issues for a special event it is encouraged to create a temporary label. This helps those involved to find +the relevant issues to work on. Examples of that are sprints in Python events or global hacking events. + +* ``temporary: EP2017 sprint``: candidate issues or PRs tackled during the EuroPython 2017 + +Issues created at those events should have other relevant labels added as well. + +Those labels should be removed after they are no longer relevant. + + +.. include:: ../../HOWTORELEASE.rst diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index 24c068a86..2ee7110b3 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -61,14 +61,14 @@ and another like this:: then you can just invoke ``pytest`` without command line options:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini - collected 1 items + collected 1 item - mymodule.py . + mymodule.py . [100%] - ======= 1 passed in 0.12 seconds ======== + ========================= 1 passed in 0.12 seconds ========================= It is possible to use fixtures using the ``getfixture`` helper:: @@ -81,9 +81,9 @@ Also, :ref:`usefixtures` and :ref:`autouse` fixtures are supported when executing text doctest files. The standard ``doctest`` module provides some setting flags to configure the -strictness of doctest tests. In pytest You can enable those flags those flags -using the configuration file. To make pytest ignore trailing whitespaces and -ignore lengthy exception stack traces you can just write: +strictness of doctest tests. In pytest, you can enable those flags using the +configuration file. To make pytest ignore trailing whitespaces and ignore +lengthy exception stack traces you can just write: .. 
code-block:: ini @@ -115,6 +115,13 @@ itself:: >>> get_unicode_greeting() # doctest: +ALLOW_UNICODE 'Hello' +By default, pytest would report only the first failure for a given doctest. If +you want to continue the test even when you have failures, do:: + + pytest --doctest-modules --doctest-continue-on-failure + + +.. _`doctest_namespace`: The 'doctest_namespace' fixture ------------------------------- diff --git a/doc/en/example/assertion/failure_demo.py b/doc/en/example/assertion/failure_demo.py index d31fba2ad..3ae0268d3 100644 --- a/doc/en/example/assertion/failure_demo.py +++ b/doc/en/example/assertion/failure_demo.py @@ -157,12 +157,14 @@ class TestRaises(object): # thanks to Matthew Scott for this test def test_dynamic_compile_shows_nicely(): + import imp + import sys src = 'def foo():\n assert 1 == 0\n' name = 'abc-123' - module = py.std.imp.new_module(name) + module = imp.new_module(name) code = _pytest._code.compile(src, name, 'exec') py.builtin.exec_(code, module.__dict__) - py.std.sys.modules[name] = module + sys.modules[name] = module module.foo() diff --git a/doc/en/example/assertion/global_testmodule_config/test_hello.py b/doc/en/example/assertion/global_testmodule_config/test_hello_world.py similarity index 100% rename from doc/en/example/assertion/global_testmodule_config/test_hello.py rename to doc/en/example/assertion/global_testmodule_config/test_hello_world.py diff --git a/doc/en/example/attic.rst b/doc/en/example/attic.rst index 6004ebb8f..9e124a5d0 100644 --- a/doc/en/example/attic.rst +++ b/doc/en/example/attic.rst @@ -17,7 +17,7 @@ example: specifying and selecting acceptance tests class AcceptFixture(object): def __init__(self, request): - if not request.config.option.acceptance: + if not request.config.getoption('acceptance'): pytest.skip("specify -A to run acceptance tests") self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True) diff --git a/doc/en/example/costlysetup/sub1/__init__.py b/doc/en/example/costlysetup/sub_a/__init__.py similarity index 100% rename from doc/en/example/costlysetup/sub1/__init__.py rename to doc/en/example/costlysetup/sub_a/__init__.py diff --git a/doc/en/example/costlysetup/sub1/test_quick.py b/doc/en/example/costlysetup/sub_a/test_quick.py similarity index 100% rename from doc/en/example/costlysetup/sub1/test_quick.py rename to doc/en/example/costlysetup/sub_a/test_quick.py diff --git a/doc/en/example/costlysetup/sub2/__init__.py b/doc/en/example/costlysetup/sub_b/__init__.py similarity index 100% rename from doc/en/example/costlysetup/sub2/__init__.py rename to doc/en/example/costlysetup/sub_b/__init__.py diff --git a/doc/en/example/costlysetup/sub2/test_two.py b/doc/en/example/costlysetup/sub_b/test_two.py similarity index 100% rename from doc/en/example/costlysetup/sub2/test_two.py rename to doc/en/example/costlysetup/sub_b/test_two.py diff --git a/doc/en/example/index.rst b/doc/en/example/index.rst index 363de5ab7..f63cb822a 100644 --- a/doc/en/example/index.rst +++ b/doc/en/example/index.rst @@ -1,8 +1,8 @@ .. _examples: -Usages and Examples -=========================================== +Examples and customization tricks +================================= Here is a (growing) list of examples. :ref:`Contact ` us if you need more examples or have questions. 
Also take a look at the diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index 338f707a5..b162c938c 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -30,32 +30,30 @@ You can "mark" a test function with custom metadata like this:: You can then restrict a test run to only run tests marked with ``webtest``:: $ pytest -v -m webtest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 4 items + collecting ... collected 4 items / 3 deselected - test_server.py::test_send_http PASSED + test_server.py::test_send_http PASSED [100%] - ======= 3 tests deselected ======== - ======= 1 passed, 3 deselected in 0.12 seconds ======== + ================== 1 passed, 3 deselected in 0.12 seconds ================== Or the inverse, running all tests except the webtest ones:: $ pytest -v -m "not webtest" - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 4 items + collecting ... collected 4 items / 1 deselected - test_server.py::test_something_quick PASSED - test_server.py::test_another PASSED - test_server.py::TestClass::test_method PASSED + test_server.py::test_something_quick PASSED [ 33%] + test_server.py::test_another PASSED [ 66%] + test_server.py::TestClass::test_method PASSED [100%] - ======= 1 tests deselected ======== - ======= 3 passed, 1 deselected in 0.12 seconds ======== + ================== 3 passed, 1 deselected in 0.12 seconds ================== Selecting tests based on their node ID -------------------------------------- @@ -65,42 +63,42 @@ arguments to select only specified tests. This makes it easy to select tests based on their module, class, method, or function name:: $ pytest -v test_server.py::TestClass::test_method - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 5 items + collecting ... collected 1 item - test_server.py::TestClass::test_method PASSED + test_server.py::TestClass::test_method PASSED [100%] - ======= 1 passed in 0.12 seconds ======== + ========================= 1 passed in 0.12 seconds ========================= You can also select on the class:: $ pytest -v test_server.py::TestClass - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 4 items + collecting ... 
collected 1 item - test_server.py::TestClass::test_method PASSED + test_server.py::TestClass::test_method PASSED [100%] - ======= 1 passed in 0.12 seconds ======== + ========================= 1 passed in 0.12 seconds ========================= Or select multiple nodes:: $ pytest -v test_server.py::TestClass test_server.py::test_send_http - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 8 items + collecting ... collected 2 items - test_server.py::TestClass::test_method PASSED - test_server.py::test_send_http PASSED + test_server.py::TestClass::test_method PASSED [ 50%] + test_server.py::test_send_http PASSED [100%] - ======= 2 passed in 0.12 seconds ======== + ========================= 2 passed in 0.12 seconds ========================= .. _node-id: @@ -129,58 +127,59 @@ exact match on markers that ``-m`` provides. This makes it easy to select tests based on their names:: $ pytest -v -k http # running with the above defined example module - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 4 items + collecting ... collected 4 items / 3 deselected - test_server.py::test_send_http PASSED + test_server.py::test_send_http PASSED [100%] - ======= 3 tests deselected ======== - ======= 1 passed, 3 deselected in 0.12 seconds ======== + ================== 1 passed, 3 deselected in 0.12 seconds ================== And you can also run all tests except the ones that match the keyword:: $ pytest -k "not send_http" -v - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 4 items + collecting ... collected 4 items / 1 deselected - test_server.py::test_something_quick PASSED - test_server.py::test_another PASSED - test_server.py::TestClass::test_method PASSED + test_server.py::test_something_quick PASSED [ 33%] + test_server.py::test_another PASSED [ 66%] + test_server.py::TestClass::test_method PASSED [100%] - ======= 1 tests deselected ======== - ======= 3 passed, 1 deselected in 0.12 seconds ======== + ================== 3 passed, 1 deselected in 0.12 seconds ================== Or to select "http" and "quick" tests:: $ pytest -k "http or quick" -v - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 4 items + collecting ... 
collected 4 items / 2 deselected - test_server.py::test_send_http PASSED - test_server.py::test_something_quick PASSED + test_server.py::test_send_http PASSED [ 50%] + test_server.py::test_something_quick PASSED [100%] - ======= 2 tests deselected ======== - ======= 2 passed, 2 deselected in 0.12 seconds ======== + ================== 2 passed, 2 deselected in 0.12 seconds ================== .. note:: - If you are using expressions such as "X and Y" then both X and Y - need to be simple non-keyword names. For example, "pass" or "from" - will result in SyntaxErrors because "-k" evaluates the expression. + If you are using expressions such as ``"X and Y"`` then both ``X`` and ``Y`` + need to be simple non-keyword names. For example, ``"pass"`` or ``"from"`` + will result in SyntaxErrors because ``"-k"`` evaluates the expression using + Python's `eval`_ function. - However, if the "-k" argument is a simple string, no such restrictions - apply. Also "-k 'not STRING'" has no restrictions. You can also - specify numbers like "-k 1.3" to match tests which are parametrized - with the float "1.3". +.. _`eval`: https://docs.python.org/3.6/library/functions.html#eval + + + However, if the ``"-k"`` argument is a simple string, no such restrictions + apply. Also ``"-k 'not STRING'"`` has no restrictions. You can also + specify numbers like ``"-k 1.3"`` to match tests which are parametrized + with the float ``"1.3"``. Registering markers ------------------------------------- @@ -223,13 +222,12 @@ For an example on how to add and work with markers from a plugin, see It is recommended to explicitly register markers so that: - * there is one place in your test suite defining your markers + * There is one place in your test suite defining your markers - * asking for existing markers via ``pytest --markers`` gives good output + * Asking for existing markers via ``pytest --markers`` gives good output - * typos in function markers are treated as an error if you use - the ``--strict`` option. Future versions of ``pytest`` are probably - going to start treating non-registered markers as errors at some point. + * Typos in function markers are treated as an error if you use + the ``--strict`` option. .. 
_`scoped-marking`: @@ -332,11 +330,10 @@ specifies via named environments:: "env(name): mark test to run only on named environment") def pytest_runtest_setup(item): - envmarker = item.get_marker("env") - if envmarker is not None: - envname = envmarker.args[0] - if envname != item.config.getoption("-E"): - pytest.skip("test requires env %r" % envname) + envnames = [mark.args[0] for mark in item.iter_markers() if mark.name == "env"] + if envnames: + if item.config.getoption("-E") not in envnames: + pytest.skip("test requires env in %r" % envnames) A test file using this local plugin:: @@ -351,26 +348,26 @@ and an example invocations specifying a different environment than what the test needs:: $ pytest -E stage2 - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - test_someenv.py s + test_someenv.py s [100%] - ======= 1 skipped in 0.12 seconds ======== + ======================== 1 skipped in 0.12 seconds ========================= and here is one that specifies exactly the environment needed:: $ pytest -E stage1 - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - test_someenv.py . + test_someenv.py . [100%] - ======= 1 passed in 0.12 seconds ======== + ========================= 1 passed in 0.12 seconds ========================= The ``--markers`` option always gives you a list of available markers:: @@ -392,6 +389,48 @@ The ``--markers`` option always gives you a list of available markers:: @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. +.. _`passing callables to custom markers`: + +Passing a callable to custom markers +-------------------------------------------- + +.. regendoc:wipe + +Below is the config file that will be used in the next examples:: + + # content of conftest.py + import sys + + def pytest_runtest_setup(item): + for marker in item.iter_markers(): + if marker.name == 'my_marker': + print(marker) + sys.stdout.flush() + +A custom marker can have its argument set, i.e. ``args`` and ``kwargs`` properties, defined by either invoking it as a callable or using ``pytest.mark.MARKER_NAME.with_args``. These two methods achieve the same effect most of the time. + +However, if there is a callable as the single positional argument with no keyword arguments, using the ``pytest.mark.MARKER_NAME(c)`` will not pass ``c`` as a positional argument but decorate ``c`` with the custom marker (see :ref:`MarkDecorator `). Fortunately, ``pytest.mark.MARKER_NAME.with_args`` comes to the rescue:: + + # content of test_custom_marker.py + import pytest + + def hello_world(*args, **kwargs): + return 'Hello World' + + @pytest.mark.my_marker.with_args(hello_world) + def test_with_args(): + pass + +The output is as follows:: + + $ pytest -q -s + Mark(name='my_marker', args=(,), kwargs={}) + . + 1 passed in 0.12 seconds + +We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``. 
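
To make the difference concrete, here is a minimal sketch of a ``conftest.py`` hook that actually consumes a callable attached via ``with_args`` (the ``my_marker`` name mirrors the example above; the hook body itself is illustrative and not taken from the diff)::

    # content of conftest.py  (illustrative sketch only)
    import sys

    def pytest_runtest_setup(item):
        for marker in item.iter_markers():
            if marker.name == 'my_marker' and marker.args and callable(marker.args[0]):
                # with_args() stored hello_world as the first positional argument
                print(marker.args[0]())
                sys.stdout.flush()

Running the ``test_custom_marker.py`` module above with ``-q -s`` would then print ``Hello World`` during setup of ``test_with_args``.
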
+ + Reading markers which were set from multiple places ---------------------------------------------------- @@ -419,10 +458,9 @@ test function. From a conftest file we can read it like this:: import sys def pytest_runtest_setup(item): - g = item.get_marker("glob") - if g is not None: - for info in g: - print ("glob args=%s kwargs=%s" %(info.args, info.kwargs)) + for mark in item.iter_markers(): + if mark.name == 'glob': + print ("glob args=%s kwargs=%s" %(mark.args, mark.kwargs)) sys.stdout.flush() Let's run this without capturing output and see what we get:: @@ -453,11 +491,10 @@ for your particular platform, you could use the following plugin:: ALL = set("darwin linux win32".split()) def pytest_runtest_setup(item): - if isinstance(item, item.Function): - plat = sys.platform - if not item.get_marker(plat): - if ALL.intersection(item.keywords): - pytest.skip("cannot run on platform %s" %(plat)) + supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers()) + plat = sys.platform + if supported_platforms and plat not in supported_platforms: + pytest.skip("cannot run on platform %s" % (plat)) then tests will be skipped if they were specified for a different platform. Let's do a little test file to show how this looks like:: @@ -484,29 +521,28 @@ Let's do a little test file to show how this looks like:: then you will see two tests skipped and two executed tests as expected:: $ pytest -rs # this option reports skip reasons - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_plat.py s.s. - ======= short test summary info ======== + test_plat.py s.s. [100%] + ========================= short test summary info ========================== SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux - ======= 2 passed, 2 skipped in 0.12 seconds ======== + =================== 2 passed, 2 skipped in 0.12 seconds ==================== Note that if you specify a platform via the marker-command line option like this:: $ pytest -m linux - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 4 items + collected 4 items / 3 deselected - test_plat.py . + test_plat.py . [100%] - ======= 3 tests deselected ======== - ======= 1 passed, 3 deselected in 0.12 seconds ======== + ================== 1 passed, 3 deselected in 0.12 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. 
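
For reference, a test module exercising such platform markers could look roughly like this (a sketch only; the test names are invented and this is not the exact ``test_plat.py`` that produced the output above)::

    # content of test_plat.py  (illustrative sketch only)
    import pytest

    @pytest.mark.darwin
    def test_darwin_only():
        # runs only when sys.platform == "darwin", skipped elsewhere
        assert True

    @pytest.mark.win32
    def test_win32_only():
        # runs only when sys.platform == "win32", skipped elsewhere
        assert True

    def test_everywhere():
        # unmarked: never skipped by the conftest hook above
        assert True

With the conftest hook shown earlier, running this module on a Linux machine would report one passed and two skipped tests.
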
@@ -550,47 +586,45 @@ We want to dynamically define two markers and can do it in a We can now use the ``-m option`` to select one set:: $ pytest -m interface --tb=short - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 4 items + collected 4 items / 2 deselected - test_module.py FF + test_module.py FF [100%] - ======= FAILURES ======== - _______ test_interface_simple ________ + ================================= FAILURES ================================= + __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - _______ test_interface_complex ________ + __________________________ test_interface_complex __________________________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - ======= 2 tests deselected ======== - ======= 2 failed, 2 deselected in 0.12 seconds ======== + ================== 2 failed, 2 deselected in 0.12 seconds ================== or to select both "event" and "interface" tests:: $ pytest -m "interface or event" --tb=short - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 4 items + collected 4 items / 1 deselected - test_module.py FFF + test_module.py FFF [100%] - ======= FAILURES ======== - _______ test_interface_simple ________ + ================================= FAILURES ================================= + __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - _______ test_interface_complex ________ + __________________________ test_interface_complex __________________________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - _______ test_event_simple ________ + ____________________________ test_event_simple _____________________________ test_module.py:9: in test_event_simple assert 0 E assert 0 - ======= 1 tests deselected ======== - ======= 3 failed, 1 deselected in 0.12 seconds ======== + ================== 3 failed, 1 deselected in 0.12 seconds ================== diff --git a/doc/en/example/multipython.py b/doc/en/example/multipython.py index 586f44184..66079be7e 100644 --- a/doc/en/example/multipython.py +++ b/doc/en/example/multipython.py @@ -6,7 +6,7 @@ import py import pytest import _pytest._code -pythonlist = ['python2.6', 'python2.7', 'python3.4', 'python3.5'] +pythonlist = ['python2.7', 'python3.4', 'python3.5'] @pytest.fixture(params=pythonlist) def python1(request, tmpdir): picklefile = tmpdir.join("data.pickle") diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 5784f6ed6..dd25e888f 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -26,19 +26,19 @@ and if you installed `PyYAML`_ or a compatible YAML-parser you can now execute the test specification:: nonpython $ pytest test_simple.yml - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items - test_simple.yml F. + test_simple.yml F. 
[100%] - ======= FAILURES ======== - _______ usecase: hello ________ + ================================= FAILURES ================================= + ______________________________ usecase: hello ______________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ======= 1 failed, 1 passed in 0.12 seconds ======== + ==================== 1 failed, 1 passed in 0.12 seconds ==================== .. regendoc:wipe @@ -58,21 +58,21 @@ your own domain specific testing language this way. consulted when reporting in ``verbose`` mode:: nonpython $ pytest -v - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collecting ... collected 2 items - test_simple.yml::hello FAILED - test_simple.yml::ok PASSED + test_simple.yml::hello FAILED [ 50%] + test_simple.yml::ok PASSED [100%] - ======= FAILURES ======== - _______ usecase: hello ________ + ================================= FAILURES ================================= + ______________________________ usecase: hello ______________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ======= 1 failed, 1 passed in 0.12 seconds ======== + ==================== 1 failed, 1 passed in 0.12 seconds ==================== .. regendoc:wipe @@ -80,7 +80,7 @@ While developing your custom test collection and execution it's also interesting to just look at the collection tree:: nonpython $ pytest --collect-only - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items @@ -88,4 +88,4 @@ interesting to just look at the collection tree:: - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index bb286b472..dd01b2527 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -36,7 +36,7 @@ Now we add a test configuration like this:: def pytest_generate_tests(metafunc): if 'param1' in metafunc.fixturenames: - if metafunc.config.option.all: + if metafunc.config.getoption('all'): end = 5 else: end = 2 @@ -45,16 +45,16 @@ Now we add a test configuration like this:: This means that we only run 2 tests if we do not pass ``--all``:: $ pytest -q test_compute.py - .. + .. [100%] 2 passed in 0.12 seconds We run only two computations, so we see two dots. 
let's run the full monty:: $ pytest -q --all - ....F - ======= FAILURES ======== - _______ test_compute[4] ________ + ....F [100%] + ================================= FAILURES ================================= + _____________________________ test_compute[4] ______________________________ param1 = 4 @@ -138,7 +138,7 @@ objects, they are still using the default pytest representation:: $ pytest test_time.py --collect-only - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 8 items @@ -152,7 +152,7 @@ objects, they are still using the default pytest representation:: - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs together with the actual data, instead of listing them separately. @@ -194,20 +194,20 @@ only have to work a bit to construct the correct arguments for pytest's this is a fully self-contained example which you can run with:: $ pytest test_scenarios.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_scenarios.py .... + test_scenarios.py .... [100%] - ======= 4 passed in 0.12 seconds ======== + ========================= 4 passed in 0.12 seconds ========================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: $ pytest --collect-only test_scenarios.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -219,7 +219,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= Note that we told ``metafunc.parametrize()`` that your scenario values should be considered class-scoped. 
With pytest-2.3 this leads to a @@ -272,7 +272,7 @@ creates a database object for the actual test invocations:: Let's first see how it looks like at collection time:: $ pytest test_backends.py --collect-only - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items @@ -280,14 +280,14 @@ Let's first see how it looks like at collection time:: - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= And then when we run the test:: $ pytest -q test_backends.py - .F - ======= FAILURES ======== - _______ test_db_initialized[d2] ________ + .F [100%] + ================================= FAILURES ================================= + _________________________ test_db_initialized[d2] __________________________ db = @@ -333,14 +333,14 @@ will be passed to respective fixture function:: The result of this test will be successful:: $ pytest test_indirect_list.py --collect-only - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -350,7 +350,7 @@ Parametrizing test methods through per-class configuration .. _`unittest parametrizer`: https://github.com/testing-cabal/unittest-ext/blob/master/params.py -Here is an example ``pytest_generate_function`` function implementing a +Here is an example ``pytest_generate_tests`` function implementing a parametrization scheme similar to Michael Foord's `unittest parametrizer`_ but in a lot less code:: @@ -381,9 +381,9 @@ Our test generator looks up a class-level definition which specifies which argument sets to use for each test function. Let's run it:: $ pytest -q - F.. - ======= FAILURES ======== - _______ TestClass.test_equals[1-2] ________ + F.. [100%] + ================================= FAILURES ================================= + ________________________ TestClass.test_equals[1-2] ________________________ self = , a = 1, b = 2 @@ -411,10 +411,8 @@ is to be run with different sets of arguments for its three arguments: Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize):: . $ pytest -rs -q multipython.py - sssssssssssssss.........sss.........sss......... - ======= short test summary info ======== - SKIP [21] $REGENDOC_TMPDIR/CWD/multipython.py:23: 'python2.6' not found - 27 passed, 21 skipped in 0.12 seconds + ........................... 
[100%] + 27 passed in 0.12 seconds Indirect parametrization of optional implementations/imports -------------------------------------------------------------------- @@ -460,16 +458,16 @@ And finally a little test module:: If you run this with reporting for skips enabled:: $ pytest -rs test_module.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - test_module.py .s - ======= short test summary info ======== - SKIP [1] $REGENDOC_TMPDIR/conftest.py:10: could not import 'opt2' + test_module.py .s [100%] + ========================= short test summary info ========================== + SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' - ======= 1 passed, 1 skipped in 0.12 seconds ======== + =================== 1 passed, 1 skipped in 0.12 seconds ==================== You'll see that we don't have a ``opt2`` module and thus the second test run of our ``test_func1`` was skipped. A few notes: @@ -485,4 +483,54 @@ of our ``test_func1`` was skipped. A few notes: values as well. +Set marks or test ID for individual parametrized test +-------------------------------------------------------------------- +Use ``pytest.param`` to apply marks or set test ID to individual parametrized test. +For example:: + + # content of test_pytest_param_example.py + import pytest + @pytest.mark.parametrize('test_input,expected', [ + ('3+5', 8), + pytest.param('1+7', 8, + marks=pytest.mark.basic), + pytest.param('2+4', 6, + marks=pytest.mark.basic, + id='basic_2+4'), + pytest.param('6*9', 42, + marks=[pytest.mark.basic, pytest.mark.xfail], + id='basic_6*9'), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + +In this example, we have 4 parametrized tests. Except for the first test, +we mark the rest three parametrized tests with the custom marker ``basic``, +and for the fourth test we also use the built-in mark ``xfail`` to indicate this +test is expected to fail. For explicitness, we set test ids for some tests. + +Then run ``pytest`` with verbose mode and with only the ``basic`` marker:: + + pytest -v -m basic + ============================================ test session starts ============================================= + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 4 items + + test_pytest_param_example.py::test_eval[1+7-8] PASSED + test_pytest_param_example.py::test_eval[basic_2+4] PASSED + test_pytest_param_example.py::test_eval[basic_6*9] xfail + ========================================== short test summary info =========================================== + XFAIL test_pytest_param_example.py::test_eval[basic_6*9] + + ============================================= 1 tests deselected ============================================= + +As the result: + +- Four tests were collected +- One test was deselected because it doesn't have the ``basic`` mark. +- Three tests with the ``basic`` mark was selected. +- The test ``test_eval[1+7-8]`` passed, but the name is autogenerated and confusing. +- The test ``test_eval[basic_2+4]`` passed. +- The test ``test_eval[basic_6*9]`` was expected to fail and did fail. 
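+
+If the custom ``basic`` marker is reused across a project, it can also be
+registered so that it is listed by ``pytest --markers`` and accepted by
+``--strict`` runs. A minimal sketch; the marker description text below is
+only illustrative::
+
+    # content of pytest.ini
+    # (the description after the colon is just an example)
+    [pytest]
+    markers =
+        basic: basic arithmetic test cases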
diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 8d36c2e37..fc8dbf1b5 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -39,6 +39,14 @@ you will see that ``pytest`` only collects test-modules, which do not match the ======= 5 passed in 0.02 seconds ======= +Deselect tests during test collection +------------------------------------- + +Tests can individually be deselected during collection by passing the ``--deselect=item`` option. +For example, say ``tests/foobar/test_foobar_01.py`` contains ``test_a`` and ``test_b``. +You can run all of the tests within ``tests/`` *except* for ``tests/foobar/test_foobar_01.py::test_a`` +by invoking ``pytest`` with ``--deselect tests/foobar/test_foobar_01.py::test_a``. +``pytest`` allows multiple ``--deselect`` options. Keeping duplicate paths specified from command line ---------------------------------------------------- @@ -116,7 +124,7 @@ that match ``*_check``. For example, if we have:: then the test collection looks like this:: $ pytest --collect-only - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 2 items @@ -126,7 +134,7 @@ then the test collection looks like this:: - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= .. note:: @@ -162,7 +170,7 @@ Finding out what is collected You can always peek at the collection tree without running tests like this:: . $ pytest --collect-only pythoncollection.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 3 items @@ -173,23 +181,25 @@ You can always peek at the collection tree without running tests like this:: - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= -customizing test collection to find all .py files ---------------------------------------------------------- +.. _customizing-test-collection: + +Customizing test collection +--------------------------- .. regendoc:wipe -You can easily instruct ``pytest`` to discover tests from every python file:: - +You can easily instruct ``pytest`` to discover tests from every Python file:: # content of pytest.ini [pytest] python_files = *.py -However, many projects will have a ``setup.py`` which they don't want to be imported. Moreover, there may files only importable by a specific python version. -For such cases you can dynamically define files to be ignored by listing -them in a ``conftest.py`` file:: +However, many projects will have a ``setup.py`` which they don't want to be +imported. Moreover, there may files only importable by a specific python +version. 
For such cases you can dynamically define files to be ignored by +listing them in a ``conftest.py`` file:: # content of conftest.py import sys @@ -198,7 +208,7 @@ them in a ``conftest.py`` file:: if sys.version_info[0] > 2: collect_ignore.append("pkg/module_py2.py") -And then if you have a module file like this:: +and then if you have a module file like this:: # content of pkg/module_py2.py def test_only_on_python2(): @@ -207,13 +217,13 @@ And then if you have a module file like this:: except Exception, e: pass -and a setup.py dummy file like this:: +and a ``setup.py`` dummy file like this:: # content of setup.py 0/0 # will raise exception if imported -then a pytest run on Python2 will find the one test and will leave out the -setup.py file:: +If you run with a Python 2 interpreter then you will find the one test and will +leave out the ``setup.py`` file:: #$ pytest --collect-only ====== test session starts ====== @@ -225,13 +235,13 @@ setup.py file:: ====== no tests ran in 0.04 seconds ====== -If you run with a Python3 interpreter both the one test and the setup.py file -will be left out:: +If you run with a Python 3 interpreter both the one test and the ``setup.py`` +file will be left out:: $ pytest --collect-only - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 0 items - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 47c18851d..256fe9a16 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -10,15 +10,15 @@ not showing the nice colors here in the HTML that you get on the terminal - we are working on that):: assertion $ pytest failure_demo.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/assertion, inifile: collected 42 items - failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] - ======= FAILURES ======== - _______ test_generative[0] ________ + ================================= FAILURES ================================= + ____________________________ test_generative[0] ____________________________ param1 = 3, param2 = 6 @@ -27,7 +27,7 @@ get on the terminal - we are working on that):: E assert (3 * 2) < 6 failure_demo.py:16: AssertionError - _______ TestFailing.test_simple ________ + _________________________ TestFailing.test_simple __________________________ self = @@ -43,7 +43,7 @@ get on the terminal - we are working on that):: E + and 43 = .g at 0xdeadbeef>() failure_demo.py:29: AssertionError - _______ TestFailing.test_simple_multiline ________ + ____________________ TestFailing.test_simple_multiline _____________________ self = @@ -63,7 +63,7 @@ get on the terminal - we are working on that):: E assert 42 == 54 failure_demo.py:12: AssertionError - _______ TestFailing.test_not ________ + ___________________________ TestFailing.test_not ___________________________ self = @@ -75,7 +75,7 @@ get on the terminal - we are working on that):: E + where 42 = .f at 0xdeadbeef>() failure_demo.py:39: AssertionError - _______ TestSpecialisedExplanations.test_eq_text 
________ + _________________ TestSpecialisedExplanations.test_eq_text _________________ self = @@ -86,7 +86,7 @@ get on the terminal - we are working on that):: E + eggs failure_demo.py:43: AssertionError - _______ TestSpecialisedExplanations.test_eq_similar_text ________ + _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ self = @@ -99,7 +99,7 @@ get on the terminal - we are working on that):: E ? ^ failure_demo.py:46: AssertionError - _______ TestSpecialisedExplanations.test_eq_multiline_text ________ + ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ self = @@ -112,7 +112,7 @@ get on the terminal - we are working on that):: E bar failure_demo.py:49: AssertionError - _______ TestSpecialisedExplanations.test_eq_long_text ________ + ______________ TestSpecialisedExplanations.test_eq_long_text _______________ self = @@ -129,7 +129,7 @@ get on the terminal - we are working on that):: E ? ^ failure_demo.py:54: AssertionError - _______ TestSpecialisedExplanations.test_eq_long_text_multiline ________ + _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ self = @@ -149,7 +149,7 @@ get on the terminal - we are working on that):: E ...Full output truncated (7 lines hidden), use '-vv' to show failure_demo.py:59: AssertionError - _______ TestSpecialisedExplanations.test_eq_list ________ + _________________ TestSpecialisedExplanations.test_eq_list _________________ self = @@ -160,7 +160,7 @@ get on the terminal - we are working on that):: E Use -v to get the full diff failure_demo.py:62: AssertionError - _______ TestSpecialisedExplanations.test_eq_list_long ________ + ______________ TestSpecialisedExplanations.test_eq_list_long _______________ self = @@ -173,7 +173,7 @@ get on the terminal - we are working on that):: E Use -v to get the full diff failure_demo.py:67: AssertionError - _______ TestSpecialisedExplanations.test_eq_dict ________ + _________________ TestSpecialisedExplanations.test_eq_dict _________________ self = @@ -191,7 +191,7 @@ get on the terminal - we are working on that):: E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:70: AssertionError - _______ TestSpecialisedExplanations.test_eq_set ________ + _________________ TestSpecialisedExplanations.test_eq_set __________________ self = @@ -209,7 +209,7 @@ get on the terminal - we are working on that):: E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:73: AssertionError - _______ TestSpecialisedExplanations.test_eq_longer_list ________ + _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ self = @@ -220,7 +220,7 @@ get on the terminal - we are working on that):: E Use -v to get the full diff failure_demo.py:76: AssertionError - _______ TestSpecialisedExplanations.test_in_list ________ + _________________ TestSpecialisedExplanations.test_in_list _________________ self = @@ -229,7 +229,7 @@ get on the terminal - we are working on that):: E assert 1 in [0, 2, 3, 4, 5] failure_demo.py:79: AssertionError - _______ TestSpecialisedExplanations.test_not_in_text_multiline ________ + __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ self = @@ -248,7 +248,7 @@ get on the terminal - we are working on that):: E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:83: AssertionError - _______ TestSpecialisedExplanations.test_not_in_text_single ________ + ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ self = @@ 
-261,7 +261,7 @@ get on the terminal - we are working on that):: E ? +++ failure_demo.py:87: AssertionError - _______ TestSpecialisedExplanations.test_not_in_text_single_long ________ + _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ self = @@ -287,7 +287,7 @@ get on the terminal - we are working on that):: E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ failure_demo.py:95: AssertionError - _______ test_attribute ________ + ______________________________ test_attribute ______________________________ def test_attribute(): class Foo(object): @@ -298,7 +298,7 @@ get on the terminal - we are working on that):: E + where 1 = .Foo object at 0xdeadbeef>.b failure_demo.py:102: AssertionError - _______ test_attribute_instance ________ + _________________________ test_attribute_instance __________________________ def test_attribute_instance(): class Foo(object): @@ -309,7 +309,7 @@ get on the terminal - we are working on that):: E + where .Foo object at 0xdeadbeef> = .Foo'>() failure_demo.py:108: AssertionError - _______ test_attribute_failure ________ + __________________________ test_attribute_failure __________________________ def test_attribute_failure(): class Foo(object): @@ -329,7 +329,7 @@ get on the terminal - we are working on that):: E Exception: Failed to get attrib failure_demo.py:114: Exception - _______ test_attribute_multiple ________ + _________________________ test_attribute_multiple __________________________ def test_attribute_multiple(): class Foo(object): @@ -344,7 +344,7 @@ get on the terminal - we are working on that):: E + where .Bar object at 0xdeadbeef> = .Bar'>() failure_demo.py:125: AssertionError - _______ TestRaises.test_raises ________ + __________________________ TestRaises.test_raises __________________________ self = @@ -358,8 +358,8 @@ get on the terminal - we are working on that):: > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python.py:1219>:1: ValueError - _______ TestRaises.test_raises_doesnt ________ + <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:613>:1: ValueError + ______________________ TestRaises.test_raises_doesnt _______________________ self = @@ -368,7 +368,7 @@ get on the terminal - we are working on that):: E Failed: DID NOT RAISE failure_demo.py:137: Failed - _______ TestRaises.test_raise ________ + __________________________ TestRaises.test_raise ___________________________ self = @@ -377,7 +377,7 @@ get on the terminal - we are working on that):: E ValueError: demo error failure_demo.py:140: ValueError - _______ TestRaises.test_tupleerror ________ + ________________________ TestRaises.test_tupleerror ________________________ self = @@ -399,7 +399,7 @@ get on the terminal - we are working on that):: failure_demo.py:148: TypeError --------------------------- Captured stdout call --------------------------- l is [1, 2, 3] - _______ TestRaises.test_some_error ________ + ________________________ TestRaises.test_some_error ________________________ self = @@ -408,26 +408,28 @@ get on the terminal - we are working on that):: E NameError: name 'namenotexi' is not defined failure_demo.py:151: NameError - _______ test_dynamic_compile_shows_nicely ________ + ____________________ test_dynamic_compile_shows_nicely _____________________ def test_dynamic_compile_shows_nicely(): + import imp + import sys src = 'def foo():\n assert 1 == 0\n' name = 'abc-123' - module = 
py.std.imp.new_module(name) + module = imp.new_module(name) code = _pytest._code.compile(src, name, 'exec') py.builtin.exec_(code, module.__dict__) - py.std.sys.modules[name] = module + sys.modules[name] = module > module.foo() - failure_demo.py:166: + failure_demo.py:168: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def foo(): > assert 1 == 0 E AssertionError - <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:163>:2: AssertionError - _______ TestMoreErrors.test_complex_error ________ + <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:165>:2: AssertionError + ____________________ TestMoreErrors.test_complex_error _____________________ self = @@ -438,7 +440,7 @@ get on the terminal - we are working on that):: return 43 > somefunc(f(), g()) - failure_demo.py:176: + failure_demo.py:178: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:9: in somefunc otherfunc(x,y) @@ -451,7 +453,7 @@ get on the terminal - we are working on that):: E assert 44 == 43 failure_demo.py:6: AssertionError - _______ TestMoreErrors.test_z1_unpack_error ________ + ___________________ TestMoreErrors.test_z1_unpack_error ____________________ self = @@ -460,8 +462,8 @@ get on the terminal - we are working on that):: > a,b = l E ValueError: not enough values to unpack (expected 2, got 0) - failure_demo.py:180: ValueError - _______ TestMoreErrors.test_z2_type_error ________ + failure_demo.py:182: ValueError + ____________________ TestMoreErrors.test_z2_type_error _____________________ self = @@ -470,8 +472,8 @@ get on the terminal - we are working on that):: > a,b = l E TypeError: 'int' object is not iterable - failure_demo.py:184: TypeError - _______ TestMoreErrors.test_startswith ________ + failure_demo.py:186: TypeError + ______________________ TestMoreErrors.test_startswith ______________________ self = @@ -483,8 +485,8 @@ get on the terminal - we are working on that):: E + where False = ('456') E + where = '123'.startswith - failure_demo.py:189: AssertionError - _______ TestMoreErrors.test_startswith_nested ________ + failure_demo.py:191: AssertionError + __________________ TestMoreErrors.test_startswith_nested ___________________ self = @@ -500,8 +502,8 @@ get on the terminal - we are working on that):: E + where '123' = .f at 0xdeadbeef>() E + and '456' = .g at 0xdeadbeef>() - failure_demo.py:196: AssertionError - _______ TestMoreErrors.test_global_func ________ + failure_demo.py:198: AssertionError + _____________________ TestMoreErrors.test_global_func ______________________ self = @@ -511,8 +513,8 @@ get on the terminal - we are working on that):: E + where False = isinstance(43, float) E + where 43 = globf(42) - failure_demo.py:199: AssertionError - _______ TestMoreErrors.test_instance ________ + failure_demo.py:201: AssertionError + _______________________ TestMoreErrors.test_instance _______________________ self = @@ -522,8 +524,8 @@ get on the terminal - we are working on that):: E assert 42 != 42 E + where 42 = .x - failure_demo.py:203: AssertionError - _______ TestMoreErrors.test_compare ________ + failure_demo.py:205: AssertionError + _______________________ TestMoreErrors.test_compare ________________________ self = @@ -532,8 +534,8 @@ get on the terminal - we are working on that):: E assert 11 < 5 E + where 11 = globf(10) - failure_demo.py:206: AssertionError - _______ TestMoreErrors.test_try_finally ________ + failure_demo.py:208: AssertionError + _____________________ TestMoreErrors.test_try_finally 
______________________ self = @@ -543,8 +545,8 @@ get on the terminal - we are working on that):: > assert x == 0 E assert 1 == 0 - failure_demo.py:211: AssertionError - _______ TestCustomAssertMsg.test_single_line ________ + failure_demo.py:213: AssertionError + ___________________ TestCustomAssertMsg.test_single_line ___________________ self = @@ -557,8 +559,8 @@ get on the terminal - we are working on that):: E assert 1 == 2 E + where 1 = .A'>.a - failure_demo.py:222: AssertionError - _______ TestCustomAssertMsg.test_multiline ________ + failure_demo.py:224: AssertionError + ____________________ TestCustomAssertMsg.test_multiline ____________________ self = @@ -574,8 +576,8 @@ get on the terminal - we are working on that):: E assert 1 == 2 E + where 1 = .A'>.a - failure_demo.py:228: AssertionError - _______ TestCustomAssertMsg.test_custom_repr ________ + failure_demo.py:230: AssertionError + ___________________ TestCustomAssertMsg.test_custom_repr ___________________ self = @@ -594,5 +596,11 @@ get on the terminal - we are working on that):: E assert 1 == 2 E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a - failure_demo.py:238: AssertionError - ======= 42 failed in 0.12 seconds ======== + failure_demo.py:240: AssertionError + ============================= warnings summary ============================= + None + Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0. + Please use Metafunc.parametrize instead. + + -- Docs: http://doc.pytest.org/en/latest/warnings.html + ================== 42 failed, 1 warnings in 0.12 seconds =================== diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index da831244b..3dc942018 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -3,6 +3,8 @@ Basic patterns and examples ========================================================== +.. _request example: + Pass different values to a test function, depending on command line options ---------------------------------------------------------------------------- @@ -41,9 +43,9 @@ provide the ``cmdopt`` through a :ref:`fixture function `: Let's run this without supplying our new option:: $ pytest -q test_sample.py - F - ======= FAILURES ======== - _______ test_answer ________ + F [100%] + ================================= FAILURES ================================= + _______________________________ test_answer ________________________________ cmdopt = 'type1' @@ -63,9 +65,9 @@ Let's run this without supplying our new option:: And now with supplying a command line option:: $ pytest -q --cmdopt=type2 - F - ======= FAILURES ======== - _______ test_answer ________ + F [100%] + ================================= FAILURES ================================= + _______________________________ test_answer ________________________________ cmdopt = 'type2' @@ -112,12 +114,12 @@ of subprocesses close to your CPU. Running in an empty directory with the above conftest.py:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= .. _`excontrolskip`: @@ -127,7 +129,7 @@ Control skipping of tests according to command line option .. 
regendoc:wipe Here is a ``conftest.py`` file adding a ``--runslow`` command -line option to control skipping of ``slow`` marked tests: +line option to control skipping of ``pytest.mark.slow`` marked tests: .. code-block:: python @@ -136,7 +138,16 @@ line option to control skipping of ``slow`` marked tests: import pytest def pytest_addoption(parser): parser.addoption("--runslow", action="store_true", - help="run slow tests") + default=False, help="run slow tests") + + def pytest_collection_modifyitems(config, items): + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) We can now write a test module like this: @@ -146,45 +157,39 @@ We can now write a test module like this: import pytest - slow = pytest.mark.skipif( - not pytest.config.getoption("--runslow"), - reason="need --runslow option to run" - ) - - def test_func_fast(): pass - @slow + @pytest.mark.slow def test_func_slow(): pass and when running it will see a skipped "slow" test:: $ pytest -rs # "-rs" means report details on the little 's' - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - test_module.py .s - ======= short test summary info ======== - SKIP [1] test_module.py:13: need --runslow option to run + test_module.py .s [100%] + ========================= short test summary info ========================== + SKIP [1] test_module.py:8: need --runslow option to run - ======= 1 passed, 1 skipped in 0.12 seconds ======== + =================== 1 passed, 1 skipped in 0.12 seconds ==================== Or run it including the ``slow`` marked test:: $ pytest --runslow - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - test_module.py .. + test_module.py .. [100%] - ======= 2 passed in 0.12 seconds ======== + ========================= 2 passed in 0.12 seconds ========================= Writing well integrated assertion helpers -------------------------------------------------- @@ -215,9 +220,9 @@ unless the ``--full-trace`` command line option is specified. Let's run our little function:: $ pytest -q test_checkconfig.py - F - ======= FAILURES ======== - _______ test_something ________ + F [100%] + ================================= FAILURES ================================= + ______________________________ test_something ______________________________ def test_something(): > checkconfig(42) @@ -302,13 +307,13 @@ It's easy to present extra information in a ``pytest`` run: which will add the string to the test header accordingly:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y project deps: mylib-1.1 rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= .. 
regendoc:wipe @@ -327,25 +332,25 @@ display more information if applicable: which will add info only when run with "--v":: $ pytest -v - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache info1: did you know that ... did you? rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 0 items - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= and nothing when run plainly:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - ======= no tests ran in 0.12 seconds ======== + ======================= no tests ran in 0.12 seconds ======================= profiling test duration -------------------------- @@ -363,29 +368,29 @@ out which tests are the slowest. Let's make an artificial test suite: import time def test_funcfast(): - pass - - def test_funcslow1(): time.sleep(0.1) - def test_funcslow2(): + def test_funcslow1(): time.sleep(0.2) + def test_funcslow2(): + time.sleep(0.3) + Now we can profile which test functions execute the slowest:: $ pytest --durations=3 - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - test_some_are_slow.py ... + test_some_are_slow.py ... [100%] - ======= slowest 3 test durations ======== - 0.20s call test_some_are_slow.py::test_funcslow2 - 0.10s call test_some_are_slow.py::test_funcslow1 - 0.00s setup test_some_are_slow.py::test_funcfast - ======= 3 passed in 0.12 seconds ======== + ========================= slowest 3 test durations ========================= + 0.30s call test_some_are_slow.py::test_funcslow2 + 0.20s call test_some_are_slow.py::test_funcslow1 + 0.10s call test_some_are_slow.py::test_funcfast + ========================= 3 passed in 0.12 seconds ========================= incremental testing - test steps --------------------------------------------------- @@ -440,18 +445,15 @@ tests in a class. Here is a test module example: If we run this:: $ pytest -rx - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_step.py .Fx. - ======= short test summary info ======== - XFAIL test_step.py::TestUserHandling::()::test_deletion - reason: previous test failed (test_modification) + test_step.py .Fx. 
[100%] - ======= FAILURES ======== - _______ TestUserHandling.test_modification ________ + ================================= FAILURES ================================= + ____________________ TestUserHandling.test_modification ____________________ self = @@ -460,7 +462,10 @@ If we run this:: E assert 0 test_step.py:9: AssertionError - ======= 1 failed, 2 passed, 1 xfailed in 0.12 seconds ======== + ========================= short test summary info ========================== + XFAIL test_step.py::TestUserHandling::()::test_deletion + reason: previous test failed (test_modification) + ============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds =============== We'll see that ``test_deletion`` was not executed because ``test_modification`` failed. It is reported as an "expected failure". @@ -519,27 +524,27 @@ the ``db`` fixture: We can run this:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items - test_step.py .Fx. - a/test_db.py F - a/test_db2.py F - b/test_error.py E + test_step.py .Fx. [ 57%] + a/test_db.py F [ 71%] + a/test_db2.py F [ 85%] + b/test_error.py E [100%] - ======= ERRORS ======== - _______ ERROR at setup of test_root ________ + ================================== ERRORS ================================== + _______________________ ERROR at setup of test_root ________________________ file $REGENDOC_TMPDIR/b/test_error.py, line 1 def test_root(db): # no db here, will error out E fixture 'db' not found - > available fixtures: cache, capfd, capsys, doctest_namespace, monkeypatch, pytestconfig, record_xml_property, recwarn, tmpdir, tmpdir_factory + > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmpdir, tmpdir_factory > use 'pytest --fixtures [testpath]' for help on them. $REGENDOC_TMPDIR/b/test_error.py:1 - ======= FAILURES ======== - _______ TestUserHandling.test_modification ________ + ================================= FAILURES ================================= + ____________________ TestUserHandling.test_modification ____________________ self = @@ -548,7 +553,7 @@ We can run this:: E assert 0 test_step.py:9: AssertionError - _______ test_a1 ________ + _________________________________ test_a1 __________________________________ db = @@ -558,7 +563,7 @@ We can run this:: E assert 0 a/test_db.py:2: AssertionError - _______ test_a2 ________ + _________________________________ test_a2 __________________________________ db = @@ -568,7 +573,7 @@ We can run this:: E assert 0 a/test_db2.py:2: AssertionError - ======= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ======== + ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== The two test modules in the ``a`` directory see the same ``db`` fixture instance while the one test in the sister-directory ``b`` doesn't see it. 
We could of course @@ -627,15 +632,15 @@ if you then have failing tests: and run them:: $ pytest test_module.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - test_module.py FF + test_module.py FF [100%] - ======= FAILURES ======== - _______ test_fail1 ________ + ================================= FAILURES ================================= + ________________________________ test_fail1 ________________________________ tmpdir = local('PYTEST_TMPDIR/test_fail10') @@ -644,14 +649,14 @@ and run them:: E assert 0 test_module.py:2: AssertionError - _______ test_fail2 ________ + ________________________________ test_fail2 ________________________________ def test_fail2(): > assert 0 E assert 0 test_module.py:4: AssertionError - ======= 2 failed in 0.12 seconds ======== + ========================= 2 failed in 0.12 seconds ========================= you will have a "failures" file which contains the failing test ids:: @@ -721,7 +726,7 @@ if you then have failing tests: and run it:: $ pytest -s test_module.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items @@ -730,8 +735,8 @@ and run it:: Fexecuting test failed test_module.py::test_call_fails F - ======= ERRORS ======== - _______ ERROR at setup of test_setup_fails ________ + ================================== ERRORS ================================== + ____________________ ERROR at setup of test_setup_fails ____________________ @pytest.fixture def other(): @@ -739,8 +744,8 @@ and run it:: E assert 0 test_module.py:6: AssertionError - ======= FAILURES ======== - _______ test_call_fails ________ + ================================= FAILURES ================================= + _____________________________ test_call_fails ______________________________ something = None @@ -749,18 +754,61 @@ and run it:: E assert 0 test_module.py:12: AssertionError - _______ test_fail2 ________ + ________________________________ test_fail2 ________________________________ def test_fail2(): > assert 0 E assert 0 test_module.py:15: AssertionError - ======= 2 failed, 1 error in 0.12 seconds ======== + ==================== 2 failed, 1 error in 0.12 seconds ===================== You'll see that the fixture finalizers could use the precise reporting information. +.. _pytest current test env: + +``PYTEST_CURRENT_TEST`` environment variable +-------------------------------------------- + +.. versionadded:: 3.2 + +Sometimes a test session might get stuck and there might be no easy way to figure out +which test got stuck, for example if pytest was run in quiet mode (``-q``) or you don't have access to the console +output. This is particularly a problem if the problem helps only sporadically, the famous "flaky" kind of tests. + +``pytest`` sets a ``PYTEST_CURRENT_TEST`` environment variable when running tests, which can be inspected +by process monitoring utilities or libraries like `psutil `_ to discover which +test got stuck if necessary: + +.. 
code-block:: python
+
+    import psutil
+
+    for pid in psutil.pids():
+        environ = psutil.Process(pid).environ()
+        if 'PYTEST_CURRENT_TEST' in environ:
+            print(f'pytest process {pid} running: {environ["PYTEST_CURRENT_TEST"]}')
+
+During the test session pytest will set ``PYTEST_CURRENT_TEST`` to the current test
+:ref:`nodeid ` and the current stage, which can be ``setup``, ``call``
+and ``teardown``.
+
+For example, when running a single test function named ``test_foo`` from ``foo_module.py``,
+``PYTEST_CURRENT_TEST`` will be set to:
+
+#. ``foo_module.py::test_foo (setup)``
+#. ``foo_module.py::test_foo (call)``
+#. ``foo_module.py::test_foo (teardown)``
+
+In that order.
+
+.. note::
+
+    The contents of ``PYTEST_CURRENT_TEST`` are meant to be human readable and the actual format
+    can be changed between releases (even bug fixes) so it shouldn't be relied on for scripting
+    or automation.
+
Freezing pytest
---------------
@@ -782,15 +830,20 @@ Instead of freezing the pytest runner as a separate
executable, you can make your frozen program work as the pytest runner by some
clever argument handling during program startup. This allows you to
have a single executable, which is usually more convenient.
+Please note that the mechanism for plugin discovery used by pytest
+(setuptools entry points) doesn't work with frozen executables, so pytest
+can't find any third party plugins automatically. To include third party plugins
+like ``pytest-timeout``, they must be imported explicitly and passed on to ``pytest.main``.

.. code-block:: python

    # contents of app_main.py
    import sys
+    import pytest_timeout # Third party plugin

    if len(sys.argv) > 1 and sys.argv[1] == '--pytest':
        import pytest
-        sys.exit(pytest.main(sys.argv[2:]))
+        sys.exit(pytest.main(sys.argv[2:], plugins=[pytest_timeout]))
    else:
        # normal application execution: at this point argv can be parsed
        # by your argument-parsing library of choice as usual
@@ -801,3 +854,4 @@ This allows you to execute tests using the frozen application with
standard ``pytest`` command-line options::

    ./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/
+
diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst
index c5bf67053..6f3debd80 100644
--- a/doc/en/fixture.rst
+++ b/doc/en/fixture.rst
@@ -27,7 +27,7 @@ functions:
* fixture management scales from simple unit to complex
  functional testing, allowing to parametrize fixtures and tests according
  to configuration and component options, or to re-use fixtures
-  across class, module or whole test session scopes.
+  across function, class, module or whole test session scopes.

In addition, pytest continues to support :ref:`xunitsetup`. You can mix
both styles, moving incrementally from classic to new style, as you
@@ -57,7 +57,7 @@ using it::

    @pytest.fixture
    def smtp():
        import smtplib
-        return smtplib.SMTP("smtp.gmail.com")
+        return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)

    def test_ehlo(smtp):
        response, msg = smtp.ehlo()
@@ -69,26 +69,26 @@ will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>`
marked ``smtp`` fixture function.
Running the test looks like this:: $ pytest test_smtpsimple.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items - - test_smtpsimple.py F - - ======= FAILURES ======== - _______ test_ehlo ________ - + collected 1 item + + test_smtpsimple.py F [100%] + + ================================= FAILURES ================================= + ________________________________ test_ehlo _________________________________ + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_smtpsimple.py:11: AssertionError - ======= 1 failed in 0.12 seconds ======== + ========================= 1 failed in 0.12 seconds ========================= In the failure traceback we see that the test function was called with a ``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture @@ -109,7 +109,7 @@ Note that if you misspell a function argument or want to use one that isn't available, you'll see an error with a list of available function arguments. -.. Note:: +.. note:: You can always issue:: @@ -117,12 +117,6 @@ with a list of available function arguments. to see available fixtures. - In versions prior to 2.3 there was no ``@pytest.fixture`` marker - and you had to use a magic ``pytest_funcarg__NAME`` prefix - for the fixture factory. This remains and will remain supported - but is not anymore advertised as the primary means of declaring fixture - functions. - Fixtures: a prime example of dependency injection --------------------------------------------------- @@ -133,10 +127,39 @@ It's a prime example of `dependency injection`_ where fixture functions take the role of the *injector* and test functions are the *consumers* of fixture objects. +.. _`conftest.py`: +.. _`conftest`: + +``conftest.py``: sharing fixture functions +------------------------------------------ + +If during implementing your tests you realize that you +want to use a fixture function from multiple test files you can move it +to a ``conftest.py`` file. +You don't need to import the fixture you want to use in a test, it +automatically gets discovered by pytest. The discovery of +fixture functions starts at test classes, then test modules, then +``conftest.py`` files and finally builtin and third party plugins. + +You can also use the ``conftest.py`` file to implement +:ref:`local per-directory plugins `. + +Sharing test data +----------------- + +If you want to make test data from files available to your tests, a good way +to do this is by loading these data in a fixture for use by your tests. +This makes use of the automatic caching mechanisms of pytest. + +Another good approach is by adding the data files in the ``tests`` folder. +There are also community plugins available to help managing this aspect of +testing, e.g. `pytest-datadir `__ +and `pytest-datafiles `__. + .. _smtpshared: -Sharing a fixture across tests in a module (or class/session) ------------------------------------------------------------------ +Scope: sharing a fixture instance across tests in a class, module or session +---------------------------------------------------------------------------- .. regendoc:wipe @@ -145,10 +168,12 @@ usually time-expensive to create. 
Extending the previous example, we can add a ``scope='module'`` parameter to the :py:func:`@pytest.fixture <_pytest.python.fixture>` invocation to cause the decorated ``smtp`` fixture function to only be invoked once -per test module. Multiple test functions in a test module will thus -each receive the same ``smtp`` fixture instance. The next example puts -the fixture function into a separate ``conftest.py`` file so -that tests from multiple test modules in the directory can +per test *module* (the default is to invoke once per test *function*). +Multiple test functions in a test module will thus +each receive the same ``smtp`` fixture instance, thus saving time. + +The next example puts the fixture function into a separate ``conftest.py`` file +so that tests from multiple test modules in the directory can access the fixture function:: # content of conftest.py @@ -157,7 +182,7 @@ access the fixture function:: @pytest.fixture(scope="module") def smtp(): - return smtplib.SMTP("smtp.gmail.com") + return smtplib.SMTP("smtp.gmail.com", 587, timeout=5) The name of the fixture again is ``smtp`` and you can access its result by listing the name ``smtp`` as an input parameter in any test or fixture @@ -180,38 +205,38 @@ We deliberately insert failing ``assert 0`` statements in order to inspect what is going on and can now run the tests:: $ pytest test_module.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - - test_module.py FF - - ======= FAILURES ======== - _______ test_ehlo ________ - + + test_module.py FF [100%] + + ================================= FAILURES ================================= + ________________________________ test_ehlo _________________________________ + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes E assert 0 - + test_module.py:6: AssertionError - _______ test_noop ________ - + ________________________________ test_noop _________________________________ + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError - ======= 2 failed in 0.12 seconds ======== + ========================= 2 failed in 0.12 seconds ========================= You see the two ``assert 0`` failing and more importantly you can also see that the same (module-scoped) ``smtp`` object was passed into the two @@ -229,6 +254,52 @@ instance, you can simply declare it: # the returned fixture value will be shared for # all tests needing it +Finally, the ``class`` scope will invoke the fixture once per test *class*. + + +Higher-scoped fixtures are instantiated first +--------------------------------------------- + +.. versionadded:: 3.5 + +Within a function request for features, fixture of higher-scopes (such as ``session``) are instantiated first than +lower-scoped fixtures (such as ``function`` or ``class``). The relative order of fixtures of same scope follows +the declared order in the test function and honours dependencies between fixtures. + +Consider the code below: + +.. 
code-block:: python + + @pytest.fixture(scope="session") + def s1(): + pass + + @pytest.fixture(scope="module") + def m1(): + pass + + @pytest.fixture + def f1(tmpdir): + pass + + @pytest.fixture + def f2(): + pass + + def test_foo(f1, m1, f2, s1): + ... + + +The fixtures requested by ``test_foo`` will be instantiated in the following order: + +1. ``s1``: is the highest-scoped fixture (``session``). +2. ``m1``: is the second highest-scoped fixture (``module``). +3. ``tmpdir``: is a ``function``-scoped fixture, required by ``f1``: it needs to be instantiated at this point + because it is a dependency of ``f1``. +4. ``f1``: is the first ``function``-scoped fixture in ``test_foo`` parameter list. +5. ``f2``: is the last ``function``-scoped fixture in ``test_foo`` parameter list. + + .. _`finalization`: Fixture finalization / executing teardown code @@ -247,7 +318,7 @@ the code after the *yield* statement serves as the teardown code: @pytest.fixture(scope="module") def smtp(): - smtp = smtplib.SMTP("smtp.gmail.com") + smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5) yield smtp # provide the fixture value print("teardown smtp") smtp.close() @@ -260,7 +331,7 @@ Let's execute it:: $ pytest -s -q --tb=no FFteardown smtp - + 2 failed in 0.12 seconds We see that the ``smtp`` instance is finalized after the two @@ -281,7 +352,7 @@ Note that we can also seamlessly use the ``yield`` syntax with ``with`` statemen @pytest.fixture(scope="module") def smtp(): - with smtplib.SMTP("smtp.gmail.com") as smtp: + with smtplib.SMTP("smtp.gmail.com", 587, timeout=5) as smtp: yield smtp # provide the fixture value @@ -292,14 +363,6 @@ the ``with`` statement ends. Note that if an exception happens during the *setup* code (before the ``yield`` keyword), the *teardown* code (after the ``yield``) will not be called. - -.. note:: - Prior to version 2.10, in order to use a ``yield`` statement to execute teardown code one - had to mark a fixture using the ``yield_fixture`` marker. From 2.10 onward, normal - fixtures can use ``yield`` directly so the ``yield_fixture`` decorator is no longer needed - and considered deprecated. - - An alternative option for executing *teardown* code is to make use of the ``addfinalizer`` method of the `request-context`_ object to register finalization functions. @@ -314,7 +377,7 @@ Here's the ``smtp`` fixture changed to use ``addfinalizer`` for cleanup: @pytest.fixture(scope="module") def smtp(request): - smtp = smtplib.SMTP("smtp.gmail.com") + smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5) def fin(): print ("teardown smtp") smtp.close() @@ -350,7 +413,7 @@ ends, but ``addfinalizer`` has two key differences over ``yield``: Fixtures can introspect the requesting test context ------------------------------------------------------------- -Fixture function can accept the :py:class:`request ` object +Fixture functions can accept the :py:class:`request ` object to introspect the "requesting" test function, class or module context. 
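+
+For instance, independently of the ``smtp`` example that follows, a fixture can
+look at attributes such as ``request.module`` or ``request.function``. A minimal
+sketch, with made-up fixture and test names::
+
+    # content of test_request_intro.py (illustrative)
+    import pytest
+
+    @pytest.fixture
+    def caller_name(request):
+        # for a function-scoped fixture, request.function is the requesting test function
+        return request.function.__name__
+
+    def test_who_am_i(caller_name):
+        assert caller_name == "test_who_am_i"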
Further extending the previous ``smtp`` fixture example, let's read an optional server URL from the test module which uses our fixture:: @@ -362,7 +425,7 @@ read an optional server URL from the test module which uses our fixture:: @pytest.fixture(scope="module") def smtp(request): server = getattr(request.module, "smtpserver", "smtp.gmail.com") - smtp = smtplib.SMTP(server) + smtp = smtplib.SMTP(server, 587, timeout=5) yield smtp print ("finalizing %s (%s)" % (smtp, server)) smtp.close() @@ -373,7 +436,7 @@ again, nothing much has changed:: $ pytest -s -q --tb=no FFfinalizing (smtp.gmail.com) - + 2 failed in 0.12 seconds Let's quickly create another test module that actually sets the @@ -389,9 +452,9 @@ server URL in its module namespace:: Running it:: $ pytest -qq --tb=short test_anothersmtp.py - F - ======= FAILURES ======== - _______ test_showhelo ________ + F [100%] + ================================= FAILURES ================================= + ______________________________ test_showhelo _______________________________ test_anothersmtp.py:5: in test_showhelo assert 0, smtp.helo() E AssertionError: (250, b'mail.python.org') @@ -426,7 +489,7 @@ through the special :py:class:`request ` object:: @pytest.fixture(scope="module", params=["smtp.gmail.com", "mail.python.org"]) def smtp(request): - smtp = smtplib.SMTP(request.param) + smtp = smtplib.SMTP(request.param, 587, timeout=5) yield smtp print ("finalizing %s" % smtp) smtp.close() @@ -438,54 +501,54 @@ a value via ``request.param``. No test function code needs to change. So let's just do another run:: $ pytest -q test_module.py - FFFF - ======= FAILURES ======== - _______ test_ehlo[smtp.gmail.com] ________ - + FFFF [100%] + ================================= FAILURES ================================= + ________________________ test_ehlo[smtp.gmail.com] _________________________ + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes E assert 0 - + test_module.py:6: AssertionError - _______ test_noop[smtp.gmail.com] ________ - + ________________________ test_noop[smtp.gmail.com] _________________________ + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError - _______ test_ehlo[mail.python.org] ________ - + ________________________ test_ehlo[mail.python.org] ________________________ + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 > assert b"smtp.gmail.com" in msg - E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nSIZE 51200000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' - + E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' + test_module.py:5: AssertionError -------------------------- Captured stdout setup --------------------------- finalizing - _______ test_noop[mail.python.org] ________ - + ________________________ test_noop[mail.python.org] ________________________ + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ------------------------- Captured stdout teardown ------------------------- finalizing @@ -540,7 +603,7 @@ return ``None`` then pytest's auto-generated ID will be used. 
Running the above tests results in the following test IDs being used:: $ pytest --collect-only - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 10 items @@ -557,8 +620,42 @@ Running the above tests results in the following test IDs being used:: + + ======================= no tests ran in 0.12 seconds ======================= - ======= no tests ran in 0.12 seconds ======== +.. _`fixture-parametrize-marks`: + +Using marks with parametrized fixtures +-------------------------------------- + +:func:`pytest.param` can be used to apply marks in values sets of parametrized fixtures in the same way +that they can be used with :ref:`@pytest.mark.parametrize <@pytest.mark.parametrize>`. + +Example:: + + # content of test_fixture_marks.py + import pytest + @pytest.fixture(params=[0, 1, pytest.param(2, marks=pytest.mark.skip)]) + def data_set(request): + return request.param + + def test_data(data_set): + pass + +Running this test will *skip* the invocation of ``data_set`` with value ``2``:: + + $ pytest test_fixture_marks.py -v + =========================== test session starts ============================ + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 + cachedir: .pytest_cache + rootdir: $REGENDOC_TMPDIR, inifile: + collecting ... collected 3 items + + test_fixture_marks.py::test_data[0] PASSED [ 33%] + test_fixture_marks.py::test_data[1] PASSED [ 66%] + test_fixture_marks.py::test_data[2] SKIPPED [100%] + + =================== 2 passed, 1 skipped in 0.12 seconds ==================== .. _`interdependent fixtures`: @@ -591,16 +688,16 @@ Here we declare an ``app`` fixture which receives the previously defined ``smtp`` fixture and instantiates an ``App`` object with it. Let's run it:: $ pytest -v test_appsetup.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - - test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED - test_appsetup.py::test_smtp_exists[mail.python.org] PASSED - - ======= 2 passed in 0.12 seconds ======== + + test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED [ 50%] + test_appsetup.py::test_smtp_exists[mail.python.org] PASSED [100%] + + ========================= 2 passed in 0.12 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two different ``App`` instances and respective smtp servers. There is no @@ -660,46 +757,46 @@ to show the setup/teardown flow:: Let's run the tests in verbose mode and with looking at the print-output:: $ pytest -v -s test_module.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 - cachedir: .cache + cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 8 items - + test_module.py::test_0[1] SETUP otherarg 1 RUN test0 with otherarg 1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_0[2] SETUP otherarg 2 RUN test0 with otherarg 2 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod1] SETUP modarg mod1 RUN test1 with modarg mod1 PASSED - test_module.py::test_2[1-mod1] SETUP otherarg 1 + test_module.py::test_2[mod1-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod1 PASSED TEARDOWN otherarg 1 - - test_module.py::test_2[2-mod1] SETUP otherarg 2 + + test_module.py::test_2[mod1-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod1 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod2] TEARDOWN modarg mod1 SETUP modarg mod2 RUN test1 with modarg mod2 PASSED - test_module.py::test_2[1-mod2] SETUP otherarg 1 + test_module.py::test_2[mod2-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod2 PASSED TEARDOWN otherarg 1 - - test_module.py::test_2[2-mod2] SETUP otherarg 2 + + test_module.py::test_2[mod2-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod2 PASSED TEARDOWN otherarg 2 TEARDOWN modarg mod2 - - - ======= 8 passed in 0.12 seconds ======== + + + ========================= 8 passed in 0.12 seconds ========================= You can see that the parametrized module-scoped ``modarg`` resource caused an ordering of test execution that lead to the fewest possible "active" resources. @@ -762,7 +859,7 @@ you specified a "cleandir" function argument to each of them. Let's run it to verify our fixture is activated and the tests pass:: $ pytest -q - .. + .. [100%] 2 passed in 0.12 seconds You can specify multiple fixtures like this: @@ -843,7 +940,7 @@ class-level ``usefixtures`` decorator. If we run it, we get two passing tests:: $ pytest -q - .. + .. [100%] 2 passed in 0.12 seconds Here is how autouse fixtures work in other scopes: @@ -872,7 +969,7 @@ into a conftest.py file **without** using ``autouse``:: # content of conftest.py @pytest.fixture - def transact(self, request, db): + def transact(request, db): db.begin() yield db.rollback() @@ -888,17 +985,6 @@ All test methods in this TestClass will use the transaction fixture while other test classes or functions in the module will not use it unless they also add a ``transact`` reference. - -Shifting (visibility of) fixture functions ----------------------------------------------------- - -If during implementing your tests you realize that you -want to use a fixture function from multiple test files you can move it -to a :ref:`conftest.py ` file or even separately installable -:ref:`plugins ` without changing test code. The discovery of -fixtures functions starts at test classes, then test modules, then -``conftest.py`` files and finally builtin and third party plugins. - Overriding fixtures on various levels ------------------------------------- diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index 59abd4c79..0965c2a61 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -1,39 +1,40 @@ Installation and Getting Started =================================== -**Pythons**: Python 2.6,2.7,3.3,3.4,3.5, Jython, PyPy-2.3 +**Pythons**: Python 2.7, 3.4, 3.5, 3.6, Jython, PyPy-2.3 **Platforms**: Unix/Posix and Windows **PyPI package name**: `pytest `_ -**dependencies**: `py `_, +**Dependencies**: `py `_, `colorama (Windows) `_, -`argparse (py26) `_. 
-**documentation as PDF**: `download latest `_ +**Documentation as PDF**: `download latest `_ + +``pytest`` is a framework that makes building simple and scalable tests easy. Tests are expressive and readable—no boilerplate code required. Get started in minutes with a small unit test or complex functional test for your application or library. .. _`getstarted`: -.. _installation: +.. _`installation`: -Installation +Install ``pytest`` ---------------------------------------- -Installation:: +1. Run the following command in your command line:: pip install -U pytest -To check your installation has installed the correct version:: +2. Check that you installed the correct version:: $ pytest --version This is pytest version 3.x.y, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py .. _`simpletest`: -Our first test run +Create your first test ---------------------------------------------------------- -Let's create a first test file with a simple test function:: +Create a simple test function with just four lines of code:: # content of test_sample.py def func(x): @@ -42,18 +43,18 @@ Let's create a first test file with a simple test function:: def test_answer(): assert func(3) == 5 -That's it. You can execute the test function now:: +That’s it. You can now execute the test function:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - test_sample.py F + test_sample.py F [100%] - ======= FAILURES ======== - _______ test_answer ________ + ================================= FAILURES ================================= + _______________________________ test_answer ________________________________ def test_answer(): > assert func(3) == 5 @@ -61,32 +62,24 @@ That's it. You can execute the test function now:: E + where 4 = func(3) test_sample.py:5: AssertionError - ======= 1 failed in 0.12 seconds ======== + ========================= 1 failed in 0.12 seconds ========================= -We got a failure report because our little ``func(3)`` call did not return ``5``. +This test returns a failure report because ``func(3)`` does not return ``5``. .. note:: - You can simply use the ``assert`` statement for asserting test - expectations. pytest's :ref:`assert introspection` will intelligently - report intermediate values of the assert expression freeing - you from the need to learn the many names of `JUnit legacy methods`_. + You can use the ``assert`` statement to verify test expectations. pytest’s `Advanced assertion introspection `_ will intelligently report intermediate values of the assert expression so you can avoid the many names `of JUnit legacy methods `_. -.. _`JUnit legacy methods`: http://docs.python.org/library/unittest.html#test-cases - -.. _`assert statement`: http://docs.python.org/reference/simple_stmts.html#the-assert-statement - -Running multiple tests +Run multiple tests ---------------------------------------------------------- -``pytest`` will run all files in the current directory and its subdirectories of the form test_*.py or \*_test.py. More generally, it follows :ref:`standard test discovery rules `. +``pytest`` will run all files of the form test_*.py or \*_test.py in the current directory and its subdirectories. More generally, it follows :ref:`standard test discovery rules `. 
-Asserting that a certain exception is raised +Assert that a certain exception is raised -------------------------------------------------------------- -If you want to assert that some code raises an exception you can -use the ``raises`` helper:: +Use the ``raises`` helper to assert that some code raises an exception:: # content of test_sysexit.py import pytest @@ -97,18 +90,16 @@ use the ``raises`` helper:: with pytest.raises(SystemExit): f() -Running it with, this time in "quiet" reporting mode:: +Execute the test function with “quiet” reporting mode:: $ pytest -q test_sysexit.py - . + . [100%] 1 passed in 0.12 seconds -Grouping multiple tests in a class +Group multiple tests in a class -------------------------------------------------------------- -Once you start to have more than a few tests it often makes sense -to group tests logically, in classes and modules. Let's write a class -containing two tests:: +Once you develop multiple tests, you may want to group them into a class. pytest makes it easy to create a class containing more than one test:: # content of test_class.py class TestClass(object): @@ -120,14 +111,12 @@ containing two tests:: x = "hello" assert hasattr(x, 'check') -The two tests are found because of the standard :ref:`test discovery`. -There is no need to subclass anything. We can simply -run the module by passing its filename:: +``pytest`` discovers all tests following its :ref:`Conventions for Python test discovery `, so it finds both ``test_`` prefixed functions. There is no need to subclass anything. We can simply run the module by passing its filename:: $ pytest -q test_class.py - .F - ======= FAILURES ======== - _______ TestClass.test_two ________ + .F [100%] + ================================= FAILURES ================================= + ____________________________ TestClass.test_two ____________________________ self = @@ -140,31 +129,24 @@ run the module by passing its filename:: test_class.py:8: AssertionError 1 failed, 1 passed in 0.12 seconds -The first test passed, the second failed. Again we can easily see -the intermediate values used in the assertion, helping us to -understand the reason for the failure. +The first test passed and the second failed. You can easily see the intermediate values in the assertion to help you understand the reason for the failure. -Going functional: requesting a unique temporary directory +Request a unique temporary directory for functional tests -------------------------------------------------------------- -For functional tests one often needs to create some files -and pass them to application objects. pytest provides -:ref:`builtinfixtures` which allow to request arbitrary -resources, for example a unique temporary directory:: +``pytest`` provides `Builtin fixtures/function arguments `_ to request arbitrary resources, like a unique temporary directory:: # content of test_tmpdir.py def test_needsfiles(tmpdir): print (tmpdir) assert 0 -We list the name ``tmpdir`` in the test function signature and -``pytest`` will lookup and call a fixture factory to create the resource -before performing the test function call. Let's just run it:: +List the name ``tmpdir`` in the test function signature and ``pytest`` will lookup and call a fixture factory to create the resource before performing the test function call. 
Before the test runs, ``pytest`` creates a unique-per-test-invocation temporary directory:: $ pytest -q test_tmpdir.py - F - ======= FAILURES ======== - _______ test_needsfiles ________ + F [100%] + ================================= FAILURES ================================= + _____________________________ test_needsfiles ______________________________ tmpdir = local('PYTEST_TMPDIR/test_needsfiles0') @@ -178,22 +160,22 @@ before performing the test function call. Let's just run it:: PYTEST_TMPDIR/test_needsfiles0 1 failed in 0.12 seconds -Before the test runs, a unique-per-test-invocation temporary directory -was created. More info at :ref:`tmpdir handling`. +More info on tmpdir handling is available at :ref:`Temporary directories and files `. -You can find out what kind of builtin :ref:`fixtures` exist by typing:: +Find out what kind of builtin :ref:`pytest fixtures ` exist with the command:: pytest --fixtures # shows builtin and custom fixtures -Where to go next +Continue reading ------------------------------------- -Here are a few suggestions where to go next: +Check out additional pytest resources to help you customize tests for your unique workflow: -* :ref:`cmdline` for command line invocation examples -* :ref:`good practices ` for virtualenv, test layout -* :ref:`existingtestsuite` for working with pre-existing tests -* :ref:`fixtures` for providing a functional baseline to your tests -* :ref:`plugins` managing and writing plugins +* ":ref:`cmdline`" for command line invocation examples +* ":ref:`existingtestsuite`" for working with pre-existing tests +* ":ref:`mark`" for information on the ``pytest.mark`` mechanism +* ":ref:`fixtures`" for providing a functional baseline to your tests +* ":ref:`plugins`" for managing and writing plugins +* ":ref:`goodpractices`" for virtualenv and test layouts .. include:: links.inc diff --git a/doc/en/goodpractices.rst b/doc/en/goodpractices.rst index 92cd9ed81..16fdd24c3 100644 --- a/doc/en/goodpractices.rst +++ b/doc/en/goodpractices.rst @@ -122,7 +122,7 @@ want to distribute them along with your application:: test_view.py ... -In this scheme, it is easy to your run tests using the ``--pyargs`` option:: +In this scheme, it is easy to run your tests using the ``--pyargs`` option:: pytest --pyargs mypkg @@ -249,15 +249,6 @@ by putting them into a ``[tool:pytest]`` section: python_files = testing/*/*.py -.. note:: - Prior to 3.0, the supported section name was ``[pytest]``. Due to how - this may collide with some distutils commands, the recommended - section name for ``setup.cfg`` files is now ``[tool:pytest]``. - - Note that for ``pytest.ini`` and ``tox.ini`` files the section - name is ``[pytest]``. - - Manual Integration ^^^^^^^^^^^^^^^^^^ @@ -276,7 +267,7 @@ your own setuptools Test command for invoking pytest. def initialize_options(self): TestCommand.initialize_options(self) - self.pytest_args = [] + self.pytest_args = '' def run_tests(self): import shlex diff --git a/doc/en/historical-notes.rst b/doc/en/historical-notes.rst new file mode 100644 index 000000000..028ceff9b --- /dev/null +++ b/doc/en/historical-notes.rst @@ -0,0 +1,177 @@ +Historical Notes +================ + +This page lists features or behavior from previous versions of pytest which have changed over the years. They are +kept here as a historical note so users looking at old code can find documentation related to them. + +cache plugin integrated into the core +------------------------------------- + +.. 
versionadded:: 2.8 + +The functionality of the :ref:`core cache ` plugin was previously distributed +as a third party plugin named ``pytest-cache``. The core plugin +is compatible regarding command line options and API usage except that you +can only store/receive data between test runs that is json-serializable. + + +funcargs and ``pytest_funcarg__`` +--------------------------------- + +.. versionchanged:: 2.3 + +In versions prior to 2.3 there was no ``@pytest.fixture`` marker +and you had to use a magic ``pytest_funcarg__NAME`` prefix +for the fixture factory. This remains and will remain supported +but is not anymore advertised as the primary means of declaring fixture +functions. + + +``@pytest.yield_fixture`` decorator +----------------------------------- + +.. versionchanged:: 2.10 + +Prior to version 2.10, in order to use a ``yield`` statement to execute teardown code one +had to mark a fixture using the ``yield_fixture`` marker. From 2.10 onward, normal +fixtures can use ``yield`` directly so the ``yield_fixture`` decorator is no longer needed +and considered deprecated. + + +``[pytest]`` header in ``setup.cfg`` +------------------------------------ + +.. versionchanged:: 3.0 + +Prior to 3.0, the supported section name was ``[pytest]``. Due to how +this may collide with some distutils commands, the recommended +section name for ``setup.cfg`` files is now ``[tool:pytest]``. + +Note that for ``pytest.ini`` and ``tox.ini`` files the section +name is ``[pytest]``. + + +Applying marks to ``@pytest.mark.parametrize`` parameters +--------------------------------------------------------- + +.. versionchanged:: 3.1 + +Prior to version 3.1 the supported mechanism for marking values +used the syntax:: + + import pytest + @pytest.mark.parametrize("test_input,expected", [ + ("3+5", 8), + ("2+4", 6), + pytest.mark.xfail(("6*9", 42),), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + +This was an initial hack to support the feature but soon was demonstrated to be incomplete, +broken for passing functions or applying multiple marks with the same name but different parameters. + +The old syntax is planned to be removed in pytest-4.0. + + +``@pytest.mark.parametrize`` argument names as a tuple +------------------------------------------------------ + +.. versionchanged:: 2.4 + +In versions prior to 2.4 one needed to specify the argument +names as a tuple. This remains valid but the simpler ``"name1,name2,..."`` +comma-separated-string syntax is now advertised first because +it's easier to write and produces less line noise. + + +setup: is now an "autouse fixture" +---------------------------------- + +.. versionchanged:: 2.3 + +During development prior to the pytest-2.3 release the name +``pytest.setup`` was used but before the release it was renamed +and moved to become part of the general fixture mechanism, +namely :ref:`autouse fixtures` + + +.. _string conditions: + +Conditions as strings instead of booleans +----------------------------------------- + +.. versionchanged:: 2.4 + +Prior to pytest-2.4 the only way to specify skipif/xfail conditions was +to use strings:: + + import sys + @pytest.mark.skipif("sys.version_info >= (3,3)") + def test_function(): + ... + +During test function setup the skipif condition is evaluated by calling +``eval('sys.version_info >= (3,0)', namespace)``. The namespace contains +all the module globals, and ``os`` and ``sys`` as a minimum. 
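+
+For example, because module globals are part of that namespace, a condition string can refer to a
+module-level flag directly (a minimal sketch; ``RUN_SLOW`` is an illustrative name, not something
+pytest provides)::
+
+    # content of test_slow.py (hypothetical)
+    import pytest
+
+    RUN_SLOW = False  # module global, visible to the evaluated condition string
+
+    @pytest.mark.skipif("not RUN_SLOW")
+    def test_long_running():
+        ...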
+ +Since pytest-2.4 :ref:`boolean conditions ` are considered preferable +because markers can then be freely imported between test modules. +With strings you need to import not only the marker but all variables +used by the marker, which violates encapsulation. + +The reason for specifying the condition as a string was that ``pytest`` can +report a summary of skip conditions based purely on the condition string. +With conditions as booleans you are required to specify a ``reason`` string. + +Note that string conditions will remain fully supported and you are free +to use them if you have no need for cross-importing markers. + +The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)`` +or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace +dictionary which is constructed as follows: + +* the namespace is initialized by putting the ``sys`` and ``os`` modules + and the pytest ``config`` object into it. + +* updated with the module globals of the test function for which the + expression is applied. + +The pytest ``config`` object allows you to skip based on a test +configuration value which you might have added:: + + @pytest.mark.skipif("not config.getvalue('db')") + def test_function(...): + ... + +The equivalent with "boolean conditions" is:: + + @pytest.mark.skipif(not pytest.config.getvalue("db"), + reason="--db was not specified") + def test_function(...): + pass + +.. note:: + + You cannot use ``pytest.config.getvalue()`` in code + imported before pytest's argument parsing takes place. For example, + ``conftest.py`` files are imported before command line parsing and thus + ``config.getvalue()`` will not execute correctly. + +``pytest.set_trace()`` +---------------------- + +.. versionchanged:: 2.4 + +Previous to version 2.4 to set a break point in code one needed to use ``pytest.set_trace()``:: + + import pytest + def test_function(): + ... + pytest.set_trace() # invoke PDB debugger and tracing + + +This is no longer needed and one can use the native ``import pdb;pdb.set_trace()`` call directly. + +For more details see :ref:`breakpoints`. diff --git a/doc/en/index.rst b/doc/en/index.rst index 77e019d70..66c59f08d 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -24,15 +24,15 @@ An example of a simple test: To execute it:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - test_sample.py F + test_sample.py F [100%] - ======= FAILURES ======== - _______ test_answer ________ + ================================= FAILURES ================================= + _______________________________ test_answer ________________________________ def test_answer(): > assert inc(3) == 5 @@ -40,7 +40,7 @@ To execute it:: E + where 4 = inc(3) test_sample.py:5: AssertionError - ======= 1 failed in 0.12 seconds ======== + ========================= 1 failed in 0.12 seconds ========================= Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See :ref:`Getting Started ` for more examples. 
@@ -57,9 +57,9 @@ Features - Can run :ref:`unittest ` (including trial) and :ref:`nose ` test suites out of the box; -- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested); +- Python 2.7, Python 3.4+, PyPy 2.3, Jython 2.5 (untested); -- Rich plugin architecture, with over 150+ :ref:`external plugins ` and thriving community; +- Rich plugin architecture, with over 315+ `external plugins `_ and thriving community; Documentation diff --git a/doc/en/logging.rst b/doc/en/logging.rst new file mode 100644 index 000000000..44cfaaa28 --- /dev/null +++ b/doc/en/logging.rst @@ -0,0 +1,241 @@ +.. _logging: + +Logging +------- + +.. versionadded:: 3.3 +.. versionchanged:: 3.4 + +pytest captures log messages of level ``WARNING`` or above automatically and displays them in their own section +for each failed test in the same manner as captured stdout and stderr. + +Running without options:: + + pytest + +Shows failed tests like so:: + + ----------------------- Captured stdlog call ---------------------- + test_reporting.py 26 WARNING text going to logger + ----------------------- Captured stdout call ---------------------- + text going to stdout + ----------------------- Captured stderr call ---------------------- + text going to stderr + ==================== 2 failed in 0.02 seconds ===================== + +By default each captured log message shows the module, line number, log level +and message. + +If desired the log and date format can be specified to +anything that the logging module supports by passing specific formatting options:: + + pytest --log-format="%(asctime)s %(levelname)s %(message)s" \ + --log-date-format="%Y-%m-%d %H:%M:%S" + +Shows failed tests like so:: + + ----------------------- Captured stdlog call ---------------------- + 2010-04-10 14:48:44 WARNING text going to logger + ----------------------- Captured stdout call ---------------------- + text going to stdout + ----------------------- Captured stderr call ---------------------- + text going to stderr + ==================== 2 failed in 0.02 seconds ===================== + +These options can also be customized through ``pytest.ini`` file: + +.. code-block:: ini + + [pytest] + log_format = %(asctime)s %(levelname)s %(message)s + log_date_format = %Y-%m-%d %H:%M:%S + +Further it is possible to disable reporting of captured content (stdout, +stderr and logs) on failed tests completely with:: + + pytest --show-capture=no + + +caplog fixture +^^^^^^^^^^^^^^ + +Inside tests it is possible to change the log level for the captured log +messages. This is supported by the ``caplog`` fixture:: + + def test_foo(caplog): + caplog.set_level(logging.INFO) + pass + +By default the level is set on the root logger, +however as a convenience it is also possible to set the log level of any +logger:: + + def test_foo(caplog): + caplog.set_level(logging.CRITICAL, logger='root.baz') + pass + +The log levels set are restored automatically at the end of the test. + +It is also possible to use a context manager to temporarily change the log +level inside a ``with`` block:: + + def test_bar(caplog): + with caplog.at_level(logging.INFO): + pass + +Again, by default the level of the root logger is affected but the level of any +logger can be changed instead with:: + + def test_bar(caplog): + with caplog.at_level(logging.CRITICAL, logger='root.baz'): + pass + +Lastly all the logs sent to the logger during the test run are made available on +the fixture in the form of both the ``logging.LogRecord`` instances and the final log text. 
+This is useful when you want to assert on the contents of a message:: + + def test_baz(caplog): + func_under_test() + for record in caplog.records: + assert record.levelname != 'CRITICAL' + assert 'wally' not in caplog.text + +For all the available attributes of the log records see the +``logging.LogRecord`` class. + +You can also resort to ``record_tuples`` if all you want to do is to ensure +that certain messages have been logged under a given logger name with a given +severity and message:: + + def test_foo(caplog): + logging.getLogger().info('boo %s', 'arg') + + assert caplog.record_tuples == [ + ('root', logging.INFO, 'boo arg'), + ] + +You can call ``caplog.clear()`` to reset the captured log records in a test:: + + def test_something_with_clearing_records(caplog): + some_method_that_creates_log_records() + caplog.clear() + your_test_method() + assert ['Foo'] == [rec.message for rec in caplog.records] + + +The ``caplog.records`` attribute contains records from the current stage only, so +inside the ``setup`` phase it contains only setup logs, same with the ``call`` and +``teardown`` phases. + +To access logs from other stages, use the ``caplog.get_records(when)`` method. As an example, +if you want to make sure that tests which use a certain fixture never log any warnings, you can inspect +the records for the ``setup`` and ``call`` stages during teardown like so: + +.. code-block:: python + + + @pytest.fixture + def window(caplog): + window = create_window() + yield window + for when in ('setup', 'call'): + messages = [x.message for x in caplog.get_records(when) if x.level == logging.WARNING] + if messages: + pytest.fail('warning messages encountered during testing: {}'.format(messages)) + + + +The full API is available at :class:`_pytest.logging.LogCaptureFixture`. + + +.. _live_logs: + +Live Logs +^^^^^^^^^ + +By setting the :confval:`log_cli` configuration option to ``true``, pytest will output +logging records as they are emitted directly into the console. + +You can specify the logging level for which log records with equal or higher +level are printed to the console by passing ``--log-cli-level``. This setting +accepts the logging level names as seen in python's documentation or an integer +as the logging level number. + +Additionally, you can also specify ``--log-cli-format`` and +``--log-cli-date-format`` which mirror and default to ``--log-format`` and +``--log-date-format`` if not provided, but are applied only to the console +logging handler. + +All of the CLI log options can also be set in the configuration INI file. The +option names are: + +* ``log_cli_level`` +* ``log_cli_format`` +* ``log_cli_date_format`` + +If you need to record the whole test suite logging calls to a file, you can pass +``--log-file=/path/to/log/file``. This log file is opened in write mode which +means that it will be overwritten on each test session run. + +You can also specify the logging level for the log file by passing +``--log-file-level``. This setting accepts the logging level names as seen in +python's documentation (i.e. uppercased level names) or an integer as the logging +level number. + +Additionally, you can also specify ``--log-file-format`` and +``--log-file-date-format`` which are equal to ``--log-format`` and +``--log-date-format`` but are applied to the log file logging handler. + +All of the log file options can also be set in the configuration INI file. The +option names are: + +* ``log_file`` +* ``log_file_level`` +* ``log_file_format`` +* ``log_file_date_format`` + +.. 
_log_release_notes: + +Release notes +^^^^^^^^^^^^^ + +This feature was introduced as a drop-in replacement for the `pytest-catchlog +`_ plugin and they conflict +with each other. The backward compatibility API with ``pytest-capturelog`` +has been dropped when this feature was introduced, so if for that reason you +still need ``pytest-catchlog`` you can disable the internal feature by +adding to your ``pytest.ini``: + +.. code-block:: ini + + [pytest] + addopts=-p no:logging + + +.. _log_changes_3_4: + +Incompatible changes in pytest 3.4 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This feature was introduced in ``3.3`` and some **incompatible changes** have been +made in ``3.4`` after community feedback: + +* Log levels are no longer changed unless explicitly requested by the :confval:`log_level` configuration + or ``--log-level`` command-line options. This allows users to configure logger objects themselves. +* :ref:`Live Logs ` is now disabled by default and can be enabled by setting the + :confval:`log_cli` configuration option to ``true``. When enabled, the verbosity is increased so logging for each + test is visible. +* :ref:`Live Logs ` are now sent to ``sys.stdout`` and no longer require the ``-s`` command-line option + to work. + +If you want to partially restore the logging behavior of version ``3.3``, you can add these options to your ``ini`` +file: + +.. code-block:: ini + + [pytest] + log_cli=true + log_level=NOTSET + +More details about the discussion that led to these changes can be read in +issue `#3013 `_. diff --git a/doc/en/mark.rst b/doc/en/mark.rst index ab9546d31..bee60f923 100644 --- a/doc/en/mark.rst +++ b/doc/en/mark.rst @@ -4,12 +4,12 @@ Marking test functions with attributes ================================================================= -.. currentmodule:: _pytest.mark By using the ``pytest.mark`` helper you can easily set metadata on your test functions. There are some builtin markers, for example: +* :ref:`skip ` - always skip a test function * :ref:`skipif ` - skip a test function if a certain condition is met * :ref:`xfail ` - produce an "expected failure" outcome if a certain condition is met @@ -26,15 +26,62 @@ which also serve as documentation. :ref:`fixtures `. -API reference for mark related objects ------------------------------------------------- +.. currentmodule:: _pytest.mark.structures +.. autoclass:: Mark + :members: + :noindex: -.. autoclass:: MarkGenerator - :members: -.. autoclass:: MarkDecorator - :members: +.. _`marker-iteration`: -.. autoclass:: MarkInfo - :members: +Marker iteration +================= + +.. versionadded:: 3.6 + +pytest's marker implementation traditionally worked by simply updating the ``__dict__`` attribute of functions to add markers, in a cumulative manner. As a result of this, markers would unintentionally be passed along class hierarchies in surprising ways, and the API for retrieving them was inconsistent, as markers from parametrization would be stored differently than markers applied using the ``@pytest.mark`` decorator and markers added via ``node.add_marker``. + +This state of affairs made it technically next to impossible to use data from markers correctly without a deep understanding of the internals, leading to subtle and hard-to-understand bugs in more advanced usages. 
+ +Depending on how a marker was declared or changed, one would get either a ``MarkerInfo``, which might contain markers from sibling classes, +or ``MarkDecorators`` when marks came from parametrization or from a ``node.add_marker`` call, discarding prior marks. Also, ``MarkerInfo`` acts like a single mark when it in fact represents a merged view of multiple marks with the same name. + +On top of that, markers were not accessible the same way for modules, classes, and functions/methods; +in fact, markers were only accessible in functions, even if they were declared on classes/modules. + +A new API to access markers was introduced in pytest 3.6 in order to solve the problems with the initial design, providing the :func:`_pytest.nodes.Node.iter_markers` method to iterate over markers in a consistent manner and reworking the internals, which solved a great deal of problems. + +Here is a non-exhaustive list of issues fixed by the new implementation: + + +* Marks don't pick up nested classes (`#199 `_). + +* markers stains on all related classes (`#568 `_). + +* combining marks - args and kwargs calculation (`#2897 `_). + +* ``request.node.get_marker('name')`` returns ``None`` for markers applied in classes (`#902 `_). + +* marks applied in parametrize are stored as markdecorator (`#2400 `_). + +* fix marker interaction in a backward incompatible way (`#1670 `_). + +* Refactor marks to get rid of the current "marks transfer" mechanism (`#2363 `_). + +* Introduce FunctionDefinition node, use it in generate_tests (`#2522 `_). + +* remove named marker attributes and collect markers in items (`#891 `_). + +* skipif mark from parametrize hides module level skipif mark (`#1540 `_). + +* skipif + parametrize not skipping tests (`#1296 `_). + +* marker transfer incompatible with inheritance (`#535 `_). + +More details can be found in the `original PR `_. + +.. note:: + + In a future major release of pytest we will introduce class-based markers, + at which point markers will no longer be limited to instances of :py:class:`Mark`. diff --git a/doc/en/monkeypatch.rst b/doc/en/monkeypatch.rst index 0c07b2f44..b25e07f9a 100644 --- a/doc/en/monkeypatch.rst +++ b/doc/en/monkeypatch.rst @@ -61,15 +61,27 @@ so that any attempts within tests to create http requests will fail. ``compile``, etc., because it might break pytest's internals. If that's unavoidable, passing ``--tb=native``, ``--assert=plain`` and ``--capture=no`` might help although there's no guarantee. + +.. note:: + + Mind that patching ``stdlib`` functions and some third-party libraries used by pytest + might break pytest itself, therefore in those cases it is recommended to use + :meth:`MonkeyPatch.context` to limit the patching to the block you want tested: + + .. code-block:: python + + import functools + def test_partial(monkeypatch): + with monkeypatch.context() as m: + m.setattr(functools, "partial", 3) + assert functools.partial == 3 + + See issue `#3290 `_ for details. -Method reference of the monkeypatch fixture -------------------------------------------- +.. currentmodule:: _pytest.monkeypatch -.. autoclass:: MonkeyPatch - :members: - -``monkeypatch.setattr/delattr/delitem/delenv()`` all -by default raise an Exception if the target does not exist. -Pass ``raising=False`` if you want to skip this check. +API Reference +------------- +Consult the docs for the :class:`MonkeyPatch` class. 
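+
+A minimal usage sketch (the environment variable names below are purely illustrative; all
+modifications made through the fixture are undone automatically when the test ends)::
+
+    import os
+
+    def test_uses_home(monkeypatch, tmpdir):
+        # point HOME at a temporary directory for the duration of this test
+        monkeypatch.setenv("HOME", str(tmpdir))
+        # remove a variable that may not be set; raising=False skips the existence check
+        monkeypatch.delenv("OPTIONAL_VAR", raising=False)
+        assert os.environ["HOME"] == str(tmpdir)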
diff --git a/doc/en/nose.rst b/doc/en/nose.rst index 5effd0d7b..10a10633a 100644 --- a/doc/en/nose.rst +++ b/doc/en/nose.rst @@ -26,7 +26,7 @@ Supported nose Idioms * setup and teardown at module/class/method level * SkipTest exceptions and markers * setup/teardown decorators -* ``yield``-based tests and their setup +* ``yield``-based tests and their setup (considered deprecated as of pytest 3.0) * ``__test__`` attribute on modules/classes/functions * general usage of nose utilities diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index fdd963b1d..ba2cd3cce 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -33,7 +33,7 @@ pytest enables test parametrization at several levels: .. versionchanged:: 2.4 Several improvements. -The builtin ``pytest.mark.parametrize`` decorator enables +The builtin :ref:`pytest.mark.parametrize ref` decorator enables parametrization of arguments for a test function. Here is a typical example of a test function that implements checking that a certain input leads to an expected output:: @@ -53,15 +53,15 @@ tuples so that the ``test_eval`` function will run three times using them in turn:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - test_expectation.py ..F + test_expectation.py ..F [100%] - ======= FAILURES ======== - _______ test_eval[6*9-42] ________ + ================================= FAILURES ================================= + ____________________________ test_eval[6*9-42] _____________________________ test_input = '6*9', expected = 42 @@ -76,7 +76,7 @@ them in turn:: E + where 54 = eval('6*9') test_expectation.py:8: AssertionError - ======= 1 failed, 2 passed in 0.12 seconds ======== + ==================== 1 failed, 2 passed in 0.12 seconds ==================== As designed in this example, only one pair of input/output values fails the simple test function. And as usual with test function arguments, @@ -99,37 +99,17 @@ for example with the builtin ``mark.xfail``:: def test_eval(test_input, expected): assert eval(test_input) == expected -.. note:: - - prior to version 3.1 the supported mechanism for marking values - used the syntax:: - - import pytest - @pytest.mark.parametrize("test_input,expected", [ - ("3+5", 8), - ("2+4", 6), - pytest.mark.xfail(("6*9", 42),), - ]) - def test_eval(test_input, expected): - assert eval(test_input) == expected - - - This was an initial hack to support the feature but soon was demonstrated to be incomplete, - broken for passing functions or applying multiple marks with the same name but different parameters. - The old syntax will be removed in pytest-4.0. - - Let's run this:: $ pytest - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - test_expectation.py ..x + test_expectation.py ..x [100%] - ======= 2 passed, 1 xfailed in 0.12 seconds ======== + =================== 2 passed, 1 xfailed in 0.12 seconds ==================== The one parameter set which caused a failure previously now shows up as an "xfailed (expected to fail)" test. 
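+
+If it helps readability, :func:`pytest.param` can also attach a custom ``id`` to a value set
+alongside the marks (a small sketch, not part of the example above)::
+
+    import pytest
+
+    @pytest.mark.parametrize("test_input,expected", [
+        ("3+5", 8),
+        pytest.param("6*9", 42, id="hitchhiker", marks=pytest.mark.xfail),
+    ])
+    def test_eval(test_input, expected):
+        assert eval(test_input) == expected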
@@ -143,15 +123,8 @@ To get all combinations of multiple parametrized arguments you can stack def test_foo(x, y): pass -This will run the test with the arguments set to x=0/y=2, x=0/y=3, x=1/y=2 and -x=1/y=3. - -.. note:: - - In versions prior to 2.4 one needed to specify the argument - names as a tuple. This remains valid but the simpler ``"name1,name2,..."`` - comma-separated-string syntax is now advertised first because - it's easier to write and produces less line noise. +This will run the test with the arguments set to ``x=0/y=2``, ``x=1/y=2``, +``x=0/y=3``, and ``x=1/y=3`` exhausting parameters in the order of the decorators. .. _`pytest_generate_tests`: @@ -187,20 +160,20 @@ command line option and the parametrization of our test function:: def pytest_generate_tests(metafunc): if 'stringinput' in metafunc.fixturenames: metafunc.parametrize("stringinput", - metafunc.config.option.stringinput) + metafunc.config.getoption('stringinput')) If we now pass two stringinput values, our test will run twice:: $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py - .. + .. [100%] 2 passed in 0.12 seconds Let's also run with a stringinput that will lead to a failing test:: $ pytest -q --stringinput="!" test_strings.py - F - ======= FAILURES ======== - _______ test_valid_string[!] ________ + F [100%] + ================================= FAILURES ================================= + ___________________________ test_valid_string[!] ___________________________ stringinput = '!' @@ -220,19 +193,16 @@ If you don't specify a stringinput it will be skipped because list:: $ pytest -q -rs test_strings.py - s - ======= short test summary info ======== - SKIP [1] test_strings.py:1: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 + s [100%] + ========================= short test summary info ========================== + SKIP [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 1 skipped in 0.12 seconds +Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across +those sets cannot be duplicated, otherwise an error will be raised. + +More examples +------------- + For further examples, you might want to look at :ref:`more parametrization examples `. - -.. _`metafunc object`: - -The **metafunc** object -------------------------------------------- - -.. currentmodule:: _pytest.python -.. autoclass:: Metafunc - :members: diff --git a/doc/en/plugins.rst b/doc/en/plugins.rst index ec031e9e0..3d1226d34 100644 --- a/doc/en/plugins.rst +++ b/doc/en/plugins.rst @@ -27,9 +27,6 @@ Here is a little annotated list for some popular plugins: for `twisted `_ apps, starting a reactor and processing deferreds from test functions. -* `pytest-catchlog `_: - to capture and assert about messages from the logging module - * `pytest-cov `_: coverage reporting, compatible with distributed testing @@ -64,10 +61,11 @@ status against different pytest and Python versions, please visit You may also discover more plugins through a `pytest- pypi.python.org search`_. -.. _`available installable plugins`: .. _`pytest- pypi.python.org search`: http://pypi.python.org/pypi?%3Aaction=search&term=pytest-&submit=search +.. _`available installable plugins`: + Requiring/Loading plugins in a test module or conftest file ----------------------------------------------------------- @@ -82,6 +80,12 @@ will be loaded as well. 
which will import the specified module as a ``pytest`` plugin. +.. note:: + Requiring plugins using a ``pytest_plugins`` variable in non-root + ``conftest.py`` files is deprecated. See + :ref:`full explanation ` + in the Writing plugins section. + .. _`findpluginname`: Finding out which plugins are active @@ -94,7 +98,7 @@ environment you can type:: and will get an extended test header which shows activated plugins and their names. It will also print local plugins aka -:ref:`conftest.py ` files when they are loaded. +:ref:`conftest.py ` files when they are loaded. .. _`cmdunregister`: @@ -123,36 +127,3 @@ CI server), you can set ``PYTEST_ADDOPTS`` environment variable to See :ref:`findpluginname` for how to obtain the name of a plugin. .. _`builtin plugins`: - -Pytest default plugin reference -------------------------------- - - -You can find the source code for the following plugins -in the `pytest repository `_. - -.. autosummary:: - - _pytest.assertion - _pytest.cacheprovider - _pytest.capture - _pytest.config - _pytest.doctest - _pytest.helpconfig - _pytest.junitxml - _pytest.mark - _pytest.monkeypatch - _pytest.nose - _pytest.pastebin - _pytest.debugging - _pytest.pytester - _pytest.python - _pytest.recwarn - _pytest.resultlog - _pytest.runner - _pytest.main - _pytest.skipping - _pytest.terminal - _pytest.tmpdir - _pytest.unittest - diff --git a/doc/en/projects.rst b/doc/en/projects.rst index a2edbf68f..51f2d94fd 100644 --- a/doc/en/projects.rst +++ b/doc/en/projects.rst @@ -37,7 +37,7 @@ Here are some examples of projects using ``pytest`` (please send notes via :ref: * `mwlib `_ mediawiki parser and utility library * `The Translate Toolkit `_ for localization and conversion * `execnet `_ rapid multi-Python deployment -* `pylib `_ cross-platform path, IO, dynamic code library +* `pylib `_ cross-platform path, IO, dynamic code library * `Pacha `_ configuration management in five minutes * `bbfreeze `_ create standalone executables from Python scripts * `pdb++ `_ a fancier version of PDB @@ -58,7 +58,7 @@ Here are some examples of projects using ``pytest`` (please send notes via :ref: * `katcp `_ Telescope communication protocol over Twisted * `kss plugin timer `_ * `pyudev `_ a pure Python binding to the Linux library libudev -* `pytest-localserver `_ a plugin for pytest that provides an httpserver and smtpserver +* `pytest-localserver `_ a plugin for pytest that provides an httpserver and smtpserver * `pytest-monkeyplus `_ a plugin that extends monkeypatch These projects help integrate ``pytest`` into other Python frameworks: diff --git a/doc/en/proposals/parametrize_with_fixtures.rst b/doc/en/proposals/parametrize_with_fixtures.rst index 381bc98f1..146032aa4 100644 --- a/doc/en/proposals/parametrize_with_fixtures.rst +++ b/doc/en/proposals/parametrize_with_fixtures.rst @@ -1,8 +1,13 @@ :orphan: -========================= -Parametrize with fixtures -========================= +=================================== +PROPOSAL: Parametrize with fixtures +=================================== + +.. warning:: + + This document outlines a proposal around using fixtures as input + of parametrized tests or fixtures. Problem ------- @@ -108,8 +113,13 @@ the following values. Alternative approach -------------------- -A new helper function named ``fixture_request`` tells pytest to yield all -parameters of a fixture. +A new helper function named ``fixture_request`` would tell pytest to yield +all parameters marked as a fixture. + +.. 
note:: + + The `pytest-lazy-fixture `_ plugin implements a very + similar solution to the proposal below, make sure to check it out. .. code-block:: python diff --git a/doc/en/pythonpath.rst b/doc/en/pythonpath.rst new file mode 100644 index 000000000..b64742768 --- /dev/null +++ b/doc/en/pythonpath.rst @@ -0,0 +1,76 @@ +.. _pythonpath: + +pytest import mechanisms and ``sys.path``/``PYTHONPATH`` +======================================================== + +Here's a list of scenarios where pytest may need to change ``sys.path`` in order +to import test modules or ``conftest.py`` files. + +Test modules / ``conftest.py`` files inside packages +---------------------------------------------------- + +Consider this file and directory layout:: + + root/ + |- foo/ + |- __init__.py + |- conftest.py + |- bar/ + |- __init__.py + |- tests/ + |- __init__.py + |- test_foo.py + + +When executing:: + + pytest root/ + + + +pytest will find ``foo/bar/tests/test_foo.py`` and realize it is part of a package given that +there's an ``__init__.py`` file in the same folder. It will then search upwards until it can find the +last folder which still contains an ``__init__.py`` file in order to find the package *root* (in +this case ``foo/``). To load the module, it will insert ``root/`` to the front of +``sys.path`` (if not there already) in order to load +``test_foo.py`` as the *module* ``foo.bar.tests.test_foo``. + +The same logic applies to the ``conftest.py`` file: it will be imported as ``foo.conftest`` module. + +Preserving the full package name is important when tests live in a package to avoid problems +and allow test modules to have duplicated names. This is also discussed in details in +:ref:`test discovery`. + +Standalone test modules / ``conftest.py`` files +----------------------------------------------- + +Consider this file and directory layout:: + + root/ + |- foo/ + |- conftest.py + |- bar/ + |- tests/ + |- test_foo.py + + +When executing:: + + pytest root/ + +pytest will find ``foo/bar/tests/test_foo.py`` and realize it is NOT part of a package given that +there's no ``__init__.py`` file in the same folder. It will then add ``root/foo/bar/tests`` to +``sys.path`` in order to import ``test_foo.py`` as the *module* ``test_foo``. The same is done +with the ``conftest.py`` file by adding ``root/foo`` to ``sys.path`` to import it as ``conftest``. + +For this reason this layout cannot have test modules with the same name, as they all will be +imported in the global import namespace. + +This is also discussed in details in :ref:`test discovery`. + +Invoking ``pytest`` versus ``python -m pytest`` +----------------------------------------------- + +Running pytest with ``python -m pytest [...]`` instead of ``pytest [...]`` yields nearly +equivalent behaviour, except that the former call will add the current directory to ``sys.path``. +See also :ref:`cmdline`. diff --git a/doc/en/reference.rst b/doc/en/reference.rst new file mode 100644 index 000000000..be2180c53 --- /dev/null +++ b/doc/en/reference.rst @@ -0,0 +1,1294 @@ + +Reference +========= + +This page contains the full reference to pytest's API. + +.. contents:: + :depth: 3 + :local: + +Functions +--------- + +pytest.approx +~~~~~~~~~~~~~ + +.. autofunction:: _pytest.python_api.approx + +pytest.fail +~~~~~~~~~~~ + +**Tutorial**: :ref:`skipping` + +.. autofunction:: _pytest.outcomes.fail + +pytest.skip +~~~~~~~~~~~ + +.. autofunction:: _pytest.outcomes.skip(msg, [allow_module_level=False]) + +pytest.importorskip +~~~~~~~~~~~~~~~~~~~ + +.. 
autofunction:: _pytest.outcomes.importorskip + +pytest.xfail +~~~~~~~~~~~~ + +.. autofunction:: _pytest.outcomes.xfail + +pytest.exit +~~~~~~~~~~~ + +.. autofunction:: _pytest.outcomes.exit + +pytest.main +~~~~~~~~~~~ + +.. autofunction:: _pytest.config.main + +pytest.param +~~~~~~~~~~~~~ + +.. autofunction:: pytest.param(*values, [id], [marks]) + +pytest.raises +~~~~~~~~~~~~~ + +**Tutorial**: :ref:`assertraises`. + +.. autofunction:: pytest.raises(expected_exception: Exception, [match], [message]) + :with: excinfo + +pytest.deprecated_call +~~~~~~~~~~~~~~~~~~~~~~ + +**Tutorial**: :ref:`ensuring_function_triggers`. + +.. autofunction:: pytest.deprecated_call() + :with: + +pytest.register_assert_rewrite +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Tutorial**: :ref:`assertion-rewriting`. + +.. autofunction:: pytest.register_assert_rewrite + +pytest.warns +~~~~~~~~~~~~ + +**Tutorial**: :ref:`assertwarnings` + +.. autofunction:: pytest.warns(expected_warning: Exception, [match]) + :with: + + +.. _`marks ref`: + +Marks +----- + +Marks can be used to apply metadata to *test functions* (but not fixtures), which can then be accessed by +fixtures or plugins. + + + + +.. _`pytest.mark.filterwarnings ref`: + +pytest.mark.filterwarnings +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Tutorial**: :ref:`filterwarnings`. + +Add warning filters to marked test items. + +.. py:function:: pytest.mark.filterwarnings(filter) + + :keyword str filter: + A *warning specification string*, which is composed of contents of the tuple ``(action, message, category, module, lineno)`` + as specified in `The Warnings filter `_ section of + the Python documentation, separated by ``":"``. Optional fields can be omitted. + + For example: + + .. code-block:: python + + @pytest.mark.filterwarnings("ignore:.*usage will be deprecated.*:DeprecationWarning") + def test_foo(): + ... + + +.. _`pytest.mark.parametrize ref`: + +pytest.mark.parametrize +~~~~~~~~~~~~~~~~~~~~~~~ + +**Tutorial**: :doc:`parametrize`. + +.. automethod:: _pytest.python.Metafunc.parametrize + + +.. _`pytest.mark.skip ref`: + +pytest.mark.skip +~~~~~~~~~~~~~~~~ + +**Tutorial**: :ref:`skip`. + +Unconditionally skip a test function. + +.. py:function:: pytest.mark.skip(*, reason=None) + + :keyword str reason: Reason why the test function is being skipped. + + +.. _`pytest.mark.skipif ref`: + +pytest.mark.skipif +~~~~~~~~~~~~~~~~~~ + +**Tutorial**: :ref:`skipif`. + +Skip a test function if a condition is ``True``. + +.. py:function:: pytest.mark.skipif(condition, *, reason=None) + + :type condition: bool or str + :param condition: ``True/False`` if the test function should be skipped or a :ref:`condition string `. + :keyword str reason: Reason why the test function is being skipped. + + +.. _`pytest.mark.xfail ref`: + +pytest.mark.xfail +~~~~~~~~~~~~~~~~~~ + +**Tutorial**: :ref:`xfail`. + +Marks a test function as *expected to fail*. + +.. py:function:: pytest.mark.xfail(condition=None, *, reason=None, raises=None, run=True, strict=False) + + :type condition: bool or str + :param condition: ``True/False`` if the test function should be marked as xfail or a :ref:`condition string `. + :keyword str reason: Reason why the test function is marked as xfail. + :keyword Exception raises: Exception subclass expected to be raised by the test function; other exceptions will fail the test. + :keyword bool run: + If the test function should actually be executed. If ``False``, the function will always xfail and will + not be executed (useful if a function is segfaulting). 
+ :keyword bool strict: + * If ``False`` (the default) the function will be shown in the terminal output as ``xfailed`` if it fails + and as ``xpass`` if it passes. In both cases this will not cause the test suite to fail as a whole. This + is particularly useful to mark *flaky* tests (tests that fail at random) to be tackled later. + * If ``True``, the function will be shown in the terminal output as ``xfailed`` if it fails, but if it + unexpectedly passes then it will **fail** the test suite. This is particularly useful to mark functions + that are always failing and there should be a clear indication if they unexpectedly start to pass (for example + a new release of a library fixes a known bug). + + +custom marks +~~~~~~~~~~~~ + +Marks are created dynamically using the factory object ``pytest.mark`` and applied as a decorator. + +For example: + +.. code-block:: python + + @pytest.mark.timeout(10, 'slow', method='thread') + def test_function(): + ... + +Will create and attach a :class:`Mark <_pytest.mark.structures.Mark>` object to the collected +:class:`Item <_pytest.nodes.Item>`, which can then be accessed by fixtures or hooks with +:meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>`. The ``mark`` object will have the following attributes: + +.. code-block:: python + + mark.args == (10, 'slow') + mark.kwargs == {'method': 'thread'} + + +Fixtures +-------- + +**Tutorial**: :ref:`fixture`. + +Fixtures are requested by test functions or other fixtures by declaring them as argument names. + + +Example of a test requiring a fixture: + +.. code-block:: python + + def test_output(capsys): + print('hello') + out, err = capsys.readouterr() + assert out == 'hello\n' + + +Example of a fixture requiring another fixture: + +.. code-block:: python + + @pytest.fixture + def db_session(tmpdir): + fn = tmpdir / 'db.file' + return connect(str(fn)) + +For more details, consult the full :ref:`fixtures docs `. + + +@pytest.fixture +~~~~~~~~~~~~~~~ + +.. autofunction:: pytest.fixture + :decorator: + + +.. _`cache-api`: + +config.cache +~~~~~~~~~~~~ + +**Tutorial**: :ref:`cache`. + +The ``config.cache`` object allows other plugins and fixtures +to store and retrieve values across test runs. To access it from fixtures +request ``pytestconfig`` into your fixture and get it with ``pytestconfig.cache``. + +Under the hood, the cache plugin uses the simple +``dumps``/``loads`` API of the :py:mod:`json` stdlib module. + +.. currentmodule:: _pytest.cacheprovider + +.. automethod:: Cache.get +.. automethod:: Cache.set +.. automethod:: Cache.makedir + + +capsys +~~~~~~ + +**Tutorial**: :doc:`capture`. + +.. currentmodule:: _pytest.capture + +.. autofunction:: capsys() + :no-auto-options: + + Returns an instance of :py:class:`CaptureFixture`. + + Example: + + .. code-block:: python + + def test_output(capsys): + print("hello") + captured = capsys.readouterr() + assert captured.out == "hello\n" + +.. autoclass:: CaptureFixture() + :members: + + +capsysbinary +~~~~~~~~~~~~ + +**Tutorial**: :doc:`capture`. + +.. autofunction:: capsysbinary() + :no-auto-options: + + Returns an instance of :py:class:`CaptureFixture`. + + Example: + + .. code-block:: python + + def test_output(capsysbinary): + print("hello") + captured = capsysbinary.readouterr() + assert captured.out == b"hello\n" + + +capfd +~~~~~~ + +**Tutorial**: :doc:`capture`. + +.. autofunction:: capfd() + :no-auto-options: + + Returns an instance of :py:class:`CaptureFixture`. + + Example: + + .. 
code-block:: python
+
+        def test_system_echo(capfd):
+            os.system('echo "hello"')
+            captured = capfd.readouterr()
+            assert captured.out == "hello\n"
+
+
+capfdbinary
+~~~~~~~~~~~~
+
+**Tutorial**: :doc:`capture`.
+
+.. autofunction:: capfdbinary()
+    :no-auto-options:
+
+    Returns an instance of :py:class:`CaptureFixture`.
+
+    Example:
+
+    .. code-block:: python
+
+        def test_system_echo(capfdbinary):
+            os.system('echo "hello"')
+            captured = capfdbinary.readouterr()
+            assert captured.out == b"hello\n"
+
+
+doctest_namespace
+~~~~~~~~~~~~~~~~~
+
+**Tutorial**: :doc:`doctest`.
+
+.. autofunction:: _pytest.doctest.doctest_namespace()
+
+    Usually this fixture is used in conjunction with another ``autouse`` fixture:
+
+    .. code-block:: python
+
+        @pytest.fixture(autouse=True)
+        def add_np(doctest_namespace):
+            doctest_namespace['np'] = numpy
+
+    For more details: :ref:`doctest_namespace`.
+
+
+request
+~~~~~~~
+
+**Tutorial**: :ref:`request example`.
+
+The ``request`` fixture is a special fixture providing information about the requesting test function.
+
+.. autoclass:: _pytest.fixtures.FixtureRequest()
+    :members:
+
+
+pytestconfig
+~~~~~~~~~~~~
+
+.. autofunction:: _pytest.fixtures.pytestconfig()
+
+
+record_property
+~~~~~~~~~~~~~~~
+
+**Tutorial**: :ref:`record_property example`.
+
+.. autofunction:: _pytest.junitxml.record_property()
+
+caplog
+~~~~~~
+
+**Tutorial**: :doc:`logging`.
+
+.. autofunction:: _pytest.logging.caplog()
+    :no-auto-options:
+
+    This returns a :class:`_pytest.logging.LogCaptureFixture` instance.
+
+.. autoclass:: _pytest.logging.LogCaptureFixture
+    :members:
+
+
+monkeypatch
+~~~~~~~~~~~
+
+.. currentmodule:: _pytest.monkeypatch
+
+**Tutorial**: :doc:`monkeypatch`.
+
+.. autofunction:: _pytest.monkeypatch.monkeypatch()
+    :no-auto-options:
+
+    This returns a :class:`MonkeyPatch` instance.
+
+.. autoclass:: _pytest.monkeypatch.MonkeyPatch
+    :members:
+
+testdir
+~~~~~~~
+
+.. currentmodule:: _pytest.pytester
+
+This fixture provides a :class:`Testdir` instance useful for black-box testing of test files, making it ideal to
+test plugins.
+
+To use it, include in your top-most ``conftest.py`` file::
+
+    pytest_plugins = 'pytester'
+
+
+.. autoclass:: Testdir()
+    :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile
+
+.. autoclass:: RunResult()
+    :members:
+
+.. autoclass:: LineMatcher()
+    :members:
+
+
+recwarn
+~~~~~~~
+
+**Tutorial**: :ref:`assertwarnings`
+
+.. currentmodule:: _pytest.recwarn
+
+.. autofunction:: recwarn()
+    :no-auto-options:
+
+.. autoclass:: _pytest.recwarn.WarningsRecorder()
+    :members:
+
+Each recorded warning is an instance of :class:`warnings.WarningMessage`.
+
+.. note::
+    :class:`RecordedWarning` was changed from a plain class to a namedtuple in pytest 3.1
+
+.. note::
+    ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
+    differently; see :ref:`ensuring_function_triggers`.
+
+
+tmpdir
+~~~~~~
+
+**Tutorial**: :doc:`tmpdir`
+
+.. currentmodule:: _pytest.tmpdir
+
+.. autofunction:: tmpdir()
+    :no-auto-options:
+
+
+tmpdir_factory
+~~~~~~~~~~~~~~
+
+**Tutorial**: :ref:`tmpdir factory example`
+
+.. _`tmpdir factory api`:
+
+``tmpdir_factory`` instances have the following methods:
+
+.. currentmodule:: _pytest.tmpdir
+
+.. automethod:: TempdirFactory.mktemp
+.. automethod:: TempdirFactory.getbasetemp
+
+
+.. _`hook-reference`:
+
+Hooks
+-----
+
+**Tutorial**: :doc:`writing_plugins`.
+
+..
currentmodule:: _pytest.hookspec + +Reference to all hooks which can be implemented by :ref:`conftest.py files ` and :ref:`plugins `. + +Bootstrapping hooks +~~~~~~~~~~~~~~~~~~~ + +Bootstrapping hooks called for plugins registered early enough (internal and setuptools plugins). + +.. autofunction:: pytest_load_initial_conftests +.. autofunction:: pytest_cmdline_preparse +.. autofunction:: pytest_cmdline_parse +.. autofunction:: pytest_cmdline_main + +Initialization hooks +~~~~~~~~~~~~~~~~~~~~ + +Initialization hooks called for plugins and ``conftest.py`` files. + +.. autofunction:: pytest_addoption +.. autofunction:: pytest_addhooks +.. autofunction:: pytest_configure +.. autofunction:: pytest_unconfigure +.. autofunction:: pytest_sessionstart +.. autofunction:: pytest_sessionfinish + +Test running hooks +~~~~~~~~~~~~~~~~~~ + +All runtest related hooks receive a :py:class:`pytest.Item <_pytest.main.Item>` object. + +.. autofunction:: pytest_runtestloop +.. autofunction:: pytest_runtest_protocol +.. autofunction:: pytest_runtest_logstart +.. autofunction:: pytest_runtest_logfinish +.. autofunction:: pytest_runtest_setup +.. autofunction:: pytest_runtest_call +.. autofunction:: pytest_runtest_teardown +.. autofunction:: pytest_runtest_makereport + +For deeper understanding you may look at the default implementation of +these hooks in :py:mod:`_pytest.runner` and maybe also +in :py:mod:`_pytest.pdb` which interacts with :py:mod:`_pytest.capture` +and its input/output capturing in order to immediately drop +into interactive debugging when a test failure occurs. + +The :py:mod:`_pytest.terminal` reported specifically uses +the reporting hook to print information about a test run. + +Collection hooks +~~~~~~~~~~~~~~~~ + +``pytest`` calls the following hooks for collecting files and directories: + +.. autofunction:: pytest_collection +.. autofunction:: pytest_ignore_collect +.. autofunction:: pytest_collect_directory +.. autofunction:: pytest_collect_file + +For influencing the collection of objects in Python modules +you can use the following hook: + +.. autofunction:: pytest_pycollect_makeitem +.. autofunction:: pytest_generate_tests +.. autofunction:: pytest_make_parametrize_id + +After collection is complete, you can modify the order of +items, delete or otherwise amend the test items: + +.. autofunction:: pytest_collection_modifyitems + +Reporting hooks +~~~~~~~~~~~~~~~ + +Session related reporting hooks: + +.. autofunction:: pytest_collectstart +.. autofunction:: pytest_itemcollected +.. autofunction:: pytest_collectreport +.. autofunction:: pytest_deselected +.. autofunction:: pytest_report_header +.. autofunction:: pytest_report_collectionfinish +.. autofunction:: pytest_report_teststatus +.. autofunction:: pytest_terminal_summary +.. autofunction:: pytest_fixture_setup +.. autofunction:: pytest_fixture_post_finalizer + +And here is the central hook for reporting about +test execution: + +.. autofunction:: pytest_runtest_logreport + +You can also use this hook to customize assertion representation for some +types: + +.. autofunction:: pytest_assertrepr_compare + + +Debugging/Interaction hooks +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are few hooks which can be used for special +reporting or interaction with exceptions: + +.. autofunction:: pytest_internalerror +.. autofunction:: pytest_keyboard_interrupt +.. autofunction:: pytest_exception_interact +.. autofunction:: pytest_enter_pdb + + +Objects +------- + +Full reference to objects accessible from :ref:`fixtures ` or :ref:`hooks `. 
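+
+As a quick illustration (a sketch only, the marker name and reason are made up rather than part of the
+reference), several of these objects typically appear together in ordinary hook implementations, for
+example a ``conftest.py`` hook that receives the active ``Config`` and the collected ``Item`` instances:
+
+.. code-block:: python
+
+    # content of conftest.py -- illustrative sketch only
+    import pytest
+
+
+    def pytest_collection_modifyitems(config, items):
+        # ``config`` is the _pytest.config.Config object for this run and each
+        # entry in ``items`` is a collected _pytest.nodes.Item (both documented below).
+        skip_slow = pytest.mark.skip(reason="slow tests skipped in this run")
+        for item in items:
+            if "slow" in item.keywords:
+                item.add_marker(skip_slow)
+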
+ + +CallInfo +~~~~~~~~ + +.. autoclass:: _pytest.runner.CallInfo() + :members: + + +Class +~~~~~ + +.. autoclass:: _pytest.python.Class() + :members: + :show-inheritance: + +Collector +~~~~~~~~~ + +.. autoclass:: _pytest.nodes.Collector() + :members: + :show-inheritance: + +Config +~~~~~~ + +.. autoclass:: _pytest.config.Config() + :members: + +ExceptionInfo +~~~~~~~~~~~~~ + +.. autoclass:: _pytest._code.ExceptionInfo + :members: + +FixtureDef +~~~~~~~~~~ + +.. autoclass:: _pytest.fixtures.FixtureDef() + :members: + :show-inheritance: + +FSCollector +~~~~~~~~~~~ + +.. autoclass:: _pytest.nodes.FSCollector() + :members: + :show-inheritance: + +Function +~~~~~~~~ + +.. autoclass:: _pytest.python.Function() + :members: + :show-inheritance: + +Item +~~~~ + +.. autoclass:: _pytest.nodes.Item() + :members: + :show-inheritance: + +MarkDecorator +~~~~~~~~~~~~~ + +.. autoclass:: _pytest.mark.MarkDecorator + :members: + + +MarkGenerator +~~~~~~~~~~~~~ + +.. autoclass:: _pytest.mark.MarkGenerator + :members: + + +MarkInfo +~~~~~~~~ + +.. autoclass:: _pytest.mark.MarkInfo + :members: + + +Mark +~~~~ + +.. autoclass:: _pytest.mark.structures.Mark + :members: + + +Metafunc +~~~~~~~~ + +.. autoclass:: _pytest.python.Metafunc + :members: + +Module +~~~~~~ + +.. autoclass:: _pytest.python.Module() + :members: + :show-inheritance: + +Node +~~~~ + +.. autoclass:: _pytest.nodes.Node() + :members: + +Parser +~~~~~~ + +.. autoclass:: _pytest.config.Parser() + :members: + +PluginManager +~~~~~~~~~~~~~ + +.. autoclass:: pluggy.PluginManager() + :members: + + +PytestPluginManager +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: _pytest.config.PytestPluginManager() + :members: + :undoc-members: + :show-inheritance: + +Session +~~~~~~~ + +.. autoclass:: _pytest.main.Session() + :members: + :show-inheritance: + +TestReport +~~~~~~~~~~ + +.. autoclass:: _pytest.runner.TestReport() + :members: + :inherited-members: + +_Result +~~~~~~~ + +.. autoclass:: pluggy._Result + :members: + +Special Variables +----------------- + +pytest treats some global variables in a special manner when defined in a test module. + + +pytest_plugins +~~~~~~~~~~~~~~ + +**Tutorial**: :ref:`available installable plugins` + +Can be declared at the **global** level in *test modules* and *conftest.py files* to register additional plugins. +Can be either a ``str`` or ``Sequence[str]``. + +.. code-block:: python + + pytest_plugins = "myapp.testsupport.myplugin" + +.. code-block:: python + + pytest_plugins = ("myapp.testsupport.tools", "myapp.testsupport.regression") + + +pytest_mark +~~~~~~~~~~~ + +**Tutorial**: :ref:`scoped-marking` + +Can be declared at the **global** level in *test modules* to apply one or more :ref:`marks ` to all +test functions and methods. Can be either a single mark or a sequence of marks. + +.. code-block:: python + + import pytest + pytestmark = pytest.mark.webtest + + +.. code-block:: python + + import pytest + pytestmark = (pytest.mark.integration, pytest.mark.slow) + +PYTEST_DONT_REWRITE (module docstring) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The text ``PYTEST_DONT_REWRITE`` can be add to any **module docstring** to disable +:ref:`assertion rewriting ` for that module. + + +Environment Variables +--------------------- + +Environment variables that can be used to change pytest's behavior. + +PYTEST_ADDOPTS +~~~~~~~~~~~~~~ + +This contains a command-line (parsed by the py:mod:`shlex` module) that will be **prepended** to the command line given +by the user, see :ref:`adding default options` for more information. 
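+
+For example (the option values below are only illustrative), exporting the variable in the shell makes
+every subsequent ``pytest`` invocation behave as if those options had been typed first:
+
+.. code-block:: bash
+
+    export PYTEST_ADDOPTS="-v --maxfail=2"
+    pytest test_mod.py   # behaves like: pytest -v --maxfail=2 test_mod.py
+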
+ +PYTEST_DEBUG +~~~~~~~~~~~~ + +When set, pytest will print tracing and debug information. + +PYTEST_PLUGINS +~~~~~~~~~~~~~~ + +Contains comma-separated list of modules that should be loaded as plugins: + +.. code-block:: bash + + export PYTEST_PLUGINS=mymodule.plugin,xdisst + + +PYTEST_CURRENT_TEST +~~~~~~~~~~~~~~~~~~~ + +This is not meant to be set by users, but is set by pytest internally with the name of the current test so other +processes can inspect it, see :ref:`pytest current test env` for more information. + + +.. _`ini options ref`: + +Configuration Options +--------------------- + +Here is a list of builtin configuration options that may be written in a ``pytest.ini``, ``tox.ini`` or ``setup.cfg`` +file, usually located at the root of your repository. All options must be under a ``[pytest]`` section +(``[tool:pytest]`` for ``setup.cfg`` files). + +Configuration file options may be overwritten in the command-line by using ``-o/--override``, which can also be +passed multiple times. The expected format is ``name=value``. For example:: + + pytest -o console_output_style=classic -o cache_dir=/tmp/mycache + + +.. confval:: addopts + + Add the specified ``OPTS`` to the set of command line arguments as if they + had been specified by the user. Example: if you have this ini file content: + + .. code-block:: ini + + # content of pytest.ini + [pytest] + addopts = --maxfail=2 -rf # exit after 2 failures, report fail info + + issuing ``pytest test_hello.py`` actually means:: + + pytest --maxfail=2 -rf test_hello.py + + Default is to add no options. + + +.. confval:: cache_dir + + .. versionadded:: 3.2 + + Sets a directory where stores content of cache plugin. Default directory is + ``.cache`` which is created in :ref:`rootdir `. Directory may be + relative or absolute path. If setting relative path, then directory is created + relative to :ref:`rootdir `. Additionally path may contain environment + variables, that will be expanded. For more information about cache plugin + please refer to :ref:`cache_provider`. + + +.. confval:: confcutdir + + Sets a directory where search upwards for ``conftest.py`` files stops. + By default, pytest will stop searching for ``conftest.py`` files upwards + from ``pytest.ini``/``tox.ini``/``setup.cfg`` of the project if any, + or up to the file-system root. + + +.. confval:: console_output_style + + .. versionadded:: 3.3 + + Sets the console output style while running tests: + + * ``classic``: classic pytest output. + * ``progress``: like classic pytest output, but with a progress indicator. + + The default is ``progress``, but you can fallback to ``classic`` if you prefer or + the new mode is causing unexpected problems: + + .. code-block:: ini + + # content of pytest.ini + [pytest] + console_output_style = classic + + +.. confval:: doctest_encoding + + .. versionadded:: 3.1 + + Default encoding to use to decode text files with docstrings. + :doc:`See how pytest handles doctests `. + + +.. confval:: doctest_optionflags + + One or more doctest flag names from the standard ``doctest`` module. + :doc:`See how pytest handles doctests `. + + +.. confval:: empty_parameter_set_mark + + .. versionadded:: 3.4 + + Allows to pick the action for empty parametersets in parameterization + + * ``skip`` skips tests with a empty parameterset (default) + * ``xfail`` marks tests with a empty parameterset as xfail(run=False) + + .. code-block:: ini + + # content of pytest.ini + [pytest] + empty_parameter_set_mark = xfail + + .. 
note:: + + The default value of this option is planned to change to ``xfail`` in future releases + as this is considered less error prone, see `#3155 `_ + for more details. + + +.. confval:: filterwarnings + + .. versionadded:: 3.1 + + Sets a list of filters and actions that should be taken for matched + warnings. By default all warnings emitted during the test session + will be displayed in a summary at the end of the test session. + + .. code-block:: ini + + # content of pytest.ini + [pytest] + filterwarnings = + error + ignore::DeprecationWarning + + This tells pytest to ignore deprecation warnings and turn all other warnings + into errors. For more information please refer to :ref:`warnings`. + + +.. confval:: junit_suite_name + + .. versionadded:: 3.1 + + To set the name of the root test suite xml item, you can configure the ``junit_suite_name`` option in your config file: + + .. code-block:: ini + + [pytest] + junit_suite_name = my_suite + + +.. confval:: log_cli_date_format + + .. versionadded:: 3.3 + + Sets a :py:func:`time.strftime`-compatible string that will be used when formatting dates for live logging. + + .. code-block:: ini + + [pytest] + log_cli_date_format = %Y-%m-%d %H:%M:%S + + For more information, see :ref:`live_logs`. + +.. confval:: log_cli_format + + .. versionadded:: 3.3 + + Sets a :py:mod:`logging`-compatible string used to format live logging messages. + + .. code-block:: ini + + [pytest] + log_cli_format = %(asctime)s %(levelname)s %(message)s + + For more information, see :ref:`live_logs`. + + +.. confval:: log_cli_level + + .. versionadded:: 3.3 + + Sets the minimum log message level that should be captured for live logging. The integer value or + the names of the levels can be used. + + .. code-block:: ini + + [pytest] + log_cli_level = INFO + + For more information, see :ref:`live_logs`. + + +.. confval:: log_date_format + + .. versionadded:: 3.3 + + Sets a :py:func:`time.strftime`-compatible string that will be used when formatting dates for logging capture. + + .. code-block:: ini + + [pytest] + log_date_format = %Y-%m-%d %H:%M:%S + + For more information, see :ref:`logging`. + + +.. confval:: log_file + + .. versionadded:: 3.3 + + Sets a file name relative to the ``pytest.ini`` file where log messages should be written to, in addition + to the other logging facilities that are active. + + .. code-block:: ini + + [pytest] + log_file = logs/pytest-logs.txt + + For more information, see :ref:`logging`. + + +.. confval:: log_file_date_format + + .. versionadded:: 3.3 + + Sets a :py:func:`time.strftime`-compatible string that will be used when formatting dates for the logging file. + + .. code-block:: ini + + [pytest] + log_file_date_format = %Y-%m-%d %H:%M:%S + + For more information, see :ref:`logging`. + +.. confval:: log_file_format + + .. versionadded:: 3.3 + + Sets a :py:mod:`logging`-compatible string used to format logging messages redirected to the logging file. + + .. code-block:: ini + + [pytest] + log_file_format = %(asctime)s %(levelname)s %(message)s + + For more information, see :ref:`logging`. + +.. confval:: log_file_level + + .. versionadded:: 3.3 + + Sets the minimum log message level that should be captured for the logging file. The integer value or + the names of the levels can be used. + + .. code-block:: ini + + [pytest] + log_file_level = INFO + + For more information, see :ref:`logging`. + + +.. confval:: log_format + + .. versionadded:: 3.3 + + Sets a :py:mod:`logging`-compatible string used to format captured logging messages. 
+ + .. code-block:: ini + + [pytest] + log_format = %(asctime)s %(levelname)s %(message)s + + For more information, see :ref:`logging`. + + +.. confval:: log_level + + .. versionadded:: 3.3 + + Sets the minimum log message level that should be captured for logging capture. The integer value or + the names of the levels can be used. + + .. code-block:: ini + + [pytest] + log_level = INFO + + For more information, see :ref:`logging`. + + +.. confval:: log_print + + .. versionadded:: 3.3 + + If set to ``False``, will disable displaying captured logging messages for failed tests. + + .. code-block:: ini + + [pytest] + log_print = False + + For more information, see :ref:`logging`. + + +.. confval:: markers + + List of markers that are allowed in test functions, enforced when ``--strict`` command-line argument is used. + You can use a marker name per line, indented from the option name. + + .. code-block:: ini + + [pytest] + markers = + slow + serial + +.. confval:: minversion + + Specifies a minimal pytest version required for running tests. + + .. code-block:: ini + + # content of pytest.ini + [pytest] + minversion = 3.0 # will fail if we run with pytest-2.8 + + +.. confval:: norecursedirs + + Set the directory basename patterns to avoid when recursing + for test discovery. The individual (fnmatch-style) patterns are + applied to the basename of a directory to decide if to recurse into it. + Pattern matching characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'``. + Setting a ``norecursedirs`` replaces the default. Here is an example of + how to avoid certain directories: + + .. code-block:: ini + + [pytest] + norecursedirs = .svn _build tmp* + + This would tell ``pytest`` to not look into typical subversion or + sphinx-build directories or into any ``tmp`` prefixed directory. + + Additionally, ``pytest`` will attempt to intelligently identify and ignore a + virtualenv by the presence of an activation script. Any directory deemed to + be the root of a virtual environment will not be considered during test + collection unless ``‑‑collect‑in‑virtualenv`` is given. Note also that + ``norecursedirs`` takes precedence over ``‑‑collect‑in‑virtualenv``; e.g. if + you intend to run tests in a virtualenv with a base directory that matches + ``'.*'`` you *must* override ``norecursedirs`` in addition to using the + ``‑‑collect‑in‑virtualenv`` flag. + + +.. confval:: python_classes + + One or more name prefixes or glob-style patterns determining which classes + are considered for test collection. By default, pytest will consider any + class prefixed with ``Test`` as a test collection. Here is an example of how + to collect tests from classes that end in ``Suite``: + + .. code-block:: ini + + [pytest] + python_classes = *Suite + + Note that ``unittest.TestCase`` derived classes are always collected + regardless of this option, as ``unittest``'s own collection framework is used + to collect those tests. + + +.. confval:: python_files + + One or more Glob-style file patterns determining which python files + are considered as test modules. By default, pytest will consider + any file matching with ``test_*.py`` and ``*_test.py`` globs as a test + module. + + +.. confval:: python_functions + + One or more name prefixes or glob-patterns determining which test functions + and methods are considered tests. 
By default, pytest will consider any + function prefixed with ``test`` as a test. Here is an example of how + to collect test functions and methods that end in ``_test``: + + .. code-block:: ini + + [pytest] + python_functions = *_test + + Note that this has no effect on methods that live on a ``unittest + .TestCase`` derived class, as ``unittest``'s own collection framework is used + to collect those tests. + + See :ref:`change naming conventions` for more detailed examples. + + +.. confval:: testpaths + + .. versionadded:: 2.8 + + Sets list of directories that should be searched for tests when + no specific directories, files or test ids are given in the command line when + executing pytest from the :ref:`rootdir ` directory. + Useful when all project tests are in a known location to speed up + test collection and to avoid picking up undesired tests by accident. + + .. code-block:: ini + + [pytest] + testpaths = testing doc + + This tells pytest to only look for tests in ``testing`` and ``doc`` + directories when executing from the root directory. + + +.. confval:: usefixtures + + List of fixtures that will be applied to all test functions; this is semantically the same to apply + the ``@pytest.mark.usefixtures`` marker to all test functions. + + + .. code-block:: ini + + [pytest] + usefixtures = + clean_db + + +.. confval:: xfail_strict + + If set to ``True``, tests marked with ``@pytest.mark.xfail`` that actually succeed will by default fail the + test suite. + For more information, see :ref:`xfail strict tutorial`. + + + .. code-block:: ini + + [pytest] + xfail_strict = True diff --git a/doc/en/requirements.txt b/doc/en/requirements.txt index 72bb60a81..e3cc47ed5 100644 --- a/doc/en/requirements.txt +++ b/doc/en/requirements.txt @@ -1,3 +1,4 @@ # pinning sphinx to 1.4.* due to search issues with rtd: # https://github.com/rtfd/readthedocs-sphinx-ext/issues/25 sphinx ==1.4.* +sphinxcontrib-trio diff --git a/doc/en/setup.rst b/doc/en/setup.rst deleted file mode 100644 index fe2353465..000000000 --- a/doc/en/setup.rst +++ /dev/null @@ -1,10 +0,0 @@ - -setup: is now an "autouse fixture" -======================================================== - -During development prior to the pytest-2.3 release the name -``pytest.setup`` was used but before the release it was renamed -and moved to become part of the general fixture mechanism, -namely :ref:`autouse fixtures` - - diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index 72b7b0433..9bac02c8c 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -3,7 +3,7 @@ .. _skipping: Skip and xfail: dealing with tests that cannot succeed -===================================================================== +====================================================== You can mark test functions that cannot be run on certain platforms or that you expect to fail so pytest can deal with them accordingly and @@ -16,17 +16,22 @@ resource which is not available at the moment (for example a database). A **xfail** means that you expect a test to fail for some reason. A common example is a test for a feature not yet implemented, or a bug not yet fixed. +When a test passes despite being expected to fail (marked with ``pytest.mark.xfail``), +it's an **xpass** and will be reported in the test summary. ``pytest`` counts and lists *skip* and *xfail* tests separately. Detailed information about skipped/xfailed tests is not shown by default to avoid cluttering the output. 
You can use the ``-r`` option to see details corresponding to the "short" letters shown in the test progress:: - pytest -rxs # show extra info on skips and xfails + pytest -rxXs # show extra info on xfailed, xpassed, and skipped tests + +More details on the ``-r`` option can be found by running ``pytest -h``. (See :ref:`how to change command line options defaults`) .. _skipif: +.. _skip: .. _`condition booleans`: Skipping test functions @@ -53,9 +58,21 @@ by calling the ``pytest.skip(reason)`` function: if not valid_config(): pytest.skip("unsupported configuration") -The imperative method is useful when it is not possible to evaluate the skip condition +It is also possible to skip the whole module using +``pytest.skip(reason, allow_module_level=True)`` at the module level: + +.. code-block:: python + + import pytest + + if not pytest.config.getoption("--custom-flag"): + pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True) + +The imperative method is useful when it is not possible to evaluate the skip condition during import time. +**Reference**: :ref:`pytest.mark.skip ref` + ``skipif`` ~~~~~~~~~~ @@ -63,16 +80,16 @@ during import time. If you wish to skip something conditionally then you can use ``skipif`` instead. Here is an example of marking a test function to be skipped -when run on a Python3.3 interpreter:: +when run on a Python3.6 interpreter:: import sys - @pytest.mark.skipif(sys.version_info < (3,3), - reason="requires python3.3") + @pytest.mark.skipif(sys.version_info < (3,6), + reason="requires python3.6") def test_function(): ... If the condition evaluates to ``True`` during collection, the test function will be skipped, -with the specified reason appearing in the summary when using ``-rs``. +with the specified reason appearing in the summary when using ``-rs``. You can share ``skipif`` markers between modules. Consider this test module:: @@ -101,6 +118,8 @@ Alternatively, you can use :ref:`condition strings ` instead of booleans, but they can't be shared between modules easily so they are supported mainly for backward compatibility reasons. +**Reference**: :ref:`pytest.mark.skipif ref` + Skip all test functions of a class or module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -117,6 +136,12 @@ You can use the ``skipif`` marker (as any other marker) on classes:: If the condition is ``True``, this marker will produce a skip result for each of the test methods of that class. +.. warning:: + + The use of ``skipif`` on classes that use inheritance is strongly + discouraged. `A Known bug `_ + in pytest's markers may cause unexpected behavior in super classes. + If you want to skip all test functions of a module, you may use the ``pytestmark`` name on the global level: @@ -131,6 +156,16 @@ will be skipped if any of the skip conditions is true. .. _`whole class- or module level`: mark.html#scoped-marking +Skipping files or directories +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sometimes you may need to skip an entire file or directory, for example if the +tests rely on Python version-specific features or contain code that you do not +wish pytest to run. In this case, you must exclude the files and directories +from collection. Refer to :ref:`customizing-test-collection` for more +information. + + Skipping on a missing import dependency ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -201,15 +236,10 @@ This will unconditionally make ``test_function`` ``XFAIL``. Note that no other c after ``pytest.xfail`` call, differently from the marker. 
That's because it is implemented internally by raising a known exception. -Here's the signature of the ``xfail`` **marker** (not the function), using Python 3 keyword-only -arguments syntax: - -.. code-block:: python - - def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False): - +**Reference**: :ref:`pytest.mark.xfail ref` +.. _`xfail strict tutorial`: ``strict`` parameter ~~~~~~~~~~~~~~~~~~~~ @@ -243,8 +273,8 @@ You can change the default value of the ``strict`` parameter using the As with skipif_ you can also mark your expectation of a failure on a particular platform:: - @pytest.mark.xfail(sys.version_info >= (3,3), - reason="python3.3 api changes") + @pytest.mark.xfail(sys.version_info >= (3,6), + reason="python3.6 api changes") def test_function(): ... @@ -300,13 +330,13 @@ Here is a simple test file with the several usages: Running it with the report-on-xfail option gives this output:: example $ pytest -rx xfail_demo.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/example, inifile: collected 7 items - xfail_demo.py xxxxxxx - ======= short test summary info ======== + xfail_demo.py xxxxxxx [100%] + ========================= short test summary info ========================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 reason: [NOTRUN] @@ -320,7 +350,7 @@ Running it with the report-on-xfail option gives this output:: reason: reason XFAIL xfail_demo.py::test_hello7 - ======= 7 xfailed in 0.12 seconds ======== + ======================== 7 xfailed in 0.12 seconds ========================= .. _`skip/xfail with parametrize`: @@ -345,66 +375,3 @@ test instances when using parametrize: ]) def test_increment(n, expected): assert n + 1 == expected - - -.. _string conditions: - -Conditions as strings instead of booleans ------------------------------------------ - -Prior to pytest-2.4 the only way to specify skipif/xfail conditions was -to use strings:: - - import sys - @pytest.mark.skipif("sys.version_info >= (3,3)") - def test_function(): - ... - -During test function setup the skipif condition is evaluated by calling -``eval('sys.version_info >= (3,0)', namespace)``. The namespace contains -all the module globals, and ``os`` and ``sys`` as a minimum. - -Since pytest-2.4 `condition booleans`_ are considered preferable -because markers can then be freely imported between test modules. -With strings you need to import not only the marker but all variables -used by the marker, which violates encapsulation. - -The reason for specifying the condition as a string was that ``pytest`` can -report a summary of skip conditions based purely on the condition string. -With conditions as booleans you are required to specify a ``reason`` string. - -Note that string conditions will remain fully supported and you are free -to use them if you have no need for cross-importing markers. - -The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)`` -or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace -dictionary which is constructed as follows: - -* the namespace is initialized by putting the ``sys`` and ``os`` modules - and the pytest ``config`` object into it. - -* updated with the module globals of the test function for which the - expression is applied. 
- -The pytest ``config`` object allows you to skip based on a test -configuration value which you might have added:: - - @pytest.mark.skipif("not config.getvalue('db')") - def test_function(...): - ... - -The equivalent with "boolean conditions" is:: - - @pytest.mark.skipif(not pytest.config.getvalue("db"), - reason="--db was not specified") - def test_function(...): - pass - -.. note:: - - You cannot use ``pytest.config.getvalue()`` in code - imported before pytest's argument parsing takes place. For example, - ``conftest.py`` files are imported before command line parsing and thus - ``config.getvalue()`` will not execute correctly. - - diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 642bb0814..2a53adad9 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -28,15 +28,15 @@ Running this would result in a passed test except for the last ``assert 0`` line which we use to look at values:: $ pytest test_tmpdir.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - test_tmpdir.py F + test_tmpdir.py F [100%] - ======= FAILURES ======== - _______ test_create_file ________ + ================================= FAILURES ================================= + _____________________________ test_create_file _____________________________ tmpdir = local('PYTEST_TMPDIR/test_create_file0') @@ -49,7 +49,9 @@ Running this would result in a passed test except for the last E assert 0 test_tmpdir.py:7: AssertionError - ======= 1 failed in 0.12 seconds ======== + ========================= 1 failed in 0.12 seconds ========================= + +.. _`tmpdir factory example`: The 'tmpdir_factory' fixture ---------------------------- @@ -81,12 +83,8 @@ to save time: img = load_image(image_file) # compute and test histogram -``tmpdir_factory`` instances have the following methods: +See :ref:`tmpdir_factory API ` for details. -.. currentmodule:: _pytest.tmpdir - -.. automethod:: TempdirFactory.mktemp -.. automethod:: TempdirFactory.getbasetemp .. _`base temporary directory`: @@ -106,6 +104,4 @@ When distributing tests on the local machine, ``pytest`` takes care to configure a basetemp directory for the sub processes such that all temporary data lands below a single per-test run basetemp directory. -.. _`py.path.local`: http://py.rtfd.org/en/latest/path.html - - +.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index 06180f19d..b44bda44f 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -2,58 +2,77 @@ .. _`unittest.TestCase`: .. _`unittest`: -Support for unittest.TestCase / Integration of fixtures -===================================================================== +unittest.TestCase Support +========================= -.. _`unittest.py style`: http://docs.python.org/library/unittest.html +``pytest`` supports running Python ``unittest``-based tests out of the box. +It's meant for leveraging existing ``unittest``-based test suites +to use pytest as a test runner and also allow to incrementally adapt +the test suite to take full advantage of pytest's features. -``pytest`` has support for running Python `unittest.py style`_ tests. -It's meant for leveraging existing unittest-style projects -to use pytest features. 
Concretely, pytest will automatically -collect ``unittest.TestCase`` subclasses and their ``test`` methods in -test files. It will invoke typical setup/teardown methods and -generally try to make test suites written to run on unittest, to also -run using ``pytest``. We assume here that you are familiar with writing -``unittest.TestCase`` style tests and rather focus on -integration aspects. +To run an existing ``unittest``-style test suite using ``pytest``, type:: -Note that this is meant as a provisional way of running your test code -until you fully convert to pytest-style tests. To fully take advantage of -:ref:`fixtures `, :ref:`parametrization ` and -:ref:`hooks ` you should convert (tools like `unittest2pytest -`__ are helpful). -Also, not all 3rd party pluging are expected to work best with -``unittest.TestCase`` style tests. + pytest tests -Usage -------------------------------------------------------------------- -After :ref:`installation` type:: +pytest will automatically collect ``unittest.TestCase`` subclasses and +their ``test`` methods in ``test_*.py`` or ``*_test.py`` files. - pytest +Almost all ``unittest`` features are supported: -and you should be able to run your unittest-style tests if they -are contained in ``test_*`` modules. If that works for you then -you can make use of most :ref:`pytest features `, for example -``--pdb`` debugging in failures, using :ref:`plain assert-statements `, -:ref:`more informative tracebacks `, stdout-capturing or -distributing tests to multiple CPUs via the ``-nNUM`` option if you -installed the ``pytest-xdist`` plugin. Please refer to -the general ``pytest`` documentation for many more examples. +* ``@unittest.skip`` style decorators; +* ``setUp/tearDown``; +* ``setUpClass/tearDownClass()``; -.. note:: +.. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol +.. _`setUpModule/tearDownModule`: https://docs.python.org/3/library/unittest.html#setupmodule-and-teardownmodule +.. _`subtests`: https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests - Running tests from ``unittest.TestCase`` subclasses with ``--pdb`` will - disable tearDown and cleanup methods for the case that an Exception - occurs. This allows proper post mortem debugging for all applications - which have significant logic in their tearDown machinery. However, - supporting this feature has the following side effect: If people - overwrite ``unittest.TestCase`` ``__call__`` or ``run``, they need to - to overwrite ``debug`` in the same way (this is also true for standard - unittest). 
+Up to this point pytest does not have support for the following features: -Mixing pytest fixtures into unittest.TestCase style tests ------------------------------------------------------------ +* `load_tests protocol`_; +* `setUpModule/tearDownModule`_; +* `subtests`_; + +Benefits out of the box +----------------------- + +By running your test suite with pytest you can make use of several features, +in most cases without having to modify existing code: + +* Obtain :ref:`more informative tracebacks `; +* :ref:`stdout and stderr ` capturing; +* :ref:`Test selection options ` using ``-k`` and ``-m`` flags; +* :ref:`maxfail`; +* :ref:`--pdb ` command-line option for debugging on test failures + (see :ref:`note ` below); +* Distribute tests to multiple CPUs using the `pytest-xdist `_ plugin; +* Use :ref:`plain assert-statements ` instead of ``self.assert*`` functions (`unittest2pytest + `__ is immensely helpful in this); + + +pytest features in ``unittest.TestCase`` subclasses +--------------------------------------------------- + +The following pytest features work in ``unittest.TestCase`` subclasses: + +* :ref:`Marks `: :ref:`skip `, :ref:`skipif `, :ref:`xfail `; +* :ref:`Auto-use fixtures `; + +The following pytest features **do not** work, and probably +never will due to different design philosophies: + +* :ref:`Fixtures ` (except for ``autouse`` fixtures, see :ref:`below `); +* :ref:`Parametrization `; +* :ref:`Custom hooks `; + + +Third party plugins may or may not work well, depending on the plugin and the test suite. + +.. _mixing-fixtures: + +Mixing pytest fixtures into ``unittest.TestCase`` subclasses using marks +------------------------------------------------------------------------ Running your unittest with ``pytest`` allows you to use its :ref:`fixture mechanism ` with ``unittest.TestCase`` style @@ -107,15 +126,15 @@ Due to the deliberately failing assert statements, we can take a look at the ``self.db`` values in the traceback:: $ pytest test_unittest_db.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - test_unittest_db.py FF + test_unittest_db.py FF [100%] - ======= FAILURES ======== - _______ MyTest.test_method1 ________ + ================================= FAILURES ================================= + ___________________________ MyTest.test_method1 ____________________________ self = @@ -126,7 +145,7 @@ the ``self.db`` values in the traceback:: E assert 0 test_unittest_db.py:9: AssertionError - _______ MyTest.test_method2 ________ + ___________________________ MyTest.test_method2 ____________________________ self = @@ -136,15 +155,15 @@ the ``self.db`` values in the traceback:: E assert 0 test_unittest_db.py:12: AssertionError - ======= 2 failed in 0.12 seconds ======== + ========================= 2 failed in 0.12 seconds ========================= This default pytest traceback shows that the two test methods share the same ``self.db`` instance which was our intention when writing the class-scoped fixture function above. 
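+
+The marks listed earlier (``skip``, ``skipif`` and ``xfail``) can be applied to ``unittest.TestCase``
+methods just like to plain test functions. A minimal sketch (the test bodies are made up):
+
+.. code-block:: python
+
+    import sys
+    import unittest
+
+    import pytest
+
+
+    class MyTest(unittest.TestCase):
+
+        @pytest.mark.skipif(sys.platform == "win32", reason="requires a POSIX platform")
+        def test_posix_only(self):
+            self.assertTrue(True)
+
+        @pytest.mark.xfail(reason="known bug, not fixed yet")
+        def test_known_bug(self):
+            self.assertEqual(1, 2)
+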
-autouse fixtures and accessing other fixtures -------------------------------------------------------------------- +Using autouse fixtures and accessing other fixtures +--------------------------------------------------- Although it's usually better to explicitly declare use of fixtures you need for a given test, you may sometimes want to have fixtures that are @@ -165,6 +184,7 @@ creation of a per-test temporary directory:: import unittest class MyTest(unittest.TestCase): + @pytest.fixture(autouse=True) def initdir(self, tmpdir): tmpdir.chdir() # change to pytest-provided temporary directory @@ -183,7 +203,7 @@ on the class like in the previous example. Running this test module ...:: $ pytest -q test_unittest_cleandir.py - . + . [100%] 1 passed in 0.12 seconds ... gives us one passed test because the ``initdir`` fixture function @@ -191,12 +211,35 @@ was executed ahead of the ``test_method``. .. note:: - While pytest supports receiving fixtures via :ref:`test function arguments ` for non-unittest test methods, ``unittest.TestCase`` methods cannot directly receive fixture - function arguments as implementing that is likely to inflict + ``unittest.TestCase`` methods cannot directly receive fixture + arguments as implementing that is likely to inflict on the ability to run general unittest.TestCase test suites. - Maybe optional support would be possible, though. If unittest finally - grows a plugin system that should help as well. In the meanwhile, the - above ``usefixtures`` and ``autouse`` examples should help to mix in - pytest fixtures into unittest suites. And of course you can also start - to selectively leave away the ``unittest.TestCase`` subclassing, use - plain asserts and get the unlimited pytest feature set. + + The above ``usefixtures`` and ``autouse`` examples should help to mix in + pytest fixtures into unittest suites. + + You can also gradually move away from subclassing from ``unittest.TestCase`` to *plain asserts* + and then start to benefit from the full pytest feature set step by step. + +.. _pdb-unittest-note: + +.. note:: + + Running tests from ``unittest.TestCase`` subclasses with ``--pdb`` will + disable tearDown and cleanup methods for the case that an Exception + occurs. This allows proper post mortem debugging for all applications + which have significant logic in their tearDown machinery. However, + supporting this feature has the following side effect: If people + overwrite ``unittest.TestCase`` ``__call__`` or ``run``, they need to + to overwrite ``debug`` in the same way (this is also true for standard + unittest). + +.. note:: + + Due to architectural differences between the two frameworks, setup and + teardown for ``unittest``-based tests is performed during the ``call`` phase + of testing instead of in ``pytest``'s standard ``setup`` and ``teardown`` + stages. This can be important to understand in some situations, particularly + when reasoning about errors. For example, if a ``unittest``-based suite + exhibits errors during setup, ``pytest`` will report no errors during its + ``setup`` phase and will instead raise the error during ``call``. diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 763328f5a..3d5b0536e 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -17,7 +17,7 @@ You can invoke testing through the Python interpreter from the command line:: python -m pytest [...] This is almost equivalent to invoking the command line script ``pytest [...]`` -directly, except that python will also add the current directory to ``sys.path``. 
+directly, except that calling via ``python`` will also add the current directory to ``sys.path``. Possible exit codes -------------------------------------------------------------- @@ -41,6 +41,8 @@ Getting help on version, option names, environment variables pytest -h | --help # show help on command line and config file options +.. _maxfail: + Stopping after the first (or N) failures --------------------------------------------------- @@ -49,26 +51,69 @@ To stop the testing process after the first (N) failures:: pytest -x # stop after first failure pytest --maxfail=2 # stop after two failures +.. _select-tests: + Specifying tests / selecting tests --------------------------------------------------- -Several test run options:: +Pytest supports several ways to run and select tests from the command-line. - pytest test_mod.py # run tests in module - pytest somepath # run all tests below somepath - pytest -k stringexpr # only run tests with names that match the - # "string expression", e.g. "MyClass and not method" - # will select TestMyClass.test_something - # but not TestMyClass.test_method_simple - pytest test_mod.py::test_func # only run tests that match the "node ID", - # e.g. "test_mod.py::test_func" will select - # only test_func in test_mod.py - pytest test_mod.py::TestClass::test_method # run a single method in - # a single class +**Run tests in a module** -Import 'pkg' and use its filesystem location to find and run tests:: +:: - pytest --pyargs pkg # run all tests found below directory of pkg + pytest test_mod.py + +**Run tests in a directory** + +:: + + pytest testing/ + +**Run tests by keyword expressions** + +:: + + pytest -k "MyClass and not method" + +This will run tests which contain names that match the given *string expression*, which can +include Python operators that use filenames, class names and function names as variables. +The example above will run ``TestMyClass.test_something`` but not ``TestMyClass.test_method_simple``. + +.. _nodeids: + +**Run tests by node ids** + +Each collected test is assigned a unique ``nodeid`` which consist of the module filename followed +by specifiers like class names, function names and parameters from parametrization, separated by ``::`` characters. + +To run a specific test within a module:: + + pytest test_mod.py::test_func + + +Another example specifying a test method in the command line:: + + pytest test_mod.py::TestClass::test_method + +**Run tests by marker expressions** + +:: + + pytest -m slow + +Will run all tests which are decorated with the ``@pytest.mark.slow`` decorator. + +For more information see :ref:`marks `. + +**Run tests from packages** + +:: + + pytest --pyargs pkg.testing + +This will import ``pkg.testing`` and use its filesystem location to find and run tests from. + Modifying Python traceback printing ---------------------------------------------- @@ -94,6 +139,9 @@ with Ctrl+C to find out where the tests are *hanging*. By default no output will be shown (because KeyboardInterrupt is caught by pytest). By using this option you make sure a trace is shown. + +.. _pdb-option: + Dropping to PDB_ (Python Debugger) on failures ----------------------------------------------- @@ -123,22 +171,15 @@ for example:: >>> sys.last_value AssertionError('assert result == "ok"',) -Setting a breakpoint / aka ``set_trace()`` ----------------------------------------------------- +.. 
_breakpoints: -If you want to set a breakpoint and enter the ``pdb.set_trace()`` you -can use a helper:: +Setting breakpoints +------------------- - import pytest - def test_function(): - ... - pytest.set_trace() # invoke PDB debugger and tracing +.. versionadded: 2.4.0 -.. versionadded: 2.0.0 - -Prior to pytest version 2.0.0 you could only enter PDB_ tracing if you disabled -capturing on the command line via ``pytest -s``. In later versions, pytest -automatically disables its output capture when you enter PDB_ tracing: +To set a breakpoint in your code use the native Python ``import pdb;pdb.set_trace()`` call +in your code and pytest automatically disables its output capture for that test: * Output capture in other tests is not affected. * Any prior test output that has already been captured and will be processed as @@ -148,12 +189,19 @@ automatically disables its output capture when you enter PDB_ tracing: for test output occurring after you exit the interactive PDB_ tracing session and continue with the regular test run. -.. versionadded: 2.4.0 -Since pytest version 2.4.0 you can also use the native Python -``import pdb;pdb.set_trace()`` call to enter PDB_ tracing without having to use -the ``pytest.set_trace()`` wrapper or explicitly disable pytest's output -capturing via ``pytest -s``. +.. _`breakpoint-builtin`: + +Using the builtin breakpoint function +------------------------------------- + +Python 3.7 introduces a builtin ``breakpoint()`` function. +Pytest supports the use of ``breakpoint()`` with the following behaviours: + + - When ``breakpoint()`` is called and ``PYTHONBREAKPOINT`` is set to the default value, pytest will use the custom internal PDB trace UI instead of the system default ``Pdb``. + - When tests are complete, the system will default back to the system ``Pdb`` trace UI. + - If ``--pdb`` is called on execution of pytest, the custom internal Pdb trace UI is used on ``bothbreakpoint()`` and failed tests/unhandled exceptions. + - If ``--pdbcls`` is used, the custom class debugger will be executed when a test fails (as expected within existing behaviour), but also when ``breakpoint()`` is called from within a test, the custom class debugger will be instantiated. .. _durations: @@ -186,19 +234,26 @@ To set the name of the root test suite xml item, you can configure the ``junit_s [pytest] junit_suite_name = my_suite -record_xml_property -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _record_property example: + +record_property +^^^^^^^^^^^^^^^ .. versionadded:: 2.8 +.. versionchanged:: 3.5 + + Fixture renamed from ``record_xml_property`` to ``record_property`` as user + properties are now available to all reporters. + ``record_xml_property`` is now deprecated. If you want to log additional information for a test, you can use the -``record_xml_property`` fixture: +``record_property`` fixture: .. code-block:: python - def test_function(record_xml_property): - record_xml_property("example_key", 1) - assert 0 + def test_function(record_property): + record_property("example_key", 1) + assert True This will add an extra property ``example_key="1"`` to the generated ``testcase`` tag: @@ -211,17 +266,106 @@ This will add an extra property ``example_key="1"`` to the generated +Alternatively, you can integrate this functionality with custom markers: + +.. 
code-block:: python + + # content of conftest.py + + def pytest_collection_modifyitems(session, config, items): + for item in items: + for marker in item.iter_markers(): + if marker.name == 'test_id': + test_id = marker.args[0] + item.user_properties.append(('test_id', test_id)) + +And in your tests: + +.. code-block:: python + + # content of test_function.py + import pytest + @pytest.mark.test_id(1501) + def test_function(): + assert True + +Will result in: + +.. code-block:: xml + + + + + + + .. warning:: - ``record_xml_property`` is an experimental feature, and its interface might be replaced - by something more powerful and general in future versions. The - functionality per-se will be kept, however. - - Currently it does not work when used with the ``pytest-xdist`` plugin. + ``record_property`` is an experimental feature and may change in the future. Also please note that using this feature will break any schema verification. This might be a problem when used with some CI servers. +record_xml_attribute +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. versionadded:: 3.4 + +To add an additional xml attribute to a testcase element, you can use +``record_xml_attribute`` fixture. This can also be used to override existing values: + +.. code-block:: python + + def test_function(record_xml_attribute): + record_xml_attribute("assertions", "REQ-1234") + record_xml_attribute("classname", "custom_classname") + print('hello world') + assert True + +Unlike ``record_property``, this will not add a new child element. +Instead, this will add an attribute ``assertions="REQ-1234"`` inside the generated +``testcase`` tag and override the default ``classname`` with ``"classname=custom_classname"``: + +.. code-block:: xml + + + + hello world + + + +.. warning:: + + ``record_xml_attribute`` is an experimental feature, and its interface might be replaced + by something more powerful and general in future versions. The + functionality per-se will be kept, however. + + Using this over ``record_xml_property`` can help when using ci tools to parse the xml report. + However, some parsers are quite strict about the elements and attributes that are allowed. + Many tools use an xsd schema (like the example below) to validate incoming xml. + Make sure you are using attribute names that are allowed by your parser. + + Below is the Scheme used by Jenkins to validate the XML report: + + .. code-block:: xml + + + + + + + + + + + + + + + + + + LogXML: add_global_property ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -243,7 +387,7 @@ to all testcases you can use ``LogXML.add_global_properties`` my_junit.add_global_property('ARCH', 'PPC') my_junit.add_global_property('STORAGE_TYPE', 'CEPH') - @pytest.mark.usefixtures(log_global_env_facts) + @pytest.mark.usefixtures(log_global_env_facts.__name__) def start_and_prepare_env(): pass @@ -276,6 +420,13 @@ Creating resultlog format files This option is rarely used and is scheduled for removal in 4.0. + An alternative for users which still need similar functionality is to use the + `pytest-tap `_ plugin which provides + a stream of test data. + + If you have any concerns, please don't hesitate to + `open an issue `_. + To create plain-text machine-readable result files you can issue:: pytest --resultlog=path @@ -345,7 +496,17 @@ Running it will show that ``MyPlugin`` was added and its hook was invoked:: $ python myinvoke.py - *** test run reporting finishing + . [100%]*** test run reporting finishing +.. 
note:: + + Calling ``pytest.main()`` will result in importing your tests and any modules + that they import. Due to the caching mechanism of python's import system, + making subsequent calls to ``pytest.main()`` from the same process will not + reflect changes to those files between the calls. For this reason, making + multiple calls to ``pytest.main()`` from the same process (in order to re-run + tests, for example) is not recommended. + + .. include:: links.inc diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index c807167ef..f7b67f5f2 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -21,20 +21,20 @@ and displays them at the end of the session:: Running pytest now produces this output:: $ pytest test_show_warnings.py - ======= test session starts ======== + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + collected 1 item - test_show_warnings.py . + test_show_warnings.py . [100%] - ======= warnings summary ======== + ============================= warnings summary ============================= test_show_warnings.py::test_one $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2 warnings.warn(UserWarning("api v1, should use functions from v2")) -- Docs: http://doc.pytest.org/en/latest/warnings.html - ======= 1 passed, 1 warnings in 0.12 seconds ======== + =================== 1 passed, 1 warnings in 0.12 seconds =================== Pytest by default catches all warnings except for ``DeprecationWarning`` and ``PendingDeprecationWarning``. @@ -42,9 +42,9 @@ The ``-W`` flag can be passed to control which warnings will be displayed or eve them into errors:: $ pytest -q test_show_warnings.py -W error::UserWarning - F - ======= FAILURES ======== - _______ test_one ________ + F [100%] + ================================= FAILURES ================================= + _________________________________ test_one _________________________________ def test_one(): > assert api_v1() == 1 @@ -78,6 +78,49 @@ Both ``-W`` command-line option and ``filterwarnings`` ini option are based on P `-W option`_ and `warnings.simplefilter`_, so please refer to those sections in the Python documentation for other examples and advanced usage. + +.. _`filterwarnings`: + +``@pytest.mark.filterwarnings`` +------------------------------- + +.. versionadded:: 3.2 + +You can use the ``@pytest.mark.filterwarnings`` to add warning filters to specific test items, +allowing you to have finer control of which warnings should be captured at test, class or +even module level: + +.. code-block:: python + + import warnings + + def api_v1(): + warnings.warn(UserWarning("api v1, should use functions from v2")) + return 1 + + @pytest.mark.filterwarnings('ignore:api v1') + def test_one(): + assert api_v1() == 1 + + +Filters applied using a mark take precedence over filters passed on the command line or configured +by the ``filterwarnings`` ini option. + +You may apply a filter to all tests of a class by using the ``filterwarnings`` mark as a class +decorator or to all tests in a module by setting the ``pytestmark`` variable: + +.. code-block:: python + + # turns all warnings into errors for this module + pytestmark = pytest.mark.filterwarnings('error') + + +.. 
note:: + + Except for these features, pytest does not change the python warning filter; it only captures + and displays the warnings which are issued with respect to the currently configured filter, + including changes to the filter made by test functions or by the system under test. + .. note:: ``DeprecationWarning`` and ``PendingDeprecationWarning`` are hidden by the standard library @@ -134,7 +177,20 @@ which works in a similar manner to :ref:`raises `:: with pytest.warns(UserWarning): warnings.warn("my warning", UserWarning) -The test will fail if the warning in question is not raised. +The test will fail if the warning in question is not raised. The keyword +argument ``match`` to assert that the exception matches a text or regex:: + + >>> with warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... You can also call ``pytest.warns`` on a function or code string:: @@ -197,23 +253,11 @@ The ``recwarn`` fixture will record warnings for the whole function:: Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded warnings: a WarningsRecorder instance. To view the recorded warnings, you can iterate over this instance, call ``len`` on it to get the number of recorded -warnings, or index into it to get a particular recorded warning. It also -provides these methods: +warnings, or index into it to get a particular recorded warning. -.. autoclass:: _pytest.recwarn.WarningsRecorder() - :members: +.. currentmodule:: _pytest.warnings -Each recorded warning has the attributes ``message``, ``category``, -``filename``, ``lineno``, ``file``, and ``line``. The ``category`` is the -class of the warning. The ``message`` is the warning itself; calling -``str(message)`` will return the actual message of the warning. - -.. note:: - :class:`RecordedWarning` was changed from a plain class to a namedtuple in pytest 3.1 - -.. note:: - ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated - differently; see :ref:`ensuring_function_triggers`. +Full API: :class:`WarningsRecorder`. .. _`ensuring a function triggers a deprecation warning`: diff --git a/doc/en/writing_plugins.rst b/doc/en/writing_plugins.rst index 9f5190c3e..7da09dbbb 100644 --- a/doc/en/writing_plugins.rst +++ b/doc/en/writing_plugins.rst @@ -12,9 +12,9 @@ only want to use but not write plugins. A plugin contains one or multiple hook functions. :ref:`Writing hooks ` explains the basics and details of how you can write a hook function yourself. ``pytest`` implements all aspects of configuration, collection, running and -reporting by calling `well specified hooks`_ of the following plugins: +reporting by calling :ref:`well specified hooks ` of the following plugins: -* :ref:`builtin plugins`: loaded from pytest's internal ``_pytest`` directory. +* builtin plugins: loaded from pytest's internal ``_pytest`` directory. * :ref:`external plugins `: modules discovered through `setuptools entry points`_ @@ -49,7 +49,7 @@ Plugin discovery order at tool startup Note that pytest does not find ``conftest.py`` files in deeper nested sub directories at tool startup. 
It is usually a good idea to keep - your conftest.py file in the top level test or project root directory. + your ``conftest.py`` file in the top level test or project root directory. * by recursively loading all plugins specified by the ``pytest_plugins`` variable in ``conftest.py`` files @@ -57,9 +57,7 @@ Plugin discovery order at tool startup .. _`pytest/plugin`: http://bitbucket.org/pytest-dev/pytest/src/tip/pytest/plugin/ .. _`conftest.py plugins`: -.. _`conftest.py`: .. _`localplugin`: -.. _`conftest`: .. _`local conftest plugins`: conftest.py: local per-directory plugins @@ -87,17 +85,19 @@ sub directory but not for other directories:: Here is how you might run it:: - pytest test_flat.py # will not show "setting up" - pytest a/test_sub.py # will show "setting up" +     pytest test_flat.py --capture=no # will not show "setting up" + pytest a/test_sub.py --capture=no # will show "setting up" -.. Note:: +.. note:: If you have ``conftest.py`` files which do not reside in a python package directory (i.e. one containing an ``__init__.py``) then "import conftest" can be ambiguous because there might be other - ``conftest.py`` files as well on your PYTHONPATH or ``sys.path``. + ``conftest.py`` files as well on your ``PYTHONPATH`` or ``sys.path``. It is thus good practice for projects to either put ``conftest.py`` under a package scope or to never import anything from a - conftest.py file. + ``conftest.py`` file. + + See also: :ref:`pythonpath`. Writing your own plugin @@ -109,10 +109,10 @@ If you want to write a plugin, there are many real-life examples you can copy from: * a custom collection example plugin: :ref:`yaml plugin` -* around 20 :ref:`builtin plugins` which provide pytest's own functionality +* builtin plugins which provide pytest's own functionality * many `external plugins `_ providing additional features -All of these plugins implement the documented `well specified hooks`_ +All of these plugins implement :ref:`hooks ` and/or :ref:`fixtures ` to extend and add functionality. .. note:: @@ -122,8 +122,8 @@ to extend and add functionality. for authoring plugins. The template provides an excellent starting point with a working plugin, - tests running with tox, comprehensive README and - entry-pointy already pre-configured. + tests running with tox, a comprehensive README file as well as a + pre-configured entry-point. Also consider :ref:`contributing your plugin to pytest-dev` once it has some happy users other than yourself. @@ -167,7 +167,7 @@ it in your setuptools-invocation: If a package is installed this way, ``pytest`` will load ``myproject.pluginmodule`` as a plugin which can define -`well specified hooks`_. +:ref:`hooks `. .. note:: @@ -176,6 +176,8 @@ If a package is installed this way, ``pytest`` will load to make it easy for users to find your plugin. +.. _assertion-rewriting: + Assertion Rewriting ------------------- @@ -184,18 +186,19 @@ statements and the detailed introspection of expressions upon assertion failures. This is provided by "assertion rewriting" which modifies the parsed AST before it gets compiled to bytecode. This is done via a :pep:`302` import hook which gets installed early on when -``pytest`` starts up and will perform this re-writing when modules get +``pytest`` starts up and will perform this rewriting when modules get imported. 
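As a rough illustration of what this rewriting buys you (the module name and values below are made up for this sketch), an ordinary ``assert`` inside a collected test module is enough to get detailed failure introspection, because the module is rewritten when it is imported:

.. code-block:: python

    # content of test_rewrite_demo.py -- a hypothetical test module
    def test_lists_match():
        left = [1, 2, 3]
        right = [1, 2, 4]
        # since this module is rewritten on import, a failure here reports
        # which elements differ instead of raising a bare AssertionError
        assert left == right
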
However since we do not want to test different bytecode -then you will run in production this hook only re-writes test modules +then you will run in production this hook only rewrites test modules themselves as well as any modules which are part of plugins. Any -other imported module will not be re-written and normal assertion +other imported module will not be rewritten and normal assertion behaviour will happen. If you have assertion helpers in other modules where you would need assertion rewriting to be enabled you need to ask ``pytest`` -explicitly to re-write this module before it gets imported. +explicitly to rewrite this module before it gets imported. .. autofunction:: pytest.register_assert_rewrite + :noindex: This is especially important when you write a pytest plugin which is created using a package. The import hook only treats ``conftest.py`` @@ -216,10 +219,10 @@ With the following typical ``setup.py`` extract: ... ) -In this case only ``pytest_foo/plugin.py`` will be re-written. If the +In this case only ``pytest_foo/plugin.py`` will be rewritten. If the helper module also contains assert statements which need to be -re-written it needs to be marked as such, before it gets imported. -This is easiest by marking it for re-writing inside the +rewritten it needs to be marked as such, before it gets imported. +This is easiest by marking it for rewriting inside the ``__init__.py`` module, which will always be imported first when a module inside a package is imported. This way ``plugin.py`` can still import ``helper.py`` normally. The contents of @@ -254,6 +257,18 @@ application modules: if ``myapp.testsupport.myplugin`` also declares ``pytest_plugins``, the contents of the variable will also be loaded as plugins, and so on. +.. _`requiring plugins in non-root conftests`: + +.. note:: + Requiring plugins using a ``pytest_plugins`` variable in non-root + ``conftest.py`` files is deprecated. + + This is important because ``conftest.py`` files implement per-directory + hook implementations, but once a plugin is imported, it will affect the + entire directory tree. In order to avoid confusion, defining + ``pytest_plugins`` in any ``conftest.py`` file which is not located in the + tests root directory is deprecated, and will raise a warning. + This mechanism makes it easy to share fixtures within applications or even external applications without the need to create external plugins using the ``setuptools``'s entry point technique. @@ -263,7 +278,7 @@ for assertion rewriting (see :func:`pytest.register_assert_rewrite`). However for this to have any effect the module must not be imported already; if it was already imported at the time the ``pytest_plugins`` statement is processed, a warning will result and -assertions inside the plugin will not be re-written. To fix this you +assertions inside the plugin will not be rewritten. To fix this you can either call :func:`pytest.register_assert_rewrite` yourself before the module is imported, or you can arrange the code to delay the importing until after the plugin is registered. @@ -278,7 +293,7 @@ the plugin manager like this: .. sourcecode:: python - plugin = config.pluginmanager.getplugin("name_of_plugin") + plugin = config.pluginmanager.get_plugin("name_of_plugin") If you want to look at the names of existing plugins, use the ``--trace-config`` option. @@ -286,34 +301,101 @@ the ``--trace-config`` option. Testing plugins --------------- -pytest comes with some facilities that you can enable for testing your -plugin. 
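Returning to the ``pytest_plugins`` mechanism and :func:`pytest.register_assert_rewrite` discussed above, a minimal, hypothetical sketch of a root-level ``conftest.py`` (the ``myapp.testsupport.myplugin`` dotted path is just the illustrative name used earlier) that requires an application plugin module and opts it into assertion rewriting before it is imported could look like this:

.. code-block:: python

    # content of conftest.py at the root of the test tree
    import pytest

    # register the shared module for assertion rewriting *before* the
    # pytest_plugins machinery below imports it
    pytest.register_assert_rewrite("myapp.testsupport.myplugin")

    # load the module as a plugin; declaring pytest_plugins in a
    # non-root conftest.py is deprecated, as noted above
    pytest_plugins = ["myapp.testsupport.myplugin"]
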
Given that you have an installed plugin you can enable the -:py:class:`testdir <_pytest.pytester.Testdir>` fixture via specifying a -command line option to include the pytester plugin (``-p pytester``) or -by putting ``pytest_plugins = "pytester"`` into your test or -``conftest.py`` file. You then will have a ``testdir`` fixture which you -can use like this:: +pytest comes with a plugin named ``pytester`` that helps you write tests for +your plugin code. The plugin is disabled by default, so you will have to enable +it before you can use it. - # content of test_myplugin.py +You can do so by adding the following line to a ``conftest.py`` file in your +testing directory: - pytest_plugins = "pytester" # to get testdir fixture +.. code-block:: python - def test_myplugin(testdir): + # content of conftest.py + + pytest_plugins = ["pytester"] + +Alternatively you can invoke pytest with the ``-p pytester`` command line +option. + +This will allow you to use the :py:class:`testdir <_pytest.pytester.Testdir>` +fixture for testing your plugin code. + +Let's demonstrate what you can do with the plugin with an example. Imagine we +developed a plugin that provides a fixture ``hello`` which yields a function +and we can invoke this function with one optional parameter. It will return a +string value of ``Hello World!`` if we do not supply a value or ``Hello +{value}!`` if we do supply a string value. + +.. code-block:: python + + # -*- coding: utf-8 -*- + + import pytest + + def pytest_addoption(parser): + group = parser.getgroup('helloworld') + group.addoption( + '--name', + action='store', + dest='name', + default='World', + help='Default "name" for hello().' + ) + + @pytest.fixture + def hello(request): + name = request.config.getoption('name') + + def _hello(name=None): + if not name: + name = request.config.getoption('name') + return "Hello {name}!".format(name=name) + + return _hello + + +Now the ``testdir`` fixture provides a convenient API for creating temporary +``conftest.py`` files and test files. It also allows us to run the tests and +return a result object, with which we can assert the tests' outcomes. + +.. code-block:: python + + def test_hello(testdir): + """Make sure that our plugin works.""" + + # create a temporary conftest.py file + testdir.makeconftest(""" + import pytest + + @pytest.fixture(params=[ + "Brianna", + "Andreas", + "Floris", + ]) + def name(request): + return request.param + """) + + # create a temporary pytest test file testdir.makepyfile(""" - def test_example(): - pass - """) - result = testdir.runpytest("--verbose") - result.stdout.fnmatch_lines(""" - test_example* + def test_hello_default(hello): + assert hello() == "Hello World!" + + def test_hello_name(hello, name): + assert hello(name) == "Hello {0}!".format(name) """) -Note that by default ``testdir.runpytest()`` will perform a pytest -in-process. You can pass the command line option ``--runpytest=subprocess`` -to have it happen in a subprocess. + # run all tests with pytest + result = testdir.runpytest() + + # check that all 4 tests passed + result.assert_outcomes(passed=4) + + +For more information about the result object that ``runpytest()`` returns, and +the methods that it provides please check out the :py:class:`RunResult +<_pytest.pytester.RunResult>` documentation. -Also see the :py:class:`RunResult <_pytest.pytester.RunResult>` for more -methods of the result object that you get from a call to ``runpytest``. .. 
_`writinghooks`: @@ -385,7 +467,7 @@ hook wrappers and passes the same arguments as to the regular hooks. At the yield point of the hook wrapper pytest will execute the next hook implementations and return their result to the yield point in the form of -a :py:class:`CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` instance which encapsulates a result or +a :py:class:`Result ` instance which encapsulates a result or exception info. The yield point itself will thus typically not raise exceptions (unless there are bugs). @@ -395,19 +477,24 @@ Here is an example definition of a hook wrapper:: @pytest.hookimpl(hookwrapper=True) def pytest_pyfunc_call(pyfuncitem): - # do whatever you want before the next hook executes + do_something_before_next_hook_executes() outcome = yield # outcome.excinfo may be None or a (cls, val, tb) tuple res = outcome.get_result() # will raise if outcome was exception - # postprocess result + + post_process_result(res) + + outcome.force_result(new_res) # to override the return value to the plugin system Note that hook wrappers don't return results themselves, they merely perform tracing or other side effects around the actual hook implementations. If the result of the underlying hook is a mutable object, they may modify that result but it's probably better to avoid it. +For more information, consult the `pluggy documentation `_. + Hook function ordering / call example ------------------------------------- @@ -450,7 +537,7 @@ Here is the order of execution: Plugin1). 4. Plugin3's pytest_collection_modifyitems then executing the code after the yield - point. The yield receives a :py:class:`CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` instance which encapsulates + point. The yield receives a :py:class:`Result ` instance which encapsulates the result from calling the non-wrappers. Wrappers shall not modify the result. It's possible to use ``tryfirst`` and ``trylast`` also in conjunction with @@ -468,6 +555,7 @@ implemented by other plugins in order to alter behaviour or interact with the new plugin: .. autofunction:: pytest_addhooks + :noindex: Hooks are usually declared as do-nothing functions that contain only documentation describing when the hook will be called and what return values @@ -505,167 +593,7 @@ declaring the hook functions directly in your plugin module, for example:: This has the added benefit of allowing you to conditionally install hooks depending on which plugins are installed. -.. _`well specified hooks`: - -.. currentmodule:: _pytest.hookspec - -pytest hook reference -===================== -Initialization, command line and configuration hooks ----------------------------------------------------- - -.. autofunction:: pytest_load_initial_conftests -.. autofunction:: pytest_cmdline_preparse -.. autofunction:: pytest_cmdline_parse -.. autofunction:: pytest_addoption -.. autofunction:: pytest_cmdline_main -.. autofunction:: pytest_configure -.. autofunction:: pytest_unconfigure - -Generic "runtest" hooks ------------------------ - -All runtest related hooks receive a :py:class:`pytest.Item <_pytest.main.Item>` object. - -.. autofunction:: pytest_runtest_protocol -.. autofunction:: pytest_runtest_setup -.. autofunction:: pytest_runtest_call -.. autofunction:: pytest_runtest_teardown -.. 
autofunction:: pytest_runtest_makereport - -For deeper understanding you may look at the default implementation of -these hooks in :py:mod:`_pytest.runner` and maybe also -in :py:mod:`_pytest.pdb` which interacts with :py:mod:`_pytest.capture` -and its input/output capturing in order to immediately drop -into interactive debugging when a test failure occurs. - -The :py:mod:`_pytest.terminal` reported specifically uses -the reporting hook to print information about a test run. - -Collection hooks ----------------- - -``pytest`` calls the following hooks for collecting files and directories: - -.. autofunction:: pytest_ignore_collect -.. autofunction:: pytest_collect_directory -.. autofunction:: pytest_collect_file - -For influencing the collection of objects in Python modules -you can use the following hook: - -.. autofunction:: pytest_pycollect_makeitem -.. autofunction:: pytest_generate_tests -.. autofunction:: pytest_make_parametrize_id - -After collection is complete, you can modify the order of -items, delete or otherwise amend the test items: - -.. autofunction:: pytest_collection_modifyitems - -Reporting hooks ---------------- - -Session related reporting hooks: - -.. autofunction:: pytest_collectstart -.. autofunction:: pytest_itemcollected -.. autofunction:: pytest_collectreport -.. autofunction:: pytest_deselected -.. autofunction:: pytest_report_header -.. autofunction:: pytest_report_teststatus -.. autofunction:: pytest_terminal_summary -.. autofunction:: pytest_fixture_setup -.. autofunction:: pytest_fixture_post_finalizer - -And here is the central hook for reporting about -test execution: - -.. autofunction:: pytest_runtest_logreport - -You can also use this hook to customize assertion representation for some -types: - -.. autofunction:: pytest_assertrepr_compare -Debugging/Interaction hooks ---------------------------- - -There are few hooks which can be used for special -reporting or interaction with exceptions: - -.. autofunction:: pytest_internalerror -.. autofunction:: pytest_keyboard_interrupt -.. autofunction:: pytest_exception_interact -.. autofunction:: pytest_enter_pdb - - -Reference of objects involved in hooks -====================================== - -.. autoclass:: _pytest.config.Config() - :members: - -.. autoclass:: _pytest.config.Parser() - :members: - -.. autoclass:: _pytest.main.Node() - :members: - -.. autoclass:: _pytest.main.Collector() - :members: - :show-inheritance: - -.. autoclass:: _pytest.main.Item() - :members: - :show-inheritance: - -.. autoclass:: _pytest.python.Module() - :members: - :show-inheritance: - -.. autoclass:: _pytest.python.Class() - :members: - :show-inheritance: - -.. autoclass:: _pytest.python.Function() - :members: - :show-inheritance: - -.. autoclass:: _pytest.fixtures.FixtureDef() - :members: - :show-inheritance: - -.. autoclass:: _pytest.runner.CallInfo() - :members: - -.. autoclass:: _pytest.runner.TestReport() - :members: - :inherited-members: - -.. autoclass:: _pytest.vendored_packages.pluggy._CallOutcome() - :members: - -.. autofunction:: _pytest.config.get_plugin_manager() - -.. autoclass:: _pytest.config.PytestPluginManager() - :members: - :undoc-members: - :show-inheritance: - -.. autoclass:: _pytest.vendored_packages.pluggy.PluginManager() - :members: - -.. currentmodule:: _pytest.pytester - -.. autoclass:: Testdir() - :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile - -.. autoclass:: RunResult() - :members: - -.. 
autoclass:: LineMatcher() - :members: diff --git a/extra/get_issues.py b/extra/get_issues.py index 2a8f8c316..99378b2f5 100644 --- a/extra/get_issues.py +++ b/extra/get_issues.py @@ -1,6 +1,5 @@ import json import py -import textwrap issues_url = "https://api.github.com/repos/pytest-dev/pytest/issues" diff --git a/pytest.py b/pytest.py index da6b64910..d3aebbff9 100644 --- a/pytest.py +++ b/pytest.py @@ -7,7 +7,7 @@ pytest: unit and functional testing with Python. # else we are imported from _pytest.config import ( - main, UsageError, _preloadplugins, cmdline, + main, UsageError, cmdline, hookspec, hookimpl ) from _pytest.fixtures import fixture, yield_fixture @@ -16,10 +16,10 @@ from _pytest.freeze_support import freeze_includes from _pytest import __version__ from _pytest.debugging import pytestPDB as __pytestPDB from _pytest.recwarn import warns, deprecated_call -from _pytest.runner import fail, skip, importorskip, exit +from _pytest.outcomes import fail, skip, importorskip, exit, xfail from _pytest.mark import MARK_GEN as mark, param -from _pytest.skipping import xfail -from _pytest.main import Item, Collector, File, Session +from _pytest.main import Session +from _pytest.nodes import Item, Collector, File from _pytest.fixtures import fillfixtures as _fillfuncargs from _pytest.python import ( Module, Class, Instance, Function, Generator, @@ -75,5 +75,4 @@ if __name__ == '__main__': else: from _pytest.compat import _setup_collect_fakemodule - _preloadplugins() # to populate pytest.* namespace so help(pytest) works _setup_collect_fakemodule() diff --git a/scripts/call-tox.bat b/scripts/call-tox.bat index 3ca9eb6d7..86fb25c1d 100644 --- a/scripts/call-tox.bat +++ b/scripts/call-tox.bat @@ -5,4 +5,4 @@ if "%TOXENV%" == "coveralls" ( exit /b 0 ) ) -C:\Python35\python -m tox +C:\Python36\python -m tox diff --git a/setup.py b/setup.py index 751868c04..d6defba6b 100644 --- a/setup.py +++ b/setup.py @@ -16,43 +16,69 @@ classifiers = [ 'Topic :: Utilities', ] + [ ('Programming Language :: Python :: %s' % x) - for x in '2 2.6 2.7 3 3.3 3.4 3.5 3.6'.split() + for x in '2 2.7 3 3.4 3.5 3.6 3.7'.split() ] with open('README.rst') as fd: long_description = fd.read() -def has_environment_marker_support(): +def get_environment_marker_support_level(): """ - Tests that setuptools has support for PEP-426 environment marker support. + Tests how well setuptools supports PEP-426 environment marker. The first known release to support it is 0.7 (and the earliest on PyPI seems to be 0.7.2 - so we're using that), see: http://pythonhosted.org/setuptools/history.html#id142 + so we're using that), see: https://setuptools.readthedocs.io/en/latest/history.html#id350 + + The support is later enhanced to allow direct conditional inclusions inside install_requires, + which is now recommended by setuptools. It first appeared in 36.2.0, went broken with 36.2.1, and + again worked since 36.2.2, so we're using that. 
See: + https://setuptools.readthedocs.io/en/latest/history.html#v36-2-2 + https://github.com/pypa/setuptools/issues/1099 References: * https://wheel.readthedocs.io/en/latest/index.html#defining-conditional-dependencies * https://www.python.org/dev/peps/pep-0426/#environment-markers + * https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies """ try: - return pkg_resources.parse_version(setuptools.__version__) >= pkg_resources.parse_version('0.7.2') + version = pkg_resources.parse_version(setuptools.__version__) + if version >= pkg_resources.parse_version('36.2.2'): + return 2 + if version >= pkg_resources.parse_version('0.7.2'): + return 1 except Exception as exc: sys.stderr.write("Could not test setuptool's version: %s\n" % exc) - return False + return 0 def main(): - install_requires = ['py>=1.4.33', 'setuptools'] # pluggy is vendored in _pytest.vendored_packages extras_require = {} - if has_environment_marker_support(): - extras_require[':python_version=="2.6"'] = ['argparse'] + install_requires = [ + 'py>=1.5.0', + 'six>=1.10.0', + 'setuptools', + 'attrs>=17.4.0', + 'more-itertools>=4.0.0', + 'atomicwrites>=1.0', + ] + # if _PYTEST_SETUP_SKIP_PLUGGY_DEP is set, skip installing pluggy; + # used by tox.ini to test with pluggy master + if '_PYTEST_SETUP_SKIP_PLUGGY_DEP' not in os.environ: + install_requires.append('pluggy>=0.5,<0.7') + environment_marker_support_level = get_environment_marker_support_level() + if environment_marker_support_level >= 2: + install_requires.append('funcsigs;python_version<"3.0"') + install_requires.append('colorama;sys_platform=="win32"') + elif environment_marker_support_level == 1: + extras_require[':python_version<"3.0"'] = ['funcsigs'] extras_require[':sys_platform=="win32"'] = ['colorama'] else: - if sys.version_info < (2, 7): - install_requires.append('argparse') if sys.platform == 'win32': install_requires.append('colorama') + if sys.version_info < (3, 0): + install_requires.append('funcsigs') setup( name='pytest', @@ -64,17 +90,20 @@ def main(): url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], - author='Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others', - entry_points={'console_scripts': - ['pytest=pytest:main', 'py.test=pytest:main']}, + author=( + 'Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, ' + 'Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others'), + entry_points={'console_scripts': [ + 'pytest=pytest:main', 'py.test=pytest:main']}, classifiers=classifiers, keywords="test unittest", cmdclass={'test': PyTest}, # the following should be enabled for release setup_requires=['setuptools-scm'], + python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', install_requires=install_requires, extras_require=extras_require, - packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.vendored_packages'], + packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.mark'], py_modules=['pytest'], zip_safe=False, ) @@ -82,10 +111,13 @@ def main(): class PyTest(Command): user_options = [] + def initialize_options(self): pass + def finalize_options(self): pass + def run(self): import subprocess PPATH = [x for x in os.environ.get('PYTHONPATH', '').split(':') if x] diff --git a/tasks/__init__.py b/tasks/__init__.py index 992f4a4ad..8ea038f0a 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -4,10 +4,9 @@ Invoke tasks to help with pytest development and release 
process. import invoke -from . import generate, vendoring +from . import generate ns = invoke.Collection( generate, - vendoring ) diff --git a/tasks/generate.py b/tasks/generate.py index fa8ee6557..268b36fd6 100644 --- a/tasks/generate.py +++ b/tasks/generate.py @@ -1,4 +1,6 @@ -import os +""" +Invoke development tasks. +""" from pathlib import Path from subprocess import check_output, check_call @@ -57,7 +59,7 @@ def regen(ctx): @invoke.task() def make_tag(ctx, version): - """Create a new (local) tag for the release, only if the repository is clean.""" + """Create a new, local tag for the release, only if the repository is clean.""" from git import Repo repo = Repo('.') @@ -74,89 +76,31 @@ def make_tag(ctx, version): repo.create_tag(version) -@invoke.task() -def devpi_upload(ctx, version, user, password=None): - """Creates and uploads a package to devpi for testing.""" - if password: - print("[generate.devpi_upload] devpi login {}".format(user)) - check_call(['devpi', 'login', user, '--password', password]) - - check_call(['devpi', 'use', 'https://devpi.net/{}/dev'.format(user)]) - - env = os.environ.copy() - env['SETUPTOOLS_SCM_PRETEND_VERSION'] = version - check_call(['devpi', 'upload', '--formats', 'sdist,bdist_wheel'], env=env) - print("[generate.devpi_upload] package uploaded") - - @invoke.task(help={ 'version': 'version being released', - 'user': 'name of the user on devpi to stage the generated package', - 'password': 'user password on devpi to stage the generated package ' - '(if not given assumed logged in)', }) -def pre_release(ctx, version, user, password=None): - """Generates new docs, release announcements and uploads a new release to devpi for testing.""" +def pre_release(ctx, version): + """Generates new docs, release announcements and creates a local tag.""" announce(ctx, version) regen(ctx) changelog(ctx, version, write_out=True) msg = 'Preparing release version {}'.format(version) check_call(['git', 'commit', '-a', '-m', msg]) - + make_tag(ctx, version) - devpi_upload(ctx, version=version, user=user, password=password) - print() print('[generate.pre_release] Please push your branch and open a PR.') @invoke.task(help={ 'version': 'version being released', - 'user': 'name of the user on devpi to stage the generated package', - 'pypi_name': 'name of the pypi configuration section in your ~/.pypirc', -}) -def publish_release(ctx, version, user, pypi_name): - """Publishes a package previously created by the 'pre_release' command.""" - from git import Repo - repo = Repo('.') - tag_names = [x.name for x in repo.tags] - if version not in tag_names: - print('Could not find tag for version {}, exiting...'.format(version)) - raise invoke.Exit(code=2) - - check_call(['devpi', 'use', 'https://devpi.net/{}/dev'.format(user)]) - check_call(['devpi', 'push', 'pytest=={}'.format(version), 'pypi:{}'.format(pypi_name)]) - check_call(['git', 'push', 'git@github.com:pytest-dev/pytest.git', version]) - - emails = [ - 'pytest-dev@python.org', - 'python-announce-list@python.org' - ] - if version.endswith('.0'): - emails.append('testing-in-python@lists.idyll.org') - print('Version {} has been published to PyPI!'.format(version)) - print() - print('Please send an email announcement with the contents from:') - print() - print(' doc/en/announce/release-{}.rst'.format(version)) - print() - print('To the following mail lists:') - print() - print(' ', ','.join(emails)) - print() - print('And announce it on twitter adding the #pytest hash tag.') - - -@invoke.task(help={ - 'version': 'version being 
released', - 'write_out': 'write changes to the actial changelog' + 'write_out': 'write changes to the actual changelog' }) def changelog(ctx, version, write_out=False): if write_out: addopts = [] else: addopts = ['--draft'] - check_call(['towncrier', '--version', version] + addopts) - + check_call(['towncrier', '--yes', '--version', version] + addopts) diff --git a/tasks/requirements.txt b/tasks/requirements.txt index eb12df3e9..7f41521e6 100644 --- a/tasks/requirements.txt +++ b/tasks/requirements.txt @@ -1,4 +1,4 @@ -invoke -tox gitpython -towncrier \ No newline at end of file +invoke +towncrier +tox diff --git a/tasks/vendoring.py b/tasks/vendoring.py deleted file mode 100644 index 867f2946b..000000000 --- a/tasks/vendoring.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import absolute_import, print_function -import py -import invoke - -VENDOR_TARGET = py.path.local("_pytest/vendored_packages") -GOOD_FILES = 'README.md', '__init__.py' - -@invoke.task() -def remove_libs(ctx): - print("removing vendored libs") - for path in VENDOR_TARGET.listdir(): - if path.basename not in GOOD_FILES: - print(" ", path) - path.remove() - -@invoke.task(pre=[remove_libs]) -def update_libs(ctx): - print("installing libs") - ctx.run("pip install -t {target} pluggy".format(target=VENDOR_TARGET)) - ctx.run("git add {target}".format(target=VENDOR_TARGET)) - print("Please commit to finish the update after running the tests:") - print() - print(' git commit -am "Updated vendored libs"') diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 9cb2650de..89a44911f 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -2,6 +2,9 @@ from __future__ import absolute_import, division, print_function import os import sys +import types + +import six import _pytest._code import py @@ -74,14 +77,13 @@ class TestGeneralUsage(object): print("---unconfigure") """) result = testdir.runpytest("-s", "asd") - assert result.ret == 4 # EXIT_USAGEERROR + assert result.ret == 4 # EXIT_USAGEERROR result.stderr.fnmatch_lines(["ERROR: file not found*asd"]) result.stdout.fnmatch_lines([ "*---configure", "*---unconfigure", ]) - def test_config_preparse_plugin_option(self, testdir): testdir.makepyfile(pytest_xyz=""" def pytest_addoption(parser): @@ -119,7 +121,7 @@ class TestGeneralUsage(object): testdir.makepyfile(import_fails="import does_not_work") result = testdir.runpytest(p) result.stdout.fnmatch_lines([ - #XXX on jython this fails: "> import import_fails", + # XXX on jython this fails: "> import import_fails", "ImportError while importing test module*", "*No module named *does_not_work*", ]) @@ -131,7 +133,7 @@ class TestGeneralUsage(object): result = testdir.runpytest(p1, p2) assert result.ret result.stderr.fnmatch_lines([ - "*ERROR: not found:*%s" %(p2.basename,) + "*ERROR: not found:*%s" % (p2.basename,) ]) def test_issue486_better_reporting_on_conftest_load_failure(self, testdir): @@ -147,7 +149,6 @@ class TestGeneralUsage(object): *ERROR*could not load*conftest.py* """) - def test_early_skip(self, testdir): testdir.mkdir("xyz") testdir.makeconftest(""" @@ -217,8 +218,8 @@ class TestGeneralUsage(object): assert not result.ret def test_issue109_sibling_conftests_not_loaded(self, testdir): - sub1 = testdir.tmpdir.mkdir("sub1") - sub2 = testdir.tmpdir.mkdir("sub2") + sub1 = testdir.mkdir("sub1") + sub2 = testdir.mkdir("sub2") sub1.join("conftest.py").write("assert 0") result = testdir.runpytest(sub2) assert result.ret == EXIT_NOTESTSCOLLECTED @@ -255,7 +256,7 @@ class 
TestGeneralUsage(object): if path.basename.startswith("conftest"): return MyCollector(path, parent) """) - result = testdir.runpytest(c.basename+"::"+"xyz") + result = testdir.runpytest(c.basename + "::" + "xyz") assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 pass*", @@ -310,7 +311,7 @@ class TestGeneralUsage(object): x """) result = testdir.runpytest() - assert result.ret == 3 # internal error + assert result.ret == 3 # internal error result.stderr.fnmatch_lines([ "INTERNAL*pytest_configure*", "INTERNAL*x*", @@ -346,7 +347,7 @@ class TestGeneralUsage(object): Importing a module that didn't exist, even if the ImportError was gracefully handled, would make our test crash. - Use recwarn here to silence this warning in Python 2.6 and 2.7: + Use recwarn here to silence this warning in Python 2.7: ImportWarning: Not importing directory '...\not_a_package': missing __init__.py """ testdir.mkdir('not_a_package') @@ -398,11 +399,11 @@ class TestGeneralUsage(object): p = tmpdir.join('test_test_plugins_given_as_strings.py') p.write('def test_foo(): pass') - mod = py.std.types.ModuleType("myplugin") + mod = types.ModuleType("myplugin") monkeypatch.setitem(sys.modules, 'myplugin', mod) assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0 - def test_parameterized_with_bytes_regex(self, testdir): + def test_parametrized_with_bytes_regex(self, testdir): p = testdir.makepyfile(""" import re import pytest @@ -410,12 +411,25 @@ class TestGeneralUsage(object): def test_stuff(r): pass """ - ) + ) res = testdir.runpytest(p) res.stdout.fnmatch_lines([ '*1 passed*' ]) + def test_parametrized_with_null_bytes(self, testdir): + """Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)""" + p = testdir.makepyfile(u""" + # encoding: UTF-8 + import pytest + + @pytest.mark.parametrize("data", [b"\\x00", "\\x00", u'ação']) + def test_foo(data): + assert data + """) + res = testdir.runpytest(p) + res.assert_outcomes(passed=3) + class TestInvocationVariants(object): def test_earlyinit(self, testdir): @@ -440,8 +454,8 @@ class TestInvocationVariants(object): #collect #cmdline #Item - #assert collect.Item is Item - #assert collect.Collector is Collector + # assert collect.Item is Item + # assert collect.Collector is Collector main skip xfail @@ -479,17 +493,17 @@ class TestInvocationVariants(object): def test_python_minus_m_invocation_ok(self, testdir): p1 = testdir.makepyfile("def test_hello(): pass") - res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1)) + res = testdir.run(sys.executable, "-m", "pytest", str(p1)) assert res.ret == 0 def test_python_minus_m_invocation_fail(self, testdir): p1 = testdir.makepyfile("def test_fail(): 0/0") - res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1)) + res = testdir.run(sys.executable, "-m", "pytest", str(p1)) assert res.ret == 1 def test_python_pytest_package(self, testdir): p1 = testdir.makepyfile("def test_pass(): pass") - res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1)) + res = testdir.run(sys.executable, "-m", "pytest", str(p1)) assert res.ret == 0 res.stdout.fnmatch_lines(["*1 passed*"]) @@ -522,7 +536,7 @@ class TestInvocationVariants(object): path = testdir.mkpydir("tpkg") path.join("test_hello.py").write('raise ImportError') - result = testdir.runpytest_subprocess("--pyargs", "tpkg.test_hello") + result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True) assert result.ret != 0 result.stdout.fnmatch_lines([ @@ -540,14 +554,14 @@ class 
TestInvocationVariants(object): result.stdout.fnmatch_lines([ "*2 passed*" ]) - result = testdir.runpytest("--pyargs", "tpkg.test_hello") + result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True) assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 passed*" ]) def join_pythonpath(what): - cur = py.std.os.environ.get('PYTHONPATH') + cur = os.environ.get('PYTHONPATH') if cur: return str(what) + os.pathsep + cur return what @@ -564,7 +578,7 @@ class TestInvocationVariants(object): ]) monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir)) - result = testdir.runpytest("--pyargs", "tpkg.test_missing") + result = testdir.runpytest("--pyargs", "tpkg.test_missing", syspathinsert=True) assert result.ret != 0 result.stderr.fnmatch_lines([ "*not*found*test_missing*", @@ -592,11 +606,11 @@ class TestInvocationVariants(object): # The structure of the test directory is now: # . # ├── hello - # │   └── ns_pkg - # │   ├── __init__.py - # │   └── hello - # │   ├── __init__.py - # │   └── test_hello.py + # │ └── ns_pkg + # │ ├── __init__.py + # │ └── hello + # │ ├── __init__.py + # │ └── test_hello.py # └── world # └── ns_pkg # ├── __init__.py @@ -605,7 +619,7 @@ class TestInvocationVariants(object): # └── test_world.py def join_pythonpath(*dirs): - cur = py.std.os.environ.get('PYTHONPATH') + cur = os.environ.get('PYTHONPATH') if cur: dirs += (cur,) return os.pathsep.join(str(p) for p in dirs) @@ -614,24 +628,96 @@ class TestInvocationVariants(object): monkeypatch.syspath_prepend(p) # mixed module and filenames: - result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "world/ns_pkg") + os.chdir('world') + result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world") assert result.ret == 0 result.stdout.fnmatch_lines([ - "*test_hello.py::test_hello*PASSED", - "*test_hello.py::test_other*PASSED", - "*test_world.py::test_world*PASSED", - "*test_world.py::test_other*PASSED", + "*test_hello.py::test_hello*PASSED*", + "*test_hello.py::test_other*PASSED*", + "*test_world.py::test_world*PASSED*", + "*test_world.py::test_other*PASSED*", "*4 passed*" ]) # specify tests within a module + testdir.chdir() result = testdir.runpytest("--pyargs", "-v", "ns_pkg.world.test_world::test_other") assert result.ret == 0 result.stdout.fnmatch_lines([ - "*test_world.py::test_other*PASSED", + "*test_world.py::test_other*PASSED*", "*1 passed*" ]) + @pytest.mark.skipif(not hasattr(os, "symlink"), reason="requires symlinks") + def test_cmdline_python_package_symlink(self, testdir, monkeypatch): + """ + test --pyargs option with packages with path containing symlink can + have conftest.py in their package (#2985) + """ + # dummy check that we can actually create symlinks: on Windows `os.symlink` is available, + # but normal users require special admin privileges to create symlinks. + if sys.platform == 'win32': + try: + os.symlink(str(testdir.tmpdir.ensure('tmpfile')), str(testdir.tmpdir.join('tmpfile2'))) + except OSError as e: + pytest.skip(six.text_type(e.args[0])) + monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', raising=False) + + search_path = ["lib", os.path.join("local", "lib")] + + dirname = "lib" + d = testdir.mkdir(dirname) + foo = d.mkdir("foo") + foo.ensure("__init__.py") + lib = foo.mkdir('bar') + lib.ensure("__init__.py") + lib.join("test_bar.py"). \ + write("def test_bar(): pass\n" + "def test_other(a_fixture):pass") + lib.join("conftest.py"). 
\ + write("import pytest\n" + "@pytest.fixture\n" + "def a_fixture():pass") + + d_local = testdir.mkdir("local") + symlink_location = os.path.join(str(d_local), "lib") + if six.PY2: + os.symlink(str(d), symlink_location) + else: + os.symlink(str(d), symlink_location, target_is_directory=True) + + # The structure of the test directory is now: + # . + # ├── local + # │ └── lib -> ../lib + # └── lib + # └── foo + # ├── __init__.py + # └── bar + # ├── __init__.py + # ├── conftest.py + # └── test_bar.py + + def join_pythonpath(*dirs): + cur = os.getenv('PYTHONPATH') + if cur: + dirs += (cur,) + return os.pathsep.join(str(p) for p in dirs) + + monkeypatch.setenv('PYTHONPATH', join_pythonpath(*search_path)) + for p in search_path: + monkeypatch.syspath_prepend(p) + + # module picked up in symlink-ed directory: + result = testdir.runpytest("--pyargs", "-v", "foo.bar") + testdir.chdir() + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*lib/foo/bar/test_bar.py::test_bar*PASSED*", + "*lib/foo/bar/test_bar.py::test_other*PASSED*", + "*2 passed*" + ]) + def test_cmdline_python_package_not_exists(self, testdir): result = testdir.runpytest("--pyargs", "tpkgwhatv") assert result.ret @@ -676,7 +762,6 @@ class TestInvocationVariants(object): import _pytest.config assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager - def test_has_plugin(self, request): """Test hasplugin function of the plugin manager (#932).""" assert request.config.pluginmanager.hasplugin('python') @@ -719,12 +804,12 @@ class TestDurations(object): result = testdir.runpytest("--durations=0") assert result.ret == 0 for x in "123": - for y in 'call',: #'setup', 'call', 'teardown': + for y in 'call', : # 'setup', 'call', 'teardown': for line in result.stdout.lines: if ("test_%s" % x) in line and y in line: break else: - raise AssertionError("not found %s %s" % (x,y)) + raise AssertionError("not found %s %s" % (x, y)) def test_with_deselected(self, testdir): testdir.makepyfile(self.source) @@ -764,6 +849,7 @@ class TestDurationWithFixture(object): def test_2(): time.sleep(frag) """ + def test_setup_function(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=10") @@ -816,7 +902,7 @@ def test_deferred_hook_checking(testdir): testdir.syspathinsert() testdir.makepyfile(**{ 'plugin.py': """ - class Hooks: + class Hooks(object): def pytest_my_hook(self, config): pass @@ -835,3 +921,70 @@ def test_deferred_hook_checking(testdir): }) result = testdir.runpytest() result.stdout.fnmatch_lines(['* 1 passed *']) + + +def test_fixture_values_leak(testdir): + """Ensure that fixture objects are properly destroyed by the garbage collector at the end of their expected + life-times (#2981). 
+ """ + testdir.makepyfile(""" + import attr + import gc + import pytest + import weakref + + @attr.s + class SomeObj(object): + name = attr.ib() + + fix_of_test1_ref = None + session_ref = None + + @pytest.fixture(scope='session') + def session_fix(): + global session_ref + obj = SomeObj(name='session-fixture') + session_ref = weakref.ref(obj) + return obj + + @pytest.fixture + def fix(session_fix): + global fix_of_test1_ref + obj = SomeObj(name='local-fixture') + fix_of_test1_ref = weakref.ref(obj) + return obj + + def test1(fix): + assert fix_of_test1_ref() is fix + + def test2(): + gc.collect() + # fixture "fix" created during test1 must have been destroyed by now + assert fix_of_test1_ref() is None + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines(['* 2 passed *']) + + +def test_fixture_order_respects_scope(testdir): + """Ensure that fixtures are created according to scope order, regression test for #2405 + """ + testdir.makepyfile(''' + import pytest + + data = {} + + @pytest.fixture(scope='module') + def clean_data(): + data.clear() + + @pytest.fixture(autouse=True) + def add_data(): + data.update(value=True) + + @pytest.mark.usefixtures('clean_data') + def test_value(): + assert data.get('value') + ''') + result = testdir.runpytest() + assert result.ret == 0 diff --git a/testing/code/test_code.py b/testing/code/test_code.py index 479a2e7cc..209a8ef19 100644 --- a/testing/code/test_code.py +++ b/testing/code/test_code.py @@ -1,9 +1,11 @@ +# coding: utf-8 from __future__ import absolute_import, division, print_function import sys import _pytest._code import py import pytest +from test_excinfo import TWMock def test_ne(): @@ -12,6 +14,7 @@ def test_ne(): code2 = _pytest._code.Code(compile('foo = "baz"', '', 'exec')) assert code2 != code1 + def test_code_gives_back_name_for_not_existing_file(): name = 'abc-123' co_code = compile("pass\n", name, 'exec') @@ -20,6 +23,7 @@ def test_code_gives_back_name_for_not_existing_file(): assert str(code.path) == name assert code.fullsource is None + def test_code_with_class(): class A(object): pass @@ -30,11 +34,13 @@ if True: def x(): pass + def test_code_fullsource(): code = _pytest._code.Code(x) full = code.fullsource assert 'test_code_fullsource()' in str(full) + def test_code_source(): code = _pytest._code.Code(x) src = code.source() @@ -42,6 +48,7 @@ def test_code_source(): pass""" assert str(src) == expected + def test_frame_getsourcelineno_myself(): def func(): return sys._getframe(0) @@ -50,6 +57,7 @@ def test_frame_getsourcelineno_myself(): source, lineno = f.code.fullsource, f.lineno assert source[lineno].startswith(" return sys._getframe(0)") + def test_getstatement_empty_fullsource(): def func(): return sys._getframe(0) @@ -62,6 +70,7 @@ def test_getstatement_empty_fullsource(): finally: f.code.__class__.fullsource = prop + def test_code_from_func(): co = _pytest._code.Code(test_frame_getsourcelineno_myself) assert co.firstlineno @@ -92,6 +101,7 @@ def test_unicode_handling_syntax_error(): if sys.version_info[0] < 3: unicode(excinfo) + def test_code_getargs(): def f1(x): pass @@ -141,8 +151,10 @@ class TestExceptionInfo(object): def test_bad_getsource(self): try: - if False: pass - else: assert False + if False: + pass + else: + assert False except AssertionError: exci = _pytest._code.ExceptionInfo() assert exci.getrepr() @@ -152,11 +164,33 @@ class TestTracebackEntry(object): def test_getsource(self): try: - if False: pass - else: assert False + if False: + pass + else: + assert False except AssertionError: exci = 
_pytest._code.ExceptionInfo() entry = exci.traceback[0] source = entry.getsource() - assert len(source) == 4 - assert 'else: assert False' in source[3] + assert len(source) == 6 + assert 'assert False' in source[5] + + +class TestReprFuncArgs(object): + + def test_not_raise_exception_with_mixed_encoding(self): + from _pytest._code.code import ReprFuncArgs + + tw = TWMock() + + args = [ + ('unicode_string', u"São Paulo"), + ('utf8_string', 'S\xc3\xa3o Paulo'), + ] + + r = ReprFuncArgs(args) + r.toterminal(tw) + if sys.version_info[0] >= 3: + assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo' + else: + assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo' diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py index 0b074d64a..6b4adf001 100644 --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -1,8 +1,9 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function -import sys import operator +import os +import sys import _pytest import py import pytest @@ -12,9 +13,6 @@ from _pytest._code.code import ( ReprExceptionInfo, ExceptionChainRepr) -queue = py.builtin._tryimport('queue', 'Queue') - -failsonjython = pytest.mark.xfail("sys.platform.startswith('java')") from test_source import astonly try: @@ -24,23 +22,32 @@ except ImportError: else: invalidate_import_caches = getattr(importlib, "invalidate_caches", None) -import pytest +queue = py.builtin._tryimport('queue', 'Queue') + +failsonjython = pytest.mark.xfail("sys.platform.startswith('java')") + pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3])) + class TWMock(object): WRITE = object() def __init__(self): self.lines = [] self.is_writing = False + def sep(self, sep, line=None): self.lines.append((sep, line)) + def write(self, msg, **kw): self.lines.append((TWMock.WRITE, msg)) + def line(self, line, **kw): self.lines.append(line) + def markup(self, text, **kw): return text + def get_write_msg(self, idx): flag, msg = self.lines[idx] assert flag == TWMock.WRITE @@ -48,6 +55,7 @@ class TWMock(object): fullwidth = 80 + def test_excinfo_simple(): try: raise ValueError @@ -55,6 +63,7 @@ def test_excinfo_simple(): info = _pytest._code.ExceptionInfo() assert info.type == ValueError + def test_excinfo_getstatement(): def g(): raise ValueError @@ -69,28 +78,35 @@ def test_excinfo_getstatement(): linenumbers = [_pytest._code.getrawcode(f).co_firstlineno - 1 + 4, _pytest._code.getrawcode(f).co_firstlineno - 1 + 1, _pytest._code.getrawcode(g).co_firstlineno - 1 + 1, ] - l = list(excinfo.traceback) - foundlinenumbers = [x.lineno for x in l] + values = list(excinfo.traceback) + foundlinenumbers = [x.lineno for x in values] assert foundlinenumbers == linenumbers - #for x in info: + # for x in info: # print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement) - #xxx + # xxx # testchain for getentries test below + + def f(): # raise ValueError # + + def g(): # __tracebackhide__ = True f() # + + def h(): # g() # + class TestTraceback_f_g_h(object): def setup_method(self, method): try: @@ -101,8 +117,8 @@ class TestTraceback_f_g_h(object): def test_traceback_entries(self): tb = self.excinfo.traceback entries = list(tb) - assert len(tb) == 4 # maybe fragile test - assert len(entries) == 4 # maybe fragile test + assert len(tb) == 4 # maybe fragile test + assert len(entries) == 4 # maybe fragile test names = ['f', 'g', 'h'] for entry in entries: try: @@ -113,7 +129,7 @@ class TestTraceback_f_g_h(object): def 
test_traceback_entry_getsource(self): tb = self.excinfo.traceback - s = str(tb[-1].getsource() ) + s = str(tb[-1].getsource()) assert s.startswith("def f():") assert s.endswith("raise ValueError") @@ -129,10 +145,10 @@ class TestTraceback_f_g_h(object): xyz() """) try: - exec (source.compile()) + exec(source.compile()) except NameError: tb = _pytest._code.ExceptionInfo().traceback - print (tb[-1].getsource()) + print(tb[-1].getsource()) s = str(tb[-1].getsource()) assert s.startswith("def xyz():\n try:") assert s.strip().endswith("except somenoname:") @@ -143,7 +159,7 @@ class TestTraceback_f_g_h(object): traceback = self.excinfo.traceback newtraceback = traceback.cut(path=path, firstlineno=firstlineno) assert len(newtraceback) == 1 - newtraceback = traceback.cut(path=path, lineno=firstlineno+2) + newtraceback = traceback.cut(path=path, lineno=firstlineno + 2) assert len(newtraceback) == 1 def test_traceback_cut_excludepath(self, testdir): @@ -210,7 +226,7 @@ class TestTraceback_f_g_h(object): def f(n): if n == 0: raise RuntimeError("hello") - f(n-1) + f(n - 1) excinfo = pytest.raises(RuntimeError, f, 100) monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex") @@ -229,7 +245,7 @@ class TestTraceback_f_g_h(object): def f(n): try: do_stuff() - except: + except: # noqa reraise_me() excinfo = pytest.raises(RuntimeError, f, 8) @@ -238,7 +254,7 @@ class TestTraceback_f_g_h(object): assert recindex is None def test_traceback_messy_recursion(self): - #XXX: simplified locally testable version + # XXX: simplified locally testable version decorator = pytest.importorskip('decorator').decorator def log(f, *k, **kw): @@ -294,44 +310,47 @@ class TestTraceback_f_g_h(object): assert entry.lineno == co.firstlineno + 2 assert entry.frame.code.name == 'g' + def test_excinfo_exconly(): excinfo = pytest.raises(ValueError, h) assert excinfo.exconly().startswith('ValueError') excinfo = pytest.raises(ValueError, - "raise ValueError('hello\\nworld')") + "raise ValueError('hello\\nworld')") msg = excinfo.exconly(tryshort=True) assert msg.startswith('ValueError') assert msg.endswith("world") + def test_excinfo_repr(): excinfo = pytest.raises(ValueError, h) s = repr(excinfo) assert s == "" + def test_excinfo_str(): excinfo = pytest.raises(ValueError, h) s = str(excinfo) - assert s.startswith(__file__[:-9]) # pyc file and $py.class + assert s.startswith(__file__[:-9]) # pyc file and $py.class assert s.endswith("ValueError") - assert len(s.split(":")) >= 3 # on windows it's 4 + assert len(s.split(":")) >= 3 # on windows it's 4 + def test_excinfo_errisinstance(): excinfo = pytest.raises(ValueError, h) assert excinfo.errisinstance(ValueError) + def test_excinfo_no_sourcecode(): try: - exec ("raise ValueError()") + exec("raise ValueError()") except ValueError: excinfo = _pytest._code.ExceptionInfo() s = str(excinfo.traceback[-1]) - if py.std.sys.version_info < (2,5): - assert s == " File '':1 in ?\n ???\n" - else: - assert s == " File '':1 in \n ???\n" + assert s == " File '':1 in \n ???\n" + def test_excinfo_no_python_sourcecode(tmpdir): - #XXX: simplified locally testable version + # XXX: simplified locally testable version tmpdir.join('test.txt').write("{{ h()}}:") jinja2 = pytest.importorskip('jinja2') @@ -339,10 +358,10 @@ def test_excinfo_no_python_sourcecode(tmpdir): env = jinja2.Environment(loader=loader) template = env.get_template('test.txt') excinfo = pytest.raises(ValueError, - template.render, h=h) + template.render, h=h) for item in excinfo.traceback: - print(item) #XXX: for some reason 
jinja.Template.render is printed in full - item.source # shouldnt fail + print(item) # XXX: for some reason jinja.Template.render is printed in full + item.source # shouldnt fail if item.path.basename == 'test.txt': assert str(item.source) == '{{ h()}}:' @@ -358,6 +377,7 @@ def test_entrysource_Queue_example(): s = str(source).strip() assert s.startswith("def get") + def test_codepath_Queue_example(): try: queue.Queue().get(timeout=0.001) @@ -369,11 +389,13 @@ def test_codepath_Queue_example(): assert path.basename.lower() == "queue.py" assert path.check() + def test_match_succeeds(): with pytest.raises(ZeroDivisionError) as excinfo: 0 // 0 excinfo.match(r'.*zero.*') + def test_match_raises_error(testdir): testdir.makepyfile(""" import pytest @@ -388,6 +410,7 @@ def test_match_raises_error(testdir): "*AssertionError*Pattern*[123]*not found*", ]) + class TestFormattedExcinfo(object): @pytest.fixture @@ -406,10 +429,10 @@ class TestFormattedExcinfo(object): def excinfo_from_exec(self, source): source = _pytest._code.Source(source).strip() try: - exec (source.compile()) + exec(source.compile()) except KeyboardInterrupt: raise - except: + except: # noqa return _pytest._code.ExceptionInfo() assert 0, "did not raise" @@ -442,17 +465,16 @@ class TestFormattedExcinfo(object): 'E AssertionError' ] - def test_repr_source_not_existing(self): pr = FormattedExcinfo() co = compile("raise ValueError()", "", "exec") try: - exec (co) + exec(co) except ValueError: excinfo = _pytest._code.ExceptionInfo() repr = pr.repr_excinfo(excinfo) assert repr.reprtraceback.reprentries[1].lines[0] == "> ???" - if py.std.sys.version_info[0] >= 3: + if sys.version_info[0] >= 3: assert repr.chain[0][0].reprentries[1].lines[0] == "> ???" def test_repr_many_line_source_not_existing(self): @@ -462,12 +484,12 @@ a = 1 raise ValueError() """, "", "exec") try: - exec (co) + exec(co) except ValueError: excinfo = _pytest._code.ExceptionInfo() repr = pr.repr_excinfo(excinfo) assert repr.reprtraceback.reprentries[1].lines[0] == "> ???" - if py.std.sys.version_info[0] >= 3: + if sys.version_info[0] >= 3: assert repr.chain[0][0].reprentries[1].lines[0] == "> ???" def test_repr_source_failing_fullsource(self): @@ -492,7 +514,7 @@ raise ValueError() class FakeTracebackEntry(_pytest._code.Traceback.Entry): def __init__(self, tb, excinfo=None): - self.lineno = 5+3 + self.lineno = 5 + 3 @property def frame(self): @@ -522,20 +544,18 @@ raise ValueError() tb = FakeRawTB() excinfo.traceback = Traceback(tb) - fail = IOError() # noqa + fail = IOError() repr = pr.repr_excinfo(excinfo) assert repr.reprtraceback.reprentries[0].lines[0] == "> ???" - if py.std.sys.version_info[0] >= 3: + if sys.version_info[0] >= 3: assert repr.chain[0][0].reprentries[0].lines[0] == "> ???" - fail = py.error.ENOENT # noqa repr = pr.repr_excinfo(excinfo) assert repr.reprtraceback.reprentries[0].lines[0] == "> ???" - if py.std.sys.version_info[0] >= 3: + if sys.version_info[0] >= 3: assert repr.chain[0][0].reprentries[0].lines[0] == "> ???" 
- def test_repr_local(self): p = FormattedExcinfo(showlocals=True) loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}} @@ -575,19 +595,19 @@ raise ValueError() loc = repr_entry.reprfileloc assert loc.path == mod.__file__ assert loc.lineno == 3 - #assert loc.message == "ValueError: hello" + # assert loc.message == "ValueError: hello" def test_repr_tracebackentry_lines2(self, importasmod): mod = importasmod(""" def func1(m, x, y, z): raise ValueError("hello\\nworld") """) - excinfo = pytest.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120) + excinfo = pytest.raises(ValueError, mod.func1, "m" * 90, 5, 13, "z" * 120) excinfo.traceback = excinfo.traceback.filter() entry = excinfo.traceback[-1] p = FormattedExcinfo(funcargs=True) reprfuncargs = p.repr_args(entry) - assert reprfuncargs.args[0] == ('m', repr("m"*90)) + assert reprfuncargs.args[0] == ('m', repr("m" * 90)) assert reprfuncargs.args[1] == ('x', '5') assert reprfuncargs.args[2] == ('y', '13') assert reprfuncargs.args[3] == ('z', repr("z" * 120)) @@ -720,7 +740,7 @@ raise ValueError() repr = p.repr_excinfo(excinfo) assert repr.reprtraceback assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries) - if py.std.sys.version_info[0] >= 3: + if sys.version_info[0] >= 3: assert repr.chain[0][0] assert len(repr.chain[0][0].reprentries) == len(reprtb.reprentries) assert repr.reprcrash.path.endswith("mod.py") @@ -740,7 +760,7 @@ raise ValueError() def raiseos(): raise OSError(2) - monkeypatch.setattr(py.std.os, 'getcwd', raiseos) + monkeypatch.setattr(os, 'getcwd', raiseos) assert p._makepath(__file__) == __file__ p.repr_traceback(excinfo) @@ -798,10 +818,10 @@ raise ValueError() for style in ("short", "long", "no"): for showlocals in (True, False): repr = excinfo.getrepr(style=style, showlocals=showlocals) - if py.std.sys.version_info[0] < 3: + if sys.version_info[0] < 3: assert isinstance(repr, ReprExceptionInfo) assert repr.reprtraceback.style == style - if py.std.sys.version_info[0] >= 3: + if sys.version_info[0] >= 3: assert isinstance(repr, ExceptionChainRepr) for repr in repr.chain: assert repr[0].style == style @@ -934,10 +954,10 @@ raise ValueError() @pytest.mark.parametrize('reproptions', [ {'style': style, 'showlocals': showlocals, 'funcargs': funcargs, 'tbfilter': tbfilter - } for style in ("long", "short", "no") - for showlocals in (True, False) - for tbfilter in (True, False) - for funcargs in (True, False)]) + } for style in ("long", "short", "no") + for showlocals in (True, False) + for tbfilter in (True, False) + for funcargs in (True, False)]) def test_format_excinfo(self, importasmod, reproptions): mod = importasmod(""" def g(x): @@ -969,7 +989,8 @@ raise ValueError() r = excinfo.getrepr(style="long") tw = TWMock() r.toterminal(tw) - for line in tw.lines: print (line) + for line in tw.lines: + print(line) assert tw.lines[0] == "" assert tw.lines[1] == " def f():" assert tw.lines[2] == "> g()" @@ -1016,19 +1037,20 @@ raise ValueError() r = excinfo.getrepr(style="long") tw = TWMock() r.toterminal(tw) - for line in tw.lines: print (line) - assert tw.lines[0] == "" - assert tw.lines[1] == " def f():" - assert tw.lines[2] == " try:" - assert tw.lines[3] == "> g()" - assert tw.lines[4] == "" + for line in tw.lines: + print(line) + assert tw.lines[0] == "" + assert tw.lines[1] == " def f():" + assert tw.lines[2] == " try:" + assert tw.lines[3] == "> g()" + assert tw.lines[4] == "" line = tw.get_write_msg(5) assert line.endswith('mod.py') - assert tw.lines[6] == ':6: ' - assert tw.lines[7] == ("_ ", None) - 
assert tw.lines[8] == "" - assert tw.lines[9] == " def g():" - assert tw.lines[10] == "> raise ValueError()" + assert tw.lines[6] == ':6: ' + assert tw.lines[7] == ("_ ", None) + assert tw.lines[8] == "" + assert tw.lines[9] == " def g():" + assert tw.lines[10] == "> raise ValueError()" assert tw.lines[11] == "E ValueError" assert tw.lines[12] == "" line = tw.get_write_msg(13) @@ -1071,6 +1093,36 @@ raise ValueError() assert line.endswith('mod.py') assert tw.lines[47] == ":15: AttributeError" + @pytest.mark.skipif("sys.version_info[0] < 3") + def test_exc_repr_with_raise_from_none_chain_suppression(self, importasmod): + mod = importasmod(""" + def f(): + try: + g() + except Exception: + raise AttributeError() from None + def g(): + raise ValueError() + """) + excinfo = pytest.raises(AttributeError, mod.f) + r = excinfo.getrepr(style="long") + tw = TWMock() + r.toterminal(tw) + for line in tw.lines: + print(line) + assert tw.lines[0] == "" + assert tw.lines[1] == " def f():" + assert tw.lines[2] == " try:" + assert tw.lines[3] == " g()" + assert tw.lines[4] == " except Exception:" + assert tw.lines[5] == "> raise AttributeError() from None" + assert tw.lines[6] == "E AttributeError" + assert tw.lines[7] == "" + line = tw.get_write_msg(8) + assert line.endswith('mod.py') + assert tw.lines[9] == ":6: AttributeError" + assert len(tw.lines) == 10 + @pytest.mark.skipif("sys.version_info[0] < 3") @pytest.mark.parametrize('reason, description', [ ('cause', 'The above exception was the direct cause of the following exception:'), @@ -1163,7 +1215,7 @@ def test_exception_repr_extraction_error_on_recursion(): try: a(numpy_like()) - except: + except: # noqa from _pytest._code.code import ExceptionInfo from _pytest.pytester import LineMatcher exc_info = ExceptionInfo() @@ -1187,12 +1239,9 @@ def test_no_recursion_index_on_recursion_error(): return getattr(self, '_' + attr) RecursionDepthError().trigger - except: + except: # noqa from _pytest._code.code import ExceptionInfo exc_info = ExceptionInfo() - if sys.version_info[:2] == (2, 6): - assert "'RecursionDepthError' object has no attribute '___" in str(exc_info.getrepr()) - else: - assert 'maximum recursion' in str(exc_info.getrepr()) + assert 'maximum recursion' in str(exc_info.getrepr()) else: assert 0 diff --git a/testing/code/test_source.py b/testing/code/test_source.py index bdbc00d19..ee731ed4f 100644 --- a/testing/code/test_source.py +++ b/testing/code/test_source.py @@ -2,21 +2,20 @@ # disable flake check on this file because some constructs are strange # or redundant on purpose and can't be disable on a line-by-line basis from __future__ import absolute_import, division, print_function +import inspect import sys import _pytest._code import py import pytest from _pytest._code import Source -from _pytest._code.source import _ast +from _pytest._code.source import ast -if _ast is not None: - astonly = pytest.mark.nothing -else: - astonly = pytest.mark.xfail("True", reason="only works with AST-compile") +astonly = pytest.mark.nothing failsonjython = pytest.mark.xfail("sys.platform.startswith('java')") + def test_source_str_function(): x = Source("3") assert str(x) == "3" @@ -34,6 +33,7 @@ def test_source_str_function(): """, rstrip=True) assert str(x) == "\n3" + def test_unicode(): try: unicode @@ -45,10 +45,12 @@ def test_unicode(): val = eval(co) assert isinstance(val, unicode) + def test_source_from_function(): source = _pytest._code.Source(test_source_str_function) assert str(source).startswith('def test_source_str_function():') + def 
test_source_from_method(): class TestClass(object): def test_method(self): @@ -57,11 +59,13 @@ def test_source_from_method(): assert source.lines == ["def test_method(self):", " pass"] + def test_source_from_lines(): lines = ["a \n", "b\n", "c"] source = _pytest._code.Source(lines) assert source.lines == ['a ', 'b', 'c'] + def test_source_from_inner_function(): def f(): pass @@ -70,6 +74,7 @@ def test_source_from_inner_function(): source = _pytest._code.Source(f) assert str(source).startswith('def f():') + def test_source_putaround_simple(): source = Source("raise ValueError") source = source.putaround( @@ -78,7 +83,7 @@ def test_source_putaround_simple(): x = 42 else: x = 23""") - assert str(source)=="""\ + assert str(source) == """\ try: raise ValueError except ValueError: @@ -86,6 +91,7 @@ except ValueError: else: x = 23""" + def test_source_putaround(): source = Source() source = source.putaround(""" @@ -94,24 +100,28 @@ def test_source_putaround(): """) assert str(source).strip() == "if 1:\n x=1" + def test_source_strips(): source = Source("") assert source == Source() assert str(source) == '' assert source.strip() == source + def test_source_strip_multiline(): source = Source() source.lines = ["", " hello", " "] source2 = source.strip() assert source2.lines == [" hello"] + def test_syntaxerror_rerepresentation(): ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz') assert ex.value.lineno == 1 - assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython? + assert ex.value.offset in (4, 7) # XXX pypy/jython versus cpython? assert ex.value.text.strip(), 'x x' + def test_isparseable(): assert Source("hello").isparseable() assert Source("if 1:\n pass").isparseable() @@ -120,6 +130,7 @@ def test_isparseable(): assert not Source(" \nif 1:\npass").isparseable() assert not Source(chr(0)).isparseable() + class TestAccesses(object): source = Source("""\ def f(x): @@ -127,6 +138,7 @@ class TestAccesses(object): def g(x): pass """) + def test_getrange(self): x = self.source[0:2] assert x.isparseable() @@ -141,8 +153,9 @@ class TestAccesses(object): assert len(self.source) == 4 def test_iter(self): - l = [x for x in self.source] - assert len(l) == 4 + values = [x for x in self.source] + assert len(values) == 4 + class TestSourceParsingAndCompiling(object): source = Source("""\ @@ -155,12 +168,12 @@ class TestSourceParsingAndCompiling(object): def test_compile(self): co = _pytest._code.compile("x=3") d = {} - exec (co, d) + exec(co, d) assert d['x'] == 3 def test_compile_and_getsource_simple(self): co = _pytest._code.compile("x=3") - exec (co) + exec(co) source = _pytest._code.Source(co) assert str(source) == "x=3" @@ -175,22 +188,22 @@ class TestSourceParsingAndCompiling(object): def f(): raise ValueError() """) - source1 = py.std.inspect.getsource(co1) + source1 = inspect.getsource(co1) assert 'KeyError' in source1 - source2 = py.std.inspect.getsource(co2) + source2 = inspect.getsource(co2) assert 'ValueError' in source2 def test_getstatement(self): - #print str(self.source) + # print str(self.source) ass = str(self.source[1:]) for i in range(1, 4): - #print "trying start in line %r" % self.source[i] + # print "trying start in line %r" % self.source[i] s = self.source.getstatement(i) #x = s.deindent() assert str(s) == ass def test_getstatementrange_triple_quoted(self): - #print str(self.source) + # print str(self.source) source = Source("""hello(''' ''')""") s = source.getstatement(0) @@ -211,12 +224,12 @@ class TestSourceParsingAndCompiling(object): """) assert len(source) == 
7 # check all lineno's that could occur in a traceback - #assert source.getstatementrange(0) == (0, 7) - #assert source.getstatementrange(1) == (1, 5) + # assert source.getstatementrange(0) == (0, 7) + # assert source.getstatementrange(1) == (1, 5) assert source.getstatementrange(2) == (2, 3) assert source.getstatementrange(3) == (3, 4) assert source.getstatementrange(4) == (4, 5) - #assert source.getstatementrange(5) == (0, 7) + # assert source.getstatementrange(5) == (0, 7) assert source.getstatementrange(6) == (6, 7) def test_getstatementrange_bug(self): @@ -258,17 +271,15 @@ class TestSourceParsingAndCompiling(object): assert getstatement(2, source).lines == source.lines[2:3] assert getstatement(3, source).lines == source.lines[3:4] - @pytest.mark.skipif("sys.version_info < (2,6)") def test_getstatementrange_out_of_bounds_py3(self): source = Source("if xxx:\n from .collections import something") r = source.getstatementrange(1) - assert r == (1,2) + assert r == (1, 2) def test_getstatementrange_with_syntaxerror_issue7(self): source = Source(":") pytest.raises(SyntaxError, lambda: source.getstatementrange(0)) - @pytest.mark.skipif("sys.version_info < (2,6)") def test_compile_to_ast(self): import ast source = Source("x = 4") @@ -283,7 +294,7 @@ class TestSourceParsingAndCompiling(object): excinfo = pytest.raises(AssertionError, "f(6)") frame = excinfo.traceback[-1].frame stmt = frame.code.fullsource.getstatement(frame.lineno) - #print "block", str(block) + # print "block", str(block) assert str(stmt).strip().startswith('assert') @pytest.mark.parametrize('name', ['', None, 'my']) @@ -291,9 +302,9 @@ class TestSourceParsingAndCompiling(object): def check(comp, name): co = comp(self.source, name) if not name: - expected = "codegen %s:%d>" %(mypath, mylineno+2+2) + expected = "codegen %s:%d>" % (mypath, mylineno + 2 + 2) else: - expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+2) + expected = "codegen %r %s:%d>" % (name, mypath, mylineno + 2 + 2) fn = co.co_filename assert fn.endswith(expected) @@ -307,6 +318,7 @@ class TestSourceParsingAndCompiling(object): def test_offsetless_synerr(self): pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval') + def test_getstartingblock_singleline(): class A(object): def __init__(self, *args): @@ -315,22 +327,9 @@ def test_getstartingblock_singleline(): x = A('x', 'y') - l = [i for i in x.source.lines if i.strip()] - assert len(l) == 1 + values = [i for i in x.source.lines if i.strip()] + assert len(values) == 1 -def test_getstartingblock_multiline(): - class A(object): - def __init__(self, *args): - frame = sys._getframe(1) - self.source = _pytest._code.Frame(frame).statement - - x = A('x', - 'y' \ - , - 'z') - - l = [i for i in x.source.lines if i.strip()] - assert len(l) == 4 def test_getline_finally(): def c(): pass @@ -345,6 +344,7 @@ def test_getline_finally(): source = excinfo.traceback[-1].statement assert str(source).strip() == 'c(1)' + def test_getfuncsource_dynamic(): source = """ def f(): @@ -374,7 +374,6 @@ def test_deindent(): c = '''while True: pass ''' - import inspect lines = deindent(inspect.getsource(f).splitlines()) assert lines == ["def f():", " c = '''while True:", " pass", "'''"] @@ -386,7 +385,7 @@ def test_deindent(): lines = deindent(source.splitlines()) assert lines == ['', 'def f():', ' def g():', ' pass', ' '] -@pytest.mark.xfail("sys.version_info[:3] < (2,7,0)") + def test_source_of_class_at_eof_without_newline(tmpdir): # this test fails because the implicit inspect.getsource(A) below # does 
not return the "x = 1" last line. @@ -400,10 +399,12 @@ def test_source_of_class_at_eof_without_newline(tmpdir): s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A) assert str(source).strip() == str(s2).strip() + if True: def x(): pass + def test_getsource_fallback(): from _pytest._code.source import getsource expected = """def x(): @@ -411,6 +412,7 @@ def test_getsource_fallback(): src = getsource(x) assert src == expected + def test_idem_compile_and_getsource(): from _pytest._code.source import getsource expected = "def x(): pass" @@ -418,12 +420,14 @@ def test_idem_compile_and_getsource(): src = getsource(co) assert src == expected + def test_findsource_fallback(): from _pytest._code.source import findsource src, lineno = findsource(x) assert 'test_findsource_simple' in str(src) assert src[lineno] == ' def x():' + def test_findsource(): from _pytest._code.source import findsource co = _pytest._code.compile("""if 1: @@ -450,27 +454,30 @@ def test_getfslineno(): fspath, lineno = getfslineno(f) assert fspath.basename == "test_source.py" - assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource + assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource class A(object): pass fspath, lineno = getfslineno(A) - _, A_lineno = py.std.inspect.findsource(A) + _, A_lineno = inspect.findsource(A) assert fspath.basename == "test_source.py" assert lineno == A_lineno assert getfslineno(3) == ("", -1) + class B(object): pass B.__name__ = "B2" assert getfslineno(B)[1] == -1 + def test_code_of_object_instance_with_call(): class A(object): pass pytest.raises(TypeError, lambda: _pytest._code.Source(A())) + class WithCall(object): def __call__(self): pass @@ -490,10 +497,12 @@ def getstatement(lineno, source): ast, start, end = getstatementrange_ast(lineno, source) return source[start:end] + def test_oneline(): source = getstatement(0, "raise ValueError") assert str(source) == "raise ValueError" + def test_comment_and_no_newline_at_end(): from _pytest._code.source import getstatementrange_ast source = Source(['def test_basic_complex():', @@ -502,10 +511,12 @@ def test_comment_and_no_newline_at_end(): ast, start, end = getstatementrange_ast(1, source) assert end == 2 + def test_oneline_and_comment(): source = getstatement(0, "raise ValueError\n#hello") assert str(source) == "raise ValueError" + @pytest.mark.xfail(hasattr(sys, "pypy_version_info"), reason='does not work on pypy') def test_comments(): @@ -521,29 +532,33 @@ def test_comments(): comment 4 """ ''' - for line in range(2,6): + for line in range(2, 6): assert str(getstatement(line, source)) == ' x = 1' - for line in range(6,10): + for line in range(6, 10): assert str(getstatement(line, source)) == ' assert False' assert str(getstatement(10, source)) == '"""' + def test_comment_in_statement(): source = '''test(foo=1, # comment 1 bar=2) ''' - for line in range(1,3): + for line in range(1, 3): assert str(getstatement(line, source)) == \ - 'test(foo=1,\n # comment 1\n bar=2)' + 'test(foo=1,\n # comment 1\n bar=2)' + def test_single_line_else(): source = getstatement(1, "if False: 2\nelse: 3") assert str(source) == "else: 3" + def test_single_line_finally(): source = getstatement(1, "try: 1\nfinally: 3") assert str(source) == "finally: 3" + def test_issue55(): source = ('def round_trip(dinp):\n assert 1 == dinp\n' 'def test_rt():\n round_trip("""\n""")\n') @@ -560,6 +575,7 @@ x = 3 """) assert str(source) == "raise ValueError(\n 23\n)" + class TestTry(object): pytestmark = astonly source = """\ @@ 
-587,6 +603,7 @@ else: source = getstatement(5, self.source) assert str(source) == " raise KeyError()" + class TestTryFinally(object): source = """\ try: @@ -604,7 +621,6 @@ finally: assert str(source) == " raise IndexError(1)" - class TestIf(object): pytestmark = astonly source = """\ @@ -632,6 +648,7 @@ else: source = getstatement(5, self.source) assert str(source) == " y = 7" + def test_semicolon(): s = """\ hello ; pytest.skip() @@ -639,6 +656,7 @@ hello ; pytest.skip() source = getstatement(0, s) assert str(source) == s.strip() + def test_def_online(): s = """\ def func(): raise ValueError(42) @@ -649,6 +667,7 @@ def something(): source = getstatement(0, s) assert str(source) == "def func(): raise ValueError(42)" + def XXX_test_expression_multiline(): source = """\ something diff --git a/testing/code/test_source_multiline_block.py b/testing/code/test_source_multiline_block.py new file mode 100644 index 000000000..b356d191f --- /dev/null +++ b/testing/code/test_source_multiline_block.py @@ -0,0 +1,26 @@ +# flake8: noqa +import sys + +import _pytest._code + + +def test_getstartingblock_multiline(): + """ + This test was originally found in test_source.py, but it depends on the weird + formatting of the ``x = A`` construct seen here and our autopep8 tool can only exclude entire + files (it does not support excluding lines/blocks using the traditional #noqa comment yet, + see hhatto/autopep8#307). It was considered better to just move this single test to its own + file and exclude it from autopep8 than try to complicate things. + """ + class A(object): + def __init__(self, *args): + frame = sys._getframe(1) + self.source = _pytest._code.Frame(frame).statement + + x = A('x', + 'y' + , + 'z') + + values = [i for i in x.source.lines if i.strip()] + assert len(values) == 4 diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index 0c41a71bf..cb66472c9 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -9,12 +9,16 @@ def test_yield_tests_deprecation(testdir): def test_gen(): yield "m1", func1, 15, 3*5 yield "m2", func1, 42, 6*7 + def test_gen2(): + for k in range(10): + yield func1, 1, 1 """) result = testdir.runpytest('-ra') result.stdout.fnmatch_lines([ '*yield tests are deprecated, and scheduled to be removed in pytest 4.0*', '*2 passed*', ]) + assert result.stdout.str().count('yield tests are deprecated') == 2 def test_funcarg_prefix_deprecation(testdir): @@ -44,6 +48,15 @@ def test_pytest_setup_cfg_deprecated(testdir): result.stdout.fnmatch_lines(['*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*']) +def test_pytest_custom_cfg_deprecated(testdir): + testdir.makefile('.cfg', custom=''' + [pytest] + addopts = --verbose + ''') + result = testdir.runpytest("-c", "custom.cfg") + result.stdout.fnmatch_lines(['*pytest*section in custom.cfg files is deprecated*use*tool:pytest*instead*']) + + def test_str_args_deprecated(tmpdir, testdir): """Deprecate passing strings to pytest.main(). 
Scheduled for removal in pytest-4.0.""" from _pytest.main import EXIT_NOTESTSCOLLECTED @@ -54,7 +67,6 @@ def test_str_args_deprecated(tmpdir, testdir): warnings.append(message) ret = pytest.main("%s -x" % tmpdir, plugins=[Collect()]) - testdir.delete_loaded_modules() msg = ('passing a string to pytest.main() is deprecated, ' 'pass a list of arguments instead.') assert msg in warnings @@ -74,4 +86,118 @@ def test_resultlog_is_deprecated(testdir): pass ''') result = testdir.runpytest('--result-log=%s' % testdir.tmpdir.join('result.log')) - result.stdout.fnmatch_lines(['*--result-log is deprecated and scheduled for removal in pytest 4.0*']) + result.stdout.fnmatch_lines([ + '*--result-log is deprecated and scheduled for removal in pytest 4.0*', + '*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*', + ]) + + +@pytest.mark.filterwarnings('always:Metafunc.addcall is deprecated') +def test_metafunc_addcall_deprecated(testdir): + testdir.makepyfile(""" + def pytest_generate_tests(metafunc): + metafunc.addcall({'i': 1}) + metafunc.addcall({'i': 2}) + def test_func(i): + pass + """) + res = testdir.runpytest('-s') + assert res.ret == 0 + res.stdout.fnmatch_lines([ + "*Metafunc.addcall is deprecated*", + "*2 passed, 2 warnings*", + ]) + + +def test_terminal_reporter_writer_attr(pytestconfig): + """Check that TerminalReporter._tw is also available as 'writer' (#2984) + This attribute is planned to be deprecated in 3.4. + """ + try: + import xdist # noqa + pytest.skip('xdist workers disable the terminal reporter plugin') + except ImportError: + pass + terminal_reporter = pytestconfig.pluginmanager.get_plugin('terminalreporter') + assert terminal_reporter.writer is terminal_reporter._tw + + +@pytest.mark.parametrize('plugin', ['catchlog', 'capturelog']) +def test_pytest_catchlog_deprecated(testdir, plugin): + testdir.makepyfile(""" + def test_func(pytestconfig): + pytestconfig.pluginmanager.register(None, 'pytest_{0}') + """.format(plugin)) + res = testdir.runpytest() + assert res.ret == 0 + res.stdout.fnmatch_lines([ + "*pytest-*log plugin has been merged into the core*", + "*1 passed, 1 warnings*", + ]) + + +def test_pytest_plugins_in_non_top_level_conftest_deprecated(testdir): + from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST + subdirectory = testdir.tmpdir.join("subdirectory") + subdirectory.mkdir() + # create the inner conftest with makeconftest and then move it to the subdirectory + testdir.makeconftest(""" + pytest_plugins=['capture'] + """) + testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py")) + # make the top level conftest + testdir.makeconftest(""" + import warnings + warnings.filterwarnings('always', category=DeprecationWarning) + """) + testdir.makepyfile(""" + def test_func(): + pass + """) + res = testdir.runpytest_subprocess() + assert res.ret == 0 + res.stderr.fnmatch_lines('*' + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]) + + +def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_top_level_conftest(testdir): + from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST + subdirectory = testdir.tmpdir.join('subdirectory') + subdirectory.mkdir() + testdir.makeconftest(""" + import warnings + warnings.filterwarnings('always', category=DeprecationWarning) + pytest_plugins=['capture'] + """) + testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py")) + + testdir.makepyfile(""" + def test_func(): + pass + """) + + res = 
testdir.runpytest_subprocess() + assert res.ret == 0 + res.stderr.fnmatch_lines('*' + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]) + + +def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_false_positives(testdir): + from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST + subdirectory = testdir.tmpdir.join('subdirectory') + subdirectory.mkdir() + testdir.makeconftest(""" + pass + """) + testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py")) + + testdir.makeconftest(""" + import warnings + warnings.filterwarnings('always', category=DeprecationWarning) + pytest_plugins=['capture'] + """) + testdir.makepyfile(""" + def test_func(): + pass + """) + res = testdir.runpytest_subprocess() + assert res.ret == 0 + assert str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0] not in res.stderr.str() diff --git a/testing/freeze/create_executable.py b/testing/freeze/create_executable.py index 8cf259c40..f4f6088ef 100644 --- a/testing/freeze/create_executable.py +++ b/testing/freeze/create_executable.py @@ -10,4 +10,3 @@ if __name__ == '__main__': hidden.extend(['--hidden-import', x]) args = ['pyinstaller', '--noconfirm'] + hidden + ['runtests_script.py'] subprocess.check_call(' '.join(args), shell=True) - diff --git a/testing/freeze/runtests_script.py b/testing/freeze/runtests_script.py index cb961fc6c..d281601c0 100644 --- a/testing/freeze/runtests_script.py +++ b/testing/freeze/runtests_script.py @@ -6,4 +6,4 @@ py.test main(). if __name__ == '__main__': import sys import pytest - sys.exit(pytest.main()) \ No newline at end of file + sys.exit(pytest.main()) diff --git a/testing/freeze/tests/test_trivial.py b/testing/freeze/tests/test_trivial.py index 6cf6b05ad..45622b850 100644 --- a/testing/freeze/tests/test_trivial.py +++ b/testing/freeze/tests/test_trivial.py @@ -2,5 +2,6 @@ def test_upper(): assert 'foo'.upper() == 'FOO' + def test_lower(): - assert 'FOO'.lower() == 'foo' \ No newline at end of file + assert 'FOO'.lower() == 'foo' diff --git a/testing/freeze/tox_run.py b/testing/freeze/tox_run.py index 5310ac1b7..3fc388040 100644 --- a/testing/freeze/tox_run.py +++ b/testing/freeze/tox_run.py @@ -9,4 +9,4 @@ if __name__ == '__main__': executable = os.path.join(os.getcwd(), 'dist', 'runtests_script', 'runtests_script') if sys.platform.startswith('win'): executable += '.exe' - sys.exit(os.system('%s tests' % executable)) \ No newline at end of file + sys.exit(os.system('%s tests' % executable)) diff --git a/testing/logging/test_fixture.py b/testing/logging/test_fixture.py new file mode 100644 index 000000000..24576719d --- /dev/null +++ b/testing/logging/test_fixture.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +import logging + +import pytest + +logger = logging.getLogger(__name__) +sublogger = logging.getLogger(__name__ + '.baz') + + +def test_fixture_help(testdir): + result = testdir.runpytest('--fixtures') + result.stdout.fnmatch_lines(['*caplog*']) + + +def test_change_level(caplog): + caplog.set_level(logging.INFO) + logger.debug('handler DEBUG level') + logger.info('handler INFO level') + + caplog.set_level(logging.CRITICAL, logger=sublogger.name) + sublogger.warning('logger WARNING level') + sublogger.critical('logger CRITICAL level') + + assert 'DEBUG' not in caplog.text + assert 'INFO' in caplog.text + assert 'WARNING' not in caplog.text + assert 'CRITICAL' in caplog.text + + +def test_change_level_undo(testdir): + """Ensure that 'set_level' is undone after the end of the test""" + testdir.makepyfile(''' + 
import logging + + def test1(caplog): + caplog.set_level(logging.INFO) + # using + operator here so fnmatch_lines doesn't match the code in the traceback + logging.info('log from ' + 'test1') + assert 0 + + def test2(caplog): + # using + operator here so fnmatch_lines doesn't match the code in the traceback + logging.info('log from ' + 'test2') + assert 0 + ''') + result = testdir.runpytest_subprocess() + result.stdout.fnmatch_lines([ + '*log from test1*', + '*2 failed in *', + ]) + assert 'log from test2' not in result.stdout.str() + + +def test_with_statement(caplog): + with caplog.at_level(logging.INFO): + logger.debug('handler DEBUG level') + logger.info('handler INFO level') + + with caplog.at_level(logging.CRITICAL, logger=sublogger.name): + sublogger.warning('logger WARNING level') + sublogger.critical('logger CRITICAL level') + + assert 'DEBUG' not in caplog.text + assert 'INFO' in caplog.text + assert 'WARNING' not in caplog.text + assert 'CRITICAL' in caplog.text + + +def test_log_access(caplog): + caplog.set_level(logging.INFO) + logger.info('boo %s', 'arg') + assert caplog.records[0].levelname == 'INFO' + assert caplog.records[0].msg == 'boo %s' + assert 'boo arg' in caplog.text + + +def test_record_tuples(caplog): + caplog.set_level(logging.INFO) + logger.info('boo %s', 'arg') + + assert caplog.record_tuples == [ + (__name__, logging.INFO, 'boo arg'), + ] + + +def test_unicode(caplog): + caplog.set_level(logging.INFO) + logger.info(u'bū') + assert caplog.records[0].levelname == 'INFO' + assert caplog.records[0].msg == u'bū' + assert u'bū' in caplog.text + + +def test_clear(caplog): + caplog.set_level(logging.INFO) + logger.info(u'bū') + assert len(caplog.records) + assert caplog.text + caplog.clear() + assert not len(caplog.records) + assert not caplog.text + + +@pytest.fixture +def logging_during_setup_and_teardown(caplog): + caplog.set_level('INFO') + logger.info('a_setup_log') + yield + logger.info('a_teardown_log') + assert [x.message for x in caplog.get_records('teardown')] == ['a_teardown_log'] + + +def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown): + assert not caplog.records + assert not caplog.get_records('call') + logger.info('a_call_log') + assert [x.message for x in caplog.get_records('call')] == ['a_call_log'] + + assert [x.message for x in caplog.get_records('setup')] == ['a_setup_log'] + + # This reachers into private API, don't use this type of thing in real tests! 
+ assert set(caplog._item.catch_log_handlers.keys()) == {'setup', 'call'} diff --git a/testing/logging/test_formatter.py b/testing/logging/test_formatter.py new file mode 100644 index 000000000..10a921470 --- /dev/null +++ b/testing/logging/test_formatter.py @@ -0,0 +1,29 @@ +import logging + +import py.io +from _pytest.logging import ColoredLevelFormatter + + +def test_coloredlogformatter(): + logfmt = '%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s' + + record = logging.LogRecord( + name='dummy', level=logging.INFO, pathname='dummypath', lineno=10, + msg='Test Message', args=(), exc_info=False) + + class ColorConfig(object): + class option(object): + pass + + tw = py.io.TerminalWriter() + tw.hasmarkup = True + formatter = ColoredLevelFormatter(tw, logfmt) + output = formatter.format(record) + assert output == ('dummypath 10 ' + '\x1b[32mINFO \x1b[0m Test Message') + + tw.hasmarkup = False + formatter = ColoredLevelFormatter(tw, logfmt) + output = formatter.format(record) + assert output == ('dummypath 10 ' + 'INFO Test Message') diff --git a/testing/logging/test_reporting.py b/testing/logging/test_reporting.py new file mode 100644 index 000000000..699df0e60 --- /dev/null +++ b/testing/logging/test_reporting.py @@ -0,0 +1,777 @@ +# -*- coding: utf-8 -*- +import re +import os + +import six + +import pytest + + +def test_nothing_logged(testdir): + testdir.makepyfile(''' + import sys + + def test_foo(): + sys.stdout.write('text going to stdout') + sys.stderr.write('text going to stderr') + assert False + ''') + result = testdir.runpytest() + assert result.ret == 1 + result.stdout.fnmatch_lines(['*- Captured stdout call -*', + 'text going to stdout']) + result.stdout.fnmatch_lines(['*- Captured stderr call -*', + 'text going to stderr']) + with pytest.raises(pytest.fail.Exception): + result.stdout.fnmatch_lines(['*- Captured *log call -*']) + + +def test_messages_logged(testdir): + testdir.makepyfile(''' + import sys + import logging + + logger = logging.getLogger(__name__) + + def test_foo(): + sys.stdout.write('text going to stdout') + sys.stderr.write('text going to stderr') + logger.info('text going to logger') + assert False + ''') + result = testdir.runpytest('--log-level=INFO') + assert result.ret == 1 + result.stdout.fnmatch_lines(['*- Captured *log call -*', + '*text going to logger*']) + result.stdout.fnmatch_lines(['*- Captured stdout call -*', + 'text going to stdout']) + result.stdout.fnmatch_lines(['*- Captured stderr call -*', + 'text going to stderr']) + + +def test_root_logger_affected(testdir): + testdir.makepyfile(""" + import logging + logger = logging.getLogger() + def test_foo(): + logger.info('info text ' + 'going to logger') + logger.warning('warning text ' + 'going to logger') + logger.error('error text ' + 'going to logger') + + assert 0 + """) + log_file = testdir.tmpdir.join('pytest.log').strpath + result = testdir.runpytest('--log-level=ERROR', '--log-file=pytest.log') + assert result.ret == 1 + + # the capture log calls in the stdout section only contain the + # logger.error msg, because --log-level=ERROR + result.stdout.fnmatch_lines(['*error text going to logger*']) + with pytest.raises(pytest.fail.Exception): + result.stdout.fnmatch_lines(['*warning text going to logger*']) + with pytest.raises(pytest.fail.Exception): + result.stdout.fnmatch_lines(['*info text going to logger*']) + + # the log file should contain the warning and the error log messages and + # not the info one, because the default level of the root logger is + # WARNING. 
+ assert os.path.isfile(log_file) + with open(log_file) as rfh: + contents = rfh.read() + assert "info text going to logger" not in contents + assert "warning text going to logger" in contents + assert "error text going to logger" in contents + + +def test_log_cli_level_log_level_interaction(testdir): + testdir.makepyfile(""" + import logging + logger = logging.getLogger() + + def test_foo(): + logger.debug('debug text ' + 'going to logger') + logger.info('info text ' + 'going to logger') + logger.warning('warning text ' + 'going to logger') + logger.error('error text ' + 'going to logger') + assert 0 + """) + + result = testdir.runpytest('--log-cli-level=INFO', '--log-level=ERROR') + assert result.ret == 1 + + result.stdout.fnmatch_lines([ + '*-- live log call --*', + '*INFO*info text going to logger', + '*WARNING*warning text going to logger', + '*ERROR*error text going to logger', + '=* 1 failed in *=', + ]) + assert 'DEBUG' not in result.stdout.str() + + +def test_setup_logging(testdir): + testdir.makepyfile(''' + import logging + + logger = logging.getLogger(__name__) + + def setup_function(function): + logger.info('text going to logger from setup') + + def test_foo(): + logger.info('text going to logger from call') + assert False + ''') + result = testdir.runpytest('--log-level=INFO') + assert result.ret == 1 + result.stdout.fnmatch_lines(['*- Captured *log setup -*', + '*text going to logger from setup*', + '*- Captured *log call -*', + '*text going to logger from call*']) + + +def test_teardown_logging(testdir): + testdir.makepyfile(''' + import logging + + logger = logging.getLogger(__name__) + + def test_foo(): + logger.info('text going to logger from call') + + def teardown_function(function): + logger.info('text going to logger from teardown') + assert False + ''') + result = testdir.runpytest('--log-level=INFO') + assert result.ret == 1 + result.stdout.fnmatch_lines(['*- Captured *log call -*', + '*text going to logger from call*', + '*- Captured *log teardown -*', + '*text going to logger from teardown*']) + + +def test_disable_log_capturing(testdir): + testdir.makepyfile(''' + import sys + import logging + + logger = logging.getLogger(__name__) + + def test_foo(): + sys.stdout.write('text going to stdout') + logger.warning('catch me if you can!') + sys.stderr.write('text going to stderr') + assert False + ''') + result = testdir.runpytest('--no-print-logs') + print(result.stdout) + assert result.ret == 1 + result.stdout.fnmatch_lines(['*- Captured stdout call -*', + 'text going to stdout']) + result.stdout.fnmatch_lines(['*- Captured stderr call -*', + 'text going to stderr']) + with pytest.raises(pytest.fail.Exception): + result.stdout.fnmatch_lines(['*- Captured *log call -*']) + + +def test_disable_log_capturing_ini(testdir): + testdir.makeini( + ''' + [pytest] + log_print=False + ''' + ) + testdir.makepyfile(''' + import sys + import logging + + logger = logging.getLogger(__name__) + + def test_foo(): + sys.stdout.write('text going to stdout') + logger.warning('catch me if you can!') + sys.stderr.write('text going to stderr') + assert False + ''') + result = testdir.runpytest() + print(result.stdout) + assert result.ret == 1 + result.stdout.fnmatch_lines(['*- Captured stdout call -*', + 'text going to stdout']) + result.stdout.fnmatch_lines(['*- Captured stderr call -*', + 'text going to stderr']) + with pytest.raises(pytest.fail.Exception): + result.stdout.fnmatch_lines(['*- Captured *log call -*']) + + +@pytest.mark.parametrize('enabled', [True, False]) +def 
test_log_cli_enabled_disabled(testdir, enabled): + msg = 'critical message logged by test' + testdir.makepyfile(''' + import logging + def test_log_cli(): + logging.critical("{}") + '''.format(msg)) + if enabled: + testdir.makeini(''' + [pytest] + log_cli=true + ''') + result = testdir.runpytest() + if enabled: + result.stdout.fnmatch_lines([ + 'test_log_cli_enabled_disabled.py::test_log_cli ', + '*-- live log call --*', + 'test_log_cli_enabled_disabled.py* CRITICAL critical message logged by test', + 'PASSED*', + ]) + else: + assert msg not in result.stdout.str() + + +def test_log_cli_default_level(testdir): + # Default log file level + testdir.makepyfile(''' + import pytest + import logging + def test_log_cli(request): + plugin = request.config.pluginmanager.getplugin('logging-plugin') + assert plugin.log_cli_handler.level == logging.NOTSET + logging.getLogger('catchlog').info("INFO message won't be shown") + logging.getLogger('catchlog').warning("WARNING message will be shown") + ''') + testdir.makeini(''' + [pytest] + log_cli=true + ''') + + result = testdir.runpytest() + + # fnmatch_lines does an assertion internally + result.stdout.fnmatch_lines([ + 'test_log_cli_default_level.py::test_log_cli ', + 'test_log_cli_default_level.py*WARNING message will be shown*', + ]) + assert "INFO message won't be shown" not in result.stdout.str() + # make sure that that we get a '0' exit code for the testsuite + assert result.ret == 0 + + +def test_log_cli_default_level_multiple_tests(testdir, request): + """Ensure we reset the first newline added by the live logger between tests""" + filename = request.node.name + '.py' + testdir.makepyfile(''' + import logging + + def test_log_1(): + logging.warning("log message from test_log_1") + + def test_log_2(): + logging.warning("log message from test_log_2") + ''') + testdir.makeini(''' + [pytest] + log_cli=true + ''') + + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '{}::test_log_1 '.format(filename), + '*WARNING*log message from test_log_1*', + 'PASSED *50%*', + '{}::test_log_2 '.format(filename), + '*WARNING*log message from test_log_2*', + 'PASSED *100%*', + '=* 2 passed in *=', + ]) + + +def test_log_cli_default_level_sections(testdir, request): + """Check that with live logging enable we are printing the correct headers during + start/setup/call/teardown/finish.""" + filename = request.node.name + '.py' + testdir.makeconftest(''' + import pytest + import logging + + def pytest_runtest_logstart(): + logging.warning('>>>>> START >>>>>') + + def pytest_runtest_logfinish(): + logging.warning('<<<<< END <<<<<<<') + ''') + + testdir.makepyfile(''' + import pytest + import logging + + @pytest.fixture + def fix(request): + logging.warning("log message from setup of {}".format(request.node.name)) + yield + logging.warning("log message from teardown of {}".format(request.node.name)) + + def test_log_1(fix): + logging.warning("log message from test_log_1") + + def test_log_2(fix): + logging.warning("log message from test_log_2") + ''') + testdir.makeini(''' + [pytest] + log_cli=true + ''') + + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '{}::test_log_1 '.format(filename), + '*-- live log start --*', + '*WARNING* >>>>> START >>>>>*', + '*-- live log setup --*', + '*WARNING*log message from setup of test_log_1*', + '*-- live log call --*', + '*WARNING*log message from test_log_1*', + 'PASSED *50%*', + '*-- live log teardown --*', + '*WARNING*log message from teardown of test_log_1*', + '*-- live log finish --*', + '*WARNING* 
<<<<< END <<<<<<<*', + + '{}::test_log_2 '.format(filename), + '*-- live log start --*', + '*WARNING* >>>>> START >>>>>*', + '*-- live log setup --*', + '*WARNING*log message from setup of test_log_2*', + '*-- live log call --*', + '*WARNING*log message from test_log_2*', + 'PASSED *100%*', + '*-- live log teardown --*', + '*WARNING*log message from teardown of test_log_2*', + '*-- live log finish --*', + '*WARNING* <<<<< END <<<<<<<*', + '=* 2 passed in *=', + ]) + + +def test_live_logs_unknown_sections(testdir, request): + """Check that with live logging enable we are printing the correct headers during + start/setup/call/teardown/finish.""" + filename = request.node.name + '.py' + testdir.makeconftest(''' + import pytest + import logging + + def pytest_runtest_protocol(item, nextitem): + logging.warning('Unknown Section!') + + def pytest_runtest_logstart(): + logging.warning('>>>>> START >>>>>') + + def pytest_runtest_logfinish(): + logging.warning('<<<<< END <<<<<<<') + ''') + + testdir.makepyfile(''' + import pytest + import logging + + @pytest.fixture + def fix(request): + logging.warning("log message from setup of {}".format(request.node.name)) + yield + logging.warning("log message from teardown of {}".format(request.node.name)) + + def test_log_1(fix): + logging.warning("log message from test_log_1") + + ''') + testdir.makeini(''' + [pytest] + log_cli=true + ''') + + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*WARNING*Unknown Section*', + '{}::test_log_1 '.format(filename), + '*WARNING* >>>>> START >>>>>*', + '*-- live log setup --*', + '*WARNING*log message from setup of test_log_1*', + '*-- live log call --*', + '*WARNING*log message from test_log_1*', + 'PASSED *100%*', + '*-- live log teardown --*', + '*WARNING*log message from teardown of test_log_1*', + '*WARNING* <<<<< END <<<<<<<*', + '=* 1 passed in *=', + ]) + + +def test_sections_single_new_line_after_test_outcome(testdir, request): + """Check that only a single new line is written between log messages during + teardown/finish.""" + filename = request.node.name + '.py' + testdir.makeconftest(''' + import pytest + import logging + + def pytest_runtest_logstart(): + logging.warning('>>>>> START >>>>>') + + def pytest_runtest_logfinish(): + logging.warning('<<<<< END <<<<<<<') + logging.warning('<<<<< END <<<<<<<') + ''') + + testdir.makepyfile(''' + import pytest + import logging + + @pytest.fixture + def fix(request): + logging.warning("log message from setup of {}".format(request.node.name)) + yield + logging.warning("log message from teardown of {}".format(request.node.name)) + logging.warning("log message from teardown of {}".format(request.node.name)) + + def test_log_1(fix): + logging.warning("log message from test_log_1") + ''') + testdir.makeini(''' + [pytest] + log_cli=true + ''') + + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '{}::test_log_1 '.format(filename), + '*-- live log start --*', + '*WARNING* >>>>> START >>>>>*', + '*-- live log setup --*', + '*WARNING*log message from setup of test_log_1*', + '*-- live log call --*', + '*WARNING*log message from test_log_1*', + 'PASSED *100%*', + '*-- live log teardown --*', + '*WARNING*log message from teardown of test_log_1*', + '*-- live log finish --*', + '*WARNING* <<<<< END <<<<<<<*', + '*WARNING* <<<<< END <<<<<<<*', + '=* 1 passed in *=', + ]) + assert re.search(r'(.+)live log teardown(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)', + result.stdout.str(), re.MULTILINE) is not None + assert re.search(r'(.+)live log 
finish(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)', + result.stdout.str(), re.MULTILINE) is not None + + +def test_log_cli_level(testdir): + # Default log file level + testdir.makepyfile(''' + import pytest + import logging + def test_log_cli(request): + plugin = request.config.pluginmanager.getplugin('logging-plugin') + assert plugin.log_cli_handler.level == logging.INFO + logging.getLogger('catchlog').debug("This log message won't be shown") + logging.getLogger('catchlog').info("This log message will be shown") + print('PASSED') + ''') + testdir.makeini(''' + [pytest] + log_cli=true + ''') + + result = testdir.runpytest('-s', '--log-cli-level=INFO') + + # fnmatch_lines does an assertion internally + result.stdout.fnmatch_lines([ + 'test_log_cli_level.py*This log message will be shown', + 'PASSED', # 'PASSED' on its own line because the log message prints a new line + ]) + assert "This log message won't be shown" not in result.stdout.str() + + # make sure that that we get a '0' exit code for the testsuite + assert result.ret == 0 + + result = testdir.runpytest('-s', '--log-level=INFO') + + # fnmatch_lines does an assertion internally + result.stdout.fnmatch_lines([ + 'test_log_cli_level.py* This log message will be shown', + 'PASSED', # 'PASSED' on its own line because the log message prints a new line + ]) + assert "This log message won't be shown" not in result.stdout.str() + + # make sure that that we get a '0' exit code for the testsuite + assert result.ret == 0 + + +def test_log_cli_ini_level(testdir): + testdir.makeini( + """ + [pytest] + log_cli=true + log_cli_level = INFO + """) + testdir.makepyfile(''' + import pytest + import logging + def test_log_cli(request): + plugin = request.config.pluginmanager.getplugin('logging-plugin') + assert plugin.log_cli_handler.level == logging.INFO + logging.getLogger('catchlog').debug("This log message won't be shown") + logging.getLogger('catchlog').info("This log message will be shown") + print('PASSED') + ''') + + result = testdir.runpytest('-s') + + # fnmatch_lines does an assertion internally + result.stdout.fnmatch_lines([ + 'test_log_cli_ini_level.py* This log message will be shown', + 'PASSED', # 'PASSED' on its own line because the log message prints a new line + ]) + assert "This log message won't be shown" not in result.stdout.str() + + # make sure that that we get a '0' exit code for the testsuite + assert result.ret == 0 + + +@pytest.mark.parametrize('cli_args', ['', + '--log-level=WARNING', + '--log-file-level=WARNING', + '--log-cli-level=WARNING']) +def test_log_cli_auto_enable(testdir, request, cli_args): + """Check that live logs are enabled if --log-level or --log-cli-level is passed on the CLI. + It should not be auto enabled if the same configs are set on the INI file. 
+ """ + testdir.makepyfile(''' + import pytest + import logging + + def test_log_1(): + logging.info("log message from test_log_1 not to be shown") + logging.warning("log message from test_log_1") + + ''') + testdir.makeini(''' + [pytest] + log_level=INFO + log_cli_level=INFO + ''') + + result = testdir.runpytest(cli_args) + if cli_args == '--log-cli-level=WARNING': + result.stdout.fnmatch_lines([ + '*::test_log_1 ', + '*-- live log call --*', + '*WARNING*log message from test_log_1*', + 'PASSED *100%*', + '=* 1 passed in *=', + ]) + assert 'INFO' not in result.stdout.str() + else: + result.stdout.fnmatch_lines([ + '*test_log_cli_auto_enable*100%*', + '=* 1 passed in *=', + ]) + assert 'INFO' not in result.stdout.str() + assert 'WARNING' not in result.stdout.str() + + +def test_log_file_cli(testdir): + # Default log file level + testdir.makepyfile(''' + import pytest + import logging + def test_log_file(request): + plugin = request.config.pluginmanager.getplugin('logging-plugin') + assert plugin.log_file_handler.level == logging.WARNING + logging.getLogger('catchlog').info("This log message won't be shown") + logging.getLogger('catchlog').warning("This log message will be shown") + print('PASSED') + ''') + + log_file = testdir.tmpdir.join('pytest.log').strpath + + result = testdir.runpytest('-s', '--log-file={0}'.format(log_file), '--log-file-level=WARNING') + + # fnmatch_lines does an assertion internally + result.stdout.fnmatch_lines([ + 'test_log_file_cli.py PASSED', + ]) + + # make sure that that we get a '0' exit code for the testsuite + assert result.ret == 0 + assert os.path.isfile(log_file) + with open(log_file) as rfh: + contents = rfh.read() + assert "This log message will be shown" in contents + assert "This log message won't be shown" not in contents + + +def test_log_file_cli_level(testdir): + # Default log file level + testdir.makepyfile(''' + import pytest + import logging + def test_log_file(request): + plugin = request.config.pluginmanager.getplugin('logging-plugin') + assert plugin.log_file_handler.level == logging.INFO + logging.getLogger('catchlog').debug("This log message won't be shown") + logging.getLogger('catchlog').info("This log message will be shown") + print('PASSED') + ''') + + log_file = testdir.tmpdir.join('pytest.log').strpath + + result = testdir.runpytest('-s', + '--log-file={0}'.format(log_file), + '--log-file-level=INFO') + + # fnmatch_lines does an assertion internally + result.stdout.fnmatch_lines([ + 'test_log_file_cli_level.py PASSED', + ]) + + # make sure that that we get a '0' exit code for the testsuite + assert result.ret == 0 + assert os.path.isfile(log_file) + with open(log_file) as rfh: + contents = rfh.read() + assert "This log message will be shown" in contents + assert "This log message won't be shown" not in contents + + +def test_log_level_not_changed_by_default(testdir): + testdir.makepyfile(''' + import logging + def test_log_file(): + assert logging.getLogger().level == logging.WARNING + ''') + result = testdir.runpytest('-s') + result.stdout.fnmatch_lines('* 1 passed in *') + + +def test_log_file_ini(testdir): + log_file = testdir.tmpdir.join('pytest.log').strpath + + testdir.makeini( + """ + [pytest] + log_file={0} + log_file_level=WARNING + """.format(log_file)) + testdir.makepyfile(''' + import pytest + import logging + def test_log_file(request): + plugin = request.config.pluginmanager.getplugin('logging-plugin') + assert plugin.log_file_handler.level == logging.WARNING + logging.getLogger('catchlog').info("This log message won't 
be shown") + logging.getLogger('catchlog').warning("This log message will be shown") + print('PASSED') + ''') + + result = testdir.runpytest('-s') + + # fnmatch_lines does an assertion internally + result.stdout.fnmatch_lines([ + 'test_log_file_ini.py PASSED', + ]) + + # make sure that that we get a '0' exit code for the testsuite + assert result.ret == 0 + assert os.path.isfile(log_file) + with open(log_file) as rfh: + contents = rfh.read() + assert "This log message will be shown" in contents + assert "This log message won't be shown" not in contents + + +def test_log_file_ini_level(testdir): + log_file = testdir.tmpdir.join('pytest.log').strpath + + testdir.makeini( + """ + [pytest] + log_file={0} + log_file_level = INFO + """.format(log_file)) + testdir.makepyfile(''' + import pytest + import logging + def test_log_file(request): + plugin = request.config.pluginmanager.getplugin('logging-plugin') + assert plugin.log_file_handler.level == logging.INFO + logging.getLogger('catchlog').debug("This log message won't be shown") + logging.getLogger('catchlog').info("This log message will be shown") + print('PASSED') + ''') + + result = testdir.runpytest('-s') + + # fnmatch_lines does an assertion internally + result.stdout.fnmatch_lines([ + 'test_log_file_ini_level.py PASSED', + ]) + + # make sure that that we get a '0' exit code for the testsuite + assert result.ret == 0 + assert os.path.isfile(log_file) + with open(log_file) as rfh: + contents = rfh.read() + assert "This log message will be shown" in contents + assert "This log message won't be shown" not in contents + + +@pytest.mark.parametrize('has_capture_manager', [True, False]) +def test_live_logging_suspends_capture(has_capture_manager, request): + """Test that capture manager is suspended when we emitting messages for live logging. + + This tests the implementation calls instead of behavior because it is difficult/impossible to do it using + ``testdir`` facilities because they do their own capturing. + + We parametrize the test to also make sure _LiveLoggingStreamHandler works correctly if no capture manager plugin + is installed. 
+ """ + import logging + from functools import partial + from _pytest.capture import CaptureManager + from _pytest.logging import _LiveLoggingStreamHandler + + class MockCaptureManager: + calls = [] + + def suspend_global_capture(self): + self.calls.append('suspend_global_capture') + + def resume_global_capture(self): + self.calls.append('resume_global_capture') + + # sanity check + assert CaptureManager.suspend_capture_item + assert CaptureManager.resume_global_capture + + class DummyTerminal(six.StringIO): + + def section(self, *args, **kwargs): + pass + + out_file = DummyTerminal() + capture_manager = MockCaptureManager() if has_capture_manager else None + handler = _LiveLoggingStreamHandler(out_file, capture_manager) + handler.set_when('call') + + logger = logging.getLogger(__name__ + '.test_live_logging_suspends_capture') + logger.addHandler(handler) + request.addfinalizer(partial(logger.removeHandler, handler)) + + logger.critical('some message') + if has_capture_manager: + assert MockCaptureManager.calls == ['suspend_global_capture', 'resume_global_capture'] + else: + assert MockCaptureManager.calls == [] + assert out_file.getvalue() == '\nsome message\n' diff --git a/testing/python/approx.py b/testing/python/approx.py index d7063e215..9ca21bdf8 100644 --- a/testing/python/approx.py +++ b/testing/python/approx.py @@ -1,4 +1,5 @@ # encoding: utf-8 +import operator import sys import pytest import doctest @@ -23,18 +24,24 @@ class MyDocTestRunner(doctest.DocTestRunner): class TestApprox(object): def test_repr_string(self): - # for some reason in Python 2.6 it is not displaying the tolerance representation correctly plus_minus = u'\u00b1' if sys.version_info[0] > 2 else u'+-' tol1, tol2, infr = '1.0e-06', '2.0e-06', 'inf' - if sys.version_info[:2] == (2, 6): - tol1, tol2, infr = '???', '???', '???' assert repr(approx(1.0)) == '1.0 {pm} {tol1}'.format(pm=plus_minus, tol1=tol1) - assert repr(approx([1.0, 2.0])) == '1.0 {pm} {tol1}, 2.0 {pm} {tol2}'.format(pm=plus_minus, tol1=tol1, tol2=tol2) + assert repr(approx([1.0, 2.0])) == 'approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])'.format( + pm=plus_minus, tol1=tol1, tol2=tol2) + assert repr(approx((1.0, 2.0))) == 'approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))'.format( + pm=plus_minus, tol1=tol1, tol2=tol2) assert repr(approx(inf)) == 'inf' assert repr(approx(1.0, rel=nan)) == '1.0 {pm} ???'.format(pm=plus_minus) assert repr(approx(1.0, rel=inf)) == '1.0 {pm} {infr}'.format(pm=plus_minus, infr=infr) assert repr(approx(1.0j, rel=inf)) == '1j' + # Dictionaries aren't ordered, so we need to check both orders. 
+ assert repr(approx({'a': 1.0, 'b': 2.0})) in ( + "approx({{'a': 1.0 {pm} {tol1}, 'b': 2.0 {pm} {tol2}}})".format(pm=plus_minus, tol1=tol1, tol2=tol2), + "approx({{'b': 2.0 {pm} {tol2}, 'a': 1.0 {pm} {tol1}}})".format(pm=plus_minus, tol1=tol1, tol2=tol2), + ) + def test_operator_overloading(self): assert 1 == approx(1, rel=1e-6, abs=1e-12) assert not (1 != approx(1, rel=1e-6, abs=1e-12)) @@ -43,30 +50,30 @@ class TestApprox(object): def test_exactly_equal(self): examples = [ - (2.0, 2.0), - (0.1e200, 0.1e200), - (1.123e-300, 1.123e-300), - (12345, 12345.0), - (0.0, -0.0), - (345678, 345678), - (Decimal('1.0001'), Decimal('1.0001')), - (Fraction(1, 3), Fraction(-1, -3)), + (2.0, 2.0), + (0.1e200, 0.1e200), + (1.123e-300, 1.123e-300), + (12345, 12345.0), + (0.0, -0.0), + (345678, 345678), + (Decimal('1.0001'), Decimal('1.0001')), + (Fraction(1, 3), Fraction(-1, -3)), ] for a, x in examples: assert a == approx(x) def test_opposite_sign(self): examples = [ - (eq, 1e-100, -1e-100), - (ne, 1e100, -1e100), + (eq, 1e-100, -1e-100), + (ne, 1e100, -1e100), ] for op, a, x in examples: assert op(a, approx(x)) def test_zero_tolerance(self): within_1e10 = [ - (1.1e-100, 1e-100), - (-1.1e-100, -1e-100), + (1.1e-100, 1e-100), + (-1.1e-100, -1e-100), ] for a, x in within_1e10: assert x == approx(x, rel=0.0, abs=0.0) @@ -79,11 +86,11 @@ class TestApprox(object): def test_negative_tolerance(self): # Negative tolerances are not allowed. illegal_kwargs = [ - dict(rel=-1e100), - dict(abs=-1e100), - dict(rel=1e100, abs=-1e100), - dict(rel=-1e100, abs=1e100), - dict(rel=-1e100, abs=-1e100), + dict(rel=-1e100), + dict(abs=-1e100), + dict(rel=1e100, abs=-1e100), + dict(rel=-1e100, abs=1e100), + dict(rel=-1e100, abs=-1e100), ] for kwargs in illegal_kwargs: with pytest.raises(ValueError): @@ -92,10 +99,10 @@ class TestApprox(object): def test_inf_tolerance(self): # Everything should be equal if the tolerance is infinite. large_diffs = [ - (1, 1000), - (1e-50, 1e50), - (-1.0, -1e300), - (0.0, 10), + (1, 1000), + (1e-50, 1e50), + (-1.0, -1e300), + (0.0, 10), ] for a, x in large_diffs: assert a != approx(x, rel=0.0, abs=0.0) @@ -107,8 +114,8 @@ class TestApprox(object): # If the relative tolerance is zero but the expected value is infinite, # the actual tolerance is a NaN, which should be an error. illegal_kwargs = [ - dict(rel=inf, abs=0.0), - dict(rel=inf, abs=inf), + dict(rel=inf, abs=0.0), + dict(rel=inf, abs=inf), ] for kwargs in illegal_kwargs: with pytest.raises(ValueError): @@ -116,9 +123,9 @@ class TestApprox(object): def test_nan_tolerance(self): illegal_kwargs = [ - dict(rel=nan), - dict(abs=nan), - dict(rel=nan, abs=nan), + dict(rel=nan), + dict(abs=nan), + dict(rel=nan, abs=nan), ] for kwargs in illegal_kwargs: with pytest.raises(ValueError): @@ -135,15 +142,15 @@ class TestApprox(object): # None of the other tests (except the doctests) should be affected by # the choice of defaults. examples = [ - # Relative tolerance used. - (eq, 1e100 + 1e94, 1e100), - (ne, 1e100 + 2e94, 1e100), - (eq, 1e0 + 1e-6, 1e0), - (ne, 1e0 + 2e-6, 1e0), - # Absolute tolerance used. - (eq, 1e-100, + 1e-106), - (eq, 1e-100, + 2e-106), - (eq, 1e-100, 0), + # Relative tolerance used. + (eq, 1e100 + 1e94, 1e100), + (ne, 1e100 + 2e94, 1e100), + (eq, 1e0 + 1e-6, 1e0), + (ne, 1e0 + 2e-6, 1e0), + # Absolute tolerance used. 
+ (eq, 1e-100, + 1e-106), + (eq, 1e-100, + 2e-106), + (eq, 1e-100, 0), ] for op, a, x in examples: assert op(a, approx(x)) @@ -166,9 +173,9 @@ class TestApprox(object): def test_relative_tolerance(self): within_1e8_rel = [ - (1e8 + 1e0, 1e8), - (1e0 + 1e-8, 1e0), - (1e-8 + 1e-16, 1e-8), + (1e8 + 1e0, 1e8), + (1e0 + 1e-8, 1e0), + (1e-8 + 1e-16, 1e-8), ] for a, x in within_1e8_rel: assert a == approx(x, rel=5e-8, abs=0.0) @@ -176,9 +183,9 @@ class TestApprox(object): def test_absolute_tolerance(self): within_1e8_abs = [ - (1e8 + 9e-9, 1e8), - (1e0 + 9e-9, 1e0), - (1e-8 + 9e-9, 1e-8), + (1e8 + 9e-9, 1e8), + (1e0 + 9e-9, 1e0), + (1e-8 + 9e-9, 1e-8), ] for a, x in within_1e8_abs: assert a == approx(x, rel=0, abs=5e-8) @@ -186,106 +193,171 @@ class TestApprox(object): def test_expecting_zero(self): examples = [ - (ne, 1e-6, 0.0), - (ne, -1e-6, 0.0), - (eq, 1e-12, 0.0), - (eq, -1e-12, 0.0), - (ne, 2e-12, 0.0), - (ne, -2e-12, 0.0), - (ne, inf, 0.0), - (ne, nan, 0.0), - ] + (ne, 1e-6, 0.0), + (ne, -1e-6, 0.0), + (eq, 1e-12, 0.0), + (eq, -1e-12, 0.0), + (ne, 2e-12, 0.0), + (ne, -2e-12, 0.0), + (ne, inf, 0.0), + (ne, nan, 0.0), + ] for op, a, x in examples: assert op(a, approx(x, rel=0.0, abs=1e-12)) assert op(a, approx(x, rel=1e-6, abs=1e-12)) def test_expecting_inf(self): examples = [ - (eq, inf, inf), - (eq, -inf, -inf), - (ne, inf, -inf), - (ne, 0.0, inf), - (ne, nan, inf), + (eq, inf, inf), + (eq, -inf, -inf), + (ne, inf, -inf), + (ne, 0.0, inf), + (ne, nan, inf), ] for op, a, x in examples: assert op(a, approx(x)) def test_expecting_nan(self): examples = [ - (nan, nan), - (-nan, -nan), - (nan, -nan), - (0.0, nan), - (inf, nan), + (eq, nan, nan), + (eq, -nan, -nan), + (eq, nan, -nan), + (ne, 0.0, nan), + (ne, inf, nan), ] - for a, x in examples: - # If there is a relative tolerance and the expected value is NaN, - # the actual tolerance is a NaN, which should be an error. - with pytest.raises(ValueError): - a != approx(x, rel=inf) + for op, a, x in examples: + # Nothing is equal to NaN by default. + assert a != approx(x) - # You can make comparisons against NaN by not specifying a relative - # tolerance, so only an absolute tolerance is calculated. - assert a != approx(x, abs=inf) - - def test_expecting_sequence(self): - within_1e8 = [ - (1e8 + 1e0, 1e8), - (1e0 + 1e-8, 1e0), - (1e-8 + 1e-16, 1e-8), - ] - actual, expected = zip(*within_1e8) - assert actual == approx(expected, rel=5e-8, abs=0.0) - - def test_expecting_sequence_wrong_len(self): - assert [1, 2] != approx([1]) - assert [1, 2] != approx([1,2,3]) - - def test_complex(self): - within_1e6 = [ - ( 1.000001 + 1.0j, 1.0 + 1.0j), - (1.0 + 1.000001j, 1.0 + 1.0j), - (-1.000001 + 1.0j, -1.0 + 1.0j), - (1.0 - 1.000001j, 1.0 - 1.0j), - ] - for a, x in within_1e6: - assert a == approx(x, rel=5e-6, abs=0) - assert a != approx(x, rel=5e-7, abs=0) + # If ``nan_ok=True``, then NaN is equal to NaN. 
+ assert op(a, approx(x, nan_ok=True)) def test_int(self): within_1e6 = [ - (1000001, 1000000), - (-1000001, -1000000), + (1000001, 1000000), + (-1000001, -1000000), ] for a, x in within_1e6: assert a == approx(x, rel=5e-6, abs=0) assert a != approx(x, rel=5e-7, abs=0) + assert approx(x, rel=5e-6, abs=0) == a + assert approx(x, rel=5e-7, abs=0) != a def test_decimal(self): within_1e6 = [ - (Decimal('1.000001'), Decimal('1.0')), - (Decimal('-1.000001'), Decimal('-1.0')), + (Decimal('1.000001'), Decimal('1.0')), + (Decimal('-1.000001'), Decimal('-1.0')), ] for a, x in within_1e6: + assert a == approx(x) assert a == approx(x, rel=Decimal('5e-6'), abs=0) assert a != approx(x, rel=Decimal('5e-7'), abs=0) + assert approx(x, rel=Decimal('5e-6'), abs=0) == a + assert approx(x, rel=Decimal('5e-7'), abs=0) != a def test_fraction(self): within_1e6 = [ - (1 + Fraction(1, 1000000), Fraction(1)), - (-1 - Fraction(-1, 1000000), Fraction(-1)), + (1 + Fraction(1, 1000000), Fraction(1)), + (-1 - Fraction(-1, 1000000), Fraction(-1)), ] for a, x in within_1e6: assert a == approx(x, rel=5e-6, abs=0) assert a != approx(x, rel=5e-7, abs=0) + assert approx(x, rel=5e-6, abs=0) == a + assert approx(x, rel=5e-7, abs=0) != a + + def test_complex(self): + within_1e6 = [ + (1.000001 + 1.0j, 1.0 + 1.0j), + (1.0 + 1.000001j, 1.0 + 1.0j), + (-1.000001 + 1.0j, -1.0 + 1.0j), + (1.0 - 1.000001j, 1.0 - 1.0j), + ] + for a, x in within_1e6: + assert a == approx(x, rel=5e-6, abs=0) + assert a != approx(x, rel=5e-7, abs=0) + assert approx(x, rel=5e-6, abs=0) == a + assert approx(x, rel=5e-7, abs=0) != a + + def test_list(self): + actual = [1 + 1e-7, 2 + 1e-8] + expected = [1, 2] + + # Return false if any element is outside the tolerance. + assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == actual + assert approx(expected, rel=5e-8, abs=0) != actual + + def test_list_wrong_len(self): + assert [1, 2] != approx([1]) + assert [1, 2] != approx([1, 2, 3]) + + def test_tuple(self): + actual = (1 + 1e-7, 2 + 1e-8) + expected = (1, 2) + + # Return false if any element is outside the tolerance. + assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == actual + assert approx(expected, rel=5e-8, abs=0) != actual + + def test_tuple_wrong_len(self): + assert (1, 2) != approx((1,)) + assert (1, 2) != approx((1, 2, 3)) + + def test_dict(self): + actual = {'a': 1 + 1e-7, 'b': 2 + 1e-8} + # Dictionaries became ordered in python3.6, so switch up the order here + # to make sure it doesn't matter. + expected = {'b': 2, 'a': 1} + + # Return false if any element is outside the tolerance. + assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == actual + assert approx(expected, rel=5e-8, abs=0) != actual + + def test_dict_wrong_len(self): + assert {'a': 1, 'b': 2} != approx({'a': 1}) + assert {'a': 1, 'b': 2} != approx({'a': 1, 'c': 2}) + assert {'a': 1, 'b': 2} != approx({'a': 1, 'b': 2, 'c': 3}) + + def test_numpy_array(self): + np = pytest.importorskip('numpy') + + actual = np.array([1 + 1e-7, 2 + 1e-8]) + expected = np.array([1, 2]) + + # Return false if any element is outside the tolerance. 
+ assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == expected + assert approx(expected, rel=5e-8, abs=0) != actual + + # Should be able to compare lists with numpy arrays. + assert list(actual) == approx(expected, rel=5e-7, abs=0) + assert list(actual) != approx(expected, rel=5e-8, abs=0) + assert actual == approx(list(expected), rel=5e-7, abs=0) + assert actual != approx(list(expected), rel=5e-8, abs=0) + + def test_numpy_array_wrong_shape(self): + np = pytest.importorskip('numpy') + + a12 = np.array([[1, 2]]) + a21 = np.array([[1], [2]]) + + assert a12 != approx(a21) + assert a21 != approx(a12) def test_doctests(self): parser = doctest.DocTestParser() test = parser.get_doctest( - approx.__doc__, - {'approx': approx}, - approx.__name__, - None, None, + approx.__doc__, + {'approx': approx}, + approx.__name__, + None, None, ) runner = MyDocTestRunner() runner.run(test) @@ -301,12 +373,43 @@ class TestApprox(object): assert [3] == [pytest.approx(4)] """) expected = '4.0e-06' - # for some reason in Python 2.6 it is not displaying the tolerance representation correctly - if sys.version_info[:2] == (2, 6): - expected = '???' result = testdir.runpytest() result.stdout.fnmatch_lines([ '*At index 0 diff: 3 != 4 * {0}'.format(expected), '=* 1 failed in *=', ]) + @pytest.mark.parametrize('op', [ + pytest.param(operator.le, id='<='), + pytest.param(operator.lt, id='<'), + pytest.param(operator.ge, id='>='), + pytest.param(operator.gt, id='>'), + ]) + def test_comparison_operator_type_error(self, op): + """ + pytest.approx should raise TypeError for operators other than == and != (#2003). + """ + with pytest.raises(TypeError): + op(1, approx(1, rel=1e-6, abs=1e-12)) + + def test_numpy_array_with_scalar(self): + np = pytest.importorskip('numpy') + + actual = np.array([1 + 1e-7, 1 - 1e-8]) + expected = 1.0 + + assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == actual + assert approx(expected, rel=5e-8, abs=0) != actual + + def test_numpy_scalar_with_array(self): + np = pytest.importorskip('numpy') + + actual = 1.0 + expected = np.array([1 + 1e-7, 1 - 1e-8]) + + assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == actual + assert approx(expected, rel=5e-8, abs=0) != actual diff --git a/testing/python/collect.py b/testing/python/collect.py index 236421f1c..de40486a8 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -4,12 +4,11 @@ import sys from textwrap import dedent import _pytest._code -import py import pytest -from _pytest.main import ( - Collector, - EXIT_NOTESTSCOLLECTED -) +from _pytest.main import EXIT_NOTESTSCOLLECTED +from _pytest.nodes import Collector + +ignore_parametrized_marks = pytest.mark.filterwarnings('ignore:Applying marks directly to parameters') class TestModule(object): @@ -22,7 +21,7 @@ class TestModule(object): b = testdir.mkdir("b") p = a.ensure("test_whatever.py") p.pyimport() - del py.std.sys.modules['test_whatever'] + del sys.modules['test_whatever'] b.ensure("test_whatever.py") result = testdir.runpytest() result.stdout.fnmatch_lines([ @@ -143,6 +142,29 @@ class TestClass(object): "*collected 0*", ]) + def test_static_method(self, testdir): + """Support for collecting staticmethod tests (#2528, #2699)""" + testdir.getmodulecol(""" + import pytest + class 
Test(object): + @staticmethod + def test_something(): + pass + + @pytest.fixture + def fix(self): + return 1 + + @staticmethod + def test_fix(fix): + assert fix == 1 + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*collected 2 items*", + "*2 passed in*", + ]) + def test_setup_teardown_class_as_classmethod(self, testdir): testdir.makepyfile(test_mod1=""" class TestClassMethod(object): @@ -419,10 +441,10 @@ class TestFunction(object): pass f1 = pytest.Function(name="name", parent=session, config=config, - args=(1,), callobj=func1) + args=(1,), callobj=func1) assert f1 == f1 - f2 = pytest.Function(name="name",config=config, - callobj=func2, parent=session) + f2 = pytest.Function(name="name", config=config, + callobj=func2, parent=session) assert f1 != f2 def test_issue197_parametrize_emptyset(self, testdir): @@ -476,7 +498,6 @@ class TestFunction(object): rec = testdir.inline_run() rec.assertoutcome(passed=2) - def test_parametrize_with_non_hashable_values_indirect(self, testdir): """Test parametrization with non-hashable values with indirect parametrization.""" testdir.makepyfile(""" @@ -504,7 +525,6 @@ class TestFunction(object): rec = testdir.inline_run() rec.assertoutcome(passed=2) - def test_parametrize_overrides_fixture(self, testdir): """Test parametrization when parameter overrides existing fixture with same name.""" testdir.makepyfile(""" @@ -532,7 +552,6 @@ class TestFunction(object): rec = testdir.inline_run() rec.assertoutcome(passed=3) - def test_parametrize_overrides_parametrized_fixture(self, testdir): """Test parametrization when parameter overrides existing parametrized fixture with same name.""" testdir.makepyfile(""" @@ -550,7 +569,8 @@ class TestFunction(object): rec = testdir.inline_run() rec.assertoutcome(passed=1) - def test_parametrize_with_mark(selfself, testdir): + @ignore_parametrized_marks + def test_parametrize_with_mark(self, testdir): items = testdir.getitems(""" import pytest @pytest.mark.foo @@ -623,6 +643,7 @@ class TestFunction(object): assert colitems[2].name == 'test2[a-c]' assert colitems[3].name == 'test2[b-c]' + @ignore_parametrized_marks def test_parametrize_skipif(self, testdir): testdir.makepyfile(""" import pytest @@ -636,6 +657,7 @@ class TestFunction(object): result = testdir.runpytest() result.stdout.fnmatch_lines('* 2 passed, 1 skipped in *') + @ignore_parametrized_marks def test_parametrize_skip(self, testdir): testdir.makepyfile(""" import pytest @@ -649,6 +671,7 @@ class TestFunction(object): result = testdir.runpytest() result.stdout.fnmatch_lines('* 2 passed, 1 skipped in *') + @ignore_parametrized_marks def test_parametrize_skipif_no_skip(self, testdir): testdir.makepyfile(""" import pytest @@ -662,6 +685,7 @@ class TestFunction(object): result = testdir.runpytest() result.stdout.fnmatch_lines('* 1 failed, 2 passed in *') + @ignore_parametrized_marks def test_parametrize_xfail(self, testdir): testdir.makepyfile(""" import pytest @@ -675,6 +699,7 @@ class TestFunction(object): result = testdir.runpytest() result.stdout.fnmatch_lines('* 2 passed, 1 xfailed in *') + @ignore_parametrized_marks def test_parametrize_passed(self, testdir): testdir.makepyfile(""" import pytest @@ -688,6 +713,7 @@ class TestFunction(object): result = testdir.runpytest() result.stdout.fnmatch_lines('* 2 passed, 1 xpassed in *') + @ignore_parametrized_marks def test_parametrize_xfail_passed(self, testdir): testdir.makepyfile(""" import pytest @@ -724,7 +750,7 @@ class TestSorting(object): assert fn1 == fn2 assert fn1 != modcol - if 
py.std.sys.version_info < (3, 0): + if sys.version_info < (3, 0): assert cmp(fn1, fn2) == 0 assert hash(fn1) == hash(fn2) @@ -733,11 +759,11 @@ class TestSorting(object): assert not (fn1 == fn3) assert fn1 != fn3 - for fn in fn1,fn2,fn3: + for fn in fn1, fn2, fn3: assert fn != 3 assert fn != modcol - assert fn != [1,2,3] - assert [1,2,3] != fn + assert fn != [1, 2, 3] + assert [1, 2, 3] != fn assert modcol != fn def test_allow_sane_sorting_for_decorators(self, testdir): @@ -782,10 +808,12 @@ class TestConftestCustomization(object): def test_customized_pymakemodule_issue205_subdir(self, testdir): b = testdir.mkdir("a").mkdir("b") b.join("conftest.py").write(_pytest._code.Source(""" - def pytest_pycollect_makemodule(__multicall__): - mod = __multicall__.execute() + import pytest + @pytest.hookimpl(hookwrapper=True) + def pytest_pycollect_makemodule(): + outcome = yield + mod = outcome.get_result() mod.obj.hello = "world" - return mod """)) b.join("test_module.py").write(_pytest._code.Source(""" def test_hello(): @@ -802,7 +830,7 @@ class TestConftestCustomization(object): def pytest_pycollect_makeitem(): outcome = yield if outcome.excinfo is None: - result = outcome.result + result = outcome.get_result() if result: for func in result: func._some123 = "world" @@ -836,11 +864,11 @@ class TestConftestCustomization(object): def test_makeitem_non_underscore(self, testdir, monkeypatch): modcol = testdir.getmodulecol("def _hello(): pass") - l = [] + values = [] monkeypatch.setattr(pytest.Module, 'makeitem', - lambda self, name, obj: l.append(name)) - l = modcol.collect() - assert '_hello' not in l + lambda self, name, obj: values.append(name)) + values = modcol.collect() + assert '_hello' not in values def test_issue2369_collect_module_fileext(self, testdir): """Ensure we can collect files with weird file extensions as Python @@ -851,10 +879,10 @@ class TestConftestCustomization(object): import sys, os, imp from _pytest.python import Module - class Loader: + class Loader(object): def load_module(self, name): return imp.load_source(name, name + ".narf") - class Finder: + class Finder(object): def find_module(self, name, path=None): if os.path.exists(name + ".narf"): return Loader() @@ -870,6 +898,7 @@ class TestConftestCustomization(object): result = testdir.runpytest_subprocess() result.stdout.fnmatch_lines('*1 passed*') + def test_setup_only_available_in_subdir(testdir): sub1 = testdir.mkpydir("sub1") sub2 = testdir.mkpydir("sub2") @@ -896,6 +925,7 @@ def test_setup_only_available_in_subdir(testdir): result = testdir.runpytest("-v", "-s") result.assert_outcomes(passed=2) + def test_modulecol_roundtrip(testdir): modcol = testdir.getmodulecol("pass", withinit=True) trail = modcol.nodeid @@ -923,13 +953,13 @@ class TestTracebackCutting(object): out = result.stdout.str() assert "xyz" in out assert "conftest.py:5: ValueError" in out - numentries = out.count("_ _ _") # separator for traceback entries + numentries = out.count("_ _ _") # separator for traceback entries assert numentries == 0 result = testdir.runpytest("--fulltrace", p) out = result.stdout.str() assert "conftest.py:5: ValueError" in out - numentries = out.count("_ _ _ _") # separator for traceback entries + numentries = out.count("_ _ _ _") # separator for traceback entries assert numentries > 3 def test_traceback_error_during_import(self, testdir): @@ -1180,6 +1210,7 @@ def test_collector_attributes(testdir): "*1 passed*", ]) + def test_customize_through_attributes(testdir): testdir.makeconftest(""" import pytest @@ -1349,7 +1380,6 @@ def 
test_skip_duplicates_by_default(testdir): ]) - def test_keep_duplicates(testdir): """Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609) diff --git a/testing/python/fixture.py b/testing/python/fixture.py index 26cde4c93..2ba890d05 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -2,31 +2,39 @@ from textwrap import dedent import _pytest._code import pytest -import sys from _pytest.pytester import get_public_names -from _pytest.fixtures import FixtureLookupError +from _pytest.fixtures import FixtureLookupError, FixtureRequest from _pytest import fixtures + def test_getfuncargnames(): - def f(): pass + def f(): + pass assert not fixtures.getfuncargnames(f) - def g(arg): pass + def g(arg): + pass assert fixtures.getfuncargnames(g) == ('arg',) - def h(arg1, arg2="hello"): pass + def h(arg1, arg2="hello"): + pass assert fixtures.getfuncargnames(h) == ('arg1',) - def h(arg1, arg2, arg3="hello"): pass + def h(arg1, arg2, arg3="hello"): + pass assert fixtures.getfuncargnames(h) == ('arg1', 'arg2') class A(object): def f(self, arg1, arg2="hello"): pass + @staticmethod + def static(arg1, arg2): + pass + assert fixtures.getfuncargnames(A().f) == ('arg1',) - if sys.version_info < (3,0): - assert fixtures.getfuncargnames(A.f) == ('arg1',) + assert fixtures.getfuncargnames(A.static, cls=A) == ('arg1', 'arg2') + class TestFillFixtures(object): def test_fillfuncargs_exposed(self): @@ -44,7 +52,7 @@ class TestFillFixtures(object): def test_func(some): pass """) - result = testdir.runpytest() # "--collect-only") + result = testdir.runpytest() # "--collect-only") assert result.ret != 0 result.stdout.fnmatch_lines([ "*def test_func(some)*", @@ -439,7 +447,6 @@ class TestFillFixtures(object): ]) assert "INTERNAL" not in result.stdout.str() - def test_fixture_excinfo_leak(self, testdir): # on python2 sys.excinfo would leak into fixture executions testdir.makepyfile(""" @@ -512,6 +519,41 @@ class TestRequestBasic(object): assert len(arg2fixturedefs) == 1 assert arg2fixturedefs['something'][0].argname == "something" + def test_request_garbage(self, testdir): + testdir.makepyfile(""" + import sys + import pytest + import gc + + @pytest.fixture(autouse=True) + def something(request): + # this method of test doesn't work on pypy + if hasattr(sys, "pypy_version_info"): + yield + else: + original = gc.get_debug() + gc.set_debug(gc.DEBUG_SAVEALL) + gc.collect() + + yield + + gc.collect() + leaked_types = sum(1 for _ in gc.garbage + if 'PseudoFixtureDef' in str(_)) + + gc.garbage[:] = [] + + try: + assert leaked_types == 0 + finally: + gc.set_debug(original) + + def test_func(): + pass + """) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=1) + def test_getfixturevalue_recursive(self, testdir): testdir.makeconftest(""" import pytest @@ -537,12 +579,12 @@ class TestRequestBasic(object): def test_getfixturevalue(self, testdir, getfixmethod): item = testdir.getitem(""" import pytest - l = [2] + values = [2] @pytest.fixture def something(request): return 1 @pytest.fixture def other(request): - return l.pop() + return values.pop() def test_func(something): pass """) import contextlib @@ -551,7 +593,8 @@ class TestRequestBasic(object): else: # see #1830 for a cleaner way to accomplish this @contextlib.contextmanager - def expecting_no_warning(): yield + def expecting_no_warning(): + yield warning_expectation = expecting_no_warning() @@ -610,15 +653,15 @@ class TestRequestBasic(object): def test_request_addfinalizer_failing_setup(self, testdir): testdir.makepyfile(""" 
import pytest - l = [1] + values = [1] @pytest.fixture def myfix(request): - request.addfinalizer(l.pop) + request.addfinalizer(values.pop) assert 0 def test_fix(myfix): pass def test_finalizer_ran(): - assert not l + assert not values """) reprec = testdir.inline_run("-s") reprec.assertoutcome(failed=1, passed=1) @@ -626,36 +669,35 @@ class TestRequestBasic(object): def test_request_addfinalizer_failing_setup_module(self, testdir): testdir.makepyfile(""" import pytest - l = [1, 2] + values = [1, 2] @pytest.fixture(scope="module") def myfix(request): - request.addfinalizer(l.pop) - request.addfinalizer(l.pop) + request.addfinalizer(values.pop) + request.addfinalizer(values.pop) assert 0 def test_fix(myfix): pass """) reprec = testdir.inline_run("-s") mod = reprec.getcalls("pytest_runtest_setup")[0].item.module - assert not mod.l - + assert not mod.values def test_request_addfinalizer_partial_setup_failure(self, testdir): p = testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture def something(request): - request.addfinalizer(lambda: l.append(None)) + request.addfinalizer(lambda: values.append(None)) def test_func(something, missingarg): pass def test_second(): - assert len(l) == 1 + assert len(values) == 1 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 error*" # XXX the whole module collection fails - ]) + ]) def test_request_subrequest_addfinalizer_exceptions(self, testdir): """ @@ -664,7 +706,7 @@ class TestRequestBasic(object): """ testdir.makepyfile(""" import pytest - l = [] + values = [] def _excepts(where): raise Exception('Error in %s fixture' % where) @pytest.fixture @@ -672,17 +714,17 @@ class TestRequestBasic(object): return request @pytest.fixture def something(subrequest): - subrequest.addfinalizer(lambda: l.append(1)) - subrequest.addfinalizer(lambda: l.append(2)) + subrequest.addfinalizer(lambda: values.append(1)) + subrequest.addfinalizer(lambda: values.append(2)) subrequest.addfinalizer(lambda: _excepts('something')) @pytest.fixture def excepts(subrequest): subrequest.addfinalizer(lambda: _excepts('excepts')) - subrequest.addfinalizer(lambda: l.append(3)) + subrequest.addfinalizer(lambda: values.append(3)) def test_first(something, excepts): pass def test_second(): - assert l == [3, 2, 1] + assert values == [3, 2, 1] """) result = testdir.runpytest() result.stdout.fnmatch_lines([ @@ -737,13 +779,13 @@ class TestRequestBasic(object): def test_setupdecorator_and_xunit(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope='module', autouse=True) def setup_module(): - l.append("module") + values.append("module") @pytest.fixture(autouse=True) def setup_function(): - l.append("function") + values.append("function") def test_func(): pass @@ -751,14 +793,14 @@ class TestRequestBasic(object): class TestClass(object): @pytest.fixture(scope="class", autouse=True) def setup_class(self): - l.append("class") + values.append("class") @pytest.fixture(autouse=True) def setup_method(self): - l.append("method") + values.append("method") def test_method(self): pass def test_all(): - assert l == ["module", "function", "class", + assert values == ["module", "function", "class", "function", "method", "function"] """) reprec = testdir.inline_run("-v") @@ -815,9 +857,10 @@ class TestRequestBasic(object): reprec = testdir.inline_run() reprec.assertoutcome(passed=2) + class TestRequestMarking(object): def test_applymarker(self, testdir): - item1,item2 = testdir.getitems(""" + item1, item2 = testdir.getitems(""" 
import pytest @pytest.fixture @@ -875,6 +918,7 @@ class TestRequestMarking(object): reprec = testdir.inline_run() reprec.assertoutcome(passed=2) + class TestRequestCachedSetup(object): def test_request_cachedsetup_defaultmodule(self, testdir): reprec = testdir.inline_runsource(""" @@ -917,10 +961,10 @@ class TestRequestCachedSetup(object): def test_request_cachedsetup_extrakey(self, testdir): item1 = testdir.getitem("def test_func(): pass") req1 = fixtures.FixtureRequest(item1) - l = ["hello", "world"] + values = ["hello", "world"] def setup(): - return l.pop() + return values.pop() ret1 = req1.cached_setup(setup, extrakey=1) ret2 = req1.cached_setup(setup, extrakey=2) @@ -934,24 +978,24 @@ class TestRequestCachedSetup(object): def test_request_cachedsetup_cache_deletion(self, testdir): item1 = testdir.getitem("def test_func(): pass") req1 = fixtures.FixtureRequest(item1) - l = [] + values = [] def setup(): - l.append("setup") + values.append("setup") def teardown(val): - l.append("teardown") + values.append("teardown") req1.cached_setup(setup, teardown, scope="function") - assert l == ['setup'] + assert values == ['setup'] # artificial call of finalizer setupstate = req1._pyfuncitem.session._setupstate setupstate._callfinalizers(item1) - assert l == ["setup", "teardown"] + assert values == ["setup", "teardown"] req1.cached_setup(setup, teardown, scope="function") - assert l == ["setup", "teardown", "setup"] + assert values == ["setup", "teardown", "setup"] setupstate._callfinalizers(item1) - assert l == ["setup", "teardown", "setup", "teardown"] + assert values == ["setup", "teardown", "setup", "teardown"] def test_request_cached_setup_two_args(self, testdir): testdir.makepyfile(""" @@ -993,17 +1037,17 @@ class TestRequestCachedSetup(object): def test_request_cached_setup_functional(self, testdir): testdir.makepyfile(test_0=""" import pytest - l = [] + values = [] @pytest.fixture def something(request): val = request.cached_setup(fsetup, fteardown) return val def fsetup(mycache=[1]): - l.append(mycache.pop()) - return l + values.append(mycache.pop()) + return values def fteardown(something): - l.remove(something[0]) - l.append(2) + values.remove(something[0]) + values.append(2) def test_list_once(something): assert something == [1] def test_list_twice(something): @@ -1012,7 +1056,7 @@ class TestRequestCachedSetup(object): testdir.makepyfile(test_1=""" import test_0 # should have run already def test_check_test0_has_teardown_correct(): - assert test_0.l == [2] + assert test_0.values == [2] """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ @@ -1040,6 +1084,7 @@ class TestRequestCachedSetup(object): "*ZeroDivisionError*", ]) + class TestFixtureUsages(object): def test_noargfixturedec(self, testdir): testdir.makepyfile(""" @@ -1136,10 +1181,10 @@ class TestFixtureUsages(object): def test_funcarg_parametrized_and_used_twice(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(params=[1,2]) def arg1(request): - l.append(1) + values.append(1) return request.param @pytest.fixture() @@ -1148,7 +1193,7 @@ class TestFixtureUsages(object): def test_add(arg1, arg2): assert arg2 == arg1 + 1 - assert len(l) == arg1 + assert len(values) == arg1 """) result = testdir.runpytest() result.stdout.fnmatch_lines([ @@ -1189,8 +1234,8 @@ class TestFixtureUsages(object): """) reprec = testdir.inline_run() - l = reprec.getfailedcollections() - assert len(l) == 1 + values = reprec.getfailedcollections() + assert len(values) == 1 def 
test_request_can_be_overridden(self, testdir): testdir.makepyfile(""" @@ -1209,20 +1254,20 @@ class TestFixtureUsages(object): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="class") def myfix(request): request.cls.hello = "world" - l.append(1) + values.append(1) class TestClass(object): def test_one(self): assert self.hello == "world" - assert len(l) == 1 + assert len(values) == 1 def test_two(self): assert self.hello == "world" - assert len(l) == 1 + assert len(values) == 1 pytest.mark.usefixtures("myfix")(TestClass) """) reprec = testdir.inline_run() @@ -1276,7 +1321,7 @@ class TestFixtureUsages(object): testdir.makepyfile(""" import pytest - l = [] + values = [] def f(): yield 1 yield 2 @@ -1290,14 +1335,14 @@ class TestFixtureUsages(object): return request.param def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg2): - l.append(arg2*10) + values.append(arg2*10) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert l == [1,2, 10,20] + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert values == [1, 2, 10, 20] class TestFixtureManagerParseFactories(object): @@ -1447,19 +1492,19 @@ class TestAutouseDiscovery(object): testdir.makepyfile(""" import pytest class TestA(object): - l = [] + values = [] @pytest.fixture(autouse=True) def setup1(self): - self.l.append(1) + self.values.append(1) def test_setup1(self): - assert self.l == [1] + assert self.values == [1] class TestB(object): - l = [] + values = [] @pytest.fixture(autouse=True) def setup2(self): - self.l.append(1) + self.values.append(1) def test_setup2(self): - assert self.l == [1] + assert self.values == [1] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) @@ -1542,22 +1587,22 @@ class TestAutouseDiscovery(object): def test_autouse_in_module_and_two_classes(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(autouse=True) def append1(): - l.append("module") + values.append("module") def test_x(): - assert l == ["module"] + assert values == ["module"] class TestA(object): @pytest.fixture(autouse=True) def append2(self): - l.append("A") + values.append("A") def test_hello(self): - assert l == ["module", "module", "A"], l + assert values == ["module", "module", "A"], values class TestA2(object): def test_world(self): - assert l == ["module", "module", "A", "module"], l + assert values == ["module", "module", "A", "module"], values """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) @@ -1598,28 +1643,26 @@ class TestAutouseManagement(object): reprec = testdir.inline_run() reprec.assertoutcome(passed=2) - - def test_funcarg_and_setup(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="module") def arg(): - l.append(1) + values.append(1) return 0 @pytest.fixture(scope="module", autouse=True) def something(arg): - l.append(2) + values.append(2) def test_hello(arg): - assert len(l) == 2 - assert l == [1,2] + assert len(values) == 2 + assert values == [1,2] assert arg == 0 def test_hello2(arg): - assert len(l) == 2 - assert l == [1,2] + assert len(values) == 2 + assert values == [1,2] assert arg == 0 """) reprec = testdir.inline_run() @@ -1628,20 +1671,20 @@ class TestAutouseManagement(object): def test_uses_parametrized_resource(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(params=[1,2]) def arg(request): return 
request.param @pytest.fixture(autouse=True) def something(arg): - l.append(arg) + values.append(arg) def test_hello(): - if len(l) == 1: - assert l == [1] - elif len(l) == 2: - assert l == [1, 2] + if len(values) == 1: + assert values == [1] + elif len(values) == 2: + assert values == [1, 2] else: 0/0 @@ -1653,7 +1696,7 @@ class TestAutouseManagement(object): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="session", params=[1,2]) def arg(request): @@ -1662,14 +1705,14 @@ class TestAutouseManagement(object): @pytest.fixture(scope="function", autouse=True) def append(request, arg): if request.function.__name__ == "test_some": - l.append(arg) + values.append(arg) def test_some(): pass def test_result(arg): - assert len(l) == arg - assert l[:arg] == [1,2][:arg] + assert len(values) == arg + assert values[:arg] == [1,2][:arg] """) reprec = testdir.inline_run("-v", "-s") reprec.assertoutcome(passed=4) @@ -1679,7 +1722,7 @@ class TestAutouseManagement(object): import pytest import pprint - l = [] + values = [] @pytest.fixture(scope="function", params=[1,2]) def farg(request): @@ -1692,7 +1735,7 @@ class TestAutouseManagement(object): @pytest.fixture(scope="function", autouse=True) def append(request, farg, carg): def fin(): - l.append("fin_%s%s" % (carg, farg)) + values.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) """) testdir.makepyfile(""" @@ -1706,29 +1749,29 @@ class TestAutouseManagement(object): pass """) confcut = "--confcutdir={0}".format(testdir.tmpdir) - reprec = testdir.inline_run("-v","-s", confcut) + reprec = testdir.inline_run("-v", "-s", confcut) reprec.assertoutcome(passed=8) config = reprec.getcalls("pytest_unconfigure")[0].config - l = config.pluginmanager._getconftestmodules(p)[0].l - assert l == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2 + values = config.pluginmanager._getconftestmodules(p)[0].values + assert values == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2 def test_scope_ordering(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="function", autouse=True) def fappend2(): - l.append(2) + values.append(2) @pytest.fixture(scope="class", autouse=True) def classappend3(): - l.append(3) + values.append(3) @pytest.fixture(scope="module", autouse=True) def mappend(): - l.append(1) + values.append(1) class TestHallo(object): def test_method(self): - assert l == [1,3,2] + assert values == [1,3,2] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @@ -1736,65 +1779,67 @@ class TestAutouseManagement(object): def test_parametrization_setup_teardown_ordering(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] def pytest_generate_tests(metafunc): + if metafunc.cls is None: + assert metafunc.function is test_finish if metafunc.cls is not None: metafunc.parametrize("item", [1,2], scope="class") class TestClass(object): @pytest.fixture(scope="class", autouse=True) def addteardown(self, item, request): - l.append("setup-%d" % item) - request.addfinalizer(lambda: l.append("teardown-%d" % item)) + values.append("setup-%d" % item) + request.addfinalizer(lambda: values.append("teardown-%d" % item)) def test_step1(self, item): - l.append("step1-%d" % item) + values.append("step1-%d" % item) def test_step2(self, item): - l.append("step2-%d" % item) + values.append("step2-%d" % item) def test_finish(): - print (l) - assert l == ["setup-1", "step1-1", "step2-1", "teardown-1", + print (values) + assert values == ["setup-1", "step1-1", "step2-1", "teardown-1", 
"setup-2", "step1-2", "step2-2", "teardown-2",] """) - reprec = testdir.inline_run() + reprec = testdir.inline_run('-s') reprec.assertoutcome(passed=5) def test_ordering_autouse_before_explicit(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(autouse=True) def fix1(): - l.append(1) + values.append(1) @pytest.fixture() def arg1(): - l.append(2) + values.append(2) def test_hello(arg1): - assert l == [1,2] + assert values == [1,2] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @pytest.mark.issue226 - @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00","p01"]) - @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10","p11"]) + @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00", "p01"]) + @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10", "p11"]) def test_ordering_dependencies_torndown_first(self, testdir, param1, param2): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(%(param1)s) def arg1(request): - request.addfinalizer(lambda: l.append("fin1")) - l.append("new1") + request.addfinalizer(lambda: values.append("fin1")) + values.append("new1") @pytest.fixture(%(param2)s) def arg2(request, arg1): - request.addfinalizer(lambda: l.append("fin2")) - l.append("new2") + request.addfinalizer(lambda: values.append("fin2")) + values.append("new2") def test_arg(arg2): pass def test_check(): - assert l == ["new1", "new2", "fin2", "fin1"] + assert values == ["new1", "new2", "fin2", "fin1"] """ % locals()) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) @@ -1807,11 +1852,11 @@ class TestFixtureMarker(object): @pytest.fixture(params=["a", "b", "c"]) def arg(request): return request.param - l = [] + values = [] def test_param(arg): - l.append(arg) + values.append(arg) def test_result(): - assert l == list("abc") + assert values == list("abc") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) @@ -1855,21 +1900,21 @@ class TestFixtureMarker(object): def test_scope_session(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="module") def arg(): - l.append(1) + values.append(1) return 1 def test_1(arg): assert arg == 1 def test_2(arg): assert arg == 1 - assert len(l) == 1 + assert len(values) == 1 class TestClass(object): def test3(self, arg): assert arg == 1 - assert len(l) == 1 + assert len(values) == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) @@ -1877,10 +1922,10 @@ class TestFixtureMarker(object): def test_scope_session_exc(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="session") def fix(): - l.append(1) + values.append(1) pytest.skip('skipping') def test_1(fix): @@ -1888,7 +1933,7 @@ class TestFixtureMarker(object): def test_2(fix): pass def test_last(): - assert l == [1] + assert values == [1] """) reprec = testdir.inline_run() reprec.assertoutcome(skipped=2, passed=1) @@ -1896,11 +1941,11 @@ class TestFixtureMarker(object): def test_scope_session_exc_two_fix(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] m = [] @pytest.fixture(scope="session") def a(): - l.append(1) + values.append(1) pytest.skip('skipping') @pytest.fixture(scope="session") def b(a): @@ -1911,7 +1956,7 @@ class TestFixtureMarker(object): def test_2(b): pass def test_last(): - assert l == [1] + assert values == [1] assert m == [] """) reprec = testdir.inline_run() @@ -1949,21 +1994,21 @@ class 
TestFixtureMarker(object): def test_scope_module_uses_session(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="module") def arg(): - l.append(1) + values.append(1) return 1 def test_1(arg): assert arg == 1 def test_2(arg): assert arg == 1 - assert len(l) == 1 + assert len(values) == 1 class TestClass(object): def test3(self, arg): assert arg == 1 - assert len(l) == 1 + assert len(values) == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) @@ -2058,17 +2103,17 @@ class TestFixtureMarker(object): @pytest.fixture(scope="module", params=["a", "b", "c"]) def arg(request): return request.param - l = [] + values = [] def test_param(arg): - l.append(arg) + values.append(arg) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=3) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert len(l) == 3 - assert "a" in l - assert "b" in l - assert "c" in l + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert len(values) == 3 + assert "a" in values + assert "b" in values + assert "c" in values def test_scope_mismatch(self, testdir): testdir.makeconftest(""" @@ -2099,18 +2144,22 @@ class TestFixtureMarker(object): def arg(request): return request.param - l = [] + values = [] def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg): - l.append(arg) + values.append(arg) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert l == [1,1,2,2] + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert values == [1, 1, 2, 2] def test_module_parametrized_ordering(self, testdir): + testdir.makeini(""" + [pytest] + console_output_style=classic + """) testdir.makeconftest(""" import pytest @@ -2156,11 +2205,56 @@ class TestFixtureMarker(object): test_mod1.py::test_func1[m2] PASSED """) - def test_class_ordering(self, testdir): + def test_dynamic_parametrized_ordering(self, testdir): + testdir.makeini(""" + [pytest] + console_output_style=classic + """) testdir.makeconftest(""" import pytest - l = [] + def pytest_configure(config): + class DynamicFixturePlugin(object): + @pytest.fixture(scope='session', params=['flavor1', 'flavor2']) + def flavor(self, request): + return request.param + config.pluginmanager.register(DynamicFixturePlugin(), 'flavor-fixture') + + @pytest.fixture(scope='session', params=['vxlan', 'vlan']) + def encap(request): + return request.param + + @pytest.fixture(scope='session', autouse='True') + def reprovision(request, flavor, encap): + pass + """) + testdir.makepyfile(""" + def test(reprovision): + pass + def test2(reprovision): + pass + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines(""" + test_dynamic_parametrized_ordering.py::test[flavor1-vxlan] PASSED + test_dynamic_parametrized_ordering.py::test2[flavor1-vxlan] PASSED + test_dynamic_parametrized_ordering.py::test[flavor2-vxlan] PASSED + test_dynamic_parametrized_ordering.py::test2[flavor2-vxlan] PASSED + test_dynamic_parametrized_ordering.py::test[flavor2-vlan] PASSED + test_dynamic_parametrized_ordering.py::test2[flavor2-vlan] PASSED + test_dynamic_parametrized_ordering.py::test[flavor1-vlan] PASSED + test_dynamic_parametrized_ordering.py::test2[flavor1-vlan] PASSED + """) + + def test_class_ordering(self, testdir): + testdir.makeini(""" + [pytest] + console_output_style=classic + """) + testdir.makeconftest(""" + import pytest + + values = [] 
@pytest.fixture(scope="function", params=[1,2]) def farg(request): @@ -2173,7 +2267,7 @@ class TestFixtureMarker(object): @pytest.fixture(scope="function", autouse=True) def append(request, farg, carg): def fin(): - l.append("fin_%s%s" % (carg, farg)) + values.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) """) testdir.makepyfile(""" @@ -2189,19 +2283,19 @@ class TestFixtureMarker(object): pass """) result = testdir.runpytest("-vs") - result.stdout.fnmatch_lines(""" - test_class_ordering.py::TestClass2::test_1[1-a] PASSED - test_class_ordering.py::TestClass2::test_1[2-a] PASSED - test_class_ordering.py::TestClass2::test_2[1-a] PASSED - test_class_ordering.py::TestClass2::test_2[2-a] PASSED - test_class_ordering.py::TestClass2::test_1[1-b] PASSED - test_class_ordering.py::TestClass2::test_1[2-b] PASSED - test_class_ordering.py::TestClass2::test_2[1-b] PASSED - test_class_ordering.py::TestClass2::test_2[2-b] PASSED - test_class_ordering.py::TestClass::test_3[1-a] PASSED - test_class_ordering.py::TestClass::test_3[2-a] PASSED - test_class_ordering.py::TestClass::test_3[1-b] PASSED - test_class_ordering.py::TestClass::test_3[2-b] PASSED + result.stdout.re_match_lines(r""" + test_class_ordering.py::TestClass2::test_1\[a-1\] PASSED + test_class_ordering.py::TestClass2::test_1\[a-2\] PASSED + test_class_ordering.py::TestClass2::test_2\[a-1\] PASSED + test_class_ordering.py::TestClass2::test_2\[a-2\] PASSED + test_class_ordering.py::TestClass2::test_1\[b-1\] PASSED + test_class_ordering.py::TestClass2::test_1\[b-2\] PASSED + test_class_ordering.py::TestClass2::test_2\[b-1\] PASSED + test_class_ordering.py::TestClass2::test_2\[b-2\] PASSED + test_class_ordering.py::TestClass::test_3\[a-1\] PASSED + test_class_ordering.py::TestClass::test_3\[a-2\] PASSED + test_class_ordering.py::TestClass::test_3\[b-1\] PASSED + test_class_ordering.py::TestClass::test_3\[b-2\] PASSED """) def test_parametrize_separated_order_higher_scope_first(self, testdir): @@ -2211,30 +2305,30 @@ class TestFixtureMarker(object): @pytest.fixture(scope="function", params=[1, 2]) def arg(request): param = request.param - request.addfinalizer(lambda: l.append("fin:%s" % param)) - l.append("create:%s" % param) + request.addfinalizer(lambda: values.append("fin:%s" % param)) + values.append("create:%s" % param) return request.param @pytest.fixture(scope="module", params=["mod1", "mod2"]) def modarg(request): param = request.param - request.addfinalizer(lambda: l.append("fin:%s" % param)) - l.append("create:%s" % param) + request.addfinalizer(lambda: values.append("fin:%s" % param)) + values.append("create:%s" % param) return request.param - l = [] + values = [] def test_1(arg): - l.append("test1") + values.append("test1") def test_2(modarg): - l.append("test2") + values.append("test2") def test_3(arg, modarg): - l.append("test3") + values.append("test3") def test_4(modarg, arg): - l.append("test4") + values.append("test4") """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=12) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values expected = [ 'create:1', 'test1', 'fin:1', 'create:2', 'test1', 'fin:2', 'create:mod1', 'test2', 'create:1', 'test3', @@ -2243,10 +2337,10 @@ class TestFixtureMarker(object): 'fin:mod1', 'create:mod2', 'test2', 'create:1', 'test3', 'fin:1', 'create:2', 'test3', 'fin:2', 'create:1', 'test4', 'fin:1', 'create:2', 'test4', 'fin:2', - 'fin:mod2'] + 'fin:mod2'] import pprint - pprint.pprint(list(zip(l, 
expected))) - assert l == expected + pprint.pprint(list(zip(values, expected))) + assert values == expected def test_parametrized_fixture_teardown_order(self, testdir): testdir.makepyfile(""" @@ -2255,29 +2349,29 @@ class TestFixtureMarker(object): def param1(request): return request.param - l = [] + values = [] class TestClass(object): @classmethod @pytest.fixture(scope="class", autouse=True) def setup1(self, request, param1): - l.append(1) + values.append(1) request.addfinalizer(self.teardown1) @classmethod def teardown1(self): - assert l.pop() == 1 + assert values.pop() == 1 @pytest.fixture(scope="class", autouse=True) def setup2(self, request, param1): - l.append(2) + values.append(2) request.addfinalizer(self.teardown2) @classmethod def teardown2(self): - assert l.pop() == 2 + assert values.pop() == 2 def test(self): pass def test_finish(): - assert not l + assert not values """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(""" @@ -2342,42 +2436,42 @@ class TestFixtureMarker(object): def test_request_is_clean(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(params=[1, 2]) def fix(request): - request.addfinalizer(lambda: l.append(request.param)) + request.addfinalizer(lambda: values.append(request.param)) def test_fix(fix): pass """) reprec = testdir.inline_run("-s") - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert l == [1,2] + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert values == [1, 2] def test_parametrize_separated_lifecycle(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="module", params=[1, 2]) def arg(request): x = request.param - request.addfinalizer(lambda: l.append("fin%s" % x)) + request.addfinalizer(lambda: values.append("fin%s" % x)) return request.param def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg): - l.append(arg) + values.append(arg) """) reprec = testdir.inline_run("-vs") reprec.assertoutcome(passed=4) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values import pprint - pprint.pprint(l) - #assert len(l) == 6 - assert l[0] == l[1] == 1 - assert l[2] == "fin1" - assert l[3] == l[4] == 2 - assert l[5] == "fin2" + pprint.pprint(values) + # assert len(values) == 6 + assert values[0] == values[1] == 1 + assert values[2] == "fin1" + assert values[3] == values[4] == 2 + assert values[5] == "fin2" def test_parametrize_function_scoped_finalizers_called(self, testdir): testdir.makepyfile(""" @@ -2386,28 +2480,27 @@ class TestFixtureMarker(object): @pytest.fixture(scope="function", params=[1, 2]) def arg(request): x = request.param - request.addfinalizer(lambda: l.append("fin%s" % x)) + request.addfinalizer(lambda: values.append("fin%s" % x)) return request.param - l = [] + values = [] def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg): - l.append(arg) + values.append(arg) def test_3(): - assert len(l) == 8 - assert l == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"] + assert len(values) == 8 + assert values == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"] """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=5) - @pytest.mark.issue246 @pytest.mark.parametrize("scope", ["session", "function", "module"]) def test_finalizer_order_on_parametrization(self, scope, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope=%(scope)r, params=["1"]) def 
fix1(request): @@ -2416,13 +2509,13 @@ class TestFixtureMarker(object): @pytest.fixture(scope=%(scope)r) def fix2(request, base): def cleanup_fix2(): - assert not l, "base should not have been finalized" + assert not values, "base should not have been finalized" request.addfinalizer(cleanup_fix2) @pytest.fixture(scope=%(scope)r) def base(request, fix1): def cleanup_base(): - l.append("fin_base") + values.append("fin_base") print ("finalizing base") request.addfinalizer(cleanup_base) @@ -2440,29 +2533,29 @@ class TestFixtureMarker(object): def test_class_scope_parametrization_ordering(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(params=["John", "Doe"], scope="class") def human(request): - request.addfinalizer(lambda: l.append("fin %s" % request.param)) + request.addfinalizer(lambda: values.append("fin %s" % request.param)) return request.param class TestGreetings(object): def test_hello(self, human): - l.append("test_hello") + values.append("test_hello") class TestMetrics(object): def test_name(self, human): - l.append("test_name") + values.append("test_name") def test_population(self, human): - l.append("test_population") + values.append("test_population") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=6) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert l == ["test_hello", "fin John", "test_hello", "fin Doe", - "test_name", "test_population", "fin John", - "test_name", "test_population", "fin Doe"] + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert values == ["test_hello", "fin John", "test_hello", "fin Doe", + "test_name", "test_population", "fin John", + "test_name", "test_population", "fin Doe"] def test_parametrize_setup_function(self, testdir): testdir.makepyfile(""" @@ -2474,21 +2567,21 @@ class TestFixtureMarker(object): @pytest.fixture(scope="module", autouse=True) def mysetup(request, arg): - request.addfinalizer(lambda: l.append("fin%s" % arg)) - l.append("setup%s" % arg) + request.addfinalizer(lambda: values.append("fin%s" % arg)) + values.append("setup%s" % arg) - l = [] + values = [] def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg): - l.append(arg) + values.append(arg) def test_3(): import pprint - pprint.pprint(l) + pprint.pprint(values) if arg == 1: - assert l == ["setup1", 1, 1, ] + assert values == ["setup1", 1, 1, ] elif arg == 2: - assert l == ["setup1", 1, 1, "fin1", + assert values == ["setup1", 1, 1, "fin1", "setup2", 2, 2, ] """) @@ -2542,9 +2635,42 @@ class TestFixtureMarker(object): '*test_foo*alpha*', '*test_foo*beta*']) + @pytest.mark.issue920 + def test_deterministic_fixture_collection(self, testdir, monkeypatch): + testdir.makepyfile(""" + import pytest + + @pytest.fixture(scope="module", + params=["A", + "B", + "C"]) + def A(request): + return request.param + + @pytest.fixture(scope="module", + params=["DDDDDDDDD", "EEEEEEEEEEEE", "FFFFFFFFFFF", "banansda"]) + def B(request, A): + return request.param + + def test_foo(B): + # Something funky is going on here. + # Despite specified seeds, on what is collected, + # sometimes we get unexpected passes. hashing B seems + # to help? 
+ assert hash(B) or True + """) + monkeypatch.setenv("PYTHONHASHSEED", "1") + out1 = testdir.runpytest_subprocess("-v") + monkeypatch.setenv("PYTHONHASHSEED", "2") + out2 = testdir.runpytest_subprocess("-v") + out1 = [line for line in out1.outlines if line.startswith("test_deterministic_fixture_collection.py::test_foo")] + out2 = [line for line in out2.outlines if line.startswith("test_deterministic_fixture_collection.py::test_foo")] + assert len(out1) == 12 + assert out1 == out2 + class TestRequestScopeAccess(object): - pytestmark = pytest.mark.parametrize(("scope", "ok", "error"),[ + pytestmark = pytest.mark.parametrize(("scope", "ok", "error"), [ ["session", "", "fspath class function module"], ["module", "module fspath", "cls function"], ["class", "module fspath cls", "function"], @@ -2565,7 +2691,7 @@ class TestRequestScopeAccess(object): assert request.config def test_func(): pass - """ %(scope, ok.split(), error.split())) + """ % (scope, ok.split(), error.split())) reprec = testdir.inline_run("-l") reprec.assertoutcome(passed=1) @@ -2583,10 +2709,11 @@ class TestRequestScopeAccess(object): assert request.config def test_func(arg): pass - """ %(scope, ok.split(), error.split())) + """ % (scope, ok.split(), error.split())) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) + class TestErrors(object): def test_subfactory_missing_funcarg(self, testdir): testdir.makepyfile(""" @@ -2615,13 +2742,13 @@ class TestErrors(object): request.addfinalizer(f) return object() - l = [] + values = [] def test_1(fix1): - l.append(fix1) + values.append(fix1) def test_2(fix1): - l.append(fix1) + values.append(fix1) def test_3(): - assert l[0] != l[1] + assert values[0] != values[1] """) result = testdir.runpytest() result.stdout.fnmatch_lines(""" @@ -2632,8 +2759,6 @@ class TestErrors(object): *3 pass*2 error* """) - - def test_setupfunc_missing_funcarg(self, testdir): testdir.makepyfile(""" import pytest @@ -2651,6 +2776,7 @@ class TestErrors(object): "*1 error*", ]) + class TestShowFixtures(object): def test_funcarg_compat(self, testdir): config = testdir.parseconfigure("--funcargs") @@ -2659,18 +2785,16 @@ class TestShowFixtures(object): def test_show_fixtures(self, testdir): result = testdir.runpytest("--fixtures") result.stdout.fnmatch_lines([ - "*tmpdir*", - "*temporary directory*", - ] - ) + "*tmpdir*", + "*temporary directory*", + ]) def test_show_fixtures_verbose(self, testdir): result = testdir.runpytest("--fixtures", "-v") result.stdout.fnmatch_lines([ - "*tmpdir*--*tmpdir.py*", - "*temporary directory*", - ] - ) + "*tmpdir*--*tmpdir.py*", + "*temporary directory*", + ]) def test_show_fixtures_testmodule(self, testdir): p = testdir.makepyfile(''' @@ -2713,7 +2837,7 @@ class TestShowFixtures(object): """) def test_show_fixtures_trimmed_doc(self, testdir): - p = testdir.makepyfile(''' + p = testdir.makepyfile(dedent(''' import pytest @pytest.fixture def arg1(): @@ -2729,9 +2853,9 @@ class TestShowFixtures(object): line2 """ - ''') + ''')) result = testdir.runpytest("--fixtures", p) - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines(dedent(""" * fixtures defined from test_show_fixtures_trimmed_doc * arg2 line1 @@ -2740,8 +2864,64 @@ class TestShowFixtures(object): line1 line2 - """) + """)) + def test_show_fixtures_indented_doc(self, testdir): + p = testdir.makepyfile(dedent(''' + import pytest + @pytest.fixture + def fixture1(): + """ + line1 + indented line + """ + ''')) + result = testdir.runpytest("--fixtures", p) + result.stdout.fnmatch_lines(dedent(""" + * fixtures 
defined from test_show_fixtures_indented_doc * + fixture1 + line1 + indented line + """)) + + def test_show_fixtures_indented_doc_first_line_unindented(self, testdir): + p = testdir.makepyfile(dedent(''' + import pytest + @pytest.fixture + def fixture1(): + """line1 + line2 + indented line + """ + ''')) + result = testdir.runpytest("--fixtures", p) + result.stdout.fnmatch_lines(dedent(""" + * fixtures defined from test_show_fixtures_indented_doc_first_line_unindented * + fixture1 + line1 + line2 + indented line + """)) + + def test_show_fixtures_indented_in_class(self, testdir): + p = testdir.makepyfile(dedent(''' + import pytest + class TestClass(object): + @pytest.fixture + def fixture1(self): + """line1 + line2 + indented line + """ + ''')) + result = testdir.runpytest("--fixtures", p) + result.stdout.fnmatch_lines(dedent(""" + * fixtures defined from test_show_fixtures_indented_in_class * + fixture1 + line1 + line2 + indented line + """)) def test_show_fixtures_different_files(self, testdir): """ @@ -2929,6 +3109,7 @@ class TestContextManagerFixtureFuncs(object): result = testdir.runpytest("-s") result.stdout.fnmatch_lines("*mew*") + class TestParameterizedSubRequest(object): def test_call_from_fixture(self, testdir): testfile = testdir.makepyfile(""" @@ -3036,4 +3217,226 @@ class TestParameterizedSubRequest(object): """.format(fixfile.strpath, testfile.basename)) +def test_pytest_fixture_setup_and_post_finalizer_hook(testdir): + testdir.makeconftest(""" + from __future__ import print_function + def pytest_fixture_setup(fixturedef, request): + print('ROOT setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) + def pytest_fixture_post_finalizer(fixturedef, request): + print('ROOT finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) + """) + testdir.makepyfile(**{ + 'tests/conftest.py': """ + from __future__ import print_function + def pytest_fixture_setup(fixturedef, request): + print('TESTS setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) + def pytest_fixture_post_finalizer(fixturedef, request): + print('TESTS finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) + """, + 'tests/test_hooks.py': """ + from __future__ import print_function + import pytest + @pytest.fixture() + def my_fixture(): + return 'some' + + def test_func(my_fixture): + print('TEST test_func') + assert my_fixture == 'some' + """ + }) + result = testdir.runpytest("-s") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*TESTS setup hook called for my_fixture from test_func*", + "*ROOT setup hook called for my_fixture from test_func*", + "*TEST test_func*", + "*TESTS finalizer hook called for my_fixture from test_func*", + "*ROOT finalizer hook called for my_fixture from test_func*", + ]) + + +class TestScopeOrdering(object): + """Class of tests that ensure fixtures are ordered based on their scopes (#2405)""" + + @pytest.mark.parametrize('use_mark', [True, False]) + def test_func_closure_module_auto(self, testdir, use_mark): + """Semantically identical to the example posted in #2405 when ``use_mark=True``""" + testdir.makepyfile(""" + import pytest + + @pytest.fixture(scope='module', autouse={autouse}) + def m1(): pass + + if {use_mark}: + pytestmark = pytest.mark.usefixtures('m1') + + @pytest.fixture(scope='function', autouse=True) + def f1(): pass + + def test_func(m1): + pass + """.format(autouse=not use_mark, use_mark=use_mark)) + items, _ = testdir.inline_genitems() + 
request = FixtureRequest(items[0]) + assert request.fixturenames == 'm1 f1'.split() + + def test_func_closure_with_native_fixtures(self, testdir, monkeypatch): + """Sanity check that verifies the order returned by the closures and the actual fixture execution order: + The execution order may differ because of fixture inter-dependencies. + """ + monkeypatch.setattr(pytest, 'FIXTURE_ORDER', [], raising=False) + testdir.makepyfile(""" + import pytest + + FIXTURE_ORDER = pytest.FIXTURE_ORDER + + @pytest.fixture(scope="session") + def s1(): + FIXTURE_ORDER.append('s1') + + @pytest.fixture(scope="module") + def m1(): + FIXTURE_ORDER.append('m1') + + @pytest.fixture(scope='session') + def my_tmpdir_factory(): + FIXTURE_ORDER.append('my_tmpdir_factory') + + @pytest.fixture + def my_tmpdir(my_tmpdir_factory): + FIXTURE_ORDER.append('my_tmpdir') + + @pytest.fixture + def f1(my_tmpdir): + FIXTURE_ORDER.append('f1') + + @pytest.fixture + def f2(): + FIXTURE_ORDER.append('f2') + + def test_foo(f1, m1, f2, s1): pass + """) + items, _ = testdir.inline_genitems() + request = FixtureRequest(items[0]) + # order of fixtures based on their scope and position in the parameter list + assert request.fixturenames == 's1 my_tmpdir_factory m1 f1 f2 my_tmpdir'.split() + testdir.runpytest() + # actual fixture execution differs: dependent fixtures must be created first ("my_tmpdir") + assert pytest.FIXTURE_ORDER == 's1 my_tmpdir_factory m1 my_tmpdir f1 f2'.split() + + def test_func_closure_module(self, testdir): + testdir.makepyfile(""" + import pytest + + @pytest.fixture(scope='module') + def m1(): pass + + @pytest.fixture(scope='function') + def f1(): pass + + def test_func(f1, m1): + pass + """) + items, _ = testdir.inline_genitems() + request = FixtureRequest(items[0]) + assert request.fixturenames == 'm1 f1'.split() + + def test_func_closure_scopes_reordered(self, testdir): + """Test ensures that fixtures are ordered by scope regardless of the order of the parameters, although + fixtures of same scope keep the declared order + """ + testdir.makepyfile(""" + import pytest + + @pytest.fixture(scope='session') + def s1(): pass + + @pytest.fixture(scope='module') + def m1(): pass + + @pytest.fixture(scope='function') + def f1(): pass + + @pytest.fixture(scope='function') + def f2(): pass + + class Test: + + @pytest.fixture(scope='class') + def c1(cls): pass + + def test_func(self, f2, f1, c1, m1, s1): + pass + """) + items, _ = testdir.inline_genitems() + request = FixtureRequest(items[0]) + assert request.fixturenames == 's1 m1 c1 f2 f1'.split() + + def test_func_closure_same_scope_closer_root_first(self, testdir): + """Auto-use fixtures of same scope are ordered by closer-to-root first""" + testdir.makeconftest(""" + import pytest + + @pytest.fixture(scope='module', autouse=True) + def m_conf(): pass + """) + testdir.makepyfile(**{ + 'sub/conftest.py': """ + import pytest + + @pytest.fixture(scope='module', autouse=True) + def m_sub(): pass + """, + 'sub/test_func.py': """ + import pytest + + @pytest.fixture(scope='module', autouse=True) + def m_test(): pass + + @pytest.fixture(scope='function') + def f1(): pass + + def test_func(m_test, f1): + pass + """}) + items, _ = testdir.inline_genitems() + request = FixtureRequest(items[0]) + assert request.fixturenames == 'm_conf m_sub m_test f1'.split() + + def test_func_closure_all_scopes_complex(self, testdir): + """Complex test involving all scopes and mixing autouse with normal fixtures""" + testdir.makeconftest(""" + import pytest + + @pytest.fixture(scope='session') 
+ def s1(): pass + """) + testdir.makepyfile(""" + import pytest + + @pytest.fixture(scope='module', autouse=True) + def m1(): pass + + @pytest.fixture(scope='module') + def m2(s1): pass + + @pytest.fixture(scope='function') + def f1(): pass + + @pytest.fixture(scope='function') + def f2(): pass + + class Test: + + @pytest.fixture(scope='class', autouse=True) + def c1(self): + pass + + def test_func(self, f2, f1, m2): + pass + """) + items, _ = testdir.inline_genitems() + request = FixtureRequest(items[0]) + assert request.fixturenames == 's1 m1 m2 c1 f2 f1'.split() diff --git a/testing/python/integration.py b/testing/python/integration.py index 4f888276b..aade04fa9 100644 --- a/testing/python/integration.py +++ b/testing/python/integration.py @@ -4,7 +4,7 @@ from _pytest import runner class TestOEJSKITSpecials(object): - def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage + def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage testdir.makeconftest(""" import pytest def pytest_pycollect_makeitem(collector, name, obj): @@ -30,7 +30,7 @@ class TestOEJSKITSpecials(object): pytest._fillfuncargs(clscol) assert clscol.funcargs['arg1'] == 42 - def test_autouse_fixture(self, testdir): # rough jstests usage + def test_autouse_fixture(self, testdir): # rough jstests usage testdir.makeconftest(""" import pytest def pytest_pycollect_makeitem(collector, name, obj): @@ -76,6 +76,7 @@ def test_wrapped_getfslineno(): fs2, lineno2 = python.getfslineno(wrap) assert lineno > lineno2, "getfslineno does not unwrap correctly" + class TestMockDecoration(object): def test_wrapped_getfuncargnames(self): from _pytest.compat import getfuncargnames @@ -92,8 +93,8 @@ class TestMockDecoration(object): def f(x): pass - l = getfuncargnames(f) - assert l == ("x",) + values = getfuncargnames(f) + assert values == ("x",) def test_wrapped_getfuncargnames_patching(self): from _pytest.compat import getfuncargnames @@ -109,8 +110,8 @@ class TestMockDecoration(object): def f(x, y, z): pass - l = getfuncargnames(f) - assert l == ("y", "z") + values = getfuncargnames(f) + assert values == ("y", "z") def test_unittest_mock(self, testdir): pytest.importorskip("unittest.mock") @@ -146,6 +147,28 @@ class TestMockDecoration(object): reprec = testdir.inline_run() reprec.assertoutcome(passed=1) + def test_unittest_mock_and_pypi_mock(self, testdir): + pytest.importorskip("unittest.mock") + pytest.importorskip("mock", "1.0.1") + testdir.makepyfile(""" + import mock + import unittest.mock + class TestBoth(object): + @unittest.mock.patch("os.path.abspath") + def test_hello(self, abspath): + import os + os.path.abspath("hello") + abspath.assert_any_call("hello") + + @mock.patch("os.path.abspath") + def test_hello_mock(self, abspath): + import os + os.path.abspath("hello") + abspath.assert_any_call("hello") + """) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=2) + def test_mock(self, testdir): pytest.importorskip("mock", "1.0.1") testdir.makepyfile(""" @@ -173,7 +196,7 @@ class TestMockDecoration(object): reprec.assertoutcome(passed=2) calls = reprec.getcalls("pytest_runtest_logreport") funcnames = [call.report.location[2] for call in calls - if call.report.when == "call"] + if call.report.when == "call"] assert funcnames == ["T.test_hello", "test_someting"] def test_mock_sorting(self, testdir): @@ -246,6 +269,7 @@ class TestReRunTests(object): *2 passed* """) + def test_pytestconfig_is_session_scoped(): from _pytest.fixtures import pytestconfig assert 
pytestconfig._pytestfixturefunction.scope == "session" diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 380dbf0e6..9b70c3305 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- import re import sys - +import attr import _pytest._code import py import pytest @@ -14,7 +14,7 @@ PY3 = sys.version_info >= (3, 0) class TestMetafunc(object): - def Metafunc(self, func): + def Metafunc(self, func, config=None): # the unit tests of this class check if things work correctly # on the funcarg level, so we don't need a full blown # initiliazation @@ -24,18 +24,26 @@ class TestMetafunc(object): def __init__(self, names): self.names_closure = names + @attr.s + class DefinitionMock(object): + obj = attr.ib() + names = fixtures.getfuncargnames(func) fixtureinfo = FixtureInfo(names) - return python.Metafunc(func, fixtureinfo, None) + definition = DefinitionMock(func) + return python.Metafunc(definition, fixtureinfo, config) def test_no_funcargs(self, testdir): - def function(): pass + def function(): + pass + metafunc = self.Metafunc(function) assert not metafunc.fixturenames repr(metafunc._calls) def test_function_basic(self): - def func(arg1, arg2="qwe"): pass + def func(arg1, arg2="qwe"): + pass metafunc = self.Metafunc(func) assert len(metafunc.fixturenames) == 1 assert 'arg1' in metafunc.fixturenames @@ -43,7 +51,8 @@ class TestMetafunc(object): assert metafunc.cls is None def test_addcall_no_args(self): - def func(arg1): pass + def func(arg1): + pass metafunc = self.Metafunc(func) metafunc.addcall() assert len(metafunc._calls) == 1 @@ -52,7 +61,8 @@ class TestMetafunc(object): assert not hasattr(call, 'param') def test_addcall_id(self): - def func(arg1): pass + def func(arg1): + pass metafunc = self.Metafunc(func) pytest.raises(ValueError, "metafunc.addcall(id=None)") @@ -65,10 +75,12 @@ class TestMetafunc(object): assert metafunc._calls[1].id == "2" def test_addcall_param(self): - def func(arg1): pass + def func(arg1): + pass metafunc = self.Metafunc(func) - class obj(object): pass + class obj(object): + pass metafunc.addcall(param=obj) metafunc.addcall(param=obj) @@ -79,11 +91,13 @@ class TestMetafunc(object): assert metafunc._calls[2].getparam("arg1") == 1 def test_addcall_funcargs(self): - def func(x): pass + def func(x): + pass metafunc = self.Metafunc(func) - class obj(object): pass + class obj(object): + pass metafunc.addcall(funcargs={"x": 2}) metafunc.addcall(funcargs={"x": 3}) @@ -94,17 +108,19 @@ class TestMetafunc(object): assert not hasattr(metafunc._calls[1], 'param') def test_parametrize_error(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) - metafunc.parametrize("x", [1,2]) - pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6])) - pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6])) - metafunc.parametrize("y", [1,2]) - pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6])) - pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6])) + metafunc.parametrize("x", [1, 2]) + pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5, 6])) + pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5, 6])) + metafunc.parametrize("y", [1, 2]) + pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5, 6])) + pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5, 6])) def test_parametrize_bad_scope(self, testdir): - def func(x): pass + def func(x): + pass metafunc = self.Metafunc(func) try: 
metafunc.parametrize("x", [1], scope='doggy') @@ -112,42 +128,59 @@ class TestMetafunc(object): assert "has an unsupported scope value 'doggy'" in str(ve) def test_parametrize_and_id(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) - metafunc.parametrize("x", [1,2], ids=['basic', 'advanced']) + metafunc.parametrize("x", [1, 2], ids=['basic', 'advanced']) metafunc.parametrize("y", ["abc", "def"]) ids = [x.id for x in metafunc._calls] assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"] def test_parametrize_and_id_unicode(self): """Allow unicode strings for "ids" parameter in Python 2 (##1905)""" - def func(x): pass + def func(x): + pass metafunc = self.Metafunc(func) metafunc.parametrize("x", [1, 2], ids=[u'basic', u'advanced']) ids = [x.id for x in metafunc._calls] assert ids == [u"basic", u"advanced"] def test_parametrize_with_wrong_number_of_ids(self, testdir): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) pytest.raises(ValueError, lambda: - metafunc.parametrize("x", [1,2], ids=['basic'])) + metafunc.parametrize("x", [1, 2], ids=['basic'])) pytest.raises(ValueError, lambda: - metafunc.parametrize(("x","y"), [("abc", "def"), - ("ghi", "jkl")], ids=["one"])) + metafunc.parametrize(("x", "y"), [("abc", "def"), + ("ghi", "jkl")], ids=["one"])) @pytest.mark.issue510 def test_parametrize_empty_list(self): - def func( y): pass - metafunc = self.Metafunc(func) + def func(y): + pass + + class MockConfig(object): + def getini(self, name): + return '' + + @property + def hook(self): + return self + + def pytest_make_parametrize_id(self, **kw): + pass + + metafunc = self.Metafunc(func, MockConfig()) metafunc.parametrize("y", []) - assert 'skip' in metafunc._calls[0].keywords + assert 'skip' == metafunc._calls[0].marks[0].name def test_parametrize_with_userobjects(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) class A(object): @@ -178,11 +211,27 @@ class TestMetafunc(object): """ from _pytest.python import _idval values = [ - (u'', ''), - (u'ascii', 'ascii'), - (u'ação', 'a\\xe7\\xe3o'), - (u'josé@blah.com', 'jos\\xe9@blah.com'), - (u'δοκ.ιμή@παράδειγμα.δοκιμή', '\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae'), + ( + u'', + '' + ), + ( + u'ascii', + 'ascii' + ), + ( + u'ação', + 'a\\xe7\\xe3o' + ), + ( + u'josé@blah.com', + 'jos\\xe9@blah.com' + ), + ( + u'δοκ.ιμή@παράδειγμα.δοκιμή', + '\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3' + '\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae' + ), ] for val, expected in values: assert _idval(val, 'a', 6, None) == expected @@ -204,6 +253,25 @@ class TestMetafunc(object): for val, expected in values: assert _idval(val, 'a', 6, None) == expected + def test_class_or_function_idval(self): + """unittest for the expected behavior to obtain ids for parametrized + values that are classes or functions: their __name__. 
+ """ + from _pytest.python import _idval + + class TestClass(object): + pass + + def test_function(): + pass + + values = [ + (TestClass, "TestClass"), + (test_function, "test_function"), + ] + for val, expected in values: + assert _idval(val, 'a', 6, None) == expected + @pytest.mark.issue250 def test_idmaker_autoname(self): from _pytest.python import idmaker @@ -279,7 +347,7 @@ class TestMetafunc(object): assert result == ["10.0-IndexError()", "20-KeyError()", "three-b2", - ] + ] @pytest.mark.issue351 def test_idmaker_idfn_unique_names(self): @@ -291,11 +359,11 @@ class TestMetafunc(object): result = idmaker(("a", "b"), [pytest.param(10.0, IndexError()), pytest.param(20, KeyError()), pytest.param("three", [1, 2, 3]), - ], idfn=ids) + ], idfn=ids) assert result == ["a-a0", "a-a1", "a-a2", - ] + ] @pytest.mark.issue351 def test_idmaker_idfn_exception(self): @@ -331,7 +399,6 @@ class TestMetafunc(object): "\nUpdate your code as this will raise an error in pytest-4.0.", ] - def test_parametrize_ids_exception(self, testdir): """ :param testdir: the instance of Testdir class, a temporary @@ -371,15 +438,16 @@ class TestMetafunc(object): def test_idmaker_with_ids_unique_names(self): from _pytest.python import idmaker - result = idmaker(("a"), map(pytest.param, [1,2,3,4,5]), + result = idmaker(("a"), map(pytest.param, [1, 2, 3, 4, 5]), ids=["a", "a", "b", "c", "b"]) assert result == ["a0", "a1", "b0", "c", "b1"] def test_addcall_and_parametrize(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.addcall({'x': 1}) - metafunc.parametrize('y', [2,3]) + metafunc.parametrize('y', [2, 3]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2} assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3} @@ -388,19 +456,21 @@ class TestMetafunc(object): @pytest.mark.issue714 def test_parametrize_indirect(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.parametrize('x', [1], indirect=True) - metafunc.parametrize('y', [2,3], indirect=True) + metafunc.parametrize('y', [2, 3], indirect=True) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} - assert metafunc._calls[0].params == dict(x=1,y=2) - assert metafunc._calls[1].params == dict(x=1,y=3) + assert metafunc._calls[0].params == dict(x=1, y=2) + assert metafunc._calls[1].params == dict(x=1, y=3) @pytest.mark.issue714 def test_parametrize_indirect_list(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.parametrize('x, y', [('a', 'b')], indirect=['x']) assert metafunc._calls[0].funcargs == dict(y='b') @@ -408,7 +478,8 @@ class TestMetafunc(object): @pytest.mark.issue714 def test_parametrize_indirect_list_all(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'y']) assert metafunc._calls[0].funcargs == {} @@ -416,7 +487,8 @@ class TestMetafunc(object): @pytest.mark.issue714 def test_parametrize_indirect_list_empty(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.parametrize('x, y', [('a', 'b')], indirect=[]) assert metafunc._calls[0].funcargs == dict(x='a', y='b') @@ -454,7 +526,8 @@ class TestMetafunc(object): @pytest.mark.issue714 def test_parametrize_indirect_list_error(self, testdir): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) with pytest.raises(ValueError): 
metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'z']) @@ -550,16 +623,17 @@ class TestMetafunc(object): ]) def test_addcalls_and_parametrize_indirect(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.addcall(param="123") metafunc.parametrize('x', [1], indirect=True) - metafunc.parametrize('y', [2,3], indirect=True) + metafunc.parametrize('y', [2, 3], indirect=True) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} - assert metafunc._calls[0].params == dict(x=1,y=2) - assert metafunc._calls[1].params == dict(x=1,y=3) + assert metafunc._calls[0].params == dict(x=1, y=2) + assert metafunc._calls[1].params == dict(x=1, y=3) def test_parametrize_functional(self, testdir): testdir.makepyfile(""" @@ -584,7 +658,7 @@ class TestMetafunc(object): def test_parametrize_onearg(self): metafunc = self.Metafunc(lambda x: None) - metafunc.parametrize("x", [1,2]) + metafunc.parametrize("x", [1, 2]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == dict(x=1) assert metafunc._calls[0].id == "1" @@ -593,15 +667,15 @@ class TestMetafunc(object): def test_parametrize_onearg_indirect(self): metafunc = self.Metafunc(lambda x: None) - metafunc.parametrize("x", [1,2], indirect=True) + metafunc.parametrize("x", [1, 2], indirect=True) assert metafunc._calls[0].params == dict(x=1) assert metafunc._calls[0].id == "1" assert metafunc._calls[1].params == dict(x=2) assert metafunc._calls[1].id == "2" def test_parametrize_twoargs(self): - metafunc = self.Metafunc(lambda x,y: None) - metafunc.parametrize(("x", "y"), [(1,2), (3,4)]) + metafunc = self.Metafunc(lambda x, y: None) + metafunc.parametrize(("x", "y"), [(1, 2), (3, 4)]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == dict(x=1, y=2) assert metafunc._calls[0].id == "1-2" @@ -672,16 +746,20 @@ class TestMetafunc(object): """) def test_format_args(self): - def function1(): pass + def function1(): + pass assert fixtures._format_args(function1) == '()' - def function2(arg1): pass + def function2(arg1): + pass assert fixtures._format_args(function2) == "(arg1)" - def function3(arg1, arg2="qwe"): pass + def function3(arg1, arg2="qwe"): + pass assert fixtures._format_args(function3) == "(arg1, arg2='qwe')" - def function4(arg1, *args, **kwargs): pass + def function4(arg1, *args, **kwargs): + pass assert fixtures._format_args(function4) == "(arg1, *args, **kwargs)" @@ -689,7 +767,7 @@ class TestMetafuncFunctional(object): def test_attributes(self, testdir): p = testdir.makepyfile(""" # assumes that generate/provide runs in the same process - import py, pytest + import sys, pytest def pytest_generate_tests(metafunc): metafunc.addcall(param=metafunc) @@ -708,7 +786,7 @@ class TestMetafuncFunctional(object): def test_method(self, metafunc, pytestconfig): assert metafunc.config == pytestconfig assert metafunc.module.__name__ == __name__ - if py.std.sys.version_info > (3, 0): + if sys.version_info > (3, 0): unbound = TestClass.test_method else: unbound = TestClass.test_method.im_func @@ -776,7 +854,6 @@ class TestMetafuncFunctional(object): result = testdir.runpytest(p) result.assert_outcomes(passed=1) - def test_generate_plugin_and_module(self, testdir): testdir.makeconftest(""" def pytest_generate_tests(metafunc): @@ -920,6 +997,10 @@ class TestMetafuncFunctional(object): ]) def test_parametrize_with_ids(self, testdir): + testdir.makeini(""" + [pytest] + console_output_style=classic + """) testdir.makepyfile(""" 
import pytest def pytest_generate_tests(metafunc): @@ -965,9 +1046,9 @@ class TestMetafuncFunctional(object): result = testdir.runpytest("-v") assert result.ret == 1 result.stdout.fnmatch_lines_random([ - "*test_function*basic*PASSED", - "*test_function*1-1*PASSED", - "*test_function*advanced*FAILED", + "*test_function*basic*PASSED*", + "*test_function*1-1*PASSED*", + "*test_function*advanced*FAILED*", ]) def test_fixture_parametrized_empty_ids(self, testdir): @@ -1022,8 +1103,8 @@ class TestMetafuncFunctional(object): result = testdir.runpytest("-v") assert result.ret == 1 result.stdout.fnmatch_lines_random([ - "*test_function*a0*PASSED", - "*test_function*a1*FAILED" + "*test_function*a0*PASSED*", + "*test_function*a1*FAILED*" ]) @pytest.mark.parametrize(("scope", "length"), @@ -1031,21 +1112,21 @@ class TestMetafuncFunctional(object): def test_parametrize_scope_overrides(self, testdir, scope, length): testdir.makepyfile(""" import pytest - l = [] + values = [] def pytest_generate_tests(metafunc): if "arg" in metafunc.funcargnames: metafunc.parametrize("arg", [1,2], indirect=True, scope=%r) @pytest.fixture def arg(request): - l.append(request.param) + values.append(request.param) return request.param def test_hello(arg): assert arg in (1,2) def test_world(arg): assert arg in (1,2) def test_checklength(): - assert len(l) == %d + assert len(values) == %d """ % (scope, length)) reprec = testdir.inline_run() reprec.assertoutcome(passed=5) @@ -1114,7 +1195,7 @@ class TestMetafuncFunctional(object): @pytest.mark.issue463 @pytest.mark.parametrize('attr', ['parametrise', 'parameterize', - 'parameterise']) + 'parameterise']) def test_parametrize_misspelling(self, testdir, attr): testdir.makepyfile(""" import pytest @@ -1249,8 +1330,10 @@ class TestMetafuncFunctionalAuto(object): assert output.count('preparing foo-3') == 1 +@pytest.mark.filterwarnings('ignore:Applying marks directly to parameters') +@pytest.mark.issue308 class TestMarkersWithParametrization(object): - pytestmark = pytest.mark.issue308 + def test_simple_mark(self, testdir): s = """ import pytest @@ -1434,7 +1517,6 @@ class TestMarkersWithParametrization(object): reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=2) - @pytest.mark.issue290 def test_parametrize_ID_generation_string_int_works(self, testdir): testdir.makepyfile(""" @@ -1451,7 +1533,6 @@ class TestMarkersWithParametrization(object): reprec = testdir.inline_run() reprec.assertoutcome(passed=2) - @pytest.mark.parametrize('strict', [True, False]) def test_parametrize_marked_value(self, testdir, strict): s = """ @@ -1475,7 +1556,6 @@ class TestMarkersWithParametrization(object): passed, failed = (0, 2) if strict else (2, 0) reprec.assertoutcome(passed=passed, failed=failed) - def test_pytest_make_parametrize_id(self, testdir): testdir.makeconftest(""" def pytest_make_parametrize_id(config, val): diff --git a/testing/python/raises.py b/testing/python/raises.py index 21a6f808c..053426395 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -1,3 +1,4 @@ +from _pytest.outcomes import Failed import pytest import sys @@ -61,6 +62,11 @@ class TestRaises(object): with pytest.raises(TypeError): pytest.raises('wrong', lambda: None) + def test_invalid_arguments_to_raises(self): + with pytest.raises(TypeError, match='unknown'): + with pytest.raises(TypeError, unknown='bogus'): + raise ValueError() + def test_tuple(self): with pytest.raises((KeyError, ValueError)): raise KeyError('oops') @@ -118,7 +124,6 @@ class TestRaises(object): for o in 
gc.get_objects(): assert type(o) is not T - def test_raises_match(self): msg = r"with base \d+" with pytest.raises(ValueError, match=msg): @@ -133,3 +138,30 @@ class TestRaises(object): with pytest.raises(AssertionError, match=expr): with pytest.raises(ValueError, match=msg): int('asdf', base=10) + + def test_raises_match_wrong_type(self): + """Raising an exception with the wrong type and match= given. + + pytest should throw the unexpected exception - the pattern match is not + really relevant if we got a different exception. + """ + with pytest.raises(ValueError): + with pytest.raises(IndexError, match='nomatch'): + int('asdf') + + def test_raises_exception_looks_iterable(self): + from six import add_metaclass + + class Meta(type(object)): + def __getitem__(self, item): + return 1/0 + + def __len__(self): + return 1 + + @add_metaclass(Meta) + class ClassLooksIterableException(Exception): + pass + + with pytest.raises(Failed, match="DID NOT RAISE "): + pytest.raises(ClassLooksIterableException, lambda: None) diff --git a/testing/python/setup_only.py b/testing/python/setup_only.py index c780b197e..ab34312fc 100644 --- a/testing/python/setup_only.py +++ b/testing/python/setup_only.py @@ -187,7 +187,7 @@ def test_dynamic_fixture_request(testdir): pass @pytest.fixture() def dependent_fixture(request): - request.getfuncargvalue('dynamically_requested_fixture') + request.getfixturevalue('dynamically_requested_fixture') def test_dyn(dependent_fixture): pass ''') @@ -238,6 +238,6 @@ def test_show_fixtures_and_execute_test(testdir): result.stdout.fnmatch_lines([ '*SETUP F arg*', - '*test_arg (fixtures used: arg)F', + '*test_arg (fixtures used: arg)F*', '*TEARDOWN F arg*', ]) diff --git a/testing/python/show_fixtures_per_test.py b/testing/python/show_fixtures_per_test.py index 18563e818..741f33946 100644 --- a/testing/python/show_fixtures_per_test.py +++ b/testing/python/show_fixtures_per_test.py @@ -135,3 +135,24 @@ def test_verbose_include_private_fixtures_and_loc(testdir): 'arg3 -- test_verbose_include_private_fixtures_and_loc.py:3', ' arg3 from testmodule', ]) + + +def test_doctest_items(testdir): + testdir.makepyfile(''' + def foo(): + """ + >>> 1 + 1 + 2 + """ + ''') + testdir.maketxtfile(''' + >>> 1 + 1 + 2 + ''') + result = testdir.runpytest("--fixtures-per-test", "--doctest-modules", + "--doctest-glob=*.txt", "-v") + assert result.ret == 0 + + result.stdout.fnmatch_lines([ + '*collected 2 items*', + ]) diff --git a/testing/python/test_deprecations.py b/testing/python/test_deprecations.py new file mode 100644 index 000000000..5001f765f --- /dev/null +++ b/testing/python/test_deprecations.py @@ -0,0 +1,22 @@ +import pytest + +from _pytest.python import PyCollector + + +class PyCollectorMock(PyCollector): + """evil hack""" + + def __init__(self): + self.called = False + + def _makeitem(self, *k): + """hack to disable the actual behaviour""" + self.called = True + + +def test_pycollector_makeitem_is_deprecated(): + + collector = PyCollectorMock() + with pytest.deprecated_call(): + collector.makeitem('foo', 'bar') + assert collector.called diff --git a/testing/test_argcomplete.py b/testing/test_argcomplete.py index 6887c419c..7a5e25d69 100644 --- a/testing/test_argcomplete.py +++ b/testing/test_argcomplete.py @@ -1,8 +1,11 @@ from __future__ import absolute_import, division, print_function -import py, pytest +import subprocess +import sys +import pytest # test for _argcomplete but not specific for any application + def equal_with_bash(prefix, ffc, fc, out=None): res = ffc(prefix) res_bash = 
set(fc(prefix)) @@ -17,28 +20,32 @@ def equal_with_bash(prefix, ffc, fc, out=None): # copied from argcomplete.completers as import from there # also pulls in argcomplete.__init__ which opens filedescriptor 9 # this gives an IOError at the end of testrun + + def _wrapcall(*args, **kargs): try: - if py.std.sys.version_info > (2,7): - return py.std.subprocess.check_output(*args,**kargs).decode().splitlines() + if sys.version_info > (2, 7): + return subprocess.check_output(*args, **kargs).decode().splitlines() if 'stdout' in kargs: raise ValueError('stdout argument not allowed, it will be overridden.') - process = py.std.subprocess.Popen( - stdout=py.std.subprocess.PIPE, *args, **kargs) + process = subprocess.Popen( + stdout=subprocess.PIPE, *args, **kargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kargs.get("args") if cmd is None: cmd = args[0] - raise py.std.subprocess.CalledProcessError(retcode, cmd) + raise subprocess.CalledProcessError(retcode, cmd) return output.decode().splitlines() - except py.std.subprocess.CalledProcessError: + except subprocess.CalledProcessError: return [] + class FilesCompleter(object): 'File completer class, optionally takes a list of allowed extensions' - def __init__(self,allowednames=(),directories=True): + + def __init__(self, allowednames=(), directories=True): # Fix if someone passes in a string instead of a list if type(allowednames) is str: allowednames = [allowednames] @@ -50,33 +57,34 @@ class FilesCompleter(object): completion = [] if self.allowednames: if self.directories: - files = _wrapcall(['bash','-c', - "compgen -A directory -- '{p}'".format(p=prefix)]) - completion += [ f + '/' for f in files] + files = _wrapcall(['bash', '-c', + "compgen -A directory -- '{p}'".format(p=prefix)]) + completion += [f + '/' for f in files] for x in self.allowednames: completion += _wrapcall(['bash', '-c', - "compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)]) + "compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix)]) else: completion += _wrapcall(['bash', '-c', - "compgen -A file -- '{p}'".format(p=prefix)]) + "compgen -A file -- '{p}'".format(p=prefix)]) anticomp = _wrapcall(['bash', '-c', - "compgen -A directory -- '{p}'".format(p=prefix)]) + "compgen -A directory -- '{p}'".format(p=prefix)]) - completion = list( set(completion) - set(anticomp)) + completion = list(set(completion) - set(anticomp)) if self.directories: completion += [f + '/' for f in anticomp] return completion + class TestArgComplete(object): @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") def test_compare_with_compgen(self): from _pytest._argcomplete import FastFilesCompleter ffc = FastFilesCompleter() fc = FilesCompleter() - for x in '/ /d /data qqq'.split(): - assert equal_with_bash(x, ffc, fc, out=py.std.sys.stdout) + for x in ['/', '/d', '/data', 'qqq', '']: + assert equal_with_bash(x, ffc, fc, out=sys.stdout) @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") def test_remove_dir_prefix(self): @@ -87,4 +95,4 @@ class TestArgComplete(object): ffc = FastFilesCompleter() fc = FilesCompleter() for x in '/usr/'.split(): - assert not equal_with_bash(x, ffc, fc, out=py.std.sys.stdout) + assert not equal_with_bash(x, ffc, fc, out=sys.stdout) diff --git a/testing/test_assertion.py b/testing/test_assertion.py index c385f6aa1..328fe7fa9 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -129,11 +129,29 @@ class TestImportHookInstallation(object): result = testdir.runpytest_subprocess('--assert=rewrite') 
assert result.ret == 0 + def test_pytest_plugins_rewrite_module_names_correctly(self, testdir): + """Test that we match files correctly when they are marked for rewriting (#2939).""" + contents = { + 'conftest.py': """ + pytest_plugins = "ham" + """, + 'ham.py': "", + 'hamster.py': "", + 'test_foo.py': """ + def test_foo(pytestconfig): + assert pytestconfig.pluginmanager.rewrite_hook.find_module('ham') is not None + assert pytestconfig.pluginmanager.rewrite_hook.find_module('hamster') is None + """, + } + testdir.makepyfile(**contents) + result = testdir.runpytest_subprocess('--assert=rewrite') + assert result.ret == 0 + @pytest.mark.parametrize('mode', ['plain', 'rewrite']) @pytest.mark.parametrize('plugin_state', ['development', 'installed']) def test_installed_plugin_rewrite(self, testdir, mode, plugin_state): # Make sure the hook is installed early enough so that plugins - # installed via setuptools are re-written. + # installed via setuptools are rewritten. testdir.tmpdir.join('hampkg').ensure(dir=1) contents = { 'hampkg/__init__.py': """ @@ -229,9 +247,9 @@ class TestImportHookInstallation(object): return pkg.helper.tool """, 'pkg/other.py': """ - l = [3, 2] + values = [3, 2] def tool(): - assert l.pop() == 3 + assert values.pop() == 3 """, 'conftest.py': """ pytest_plugins = ['pkg.plugin'] @@ -248,7 +266,7 @@ class TestImportHookInstallation(object): result = testdir.runpytest_subprocess('--assert=rewrite') result.stdout.fnmatch_lines(['>*assert a == b*', 'E*assert 2 == 3*', - '>*assert l.pop() == 3*', + '>*assert values.pop() == 3*', 'E*AssertionError']) def test_register_assert_rewrite_checks_types(self): @@ -263,13 +281,13 @@ class TestBinReprIntegration(object): def test_pytest_assertrepr_compare_called(self, testdir): testdir.makeconftest(""" import pytest - l = [] + values = [] def pytest_assertrepr_compare(op, left, right): - l.append((op, left, right)) + values.append((op, left, right)) @pytest.fixture def list(request): - return l + return values """) testdir.makepyfile(""" def test_hello(): @@ -283,6 +301,7 @@ class TestBinReprIntegration(object): "*test_check*PASS*", ]) + def callequal(left, right, verbose=False): config = mock_config() config.verbose = verbose @@ -303,15 +322,15 @@ class TestAssert_reprcompare(object): assert '+ eggs' in diff def test_text_skipping(self): - lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs') + lines = callequal('a' * 50 + 'spam', 'a' * 50 + 'eggs') assert 'Skipping' in lines[1] for line in lines: - assert 'a'*50 not in line + assert 'a' * 50 not in line def test_text_skipping_verbose(self): - lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs', verbose=True) - assert '- ' + 'a'*50 + 'spam' in lines - assert '+ ' + 'a'*50 + 'eggs' in lines + lines = callequal('a' * 50 + 'spam', 'a' * 50 + 'eggs', verbose=True) + assert '- ' + 'a' * 50 + 'spam' in lines + assert '+ ' + 'a' * 50 + 'eggs' in lines def test_multiline_text_diff(self): left = 'foo\nspam\nbar' @@ -437,9 +456,9 @@ class TestAssert_reprcompare(object): assert len(expl) > 1 def test_list_tuples(self): - expl = callequal([], [(1,2)]) + expl = callequal([], [(1, 2)]) assert len(expl) > 1 - expl = callequal([(1,2)], []) + expl = callequal([(1, 2)], []) assert len(expl) > 1 def test_list_bad_repr(self): @@ -609,7 +628,7 @@ class TestTruncateExplanation(object): def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self): expl = ['a' * 100 for x in range(5)] - result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80) + result = 
truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) assert result == expl def test_truncates_at_8_lines_when_given_list_of_empty_strings(self): @@ -619,27 +638,27 @@ class TestTruncateExplanation(object): assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "43 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self): expl = ['a' for x in range(100)] - result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80) + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) assert result != expl assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "93 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self): expl = ['a' * 80 for x in range(16)] - result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80) + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) assert result != expl assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "9 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self): @@ -649,7 +668,7 @@ class TestTruncateExplanation(object): assert len(result) == 4 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "7 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self): @@ -659,7 +678,7 @@ class TestTruncateExplanation(object): assert len(result) == 1 + self.LINES_IN_TRUNCATION_MSG assert "Full output truncated" in result[-1] assert "1000 lines hidden" in result[-1] - last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] assert last_line_before_trunc_msg.endswith("...") def test_full_output_truncated(self, monkeypatch, testdir): @@ -712,6 +731,7 @@ def test_python25_compile_issue257(testdir): *1 failed* """) + def test_rewritten(testdir): testdir.makepyfile(""" def test_rewritten(): @@ -719,11 +739,13 @@ def test_rewritten(testdir): """) assert testdir.runpytest().ret == 0 + def test_reprcompare_notin(mock_config): detail = plugin.pytest_assertrepr_compare( mock_config, 'not in', 'foo', 'aaafoobbb')[1:] assert detail == ["'foo' is contained here:", ' aaafoobbb', '? 
+++'] + def test_pytest_assertrepr_compare_integration(testdir): testdir.makepyfile(""" def test_hello(): @@ -740,6 +762,7 @@ def test_pytest_assertrepr_compare_integration(testdir): "*E*50*", ]) + def test_sequence_comparison_uses_repr(testdir): testdir.makepyfile(""" def test_hello(): @@ -772,12 +795,12 @@ def test_assertrepr_loaded_per_dir(testdir): b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]') result = testdir.runpytest() result.stdout.fnmatch_lines([ - '*def test_base():*', - '*E*assert 1 == 2*', - '*def test_a():*', - '*E*assert summary a*', - '*def test_b():*', - '*E*assert summary b*']) + '*def test_base():*', + '*E*assert 1 == 2*', + '*def test_a():*', + '*E*assert summary a*', + '*def test_b():*', + '*E*assert summary b*']) def test_assertion_options(testdir): @@ -791,6 +814,7 @@ def test_assertion_options(testdir): result = testdir.runpytest_subprocess("--assert=plain") assert "3 == 4" not in result.stdout.str() + def test_triple_quoted_string_issue113(testdir): testdir.makepyfile(""" def test_hello(): @@ -802,6 +826,7 @@ def test_triple_quoted_string_issue113(testdir): ]) assert 'SyntaxError' not in result.stdout.str() + def test_traceback_failure(testdir): p1 = testdir.makepyfile(""" def g(): @@ -813,7 +838,7 @@ def test_traceback_failure(testdir): """) result = testdir.runpytest(p1, "--tb=long") result.stdout.fnmatch_lines([ - "*test_traceback_failure.py F", + "*test_traceback_failure.py F*", "====* FAILURES *====", "____*____", "", @@ -822,7 +847,7 @@ def test_traceback_failure(testdir): "", "*test_*.py:6: ", "_ _ _ *", - #"", + # "", " def f(x):", "> assert x == g()", "E assert 3 == 2", @@ -831,9 +856,9 @@ def test_traceback_failure(testdir): "*test_traceback_failure.py:4: AssertionError" ]) - result = testdir.runpytest(p1) # "auto" + result = testdir.runpytest(p1) # "auto" result.stdout.fnmatch_lines([ - "*test_traceback_failure.py F", + "*test_traceback_failure.py F*", "====* FAILURES *====", "____*____", "", @@ -881,7 +906,7 @@ def test_exception_handling_no_traceback(testdir): ]) -@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" ) +@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')") def test_warn_missing(testdir): testdir.makepyfile("") result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h") @@ -893,6 +918,7 @@ def test_warn_missing(testdir): "*WARNING*assert statements are not executed*", ]) + def test_recursion_source_decode(testdir): testdir.makepyfile(""" def test_something(): @@ -907,6 +933,7 @@ def test_recursion_source_decode(testdir): """) + def test_AssertionError_message(testdir): testdir.makepyfile(""" def test_hello(): @@ -920,6 +947,7 @@ def test_AssertionError_message(testdir): *AssertionError: (1, 2)* """) + @pytest.mark.skipif(PY3, reason='This bug does not exist on PY3') def test_set_with_unsortable_elements(): # issue #718 @@ -956,6 +984,7 @@ def test_set_with_unsortable_elements(): """).strip() assert '\n'.join(expl) == dedent + def test_diff_newline_at_end(monkeypatch, testdir): testdir.makepyfile(r""" def test_diff(): @@ -970,6 +999,7 @@ def test_diff_newline_at_end(monkeypatch, testdir): * ? 
+ """) + def test_assert_tuple_warning(testdir): testdir.makepyfile(""" def test_tuple(): @@ -981,6 +1011,7 @@ def test_assert_tuple_warning(testdir): '*assertion is always true*', ]) + def test_assert_indirect_tuple_no_warning(testdir): testdir.makepyfile(""" def test_tuple(): @@ -991,6 +1022,7 @@ def test_assert_indirect_tuple_no_warning(testdir): output = '\n'.join(result.stdout.lines) assert 'WR1' not in output + def test_assert_with_unicode(monkeypatch, testdir): testdir.makepyfile(u""" # -*- coding: utf-8 -*- @@ -1000,6 +1032,7 @@ def test_assert_with_unicode(monkeypatch, testdir): result = testdir.runpytest() result.stdout.fnmatch_lines(['*AssertionError*']) + def test_raise_unprintable_assertion_error(testdir): testdir.makepyfile(r""" def test_raise_assertion_error(): @@ -1008,6 +1041,7 @@ def test_raise_unprintable_assertion_error(testdir): result = testdir.runpytest() result.stdout.fnmatch_lines([r"> raise AssertionError('\xff')", 'E AssertionError: *']) + def test_raise_assertion_error_raisin_repr(testdir): testdir.makepyfile(u""" class RaisingRepr(object): @@ -1019,6 +1053,7 @@ def test_raise_assertion_error_raisin_repr(testdir): result = testdir.runpytest() result.stdout.fnmatch_lines(['E AssertionError: ']) + def test_issue_1944(testdir): testdir.makepyfile(""" def f(): diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index 8aee520b3..4f7c95600 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -1,29 +1,31 @@ from __future__ import absolute_import, division, print_function + import glob import os import py_compile import stat import sys +import textwrap import zipfile - import py import pytest -ast = pytest.importorskip("ast") -if sys.platform.startswith("java"): - # XXX should be xfail - pytest.skip("assert rewrite does currently not work on jython") - import _pytest._code from _pytest.assertion import util from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG, AssertionRewritingHook from _pytest.main import EXIT_NOTESTSCOLLECTED +ast = pytest.importorskip("ast") +if sys.platform.startswith("java"): + # XXX should be xfail + pytest.skip("assert rewrite does currently not work on jython") + def setup_module(mod): mod._old_reprcompare = util._reprcompare _pytest._code._reprcompare = None + def teardown_module(mod): util._reprcompare = mod._old_reprcompare del mod._old_reprcompare @@ -34,6 +36,7 @@ def rewrite(src): rewrite_asserts(tree) return tree + def getmsg(f, extra_ns=None, must_pass=False): """Rewrite the assertions in f, run it, and get the failure message.""" src = '\n'.join(_pytest._code.Code(f).source().lines) @@ -63,13 +66,18 @@ class TestAssertionRewrite(object): def test_place_initial_imports(self): s = """'Doc string'\nother = stuff""" m = rewrite(s) - assert isinstance(m.body[0], ast.Expr) - assert isinstance(m.body[0].value, ast.Str) - for imp in m.body[1:3]: + # Module docstrings in 3.7 are part of Module node, it's not in the body + # so we remove it so the following body items have the same indexes on + # all Python versions + if sys.version_info < (3, 7): + assert isinstance(m.body[0], ast.Expr) + assert isinstance(m.body[0].value, ast.Str) + del m.body[0] + for imp in m.body[0:2]: assert isinstance(imp, ast.Import) assert imp.lineno == 2 assert imp.col_offset == 0 - assert isinstance(m.body[3], ast.Assign) + assert isinstance(m.body[2], ast.Assign) s = """from __future__ import with_statement\nother_stuff""" m = rewrite(s) assert isinstance(m.body[0], ast.ImportFrom) @@ -78,16 +86,29 @@ 
class TestAssertionRewrite(object): assert imp.lineno == 2 assert imp.col_offset == 0 assert isinstance(m.body[3], ast.Expr) + s = """'doc string'\nfrom __future__ import with_statement""" + m = rewrite(s) + if sys.version_info < (3, 7): + assert isinstance(m.body[0], ast.Expr) + assert isinstance(m.body[0].value, ast.Str) + del m.body[0] + assert isinstance(m.body[0], ast.ImportFrom) + for imp in m.body[1:3]: + assert isinstance(imp, ast.Import) + assert imp.lineno == 2 + assert imp.col_offset == 0 s = """'doc string'\nfrom __future__ import with_statement\nother""" m = rewrite(s) - assert isinstance(m.body[0], ast.Expr) - assert isinstance(m.body[0].value, ast.Str) - assert isinstance(m.body[1], ast.ImportFrom) - for imp in m.body[2:4]: + if sys.version_info < (3, 7): + assert isinstance(m.body[0], ast.Expr) + assert isinstance(m.body[0].value, ast.Str) + del m.body[0] + assert isinstance(m.body[0], ast.ImportFrom) + for imp in m.body[1:3]: assert isinstance(imp, ast.Import) assert imp.lineno == 3 assert imp.col_offset == 0 - assert isinstance(m.body[4], ast.Expr) + assert isinstance(m.body[3], ast.Expr) s = """from . import relative\nother_stuff""" m = rewrite(s) for imp in m.body[0:2]: @@ -99,10 +120,24 @@ class TestAssertionRewrite(object): def test_dont_rewrite(self): s = """'PYTEST_DONT_REWRITE'\nassert 14""" m = rewrite(s) - assert len(m.body) == 2 - assert isinstance(m.body[0].value, ast.Str) - assert isinstance(m.body[1], ast.Assert) - assert m.body[1].msg is None + if sys.version_info < (3, 7): + assert len(m.body) == 2 + assert isinstance(m.body[0], ast.Expr) + assert isinstance(m.body[0].value, ast.Str) + del m.body[0] + else: + assert len(m.body) == 1 + assert m.body[0].msg is None + + def test_dont_rewrite_plugin(self, testdir): + contents = { + "conftest.py": "pytest_plugins = 'plugin'; import plugin", + "plugin.py": "'PYTEST_DONT_REWRITE'", + "test_foo.py": "def test_foo(): pass", + } + testdir.makepyfile(**contents) + result = testdir.runpytest_subprocess() + assert "warnings" not in "".join(result.outlines) def test_name(self): def f(): @@ -118,12 +153,12 @@ class TestAssertionRewrite(object): def f(): assert a_global # noqa - assert getmsg(f, {"a_global" : False}) == "assert False" + assert getmsg(f, {"a_global": False}) == "assert False" def f(): assert sys == 42 - assert getmsg(f, {"sys" : sys}) == "assert sys == 42" + assert getmsg(f, {"sys": sys}) == "assert sys == 42" def f(): assert cls == 42 # noqa @@ -131,7 +166,7 @@ class TestAssertionRewrite(object): class X(object): pass - assert getmsg(f, {"cls" : X}) == "assert cls == 42" + assert getmsg(f, {"cls": X}) == "assert cls == 42" def test_assert_already_has_message(self): def f(): @@ -238,13 +273,13 @@ class TestAssertionRewrite(object): def f(): assert x() and x() - assert getmsg(f, {"x" : x}) == """assert (False) + assert getmsg(f, {"x": x}) == """assert (False) + where False = x()""" def f(): assert False or x() - assert getmsg(f, {"x" : x}) == """assert (False or False) + assert getmsg(f, {"x": x}) == """assert (False or False) + where False = x()""" def f(): @@ -255,7 +290,7 @@ class TestAssertionRewrite(object): def f(): x = 1 y = 2 - assert x in {1 : None} and y in {} + assert x in {1: None} and y in {} assert getmsg(f) == "assert (1 in {1: None} and 2 in {})" @@ -348,7 +383,7 @@ class TestAssertionRewrite(object): def g(a=42, *args, **kwargs): return False - ns = {"g" : g} + ns = {"g": g} def f(): assert g() @@ -389,7 +424,7 @@ class TestAssertionRewrite(object): def f(): x = "a" - assert g(**{x : 2}) + 
assert g(**{x: 2}) assert getmsg(f, ns) == """assert False + where False = g(**{'a': 2})""" @@ -398,10 +433,10 @@ class TestAssertionRewrite(object): class X(object): g = 3 - ns = {"x" : X} + ns = {"x": X} def f(): - assert not x.g # noqa + assert not x.g # noqa assert getmsg(f, ns) == """assert not 3 + where 3 = x.g""" @@ -449,8 +484,8 @@ class TestAssertionRewrite(object): def test_len(self): def f(): - l = list(range(10)) - assert len(l) == 11 + values = list(range(10)) + assert len(values) == 11 assert getmsg(f).startswith("""assert 10 == 11 + where 10 = len([""") @@ -556,7 +591,7 @@ class TestRewriteOnImport(object): def test_readonly(self, testdir): sub = testdir.mkdir("testing") sub.join("test_readonly.py").write( - py.builtin._totext(""" + py.builtin._totext(""" def test_rewritten(): assert "@py_builtins" in globals() """).encode("utf-8"), "wb") @@ -609,7 +644,7 @@ def test_rewritten(): def test_optimized(): "hello" assert test_optimized.__doc__ is None""" - ) + ) p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None, rootdir=testdir.tmpdir) tmp = "--basetemp=%s" % p @@ -638,8 +673,8 @@ def test_rewritten(): testdir.tmpdir.join("test_newlines.py").write(b, "wb") assert testdir.runpytest().ret == 0 - @pytest.mark.skipif(sys.version_info < (3,3), - reason='packages without __init__.py not supported on python 2') + @pytest.mark.skipif(sys.version_info < (3, 4), + reason='packages without __init__.py not supported on python 2') def test_package_without__init__py(self, testdir): pkg = testdir.mkdir('a_package_without_init_py') pkg.join('module.py').ensure() @@ -804,22 +839,22 @@ class TestAssertionRewriteHookDetails(object): def test_write_pyc(self, testdir, tmpdir, monkeypatch): from _pytest.assertion.rewrite import _write_pyc from _pytest.assertion import AssertionState - try: - import __builtin__ as b - except ImportError: - import builtins as b + import atomicwrites + from contextlib import contextmanager config = testdir.parseconfig([]) state = AssertionState(config, "rewrite") source_path = tmpdir.ensure("source.py") pycpath = tmpdir.join("pyc").strpath assert _write_pyc(state, [1], source_path.stat(), pycpath) - def open(*args): + @contextmanager + def atomic_write_failed(fn, mode='r', overwrite=False): e = IOError() e.errno = 10 raise e + yield # noqa - monkeypatch.setattr(b, "open", open) + monkeypatch.setattr(atomicwrites, "atomic_write", atomic_write_failed) assert not _write_pyc(state, [1], source_path.stat(), pycpath) def test_resources_provider_for_loader(self, testdir): @@ -877,7 +912,7 @@ class TestAssertionRewriteHookDetails(object): def test_reload_is_same(self, testdir): # A file that will be picked up during collecting. 
testdir.tmpdir.join("file.py").ensure() - testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent(""" + testdir.tmpdir.join("pytest.ini").write(textwrap.dedent(""" [pytest] python_files = *.py """)) @@ -963,7 +998,7 @@ class TestIssue2121(): def test_simple_failure(): assert 1 + 1 == 3 """) - testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent(""" + testdir.tmpdir.join("pytest.ini").write(textwrap.dedent(""" [pytest] python_files = tests/**.py """)) diff --git a/testing/test_cache.py b/testing/test_cache.py deleted file mode 100755 index 600b5e6d9..000000000 --- a/testing/test_cache.py +++ /dev/null @@ -1,411 +0,0 @@ -from __future__ import absolute_import, division, print_function -import sys - -import _pytest -import pytest -import os -import shutil - -pytest_plugins = "pytester", - -class TestNewAPI(object): - def test_config_cache_makedir(self, testdir): - testdir.makeini("[pytest]") - config = testdir.parseconfigure() - with pytest.raises(ValueError): - config.cache.makedir("key/name") - - p = config.cache.makedir("name") - assert p.check() - - def test_config_cache_dataerror(self, testdir): - testdir.makeini("[pytest]") - config = testdir.parseconfigure() - cache = config.cache - pytest.raises(TypeError, lambda: cache.set("key/name", cache)) - config.cache.set("key/name", 0) - config.cache._getvaluepath("key/name").write("123invalid") - val = config.cache.get("key/name", -2) - assert val == -2 - - def test_cache_writefail_cachfile_silent(self, testdir): - testdir.makeini("[pytest]") - testdir.tmpdir.join('.cache').write('gone wrong') - config = testdir.parseconfigure() - cache = config.cache - cache.set('test/broken', []) - - @pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows') - def test_cache_writefail_permissions(self, testdir): - testdir.makeini("[pytest]") - testdir.tmpdir.ensure_dir('.cache').chmod(0) - config = testdir.parseconfigure() - cache = config.cache - cache.set('test/broken', []) - - @pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows') - def test_cache_failure_warns(self, testdir): - testdir.tmpdir.ensure_dir('.cache').chmod(0) - testdir.makepyfile(""" - def test_error(): - raise Exception - - """) - result = testdir.runpytest('-rw') - assert result.ret == 1 - result.stdout.fnmatch_lines([ - "*could not create cache path*", - "*1 warnings*", - ]) - - def test_config_cache(self, testdir): - testdir.makeconftest(""" - def pytest_configure(config): - # see that we get cache information early on - assert hasattr(config, "cache") - """) - testdir.makepyfile(""" - def test_session(pytestconfig): - assert hasattr(pytestconfig, "cache") - """) - result = testdir.runpytest() - assert result.ret == 0 - result.stdout.fnmatch_lines(["*1 passed*"]) - - def test_cachefuncarg(self, testdir): - testdir.makepyfile(""" - import pytest - def test_cachefuncarg(cache): - val = cache.get("some/thing", None) - assert val is None - cache.set("some/thing", [1]) - pytest.raises(TypeError, lambda: cache.get("some/thing")) - val = cache.get("some/thing", []) - assert val == [1] - """) - result = testdir.runpytest() - assert result.ret == 0 - result.stdout.fnmatch_lines(["*1 passed*"]) - - - -def test_cache_reportheader(testdir): - testdir.makepyfile(""" - def test_hello(): - pass - """) - result = testdir.runpytest("-v") - result.stdout.fnmatch_lines([ - "cachedir: .cache" - ]) - - -def test_cache_show(testdir): - result = testdir.runpytest("--cache-show") - assert result.ret == 0 - result.stdout.fnmatch_lines([ - 
"*cache is empty*" - ]) - testdir.makeconftest(""" - def pytest_configure(config): - config.cache.set("my/name", [1,2,3]) - config.cache.set("other/some", {1:2}) - dp = config.cache.makedir("mydb") - dp.ensure("hello") - dp.ensure("world") - """) - result = testdir.runpytest() - assert result.ret == 5 # no tests executed - result = testdir.runpytest("--cache-show") - result.stdout.fnmatch_lines_random([ - "*cachedir:*", - "-*cache values*-", - "*my/name contains:", - " [1, 2, 3]", - "*other/some contains*", - " {*1*: 2}", - "-*cache directories*-", - "*mydb/hello*length 0*", - "*mydb/world*length 0*", - ]) - - -class TestLastFailed(object): - - def test_lastfailed_usecase(self, testdir, monkeypatch): - monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - p = testdir.makepyfile(""" - def test_1(): - assert 0 - def test_2(): - assert 0 - def test_3(): - assert 1 - """) - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) - p.write(_pytest._code.Source(""" - def test_1(): - assert 1 - - def test_2(): - assert 1 - - def test_3(): - assert 0 - """)) - result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*2 passed*1 desel*", - ]) - result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) - result = testdir.runpytest("--lf", "--cache-clear") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) - - # Run this again to make sure clear-cache is robust - if os.path.isdir('.cache'): - shutil.rmtree('.cache') - result = testdir.runpytest("--lf", "--cache-clear") - result.stdout.fnmatch_lines([ - "*1 failed*2 passed*", - ]) - - def test_failedfirst_order(self, testdir): - testdir.tmpdir.join('test_a.py').write(_pytest._code.Source(""" - def test_always_passes(): - assert 1 - """)) - testdir.tmpdir.join('test_b.py').write(_pytest._code.Source(""" - def test_always_fails(): - assert 0 - """)) - result = testdir.runpytest() - # Test order will be collection order; alphabetical - result.stdout.fnmatch_lines([ - "test_a.py*", - "test_b.py*", - ]) - result = testdir.runpytest("--ff") - # Test order will be failing tests firs - result.stdout.fnmatch_lines([ - "test_b.py*", - "test_a.py*", - ]) - - def test_lastfailed_failedfirst_order(self, testdir): - testdir.makepyfile(**{ - 'test_a.py': """ - def test_always_passes(): - assert 1 - """, - 'test_b.py': """ - def test_always_fails(): - assert 0 - """, - }) - result = testdir.runpytest() - # Test order will be collection order; alphabetical - result.stdout.fnmatch_lines([ - "test_a.py*", - "test_b.py*", - ]) - result = testdir.runpytest("--lf", "--ff") - # Test order will be failing tests firs - result.stdout.fnmatch_lines([ - "test_b.py*", - ]) - assert 'test_a.py' not in result.stdout.str() - - def test_lastfailed_difference_invocations(self, testdir, monkeypatch): - monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - testdir.makepyfile(test_a=""" - def test_a1(): - assert 0 - def test_a2(): - assert 1 - """, test_b=""" - def test_b1(): - assert 0 - """) - p = testdir.tmpdir.join("test_a.py") - p2 = testdir.tmpdir.join("test_b.py") - - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) - result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 failed*", - ]) - p2.write(_pytest._code.Source(""" - def test_b1(): - assert 1 - """)) - result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 passed*", - ]) - result = testdir.runpytest("--lf", p) - result.stdout.fnmatch_lines([ - "*1 failed*1 desel*", - 
]) - - def test_lastfailed_usecase_splice(self, testdir, monkeypatch): - monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) - testdir.makepyfile(""" - def test_1(): - assert 0 - """) - p2 = testdir.tmpdir.join("test_something.py") - p2.write(_pytest._code.Source(""" - def test_2(): - assert 0 - """)) - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) - result = testdir.runpytest("--lf", p2) - result.stdout.fnmatch_lines([ - "*1 failed*", - ]) - result = testdir.runpytest("--lf") - result.stdout.fnmatch_lines([ - "*2 failed*", - ]) - - def test_lastfailed_xpass(self, testdir): - testdir.inline_runsource(""" - import pytest - @pytest.mark.xfail - def test_hello(): - assert 1 - """) - config = testdir.parseconfigure() - lastfailed = config.cache.get("cache/lastfailed", -1) - assert lastfailed == -1 - - def test_non_serializable_parametrize(self, testdir): - """Test that failed parametrized tests with unmarshable parameters - don't break pytest-cache. - """ - testdir.makepyfile(r""" - import pytest - - @pytest.mark.parametrize('val', [ - b'\xac\x10\x02G', - ]) - def test_fail(val): - assert False - """) - result = testdir.runpytest() - result.stdout.fnmatch_lines('*1 failed in*') - - def test_lastfailed_collectfailure(self, testdir, monkeypatch): - - testdir.makepyfile(test_maybe=""" - import py - env = py.std.os.environ - if '1' == env['FAILIMPORT']: - raise ImportError('fail') - def test_hello(): - assert '0' == env['FAILTEST'] - """) - - def rlf(fail_import, fail_run): - monkeypatch.setenv('FAILIMPORT', fail_import) - monkeypatch.setenv('FAILTEST', fail_run) - - testdir.runpytest('-q') - config = testdir.parseconfigure() - lastfailed = config.cache.get("cache/lastfailed", -1) - return lastfailed - - lastfailed = rlf(fail_import=0, fail_run=0) - assert lastfailed == -1 - - lastfailed = rlf(fail_import=1, fail_run=0) - assert list(lastfailed) == ['test_maybe.py'] - - lastfailed = rlf(fail_import=0, fail_run=1) - assert list(lastfailed) == ['test_maybe.py::test_hello'] - - - def test_lastfailed_failure_subset(self, testdir, monkeypatch): - - testdir.makepyfile(test_maybe=""" - import py - env = py.std.os.environ - if '1' == env['FAILIMPORT']: - raise ImportError('fail') - def test_hello(): - assert '0' == env['FAILTEST'] - """) - - testdir.makepyfile(test_maybe2=""" - import py - env = py.std.os.environ - if '1' == env['FAILIMPORT']: - raise ImportError('fail') - def test_hello(): - assert '0' == env['FAILTEST'] - - def test_pass(): - pass - """) - - def rlf(fail_import, fail_run, args=()): - monkeypatch.setenv('FAILIMPORT', fail_import) - monkeypatch.setenv('FAILTEST', fail_run) - - result = testdir.runpytest('-q', '--lf', *args) - config = testdir.parseconfigure() - lastfailed = config.cache.get("cache/lastfailed", -1) - return result, lastfailed - - result, lastfailed = rlf(fail_import=0, fail_run=0) - assert lastfailed == -1 - result.stdout.fnmatch_lines([ - '*3 passed*', - ]) - - result, lastfailed = rlf(fail_import=1, fail_run=0) - assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py'] - - - result, lastfailed = rlf(fail_import=0, fail_run=0, - args=('test_maybe2.py',)) - assert list(lastfailed) == ['test_maybe.py'] - - - # edge case of test selection - even if we remember failures - # from other tests we still need to run all tests if no test - # matches the failures - result, lastfailed = rlf(fail_import=0, fail_run=0, - args=('test_maybe2.py',)) - assert list(lastfailed) == ['test_maybe.py'] - result.stdout.fnmatch_lines([ - '*2 passed*', 
- ]) - - def test_lastfailed_creates_cache_when_needed(self, testdir): - # Issue #1342 - testdir.makepyfile(test_empty='') - testdir.runpytest('-q', '--lf') - assert not os.path.exists('.cache') - - testdir.makepyfile(test_successful='def test_success():\n assert True') - testdir.runpytest('-q', '--lf') - assert not os.path.exists('.cache') - - testdir.makepyfile(test_errored='def test_error():\n assert False') - testdir.runpytest('-q', '--lf') - assert os.path.exists('.cache') diff --git a/testing/test_cacheprovider.py b/testing/test_cacheprovider.py new file mode 100644 index 000000000..51e45dd48 --- /dev/null +++ b/testing/test_cacheprovider.py @@ -0,0 +1,744 @@ +from __future__ import absolute_import, division, print_function +import sys +import py +import _pytest +import pytest +import os +import shutil + +pytest_plugins = "pytester", + + +class TestNewAPI(object): + def test_config_cache_makedir(self, testdir): + testdir.makeini("[pytest]") + config = testdir.parseconfigure() + with pytest.raises(ValueError): + config.cache.makedir("key/name") + + p = config.cache.makedir("name") + assert p.check() + + def test_config_cache_dataerror(self, testdir): + testdir.makeini("[pytest]") + config = testdir.parseconfigure() + cache = config.cache + pytest.raises(TypeError, lambda: cache.set("key/name", cache)) + config.cache.set("key/name", 0) + config.cache._getvaluepath("key/name").write("123invalid") + val = config.cache.get("key/name", -2) + assert val == -2 + + def test_cache_writefail_cachfile_silent(self, testdir): + testdir.makeini("[pytest]") + testdir.tmpdir.join('.pytest_cache').write('gone wrong') + config = testdir.parseconfigure() + cache = config.cache + cache.set('test/broken', []) + + @pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows') + def test_cache_writefail_permissions(self, testdir): + testdir.makeini("[pytest]") + testdir.tmpdir.ensure_dir('.pytest_cache').chmod(0) + config = testdir.parseconfigure() + cache = config.cache + cache.set('test/broken', []) + + @pytest.mark.skipif(sys.platform.startswith('win'), reason='no chmod on windows') + def test_cache_failure_warns(self, testdir): + testdir.tmpdir.ensure_dir('.pytest_cache').chmod(0) + testdir.makepyfile(""" + def test_error(): + raise Exception + + """) + result = testdir.runpytest('-rw') + assert result.ret == 1 + result.stdout.fnmatch_lines([ + "*could not create cache path*", + "*2 warnings*", + ]) + + def test_config_cache(self, testdir): + testdir.makeconftest(""" + def pytest_configure(config): + # see that we get cache information early on + assert hasattr(config, "cache") + """) + testdir.makepyfile(""" + def test_session(pytestconfig): + assert hasattr(pytestconfig, "cache") + """) + result = testdir.runpytest() + assert result.ret == 0 + result.stdout.fnmatch_lines(["*1 passed*"]) + + def test_cachefuncarg(self, testdir): + testdir.makepyfile(""" + import pytest + def test_cachefuncarg(cache): + val = cache.get("some/thing", None) + assert val is None + cache.set("some/thing", [1]) + pytest.raises(TypeError, lambda: cache.get("some/thing")) + val = cache.get("some/thing", []) + assert val == [1] + """) + result = testdir.runpytest() + assert result.ret == 0 + result.stdout.fnmatch_lines(["*1 passed*"]) + + def test_custom_rel_cache_dir(self, testdir): + rel_cache_dir = os.path.join('custom_cache_dir', 'subdir') + testdir.makeini(""" + [pytest] + cache_dir = {cache_dir} + """.format(cache_dir=rel_cache_dir)) + testdir.makepyfile(test_errored='def test_error():\n assert False') 
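+        # running the suite (with its one failing test) should create the cache at the configured relative path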
+ testdir.runpytest() + assert testdir.tmpdir.join(rel_cache_dir).isdir() + + def test_custom_abs_cache_dir(self, testdir, tmpdir_factory): + tmp = str(tmpdir_factory.mktemp('tmp')) + abs_cache_dir = os.path.join(tmp, 'custom_cache_dir') + testdir.makeini(""" + [pytest] + cache_dir = {cache_dir} + """.format(cache_dir=abs_cache_dir)) + testdir.makepyfile(test_errored='def test_error():\n assert False') + testdir.runpytest() + assert py.path.local(abs_cache_dir).isdir() + + def test_custom_cache_dir_with_env_var(self, testdir, monkeypatch): + monkeypatch.setenv('env_var', 'custom_cache_dir') + testdir.makeini(""" + [pytest] + cache_dir = {cache_dir} + """.format(cache_dir='$env_var')) + testdir.makepyfile(test_errored='def test_error():\n assert False') + testdir.runpytest() + assert testdir.tmpdir.join('custom_cache_dir').isdir() + + +def test_cache_reportheader(testdir): + testdir.makepyfile(""" + def test_hello(): + pass + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "cachedir: .pytest_cache" + ]) + + +def test_cache_show(testdir): + result = testdir.runpytest("--cache-show") + assert result.ret == 0 + result.stdout.fnmatch_lines([ + "*cache is empty*" + ]) + testdir.makeconftest(""" + def pytest_configure(config): + config.cache.set("my/name", [1,2,3]) + config.cache.set("other/some", {1:2}) + dp = config.cache.makedir("mydb") + dp.ensure("hello") + dp.ensure("world") + """) + result = testdir.runpytest() + assert result.ret == 5 # no tests executed + result = testdir.runpytest("--cache-show") + result.stdout.fnmatch_lines_random([ + "*cachedir:*", + "-*cache values*-", + "*my/name contains:", + " [1, 2, 3]", + "*other/some contains*", + " {*1*: 2}", + "-*cache directories*-", + "*mydb/hello*length 0*", + "*mydb/world*length 0*", + ]) + + +class TestLastFailed(object): + + def test_lastfailed_usecase(self, testdir, monkeypatch): + monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) + p = testdir.makepyfile(""" + def test_1(): + assert 0 + def test_2(): + assert 0 + def test_3(): + assert 1 + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "*2 failed*", + ]) + p.write(_pytest._code.Source(""" + def test_1(): + assert 1 + + def test_2(): + assert 1 + + def test_3(): + assert 0 + """)) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines([ + "*2 passed*1 desel*", + ]) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + result = testdir.runpytest("--lf", "--cache-clear") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + + # Run this again to make sure clear-cache is robust + if os.path.isdir('.pytest_cache'): + shutil.rmtree('.pytest_cache') + result = testdir.runpytest("--lf", "--cache-clear") + result.stdout.fnmatch_lines([ + "*1 failed*2 passed*", + ]) + + def test_failedfirst_order(self, testdir): + testdir.tmpdir.join('test_a.py').write(_pytest._code.Source(""" + def test_always_passes(): + assert 1 + """)) + testdir.tmpdir.join('test_b.py').write(_pytest._code.Source(""" + def test_always_fails(): + assert 0 + """)) + result = testdir.runpytest() + # Test order will be collection order; alphabetical + result.stdout.fnmatch_lines([ + "test_a.py*", + "test_b.py*", + ]) + result = testdir.runpytest("--ff") + # Test order will be failing tests firs + result.stdout.fnmatch_lines([ + "test_b.py*", + "test_a.py*", + ]) + + def test_lastfailed_failedfirst_order(self, testdir): + testdir.makepyfile(**{ + 'test_a.py': """ + def test_always_passes(): + assert 1 + """, + 
'test_b.py': """
+                def test_always_fails():
+                    assert 0
+            """,
+        })
+        result = testdir.runpytest()
+        # Test order will be collection order; alphabetical
+        result.stdout.fnmatch_lines([
+            "test_a.py*",
+            "test_b.py*",
+        ])
+        result = testdir.runpytest("--lf", "--ff")
+        # Test order will be failing tests first
+        result.stdout.fnmatch_lines([
+            "test_b.py*",
+        ])
+        assert 'test_a.py' not in result.stdout.str()
+
+    def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
+        monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
+        testdir.makepyfile(test_a="""
+            def test_a1():
+                assert 0
+            def test_a2():
+                assert 1
+        """, test_b="""
+            def test_b1():
+                assert 0
+        """)
+        p = testdir.tmpdir.join("test_a.py")
+        p2 = testdir.tmpdir.join("test_b.py")
+
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*2 failed*",
+        ])
+        result = testdir.runpytest("--lf", p2)
+        result.stdout.fnmatch_lines([
+            "*1 failed*",
+        ])
+        p2.write(_pytest._code.Source("""
+            def test_b1():
+                assert 1
+        """))
+        result = testdir.runpytest("--lf", p2)
+        result.stdout.fnmatch_lines([
+            "*1 passed*",
+        ])
+        result = testdir.runpytest("--lf", p)
+        result.stdout.fnmatch_lines([
+            "*1 failed*1 desel*",
+        ])
+
+    def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
+        monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
+        testdir.makepyfile("""
+            def test_1():
+                assert 0
+        """)
+        p2 = testdir.tmpdir.join("test_something.py")
+        p2.write(_pytest._code.Source("""
+            def test_2():
+                assert 0
+        """))
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*2 failed*",
+        ])
+        result = testdir.runpytest("--lf", p2)
+        result.stdout.fnmatch_lines([
+            "*1 failed*",
+        ])
+        result = testdir.runpytest("--lf")
+        result.stdout.fnmatch_lines([
+            "*2 failed*",
+        ])
+
+    def test_lastfailed_xpass(self, testdir):
+        testdir.inline_runsource("""
+            import pytest
+            @pytest.mark.xfail
+            def test_hello():
+                assert 1
+        """)
+        config = testdir.parseconfigure()
+        lastfailed = config.cache.get("cache/lastfailed", -1)
+        assert lastfailed == -1
+
+    def test_non_serializable_parametrize(self, testdir):
+        """Test that failed parametrized tests with unmarshallable parameters
+        don't break pytest-cache.
+ """ + testdir.makepyfile(r""" + import pytest + + @pytest.mark.parametrize('val', [ + b'\xac\x10\x02G', + ]) + def test_fail(val): + assert False + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines('*1 failed in*') + + def test_terminal_report_lastfailed(self, testdir): + test_a = testdir.makepyfile(test_a=""" + def test_a1(): + pass + def test_a2(): + pass + """) + test_b = testdir.makepyfile(test_b=""" + def test_b1(): + assert 0 + def test_b2(): + assert 0 + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + 'collected 4 items', + '*2 failed, 2 passed in*', + ]) + + result = testdir.runpytest('--lf') + result.stdout.fnmatch_lines([ + 'collected 4 items / 2 deselected', + 'run-last-failure: rerun previous 2 failures', + '*2 failed, 2 deselected in*', + ]) + + result = testdir.runpytest(test_a, '--lf') + result.stdout.fnmatch_lines([ + 'collected 2 items', + 'run-last-failure: run all (no recorded failures)', + '*2 passed in*', + ]) + + result = testdir.runpytest(test_b, '--lf') + result.stdout.fnmatch_lines([ + 'collected 2 items', + 'run-last-failure: rerun previous 2 failures', + '*2 failed in*', + ]) + + result = testdir.runpytest('test_b.py::test_b1', '--lf') + result.stdout.fnmatch_lines([ + 'collected 1 item', + 'run-last-failure: rerun previous 1 failure', + '*1 failed in*', + ]) + + def test_terminal_report_failedfirst(self, testdir): + testdir.makepyfile(test_a=""" + def test_a1(): + assert 0 + def test_a2(): + pass + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + 'collected 2 items', + '*1 failed, 1 passed in*', + ]) + + result = testdir.runpytest('--ff') + result.stdout.fnmatch_lines([ + 'collected 2 items', + 'run-last-failure: rerun previous 1 failure first', + '*1 failed, 1 passed in*', + ]) + + def test_lastfailed_collectfailure(self, testdir, monkeypatch): + + testdir.makepyfile(test_maybe=""" + import os + env = os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + """) + + def rlf(fail_import, fail_run): + monkeypatch.setenv('FAILIMPORT', fail_import) + monkeypatch.setenv('FAILTEST', fail_run) + + testdir.runpytest('-q') + config = testdir.parseconfigure() + lastfailed = config.cache.get("cache/lastfailed", -1) + return lastfailed + + lastfailed = rlf(fail_import=0, fail_run=0) + assert lastfailed == -1 + + lastfailed = rlf(fail_import=1, fail_run=0) + assert list(lastfailed) == ['test_maybe.py'] + + lastfailed = rlf(fail_import=0, fail_run=1) + assert list(lastfailed) == ['test_maybe.py::test_hello'] + + def test_lastfailed_failure_subset(self, testdir, monkeypatch): + + testdir.makepyfile(test_maybe=""" + import os + env = os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + """) + + testdir.makepyfile(test_maybe2=""" + import os + env = os.environ + if '1' == env['FAILIMPORT']: + raise ImportError('fail') + def test_hello(): + assert '0' == env['FAILTEST'] + + def test_pass(): + pass + """) + + def rlf(fail_import, fail_run, args=()): + monkeypatch.setenv('FAILIMPORT', fail_import) + monkeypatch.setenv('FAILTEST', fail_run) + + result = testdir.runpytest('-q', '--lf', *args) + config = testdir.parseconfigure() + lastfailed = config.cache.get("cache/lastfailed", -1) + return result, lastfailed + + result, lastfailed = rlf(fail_import=0, fail_run=0) + assert lastfailed == -1 + result.stdout.fnmatch_lines([ + '*3 passed*', + ]) + + result, lastfailed = rlf(fail_import=1, 
fail_run=0) + assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py'] + + result, lastfailed = rlf(fail_import=0, fail_run=0, + args=('test_maybe2.py',)) + assert list(lastfailed) == ['test_maybe.py'] + + # edge case of test selection - even if we remember failures + # from other tests we still need to run all tests if no test + # matches the failures + result, lastfailed = rlf(fail_import=0, fail_run=0, + args=('test_maybe2.py',)) + assert list(lastfailed) == ['test_maybe.py'] + result.stdout.fnmatch_lines([ + '*2 passed*', + ]) + + def test_lastfailed_creates_cache_when_needed(self, testdir): + # Issue #1342 + testdir.makepyfile(test_empty='') + testdir.runpytest('-q', '--lf') + assert not os.path.exists('.pytest_cache/v/cache/lastfailed') + + testdir.makepyfile(test_successful='def test_success():\n assert True') + testdir.runpytest('-q', '--lf') + assert not os.path.exists('.pytest_cache/v/cache/lastfailed') + + testdir.makepyfile(test_errored='def test_error():\n assert False') + testdir.runpytest('-q', '--lf') + assert os.path.exists('.pytest_cache/v/cache/lastfailed') + + def test_xfail_not_considered_failure(self, testdir): + testdir.makepyfile(''' + import pytest + @pytest.mark.xfail + def test(): + assert 0 + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines('*1 xfailed*') + assert self.get_cached_last_failed(testdir) == [] + + def test_xfail_strict_considered_failure(self, testdir): + testdir.makepyfile(''' + import pytest + @pytest.mark.xfail(strict=True) + def test(): + pass + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines('*1 failed*') + assert self.get_cached_last_failed(testdir) == ['test_xfail_strict_considered_failure.py::test'] + + @pytest.mark.parametrize('mark', ['mark.xfail', 'mark.skip']) + def test_failed_changed_to_xfail_or_skip(self, testdir, mark): + testdir.makepyfile(''' + import pytest + def test(): + assert 0 + ''') + result = testdir.runpytest() + assert self.get_cached_last_failed(testdir) == ['test_failed_changed_to_xfail_or_skip.py::test'] + assert result.ret == 1 + + testdir.makepyfile(''' + import pytest + @pytest.{mark} + def test(): + assert 0 + '''.format(mark=mark)) + result = testdir.runpytest() + assert result.ret == 0 + assert self.get_cached_last_failed(testdir) == [] + assert result.ret == 0 + + def get_cached_last_failed(self, testdir): + config = testdir.parseconfigure() + return sorted(config.cache.get("cache/lastfailed", {})) + + def test_cache_cumulative(self, testdir): + """ + Test workflow where user fixes errors gradually file by file using --lf. + """ + # 1. initial run + test_bar = testdir.makepyfile(test_bar=""" + def test_bar_1(): + pass + def test_bar_2(): + assert 0 + """) + test_foo = testdir.makepyfile(test_foo=""" + def test_foo_3(): + pass + def test_foo_4(): + assert 0 + """) + testdir.runpytest() + assert self.get_cached_last_failed(testdir) == ['test_bar.py::test_bar_2', 'test_foo.py::test_foo_4'] + + # 2. fix test_bar_2, run only test_bar.py + testdir.makepyfile(test_bar=""" + def test_bar_1(): + pass + def test_bar_2(): + pass + """) + result = testdir.runpytest(test_bar) + result.stdout.fnmatch_lines('*2 passed*') + # ensure cache does not forget that test_foo_4 failed once before + assert self.get_cached_last_failed(testdir) == ['test_foo.py::test_foo_4'] + + result = testdir.runpytest('--last-failed') + result.stdout.fnmatch_lines('*1 failed, 3 deselected*') + assert self.get_cached_last_failed(testdir) == ['test_foo.py::test_foo_4'] + + # 3. 
fix test_foo_4, run only test_foo.py + test_foo = testdir.makepyfile(test_foo=""" + def test_foo_3(): + pass + def test_foo_4(): + pass + """) + result = testdir.runpytest(test_foo, '--last-failed') + result.stdout.fnmatch_lines('*1 passed, 1 deselected*') + assert self.get_cached_last_failed(testdir) == [] + + result = testdir.runpytest('--last-failed') + result.stdout.fnmatch_lines('*4 passed*') + assert self.get_cached_last_failed(testdir) == [] + + def test_lastfailed_no_failures_behavior_all_passed(self, testdir): + testdir.makepyfile(""" + def test_1(): + assert True + def test_2(): + assert True + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines(["*2 passed*"]) + result = testdir.runpytest("--lf") + result.stdout.fnmatch_lines(["*2 passed*"]) + result = testdir.runpytest("--lf", "--lfnf", "all") + result.stdout.fnmatch_lines(["*2 passed*"]) + result = testdir.runpytest("--lf", "--lfnf", "none") + result.stdout.fnmatch_lines(["*2 desel*"]) + + def test_lastfailed_no_failures_behavior_empty_cache(self, testdir): + testdir.makepyfile(""" + def test_1(): + assert True + def test_2(): + assert False + """) + result = testdir.runpytest("--lf", "--cache-clear") + result.stdout.fnmatch_lines(["*1 failed*1 passed*"]) + result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "all") + result.stdout.fnmatch_lines(["*1 failed*1 passed*"]) + result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "none") + result.stdout.fnmatch_lines(["*2 desel*"]) + + +class TestNewFirst(object): + def test_newfirst_usecase(self, testdir): + testdir.makepyfile(**{ + 'test_1/test_1.py': ''' + def test_1(): assert 1 + def test_2(): assert 1 + def test_3(): assert 1 + ''', + 'test_2/test_2.py': ''' + def test_1(): assert 1 + def test_2(): assert 1 + def test_3(): assert 1 + ''' + }) + + testdir.tmpdir.join('test_1/test_1.py').setmtime(1) + + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*test_1/test_1.py::test_1 PASSED*", + "*test_1/test_1.py::test_2 PASSED*", + "*test_1/test_1.py::test_3 PASSED*", + "*test_2/test_2.py::test_1 PASSED*", + "*test_2/test_2.py::test_2 PASSED*", + "*test_2/test_2.py::test_3 PASSED*", + ]) + + result = testdir.runpytest("-v", "--nf") + + result.stdout.fnmatch_lines([ + "*test_2/test_2.py::test_1 PASSED*", + "*test_2/test_2.py::test_2 PASSED*", + "*test_2/test_2.py::test_3 PASSED*", + "*test_1/test_1.py::test_1 PASSED*", + "*test_1/test_1.py::test_2 PASSED*", + "*test_1/test_1.py::test_3 PASSED*", + ]) + + testdir.tmpdir.join("test_1/test_1.py").write( + "def test_1(): assert 1\n" + "def test_2(): assert 1\n" + "def test_3(): assert 1\n" + "def test_4(): assert 1\n" + ) + testdir.tmpdir.join('test_1/test_1.py').setmtime(1) + + result = testdir.runpytest("-v", "--nf") + + result.stdout.fnmatch_lines([ + "*test_1/test_1.py::test_4 PASSED*", + "*test_2/test_2.py::test_1 PASSED*", + "*test_2/test_2.py::test_2 PASSED*", + "*test_2/test_2.py::test_3 PASSED*", + "*test_1/test_1.py::test_1 PASSED*", + "*test_1/test_1.py::test_2 PASSED*", + "*test_1/test_1.py::test_3 PASSED*", + ]) + + def test_newfirst_parametrize(self, testdir): + testdir.makepyfile(**{ + 'test_1/test_1.py': ''' + import pytest + @pytest.mark.parametrize('num', [1, 2]) + def test_1(num): assert num + ''', + 'test_2/test_2.py': ''' + import pytest + @pytest.mark.parametrize('num', [1, 2]) + def test_1(num): assert num + ''' + }) + + testdir.tmpdir.join('test_1/test_1.py').setmtime(1) + + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + 
"*test_1/test_1.py::test_1[1*", + "*test_1/test_1.py::test_1[2*", + "*test_2/test_2.py::test_1[1*", + "*test_2/test_2.py::test_1[2*" + ]) + + result = testdir.runpytest("-v", "--nf") + + result.stdout.fnmatch_lines([ + "*test_2/test_2.py::test_1[1*", + "*test_2/test_2.py::test_1[2*", + "*test_1/test_1.py::test_1[1*", + "*test_1/test_1.py::test_1[2*", + ]) + + testdir.tmpdir.join("test_1/test_1.py").write( + "import pytest\n" + "@pytest.mark.parametrize('num', [1, 2, 3])\n" + "def test_1(num): assert num\n" + ) + testdir.tmpdir.join('test_1/test_1.py').setmtime(1) + + result = testdir.runpytest("-v", "--nf") + + result.stdout.fnmatch_lines([ + "*test_1/test_1.py::test_1[3*", + "*test_2/test_2.py::test_1[1*", + "*test_2/test_2.py::test_1[2*", + "*test_1/test_1.py::test_1[1*", + "*test_1/test_1.py::test_1[2*", + ]) diff --git a/testing/test_capture.py b/testing/test_capture.py index 8f6f2ccb2..7fccc055d 100644 --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -49,10 +49,10 @@ def oswritebytes(fd, obj): os.write(fd, tobytes(obj)) - def StdCaptureFD(out=True, err=True, in_=True): return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture) + def StdCapture(out=True, err=True, in_=True): return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture) @@ -72,29 +72,29 @@ class TestCaptureManager(object): @needsosdup @pytest.mark.parametrize("method", - ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')]) + ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')]) def test_capturing_basic_api(self, method): capouter = StdCaptureFD() old = sys.stdout, sys.stderr, sys.stdin try: capman = CaptureManager(method) - capman.init_capturings() - outerr = capman.suspendcapture() + capman.start_global_capturing() + outerr = capman.suspend_global_capture() assert outerr == ("", "") - outerr = capman.suspendcapture() + outerr = capman.suspend_global_capture() assert outerr == ("", "") - print ("hello") - out, err = capman.suspendcapture() + print("hello") + out, err = capman.suspend_global_capture() if method == "no": assert old == (sys.stdout, sys.stderr, sys.stdin) else: assert not out - capman.resumecapture() - print ("hello") - out, err = capman.suspendcapture() + capman.resume_global_capture() + print("hello") + out, err = capman.suspend_global_capture() if method != "no": assert out == "hello\n" - capman.reset_capturings() + capman.stop_global_capturing() finally: capouter.stop_capturing() @@ -103,16 +103,16 @@ class TestCaptureManager(object): capouter = StdCaptureFD() try: capman = CaptureManager("fd") - capman.init_capturings() - pytest.raises(AssertionError, "capman.init_capturings()") - capman.reset_capturings() + capman.start_global_capturing() + pytest.raises(AssertionError, "capman.start_global_capturing()") + capman.stop_global_capturing() finally: capouter.stop_capturing() @pytest.mark.parametrize("method", ['fd', 'sys']) def test_capturing_unicode(testdir, method): - if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2,2): + if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2): pytest.xfail("does not work on pypy < 2.2") if sys.version_info >= (3, 0): obj = "'b\u00f6y'" @@ -234,7 +234,7 @@ class TestPerTestCapturing(object): "setup func1*", "in func1*", "teardown func1*", - #"*1 fixture failure*" + # "*1 fixture failure*" ]) def test_teardown_capturing_final(self, testdir): @@ -266,7 +266,7 @@ class TestPerTestCapturing(object): """) result = testdir.runpytest(p1) result.stdout.fnmatch_lines([ - 
"*test_capturing_outerr.py .F", + "*test_capturing_outerr.py .F*", "====* FAILURES *====", "____*____", "*test_capturing_outerr.py:8: ValueError", @@ -288,7 +288,7 @@ class TestLoggingInteraction(object): stream.close() # to free memory/release resources """) result = testdir.runpytest_subprocess(p) - result.stderr.str().find("atexit") == -1 + assert result.stderr.str().find("atexit") == -1 def test_logging_and_immediate_setupteardown(self, testdir): p = testdir.makepyfile(""" @@ -305,7 +305,7 @@ class TestLoggingInteraction(object): assert 0 """) for optargs in (('--capture=sys',), ('--capture=fd',)): - print (optargs) + print(optargs) result = testdir.runpytest_subprocess(p, *optargs) s = result.stdout.str() result.stdout.fnmatch_lines([ @@ -331,7 +331,7 @@ class TestLoggingInteraction(object): assert 0 """) for optargs in (('--capture=sys',), ('--capture=fd',)): - print (optargs) + print(optargs) result = testdir.runpytest_subprocess(p, *optargs) s = result.stdout.str() result.stdout.fnmatch_lines([ @@ -342,26 +342,6 @@ class TestLoggingInteraction(object): # verify proper termination assert "closed" not in s - def test_logging_initialized_in_test(self, testdir): - p = testdir.makepyfile(""" - import sys - def test_something(): - # pytest does not import logging - assert 'logging' not in sys.modules - import logging - logging.basicConfig() - logging.warn("hello432") - assert 0 - """) - result = testdir.runpytest_subprocess( - p, "--traceconfig", - "-p", "no:capturelog") - assert result.ret != 0 - result.stdout.fnmatch_lines([ - "*hello432*", - ]) - assert 'operation on closed file' not in result.stderr.str() - def test_conftestlogging_is_shown(self, testdir): testdir.makeconftest(""" import logging @@ -418,7 +398,7 @@ class TestCaptureFixture(object): result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*ERROR*setup*test_one*", - "E*capsys*capfd*same*time*", + "E*capfd*capsys*same*time*", "*ERROR*setup*test_two*", "E*capsys*capfd*same*time*", "*2 error*"]) @@ -438,10 +418,21 @@ class TestCaptureFixture(object): "*test_one*", "*capsys*capfd*same*time*", "*test_two*", - "*capsys*capfd*same*time*", + "*capfd*capsys*same*time*", "*2 failed in*", ]) + def test_capsyscapfdbinary(self, testdir): + p = testdir.makepyfile(""" + def test_one(capsys, capfdbinary): + pass + """) + result = testdir.runpytest(p) + result.stdout.fnmatch_lines([ + "*ERROR*setup*test_one*", + "E*capfdbinary*capsys*same*time*", + "*1 error*"]) + @pytest.mark.parametrize("method", ["sys", "fd"]) def test_capture_is_represented_on_failure_issue128(self, testdir, method): p = testdir.makepyfile(""" @@ -466,6 +457,51 @@ class TestCaptureFixture(object): """) reprec.assertoutcome(passed=1) + @needsosdup + def test_capfdbinary(self, testdir): + reprec = testdir.inline_runsource(""" + def test_hello(capfdbinary): + import os + # some likely un-decodable bytes + os.write(1, b'\\xfe\\x98\\x20') + out, err = capfdbinary.readouterr() + assert out == b'\\xfe\\x98\\x20' + assert err == b'' + """) + reprec.assertoutcome(passed=1) + + @pytest.mark.skipif( + sys.version_info < (3,), + reason='only have capsysbinary in python 3', + ) + def test_capsysbinary(self, testdir): + reprec = testdir.inline_runsource(""" + def test_hello(capsysbinary): + import sys + # some likely un-decodable bytes + sys.stdout.buffer.write(b'\\xfe\\x98\\x20') + out, err = capsysbinary.readouterr() + assert out == b'\\xfe\\x98\\x20' + assert err == b'' + """) + reprec.assertoutcome(passed=1) + + @pytest.mark.skipif( + sys.version_info >= (3,), + 
reason='only have capsysbinary in python 3',
+    )
+    def test_capsysbinary_forbidden_in_python2(self, testdir):
+        testdir.makepyfile("""
+            def test_hello(capsysbinary):
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*test_hello*",
+            "*capsysbinary is only supported on python 3*",
+            "*1 error in*",
+        ])
+
     def test_partial_setup_failure(self, testdir):
         p = testdir.makepyfile("""
             def test_hello(capsys, missingarg):
@@ -502,20 +538,64 @@ class TestCaptureFixture(object):
         assert 'closed' not in result.stderr.str()
 
     @pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
-    def test_disabled_capture_fixture(self, testdir, fixture):
+    @pytest.mark.parametrize('no_capture', [True, False])
+    def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
         testdir.makepyfile("""
             def test_disabled({fixture}):
                 print('captured before')
                 with {fixture}.disabled():
                     print('while capture is disabled')
                 print('captured after')
+                assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
+
+            def test_normal():
+                print('test_normal executed')
         """.format(fixture=fixture))
-        result = testdir.runpytest_subprocess()
+        args = ('-s',) if no_capture else ()
+        result = testdir.runpytest_subprocess(*args)
         result.stdout.fnmatch_lines("""
             *while capture is disabled*
         """)
         assert 'captured before' not in result.stdout.str()
         assert 'captured after' not in result.stdout.str()
+        if no_capture:
+            assert 'test_normal executed' in result.stdout.str()
+        else:
+            assert 'test_normal executed' not in result.stdout.str()
+
+    @pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
+    def test_fixture_use_by_other_fixtures(self, testdir, fixture):
+        """
+        Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
+        """
+        testdir.makepyfile("""
+            from __future__ import print_function
+            import sys
+            import pytest
+
+            @pytest.fixture
+            def captured_print({fixture}):
+                print('stdout contents begin')
+                print('stderr contents begin', file=sys.stderr)
+                out, err = {fixture}.readouterr()
+
+                yield out, err
+
+                print('stdout contents end')
+                print('stderr contents end', file=sys.stderr)
+                out, err = {fixture}.readouterr()
+                assert out == 'stdout contents end\\n'
+                assert err == 'stderr contents end\\n'
+
+            def test_captured_print(captured_print):
+                out, err = captured_print
+                assert out == 'stdout contents begin\\n'
+                assert err == 'stderr contents begin\\n'
+        """.format(fixture=fixture))
+        result = testdir.runpytest_subprocess()
+        result.stdout.fnmatch_lines("*1 passed*")
+        assert 'stdout contents begin' not in result.stdout.str()
+        assert 'stderr contents begin' not in result.stdout.str()
 
 
 def test_setup_failure_does_not_kill_capturing(testdir):
@@ -671,7 +751,8 @@ def test_dontreadfrominput():
     assert not f.isatty()
     pytest.raises(IOError, f.read)
     pytest.raises(IOError, f.readlines)
-    pytest.raises(IOError, iter, f)
+    iter_f = iter(f)
+    pytest.raises(IOError, next, iter_f)
     pytest.raises(UnsupportedOperation, f.fileno)
     f.close()  # just for completeness
 
@@ -684,7 +765,8 @@ def test_dontreadfrominput_buffer_python3():
     assert not fb.isatty()
     pytest.raises(IOError, fb.read)
     pytest.raises(IOError, fb.readlines)
-    pytest.raises(IOError, iter, fb)
+    iter_fb = iter(fb)
+    pytest.raises(IOError, next, iter_fb)
     pytest.raises(ValueError, fb.fileno)
     f.close()  # just for completeness
 
@@ -705,6 +787,7 @@ def tmpfile(testdir):
     if not f.closed:
         f.close()
 
+
 @needsosdup
 def test_dupfile(tmpfile):
     flist = []
@@ -715,25 +798,37 @@ def test_dupfile(tmpfile):
         assert nf not in flist
         print(i,
end="", file=nf) flist.append(nf) + + fname_open = flist[0].name + assert fname_open == repr(flist[0].buffer) + for i in range(5): f = flist[i] f.close() + fname_closed = flist[0].name + assert fname_closed == repr(flist[0].buffer) + assert fname_closed != fname_open tmpfile.seek(0) s = tmpfile.read() assert "01234" in repr(s) tmpfile.close() + assert fname_closed == repr(flist[0].buffer) + def test_dupfile_on_bytesio(): io = py.io.BytesIO() f = capture.safe_text_dupfile(io, "wb") f.write("hello") assert io.getvalue() == b"hello" + assert 'BytesIO object' in f.name + def test_dupfile_on_textio(): io = py.io.TextIO() f = capture.safe_text_dupfile(io, "wb") f.write("hello") assert io.getvalue() == "hello" + assert not hasattr(f, 'name') @contextlib.contextmanager @@ -876,7 +971,7 @@ class TestStdCapture(object): def test_capturing_readouterr(self): with self.getcapture() as cap: - print ("hello world") + print("hello world") sys.stderr.write("hello error\n") out, err = cap.readouterr() assert out == "hello world\n" @@ -885,9 +980,17 @@ class TestStdCapture(object): out, err = cap.readouterr() assert err == "error2" + def test_capture_results_accessible_by_attribute(self): + with self.getcapture() as cap: + sys.stdout.write("hello") + sys.stderr.write("world") + capture_result = cap.readouterr() + assert capture_result.out == "hello" + assert capture_result.err == "world" + def test_capturing_readouterr_unicode(self): with self.getcapture() as cap: - print ("hx\xc4\x85\xc4\x87") + print("hx\xc4\x85\xc4\x87") out, err = cap.readouterr() assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8") @@ -902,7 +1005,7 @@ class TestStdCapture(object): def test_reset_twice_error(self): with self.getcapture() as cap: - print ("hello") + print("hello") out, err = cap.readouterr() pytest.raises(ValueError, cap.stop_capturing) assert out == "hello\n" @@ -916,7 +1019,7 @@ class TestStdCapture(object): sys.stderr.write("world") sys.stdout = capture.CaptureIO() sys.stderr = capture.CaptureIO() - print ("not seen") + print("not seen") sys.stderr.write("not seen\n") out, err = cap.readouterr() assert out == "hello" @@ -926,9 +1029,9 @@ class TestStdCapture(object): def test_capturing_error_recursive(self): with self.getcapture() as cap1: - print ("cap1") + print("cap1") with self.getcapture() as cap2: - print ("cap2") + print("cap2") out2, err2 = cap2.readouterr() out1, err1 = cap1.readouterr() assert out1 == "cap1\n" @@ -958,9 +1061,9 @@ class TestStdCapture(object): assert sys.stdin is old def test_stdin_nulled_by_default(self): - print ("XXX this test may well hang instead of crashing") - print ("XXX which indicates an error in the underlying capturing") - print ("XXX mechanisms") + print("XXX this test may well hang instead of crashing") + print("XXX which indicates an error in the underlying capturing") + print("XXX mechanisms") with self.getcapture(): pytest.raises(IOError, "sys.stdin.read()") @@ -1037,6 +1140,23 @@ def test_capture_not_started_but_reset(): capsys.stop_capturing() +def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys): + test_text = 'test text' + + print(test_text.encode(sys.stdout.encoding, 'replace')) + (out, err) = capsys.readouterr() + assert out + assert err == '' + + +def test_capsys_results_accessible_by_attribute(capsys): + sys.stdout.write("spam") + sys.stderr.write("eggs") + capture_result = capsys.readouterr() + assert capture_result.out == "spam" + assert capture_result.err == "eggs" + + @needsosdup @pytest.mark.parametrize('use', [True, False]) def 
test_fdcapture_tmpfile_remains_the_same(tmpfile, use): @@ -1052,6 +1172,7 @@ def test_fdcapture_tmpfile_remains_the_same(tmpfile, use): capfile2 = cap.err.tmpfile assert capfile2 == capfile + @needsosdup def test_close_and_capture_again(testdir): testdir.makepyfile(""" @@ -1071,7 +1192,6 @@ def test_close_and_capture_again(testdir): """) - @pytest.mark.parametrize('method', ['SysCapture', 'FDCapture']) def test_capturing_and_logging_fundamentals(testdir, method): if method == "StdCaptureFD" and not hasattr(os, 'dup'): @@ -1118,6 +1238,23 @@ def test_error_attribute_issue555(testdir): reprec.assertoutcome(passed=1) +@pytest.mark.skipif(not sys.platform.startswith('win') and sys.version_info[:2] >= (3, 6), + reason='only py3.6+ on windows') +def test_py36_windowsconsoleio_workaround_non_standard_streams(): + """ + Ensure _py36_windowsconsoleio_workaround function works with objects that + do not implement the full ``io``-based stream protocol, for example execnet channels (#2666). + """ + from _pytest.capture import _py36_windowsconsoleio_workaround + + class DummyStream(object): + def write(self, s): + pass + + stream = DummyStream() + _py36_windowsconsoleio_workaround(stream) + + def test_dontreadfrominput_has_encoding(testdir): testdir.makepyfile(""" import sys @@ -1130,7 +1267,31 @@ def test_dontreadfrominput_has_encoding(testdir): reprec.assertoutcome(passed=1) -def test_pickling_and_unpickling_enocded_file(): +def test_crash_on_closing_tmpfile_py27(testdir): + testdir.makepyfile(''' + from __future__ import print_function + import time + import threading + import sys + + def spam(): + f = sys.stderr + while True: + print('.', end='', file=f) + + def test_silly(): + t = threading.Thread(target=spam) + t.daemon = True + t.start() + time.sleep(0.5) + + ''') + result = testdir.runpytest_subprocess() + assert result.ret == 0 + assert 'IOError' not in result.stdout.str() + + +def test_pickling_and_unpickling_encoded_file(): # See https://bitbucket.org/pytest-dev/pytest/pull-request/194 # pickle.loads() raises infinite recursion if # EncodedFile.__getattr__ is not implemented properly diff --git a/testing/test_collection.py b/testing/test_collection.py index a90269789..f2d542c62 100644 --- a/testing/test_collection.py +++ b/testing/test_collection.py @@ -1,7 +1,11 @@ from __future__ import absolute_import, division, print_function -import pytest, py +import pprint +import sys +import pytest + +import _pytest._code +from _pytest.main import Session, EXIT_NOTESTSCOLLECTED, _in_venv -from _pytest.main import Session, EXIT_NOTESTSCOLLECTED class TestCollector(object): def test_collect_versus_item(self): @@ -33,7 +37,7 @@ class TestCollector(object): assert fn1 == fn2 assert fn1 != modcol - if py.std.sys.version_info < (3, 0): + if sys.version_info < (3, 0): assert cmp(fn1, fn2) == 0 assert hash(fn1) == hash(fn2) @@ -42,11 +46,11 @@ class TestCollector(object): assert not (fn1 == fn3) assert fn1 != fn3 - for fn in fn1,fn2,fn3: + for fn in fn1, fn2, fn3: assert fn != 3 assert fn != modcol - assert fn != [1,2,3] - assert [1,2,3] != fn + assert fn != [1, 2, 3] + assert [1, 2, 3] != fn assert modcol != fn def test_getparent(self, testdir): @@ -68,7 +72,6 @@ class TestCollector(object): parent = fn.getparent(pytest.Class) assert parent is cls - def test_getcustomfile_roundtrip(self, testdir): hello = testdir.makefile(".xxx", hello="world") testdir.makepyfile(conftest=""" @@ -102,6 +105,7 @@ class TestCollector(object): '*no tests ran in*', ]) + class TestCollectFS(object): def 
test_ignored_certain_directories(self, testdir): tmpdir = testdir.tmpdir @@ -121,6 +125,53 @@ class TestCollectFS(object): assert "test_notfound" not in s assert "test_found" in s + @pytest.mark.parametrize('fname', + ("activate", "activate.csh", "activate.fish", + "Activate", "Activate.bat", "Activate.ps1")) + def test_ignored_virtualenvs(self, testdir, fname): + bindir = "Scripts" if sys.platform.startswith("win") else "bin" + testdir.tmpdir.ensure("virtual", bindir, fname) + testfile = testdir.tmpdir.ensure("virtual", "test_invenv.py") + testfile.write("def test_hello(): pass") + + # by default, ignore tests inside a virtualenv + result = testdir.runpytest() + assert "test_invenv" not in result.stdout.str() + # allow test collection if user insists + result = testdir.runpytest("--collect-in-virtualenv") + assert "test_invenv" in result.stdout.str() + # allow test collection if user directly passes in the directory + result = testdir.runpytest("virtual") + assert "test_invenv" in result.stdout.str() + + @pytest.mark.parametrize('fname', + ("activate", "activate.csh", "activate.fish", + "Activate", "Activate.bat", "Activate.ps1")) + def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname): + bindir = "Scripts" if sys.platform.startswith("win") else "bin" + # norecursedirs takes priority + testdir.tmpdir.ensure(".virtual", bindir, fname) + testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py") + testfile.write("def test_hello(): pass") + result = testdir.runpytest("--collect-in-virtualenv") + assert "test_invenv" not in result.stdout.str() + # ...unless the virtualenv is explicitly given on the CLI + result = testdir.runpytest("--collect-in-virtualenv", ".virtual") + assert "test_invenv" in result.stdout.str() + + @pytest.mark.parametrize('fname', + ("activate", "activate.csh", "activate.fish", + "Activate", "Activate.bat", "Activate.ps1")) + def test__in_venv(self, testdir, fname): + """Directly test the virtual env detection function""" + bindir = "Scripts" if sys.platform.startswith("win") else "bin" + # no bin/activate, not a virtualenv + base_path = testdir.tmpdir.mkdir('venv') + assert _in_venv(base_path) is False + # with bin/activate, totally a virtualenv + base_path.ensure(bindir, fname) + assert _in_venv(base_path) is True + def test_custom_norecursedirs(self, testdir): testdir.makeini(""" [pytest] @@ -227,10 +278,12 @@ class TestPrunetraceback(object): """) testdir.makeconftest(""" import pytest - def pytest_make_collect_report(__multicall__): - rep = __multicall__.execute() + @pytest.hookimpl(hookwrapper=True) + def pytest_make_collect_report(): + outcome = yield + rep = outcome.get_result() rep.headerlines += ["header1"] - return rep + outcome.force_result(rep) """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ @@ -334,6 +387,7 @@ class TestCustomConftests(object): "*test_x*" ]) + class TestSession(object): def test_parsearg(self, testdir): p = testdir.makepyfile("def test_func(): pass") @@ -347,11 +401,11 @@ class TestSession(object): assert rcol.fspath == subdir parts = rcol._parsearg(p.basename) - assert parts[0] == target + assert parts[0] == target assert len(parts) == 1 parts = rcol._parsearg(p.basename + "::test_func") - assert parts[0] == target - assert parts[1] == "test_func" + assert parts[0] == target + assert parts[1] == "test_func" assert len(parts) == 2 def test_collect_topdir(self, testdir): @@ -362,9 +416,9 @@ class TestSession(object): topdir = testdir.tmpdir rcol = Session(config) assert topdir == rcol.fspath - 
#rootid = rcol.nodeid - #root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0] - #assert root2 == rcol, rootid + # rootid = rcol.nodeid + # root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0] + # assert root2 == rcol, rootid colitems = rcol.perform_collect([rcol.nodeid], genitems=False) assert len(colitems) == 1 assert colitems[0].fspath == p @@ -383,7 +437,7 @@ class TestSession(object): assert item.name == "test_func" newid = item.nodeid assert newid == id - py.std.pprint.pprint(hookrec.calls) + pprint.pprint(hookrec.calls) topdir = testdir.tmpdir # noqa hookrec.assert_contains([ ("pytest_collectstart", "collector.fspath == topdir"), @@ -433,7 +487,7 @@ class TestSession(object): id = p.basename items, hookrec = testdir.inline_genitems(id) - py.std.pprint.pprint(hookrec.calls) + pprint.pprint(hookrec.calls) assert len(items) == 2 hookrec.assert_contains([ ("pytest_collectstart", @@ -455,12 +509,12 @@ class TestSession(object): items, hookrec = testdir.inline_genitems() assert len(items) == 1 - py.std.pprint.pprint(hookrec.calls) + pprint.pprint(hookrec.calls) hookrec.assert_contains([ ("pytest_collectstart", "collector.fspath == test_aaa"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", - "report.nodeid.startswith('aaa/test_aaa.py')"), + "report.nodeid.startswith('aaa/test_aaa.py')"), ]) def test_collect_two_commandline_args(self, testdir): @@ -476,7 +530,7 @@ class TestSession(object): items, hookrec = testdir.inline_genitems(id) assert len(items) == 2 - py.std.pprint.pprint(hookrec.calls) + pprint.pprint(hookrec.calls) hookrec.assert_contains([ ("pytest_collectstart", "collector.fspath == test_aaa"), ("pytest_pycollect_makeitem", "name == 'test_func'"), @@ -510,6 +564,7 @@ class TestSession(object): # ensure we are reporting the collection of the single test item (#2464) assert [x.name for x in self.get_reported_items(hookrec)] == ['test_method'] + class Test_getinitialnodes(object): def test_global_file(self, testdir, tmpdir): x = tmpdir.ensure("x.py") @@ -518,7 +573,6 @@ class Test_getinitialnodes(object): col = testdir.getnode(config, x) assert isinstance(col, pytest.Module) assert col.name == 'x.py' - assert col.parent.name == testdir.tmpdir.basename assert col.parent.parent is None for col in col.listchain(): assert col.config is config @@ -537,6 +591,7 @@ class Test_getinitialnodes(object): for col in col.listchain(): assert col.config is config + class Test_genitems(object): def test_check_collect_hashes(self, testdir): p = testdir.makepyfile(""" @@ -650,9 +705,9 @@ class TestNodekeywords(object): def test_pass(): pass def test_fail(): assert 0 """) - l = list(modcol.keywords) - assert modcol.name in l - for x in l: + values = list(modcol.keywords) + assert modcol.name in values + for x in values: assert not x.startswith("_") assert modcol.name in repr(modcol.keywords) @@ -689,6 +744,7 @@ COLLECTION_ERROR_PY_FILES = dict( """, ) + def test_exit_on_collection_error(testdir): """Verify that all collection errors are collected and no tests executed""" testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) @@ -713,12 +769,11 @@ def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir): testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) res = testdir.runpytest("--maxfail=1") - assert res.ret == 2 + assert res.ret == 1 res.stdout.fnmatch_lines([ "*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*", - "*Interrupted: stopping after 1 failures*", ]) assert 'test_03' not in res.stdout.str() @@ -770,10 +825,34 @@ 
def test_continue_on_collection_errors_maxfail(testdir): testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3") - assert res.ret == 2 + assert res.ret == 1 res.stdout.fnmatch_lines([ "collected 2 items / 2 errors", - "*Interrupted: stopping after 3 failures*", "*1 failed, 2 error*", ]) + + +def test_fixture_scope_sibling_conftests(testdir): + """Regression test case for https://github.com/pytest-dev/pytest/issues/2836""" + foo_path = testdir.mkpydir("foo") + foo_path.join("conftest.py").write(_pytest._code.Source(""" + import pytest + @pytest.fixture + def fix(): + return 1 + """)) + foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1") + + # Tests in `food/` should not see the conftest fixture from `foo/` + food_path = testdir.mkpydir("food") + food_path.join("test_food.py").write("def test_food(fix): assert fix == 1") + + res = testdir.runpytest() + assert res.ret == 1 + + res.stdout.fnmatch_lines([ + "*ERROR at setup of test_food*", + "E*fixture 'fix' not found", + "*1 passed, 1 error*", + ]) diff --git a/testing/test_compat.py b/testing/test_compat.py index 7b2251ef6..c74801c6c 100644 --- a/testing/test_compat.py +++ b/testing/test_compat.py @@ -2,7 +2,8 @@ from __future__ import absolute_import, division, print_function import sys import pytest -from _pytest.compat import is_generator, get_real_func +from _pytest.compat import is_generator, get_real_func, safe_getattr +from _pytest.outcomes import OutcomeException def test_is_generator(): @@ -74,3 +75,27 @@ def test_is_generator_async_syntax(testdir): """) result = testdir.runpytest() result.stdout.fnmatch_lines(['*1 passed*']) + + +class ErrorsHelper(object): + @property + def raise_exception(self): + raise Exception('exception should be catched') + + @property + def raise_fail(self): + pytest.fail('fail should be catched') + + +def test_helper_failures(): + helper = ErrorsHelper() + with pytest.raises(Exception): + helper.raise_exception + with pytest.raises(OutcomeException): + helper.raise_fail + + +def test_safe_getattr(): + helper = ErrorsHelper() + assert safe_getattr(helper, 'raise_exception', 'default') == 'default' + assert safe_getattr(helper, 'raise_fail', 'default') == 'default' diff --git a/testing/test_config.py b/testing/test_config.py index 0d8e6abfc..39105f5d6 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1,10 +1,13 @@ from __future__ import absolute_import, division, print_function -import py, pytest +import sys +import textwrap +import pytest import _pytest._code -from _pytest.config import getcfg, get_common_ancestor, determine_setup +from _pytest.config import getcfg, get_common_ancestor, determine_setup, _iter_rewritable_modules from _pytest.main import EXIT_NOTESTSCOLLECTED + class TestParseIni(object): @pytest.mark.parametrize('section, filename', @@ -54,7 +57,7 @@ class TestParseIni(object): ('pytest', 'pytest.ini')], ) def test_ini_names(self, testdir, name, section): - testdir.tmpdir.join(name).write(py.std.textwrap.dedent(""" + testdir.tmpdir.join(name).write(textwrap.dedent(""" [{section}] minversion = 1.0 """.format(section=section))) @@ -63,11 +66,11 @@ class TestParseIni(object): def test_toxini_before_lower_pytestini(self, testdir): sub = testdir.tmpdir.mkdir("sub") - sub.join("tox.ini").write(py.std.textwrap.dedent(""" + sub.join("tox.ini").write(textwrap.dedent(""" [pytest] minversion = 2.0 """)) - testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent(""" + 
testdir.tmpdir.join("pytest.ini").write(textwrap.dedent(""" [pytest] minversion = 1.5 """)) @@ -85,6 +88,7 @@ class TestParseIni(object): result = testdir.inline_run("--confcutdir=.") assert result.ret == 0 + class TestConfigCmdlineParsing(object): def test_parsing_again_fails(self, testdir): config = testdir.parseconfig() @@ -99,13 +103,20 @@ class TestConfigCmdlineParsing(object): [pytest] custom = 0 """) - testdir.makefile(".cfg", custom = """ + testdir.makefile(".cfg", custom=""" [pytest] custom = 1 """) config = testdir.parseconfig("-c", "custom.cfg") assert config.getini("custom") == "1" + testdir.makefile(".cfg", custom_tool_pytest_section=""" + [tool:pytest] + custom = 1 + """) + config = testdir.parseconfig("-c", "custom_tool_pytest_section.cfg") + assert config.getini("custom") == "1" + def test_absolute_win32_path(self, testdir): temp_cfg_file = testdir.makefile(".cfg", custom=""" [pytest] @@ -116,14 +127,15 @@ class TestConfigCmdlineParsing(object): ret = pytest.main("-c " + temp_cfg_file) assert ret == _pytest.main.EXIT_OK + class TestConfigAPI(object): def test_config_trace(self, testdir): config = testdir.parseconfig() - l = [] - config.trace.root.setwriter(l.append) + values = [] + config.trace.root.setwriter(values.append) config.trace("hello") - assert len(l) == 1 - assert l[0] == "hello [config]\n" + assert len(values) == 1 + assert values[0] == "hello [config]\n" def test_config_getoption(self, testdir): testdir.makeconftest(""" @@ -135,7 +147,7 @@ class TestConfigAPI(object): assert config.getoption(x) == "this" pytest.raises(ValueError, "config.getoption('qweqwe')") - @pytest.mark.skipif('sys.version_info[:2] not in [(2, 6), (2, 7)]') + @pytest.mark.skipif('sys.version_info[0] < 3') def test_config_getoption_unicode(self, testdir): testdir.makeconftest(""" from __future__ import unicode_literals @@ -149,7 +161,7 @@ class TestConfigAPI(object): def test_config_getvalueorskip(self, testdir): config = testdir.parseconfig() pytest.raises(pytest.skip.Exception, - "config.getvalueorskip('hello')") + "config.getvalueorskip('hello')") verbose = config.getvalueorskip("verbose") assert verbose == config.option.verbose @@ -205,10 +217,10 @@ class TestConfigAPI(object): paths=hello world/sub.py """) config = testdir.parseconfig() - l = config.getini("paths") - assert len(l) == 2 - assert l[0] == p.dirpath('hello') - assert l[1] == p.dirpath('world/sub.py') + values = config.getini("paths") + assert len(values) == 2 + assert values[0] == p.dirpath('hello') + assert values[1] == p.dirpath('world/sub.py') pytest.raises(ValueError, config.getini, 'other') def test_addini_args(self, testdir): @@ -222,11 +234,11 @@ class TestConfigAPI(object): args=123 "123 hello" "this" """) config = testdir.parseconfig() - l = config.getini("args") - assert len(l) == 3 - assert l == ["123", "123 hello", "this"] - l = config.getini("a2") - assert l == list("123") + values = config.getini("args") + assert len(values) == 3 + assert values == ["123", "123 hello", "this"] + values = config.getini("a2") + assert values == list("123") def test_addini_linelist(self, testdir): testdir.makeconftest(""" @@ -240,11 +252,11 @@ class TestConfigAPI(object): second line """) config = testdir.parseconfig() - l = config.getini("xy") - assert len(l) == 2 - assert l == ["123 345", "second line"] - l = config.getini("a2") - assert l == [] + values = config.getini("xy") + assert len(values) == 2 + assert values == ["123 345", "second line"] + values = config.getini("a2") + assert values == [] 
@pytest.mark.parametrize('str_val, bool_val', [('True', True), ('no', False), ('no-ini', True)]) @@ -271,13 +283,13 @@ class TestConfigAPI(object): xy= 123 """) config = testdir.parseconfig() - l = config.getini("xy") - assert len(l) == 1 - assert l == ["123"] + values = config.getini("xy") + assert len(values) == 1 + assert values == ["123"] config.addinivalue_line("xy", "456") - l = config.getini("xy") - assert len(l) == 2 - assert l == ["123", "456"] + values = config.getini("xy") + assert len(values) == 2 + assert values == ["123", "456"] def test_addinivalue_line_new(self, testdir): testdir.makeconftest(""" @@ -287,13 +299,13 @@ class TestConfigAPI(object): config = testdir.parseconfig() assert not config.getini("xy") config.addinivalue_line("xy", "456") - l = config.getini("xy") - assert len(l) == 1 - assert l == ["456"] + values = config.getini("xy") + assert len(values) == 1 + assert values == ["456"] config.addinivalue_line("xy", "123") - l = config.getini("xy") - assert len(l) == 2 - assert l == ["456", "123"] + values = config.getini("xy") + assert len(values) == 2 + assert values == ["456", "123"] def test_confcutdir_check_isdir(self, testdir): """Give an error if --confcutdir is not a valid directory (#2078)""" @@ -304,6 +316,16 @@ class TestConfigAPI(object): config = testdir.parseconfig('--confcutdir', testdir.tmpdir.join('dir').ensure(dir=1)) assert config.getoption('confcutdir') == str(testdir.tmpdir.join('dir')) + @pytest.mark.parametrize('names, expected', [ + (['bar.py'], ['bar']), + (['foo', 'bar.py'], []), + (['foo', 'bar.pyc'], []), + (['foo', '__init__.py'], ['foo']), + (['foo', 'bar', '__init__.py'], []), + ]) + def test_iter_rewritable_modules(self, names, expected): + assert list(_iter_rewritable_modules(['/'.join(names)])) == expected + class TestConfigFromdictargs(object): def test_basic_behavior(self): @@ -445,9 +467,12 @@ def test_setuptools_importerror_issue1479(testdir, monkeypatch): testdir.parseconfig() -def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch): +@pytest.mark.parametrize('block_it', [True, False]) +def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch, block_it): pkg_resources = pytest.importorskip("pkg_resources") + plugin_module_placeholder = object() + def my_iter(name): assert name == "pytest11" @@ -463,14 +488,20 @@ def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch): dist = Dist() def load(self): - assert 0, "should not arrive here" + return plugin_module_placeholder return iter([EntryPoint()]) monkeypatch.setattr(pkg_resources, 'iter_entry_points', my_iter) - config = testdir.parseconfig("-p", "no:mytestplugin") - plugin = config.pluginmanager.getplugin("mytestplugin") - assert plugin is None + args = ("-p", "no:mytestplugin") if block_it else () + config = testdir.parseconfig(*args) + config.pluginmanager.import_plugin("mytestplugin") + if block_it: + assert "mytestplugin" not in sys.modules + assert config.pluginmanager.get_plugin('mytestplugin') is None + else: + assert config.pluginmanager.get_plugin('mytestplugin') is plugin_module_placeholder + def test_cmdline_processargs_simple(testdir): testdir.makeconftest(""" @@ -483,6 +514,7 @@ def test_cmdline_processargs_simple(testdir): "*-h*", ]) + def test_invalid_options_show_extra_information(testdir): """display extra information when pytest exits due to unrecognized options in the command-line""" @@ -528,6 +560,7 @@ def test_toolongargs_issue224(testdir): result = testdir.runpytest("-m", "hello" * 500) assert 
result.ret == EXIT_NOTESTSCOLLECTED + def test_config_in_subdirectory_colon_command_line_issue2148(testdir): conftest_source = ''' def pytest_addoption(parser): @@ -569,7 +602,7 @@ def test_notify_exception(testdir, capfd): def test_load_initial_conftest_last_ordering(testdir): - from _pytest.config import get_config + from _pytest.config import get_config pm = get_config().pluginmanager class My(object): @@ -579,13 +612,13 @@ def test_load_initial_conftest_last_ordering(testdir): m = My() pm.register(m) hc = pm.hook.pytest_load_initial_conftests - l = hc._nonwrappers + hc._wrappers + values = hc._nonwrappers + hc._wrappers expected = [ "_pytest.config", 'test_config', '_pytest.capture', ] - assert [x.function.__module__ for x in l] == expected + assert [x.function.__module__ for x in values] == expected def test_get_plugin_specs_as_list(): @@ -606,17 +639,17 @@ def test_get_plugin_specs_as_list(): class TestWarning(object): def test_warn_config(self, testdir): testdir.makeconftest(""" - l = [] + values = [] def pytest_configure(config): config.warn("C1", "hello") def pytest_logwarning(code, message): if message == "hello" and code == "C1": - l.append(1) + values.append(1) """) testdir.makepyfile(""" def test_proper(pytestconfig): import conftest - assert conftest.l == [1] + assert conftest.values == [1] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @@ -643,6 +676,7 @@ class TestWarning(object): *hello* """) + class TestRootdir(object): def test_simple_noini(self, tmpdir): assert get_common_ancestor([tmpdir]) == tmpdir @@ -666,7 +700,7 @@ class TestRootdir(object): rootdir, inifile, inicfg = determine_setup(None, args) assert rootdir == tmpdir assert inifile == inifile - rootdir, inifile, inicfg = determine_setup(None, [b,a]) + rootdir, inifile, inicfg = determine_setup(None, [b, a]) assert rootdir == tmpdir assert inifile == inifile @@ -704,7 +738,7 @@ class TestRootdir(object): class TestOverrideIniArgs(object): @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split()) def test_override_ini_names(self, testdir, name): - testdir.tmpdir.join(name).write(py.std.textwrap.dedent(""" + testdir.tmpdir.join(name).write(textwrap.dedent(""" [pytest] custom = 1.0""")) testdir.makeconftest(""" @@ -724,7 +758,6 @@ class TestOverrideIniArgs(object): assert result.ret == 0 result.stdout.fnmatch_lines(["custom_option:3.0"]) - def test_override_ini_pathlist(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): @@ -755,16 +788,18 @@ class TestOverrideIniArgs(object): testdir.makeini(""" [pytest] custom_option_1=custom_option_1 - custom_option_2=custom_option_2""") + custom_option_2=custom_option_2 + """) testdir.makepyfile(""" def test_multiple_options(pytestconfig): prefix = "custom_option" for x in range(1, 5): ini_value=pytestconfig.getini("%s_%d" % (prefix, x)) - print('\\nini%d:%s' % (x, ini_value))""") + print('\\nini%d:%s' % (x, ini_value)) + """) result = testdir.runpytest( "--override-ini", 'custom_option_1=fulldir=/tmp/user1', - 'custom_option_2=url=/tmp/user2?a=b&d=e', + '-o', 'custom_option_2=url=/tmp/user2?a=b&d=e', "-o", 'custom_option_3=True', "-o", 'custom_option_4=no', "-s") result.stdout.fnmatch_lines(["ini1:fulldir=/tmp/user1", @@ -827,3 +862,42 @@ class TestOverrideIniArgs(object): assert rootdir == tmpdir assert inifile is None + def test_addopts_before_initini(self, monkeypatch): + cache_dir = '.custom_cache' + monkeypatch.setenv('PYTEST_ADDOPTS', '-o cache_dir=%s' % cache_dir) + from _pytest.config import get_config + config = 
get_config() + config._preparse([], addopts=True) + assert config._override_ini == ['cache_dir=%s' % cache_dir] + + def test_override_ini_does_not_contain_paths(self): + """Check that -o no longer swallows all options after it (#3103)""" + from _pytest.config import get_config + config = get_config() + config._preparse(['-o', 'cache_dir=/cache', '/some/test/path']) + assert config._override_ini == ['cache_dir=/cache'] + + def test_multiple_override_ini_options(self, testdir, request): + """Ensure a file path following a '-o' option does not generate an error (#3103)""" + testdir.makepyfile(**{ + "conftest.py": """ + def pytest_addoption(parser): + parser.addini('foo', default=None, help='some option') + parser.addini('bar', default=None, help='some option') + """, + "test_foo.py": """ + def test(pytestconfig): + assert pytestconfig.getini('foo') == '1' + assert pytestconfig.getini('bar') == '0' + """, + "test_bar.py": """ + def test(): + assert False + """, + }) + result = testdir.runpytest('-o', 'foo=1', '-o', 'bar=0', 'test_foo.py') + assert 'ERROR:' not in result.stderr.str() + result.stdout.fnmatch_lines([ + 'collected 1 item', + '*= 1 passed in *=', + ]) diff --git a/testing/test_conftest.py b/testing/test_conftest.py index b6fd7814c..6566f752a 100644 --- a/testing/test_conftest.py +++ b/testing/test_conftest.py @@ -19,11 +19,13 @@ def basedir(request, tmpdir_factory): tmpdir.ensure("adir/b/__init__.py") return tmpdir + def ConftestWithSetinitial(path): conftest = PytestPluginManager() conftest_setinitial(conftest, [path]) return conftest + def conftest_setinitial(conftest, args, confcutdir=None): class Namespace(object): def __init__(self): @@ -32,6 +34,7 @@ def conftest_setinitial(conftest, args, confcutdir=None): self.noconftest = False conftest._set_initial_conftests(Namespace()) + class TestConftestValueAccessGlobal(object): def test_basic_init(self, basedir): conftest = PytestPluginManager() @@ -43,7 +46,7 @@ class TestConftestValueAccessGlobal(object): len(conftest._path2confmods) conftest._getconftestmodules(basedir) snap1 = len(conftest._path2confmods) - #assert len(conftest._path2confmods) == snap1 + 1 + # assert len(conftest._path2confmods) == snap1 + 1 conftest._getconftestmodules(basedir.join('adir')) assert len(conftest._path2confmods) == snap1 + 1 conftest._getconftestmodules(basedir.join('b')) @@ -65,11 +68,12 @@ class TestConftestValueAccessGlobal(object): startdir.ensure("xx", dir=True) conftest = ConftestWithSetinitial(startdir) mod, value = conftest._rget_with_confmod("a", startdir) - assert value == 1.5 + assert value == 1.5 path = py.path.local(mod.__file__) assert path.dirpath() == basedir.join("adir", "b") assert path.purebasename.startswith("conftest") + def test_conftest_in_nonpkg_with_init(tmpdir): tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3") tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5") @@ -77,13 +81,15 @@ def test_conftest_in_nonpkg_with_init(tmpdir): tmpdir.ensure("adir-1.0/__init__.py") ConftestWithSetinitial(tmpdir.join("adir-1.0", "b")) + def test_doubledash_considered(testdir): conf = testdir.mkdir("--option") - conf.join("conftest.py").ensure() + conf.ensure("conftest.py") conftest = PytestPluginManager() conftest_setinitial(conftest, [conf.basename, conf.basename]) - l = conftest._getconftestmodules(conf) - assert len(l) == 1 + values = conftest._getconftestmodules(conf) + assert len(values) == 1 + def test_issue151_load_all_conftests(testdir): names = "code proj src".split() @@ -96,6 +102,7 @@ def 
test_issue151_load_all_conftests(testdir): d = list(conftest._conftestpath2mod.values()) assert len(d) == len(names) + def test_conftest_global_import(testdir): testdir.makeconftest("x=3") p = testdir.makepyfile(""" @@ -117,32 +124,35 @@ def test_conftest_global_import(testdir): res = testdir.runpython(p) assert res.ret == 0 + def test_conftestcutdir(testdir): conf = testdir.makeconftest("") p = testdir.mkdir("x") conftest = PytestPluginManager() conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p) - l = conftest._getconftestmodules(p) - assert len(l) == 0 - l = conftest._getconftestmodules(conf.dirpath()) - assert len(l) == 0 + values = conftest._getconftestmodules(p) + assert len(values) == 0 + values = conftest._getconftestmodules(conf.dirpath()) + assert len(values) == 0 assert conf not in conftest._conftestpath2mod # but we can still import a conftest directly conftest._importconftest(conf) - l = conftest._getconftestmodules(conf.dirpath()) - assert l[0].__file__.startswith(str(conf)) + values = conftest._getconftestmodules(conf.dirpath()) + assert values[0].__file__.startswith(str(conf)) # and all sub paths get updated properly - l = conftest._getconftestmodules(p) - assert len(l) == 1 - assert l[0].__file__.startswith(str(conf)) + values = conftest._getconftestmodules(p) + assert len(values) == 1 + assert values[0].__file__.startswith(str(conf)) + def test_conftestcutdir_inplace_considered(testdir): conf = testdir.makeconftest("") conftest = PytestPluginManager() conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath()) - l = conftest._getconftestmodules(conf.dirpath()) - assert len(l) == 1 - assert l[0].__file__.startswith(str(conf)) + values = conftest._getconftestmodules(conf.dirpath()) + assert len(values) == 1 + assert values[0].__file__.startswith(str(conf)) + @pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split()) def test_setinitial_conftest_subdirs(testdir, name): @@ -151,12 +161,13 @@ def test_setinitial_conftest_subdirs(testdir, name): conftest = PytestPluginManager() conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir) if name not in ('whatever', '.dotdir'): - assert subconftest in conftest._conftestpath2mod + assert subconftest in conftest._conftestpath2mod assert len(conftest._conftestpath2mod) == 1 else: - assert subconftest not in conftest._conftestpath2mod + assert subconftest not in conftest._conftestpath2mod assert len(conftest._conftestpath2mod) == 0 + def test_conftest_confcutdir(testdir): testdir.makeconftest("assert 0") x = testdir.mkdir("x") @@ -168,6 +179,7 @@ def test_conftest_confcutdir(testdir): result.stdout.fnmatch_lines(["*--xyz*"]) assert 'warning: could not load initial' not in result.stdout.str() + def test_no_conftest(testdir): testdir.makeconftest("assert 0") result = testdir.runpytest("--noconftest") @@ -176,6 +188,7 @@ def test_no_conftest(testdir): result = testdir.runpytest() assert result.ret == EXIT_USAGEERROR + def test_conftest_existing_resultlog(testdir): x = testdir.mkdir("tests") x.join("conftest.py").write(_pytest._code.Source(""" @@ -186,6 +199,7 @@ def test_conftest_existing_resultlog(testdir): result = testdir.runpytest("-h", "--resultlog", "result.log") result.stdout.fnmatch_lines(["*--xyz*"]) + def test_conftest_existing_junitxml(testdir): x = testdir.mkdir("tests") x.join("conftest.py").write(_pytest._code.Source(""" @@ -196,6 +210,7 @@ def test_conftest_existing_junitxml(testdir): result = testdir.runpytest("-h", "--junitxml", "junit.xml") 
result.stdout.fnmatch_lines(["*--xyz*"]) + def test_conftest_import_order(testdir, monkeypatch): ct1 = testdir.makeconftest("") sub = testdir.mkdir("sub") @@ -217,7 +232,7 @@ def test_fixture_dependency(testdir, monkeypatch): ct1.write("") sub = testdir.mkdir("sub") sub.join("__init__.py").write("") - sub.join("conftest.py").write(py.std.textwrap.dedent(""" + sub.join("conftest.py").write(dedent(""" import pytest @pytest.fixture @@ -234,7 +249,7 @@ def test_fixture_dependency(testdir, monkeypatch): """)) subsub = sub.mkdir("subsub") subsub.join("__init__.py").write("") - subsub.join("test_bar.py").write(py.std.textwrap.dedent(""" + subsub.join("test_bar.py").write(dedent(""" import pytest @pytest.fixture @@ -250,16 +265,12 @@ def test_fixture_dependency(testdir, monkeypatch): def test_conftest_found_with_double_dash(testdir): sub = testdir.mkdir("sub") - sub.join("conftest.py").write(py.std.textwrap.dedent(""" + sub.join("conftest.py").write(dedent(""" def pytest_addoption(parser): parser.addoption("--hello-world", action="store_true") """)) p = sub.join("test_hello.py") - p.write(py.std.textwrap.dedent(""" - import pytest - def test_hello(found): - assert found == 1 - """)) + p.write("def test_hello(): pass") result = testdir.runpytest(str(p) + "::test_hello", "-h") result.stdout.fnmatch_lines(""" *--hello-world* @@ -306,9 +317,9 @@ class TestConftestVisibility(object): # use value from parent dir's """)) - print ("created directory structure:") + print("created directory structure:") for x in testdir.tmpdir.visit(): - print (" " + x.relto(testdir.tmpdir)) + print(" " + x.relto(testdir.tmpdir)) return { "runner": runner, @@ -319,38 +330,38 @@ class TestConftestVisibility(object): # N.B.: "swc" stands for "subdir with conftest.py" # "snc" stands for "subdir no [i.e. without] conftest.py" @pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [ - # Effective target: package/.. - ("runner", "..", 3), - ("package", "..", 3), - ("swc", "../..", 3), - ("snc", "../..", 3), + # Effective target: package/.. 
+ ("runner", "..", 3), + ("package", "..", 3), + ("swc", "../..", 3), + ("snc", "../..", 3), - # Effective target: package - ("runner", "../package", 3), - ("package", ".", 3), - ("swc", "..", 3), - ("snc", "..", 3), + # Effective target: package + ("runner", "../package", 3), + ("package", ".", 3), + ("swc", "..", 3), + ("snc", "..", 3), - # Effective target: package/swc - ("runner", "../package/swc", 1), - ("package", "./swc", 1), - ("swc", ".", 1), - ("snc", "../swc", 1), + # Effective target: package/swc + ("runner", "../package/swc", 1), + ("package", "./swc", 1), + ("swc", ".", 1), + ("snc", "../swc", 1), - # Effective target: package/snc - ("runner", "../package/snc", 1), - ("package", "./snc", 1), - ("swc", "../snc", 1), - ("snc", ".", 1), + # Effective target: package/snc + ("runner", "../package/snc", 1), + ("package", "./snc", 1), + ("swc", "../snc", 1), + ("snc", ".", 1), ]) @pytest.mark.issue616 def test_parsefactories_relative_node_ids( - self, testdir, chdir,testarg, expect_ntests_passed): + self, testdir, chdir, testarg, expect_ntests_passed): dirs = self._setup_tree(testdir) - print("pytest run in cwd: %s" %( + print("pytest run in cwd: %s" % ( dirs[chdir].relto(testdir.tmpdir))) - print("pytestarg : %s" %(testarg)) - print("expected pass : %s" %(expect_ntests_passed)) + print("pytestarg : %s" % (testarg)) + print("expected pass : %s" % (expect_ntests_passed)) with dirs[chdir].as_cwd(): reprec = testdir.inline_run(testarg, "-q", "--traceconfig") reprec.assertoutcome(passed=expect_ntests_passed) diff --git a/testing/test_doctest.py b/testing/test_doctest.py index 26f9c8469..314398395 100644 --- a/testing/test_doctest.py +++ b/testing/test_doctest.py @@ -19,7 +19,7 @@ class TestDoctests(object): """) for x in (testdir.tmpdir, checkfile): - #print "checking that %s returns custom items" % (x,) + # print "checking that %s returns custom items" % (x,) items, reprec = testdir.inline_genitems(x) assert len(items) == 1 assert isinstance(items[0], DoctestItem) @@ -32,14 +32,14 @@ class TestDoctests(object): path = testdir.makepyfile(whatever="#") for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + '--doctest-modules') assert len(items) == 0 def test_collect_module_single_modulelevel_doctest(self, testdir): path = testdir.makepyfile(whatever='""">>> pass"""') for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + '--doctest-modules') assert len(items) == 1 assert isinstance(items[0], DoctestItem) assert isinstance(items[0].parent, DoctestModule) @@ -52,7 +52,7 @@ class TestDoctests(object): """) for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + '--doctest-modules') assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) @@ -77,7 +77,7 @@ class TestDoctests(object): """) for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + '--doctest-modules') assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) @@ -135,9 +135,9 @@ class TestDoctests(object): @pytest.mark.parametrize( ' test_string, encoding', [ - (u'foo', 'ascii'), - (u'öäü', 'latin1'), - (u'öäü', 'utf-8') + (u'foo', 'ascii'), + (u'öäü', 'latin1'), + (u'öäü', 'utf-8') ] ) def test_encoding(self, testdir, test_string, encoding): @@ -173,7 +173,7 @@ class TestDoctests(object): "*UNEXPECTED*ZeroDivision*", ]) - def 
test_docstring_context_around_error(self, testdir): + def test_docstring_partial_context_around_error(self, testdir): """Test that we show some context before the actual line of a failing doctest. """ @@ -199,7 +199,7 @@ class TestDoctests(object): ''') result = testdir.runpytest('--doctest-modules') result.stdout.fnmatch_lines([ - '*docstring_context_around_error*', + '*docstring_partial_context_around_error*', '005*text-line-3', '006*text-line-4', '013*text-line-11', @@ -213,6 +213,32 @@ class TestDoctests(object): assert 'text-line-2' not in result.stdout.str() assert 'text-line-after' not in result.stdout.str() + def test_docstring_full_context_around_error(self, testdir): + """Test that we show the whole context before the actual line of a failing + doctest, provided that the context is up to 10 lines long. + """ + testdir.makepyfile(''' + def foo(): + """ + text-line-1 + text-line-2 + + >>> 1 + 1 + 3 + """ + ''') + result = testdir.runpytest('--doctest-modules') + result.stdout.fnmatch_lines([ + '*docstring_full_context_around_error*', + '003*text-line-1', + '004*text-line-2', + '006*>>> 1 + 1', + 'Expected:', + ' 3', + 'Got:', + ' 2', + ]) + def test_doctest_linedata_missing(self, testdir): testdir.tmpdir.join('hello.py').write(_pytest._code.Source(""" class Fun(object): @@ -294,7 +320,6 @@ class TestDoctests(object): "*:5: DocTestFailure" ]) - def test_txtfile_failing(self, testdir): p = testdir.maketxtfile(""" >>> i = 0 @@ -535,7 +560,7 @@ class TestDoctests(object): p = testdir.makepyfile(test_unicode_doctest_module=""" # -*- encoding: utf-8 -*- from __future__ import unicode_literals - + def fix_bad_unicode(text): ''' >>> print(fix_bad_unicode('único')) @@ -546,6 +571,50 @@ class TestDoctests(object): result = testdir.runpytest(p, '--doctest-modules') result.stdout.fnmatch_lines(['* 1 passed *']) + def test_reportinfo(self, testdir): + ''' + Test case to make sure that DoctestItem.reportinfo() returns lineno. + ''' + p = testdir.makepyfile(test_reportinfo=""" + def foo(x): + ''' + >>> foo('a') + 'b' + ''' + return 'c' + """) + items, reprec = testdir.inline_genitems(p, '--doctest-modules') + reportinfo = items[0].reportinfo() + assert reportinfo[1] == 1 + + def test_valid_setup_py(self, testdir): + ''' + Test to make sure that pytest ignores valid setup.py files when ran + with --doctest-modules + ''' + p = testdir.makepyfile(setup=""" + from setuptools import setup, find_packages + setup(name='sample', + version='0.0', + description='description', + packages=find_packages() + ) + """) + result = testdir.runpytest(p, '--doctest-modules') + result.stdout.fnmatch_lines(['*collected 0 items*']) + + def test_invalid_setup_py(self, testdir): + ''' + Test to make sure that pytest reads setup.py files that are not used + for python packages when ran with --doctest-modules + ''' + p = testdir.makepyfile(setup=""" + def test_foo(): + return 'bar' + """) + result = testdir.runpytest(p, '--doctest-modules') + result.stdout.fnmatch_lines(['*collected 1 item*']) + class TestLiterals(object): @@ -687,6 +756,27 @@ class TestDoctestSkips(object): reprec = testdir.inline_run("--doctest-modules") reprec.assertoutcome(passed=0, skipped=0) + def test_continue_on_failure(self, testdir): + testdir.maketxtfile(test_something=""" + >>> i = 5 + >>> def foo(): + ... 
raise ValueError('error1') + >>> foo() + >>> i + >>> i + 2 + 7 + >>> i + 1 + """) + result = testdir.runpytest("--doctest-modules", "--doctest-continue-on-failure") + result.assert_outcomes(passed=0, failed=1) + # The lines that contains the failure are 4, 5, and 8. The first one + # is a stack trace and the other two are mismatches. + result.stdout.fnmatch_lines([ + "*4: UnexpectedException*", + "*5: DocTestFailure*", + "*8: DocTestFailure*", + ]) + class TestDoctestAutoUseFixtures(object): @@ -932,4 +1022,3 @@ class TestDoctestReportingOption(object): result.stderr.fnmatch_lines([ "*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*" ]) - diff --git a/testing/test_helpconfig.py b/testing/test_helpconfig.py index 41fa953ad..845005a05 100644 --- a/testing/test_helpconfig.py +++ b/testing/test_helpconfig.py @@ -2,10 +2,11 @@ from __future__ import absolute_import, division, print_function from _pytest.main import EXIT_NOTESTSCOLLECTED import pytest + def test_version(testdir, pytestconfig): result = testdir.runpytest("--version") assert result.ret == 0 - #p = py.path.local(py.__file__).dirpath() + # p = py.path.local(py.__file__).dirpath() result.stderr.fnmatch_lines([ '*pytest*%s*imported from*' % (pytest.__version__, ) ]) @@ -15,6 +16,7 @@ def test_version(testdir, pytestconfig): "*at*", ]) + def test_help(testdir): result = testdir.runpytest("--help") assert result.ret == 0 @@ -26,6 +28,7 @@ def test_help(testdir): *to see*fixtures*pytest --fixtures* """) + def test_hookvalidation_unknown(testdir): testdir.makeconftest(""" def pytest_hello(xyz): @@ -37,6 +40,7 @@ def test_hookvalidation_unknown(testdir): '*unknown hook*pytest_hello*' ]) + def test_hookvalidation_optional(testdir): testdir.makeconftest(""" import pytest @@ -47,6 +51,7 @@ def test_hookvalidation_optional(testdir): result = testdir.runpytest() assert result.ret == EXIT_NOTESTSCOLLECTED + def test_traceconfig(testdir): result = testdir.runpytest("--traceconfig") result.stdout.fnmatch_lines([ @@ -54,12 +59,14 @@ def test_traceconfig(testdir): "*active plugins*", ]) + def test_debug(testdir, monkeypatch): result = testdir.runpytest_subprocess("--debug") assert result.ret == EXIT_NOTESTSCOLLECTED p = testdir.tmpdir.join("pytestdebug.log") assert "pytest_sessionstart" in p.read() + def test_PYTEST_DEBUG(testdir, monkeypatch): monkeypatch.setenv("PYTEST_DEBUG", "1") result = testdir.runpytest_subprocess() diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index bc637b035..a8f5b9fec 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -328,23 +328,28 @@ class TestPython(object): fnode.assert_attr(message="internal error") assert "Division" in fnode.toxml() - def test_failure_function(self, testdir): + @pytest.mark.parametrize('junit_logging', ['no', 'system-out', 'system-err']) + def test_failure_function(self, testdir, junit_logging): testdir.makepyfile(""" + import logging import sys + def test_fail(): print ("hello-stdout") sys.stderr.write("hello-stderr\\n") + logging.info('info msg') + logging.warning('warning msg') raise ValueError(42) """) - result, dom = runandparse(testdir) + result, dom = runandparse(testdir, '-o', 'junit_logging=%s' % junit_logging) assert result.ret node = dom.find_first_by_tag("testsuite") node.assert_attr(failures=1, tests=1) tnode = node.find_first_by_tag("testcase") tnode.assert_attr( file="test_failure_function.py", - line="1", + line="3", classname="test_failure_function", name="test_fail") fnode = 
tnode.find_first_by_tag("failure") @@ -353,9 +358,21 @@ class TestPython(object): systemout = fnode.next_siebling assert systemout.tag == "system-out" assert "hello-stdout" in systemout.toxml() + assert "info msg" not in systemout.toxml() systemerr = systemout.next_siebling assert systemerr.tag == "system-err" assert "hello-stderr" in systemerr.toxml() + assert "info msg" not in systemerr.toxml() + + if junit_logging == 'system-out': + assert "warning msg" in systemout.toxml() + assert "warning msg" not in systemerr.toxml() + elif junit_logging == 'system-err': + assert "warning msg" not in systemout.toxml() + assert "warning msg" in systemerr.toxml() + elif junit_logging == 'no': + assert "warning msg" not in systemout.toxml() + assert "warning msg" not in systemerr.toxml() def test_failure_verbose_message(self, testdir): testdir.makepyfile(""" @@ -600,6 +617,7 @@ class TestPython(object): assert "hello-stdout call" in systemout.toxml() assert "hello-stdout teardown" in systemout.toxml() + def test_mangle_test_address(): from _pytest.junitxml import mangle_test_address address = '::'.join( @@ -760,11 +778,13 @@ def test_logxml_makedir(testdir): assert result.ret == 0 assert testdir.tmpdir.join("path/to/results.xml").check() + def test_logxml_check_isdir(testdir): """Give an error if --junit-xml is a directory (#2089)""" result = testdir.runpytest("--junit-xml=.") result.stderr.fnmatch_lines(["*--junitxml must be a filename*"]) + def test_escaped_parametrized_names_xml(testdir): testdir.makepyfile(""" import pytest @@ -843,29 +863,25 @@ def test_record_property(testdir): import pytest @pytest.fixture - def other(record_xml_property): - record_xml_property("bar", 1) - def test_record(record_xml_property, other): - record_xml_property("foo", "<1"); + def other(record_property): + record_property("bar", 1) + def test_record(record_property, other): + record_property("foo", "<1"); """) - result, dom = runandparse(testdir, '-rw') + result, dom = runandparse(testdir, '-rwv') node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") psnode = tnode.find_first_by_tag('properties') pnodes = psnode.find_by_tag('property') pnodes[0].assert_attr(name="bar", value="1") pnodes[1].assert_attr(name="foo", value="<1") - result.stdout.fnmatch_lines([ - 'test_record_property.py::test_record', - '*record_xml_property*experimental*', - ]) def test_record_property_same_name(testdir): testdir.makepyfile(""" - def test_record_with_same_name(record_xml_property): - record_xml_property("foo", "bar") - record_xml_property("foo", "baz") + def test_record_with_same_name(record_property): + record_property("foo", "bar") + record_property("foo", "baz") """) result, dom = runandparse(testdir, '-rw') node = dom.find_first_by_tag("testsuite") @@ -876,6 +892,27 @@ def test_record_property_same_name(testdir): pnodes[1].assert_attr(name="foo", value="baz") +def test_record_attribute(testdir): + testdir.makepyfile(""" + import pytest + + @pytest.fixture + def other(record_xml_attribute): + record_xml_attribute("bar", 1) + def test_record(record_xml_attribute, other): + record_xml_attribute("foo", "<1"); + """) + result, dom = runandparse(testdir, '-rw') + node = dom.find_first_by_tag("testsuite") + tnode = node.find_first_by_tag("testcase") + tnode.assert_attr(bar="1") + tnode.assert_attr(foo="<1") + result.stdout.fnmatch_lines([ + 'test_record_attribute.py::test_record', + '*record_xml_attribute*experimental*', + ]) + + def test_random_report_log_xdist(testdir): """xdist calls 
pytest_runtest_logreport as they are executed by the slaves, with nodes from several nodes overlapping, so junitxml must cope with that @@ -1057,4 +1094,3 @@ def test_set_suite_name(testdir, suite_name): assert result.ret == 0 node = dom.find_first_by_tag("testsuite") node.assert_attr(name=expected) - diff --git a/testing/test_mark.py b/testing/test_mark.py index dff04f407..9ec1ce75a 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -3,12 +3,18 @@ import os import sys import pytest -from _pytest.mark import MarkGenerator as Mark, ParameterSet, transfer_markers +from _pytest.mark import ( + MarkGenerator as Mark, ParameterSet, transfer_markers, + EMPTY_PARAMETERSET_OPTION, +) + +ignore_markinfo = pytest.mark.filterwarnings('ignore:MarkInfo objects:_pytest.deprecated.RemovedInPytest4Warning') + class TestMark(object): def test_markinfo_repr(self): from _pytest.mark import MarkInfo, Mark - m = MarkInfo(Mark("hello", (1,2), {})) + m = MarkInfo.for_mark(Mark("hello", (1, 2), {})) repr(m) @pytest.mark.parametrize('attr', ['mark', 'param']) @@ -21,6 +27,19 @@ class TestMark(object): mark = Mark() pytest.raises((AttributeError, TypeError), mark) + def test_mark_with_param(self): + def some_function(abc): + pass + + class SomeClass(object): + pass + + assert pytest.mark.fun(some_function) is some_function + assert pytest.mark.fun.with_args(some_function) is not some_function + + assert pytest.mark.fun(SomeClass) is SomeClass + assert pytest.mark.fun.with_args(SomeClass) is not SomeClass + def test_pytest_mark_name_starts_with_underscore(self): mark = Mark() pytest.raises(AttributeError, getattr, mark, '_some_name') @@ -34,6 +53,7 @@ class TestMark(object): mark.hello(f) assert f.hello + @ignore_markinfo def test_pytest_mark_keywords(self): mark = Mark() @@ -45,6 +65,7 @@ class TestMark(object): assert f.world.kwargs['x'] == 3 assert f.world.kwargs['y'] == 4 + @ignore_markinfo def test_apply_multiple_and_merge(self): mark = Mark() @@ -61,6 +82,7 @@ class TestMark(object): assert f.world.kwargs['y'] == 1 assert len(f.world.args) == 0 + @ignore_markinfo def test_pytest_mark_positional(self): mark = Mark() @@ -71,6 +93,7 @@ class TestMark(object): assert f.world.args[0] == "hello" mark.world("world")(f) + @ignore_markinfo def test_pytest_mark_positional_func_and_keyword(self): mark = Mark() @@ -86,6 +109,7 @@ class TestMark(object): assert g.world.args[0] is f assert g.world.kwargs["omega"] == "hello" + @ignore_markinfo def test_pytest_mark_reuse(self): mark = Mark() @@ -140,19 +164,55 @@ def test_ini_markers(testdir): rec = testdir.inline_run() rec.assertoutcome(passed=1) + def test_markers_option(testdir): testdir.makeini(""" [pytest] markers = a1: this is a webtest marker a1some: another marker + nodescription """) result = testdir.runpytest("--markers", ) result.stdout.fnmatch_lines([ "*a1*this is a webtest*", "*a1some*another marker", + "*nodescription*", ]) + +def test_ini_markers_whitespace(testdir): + testdir.makeini(""" + [pytest] + markers = + a1 : this is a whitespace marker + """) + testdir.makepyfile(""" + import pytest + + @pytest.mark.a1 + def test_markers(): + assert True + """) + rec = testdir.inline_run("--strict", "-m", "a1") + rec.assertoutcome(passed=1) + + +def test_marker_without_description(testdir): + testdir.makefile(".cfg", setup=""" + [tool:pytest] + markers=slow + """) + testdir.makeconftest(""" + import pytest + pytest.mark.xfail('FAIL') + """) + ftdir = testdir.mkdir("ft1_dummy") + testdir.tmpdir.join("conftest.py").move(ftdir.join("conftest.py")) + rec = 
testdir.runpytest_subprocess("--strict") + rec.assert_outcomes() + + def test_markers_option_with_plugin_in_current_dir(testdir): testdir.makeconftest('pytest_plugins = "flip_flop"') testdir.makepyfile(flip_flop="""\ @@ -186,6 +246,7 @@ def test_mark_on_pseudo_function(testdir): reprec = testdir.inline_run() reprec.assertoutcome(passed=1) + def test_strict_prohibits_unregistered_markers(testdir): testdir.makepyfile(""" import pytest @@ -199,11 +260,12 @@ def test_strict_prohibits_unregistered_markers(testdir): "*unregisteredmark*not*registered*", ]) + @pytest.mark.parametrize("spec", [ - ("xyz", ("test_one",)), - ("xyz and xyz2", ()), - ("xyz2", ("test_two",)), - ("xyz or xyz2", ("test_one", "test_two"),) + ("xyz", ("test_one",)), + ("xyz and xyz2", ()), + ("xyz2", ("test_two",)), + ("xyz or xyz2", ("test_one", "test_two"),) ]) def test_mark_option(spec, testdir): testdir.makepyfile(""" @@ -222,9 +284,10 @@ def test_mark_option(spec, testdir): assert len(passed) == len(passed_result) assert list(passed) == list(passed_result) + @pytest.mark.parametrize("spec", [ - ("interface", ("test_interface",)), - ("not interface", ("test_nointer",)), + ("interface", ("test_interface",)), + ("not interface", ("test_nointer",)), ]) def test_mark_option_custom(spec, testdir): testdir.makeconftest(""" @@ -247,11 +310,12 @@ def test_mark_option_custom(spec, testdir): assert len(passed) == len(passed_result) assert list(passed) == list(passed_result) + @pytest.mark.parametrize("spec", [ - ("interface", ("test_interface",)), - ("not interface", ("test_nointer", "test_pass")), - ("pass", ("test_pass",)), - ("not pass", ("test_interface", "test_nointer")), + ("interface", ("test_interface",)), + ("not interface", ("test_nointer", "test_pass")), + ("pass", ("test_pass",)), + ("not pass", ("test_interface", "test_nointer")), ]) def test_keyword_option_custom(spec, testdir): testdir.makepyfile(""" @@ -271,9 +335,9 @@ def test_keyword_option_custom(spec, testdir): @pytest.mark.parametrize("spec", [ - ("None", ("test_func[None]",)), - ("1.3", ("test_func[1.3]",)), - ("2-3", ("test_func[2-3]",)) + ("None", ("test_func[None]",)), + ("1.3", ("test_func[1.3]",)), + ("2-3", ("test_func[2-3]",)) ]) def test_keyword_option_parametrize(spec, testdir): testdir.makepyfile(""" @@ -290,6 +354,21 @@ def test_keyword_option_parametrize(spec, testdir): assert list(passed) == list(passed_result) +@pytest.mark.parametrize("spec", [ + ("foo or import", "ERROR: Python keyword 'import' not accepted in expressions passed to '-k'"), + ("foo or", "ERROR: Wrong expression passed to '-k': foo or") +]) +def test_keyword_option_wrong_arguments(spec, testdir, capsys): + testdir.makepyfile(""" + def test_func(arg): + pass + """) + opt, expected_result = spec + testdir.inline_run("-k", opt) + out = capsys.readouterr().err + assert expected_result in out + + def test_parametrized_collected_from_command_line(testdir): """Parametrized test not collected if test named specified in command line issue#649. 
@@ -322,6 +401,24 @@ def test_parametrized_collect_with_wrong_args(testdir): ]) +def test_parametrized_with_kwargs(testdir): + """Test collect parametrized func with wrong number of args.""" + py_file = testdir.makepyfile(""" + import pytest + + @pytest.fixture(params=[1,2]) + def a(request): + return request.param + + @pytest.mark.parametrize(argnames='b', argvalues=[1, 2]) + def test_func(a, b): + pass + """) + + result = testdir.runpytest(py_file) + assert(result.ret == 0) + + class TestFunctional(object): def test_mark_per_function(self, testdir): @@ -394,6 +491,7 @@ class TestFunctional(object): assert 'hello' in keywords assert 'world' in keywords + @ignore_markinfo def test_merging_markers(self, testdir): p = testdir.makepyfile(""" import pytest @@ -413,13 +511,12 @@ class TestFunctional(object): assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4} # test the new __iter__ interface - l = list(marker) - assert len(l) == 3 - assert l[0].args == ("pos0",) - assert l[1].args == () - assert l[2].args == ("pos1", ) + values = list(marker) + assert len(values) == 3 + assert values[0].args == ("pos0",) + assert values[1].args == () + assert values[2].args == ("pos1", ) - @pytest.mark.xfail(reason='unfixed') def test_merging_markers_deep(self, testdir): # issue 199 - propagate markers into nested classes p = testdir.makepyfile(""" @@ -436,7 +533,7 @@ class TestFunctional(object): items, rec = testdir.inline_genitems(p) for item in items: print(item, item.keywords) - assert 'a' in item.keywords + assert [x for x in item.iter_markers() if x.name == 'a'] def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir): p = testdir.makepyfile(""" @@ -455,7 +552,6 @@ class TestFunctional(object): items, rec = testdir.inline_genitems(p) self.assert_markers(items, test_foo=('a', 'b'), test_bar=('a',)) - @pytest.mark.issue568 @pytest.mark.xfail(reason="markers smear on methods of base classes") def test_mark_should_not_pass_to_siebling_class(self, testdir): @@ -480,7 +576,6 @@ class TestFunctional(object): assert not hasattr(base_item.obj, 'b') assert not hasattr(sub_item_other.obj, 'b') - def test_mark_decorator_baseclasses_merged(self, testdir): p = testdir.makepyfile(""" import pytest @@ -511,9 +606,9 @@ class TestFunctional(object): def test_func(): pass """) - l = reprec.getfailedcollections() - assert len(l) == 1 - assert "TypeError" in str(l[0].longrepr) + values = reprec.getfailedcollections() + assert len(values) == 1 + assert "TypeError" in str(values[0].longrepr) def test_mark_dynamically_in_funcarg(self, testdir): testdir.makeconftest(""" @@ -522,8 +617,8 @@ class TestFunctional(object): def arg(request): request.applymarker(pytest.mark.hello) def pytest_terminal_summary(terminalreporter): - l = terminalreporter.stats['passed'] - terminalreporter.writer.line("keyword: %s" % l[0].keywords) + values = terminalreporter.stats['passed'] + terminalreporter._tw.line("keyword: %s" % values[0].keywords) """) testdir.makepyfile(""" def test_func(arg): @@ -534,6 +629,7 @@ class TestFunctional(object): "keyword: *hello*" ]) + @ignore_markinfo def test_merging_markers_two_functions(self, testdir): p = testdir.makepyfile(""" import pytest @@ -546,10 +642,10 @@ class TestFunctional(object): item, = items keywords = item.keywords marker = keywords['hello'] - l = list(marker) - assert len(l) == 2 - assert l[0].args == ("pos0",) - assert l[1].args == ("pos1",) + values = list(marker) + assert len(values) == 2 + assert values[0].args == ("pos0",) + assert values[1].args == ("pos1",) def 
test_no_marker_match_on_unmarked_names(self, testdir): p = testdir.makepyfile(""" @@ -588,6 +684,7 @@ class TestFunctional(object): reprec = testdir.inline_run() reprec.assertoutcome(passed=1) + @ignore_markinfo def test_keyword_added_for_session(self, testdir): testdir.makeconftest(""" import pytest @@ -627,8 +724,8 @@ class TestFunctional(object): if isinstance(v, MarkInfo)]) assert marker_names == set(expected_markers) - @pytest.mark.xfail(reason='callspec2.setmulti misuses keywords') @pytest.mark.issue1540 + @pytest.mark.filterwarnings("ignore") def test_mark_from_parameters(self, testdir): testdir.makepyfile(""" import pytest @@ -769,6 +866,7 @@ class TestKeywordSelection(object): marks=[pytest.mark.xfail, pytest.mark.skip], id=None)), ]) +@pytest.mark.filterwarnings('ignore') def test_parameterset_extractfrom(argval, expected): extracted = ParameterSet.extract_from(argval) assert extracted == expected @@ -786,11 +884,46 @@ def test_legacy_transfer(): def fake_method(self): pass - transfer_markers(fake_method, FakeClass, FakeModule) # legacy marks transfer smeared assert fake_method.nofun assert fake_method.fun # pristine marks dont transfer - assert fake_method.pytestmark == [pytest.mark.fun.mark] \ No newline at end of file + assert fake_method.pytestmark == [pytest.mark.fun.mark] + + +class TestMarkDecorator(object): + + @pytest.mark.parametrize('lhs, rhs, expected', [ + (pytest.mark.foo(), pytest.mark.foo(), True), + (pytest.mark.foo(), pytest.mark.bar(), False), + (pytest.mark.foo(), 'bar', False), + ('foo', pytest.mark.bar(), False) + ]) + def test__eq__(self, lhs, rhs, expected): + assert (lhs == rhs) == expected + + +@pytest.mark.parametrize('mark', [None, '', 'skip', 'xfail']) +def test_parameterset_for_parametrize_marks(testdir, mark): + if mark is not None: + testdir.makeini( + "[pytest]\n{}={}".format(EMPTY_PARAMETERSET_OPTION, mark)) + + config = testdir.parseconfig() + from _pytest.mark import pytest_configure, get_empty_parameterset_mark + pytest_configure(config) + result_mark = get_empty_parameterset_mark(config, ['a'], all) + if mark in (None, ''): + # normalize to the requested name + mark = 'skip' + assert result_mark.name == mark + assert result_mark.kwargs['reason'].startswith("got empty parameter set ") + if mark == 'xfail': + assert result_mark.kwargs.get('run') is False + + +def test_parameterset_for_parametrize_bad_markname(testdir): + with pytest.raises(pytest.UsageError): + test_parameterset_for_parametrize_marks(testdir, 'bad') diff --git a/testing/test_monkeypatch.py b/testing/test_monkeypatch.py index 1efcf7f95..36ef083f7 100644 --- a/testing/test_monkeypatch.py +++ b/testing/test_monkeypatch.py @@ -319,10 +319,23 @@ def test_issue156_undo_staticmethod(Sample): monkeypatch.undo() assert Sample.hello() + def test_issue1338_name_resolving(): pytest.importorskip('requests') monkeypatch = MonkeyPatch() try: - monkeypatch.delattr('requests.sessions.Session.request') + monkeypatch.delattr('requests.sessions.Session.request') finally: monkeypatch.undo() + + +def test_context(): + monkeypatch = MonkeyPatch() + + import functools + import inspect + + with monkeypatch.context() as m: + m.setattr(functools, "partial", 3) + assert not inspect.isclass(functools.partial) + assert inspect.isclass(functools.partial) diff --git a/testing/test_nodes.py b/testing/test_nodes.py new file mode 100644 index 000000000..6f4540f99 --- /dev/null +++ b/testing/test_nodes.py @@ -0,0 +1,18 @@ +import pytest + +from _pytest import nodes + + +@pytest.mark.parametrize("baseid, nodeid, 
expected", ( + ('', '', True), + ('', 'foo', True), + ('', 'foo/bar', True), + ('', 'foo/bar::TestBaz::()', True), + ('foo', 'food', False), + ('foo/bar::TestBaz::()', 'foo/bar', False), + ('foo/bar::TestBaz::()', 'foo/bar::TestBop::()', False), + ('foo/bar', 'foo/bar::TestBop::()', True), +)) +def test_ischildnode(baseid, nodeid, expected): + result = nodes.ischildnode(baseid, nodeid) + assert result is expected diff --git a/testing/test_nose.py b/testing/test_nose.py index 798badc1c..df3e1a94b 100644 --- a/testing/test_nose.py +++ b/testing/test_nose.py @@ -1,23 +1,25 @@ from __future__ import absolute_import, division, print_function import pytest + def setup_module(mod): mod.nose = pytest.importorskip("nose") + def test_nose_setup(testdir): p = testdir.makepyfile(""" - l = [] + values = [] from nose.tools import with_setup - @with_setup(lambda: l.append(1), lambda: l.append(2)) + @with_setup(lambda: values.append(1), lambda: values.append(2)) def test_hello(): - assert l == [1] + assert values == [1] def test_world(): - assert l == [1,2] + assert values == [1,2] - test_hello.setup = lambda: l.append(1) - test_hello.teardown = lambda: l.append(2) + test_hello.setup = lambda: values.append(1) + test_hello.teardown = lambda: values.append(2) """) result = testdir.runpytest(p, '-p', 'nose') result.assert_outcomes(passed=2) @@ -25,15 +27,15 @@ def test_nose_setup(testdir): def test_setup_func_with_setup_decorator(): from _pytest.nose import call_optional - l = [] + values = [] class A(object): @pytest.fixture(autouse=True) def f(self): - l.append(1) + values.append(1) call_optional(A(), "f") - assert not l + assert not values def test_setup_func_not_callable(): @@ -44,28 +46,29 @@ def test_setup_func_not_callable(): call_optional(A(), "f") + def test_nose_setup_func(testdir): p = testdir.makepyfile(""" from nose.tools import with_setup - l = [] + values = [] def my_setup(): a = 1 - l.append(a) + values.append(a) def my_teardown(): b = 2 - l.append(b) + values.append(b) @with_setup(my_setup, my_teardown) def test_hello(): - print (l) - assert l == [1] + print (values) + assert values == [1] def test_world(): - print (l) - assert l == [1,2] + print (values) + assert values == [1,2] """) result = testdir.runpytest(p, '-p', 'nose') @@ -76,18 +79,18 @@ def test_nose_setup_func_failure(testdir): p = testdir.makepyfile(""" from nose.tools import with_setup - l = [] + values = [] my_setup = lambda x: 1 my_teardown = lambda x: 2 @with_setup(my_setup, my_teardown) def test_hello(): - print (l) - assert l == [1] + print (values) + assert values == [1] def test_world(): - print (l) - assert l == [1,2] + print (values) + assert values == [1,2] """) result = testdir.runpytest(p, '-p', 'nose') @@ -98,13 +101,13 @@ def test_nose_setup_func_failure(testdir): def test_nose_setup_func_failure_2(testdir): testdir.makepyfile(""" - l = [] + values = [] my_setup = 1 my_teardown = 2 def test_hello(): - assert l == [] + assert values == [] test_hello.setup = my_setup test_hello.teardown = my_teardown @@ -112,31 +115,32 @@ def test_nose_setup_func_failure_2(testdir): reprec = testdir.inline_run() reprec.assertoutcome(passed=1) + def test_nose_setup_partial(testdir): pytest.importorskip("functools") p = testdir.makepyfile(""" from functools import partial - l = [] + values = [] def my_setup(x): a = x - l.append(a) + values.append(a) def my_teardown(x): b = x - l.append(b) + values.append(b) my_setup_partial = partial(my_setup, 1) my_teardown_partial = partial(my_teardown, 2) def test_hello(): - print (l) - assert l 
== [1] + print (values) + assert values == [1] def test_world(): - print (l) - assert l == [1,2] + print (values) + assert values == [1,2] test_hello.setup = my_setup_partial test_hello.teardown = my_teardown_partial @@ -247,25 +251,26 @@ def test_module_level_setup(testdir): def test_nose_style_setup_teardown(testdir): testdir.makepyfile(""" - l = [] + values = [] def setup_module(): - l.append(1) + values.append(1) def teardown_module(): - del l[0] + del values[0] def test_hello(): - assert l == [1] + assert values == [1] def test_world(): - assert l == [1] + assert values == [1] """) result = testdir.runpytest('-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*", ]) + def test_nose_setup_ordering(testdir): testdir.makepyfile(""" def setup_module(mod): @@ -305,6 +310,7 @@ def test_apiwrapper_problem_issue260(testdir): result = testdir.runpytest() result.assert_outcomes(passed=1) + def test_setup_teardown_linking_issue265(testdir): # we accidentally didnt integrate nose setupstate with normal setupstate # this test ensures that won't happen again @@ -352,6 +358,7 @@ def test_SkipTest_in_test(testdir): reprec = testdir.inline_run() reprec.assertoutcome(skipped=1) + def test_istest_function_decorator(testdir): p = testdir.makepyfile(""" import nose.tools @@ -362,6 +369,7 @@ def test_istest_function_decorator(testdir): result = testdir.runpytest(p) result.assert_outcomes(passed=1) + def test_nottest_function_decorator(testdir): testdir.makepyfile(""" import nose.tools @@ -374,6 +382,7 @@ def test_nottest_function_decorator(testdir): calls = reprec.getreports("pytest_runtest_logreport") assert not calls + def test_istest_class_decorator(testdir): p = testdir.makepyfile(""" import nose.tools @@ -385,6 +394,7 @@ def test_istest_class_decorator(testdir): result = testdir.runpytest(p) result.assert_outcomes(passed=1) + def test_nottest_class_decorator(testdir): testdir.makepyfile(""" import nose.tools diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index 38542783a..55983bbb1 100644 --- a/testing/test_parseopt.py +++ b/testing/test_parseopt.py @@ -1,13 +1,17 @@ from __future__ import absolute_import, division, print_function +import argparse import sys import os -import py, pytest +import py +import pytest from _pytest import config as parseopt + @pytest.fixture def parser(): return parseopt.Parser() + class TestParser(object): def test_no_help_by_default(self, capsys): parser = parseopt.Parser(usage="xyz") @@ -161,12 +165,12 @@ class TestParser(object): assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] args = parser.parse(['-R', '-S', '4', '2', '-R']) assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] - assert args.R == True - assert args.S == False + assert args.R is True + assert args.S is False args = parser.parse(['-R', '4', '-S', '2']) assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] - assert args.R == True - assert args.S == False + assert args.R is True + assert args.S is False def test_parse_defaultgetter(self): def defaultget(option): @@ -186,9 +190,9 @@ class TestParser(object): assert option.no is False def test_drop_short_helper(self): - parser = py.std.argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter) + parser = argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter) parser.add_argument('-t', '--twoword', '--duo', '--two-word', '--two', - help='foo').map_long_option = {'two': 'two-word'} + help='foo').map_long_option = {'two': 'two-word'} # throws error on --deux only! 
parser.add_argument('-d', '--deuxmots', '--deux-mots', action='store_true', help='foo').map_long_option = {'deux': 'deux-mots'} @@ -238,18 +242,18 @@ class TestParser(object): assert args.file_or_dir == ['abcd'] def test_drop_short_help0(self, parser, capsys): - parser.addoption('--func-args', '--doit', help = 'foo', + parser.addoption('--func-args', '--doit', help='foo', action='store_true') parser.parse([]) help = parser.optparser.format_help() - assert '--func-args, --doit foo' in help + assert '--func-args, --doit foo' in help # testing would be more helpful with all help generated def test_drop_short_help1(self, parser, capsys): group = parser.getgroup("general") group.addoption('--doit', '--func-args', action='store_true', help='foo') group._addoption("-h", "--help", action="store_true", dest="help", - help="show help message and configuration info") + help="show help message and configuration info") parser.parse(['-h']) help = parser.optparser.format_help() assert '-doit, --func-args foo' in help @@ -273,7 +277,7 @@ def test_argcomplete(testdir, monkeypatch): script = str(testdir.tmpdir.join("test_argcomplete")) pytest_bin = sys.argv[0] if "pytest" not in os.path.basename(pytest_bin): - pytest.skip("need to be run with pytest executable, not %s" %(pytest_bin,)) + pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,)) with open(str(script), 'w') as fp: # redirect output from argcomplete to stdin and stderr is not trivial @@ -284,7 +288,7 @@ def test_argcomplete(testdir, monkeypatch): # to handle a keyword argument env that replaces os.environ in popen or # extends the copy, advantage: could not forget to restore monkeypatch.setenv('_ARGCOMPLETE', "1") - monkeypatch.setenv('_ARGCOMPLETE_IFS',"\x0b") + monkeypatch.setenv('_ARGCOMPLETE_IFS', "\x0b") monkeypatch.setenv('COMP_WORDBREAKS', ' \\t\\n"\\\'><=;|&(:') arg = '--fu' @@ -297,13 +301,7 @@ def test_argcomplete(testdir, monkeypatch): elif not result.stdout.str(): pytest.skip("bash provided no output, argcomplete not available?") else: - if py.std.sys.version_info < (2,7): - result.stdout.lines = result.stdout.lines[0].split('\x0b') - result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) - else: - result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) - if py.std.sys.version_info < (2,7): - return + result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) os.mkdir('test_argcomplete.d') arg = 'test_argc' monkeypatch.setenv('COMP_LINE', "pytest " + arg) diff --git a/testing/test_pastebin.py b/testing/test_pastebin.py index 3fe66e972..6b1742d14 100644 --- a/testing/test_pastebin.py +++ b/testing/test_pastebin.py @@ -3,6 +3,7 @@ from __future__ import absolute_import, division, print_function import sys import pytest + class TestPasteCapture(object): @pytest.fixture @@ -26,7 +27,7 @@ class TestPasteCapture(object): assert len(pastebinlist) == 1 s = pastebinlist[0] assert s.find("def test_fail") != -1 - assert reprec.countoutcomes() == [1,1,1] + assert reprec.countoutcomes() == [1, 1, 1] def test_all(self, testdir, pastebinlist): from _pytest.pytester import LineMatcher @@ -40,7 +41,7 @@ class TestPasteCapture(object): pytest.skip("") """) reprec = testdir.inline_run(testpath, "--pastebin=all", '-v') - assert reprec.countoutcomes() == [1,1,1] + assert reprec.countoutcomes() == [1, 1, 1] assert len(pastebinlist) == 1 contents = pastebinlist[0].decode('utf-8') matcher = LineMatcher(contents.splitlines()) @@ -114,5 +115,3 @@ class TestPaste(object): assert 'lexer=%s' % lexer in data.decode() assert 
'code=full-paste-contents' in data.decode() assert 'expiry=1week' in data.decode() - - diff --git a/testing/test_pdb.py b/testing/test_pdb.py index ec5862082..85817f79b 100644 --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -1,16 +1,21 @@ from __future__ import absolute_import, division, print_function import sys import platform +import os import _pytest._code +from _pytest.debugging import SUPPORTS_BREAKPOINT_BUILTIN import pytest +_ENVIRON_PYTHONBREAKPOINT = os.environ.get('PYTHONBREAKPOINT', '') + + def runpdb_and_get_report(testdir, source): p = testdir.makepyfile(source) result = testdir.runpytest_inprocess("--pdb", p) reports = result.reprec.getreports("pytest_runtest_logreport") - assert len(reports) == 3, reports # setup/call/teardown + assert len(reports) == 3, reports # setup/call/teardown return reports[1] @@ -33,6 +38,29 @@ def custom_pdb_calls(): return called +@pytest.fixture +def custom_debugger_hook(): + called = [] + + # install dummy debugger class and track which methods were called on it + class _CustomDebugger(object): + def __init__(self, *args, **kwargs): + called.append("init") + + def reset(self): + called.append("reset") + + def interaction(self, *args): + called.append("interaction") + + def set_trace(self, frame): + print("**CustomDebugger**") + called.append("set_trace") + + _pytest._CustomDebugger = _CustomDebugger + yield called + del _pytest._CustomDebugger + class TestPDB(object): @@ -142,19 +170,86 @@ class TestPDB(object): child.sendeof() self.flush(child) - def test_pdb_interaction_capture(self, testdir): + def test_pdb_print_captured_stdout(self, testdir): p1 = testdir.makepyfile(""" def test_1(): - print("getrekt") + print("get\\x20rekt") assert False """) child = testdir.spawn_pytest("--pdb %s" % p1) - child.expect("getrekt") + child.expect("captured stdout") + child.expect("get rekt") + child.expect("(Pdb)") + child.sendeof() + rest = child.read().decode("utf8") + assert "1 failed" in rest + assert "get rekt" not in rest + self.flush(child) + + def test_pdb_print_captured_stderr(self, testdir): + p1 = testdir.makepyfile(""" + def test_1(): + import sys + sys.stderr.write("get\\x20rekt") + assert False + """) + child = testdir.spawn_pytest("--pdb %s" % p1) + child.expect("captured stderr") + child.expect("get rekt") + child.expect("(Pdb)") + child.sendeof() + rest = child.read().decode("utf8") + assert "1 failed" in rest + assert "get rekt" not in rest + self.flush(child) + + def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir): + p1 = testdir.makepyfile(""" + def test_1(): + assert False + """) + child = testdir.spawn_pytest("--pdb %s" % p1) + child.expect("(Pdb)") + output = child.before.decode("utf8") + child.sendeof() + assert "captured stdout" not in output + assert "captured stderr" not in output + self.flush(child) + + @pytest.mark.parametrize('showcapture', ['all', 'no', 'log']) + def test_pdb_print_captured_logs(self, testdir, showcapture): + p1 = testdir.makepyfile(""" + def test_1(): + import logging + logging.warn("get " + "rekt") + assert False + """) + child = testdir.spawn_pytest("--show-capture=%s --pdb %s" % (showcapture, p1)) + if showcapture in ('all', 'log'): + child.expect("captured log") + child.expect("get rekt") + child.expect("(Pdb)") + child.sendeof() + rest = child.read().decode("utf8") + assert "1 failed" in rest + self.flush(child) + + def test_pdb_print_captured_logs_nologging(self, testdir): + p1 = testdir.makepyfile(""" + def test_1(): + import logging + logging.warn("get " + "rekt") + assert 
False + """) + child = testdir.spawn_pytest("--show-capture=all --pdb " + "-p no:logging %s" % p1) + child.expect("get rekt") + output = child.before.decode("utf8") + assert "captured log" not in output child.expect("(Pdb)") child.sendeof() rest = child.read().decode("utf8") assert "1 failed" in rest - assert "getrekt" not in rest self.flush(child) def test_pdb_interaction_exception(self, testdir): @@ -181,7 +276,7 @@ class TestPDB(object): xxx """) child = testdir.spawn_pytest("--pdb %s" % p1) - #child.expect(".*import pytest.*") + # child.expect(".*import pytest.*") child.expect("(Pdb)") child.sendeof() child.expect("1 error") @@ -194,7 +289,7 @@ class TestPDB(object): """) p1 = testdir.makepyfile("def test_func(): pass") child = testdir.spawn_pytest("--pdb %s" % p1) - #child.expect(".*import pytest.*") + # child.expect(".*import pytest.*") child.expect("(Pdb)") child.sendeof() self.flush(child) @@ -216,7 +311,7 @@ class TestPDB(object): rest = child.read().decode("utf-8") assert "1 failed" in rest assert "def test_1" in rest - assert "hello17" in rest # out is captured + assert "hello17" in rest # out is captured self.flush(child) def test_pdb_set_trace_interception(self, testdir): @@ -309,8 +404,8 @@ class TestPDB(object): rest = child.read().decode("utf8") assert "1 failed" in rest assert "def test_1" in rest - assert "hello17" in rest # out is captured - assert "hello18" in rest # out is captured + assert "hello17" in rest # out is captured + assert "hello18" in rest # out is captured self.flush(child) def test_pdb_used_outside_test(self, testdir): @@ -319,7 +414,7 @@ class TestPDB(object): pytest.set_trace() x = 5 """) - child = testdir.spawn("%s %s" %(sys.executable, p1)) + child = testdir.spawn("%s %s" % (sys.executable, p1)) child.expect("x = 5") child.sendeof() self.flush(child) @@ -339,7 +434,7 @@ class TestPDB(object): self.flush(child) def test_pdb_collection_failure_is_shown(self, testdir): - p1 = testdir.makepyfile("""xxx """) + p1 = testdir.makepyfile("xxx") result = testdir.runpytest_subprocess("--pdb", p1) result.stdout.fnmatch_lines([ "*NameError*xxx*", @@ -377,7 +472,6 @@ class TestPDB(object): ]) assert custom_pdb_calls == ["init", "reset", "interaction"] - def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls): p1 = testdir.makepyfile("""xxx """) result = testdir.runpytest_inprocess( @@ -404,5 +498,123 @@ class TestPDB(object): child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1)) child.expect('custom set_trace>') - if child.isalive(): - child.wait() + self.flush(child) + + +class TestDebuggingBreakpoints(object): + + def test_supports_breakpoint_module_global(self): + """ + Test that supports breakpoint global marks on Python 3.7+ and not on + CPython 3.5, 2.7 + """ + if sys.version_info.major == 3 and sys.version_info.minor >= 7: + assert SUPPORTS_BREAKPOINT_BUILTIN is True + if sys.version_info.major == 3 and sys.version_info.minor == 5: + assert SUPPORTS_BREAKPOINT_BUILTIN is False + if sys.version_info.major == 2 and sys.version_info.minor == 7: + assert SUPPORTS_BREAKPOINT_BUILTIN is False + + @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") + @pytest.mark.parametrize('arg', ['--pdb', '']) + def test_sys_breakpointhook_configure_and_unconfigure(self, testdir, arg): + """ + Test that sys.breakpointhook is set to the custom Pdb class once configured, test that + hook is reset to system value once pytest has been unconfigured + """ + testdir.makeconftest(""" + import sys + from pytest 
import hookimpl + from _pytest.debugging import pytestPDB + + def pytest_configure(config): + config._cleanup.append(check_restored) + + def check_restored(): + assert sys.breakpointhook == sys.__breakpointhook__ + + def test_check(): + assert sys.breakpointhook == pytestPDB.set_trace + """) + testdir.makepyfile(""" + def test_nothing(): pass + """) + args = (arg,) if arg else () + result = testdir.runpytest_subprocess(*args) + result.stdout.fnmatch_lines([ + '*1 passed in *', + ]) + + @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") + def test_pdb_custom_cls(self, testdir, custom_debugger_hook): + p1 = testdir.makepyfile(""" + def test_nothing(): + breakpoint() + """) + result = testdir.runpytest_inprocess( + "--pdb", "--pdbcls=_pytest:_CustomDebugger", p1) + result.stdout.fnmatch_lines([ + "*CustomDebugger*", + "*1 passed*", + ]) + assert custom_debugger_hook == ["init", "set_trace"] + + @pytest.mark.parametrize('arg', ['--pdb', '']) + @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") + def test_environ_custom_class(self, testdir, custom_debugger_hook, arg): + testdir.makeconftest(""" + import os + import sys + + os.environ['PYTHONBREAKPOINT'] = '_pytest._CustomDebugger.set_trace' + + def pytest_configure(config): + config._cleanup.append(check_restored) + + def check_restored(): + assert sys.breakpointhook == sys.__breakpointhook__ + + def test_check(): + import _pytest + assert sys.breakpointhook is _pytest._CustomDebugger.set_trace + """) + testdir.makepyfile(""" + def test_nothing(): pass + """) + args = (arg,) if arg else () + result = testdir.runpytest_subprocess(*args) + result.stdout.fnmatch_lines([ + '*1 passed in *', + ]) + + @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") + @pytest.mark.skipif(not _ENVIRON_PYTHONBREAKPOINT == '', reason="Requires breakpoint() default value") + def test_sys_breakpoint_interception(self, testdir): + p1 = testdir.makepyfile(""" + def test_1(): + breakpoint() + """) + child = testdir.spawn_pytest(str(p1)) + child.expect("test_1") + child.expect("(Pdb)") + child.sendeof() + rest = child.read().decode("utf8") + assert "1 failed" in rest + assert "reading from stdin while output" not in rest + TestPDB.flush(child) + + @pytest.mark.skipif(not SUPPORTS_BREAKPOINT_BUILTIN, reason="Requires breakpoint() builtin") + def test_pdb_not_altered(self, testdir): + p1 = testdir.makepyfile(""" + import pdb + def test_1(): + pdb.set_trace() + """) + child = testdir.spawn_pytest(str(p1)) + child.expect("test_1") + child.expect("(Pdb)") + child.sendeof() + rest = child.read().decode("utf8") + assert "1 failed" in rest + assert "reading from stdin while output" not in rest + TestPDB.flush(child) diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index 1f0f4f602..503ba8454 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -1,8 +1,10 @@ # encoding: UTF-8 from __future__ import absolute_import, division, print_function import pytest -import py import os +import re +import sys +import types from _pytest.config import get_config, PytestPluginManager from _pytest.main import EXIT_NOTESTSCOLLECTED, Session @@ -12,6 +14,7 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED, Session def pytestpm(): return PytestPluginManager() + class TestPytestPluginInteractions(object): def test_addhooks_conftestplugin(self, testdir): testdir.makepyfile(newhooks=""" @@ -29,9 +32,9 @@ class 
TestPytestPluginInteractions(object): config = get_config() pm = config.pluginmanager pm.hook.pytest_addhooks.call_historic( - kwargs=dict(pluginmanager=config.pluginmanager)) + kwargs=dict(pluginmanager=config.pluginmanager)) config.pluginmanager._importconftest(conf) - #print(config.pluginmanager.get_plugins()) + # print(config.pluginmanager.get_plugins()) res = config.hook.pytest_myhook(xyz=10) assert res == [11] @@ -84,23 +87,23 @@ class TestPytestPluginInteractions(object): def test_configure(self, testdir): config = testdir.parseconfig() - l = [] + values = [] class A(object): def pytest_configure(self, config): - l.append(self) + values.append(self) config.pluginmanager.register(A()) - assert len(l) == 0 + assert len(values) == 0 config._do_configure() - assert len(l) == 1 + assert len(values) == 1 config.pluginmanager.register(A()) # leads to a configured() plugin - assert len(l) == 2 - assert l[0] != l[1] + assert len(values) == 2 + assert values[0] != values[1] config._ensure_unconfigure() config.pluginmanager.register(A()) - assert len(l) == 2 + assert len(values) == 2 def test_hook_tracing(self): pytestpm = get_config().pluginmanager # fully initialized with plugins @@ -115,19 +118,19 @@ class TestPytestPluginInteractions(object): saveindent.append(pytestpm.trace.root.indent) raise ValueError() - l = [] - pytestpm.trace.root.setwriter(l.append) + values = [] + pytestpm.trace.root.setwriter(values.append) undo = pytestpm.enable_tracing() try: indent = pytestpm.trace.root.indent p = api1() pytestpm.register(p) assert pytestpm.trace.root.indent == indent - assert len(l) >= 2 - assert 'pytest_plugin_registered' in l[0] - assert 'finish' in l[1] + assert len(values) >= 2 + assert 'pytest_plugin_registered' in values[0] + assert 'finish' in values[1] - l[:] = [] + values[:] = [] with pytest.raises(ValueError): pytestpm.register(api2()) assert pytestpm.trace.root.indent == indent @@ -154,23 +157,6 @@ class TestPytestPluginInteractions(object): ihook_b = session.gethookproxy(testdir.tmpdir.join('tests')) assert ihook_a is not ihook_b - def test_warn_on_deprecated_multicall(self, pytestpm): - warnings = [] - - class get_warnings(object): - def pytest_logwarning(self, message): - warnings.append(message) - - class Plugin(object): - def pytest_configure(self, __multicall__): - pass - - pytestpm.register(get_warnings()) - before = list(warnings) - pytestpm.register(Plugin()) - assert len(warnings) == len(before) + 1 - assert "deprecated" in warnings[-1] - def test_warn_on_deprecated_addhooks(self, pytestpm): warnings = [] @@ -197,6 +183,7 @@ def test_namespace_has_default_and_env_plugins(testdir): result = testdir.runpython(p) assert result.ret == 0 + def test_default_markers(testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines([ @@ -214,30 +201,35 @@ def test_importplugin_error_message(testdir, pytestpm): testdir.syspathinsert(testdir.tmpdir) testdir.makepyfile(qwe=""" # encoding: UTF-8 - raise ImportError(u'Not possible to import: ☺') + def test_traceback(): + raise ImportError(u'Not possible to import: ☺') + test_traceback() """) with pytest.raises(ImportError) as excinfo: pytestpm.import_plugin("qwe") - expected = '.*Error importing plugin "qwe": Not possible to import: .' - assert py.std.re.match(expected, str(excinfo.value)) + + expected_message = '.*Error importing plugin "qwe": Not possible to import: .' 
+ expected_traceback = ".*in test_traceback" + assert re.match(expected_message, str(excinfo.value)) + assert re.match(expected_traceback, str(excinfo.traceback[-1])) class TestPytestPluginManager(object): def test_register_imported_modules(self): pm = PytestPluginManager() - mod = py.std.types.ModuleType("x.y.pytest_hello") + mod = types.ModuleType("x.y.pytest_hello") pm.register(mod) assert pm.is_registered(mod) - l = pm.get_plugins() - assert mod in l + values = pm.get_plugins() + assert mod in values pytest.raises(ValueError, "pm.register(mod)") pytest.raises(ValueError, lambda: pm.register(mod)) - #assert not pm.is_registered(mod2) - assert pm.get_plugins() == l + # assert not pm.is_registered(mod2) + assert pm.get_plugins() == values def test_canonical_import(self, monkeypatch): - mod = py.std.types.ModuleType("pytest_xyz") - monkeypatch.setitem(py.std.sys.modules, 'pytest_xyz', mod) + mod = types.ModuleType("pytest_xyz") + monkeypatch.setitem(sys.modules, 'pytest_xyz', mod) pm = PytestPluginManager() pm.import_plugin('pytest_xyz') assert pm.get_plugin('pytest_xyz') == mod @@ -247,7 +239,7 @@ class TestPytestPluginManager(object): testdir.syspathinsert() testdir.makepyfile(pytest_p1="#") testdir.makepyfile(pytest_p2="#") - mod = py.std.types.ModuleType("temp") + mod = types.ModuleType("temp") mod.pytest_plugins = ["pytest_p1", "pytest_p2"] pytestpm.consider_module(mod) assert pytestpm.get_plugin("pytest_p1").__name__ == "pytest_p1" @@ -255,20 +247,20 @@ class TestPytestPluginManager(object): def test_consider_module_import_module(self, testdir): pytestpm = get_config().pluginmanager - mod = py.std.types.ModuleType("x") + mod = types.ModuleType("x") mod.pytest_plugins = "pytest_a" aplugin = testdir.makepyfile(pytest_a="#") reprec = testdir.make_hook_recorder(pytestpm) - #syspath.prepend(aplugin.dirpath()) - py.std.sys.path.insert(0, str(aplugin.dirpath())) + # syspath.prepend(aplugin.dirpath()) + sys.path.insert(0, str(aplugin.dirpath())) pytestpm.consider_module(mod) call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name) assert call.plugin.__name__ == "pytest_a" # check that it is not registered twice pytestpm.consider_module(mod) - l = reprec.getcalls("pytest_plugin_registered") - assert len(l) == 1 + values = reprec.getcalls("pytest_plugin_registered") + assert len(values) == 1 def test_consider_env_fails_to_import(self, monkeypatch, pytestpm): monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",") @@ -352,7 +344,7 @@ class TestPytestPluginManager(object): class TestPytestPluginManagerBootstrapming(object): def test_preparse_args(self, pytestpm): pytest.raises(ImportError, lambda: - pytestpm.consider_preparse(["xyz", "-p", "hello123"])) + pytestpm.consider_preparse(["xyz", "-p", "hello123"])) def test_plugin_prevent_register(self, pytestpm): pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 932427ad3..87063371a 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -1,7 +1,12 @@ +# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function -import pytest import os +import py.path +import pytest +import sys +import _pytest.pytester as pytester from _pytest.pytester import HookRecorder +from _pytest.pytester import CwdSnapshot, SysModulesSnapshot, SysPathsSnapshot from _pytest.config import PytestPluginManager from _pytest.main import EXIT_OK, EXIT_TESTSFAILED @@ -64,6 +69,7 @@ def test_parseconfig(testdir): assert config2 != config1 assert 
config1 != pytest.config + def test_testdir_runs_with_plugin(testdir): testdir.makepyfile(""" pytest_plugins = "pytester" @@ -78,6 +84,7 @@ def make_holder(): class apiclass(object): def pytest_xyz(self, arg): "x" + def pytest_xyz_noarg(self): "x" @@ -117,18 +124,255 @@ def test_makepyfile_unicode(testdir): unichr = chr testdir.makepyfile(unichr(0xfffd)) -def test_inline_run_clean_modules(testdir): - test_mod = testdir.makepyfile("def test_foo(): assert True") - result = testdir.inline_run(str(test_mod)) - assert result.ret == EXIT_OK - # rewrite module, now test should fail if module was re-imported - test_mod.write("def test_foo(): assert False") - result2 = testdir.inline_run(str(test_mod)) - assert result2.ret == EXIT_TESTSFAILED -def test_assert_outcomes_after_pytest_erro(testdir): +def test_makepyfile_utf8(testdir): + """Ensure makepyfile accepts utf-8 bytes as input (#2738)""" + utf8_contents = u""" + def setup_function(function): + mixed_encoding = u'São Paulo' + """.encode('utf-8') + p = testdir.makepyfile(utf8_contents) + assert u"mixed_encoding = u'São Paulo'".encode('utf-8') in p.read('rb') + + +class TestInlineRunModulesCleanup(object): + def test_inline_run_test_module_not_cleaned_up(self, testdir): + test_mod = testdir.makepyfile("def test_foo(): assert True") + result = testdir.inline_run(str(test_mod)) + assert result.ret == EXIT_OK + # rewrite module, now test should fail if module was re-imported + test_mod.write("def test_foo(): assert False") + result2 = testdir.inline_run(str(test_mod)) + assert result2.ret == EXIT_TESTSFAILED + + def spy_factory(self): + class SysModulesSnapshotSpy(object): + instances = [] + + def __init__(self, preserve=None): + SysModulesSnapshotSpy.instances.append(self) + self._spy_restore_count = 0 + self._spy_preserve = preserve + self.__snapshot = SysModulesSnapshot(preserve=preserve) + + def restore(self): + self._spy_restore_count += 1 + return self.__snapshot.restore() + return SysModulesSnapshotSpy + + def test_inline_run_taking_and_restoring_a_sys_modules_snapshot( + self, testdir, monkeypatch): + spy_factory = self.spy_factory() + monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory) + original = dict(sys.modules) + testdir.syspathinsert() + testdir.makepyfile(import1="# you son of a silly person") + testdir.makepyfile(import2="# my hovercraft is full of eels") + test_mod = testdir.makepyfile(""" + import import1 + def test_foo(): import import2""") + testdir.inline_run(str(test_mod)) + assert len(spy_factory.instances) == 1 + spy = spy_factory.instances[0] + assert spy._spy_restore_count == 1 + assert sys.modules == original + assert all(sys.modules[x] is original[x] for x in sys.modules) + + def test_inline_run_sys_modules_snapshot_restore_preserving_modules( + self, testdir, monkeypatch): + spy_factory = self.spy_factory() + monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory) + test_mod = testdir.makepyfile("def test_foo(): pass") + testdir.inline_run(str(test_mod)) + spy = spy_factory.instances[0] + assert not spy._spy_preserve("black_knight") + assert spy._spy_preserve("zope") + assert spy._spy_preserve("zope.interface") + assert spy._spy_preserve("zopelicious") + + def test_external_test_module_imports_not_cleaned_up(self, testdir): + testdir.syspathinsert() + testdir.makepyfile(imported="data = 'you son of a silly person'") + import imported + test_mod = testdir.makepyfile(""" + def test_foo(): + import imported + imported.data = 42""") + testdir.inline_run(str(test_mod)) + assert imported.data == 42 + + 
+def test_inline_run_clean_sys_paths(testdir): + def test_sys_path_change_cleanup(self, testdir): + test_path1 = testdir.tmpdir.join("boink1").strpath + test_path2 = testdir.tmpdir.join("boink2").strpath + test_path3 = testdir.tmpdir.join("boink3").strpath + sys.path.append(test_path1) + sys.meta_path.append(test_path1) + original_path = list(sys.path) + original_meta_path = list(sys.meta_path) + test_mod = testdir.makepyfile(""" + import sys + sys.path.append({:test_path2}) + sys.meta_path.append({:test_path2}) + def test_foo(): + sys.path.append({:test_path3}) + sys.meta_path.append({:test_path3})""".format(locals())) + testdir.inline_run(str(test_mod)) + assert sys.path == original_path + assert sys.meta_path == original_meta_path + + def spy_factory(self): + class SysPathsSnapshotSpy(object): + instances = [] + + def __init__(self): + SysPathsSnapshotSpy.instances.append(self) + self._spy_restore_count = 0 + self.__snapshot = SysPathsSnapshot() + + def restore(self): + self._spy_restore_count += 1 + return self.__snapshot.restore() + return SysPathsSnapshotSpy + + def test_inline_run_taking_and_restoring_a_sys_paths_snapshot( + self, testdir, monkeypatch): + spy_factory = self.spy_factory() + monkeypatch.setattr(pytester, "SysPathsSnapshot", spy_factory) + test_mod = testdir.makepyfile("def test_foo(): pass") + testdir.inline_run(str(test_mod)) + assert len(spy_factory.instances) == 1 + spy = spy_factory.instances[0] + assert spy._spy_restore_count == 1 + + +def test_assert_outcomes_after_pytest_error(testdir): testdir.makepyfile("def test_foo(): assert True") result = testdir.runpytest('--unexpected-argument') with pytest.raises(ValueError, message="Pytest terminal report not found"): result.assert_outcomes(passed=0) + + +def test_cwd_snapshot(tmpdir): + foo = tmpdir.ensure('foo', dir=1) + bar = tmpdir.ensure('bar', dir=1) + foo.chdir() + snapshot = CwdSnapshot() + bar.chdir() + assert py.path.local() == bar + snapshot.restore() + assert py.path.local() == foo + + +class TestSysModulesSnapshot(object): + key = 'my-test-module' + + def test_remove_added(self): + original = dict(sys.modules) + assert self.key not in sys.modules + snapshot = SysModulesSnapshot() + sys.modules[self.key] = 'something' + assert self.key in sys.modules + snapshot.restore() + assert sys.modules == original + + def test_add_removed(self, monkeypatch): + assert self.key not in sys.modules + monkeypatch.setitem(sys.modules, self.key, 'something') + assert self.key in sys.modules + original = dict(sys.modules) + snapshot = SysModulesSnapshot() + del sys.modules[self.key] + assert self.key not in sys.modules + snapshot.restore() + assert sys.modules == original + + def test_restore_reloaded(self, monkeypatch): + assert self.key not in sys.modules + monkeypatch.setitem(sys.modules, self.key, 'something') + assert self.key in sys.modules + original = dict(sys.modules) + snapshot = SysModulesSnapshot() + sys.modules[self.key] = 'something else' + snapshot.restore() + assert sys.modules == original + + def test_preserve_modules(self, monkeypatch): + key = [self.key + str(i) for i in range(3)] + assert not any(k in sys.modules for k in key) + for i, k in enumerate(key): + monkeypatch.setitem(sys.modules, k, 'something' + str(i)) + original = dict(sys.modules) + + def preserve(name): + return name in (key[0], key[1], 'some-other-key') + + snapshot = SysModulesSnapshot(preserve=preserve) + sys.modules[key[0]] = original[key[0]] = 'something else0' + sys.modules[key[1]] = original[key[1]] = 'something else1' + 
sys.modules[key[2]] = 'something else2' + snapshot.restore() + assert sys.modules == original + + def test_preserve_container(self, monkeypatch): + original = dict(sys.modules) + assert self.key not in original + replacement = dict(sys.modules) + replacement[self.key] = 'life of brian' + snapshot = SysModulesSnapshot() + monkeypatch.setattr(sys, 'modules', replacement) + snapshot.restore() + assert sys.modules is replacement + assert sys.modules == original + + +@pytest.mark.parametrize('path_type', ('path', 'meta_path')) +class TestSysPathsSnapshot(object): + other_path = { + 'path': 'meta_path', + 'meta_path': 'path'} + + @staticmethod + def path(n): + return 'my-dirty-little-secret-' + str(n) + + def test_restore(self, monkeypatch, path_type): + other_path_type = self.other_path[path_type] + for i in range(10): + assert self.path(i) not in getattr(sys, path_type) + sys_path = [self.path(i) for i in range(6)] + monkeypatch.setattr(sys, path_type, sys_path) + original = list(sys_path) + original_other = list(getattr(sys, other_path_type)) + snapshot = SysPathsSnapshot() + transformation = { + 'source': (0, 1, 2, 3, 4, 5), + 'target': ( 6, 2, 9, 7, 5, 8)} # noqa: E201 + assert sys_path == [self.path(x) for x in transformation['source']] + sys_path[1] = self.path(6) + sys_path[3] = self.path(7) + sys_path.append(self.path(8)) + del sys_path[4] + sys_path[3:3] = [self.path(9)] + del sys_path[0] + assert sys_path == [self.path(x) for x in transformation['target']] + snapshot.restore() + assert getattr(sys, path_type) is sys_path + assert getattr(sys, path_type) == original + assert getattr(sys, other_path_type) == original_other + + def test_preserve_container(self, monkeypatch, path_type): + other_path_type = self.other_path[path_type] + original_data = list(getattr(sys, path_type)) + original_other = getattr(sys, other_path_type) + original_other_data = list(original_other) + new = [] + snapshot = SysPathsSnapshot() + monkeypatch.setattr(sys, path_type, new) + snapshot.restore() + assert getattr(sys, path_type) is new + assert getattr(sys, path_type) == original_data + assert getattr(sys, other_path_type) is original_other + assert getattr(sys, other_path_type) == original_other_data diff --git a/testing/test_recwarn.py b/testing/test_recwarn.py index f1048f07d..1d99a7656 100644 --- a/testing/test_recwarn.py +++ b/testing/test_recwarn.py @@ -1,8 +1,6 @@ from __future__ import absolute_import, division, print_function import warnings import re -import py -import sys import pytest from _pytest.recwarn import WarningsRecorder @@ -25,16 +23,16 @@ class TestWarningsRecorderChecker(object): rec = WarningsRecorder() with rec: assert not rec.list - py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13) + warnings.warn_explicit("hello", UserWarning, "xyz", 13) assert len(rec.list) == 1 - py.std.warnings.warn(DeprecationWarning("hello")) + warnings.warn(DeprecationWarning("hello")) assert len(rec.list) == 2 warn = rec.pop() assert str(warn.message) == "hello" - l = rec.list + values = rec.list rec.clear() assert len(rec.list) == 0 - assert l is rec.list + assert values is rec.list pytest.raises(AssertionError, "rec.pop()") def test_typechecking(self): @@ -65,14 +63,14 @@ class TestDeprecatedCall(object): def dep(self, i, j=None): if i == 0: - py.std.warnings.warn("is deprecated", DeprecationWarning, - stacklevel=1) + warnings.warn("is deprecated", DeprecationWarning, + stacklevel=1) return 42 def dep_explicit(self, i): if i == 0: - py.std.warnings.warn_explicit("dep_explicit", 
category=DeprecationWarning, - filename="hello", lineno=3) + warnings.warn_explicit("dep_explicit", category=DeprecationWarning, + filename="hello", lineno=3) def test_deprecated_call_raises(self): with pytest.raises(AssertionError) as excinfo: @@ -87,16 +85,16 @@ class TestDeprecatedCall(object): assert ret == 42 def test_deprecated_call_preserves(self): - onceregistry = py.std.warnings.onceregistry.copy() - filters = py.std.warnings.filters[:] - warn = py.std.warnings.warn - warn_explicit = py.std.warnings.warn_explicit + onceregistry = warnings.onceregistry.copy() + filters = warnings.filters[:] + warn = warnings.warn + warn_explicit = warnings.warn_explicit self.test_deprecated_call_raises() self.test_deprecated_call() - assert onceregistry == py.std.warnings.onceregistry - assert filters == py.std.warnings.filters - assert warn is py.std.warnings.warn - assert warn_explicit is py.std.warnings.warn_explicit + assert onceregistry == warnings.onceregistry + assert filters == warnings.filters + assert warn is warnings.warn + assert warn_explicit is warnings.warn_explicit def test_deprecated_explicit_call_raises(self): with pytest.raises(AssertionError): @@ -115,7 +113,7 @@ class TestDeprecatedCall(object): pass msg = 'Did not produce DeprecationWarning or PendingDeprecationWarning' - with pytest.raises(AssertionError, matches=msg): + with pytest.raises(AssertionError, match=msg): if mode == 'call': pytest.deprecated_call(f) else: @@ -125,6 +123,7 @@ class TestDeprecatedCall(object): @pytest.mark.parametrize('warning_type', [PendingDeprecationWarning, DeprecationWarning]) @pytest.mark.parametrize('mode', ['context_manager', 'call']) @pytest.mark.parametrize('call_f_first', [True, False]) + @pytest.mark.filterwarnings('ignore') def test_deprecated_call_modes(self, warning_type, mode, call_f_first): """Ensure deprecated_call() captures a deprecation warning as expected inside its block/function. @@ -170,32 +169,6 @@ class TestDeprecatedCall(object): with pytest.deprecated_call(): f() - def test_deprecated_function_already_called(self, testdir): - """deprecated_call should be able to catch a call to a deprecated - function even if that function has already been called in the same - module. See #1190. - """ - testdir.makepyfile(""" - import warnings - import pytest - - def deprecated_function(): - warnings.warn("deprecated", DeprecationWarning) - - def test_one(): - deprecated_function() - - def test_two(): - pytest.deprecated_call(deprecated_function) - """) - result = testdir.runpytest() - # for some reason in py26 catch_warnings manages to catch the deprecation warning - # from deprecated_function(), even with default filters active (which ignore deprecation - # warnings) - py26 = sys.version_info[:2] == (2, 6) - expected = '*=== 2 passed in *===' if not py26 else '*=== 2 passed, 1 warnings in *===' - result.stdout.fnmatch_lines(expected) - class TestWarns(object): def test_strings(self): @@ -233,13 +206,13 @@ class TestWarns(object): with pytest.warns(RuntimeWarning): warnings.warn("user", UserWarning) excinfo.match(r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) was emitted. " - r"The list of emitted warnings is: \[UserWarning\('user',\)\].") + r"The list of emitted warnings is: \[UserWarning\('user',?\)\].") with pytest.raises(pytest.fail.Exception) as excinfo: with pytest.warns(UserWarning): warnings.warn("runtime", RuntimeWarning) excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. 
" - r"The list of emitted warnings is: \[RuntimeWarning\('runtime',\)\].") + r"The list of emitted warnings is: \[RuntimeWarning\('runtime',?\)\].") with pytest.raises(pytest.fail.Exception) as excinfo: with pytest.warns(UserWarning): @@ -283,9 +256,11 @@ class TestWarns(object): assert str(record[0].message) == "user" assert str(record[1].message) == "runtime" - class MyUserWarning(UserWarning): pass + class MyUserWarning(UserWarning): + pass - class MyRuntimeWarning(RuntimeWarning): pass + class MyRuntimeWarning(RuntimeWarning): + pass with pytest.warns((UserWarning, RuntimeWarning)) as record: warnings.warn("user", MyUserWarning) @@ -295,7 +270,6 @@ class TestWarns(object): assert str(record[0].message) == "user" assert str(record[1].message) == "runtime" - def test_double_test(self, testdir): """If a test is run again, the warning should still be raised""" testdir.makepyfile(''' @@ -309,3 +283,27 @@ class TestWarns(object): ''') result = testdir.runpytest() result.stdout.fnmatch_lines(['*2 passed in*']) + + def test_match_regex(self): + with pytest.warns(UserWarning, match=r'must be \d+$'): + warnings.warn("value must be 42", UserWarning) + + with pytest.raises(pytest.fail.Exception): + with pytest.warns(UserWarning, match=r'must be \d+$'): + warnings.warn("this is not here", UserWarning) + + with pytest.raises(pytest.fail.Exception): + with pytest.warns(FutureWarning, match=r'must be \d+$'): + warnings.warn("value must be 42", UserWarning) + + def test_one_from_multiple_warns(self): + with pytest.warns(UserWarning, match=r'aaa'): + warnings.warn("cccccccccc", UserWarning) + warnings.warn("bbbbbbbbbb", UserWarning) + warnings.warn("aaaaaaaaaa", UserWarning) + + def test_none_of_multiple_warns(self): + with pytest.raises(pytest.fail.Exception): + with pytest.warns(UserWarning, match=r'aaa'): + warnings.warn("bbbbbbbbbb", UserWarning) + warnings.warn("cccccccccc", UserWarning) diff --git a/testing/test_resultlog.py b/testing/test_resultlog.py index cb083225c..b1760721c 100644 --- a/testing/test_resultlog.py +++ b/testing/test_resultlog.py @@ -4,20 +4,20 @@ import os import _pytest._code import py import pytest -from _pytest.main import Node, Item, FSCollector +from _pytest.nodes import Node, Item, FSCollector from _pytest.resultlog import generic_path, ResultLog, \ - pytest_configure, pytest_unconfigure + pytest_configure, pytest_unconfigure def test_generic_path(testdir): from _pytest.main import Session config = testdir.parseconfig() session = Session(config) - p1 = Node('a', config=config, session=session) - #assert p1.fspath is None + p1 = Node('a', config=config, session=session, nodeid='a') + # assert p1.fspath is None p2 = Node('B', parent=p1) - p3 = Node('()', parent = p2) - item = Item('c', parent = p3) + p3 = Node('()', parent=p2) + item = Item('c', parent=p3) res = generic_path(item) assert res == 'a.B().c' @@ -25,13 +25,14 @@ def test_generic_path(testdir): p0 = FSCollector('proj/test', config=config, session=session) p1 = FSCollector('proj/test/a', parent=p0) p2 = Node('B', parent=p1) - p3 = Node('()', parent = p2) + p3 = Node('()', parent=p2) p4 = Node('c', parent=p3) - item = Item('[1]', parent = p4) + item = Item('[1]', parent=p4) res = generic_path(item) assert res == 'test/a:B().c[1]' + def test_write_log_entry(): reslog = ResultLog(None, None) reslog.logfile = py.io.TextIO() @@ -68,7 +69,7 @@ def test_write_log_entry(): entry_lines = entry.splitlines() assert len(entry_lines) == 5 assert entry_lines[0] == 'F name' - assert entry_lines[1:] == [' '+line for line in 
longrepr.splitlines()] + assert entry_lines[1:] == [' ' + line for line in longrepr.splitlines()] class TestWithFunctionIntegration(object): @@ -144,7 +145,7 @@ class TestWithFunctionIntegration(object): assert entry_lines[0].startswith('! ') if style != "native": - assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class + assert os.path.basename(__file__)[:-9] in entry_lines[0] # .pyc/class assert entry_lines[-1][0] == ' ' assert 'ValueError' in entry @@ -176,6 +177,7 @@ def test_generic(testdir, LineMatcher): "x *:test_xfail_norun", ]) + def test_makedir_for_resultlog(testdir, LineMatcher): """--resultlog should automatically create directories for the log file""" testdir.plugins.append("resultlog") @@ -224,5 +226,3 @@ def test_failure_issue380(testdir): """) result = testdir.runpytest("--resultlog=log") assert result.ret == 2 - - diff --git a/testing/test_runner.py b/testing/test_runner.py index def80ea5f..a3bd8ecb4 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -2,22 +2,25 @@ from __future__ import absolute_import, division, print_function import _pytest._code +import inspect import os import py import pytest import sys -from _pytest import runner, main +import types +from _pytest import runner, main, outcomes + class TestSetupState(object): def test_setup(self, testdir): ss = runner.SetupState() item = testdir.getitem("def test_func(): pass") - l = [1] + values = [1] ss.prepare(item) - ss.addfinalizer(l.pop, colitem=item) - assert l + ss.addfinalizer(values.pop, colitem=item) + assert values ss._pop_and_teardown() - assert not l + assert not values def test_teardown_exact_stack_empty(self, testdir): item = testdir.getitem("def test_func(): pass") @@ -31,7 +34,7 @@ class TestSetupState(object): def setup_module(mod): raise ValueError(42) def test_func(): pass - """) # noqa + """) ss = runner.SetupState() pytest.raises(ValueError, lambda: ss.prepare(item)) pytest.raises(ValueError, lambda: ss.prepare(item)) @@ -39,11 +42,14 @@ class TestSetupState(object): def test_teardown_multiple_one_fails(self, testdir): r = [] - def fin1(): r.append('fin1') + def fin1(): + r.append('fin1') - def fin2(): raise Exception('oops') + def fin2(): + raise Exception('oops') - def fin3(): r.append('fin3') + def fin3(): + r.append('fin3') item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() @@ -58,9 +64,11 @@ class TestSetupState(object): def test_teardown_multiple_fail(self, testdir): # Ensure the first exception is the one which is re-raised. # Ideally both would be reported however. 
- def fin1(): raise Exception('oops1') + def fin1(): + raise Exception('oops1') - def fin2(): raise Exception('oops2') + def fin2(): + raise Exception('oops2') item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() @@ -94,7 +102,7 @@ class BaseFunctionalTests(object): assert rep.failed assert rep.when == "call" assert rep.outcome == "failed" - #assert isinstance(rep.longrepr, ReprExceptionInfo) + # assert isinstance(rep.longrepr, ReprExceptionInfo) def test_skipfunction(self, testdir): reports = testdir.runitem(""" @@ -107,12 +115,12 @@ class BaseFunctionalTests(object): assert not rep.passed assert rep.skipped assert rep.outcome == "skipped" - #assert rep.skipped.when == "call" - #assert rep.skipped.when == "call" - #assert rep.skipped == "%sreason == "hello" - #assert rep.skipped.location.lineno == 3 - #assert rep.skipped.location.path - #assert not rep.skipped.failurerepr + # assert rep.skipped.when == "call" + # assert rep.skipped.when == "call" + # assert rep.skipped == "%sreason == "hello" + # assert rep.skipped.location.lineno == 3 + # assert rep.skipped.location.path + # assert not rep.skipped.failurerepr def test_skip_in_setup_function(self, testdir): reports = testdir.runitem(""" @@ -127,11 +135,11 @@ class BaseFunctionalTests(object): assert not rep.failed assert not rep.passed assert rep.skipped - #assert rep.skipped.reason == "hello" - #assert rep.skipped.location.lineno == 3 - #assert rep.skipped.location.lineno == 3 + # assert rep.skipped.reason == "hello" + # assert rep.skipped.location.lineno == 3 + # assert rep.skipped.location.lineno == 3 assert len(reports) == 2 - assert reports[1].passed # teardown + assert reports[1].passed # teardown def test_failure_in_setup_function(self, testdir): reports = testdir.runitem(""" @@ -163,8 +171,8 @@ class BaseFunctionalTests(object): assert not rep.passed assert rep.failed assert rep.when == "teardown" - #assert rep.longrepr.reprcrash.lineno == 3 - #assert rep.longrepr.reprtraceback.reprentries + # assert rep.longrepr.reprcrash.lineno == 3 + # assert rep.longrepr.reprtraceback.reprentries def test_custom_failure_repr(self, testdir): testdir.makepyfile(conftest=""" @@ -182,10 +190,10 @@ class BaseFunctionalTests(object): assert not rep.skipped assert not rep.passed assert rep.failed - #assert rep.outcome.when == "call" - #assert rep.failed.where.lineno == 3 - #assert rep.failed.where.path.basename == "test_func.py" - #assert rep.failed.failurerepr == "hello" + # assert rep.outcome.when == "call" + # assert rep.failed.where.lineno == 3 + # assert rep.failed.where.path.basename == "test_func.py" + # assert rep.failed.failurerepr == "hello" def test_teardown_final_returncode(self, testdir): rec = testdir.inline_runsource(""" @@ -196,6 +204,18 @@ class BaseFunctionalTests(object): """) assert rec.ret == 1 + def test_logstart_logfinish_hooks(self, testdir): + rec = testdir.inline_runsource(""" + import pytest + def test_func(): + pass + """) + reps = rec.getcalls("pytest_runtest_logstart pytest_runtest_logfinish") + assert [x._name for x in reps] == ['pytest_runtest_logstart', 'pytest_runtest_logfinish'] + for rep in reps: + assert rep.nodeid == 'test_logstart_logfinish_hooks.py::test_func' + assert rep.location == ('test_logstart_logfinish_hooks.py', 1, 'test_func') + def test_exact_teardown_issue90(self, testdir): rec = testdir.inline_runsource(""" import pytest @@ -220,14 +240,14 @@ class BaseFunctionalTests(object): raise ValueError(42) """) reps = rec.getreports("pytest_runtest_logreport") - print (reps) + 
print(reps) for i in range(2): assert reps[i].nodeid.endswith("test_method") assert reps[i].passed assert reps[2].when == "teardown" assert reps[2].failed assert len(reps) == 6 - for i in range(3,5): + for i in range(3, 5): assert reps[i].nodeid.endswith("test_func") assert reps[i].passed assert reps[5].when == "teardown" @@ -247,7 +267,7 @@ class BaseFunctionalTests(object): assert True """) reps = rec.getreports("pytest_runtest_logreport") - print (reps) + print(reps) assert len(reps) == 3 # assert reps[0].nodeid.endswith("test_method") @@ -262,11 +282,11 @@ class BaseFunctionalTests(object): assert reps[2].failed assert reps[2].when == "teardown" assert reps[2].longrepr.reprcrash.message in ( - # python3 error - "TypeError: teardown_method() missing 2 required positional arguments: 'y' and 'z'", - # python2 error - 'TypeError: teardown_method() takes exactly 4 arguments (2 given)' - ) + # python3 error + "TypeError: teardown_method() missing 2 required positional arguments: 'y' and 'z'", + # python2 error + 'TypeError: teardown_method() takes exactly 4 arguments (2 given)' + ) def test_failure_in_setup_function_ignores_custom_repr(self, testdir): testdir.makepyfile(conftest=""" @@ -287,10 +307,10 @@ class BaseFunctionalTests(object): assert not rep.skipped assert not rep.passed assert rep.failed - #assert rep.outcome.when == "setup" - #assert rep.outcome.where.lineno == 3 - #assert rep.outcome.where.path.basename == "test_func.py" - #assert instanace(rep.failed.failurerepr, PythonFailureRepr) + # assert rep.outcome.when == "setup" + # assert rep.outcome.where.lineno == 3 + # assert rep.outcome.where.path.basename == "test_func.py" + # assert instanace(rep.failed.failurerepr, PythonFailureRepr) def test_systemexit_does_not_bail_out(self, testdir): try: @@ -316,6 +336,7 @@ class BaseFunctionalTests(object): else: pytest.fail("did not raise") + class TestExecutionNonForked(BaseFunctionalTests): def getrunner(self): def f(item): @@ -333,6 +354,7 @@ class TestExecutionNonForked(BaseFunctionalTests): else: pytest.fail("did not raise") + class TestExecutionForked(BaseFunctionalTests): pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')") @@ -351,6 +373,7 @@ class TestExecutionForked(BaseFunctionalTests): assert rep.failed assert rep.when == "???" + class TestSessionReports(object): def test_collect_result(self, testdir): col = testdir.getmodulecol(""" @@ -380,22 +403,24 @@ reporttypes = [ runner.CollectReport, ] + @pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes]) def test_report_extra_parameters(reporttype): - if hasattr(py.std.inspect, 'signature'): - args = list(py.std.inspect.signature(reporttype.__init__).parameters.keys())[1:] + if hasattr(inspect, 'signature'): + args = list(inspect.signature(reporttype.__init__).parameters.keys())[1:] else: - args = py.std.inspect.getargspec(reporttype.__init__)[0][1:] + args = inspect.getargspec(reporttype.__init__)[0][1:] basekw = dict.fromkeys(args, []) report = reporttype(newthing=1, **basekw) assert report.newthing == 1 + def test_callinfo(): ci = runner.CallInfo(lambda: 0, '123') assert ci.when == "123" assert ci.result == 0 assert "result" in repr(ci) - ci = runner.CallInfo(lambda: 0/0, '123') + ci = runner.CallInfo(lambda: 0 / 0, '123') assert ci.when == "123" assert not hasattr(ci, 'result') assert ci.excinfo @@ -403,6 +428,8 @@ def test_callinfo(): # design question: do we want general hooks in python files? 
# then something like the following functional tests makes sense + + @pytest.mark.xfail def test_runtest_in_module_ordering(testdir): p1 = testdir.makepyfile(""" @@ -436,9 +463,18 @@ def test_runtest_in_module_ordering(testdir): def test_outcomeexception_exceptionattributes(): - outcome = runner.OutcomeException('test') + outcome = outcomes.OutcomeException('test') assert outcome.args[0] == outcome.msg + +def test_outcomeexception_passes_except_Exception(): + with pytest.raises(outcomes.OutcomeException): + try: + raise outcomes.OutcomeException('test') + except Exception: + pass + + def test_pytest_exit(): try: pytest.exit("hello") @@ -446,6 +482,7 @@ def test_pytest_exit(): excinfo = _pytest._code.ExceptionInfo() assert excinfo.errisinstance(KeyboardInterrupt) + def test_pytest_fail(): try: pytest.fail("hello") @@ -454,6 +491,7 @@ def test_pytest_fail(): s = excinfo.exconly(tryshort=True) assert s.startswith("Failed") + def test_pytest_exit_msg(testdir): testdir.makeconftest(""" import pytest @@ -466,6 +504,7 @@ def test_pytest_exit_msg(testdir): "Exit: oh noes", ]) + def test_pytest_fail_notrace(testdir): testdir.makepyfile(""" import pytest @@ -531,6 +570,7 @@ def test_exception_printing_skip(): s = excinfo.exconly(tryshort=True) assert s.startswith("Skipped") + def test_importorskip(monkeypatch): importorskip = pytest.importorskip @@ -538,10 +578,10 @@ def test_importorskip(monkeypatch): importorskip("asdlkj") try: - sys = importorskip("sys") # noqa - assert sys == py.std.sys - #path = pytest.importorskip("os.path") - #assert path == py.std.os.path + sysmod = importorskip("sys") + assert sysmod is sys + # path = pytest.importorskip("os.path") + # assert path == os.path excinfo = pytest.raises(pytest.skip.Exception, f) path = py.path.local(excinfo.getrepr().reprcrash.path) # check that importorskip reports the actual call @@ -549,7 +589,7 @@ def test_importorskip(monkeypatch): assert path.purebasename == "test_runner" pytest.raises(SyntaxError, "pytest.importorskip('x y z')") pytest.raises(SyntaxError, "pytest.importorskip('x=y')") - mod = py.std.types.ModuleType("hello123") + mod = types.ModuleType("hello123") mod.__version__ = "1.3" monkeypatch.setitem(sys.modules, "hello123", mod) pytest.raises(pytest.skip.Exception, """ @@ -561,13 +601,15 @@ def test_importorskip(monkeypatch): print(_pytest._code.ExceptionInfo()) pytest.fail("spurious skip") + def test_importorskip_imports_last_module_part(): ospath = pytest.importorskip("os.path") assert os.path == ospath + def test_importorskip_dev_module(monkeypatch): try: - mod = py.std.types.ModuleType("mockmodule") + mod = types.ModuleType("mockmodule") mod.__version__ = '0.13.0.dev-43290' monkeypatch.setitem(sys.modules, 'mockmodule', mod) mod2 = pytest.importorskip('mockmodule', minversion='0.12.0') @@ -609,12 +651,14 @@ def test_pytest_cmdline_main(testdir): def test_unicode_in_longrepr(testdir): testdir.makeconftest(""" - import py - def pytest_runtest_makereport(__multicall__): - rep = __multicall__.execute() + # -*- coding: utf-8 -*- + import pytest + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_makereport(): + outcome = yield + rep = outcome.get_result() if rep.when == "call": - rep.longrepr = py.builtin._totext("\\xc3\\xa4", "utf8") - return rep + rep.longrepr = u'ä' """) testdir.makepyfile(""" def test_out(): @@ -681,6 +725,8 @@ def test_store_except_info_on_eror(): """ # Simulate item that raises a specific exception class ItemThatRaises(object): + nodeid = 'item_that_raises' + def runtest(self): raise IndexError('TEST') 
try: @@ -693,6 +739,31 @@ def test_store_except_info_on_eror(): assert sys.last_traceback +def test_current_test_env_var(testdir, monkeypatch): + pytest_current_test_vars = [] + monkeypatch.setattr(sys, 'pytest_current_test_vars', pytest_current_test_vars, raising=False) + testdir.makepyfile(''' + import pytest + import sys + import os + + @pytest.fixture + def fix(): + sys.pytest_current_test_vars.append(('setup', os.environ['PYTEST_CURRENT_TEST'])) + yield + sys.pytest_current_test_vars.append(('teardown', os.environ['PYTEST_CURRENT_TEST'])) + + def test(fix): + sys.pytest_current_test_vars.append(('call', os.environ['PYTEST_CURRENT_TEST'])) + ''') + result = testdir.runpytest_inprocess() + assert result.ret == 0 + test_id = 'test_current_test_env_var.py::test' + assert pytest_current_test_vars == [ + ('setup', test_id + ' (setup)'), ('call', test_id + ' (call)'), ('teardown', test_id + ' (teardown)')] + assert 'PYTEST_CURRENT_TEST' not in os.environ + + class TestReportContents(object): """ Test user-level API of ``TestReport`` objects. @@ -754,5 +825,3 @@ class TestReportContents(object): rep = reports[1] assert rep.capstdout == '' assert rep.capstderr == '' - - diff --git a/testing/test_runner_xunit.py b/testing/test_runner_xunit.py index 92ba97202..fc931f867 100644 --- a/testing/test_runner_xunit.py +++ b/testing/test_runner_xunit.py @@ -36,22 +36,24 @@ def test_module_and_function_setup(testdir): rep = reprec.matchreport("test_module") assert rep.passed + def test_module_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" - l = [] + values = [] def setup_module(module): - l.append(1) + values.append(1) 0/0 def test_nothing(): pass def teardown_module(module): - l.append(2) + values.append(2) """) reprec.assertoutcome(failed=1) calls = reprec.getcalls("pytest_runtest_setup") - assert calls[0].item.module.l == [1] + assert calls[0].item.module.values == [1] + def test_setup_function_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" @@ -69,6 +71,7 @@ def test_setup_function_failure_no_teardown(testdir): calls = reprec.getcalls("pytest_runtest_setup") assert calls[0].item.module.modlevel == [1] + def test_class_setup(testdir): reprec = testdir.inline_runsource(""" class TestSimpleClassSetup(object): @@ -90,7 +93,8 @@ def test_class_setup(testdir): assert not TestSimpleClassSetup.clslevel assert not TestInheritedClassSetupStillWorks.clslevel """) - reprec.assertoutcome(passed=1+2+1) + reprec.assertoutcome(passed=1 + 2 + 1) + def test_class_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" @@ -110,6 +114,7 @@ def test_class_setup_failure_no_teardown(testdir): """) reprec.assertoutcome(failed=1, passed=1) + def test_method_setup(testdir): reprec = testdir.inline_runsource(""" class TestSetupMethod(object): @@ -126,6 +131,7 @@ def test_method_setup(testdir): """) reprec.assertoutcome(passed=2) + def test_method_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" class TestMethodSetup(object): @@ -145,6 +151,7 @@ def test_method_setup_failure_no_teardown(testdir): """) reprec.assertoutcome(failed=1, passed=1) + def test_method_generator_setup(testdir): reprec = testdir.inline_runsource(""" class TestSetupTeardownOnInstance(object): @@ -167,6 +174,7 @@ def test_method_generator_setup(testdir): """) reprec.assertoutcome(passed=1, failed=1) + def test_func_generator_setup(testdir): reprec = testdir.inline_runsource(""" import sys @@ -195,6 +203,7 @@ def test_func_generator_setup(testdir): rep = 
reprec.matchreport("test_one", names="pytest_runtest_logreport") assert rep.passed + def test_method_setup_uses_fresh_instances(testdir): reprec = testdir.inline_runsource(""" class TestSelfState1(object): @@ -207,6 +216,7 @@ def test_method_setup_uses_fresh_instances(testdir): """) reprec.assertoutcome(passed=2, failed=0) + def test_setup_that_skips_calledagain(testdir): p = testdir.makepyfile(""" import pytest @@ -220,6 +230,7 @@ def test_setup_that_skips_calledagain(testdir): reprec = testdir.inline_run(p) reprec.assertoutcome(skipped=2) + def test_setup_fails_again_on_all_tests(testdir): p = testdir.makepyfile(""" import pytest @@ -233,6 +244,7 @@ def test_setup_fails_again_on_all_tests(testdir): reprec = testdir.inline_run(p) reprec.assertoutcome(failed=2) + def test_setup_funcarg_setup_when_outer_scope_fails(testdir): p = testdir.makepyfile(""" import pytest diff --git a/testing/test_session.py b/testing/test_session.py index d08f7b3e2..32d8ce689 100644 --- a/testing/test_session.py +++ b/testing/test_session.py @@ -1,8 +1,10 @@ from __future__ import absolute_import, division, print_function + import pytest from _pytest.main import EXIT_NOTESTSCOLLECTED + class SessionTests(object): def test_basic_testitem_events(self, testdir): tfile = testdir.makepyfile(""" @@ -21,15 +23,18 @@ class SessionTests(object): assert len(skipped) == 0 assert len(passed) == 1 assert len(failed) == 3 - end = lambda x: x.nodeid.split("::")[-1] + + def end(x): + return x.nodeid.split("::")[-1] + assert end(failed[0]) == "test_one_one" assert end(failed[1]) == "test_other" itemstarted = reprec.getcalls("pytest_itemcollected") assert len(itemstarted) == 4 # XXX check for failing funcarg setup - #colreports = reprec.getcalls("pytest_collectreport") - #assert len(colreports) == 4 - #assert colreports[1].report.failed + # colreports = reprec.getcalls("pytest_collectreport") + # assert len(colreports) == 4 + # assert colreports[1].report.failed def test_nested_import_error(self, testdir): tfile = testdir.makepyfile(""" @@ -41,9 +46,9 @@ class SessionTests(object): a = 1 """) reprec = testdir.inline_run(tfile) - l = reprec.getfailedcollections() - assert len(l) == 1 - out = str(l[0].longrepr) + values = reprec.getfailedcollections() + assert len(values) == 1 + out = str(values[0].longrepr) assert out.find('does_not_work') != -1 def test_raises_output(self, testdir): @@ -71,9 +76,9 @@ class SessionTests(object): def test_syntax_error_module(self, testdir): reprec = testdir.inline_runsource("this is really not python") - l = reprec.getfailedcollections() - assert len(l) == 1 - out = str(l[0].longrepr) + values = reprec.getfailedcollections() + assert len(values) == 1 + out = str(values[0].longrepr) assert out.find(str('not python')) != -1 def test_exit_first_problem(self, testdir): @@ -117,7 +122,7 @@ class SessionTests(object): passed, skipped, failed = reprec.listoutcomes() assert len(failed) == 1 out = failed[0].longrepr.reprcrash.message - assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #' + assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 # ' def test_skip_file_by_conftest(self, testdir): testdir.makepyfile(conftest=""" @@ -135,19 +140,20 @@ class SessionTests(object): assert len(reports) == 1 assert reports[0].skipped + class TestNewSession(SessionTests): def test_order_of_execution(self, testdir): reprec = testdir.inline_runsource(""" - l = [] + values = [] def test_1(): - l.append(1) + values.append(1) def 
test_2(): - l.append(2) + values.append(2) def test_3(): - assert l == [1,2] + assert values == [1,2] class Testmygroup(object): - reslist = l + reslist = values def test_1(self): self.reslist.append(1) def test_2(self): @@ -186,7 +192,7 @@ class TestNewSession(SessionTests): started = reprec.getcalls("pytest_collectstart") finished = reprec.getreports("pytest_collectreport") assert len(started) == len(finished) - assert len(started) == 7 # XXX extra TopCollector + assert len(started) == 7 # XXX extra TopCollector colfail = [x for x in finished if x.failed] assert len(colfail) == 1 @@ -211,9 +217,10 @@ def test_plugin_specify(testdir): pytest.raises(ImportError, """ testdir.parseconfig("-p", "nqweotexistent") """) - #pytest.raises(ImportError, + # pytest.raises(ImportError, # "config.do_configure(config)" - #) + # ) + def test_plugin_already_exists(testdir): config = testdir.parseconfig("-p", "terminal") @@ -221,6 +228,7 @@ def test_plugin_already_exists(testdir): config._do_configure() config._ensure_unconfigure() + def test_exclude(testdir): hellodir = testdir.mkdir("hello") hellodir.join("test_hello.py").write("x y syntaxerror") @@ -231,17 +239,61 @@ def test_exclude(testdir): assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) + +def test_deselect(testdir): + testdir.makepyfile(test_a=""" + import pytest + def test_a1(): pass + @pytest.mark.parametrize('b', range(3)) + def test_a2(b): pass + """) + result = testdir.runpytest("-v", "--deselect=test_a.py::test_a2[1]", "--deselect=test_a.py::test_a2[2]") + assert result.ret == 0 + result.stdout.fnmatch_lines(["*2 passed, 2 deselected*"]) + for line in result.stdout.lines: + assert not line.startswith(('test_a.py::test_a2[1]', 'test_a.py::test_a2[2]')) + + def test_sessionfinish_with_start(testdir): testdir.makeconftest(""" import os - l = [] + values = [] def pytest_sessionstart(): - l.append(os.getcwd()) + values.append(os.getcwd()) os.chdir("..") def pytest_sessionfinish(): - assert l[0] == os.getcwd() + assert values[0] == os.getcwd() """) res = testdir.runpytest("--collect-only") assert res.ret == EXIT_NOTESTSCOLLECTED + + +@pytest.mark.parametrize("path", ["root", "{relative}/root", "{environment}/root"]) +def test_rootdir_option_arg(testdir, monkeypatch, path): + monkeypatch.setenv('PY_ROOTDIR_PATH', str(testdir.tmpdir)) + path = path.format(relative=str(testdir.tmpdir), + environment='$PY_ROOTDIR_PATH') + + rootdir = testdir.mkdir("root") + rootdir.mkdir("tests") + testdir.makepyfile(""" + import os + def test_one(): + assert 1 + """) + + result = testdir.runpytest("--rootdir={}".format(path)) + result.stdout.fnmatch_lines(['*rootdir: {}/root, inifile:*'.format(testdir.tmpdir), "*1 passed*"]) + + +def test_rootdir_wrong_option_arg(testdir): + testdir.makepyfile(""" + import os + def test_one(): + assert 1 + """) + + result = testdir.runpytest("--rootdir=wrong_dir") + result.stderr.fnmatch_lines(["*Directory *wrong_dir* not found. 
Check your '--rootdir' option.*"]) diff --git a/testing/test_skipping.py b/testing/test_skipping.py index 5f25c3e6e..90562c939 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -80,7 +80,7 @@ class TestEvaluator(object): %s def test_func(): pass - """ % (lines[i], lines[(i+1) %2])) + """ % (lines[i], lines[(i + 1) % 2])) ev = MarkEvaluator(item, 'skipif') assert ev assert ev.istrue() @@ -156,6 +156,21 @@ class TestXFail(object): assert callreport.passed assert callreport.wasxfail == "this is an xfail" + def test_xfail_using_platform(self, testdir): + """ + Verify that platform can be used with xfail statements. + """ + item = testdir.getitem(""" + import pytest + @pytest.mark.xfail("platform.platform() == platform.platform()") + def test_func(): + assert 0 + """) + reports = runtestprotocol(item, log=False) + assert len(reports) == 3 + callreport = reports[1] + assert callreport.wasxfail + def test_xfail_xpassed_strict(self, testdir): item = testdir.getitem(""" import pytest @@ -207,9 +222,9 @@ class TestXFail(object): assert 0 """) testdir.runpytest(p, '-v') - #result.stdout.fnmatch_lines([ + # result.stdout.fnmatch_lines([ # "*HINT*use*-r*" - #]) + # ]) def test_xfail_not_run_xfail_reporting(self, testdir): p = testdir.makepyfile(test_one=""" @@ -350,7 +365,6 @@ class TestXFail(object): "*1 xfailed*", ]) - @pytest.mark.parametrize('expected, actual, matchline', [('TypeError', 'TypeError', "*1 xfailed*"), ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"), @@ -576,12 +590,13 @@ class TestSkip(object): def test_hello(): pass """) - result = testdir.runpytest("-rs --strict") + result = testdir.runpytest("-rs") result.stdout.fnmatch_lines([ "*unconditional skip*", "*1 skipped*", ]) + class TestSkipif(object): def test_skipif_conditional(self, testdir): item = testdir.getitem(""" @@ -589,7 +604,7 @@ class TestSkipif(object): @pytest.mark.skipif("hasattr(os, 'sep')") def test_func(): pass - """) # noqa + """) x = pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item)) assert x.value.msg == "condition: hasattr(os, 'sep')" @@ -612,6 +627,16 @@ class TestSkipif(object): ]) assert result.ret == 0 + def test_skipif_using_platform(self, testdir): + item = testdir.getitem(""" + import pytest + @pytest.mark.skipif("platform.platform() == platform.platform()") + def test_func(): + pass + """) + pytest.raises(pytest.skip.Exception, lambda: + pytest_runtest_setup(item)) + @pytest.mark.parametrize('marker, msg1, msg2', [ ('skipif', 'SKIP', 'skipped'), ('xfail', 'XPASS', 'xpassed'), @@ -640,7 +665,7 @@ def test_skip_not_report_default(testdir): """) result = testdir.runpytest(p, '-v') result.stdout.fnmatch_lines([ - #"*HINT*use*-r*", + # "*HINT*use*-r*", "*1 skipped*", ]) @@ -663,7 +688,7 @@ def test_skipif_class(testdir): def test_skip_reasons_folding(): - path = 'xyz' + path = "xyz" lineno = 3 message = "justso" longrepr = (path, lineno, message) @@ -676,17 +701,24 @@ def test_skip_reasons_folding(): ev1.longrepr = longrepr ev2 = X() + ev2.when = "execute" ev2.longrepr = longrepr ev2.skipped = True - l = folded_skips([ev1, ev2]) - assert len(l) == 1 - num, fspath, lineno, reason = l[0] - assert num == 2 + # ev3 might be a collection report + ev3 = X() + ev3.longrepr = longrepr + ev3.skipped = True + + values = folded_skips([ev1, ev2, ev3]) + assert len(values) == 1 + num, fspath, lineno, reason = values[0] + assert num == 3 assert fspath == path assert lineno == lineno assert reason == message + def test_skipped_reasons_functional(testdir): 
testdir.makepyfile( test_one=""" @@ -699,7 +731,7 @@ def test_skipped_reasons_functional(testdir): def test_method(self): doskip() """, - conftest = """ + conftest=""" import pytest def doskip(): pytest.skip('test') @@ -707,10 +739,32 @@ def test_skipped_reasons_functional(testdir): ) result = testdir.runpytest('-rs') result.stdout.fnmatch_lines([ - "*SKIP*2*conftest.py:3: test", + "*SKIP*2*conftest.py:4: test", ]) assert result.ret == 0 + +def test_skipped_folding(testdir): + testdir.makepyfile( + test_one=""" + import pytest + pytestmark = pytest.mark.skip("Folding") + def setup_function(func): + pass + def test_func(): + pass + class TestClass(object): + def test_method(self): + pass + """, + ) + result = testdir.runpytest('-rs') + result.stdout.fnmatch_lines([ + "*SKIP*2*test_one.py: Folding" + ]) + assert result.ret == 0 + + def test_reportchars(testdir): testdir.makepyfile(""" import pytest @@ -733,6 +787,7 @@ def test_reportchars(testdir): "SKIP*four*", ]) + def test_reportchars_error(testdir): testdir.makepyfile( conftest=""" @@ -748,6 +803,7 @@ def test_reportchars_error(testdir): 'ERROR*test_foo*', ]) + def test_reportchars_all(testdir): testdir.makepyfile(""" import pytest @@ -770,6 +826,7 @@ def test_reportchars_all(testdir): "XPASS*test_3*", ]) + def test_reportchars_all_error(testdir): testdir.makepyfile( conftest=""" @@ -785,6 +842,7 @@ def test_reportchars_all_error(testdir): 'ERROR*test_foo*', ]) + @pytest.mark.xfail("hasattr(sys, 'pypy_version_info')") def test_errors_in_xfail_skip_expressions(testdir): testdir.makepyfile(""" @@ -816,6 +874,7 @@ def test_errors_in_xfail_skip_expressions(testdir): "*1 pass*2 error*", ]) + def test_xfail_skipif_with_globals(testdir): testdir.makepyfile(""" import pytest @@ -834,6 +893,7 @@ def test_xfail_skipif_with_globals(testdir): "*x == 3*", ]) + def test_direct_gives_error(testdir): testdir.makepyfile(""" import pytest @@ -854,6 +914,7 @@ def test_default_markers(testdir): "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*", ]) + def test_xfail_test_setup_exception(testdir): testdir.makeconftest(""" def pytest_runtest_setup(): @@ -870,6 +931,7 @@ def test_xfail_test_setup_exception(testdir): assert 'xfailed' in result.stdout.str() assert 'xpassed' not in result.stdout.str() + def test_imperativeskip_on_xfail_test(testdir): testdir.makepyfile(""" import pytest @@ -893,6 +955,7 @@ def test_imperativeskip_on_xfail_test(testdir): *2 skipped* """) + class TestBooleanCondition(object): def test_skipif(self, testdir): testdir.makepyfile(""" @@ -972,6 +1035,40 @@ def test_module_level_skip_error(testdir): ) +def test_module_level_skip_with_allow_module_level(testdir): + """ + Verify that using pytest.skip(allow_module_level=True) is allowed + """ + testdir.makepyfile(""" + import pytest + pytest.skip("skip_module_level", allow_module_level=True) + + def test_func(): + assert 0 + """) + result = testdir.runpytest("-rxs") + result.stdout.fnmatch_lines( + "*SKIP*skip_module_level" + ) + + +def test_invalid_skip_keyword_parameter(testdir): + """ + Verify that using pytest.skip() with unknown parameter raises an error + """ + testdir.makepyfile(""" + import pytest + pytest.skip("skip_module_level", unknown=1) + + def test_func(): + assert 0 + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines( + "*TypeError:*['unknown']*" + ) + + def test_mark_xfail_item(testdir): # Ensure pytest.mark.xfail works with non-Python Item testdir.makeconftest(""" @@ -993,3 +1090,18 @@ def test_mark_xfail_item(testdir): 
assert not failed xfailed = [r for r in skipped if hasattr(r, 'wasxfail')] assert xfailed + + +def test_summary_list_after_errors(testdir): + """Ensure the list of errors/fails/xfails/skips appears after tracebacks in terminal reporting.""" + testdir.makepyfile(""" + import pytest + def test_fail(): + assert 0 + """) + result = testdir.runpytest('-ra') + result.stdout.fnmatch_lines([ + '=* FAILURES *=', + '*= short test summary info =*', + 'FAIL test_summary_list_after_errors.py::test_fail', + ]) diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 45c354206..8ef25062e 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -5,7 +5,7 @@ from __future__ import absolute_import, division, print_function import collections import sys -import _pytest._pluggy as pluggy +import pluggy import _pytest._code import py import pytest @@ -24,23 +24,27 @@ class Option(object): @property def args(self): - l = [] + values = [] if self.verbose: - l.append('-v') + values.append('-v') if self.fulltrace: - l.append('--fulltrace') - return l + values.append('--fulltrace') + return values -def pytest_generate_tests(metafunc): - if "option" in metafunc.fixturenames: - metafunc.addcall(id="default", - funcargs={'option': Option(verbose=False)}) - metafunc.addcall(id="verbose", - funcargs={'option': Option(verbose=True)}) - metafunc.addcall(id="quiet", - funcargs={'option': Option(verbose= -1)}) - metafunc.addcall(id="fulltrace", - funcargs={'option': Option(fulltrace=True)}) + +@pytest.fixture(params=[ + Option(verbose=False), + Option(verbose=True), + Option(verbose=-1), + Option(fulltrace=True), +], ids=[ + "default", + "verbose", + "quiet", + "fulltrace", +]) +def option(request): + return request.param @pytest.mark.parametrize('input,expected', [ @@ -77,8 +81,8 @@ class TestTerminal(object): ]) else: result.stdout.fnmatch_lines([ - "*test_pass_skip_fail.py .sF" - ]) + "*test_pass_skip_fail.py .sF*" + ]) result.stdout.fnmatch_lines([ " def test_func():", "> assert 0", @@ -110,7 +114,7 @@ class TestTerminal(object): item.config.pluginmanager.register(tr) location = item.reportinfo() tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid, - location=location, fspath=str(item.fspath)) + location=location, fspath=str(item.fspath)) linecomp.assert_contains_lines([ "*test_show_runtest_logstart.py*" ]) @@ -141,12 +145,12 @@ class TestTerminal(object): """) result = testdir.runpytest(p2) result.stdout.fnmatch_lines([ - "*test_p2.py .", + "*test_p2.py .*", "*1 passed*", ]) result = testdir.runpytest("-v", p2) result.stdout.fnmatch_lines([ - "*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED", + "*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED*", ]) def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir): @@ -213,6 +217,16 @@ class TestTerminal(object): result = testdir.runpytest() result.stdout.fnmatch_lines(['collected 1 item']) + def test_rewrite(self, testdir, monkeypatch): + config = testdir.parseconfig() + f = py.io.TextIO() + monkeypatch.setattr(f, 'isatty', lambda *args: True) + tr = TerminalReporter(config, f) + tr._tw.fullwidth = 10 + tr.write('hello') + tr.rewrite('hey', erase=True) + assert f.getvalue() == 'hello' + '\r' + 'hey' + (6 * ' ') + class TestCollectonly(object): def test_collectonly_basic(self, testdir): @@ -222,8 +236,8 @@ class TestCollectonly(object): """) result = testdir.runpytest("--collect-only",) result.stdout.fnmatch_lines([ - "", - " ", + "", + " ", ]) def test_collectonly_skipped_module(self, testdir): @@ -264,13 +278,13 @@ 
class TestCollectonly(object): pass """) result = testdir.runpytest("--collect-only", p) - #assert stderr.startswith("inserting into sys.path") + # assert stderr.startswith("inserting into sys.path") assert result.ret == 0 result.stdout.fnmatch_lines([ "*", "* ", "* ", - #"* ", + # "* ", "* ", ]) @@ -315,10 +329,11 @@ def test_repr_python_version(monkeypatch): try: monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0)) assert repr_pythonversion() == "2.5.1-final-0" - py.std.sys.version_info = x = (2, 3) + sys.version_info = x = (2, 3) assert repr_pythonversion() == str(x) finally: - monkeypatch.undo() # do this early as pytest can get confused + monkeypatch.undo() # do this early as pytest can get confused + class TestFixtureReporting(object): def test_setup_fixture_error(self, testdir): @@ -378,7 +393,7 @@ class TestFixtureReporting(object): "*def test_fail():", "*failingfunc*", "*1 failed*1 error*", - ]) + ]) def test_setup_teardown_output_and_test_failure(self, testdir): """ Test for issue #442 """ @@ -403,7 +418,8 @@ class TestFixtureReporting(object): "*teardown func*", "*1 failed*", - ]) + ]) + class TestTerminalFunctional(object): def test_deselected(self, testdir): @@ -415,14 +431,39 @@ class TestTerminalFunctional(object): def test_three(): pass """ - ) + ) result = testdir.runpytest("-k", "test_two:", testpath) result.stdout.fnmatch_lines([ - "*test_deselected.py ..", - "=* 1 test*deselected *=", + "collected 3 items / 1 deselected", + "*test_deselected.py ..*", ]) assert result.ret == 0 + def test_show_deselected_items_using_markexpr_before_test_execution( + self, testdir): + testdir.makepyfile(""" + import pytest + + @pytest.mark.foo + def test_foobar(): + pass + + @pytest.mark.bar + def test_bar(): + pass + + def test_pass(): + pass + """) + result = testdir.runpytest('-m', 'not foo') + result.stdout.fnmatch_lines([ + "collected 3 items / 1 deselected", + "*test_show_des*.py ..*", + "*= 2 passed, 1 deselected in * =*", + ]) + assert "= 1 deselected =" not in result.stdout.str() + assert result.ret == 0 + def test_no_skip_summary_if_failure(self, testdir): testdir.makepyfile(""" import pytest @@ -451,7 +492,7 @@ class TestTerminalFunctional(object): finally: old.chdir() result.stdout.fnmatch_lines([ - "test_passes.py ..", + "test_passes.py ..*", "* 2 pass*", ]) assert result.ret == 0 @@ -462,13 +503,13 @@ class TestTerminalFunctional(object): pass """) result = testdir.runpytest() - verinfo = ".".join(map(str, py.std.sys.version_info[:3])) + verinfo = ".".join(map(str, sys.version_info[:3])) result.stdout.fnmatch_lines([ "*===== test session starts ====*", "platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s" % ( - py.std.sys.platform, verinfo, + sys.platform, verinfo, pytest.__version__, py.__version__, pluggy.__version__), - "*test_header_trailer_info.py .", + "*test_header_trailer_info.py .*", "=* 1 passed*in *.[0-9][0-9] seconds *=", ]) if pytest.config.pluginmanager.list_plugin_distinfo(): @@ -485,7 +526,7 @@ class TestTerminalFunctional(object): """) result = testdir.runpytest(p1, '-l') result.stdout.fnmatch_lines([ - #"_ _ * Locals *", + # "_ _ * Locals *", "x* = 3", "y* = 'xxxxxx*" ]) @@ -541,6 +582,23 @@ class TestTerminalFunctional(object): assert "===" not in s assert "passed" not in s + def test_report_collectionfinish_hook(self, testdir): + testdir.makeconftest(""" + def pytest_report_collectionfinish(config, startdir, items): + return ['hello from hook: {0} items'.format(len(items))] + """) + testdir.makepyfile(""" + import pytest + 
@pytest.mark.parametrize('i', range(3)) + def test(i): + pass + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "collected 3 items", + "hello from hook: 3 items", + ]) + def test_fail_extra_reporting(testdir): testdir.makepyfile("def test_this(): assert 0") @@ -552,11 +610,13 @@ def test_fail_extra_reporting(testdir): "FAIL*test_fail_extra_reporting*", ]) + def test_fail_reporting_on_pass(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest('-rf') assert 'short test summary' not in result.stdout.str() + def test_pass_extra_reporting(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest() @@ -567,11 +627,13 @@ def test_pass_extra_reporting(testdir): "PASS*test_pass_extra_reporting*", ]) + def test_pass_reporting_on_fail(testdir): testdir.makepyfile("def test_this(): assert 0") result = testdir.runpytest('-rp') assert 'short test summary' not in result.stdout.str() + def test_pass_output_reporting(testdir): testdir.makepyfile(""" def test_pass_output(): @@ -584,6 +646,7 @@ def test_pass_output_reporting(testdir): "Four score and seven years ago...", ]) + def test_color_yes(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest('--color=yes') @@ -622,10 +685,12 @@ def test_color_yes_collection_on_non_atty(testdir, verbose): def test_getreportopt(): - class config(object): - class option(object): + class Config(object): + class Option(object): reportchars = "" disable_warnings = True + option = Option() + config = Config() config.option.reportchars = "sf" assert getreportopt(config) == "sf" @@ -660,6 +725,7 @@ def test_terminalreporter_reportopt_addopts(testdir): "*1 passed*" ]) + def test_tbstyle_short(testdir): p = testdir.makepyfile(""" import pytest @@ -685,6 +751,7 @@ def test_tbstyle_short(testdir): assert 'x = 0' in s assert 'assert x' in s + def test_traceconfig(testdir, monkeypatch): result = testdir.runpytest("--traceconfig") result.stdout.fnmatch_lines([ @@ -697,6 +764,7 @@ class TestGenericReporting(object): """ this test class can be subclassed with a different option provider to run e.g. distributed tests. """ + def test_collect_fail(self, testdir, option): testdir.makepyfile("import xyz\n") result = testdir.runpytest(*option.args) @@ -719,11 +787,9 @@ class TestGenericReporting(object): result.stdout.fnmatch_lines([ "*def test_1():*", "*def test_2():*", - "*!! Interrupted: stopping after 2 failures*!!*", "*2 failed*", ]) - def test_tb_option(self, testdir, option): testdir.makepyfile(""" import pytest @@ -787,6 +853,52 @@ def pytest_report_header(config, startdir): str(testdir.tmpdir), ]) + def test_show_capture(self, testdir): + testdir.makepyfile(""" + import sys + import logging + def test_one(): + sys.stdout.write('!This is stdout!') + sys.stderr.write('!This is stderr!') + logging.warning('!This is a warning log msg!') + assert False, 'Something failed' + """) + + result = testdir.runpytest("--tb=short") + result.stdout.fnmatch_lines(["!This is stdout!", + "!This is stderr!", + "*WARNING*!This is a warning log msg!"]) + + result = testdir.runpytest("--show-capture=all", "--tb=short") + result.stdout.fnmatch_lines(["!This is stdout!", + "!This is stderr!", + "*WARNING*!This is a warning log msg!"]) + + stdout = testdir.runpytest( + "--show-capture=stdout", "--tb=short").stdout.str() + assert "!This is stderr!" not in stdout + assert "!This is stdout!" in stdout + assert "!This is a warning log msg!" 
not in stdout + + stdout = testdir.runpytest( + "--show-capture=stderr", "--tb=short").stdout.str() + assert "!This is stdout!" not in stdout + assert "!This is stderr!" in stdout + assert "!This is a warning log msg!" not in stdout + + stdout = testdir.runpytest( + "--show-capture=log", "--tb=short").stdout.str() + assert "!This is stdout!" not in stdout + assert "!This is stderr!" not in stdout + assert "!This is a warning log msg!" in stdout + + stdout = testdir.runpytest( + "--show-capture=no", "--tb=short").stdout.str() + assert "!This is stdout!" not in stdout + assert "!This is stderr!" not in stdout + assert "!This is a warning log msg!" not in stdout + + @pytest.mark.xfail("not hasattr(os, 'dup')") def test_fdopen_kept_alive_issue124(testdir): testdir.makepyfile(""" @@ -805,6 +917,7 @@ def test_fdopen_kept_alive_issue124(testdir): "*2 passed*" ]) + def test_tbstyle_native_setup_error(testdir): testdir.makepyfile(""" import pytest @@ -817,8 +930,9 @@ def test_tbstyle_native_setup_error(testdir): """) result = testdir.runpytest("--tb=native") result.stdout.fnmatch_lines([ - '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*' - ]) + '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*' + ]) + def test_terminal_summary(testdir): testdir.makeconftest(""" @@ -872,7 +986,7 @@ def test_terminal_summary_warnings_are_displayed(testdir): ("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}), - ("green", "5 passed", {"passed": (1,2,3,4,5)}), + ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}), # "Boring" statuses. These have no effect on the color of the summary @@ -901,13 +1015,13 @@ def test_terminal_summary_warnings_are_displayed(testdir): # A couple more complex combinations ("red", "1 failed, 2 passed, 3 xfailed", - {"passed": (1,2), "failed": (1,), "xfailed": (1,2,3)}), + {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)}), ("green", "1 passed, 2 skipped, 3 deselected, 2 xfailed", {"passed": (1,), - "skipped": (1,2), - "deselected": (1,2,3), - "xfailed": (1,2)}), + "skipped": (1, 2), + "deselected": (1, 2, 3), + "xfailed": (1, 2)}), ]) def test_summary_stats(exp_line, exp_color, stats_arg): print("Based on stats: %s" % stats_arg) @@ -925,3 +1039,162 @@ def test_no_trailing_whitespace_after_inifile_word(testdir): testdir.makeini('[pytest]') result = testdir.runpytest('') assert 'inifile: tox.ini\n' in result.stdout.str() + + +class TestProgress(object): + + @pytest.fixture + def many_tests_files(self, testdir): + testdir.makepyfile( + test_bar=""" + import pytest + @pytest.mark.parametrize('i', range(10)) + def test_bar(i): pass + """, + test_foo=""" + import pytest + @pytest.mark.parametrize('i', range(5)) + def test_foo(i): pass + """, + test_foobar=""" + import pytest + @pytest.mark.parametrize('i', range(5)) + def test_foobar(i): pass + """, + ) + + def test_zero_tests_collected(self, testdir): + """Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being + actually collected (#2971).""" + testdir.makeconftest(""" + def pytest_collection_modifyitems(items, config): + from _pytest.runner import CollectReport + for node_id in ('nodeid1', 'nodeid2'): + rep = CollectReport(node_id, 'passed', None, None) + rep.when = 'passed' + rep.duration = 0.1 + config.hook.pytest_runtest_logreport(report=rep) + """) + output = testdir.runpytest() + assert 'ZeroDivisionError' not in output.stdout.str() + output.stdout.fnmatch_lines([ + '=* 2 passed in *=', + ]) + + def test_normal(self, 
many_tests_files, testdir): + output = testdir.runpytest() + output.stdout.re_match_lines([ + r'test_bar.py \.{10} \s+ \[ 50%\]', + r'test_foo.py \.{5} \s+ \[ 75%\]', + r'test_foobar.py \.{5} \s+ \[100%\]', + ]) + + def test_verbose(self, many_tests_files, testdir): + output = testdir.runpytest('-v') + output.stdout.re_match_lines([ + r'test_bar.py::test_bar\[0\] PASSED \s+ \[ 5%\]', + r'test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]', + r'test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]', + ]) + + def test_xdist_normal(self, many_tests_files, testdir): + pytest.importorskip('xdist') + output = testdir.runpytest('-n2') + output.stdout.re_match_lines([ + r'\.{20} \s+ \[100%\]', + ]) + + def test_xdist_verbose(self, many_tests_files, testdir): + pytest.importorskip('xdist') + output = testdir.runpytest('-n2', '-v') + output.stdout.re_match_lines_random([ + r'\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]', + r'\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]', + r'\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]', + ]) + + def test_capture_no(self, many_tests_files, testdir): + output = testdir.runpytest('-s') + output.stdout.re_match_lines([ + r'test_bar.py \.{10}', + r'test_foo.py \.{5}', + r'test_foobar.py \.{5}', + ]) + + output = testdir.runpytest('--capture=no') + assert "%]" not in output.stdout.str() + + +class TestProgressWithTeardown(object): + """Ensure we show the correct percentages for tests that fail during teardown (#3088)""" + + @pytest.fixture + def contest_with_teardown_fixture(self, testdir): + testdir.makeconftest(''' + import pytest + + @pytest.fixture + def fail_teardown(): + yield + assert False + ''') + + @pytest.fixture + def many_files(self, testdir, contest_with_teardown_fixture): + testdir.makepyfile( + test_bar=''' + import pytest + @pytest.mark.parametrize('i', range(5)) + def test_bar(fail_teardown, i): + pass + ''', + test_foo=''' + import pytest + @pytest.mark.parametrize('i', range(15)) + def test_foo(fail_teardown, i): + pass + ''', + ) + + def test_teardown_simple(self, testdir, contest_with_teardown_fixture): + testdir.makepyfile(''' + def test_foo(fail_teardown): + pass + ''') + output = testdir.runpytest() + output.stdout.re_match_lines([ + r'test_teardown_simple.py \.E\s+\[100%\]', + ]) + + def test_teardown_with_test_also_failing(self, testdir, contest_with_teardown_fixture): + testdir.makepyfile(''' + def test_foo(fail_teardown): + assert False + ''') + output = testdir.runpytest() + output.stdout.re_match_lines([ + r'test_teardown_with_test_also_failing.py FE\s+\[100%\]', + ]) + + def test_teardown_many(self, testdir, many_files): + output = testdir.runpytest() + output.stdout.re_match_lines([ + r'test_bar.py (\.E){5}\s+\[ 25%\]', + r'test_foo.py (\.E){15}\s+\[100%\]', + ]) + + def test_teardown_many_verbose(self, testdir, many_files): + output = testdir.runpytest('-v') + output.stdout.re_match_lines([ + r'test_bar.py::test_bar\[0\] PASSED\s+\[ 5%\]', + r'test_bar.py::test_bar\[0\] ERROR\s+\[ 5%\]', + r'test_bar.py::test_bar\[4\] PASSED\s+\[ 25%\]', + r'test_bar.py::test_bar\[4\] ERROR\s+\[ 25%\]', + ]) + + def test_xdist_normal(self, many_files, testdir): + pytest.importorskip('xdist') + output = testdir.runpytest('-n2') + output.stdout.re_match_lines([ + r'[\.E]{40} \s+ \[100%\]', + ]) diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py index ccd70ed8b..467e77252 100644 --- a/testing/test_tmpdir.py +++ b/testing/test_tmpdir.py @@ -5,6 +5,7 @@ import pytest from _pytest.tmpdir import tmpdir + def 
test_funcarg(testdir): testdir.makepyfile(""" def pytest_generate_tests(metafunc): @@ -29,12 +30,14 @@ def test_funcarg(testdir): bn = p.basename.strip("0123456789") assert bn == "qwe__abc" + def test_ensuretemp(recwarn): d1 = pytest.ensuretemp('hello') d2 = pytest.ensuretemp('hello') assert d1 == d2 assert d1.check(dir=1) + class TestTempdirHandler(object): def test_mktemp(self, testdir): from _pytest.tmpdir import TempdirFactory @@ -49,6 +52,7 @@ class TestTempdirHandler(object): assert tmp2.relto(t.getbasetemp()).startswith("this") assert tmp2 != tmp + class TestConfigTmpdir(object): def test_getbasetemp_custom_removes_old(self, testdir): mytemp = testdir.tmpdir.join("xyz") @@ -76,6 +80,7 @@ def test_basetemp(testdir): assert result.ret == 0 assert mytemp.join('hello').check() + @pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'), reason="symlink not available on this platform") def test_tmpdir_always_is_realpath(testdir): diff --git a/testing/test_unittest.py b/testing/test_unittest.py index af9851997..e19773587 100644 --- a/testing/test_unittest.py +++ b/testing/test_unittest.py @@ -3,28 +3,30 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED import pytest import gc + def test_simple_unittest(testdir): testpath = testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): def testpassing(self): - self.assertEquals('foo', 'foo') + self.assertEqual('foo', 'foo') def test_failing(self): - self.assertEquals('foo', 'bar') + self.assertEqual('foo', 'bar') """) reprec = testdir.inline_run(testpath) assert reprec.matchreport("testpassing").passed assert reprec.matchreport("test_failing").failed + def test_runTest_method(testdir): testdir.makepyfile(""" import unittest class MyTestCaseWithRunTest(unittest.TestCase): def runTest(self): - self.assertEquals('foo', 'foo') + self.assertEqual('foo', 'foo') class MyTestCaseWithoutRunTest(unittest.TestCase): def runTest(self): - self.assertEquals('foo', 'foo') + self.assertEqual('foo', 'foo') def test_something(self): pass """) @@ -35,6 +37,7 @@ def test_runTest_method(testdir): *2 passed* """) + def test_isclasscheck_issue53(testdir): testpath = testdir.makepyfile(""" import unittest @@ -46,6 +49,7 @@ def test_isclasscheck_issue53(testdir): result = testdir.runpytest(testpath) assert result.ret == EXIT_NOTESTSCOLLECTED + def test_setup(testdir): testpath = testdir.makepyfile(""" import unittest @@ -55,7 +59,7 @@ def test_setup(testdir): def setup_method(self, method): self.foo2 = 1 def test_both(self): - self.assertEquals(1, self.foo) + self.assertEqual(1, self.foo) assert self.foo2 == 1 def teardown_method(self, method): assert 0, "42" @@ -66,36 +70,38 @@ def test_setup(testdir): rep = reprec.matchreport("test_both", when="teardown") assert rep.failed and '42' in str(rep.longrepr) + def test_setUpModule(testdir): testpath = testdir.makepyfile(""" - l = [] + values = [] def setUpModule(): - l.append(1) + values.append(1) def tearDownModule(): - del l[0] + del values[0] def test_hello(): - assert l == [1] + assert values == [1] def test_world(): - assert l == [1] + assert values == [1] """) result = testdir.runpytest(testpath) result.stdout.fnmatch_lines([ "*2 passed*", ]) + def test_setUpModule_failing_no_teardown(testdir): testpath = testdir.makepyfile(""" - l = [] + values = [] def setUpModule(): 0/0 def tearDownModule(): - l.append(1) + values.append(1) def test_hello(): pass @@ -103,7 +109,8 @@ def test_setUpModule_failing_no_teardown(testdir): reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=0, 
failed=1) call = reprec.getcalls("pytest_runtest_setup")[0] - assert not call.item.module.l + assert not call.item.module.values + def test_new_instances(testdir): testpath = testdir.makepyfile(""" @@ -117,18 +124,19 @@ def test_new_instances(testdir): reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=2) + def test_teardown(testdir): testpath = testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): - l = [] + values = [] def test_one(self): pass def tearDown(self): - self.l.append(None) + self.values.append(None) class Second(unittest.TestCase): def test_check(self): - self.assertEquals(MyTestCase.l, [None]) + self.assertEqual(MyTestCase.values, [None]) """) reprec = testdir.inline_run(testpath) passed, skipped, failed = reprec.countoutcomes() @@ -136,6 +144,7 @@ def test_teardown(testdir): assert passed == 2 assert passed + skipped + failed == 2 + def test_teardown_issue1649(testdir): """ Are TestCase objects cleaned up? Often unittest TestCase objects set @@ -158,7 +167,7 @@ def test_teardown_issue1649(testdir): for obj in gc.get_objects(): assert type(obj).__name__ != 'TestCaseObjectsShouldBeCleanedUp' -@pytest.mark.skipif("sys.version_info < (2,7)") + def test_unittest_skip_issue148(testdir): testpath = testdir.makepyfile(""" import unittest @@ -177,6 +186,7 @@ def test_unittest_skip_issue148(testdir): reprec = testdir.inline_run(testpath) reprec.assertoutcome(skipped=1) + def test_method_and_teardown_failing_reporting(testdir): testdir.makepyfile(""" import unittest, pytest @@ -196,6 +206,7 @@ def test_method_and_teardown_failing_reporting(testdir): "*1 failed*1 error*", ]) + def test_setup_failure_is_shown(testdir): testdir.makepyfile(""" import unittest @@ -216,6 +227,7 @@ def test_setup_failure_is_shown(testdir): ]) assert 'never42' not in result.stdout.str() + def test_setup_setUpClass(testdir): testpath = testdir.makepyfile(""" import unittest @@ -238,6 +250,7 @@ def test_setup_setUpClass(testdir): reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=3) + def test_setup_class(testdir): testpath = testdir.makepyfile(""" import unittest @@ -279,6 +292,7 @@ def test_testcase_adderrorandfailure_defers(testdir, type): result = testdir.runpytest() assert 'should not raise' not in result.stdout.str() + @pytest.mark.parametrize("type", ['Error', 'Failure']) def test_testcase_custom_exception_info(testdir, type): testdir.makepyfile(""" @@ -310,6 +324,7 @@ def test_testcase_custom_exception_info(testdir, type): "*1 failed*", ]) + def test_testcase_totally_incompatible_exception_info(testdir): item, = testdir.getitems(""" from unittest import TestCase @@ -321,6 +336,7 @@ def test_testcase_totally_incompatible_exception_info(testdir): excinfo = item._excinfo.pop(0) assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr()) + def test_module_level_pytestmark(testdir): testpath = testdir.makepyfile(""" import unittest @@ -334,61 +350,12 @@ def test_module_level_pytestmark(testdir): reprec.assertoutcome(skipped=1) -def test_trial_testcase_skip_property(testdir): - pytest.importorskip('twisted.trial.unittest') - testpath = testdir.makepyfile(""" - from twisted.trial import unittest - class MyTestCase(unittest.TestCase): - skip = 'dont run' - def test_func(self): - pass - """) - reprec = testdir.inline_run(testpath, "-s") - reprec.assertoutcome(skipped=1) - - -def test_trial_testfunction_skip_property(testdir): - pytest.importorskip('twisted.trial.unittest') - testpath = testdir.makepyfile(""" - from twisted.trial import unittest - class 
MyTestCase(unittest.TestCase): - def test_func(self): - pass - test_func.skip = 'dont run' - """) - reprec = testdir.inline_run(testpath, "-s") - reprec.assertoutcome(skipped=1) - - -def test_trial_testcase_todo_property(testdir): - pytest.importorskip('twisted.trial.unittest') - testpath = testdir.makepyfile(""" - from twisted.trial import unittest - class MyTestCase(unittest.TestCase): - todo = 'dont run' - def test_func(self): - assert 0 - """) - reprec = testdir.inline_run(testpath, "-s") - reprec.assertoutcome(skipped=1) - - -def test_trial_testfunction_todo_property(testdir): - pytest.importorskip('twisted.trial.unittest') - testpath = testdir.makepyfile(""" - from twisted.trial import unittest - class MyTestCase(unittest.TestCase): - def test_func(self): - assert 0 - test_func.todo = 'dont run' - """) - reprec = testdir.inline_run(testpath, "-s") - reprec.assertoutcome(skipped=1) - - class TestTrialUnittest(object): def setup_class(cls): cls.ut = pytest.importorskip("twisted.trial.unittest") + # on windows trial uses a socket for a reactor and apparently doesn't close it properly + # https://twistedmatrix.com/trac/ticket/9227 + cls.ignore_unclosed_socket_warning = ('-W', 'always') def test_trial_testcase_runtest_not_collected(self, testdir): testdir.makepyfile(""" @@ -398,7 +365,7 @@ class TestTrialUnittest(object): def test_hello(self): pass """) - reprec = testdir.inline_run() + reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning) reprec.assertoutcome(passed=1) testdir.makepyfile(""" from twisted.trial.unittest import TestCase @@ -407,7 +374,7 @@ class TestTrialUnittest(object): def runTest(self): pass """) - reprec = testdir.inline_run() + reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning) reprec.assertoutcome(passed=1) def test_trial_exceptions_with_skips(self, testdir): @@ -445,7 +412,7 @@ class TestTrialUnittest(object): """) from _pytest.compat import _is_unittest_unexpected_success_a_failure should_fail = _is_unittest_unexpected_success_a_failure() - result = testdir.runpytest("-rxs") + result = testdir.runpytest("-rxs", *self.ignore_unclosed_socket_warning) result.stdout.fnmatch_lines_random([ "*XFAIL*test_trial_todo*", "*trialselfskip*", @@ -520,6 +487,51 @@ class TestTrialUnittest(object): child.expect("hellopdb") child.sendeof() + def test_trial_testcase_skip_property(self, testdir): + testpath = testdir.makepyfile(""" + from twisted.trial import unittest + class MyTestCase(unittest.TestCase): + skip = 'dont run' + def test_func(self): + pass + """) + reprec = testdir.inline_run(testpath, "-s") + reprec.assertoutcome(skipped=1) + + def test_trial_testfunction_skip_property(self, testdir): + testpath = testdir.makepyfile(""" + from twisted.trial import unittest + class MyTestCase(unittest.TestCase): + def test_func(self): + pass + test_func.skip = 'dont run' + """) + reprec = testdir.inline_run(testpath, "-s") + reprec.assertoutcome(skipped=1) + + def test_trial_testcase_todo_property(self, testdir): + testpath = testdir.makepyfile(""" + from twisted.trial import unittest + class MyTestCase(unittest.TestCase): + todo = 'dont run' + def test_func(self): + assert 0 + """) + reprec = testdir.inline_run(testpath, "-s") + reprec.assertoutcome(skipped=1) + + def test_trial_testfunction_todo_property(self, testdir): + testpath = testdir.makepyfile(""" + from twisted.trial import unittest + class MyTestCase(unittest.TestCase): + def test_func(self): + assert 0 + test_func.todo = 'dont run' + """) + reprec = testdir.inline_run(testpath, "-s", 
*self.ignore_unclosed_socket_warning) + reprec.assertoutcome(skipped=1) + + def test_djangolike_testcase(testdir): # contributed from Morten Breekevold testdir.makepyfile(""" @@ -580,11 +592,12 @@ def test_unittest_not_shown_in_traceback(testdir): class t(unittest.TestCase): def test_hello(self): x = 3 - self.assertEquals(x, 4) + self.assertEqual(x, 4) """) res = testdir.runpytest() assert "failUnlessEqual" not in res.stdout.str() + def test_unorderable_types(testdir): testdir.makepyfile(""" import unittest @@ -602,6 +615,7 @@ def test_unorderable_types(testdir): assert "TypeError" not in result.stdout.str() assert result.ret == EXIT_NOTESTSCOLLECTED + def test_unittest_typerror_traceback(testdir): testdir.makepyfile(""" import unittest @@ -614,7 +628,6 @@ def test_unittest_typerror_traceback(testdir): assert result.ret == 1 -@pytest.mark.skipif("sys.version_info < (2,7)") @pytest.mark.parametrize('runner', ['pytest', 'unittest']) def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner): script = testdir.makepyfile(""" @@ -641,7 +654,6 @@ def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner): assert result.ret == 0 -@pytest.mark.skipif("sys.version_info < (2,7)") @pytest.mark.parametrize('runner', ['pytest', 'unittest']) def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner): script = testdir.makepyfile(""" @@ -755,8 +767,10 @@ def test_no_teardown_if_setupclass_failed(testdir): def test_issue333_result_clearing(testdir): testdir.makeconftest(""" - def pytest_runtest_call(__multicall__, item): - __multicall__.execute() + import pytest + @pytest.hookimpl(hookwrapper=True) + def pytest_runtest_call(item): + yield assert 0 """) testdir.makepyfile(""" @@ -769,7 +783,7 @@ def test_issue333_result_clearing(testdir): reprec = testdir.inline_run() reprec.assertoutcome(failed=1) -@pytest.mark.skipif("sys.version_info < (2,7)") + def test_unittest_raise_skip_issue748(testdir): testdir.makepyfile(test_foo=""" import unittest @@ -784,11 +798,11 @@ def test_unittest_raise_skip_issue748(testdir): *1 skipped* """) -@pytest.mark.skipif("sys.version_info < (2,7)") + def test_unittest_skip_issue1169(testdir): testdir.makepyfile(test_foo=""" import unittest - + class MyTestCase(unittest.TestCase): @unittest.skip("skipping due to reasons") def test_skip(self): @@ -800,6 +814,7 @@ def test_unittest_skip_issue1169(testdir): *1 skipped* """) + def test_class_method_containing_test_issue1558(testdir): testdir.makepyfile(test_foo=""" import unittest diff --git a/testing/test_warnings.py b/testing/test_warnings.py index 69bda1172..02400bd1d 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -8,6 +8,7 @@ import pytest WARNINGS_SUMMARY_HEADER = 'warnings summary' + @pytest.fixture def pyfile_with_warnings(testdir, request): """ @@ -32,6 +33,7 @@ def pyfile_with_warnings(testdir, request): }) +@pytest.mark.filterwarnings('always') def test_normal_flow(testdir, pyfile_with_warnings): """ Check that the warnings section is displayed, containing test node ids followed by @@ -53,6 +55,7 @@ def test_normal_flow(testdir, pyfile_with_warnings): assert result.stdout.str().count('test_normal_flow.py::test_func') == 1 +@pytest.mark.filterwarnings('always') def test_setup_teardown_warnings(testdir, pyfile_with_warnings): testdir.makepyfile(''' import warnings @@ -112,9 +115,9 @@ def test_ignore(testdir, pyfile_with_warnings, method): assert WARNINGS_SUMMARY_HEADER not in result.stdout.str() - @pytest.mark.skipif(sys.version_info < (3, 0), 
reason='warnings message is unicode is ok in python3') +@pytest.mark.filterwarnings('always') def test_unicode(testdir, pyfile_with_warnings): testdir.makepyfile(''' # -*- coding: utf8 -*- @@ -141,6 +144,8 @@ def test_unicode(testdir, pyfile_with_warnings): @pytest.mark.skipif(sys.version_info >= (3, 0), reason='warnings message is broken as it is not str instance') def test_py2_unicode(testdir, pyfile_with_warnings): + if getattr(sys, "pypy_version_info", ())[:2] == (5, 9) and sys.platform.startswith('win'): + pytest.xfail("fails with unicode error on PyPy2 5.9 and Windows (#2905)") testdir.makepyfile(''' # -*- coding: utf8 -*- import warnings @@ -152,6 +157,7 @@ def test_py2_unicode(testdir, pyfile_with_warnings): warnings.warn(u"测试") yield + @pytest.mark.filterwarnings('always') def test_func(fix): pass ''') @@ -159,13 +165,33 @@ def test_py2_unicode(testdir, pyfile_with_warnings): result.stdout.fnmatch_lines([ '*== %s ==*' % WARNINGS_SUMMARY_HEADER, - '*test_py2_unicode.py:8: UserWarning: \u6d4b\u8bd5', + '*test_py2_unicode.py:8: UserWarning: \\u6d4b\\u8bd5', '*warnings.warn(u"\u6d4b\u8bd5")', '*warnings.py:*: UnicodeWarning: Warning is using unicode non*', '* 1 passed, 2 warnings*', ]) +def test_py2_unicode_ascii(testdir): + """Ensure that our warning about 'unicode warnings containing non-ascii messages' + does not trigger with ascii-convertible messages""" + testdir.makeini('[pytest]') + testdir.makepyfile(''' + import pytest + import warnings + + @pytest.mark.filterwarnings('always') + def test_func(): + warnings.warn(u"hello") + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*== %s ==*' % WARNINGS_SUMMARY_HEADER, + '*warnings.warn(u"hello")', + '* 1 passed, 1 warnings in*' + ]) + + def test_works_with_filterwarnings(testdir): """Ensure our warnings capture does not mess with pre-installed filters (#2430).""" testdir.makepyfile(''' @@ -173,9 +199,9 @@ def test_works_with_filterwarnings(testdir): class MyWarning(Warning): pass - + warnings.filterwarnings("error", category=MyWarning) - + class TestWarnings(object): def test_my_warning(self): try: @@ -188,3 +214,45 @@ def test_works_with_filterwarnings(testdir): result.stdout.fnmatch_lines([ '*== 1 passed in *', ]) + + +@pytest.mark.parametrize('default_config', ['ini', 'cmdline']) +def test_filterwarnings_mark(testdir, default_config): + """ + Test ``filterwarnings`` mark works and takes precedence over command line and ini options. 
+ """ + if default_config == 'ini': + testdir.makeini(""" + [pytest] + filterwarnings = always + """) + testdir.makepyfile(""" + import warnings + import pytest + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_ignore_runtime_warning(): + warnings.warn(RuntimeWarning()) + + @pytest.mark.filterwarnings('error') + def test_warning_error(): + warnings.warn(RuntimeWarning()) + + def test_show_warning(): + warnings.warn(RuntimeWarning()) + """) + result = testdir.runpytest('-W always' if default_config == 'cmdline' else '') + result.stdout.fnmatch_lines(['*= 1 failed, 2 passed, 1 warnings in *']) + + +def test_non_string_warning_argument(testdir): + """Non-str argument passed to warning breaks pytest (#2956)""" + testdir.makepyfile(""" + import warnings + import pytest + + def test(): + warnings.warn(UserWarning(1, u'foo')) + """) + result = testdir.runpytest('-W', 'always') + result.stdout.fnmatch_lines(['*= 1 passed, 1 warnings in *']) diff --git a/tox.ini b/tox.ini index d8b27300f..75f28ca92 100644 --- a/tox.ini +++ b/tox.ini @@ -1,197 +1,225 @@ [tox] -minversion=2.0 -distshare={homedir}/.tox/distshare -# make sure to update environment list on appveyor.yml -envlist= - linting - py26 - py27 - py33 - py34 - py35 - py36 - py37 - pypy - {py27,py35}-{pexpect,xdist,trial} - py27-nobyte - doctesting - freeze - docs +minversion = 2.0 +distshare = {homedir}/.tox/distshare +# make sure to update environment list in travis.yml and appveyor.yml +envlist = + linting + py27 + py34 + py35 + py36 + py37 + pypy + {py27,py36}-{pexpect,xdist,trial,numpy,pluggymaster} + py27-nobyte + doctesting + py35-freeze + docs [testenv] -commands= pytest --lsof -rfsxX {posargs:testing} +commands = pytest --lsof -ra {posargs:testing} passenv = USER USERNAME -deps= +deps = hypothesis>=3.5.2 nose mock requests -[testenv:py26] -commands= pytest --lsof -rfsxX {posargs:testing} -# pinning mock to last supported version for python 2.6 -deps= - hypothesis<3.0 - nose - mock<1.1 - [testenv:py27-subprocess] -changedir=. -basepython=python2.7 -deps=pytest-xdist>=1.13 +changedir = . 
+deps = + pytest-xdist>=1.13 mock nose -commands= - pytest -n3 -rfsxX --runpytest=subprocess {posargs:testing} +commands = + pytest -n3 -ra --runpytest=subprocess {posargs:testing} [testenv:linting] -skipsdist=True -usedevelop=True +skipsdist = True +usedevelop = True basepython = python2.7 -# needed to keep check-manifest working -setenv = - SETUPTOOLS_SCM_PRETEND_VERSION=2.0.1 deps = flake8 # pygments required by rst-lint - pygments + pygments restructuredtext_lint commands = - flake8 pytest.py _pytest testing + flake8 pytest.py _pytest testing setup.py pytest.py {envpython} scripts/check-rst.py [testenv:py27-xdist] -deps=pytest-xdist>=1.13 +deps = + pytest-xdist>=1.13 mock nose hypothesis>=3.5.2 -commands= - pytest -n1 -rfsxX {posargs:testing} +changedir=testing +commands = + pytest -n1 -ra {posargs:.} -[testenv:py35-xdist] -deps={[testenv:py27-xdist]deps} -commands= - pytest -n3 -rfsxX {posargs:testing} +[testenv:py36-xdist] +deps = {[testenv:py27-xdist]deps} +commands = + pytest -n3 -ra {posargs:testing} [testenv:py27-pexpect] -changedir=testing -platform=linux|darwin -deps=pexpect -commands= - pytest -rfsxX test_pdb.py test_terminal.py test_unittest.py +changedir = testing +platform = linux|darwin +deps = pexpect +commands = + pytest -ra test_pdb.py test_terminal.py test_unittest.py -[testenv:py35-pexpect] -changedir=testing -platform=linux|darwin -deps={[testenv:py27-pexpect]deps} -commands= - pytest -rfsxX test_pdb.py test_terminal.py test_unittest.py +[testenv:py36-pexpect] +changedir = testing +platform = linux|darwin +deps = {[testenv:py27-pexpect]deps} +commands = + pytest -ra test_pdb.py test_terminal.py test_unittest.py [testenv:py27-nobyte] -deps= +deps = pytest-xdist>=1.13 hypothesis>=3.5.2 -distribute=true -setenv= +distribute = true +changedir=testing +setenv = PYTHONDONTWRITEBYTECODE=1 -commands= - pytest -n3 -rfsxX {posargs:testing} +commands = + pytest -n3 -ra {posargs:.} [testenv:py27-trial] -deps=twisted -commands= - pytest -ra {posargs:testing/test_unittest.py} +deps = twisted +commands = + pytest -ra {posargs:testing/test_unittest.py} -[testenv:py35-trial] -deps={[testenv:py27-trial]deps} +[testenv:py36-trial] +deps = {[testenv:py27-trial]deps} +commands = + pytest -ra {posargs:testing/test_unittest.py} + +[testenv:py27-numpy] +deps=numpy commands= - pytest -ra {posargs:testing/test_unittest.py} + pytest -ra {posargs:testing/python/approx.py} + +[testenv:py36-numpy] +deps=numpy +commands= + pytest -ra {posargs:testing/python/approx.py} + +[testenv:py27-pluggymaster] +setenv= + _PYTEST_SETUP_SKIP_PLUGGY_DEP=1 +deps = + {[testenv]deps} + git+https://github.com/pytest-dev/pluggy.git@master + +[testenv:py35-pluggymaster] +setenv= + _PYTEST_SETUP_SKIP_PLUGGY_DEP=1 +deps = + {[testenv:py27-pluggymaster]deps} + git+https://github.com/pytest-dev/pluggy.git@master [testenv:docs] -skipsdist=True -usedevelop=True -basepython=python -changedir=doc/en -deps= - sphinx +skipsdist = True +usedevelop = True +changedir = doc/en +deps = + attrs + more-itertools PyYAML + sphinx + sphinxcontrib-trio -commands= +commands = sphinx-build -W -b html . 
_build [testenv:doctesting] basepython = python -usedevelop=True -skipsdist=True -# ensure the given pyargs cant mean anytrhing else -changedir=doc/ -deps= +usedevelop = True +skipsdist = True +# ensure the given pyargs can't mean anything else +changedir = doc/ +deps = PyYAML -commands= - pytest -rfsxX en +commands = + pytest -ra en pytest --doctest-modules --pyargs _pytest [testenv:regen] -changedir=doc/en -skipsdist=True +changedir = doc/en +skipsdist = True basepython = python3.5 -deps=sphinx - PyYAML - regendoc>=0.6.1 -whitelist_externals= +deps = + sphinx + PyYAML + regendoc>=0.6.1 +whitelist_externals = rm make -commands= +commands = rm -rf /tmp/doc-exec* make regen -[testenv:jython] -changedir=testing -commands= - {envpython} {envbindir}/py.test-jython -rfsxX {posargs} +[testenv:fix-lint] +skipsdist = True +usedevelop = True +deps = + autopep8 +commands = + autopep8 --in-place -r --max-line-length=120 --exclude=test_source_multiline_block.py _pytest testing setup.py pytest.py -[testenv:freeze] -changedir=testing/freeze -deps=pyinstaller -commands= +[testenv:jython] +changedir = testing +commands = + {envpython} {envbindir}/py.test-jython -ra {posargs} + +[testenv:py35-freeze] +changedir = testing/freeze +deps = pyinstaller +commands = {envpython} create_executable.py {envpython} tox_run.py [testenv:coveralls] passenv = TRAVIS TRAVIS_JOB_ID TRAVIS_BRANCH COVERALLS_REPO_TOKEN -usedevelop=True -basepython=python3.5 -changedir=. +usedevelop = True +changedir = . deps = {[testenv]deps} coveralls -commands= +commands = coverage run --source=_pytest -m pytest testing coverage report -m coveralls [pytest] -minversion=2.0 -plugins=pytester +minversion = 2.0 +plugins = pytester #--pyargs --doctest-modules --ignore=.tox -addopts= -rxsX -p pytester --ignore=testing/cx_freeze -rsyncdirs=tox.ini pytest.py _pytest testing -python_files=test_*.py *_test.py testing/*/*.py -python_classes=Test Acceptance -python_functions=test +addopts = -ra -p pytester --ignore=testing/cx_freeze +rsyncdirs = tox.ini pytest.py _pytest testing +python_files = test_*.py *_test.py testing/*/*.py +python_classes = Test Acceptance +python_functions = test norecursedirs = .tox ja .hg cx_freeze_source -filterwarnings= - # produced by path.local - ignore:bad escape.*:DeprecationWarning:re - # produced by path.readlines - ignore:.*U.*mode is deprecated:DeprecationWarning - # produced by pytest-xdist - ignore:.*type argument to addoption.*:DeprecationWarning - # produced by python >=3.5 on execnet (pytest-xdist) - ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning +xfail_strict=true +filterwarnings = + error + # produced by path.local + ignore:bad escape.*:DeprecationWarning:re + # produced by path.readlines + ignore:.*U.*mode is deprecated:DeprecationWarning + # produced by pytest-xdist + ignore:.*type argument to addoption.*:DeprecationWarning + # produced by python >=3.5 on execnet (pytest-xdist) + ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning + # ignore warning about package resolution using __spec__ or __package__ + # should be a temporary solution, see #3061 for discussion + ignore:.*can't resolve package from __spec__ or __package__.*:ImportWarning [flake8] -ignore =E401,E225,E261,E128,E124,E301,E302,E121,E303,W391,E501,E231,E126,E701,E265,E241,E251,E226,E101,W191,E131,E203,E122,E123,E271,E712,E222,E127,E125,E221,W292,E111,E113,E293,E262,W293,E129,E702,E201,E272,E202,E704,E731,E402 -exclude = _pytest/vendored_packages/pluggy.py +max-line-length = 120
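
Two hedged sketches of the features the new tests above exercise, for reviewers who want to try them outside the test suite. First, the module-level skip API covered by test_module_level_skip_with_allow_module_level; the platform condition and test body here are illustrative assumptions, not taken from the diff (test_invalid_skip_keyword_parameter separately checks that unknown keyword arguments raise TypeError):

    import os
    import sys

    import pytest

    # Skip the entire module at import time. Without allow_module_level=True,
    # calling pytest.skip() outside of a test is rejected by pytest.
    if not sys.platform.startswith("linux"):
        pytest.skip("these tests only make sense on Linux", allow_module_level=True)


    def test_proc_filesystem_present():
        assert os.path.isdir("/proc")

Second, the pytest_report_collectionfinish hook covered by test_report_collectionfinish_hook; a conftest.py along these lines (the message text is an assumption) adds an extra line after the "collected N items" report:

    # conftest.py
    def pytest_report_collectionfinish(config, startdir, items):
        # Any returned strings are printed once collection finishes.
        return ["collected {0} items from {1}".format(len(items), startdir)]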