diff --git a/.gitignore b/.gitignore
index 0e42b11ff..3b7ec9fac 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,6 +18,9 @@ include/
 *~
 .hypothesis/

+# autogenerated
+_pytest/_version.py
+# setuptools
 .eggs/

 doc/*/_build
diff --git a/AUTHORS b/AUTHORS
index f8214a91e..90e639234 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -14,6 +14,7 @@ Andrzej Ostrowski
 Andy Freeland
 Anthon van der Neut
 Antony Lee
+Anthony Sottile
 Armin Rigo
 Aron Curzon
 Aviv Palivoda
@@ -47,6 +48,7 @@ David Vierra
 Denis Kirisov
 Diego Russo
 Dmitry Dygalo
+Dmitry Pribysh
 Duncan Betts
 Edison Gustavo Muenz
 Edoardo Batini
@@ -78,6 +80,7 @@ Javier Romero
 Jeff Widman
 John Towler
 Jon Sonesen
+Jonas Obrist
 Jordan Guymon
 Joshua Bronson
 Jurko Gospodnetić
@@ -93,6 +96,8 @@ Lukas Bednar
 Luke Murphy
 Maciek Fijalkowski
 Maho
+Mandeep Bhutani
+Manuel Krebber
 Marc Schlaich
 Marcin Bachry
 Mark Abramowitz
@@ -102,6 +107,7 @@ Martin K. Scherer
 Martin Prusse
 Mathieu Clabaut
 Matt Bachmann
+Matt Duck
 Matt Williams
 Matthias Hafner
 mbyt
@@ -109,6 +115,7 @@ Michael Aquilina
 Michael Birtwell
 Michael Droettboom
 Michael Seifert
+Michal Wajszczuk
 Mike Lundy
 Ned Batchelder
 Neven Mundar
@@ -127,6 +134,7 @@ Ralf Schmitt
 Ran Benita
 Raphael Pierzina
 Raquel Alegre
+Ravi Chandra
 Roberto Polli
 Romain Dorgueil
 Roman Bolshakov
@@ -150,6 +158,7 @@ Trevor Bekolay
 Tyler Goodlet
 Vasily Kuznetsov
 Victor Uriarte
+Vlad Dragos
 Vidar T. Fauske
 Vitaly Lashmanov
 Wouter van Ackooy
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index fb3f7d2cf..25853e136 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,6 +1,101 @@
-3.0.8 (unreleased)
+3.1.0 (2017-05-20)
 ==================
+
+New Features
+------------
+
+* The ``pytest-warnings`` plugin has been integrated into the core, so now ``pytest`` automatically
+  captures and displays warnings at the end of the test session.
+  Thanks `@nicoddemus`_ for the PR.
+
+* Added a ``junit_suite_name`` ini option to specify the root ``<testsuite>`` name for JUnit XML reports (`#533`_).
+
+* Added an ini option ``doctest_encoding`` to specify which encoding to use for doctest files.
+  Thanks `@wheerd`_ for the PR (`#2101`_).
+
+* ``pytest.warns`` now checks for subclass relationship rather than
+  class equality. Thanks `@lesteve`_ for the PR (`#2166`_).
+
+* ``pytest.raises`` now asserts that the error message matches a text or regex
+  with the ``match`` keyword argument. Thanks `@Kriechi`_ for the PR.
+
+* ``pytest.param`` can be used to declare test parameter sets with marks and test ids.
+  Thanks `@RonnyPfannschmidt`_ for the PR.
+
+
+Changes
+-------
+
+* Removed all internal uses of the ``pytest_namespace`` hook; this prepares
+  for the removal of preloadconfig in pytest 4.0.
+  Thanks to `@RonnyPfannschmidt`_ for the PR.
+
+* pytest now warns when a callable passed to ``ids`` raises in a parametrized test. Thanks `@fogo`_ for the PR.
+
+* It is now possible to prevent test classes from being collected by setting a
+  ``__test__`` attribute to ``False`` in the class body (`#2007`_). Thanks
+  to `@syre`_ for the report and `@lwm`_ for the PR.
+
+* Change junitxml.py to produce reports that comply with the JUnitXML schema.
+  If the same test fails with a failure in call and then errors in teardown,
+  we split the testcase element into two, one containing the error and the other
+  the failure (`#2228`_). Thanks to `@kkoukiou`_ for the PR.
+
+* Testcase reports with a ``url`` attribute will now properly write it to junitxml.
+  Thanks `@fushi`_ for the PR (`#1874`_).
+
+* Remove common items from dict comparison output when verbosity=1. Also update
+  the truncation message to make it clearer that pytest truncates all
+  assertion messages if verbosity < 2 (`#1512`_).
+  Thanks `@mattduck`_ for the PR.
+
+* ``--pdbcls`` no longer implies ``--pdb``. This makes it possible to use
+  ``addopts=--pdbcls=module.SomeClass`` on ``pytest.ini``. Thanks `@davidszotten`_ for
+  the PR (`#1952`_).
+
+* Change exception raised by ``capture.DontReadFromInput.fileno()`` from ``ValueError``
+  to ``io.UnsupportedOperation``. Thanks `@vlad-dragos`_ for the PR.
+
+* Fix `#2013`_: turn ``RecordedWarning`` into a ``namedtuple``,
+  to give it a comprehensible repr while preventing unwarranted modification.
+
+* Fix `#2208`_: ensure an iteration limit for ``_pytest.compat.get_real_func``.
+  Thanks `@RonnyPfannschmidt`_ for the report and PR.
+
+* Hooks are now verified after collection is complete, rather than right after loading installed plugins. This
+  makes it easy to write hooks for plugins which will be loaded during collection, for example using the
+  ``pytest_plugins`` special variable (`#1821`_).
+  Thanks `@nicoddemus`_ for the PR.
+
+* Modify ``pytest_make_parametrize_id()`` hook to accept ``argname`` as an
+  additional parameter.
+  Thanks `@unsignedint`_ for the PR.
+
+* Add ``venv`` to the default ``norecursedirs`` setting.
+  Thanks `@The-Compiler`_ for the PR.
+
+* ``PluginManager.import_plugin`` now accepts unicode plugin names in Python 2.
+  Thanks `@reutsharabani`_ for the PR.
+
+* Fix `#2308`_: when using both ``--lf`` and ``--ff``, only the last failed tests are run.
+  Thanks `@ojii`_ for the PR.
+
+* Replace minor/patch level version numbers in the documentation with placeholders.
+  This significantly reduces change-noise as different contributors regenerate
+  the documentation on different platforms.
+  Thanks `@RonnyPfannschmidt`_ for the PR.
+
+* Fix `#2391`_: consider ``pytest_plugins`` on all plugin modules.
+  Thanks `@RonnyPfannschmidt`_ for the PR.
+
+
+Bug Fixes
+---------
+
+* Fix ``AttributeError`` on ``sys.stdout.buffer`` / ``sys.stderr.buffer``
+  while using the ``capsys`` fixture in Python 3 (`#1407`_).
+  Thanks to `@asottile`_.
+
 * Change capture.py's ``DontReadFromInput`` class to throw
   ``io.UnsupportedOperation`` errors rather than ValueErrors in the ``fileno``
   method (`#2276`_). Thanks `@metasyn`_ for the PR.
@@ -8,7 +103,7 @@
 * Fix exception formatting while importing modules when the exception message
   contains non-ascii characters (`#2336`_).
   Thanks `@fabioz`_ for the report and `@nicoddemus`_ for the PR.
-
+
 * Added documentation related to issue (`#1937`_)
   Thanks `@skylarjhdownes`_ for the PR.
@@ -18,26 +113,45 @@
 * Show the correct error message when collect "parametrize" func with wrong args (`#2383`_).
   Thanks `@The-Compiler`_ for the report and `@robin0371`_ for the PR.

-*
-*
-
-*
-
-
-
-.. _@skylarjhdownes: https://github.com/skylarjhdownes
+.. _@davidszotten: https://github.com/davidszotten
 .. _@fabioz: https://github.com/fabioz
-.. _@metasyn: https://github.com/metasyn
+.. _@fogo: https://github.com/fogo
+.. _@fushi: https://github.com/fushi
 .. _@Kodiologist: https://github.com/Kodiologist
+.. _@Kriechi: https://github.com/Kriechi
+.. _@mandeep: https://github.com/mandeep
+.. _@mattduck: https://github.com/mattduck
+.. _@metasyn: https://github.com/metasyn
+.. _@MichalTHEDUDE: https://github.com/MichalTHEDUDE
+.. _@ojii: https://github.com/ojii
+.. _@reutsharabani: https://github.com/reutsharabani
 .. _@robin0371: https://github.com/robin0371
+.. _@skylarjhdownes: https://github.com/skylarjhdownes
+.. _@unsignedint: https://github.com/unsignedint
+.. _@wheerd: https://github.com/wheerd
+.. _#1407: https://github.com/pytest-dev/pytest/issues/1407
+.. _#1512: https://github.com/pytest-dev/pytest/issues/1512
+.. _#1821: https://github.com/pytest-dev/pytest/issues/1821
+.. _#1874: https://github.com/pytest-dev/pytest/pull/1874
 .. _#1937: https://github.com/pytest-dev/pytest/issues/1937
+.. _#1952: https://github.com/pytest-dev/pytest/pull/1952
+.. _#2007: https://github.com/pytest-dev/pytest/issues/2007
+.. _#2013: https://github.com/pytest-dev/pytest/issues/2013
+.. _#2101: https://github.com/pytest-dev/pytest/pull/2101
+.. _#2166: https://github.com/pytest-dev/pytest/pull/2166
+.. _#2208: https://github.com/pytest-dev/pytest/issues/2208
+.. _#2228: https://github.com/pytest-dev/pytest/issues/2228
 .. _#2276: https://github.com/pytest-dev/pytest/issues/2276
+.. _#2308: https://github.com/pytest-dev/pytest/issues/2308
 .. _#2336: https://github.com/pytest-dev/pytest/issues/2336
 .. _#2369: https://github.com/pytest-dev/pytest/issues/2369
 .. _#2383: https://github.com/pytest-dev/pytest/issues/2383
+.. _#2391: https://github.com/pytest-dev/pytest/issues/2391
+.. _#533: https://github.com/pytest-dev/pytest/issues/533
+


 3.0.7 (2017-03-14)
 ==================
@@ -191,6 +305,7 @@
 * Cope gracefully with a .pyc file with no matching .py file (`#2038`_).
   Thanks `@nedbat`_.

+.. _@syre: https://github.com/syre
 .. _@adler-j: https://github.com/adler-j
 .. _@d-b-w: https://bitbucket.org/d-b-w/
 .. _@DuncanBetts: https://github.com/DuncanBetts
@@ -298,6 +413,7 @@
 .. _@raquel-ucl: https://github.com/raquel-ucl
 .. _@axil: https://github.com/axil
 .. _@tgoodlet: https://github.com/tgoodlet
+.. _@vlad-dragos: https://github.com/vlad-dragos

 .. _#1853: https://github.com/pytest-dev/pytest/issues/1853
 .. _#1905: https://github.com/pytest-dev/pytest/issues/1905
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 71dc04d91..edf71dad7 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -206,12 +206,12 @@ but here is a simple overview:
 #. Run all the tests

-   You need to have Python 2.7 and 3.5 available in your system. Now
+   You need to have Python 2.7 and 3.6 available in your system. Now
    running tests is as simple as issuing this command::

-    $ tox -e linting,py27,py35
+    $ tox -e linting,py27,py36

-   This command will run tests via the "tox" tool against Python 2.7 and 3.5
+   This command will run tests via the "tox" tool against Python 2.7 and 3.6
    and also perform "lint" coding-style checks.

 #. You can now edit your local working copy.
@@ -223,9 +223,9 @@ but here is a simple overview:

    $ tox -e py27 -- --pdb

-   Or to only run tests in a particular test module on Python 3.5::
+   Or to only run tests in a particular test module on Python 3.6::

-    $ tox -e py35 -- testing/test_config.py
+    $ tox -e py36 -- testing/test_config.py

 #. Commit and push once your tests pass and you are happy with your change(s)::
diff --git a/HOWTORELEASE.rst b/HOWTORELEASE.rst
index 372ecf7f1..21834c672 100644
--- a/HOWTORELEASE.rst
+++ b/HOWTORELEASE.rst
@@ -1,29 +1,30 @@
 How to release pytest
 --------------------------------------------
-Note: this assumes you have already registered on pypi.
+.. important::

-1. Bump version numbers in ``_pytest/__init__.py`` (``setup.py`` reads it).
+    pytest releases must be prepared on **Linux** because the docs and examples expect
+    to be executed on that platform.

-2. Check and finalize ``CHANGELOG.rst``.
+#. Install development dependencies in a virtual environment with::

-3. Write ``doc/en/announce/release-VERSION.txt`` and include
-   it in ``doc/en/announce/index.txt``. Run this command to list names of authors involved::
+    pip3 install -r tasks/requirements.txt

-   git log $(git describe --abbrev=0 --tags)..HEAD --format='%aN' | sort -u
+#. Create a branch ``release-X.Y.Z`` with the version for the release. Make sure it is up to date
+   with the latest ``master`` (for patch releases) and with the latest ``features`` merged with
+   the latest ``master`` (for minor releases). Ensure you are in a clean work tree.

-4. Regenerate the docs examples using tox::
+#. Check and finalize ``CHANGELOG.rst`` (will be automated soon).

-   tox -e regen
+#. Execute the following to automatically generate docs and announcements and upload a package to
+   your ``devpi`` staging server::

-5. At this point, open a PR named ``release-X`` so others can help find regressions or provide suggestions.
+    invoke generate.pre_release --password

-6. Use devpi for uploading a release tarball to a staging area::
+   If ``--password`` is not given, it is assumed the user is already logged in. If you don't have
+   an account, please ask for one!

-   devpi use https://devpi.net/USER/dev
-   devpi upload --formats sdist,bdist_wheel
-
-7. Run from multiple machines::
+#. Run from multiple machines::

    devpi use https://devpi.net/USER/dev
    devpi test pytest==VERSION
@@ -31,27 +32,27 @@ Note: this assumes you have already registered on pypi.
   Alternatively, you can use `devpi-cloud-tester `_ to test the package on AppVeyor and Travis
   (follow instructions on the ``README``).

-8. Check that tests pass for relevant combinations with::
+#. Check that tests pass for relevant combinations with::

    devpi list pytest

   or look at failures with "devpi list -f pytest".

-9. Feeling confident? Publish to pypi::
+#. Feeling confident? Publish to pypi::

    devpi push pytest==VERSION pypi:NAME

   where NAME is the name of pypi.python.org as configured in your
   ``~/.pypirc`` file `for devpi `_.

-10. Tag the release::
+#. Tag the release::

    git tag VERSION
    git push origin VERSION

   Make sure ``<hash>`` is **exactly** the git hash at the time the package was created.

-11. Send release announcement to mailing lists:
+#. Send release announcement to mailing lists:

   - pytest-dev@python.org
   - python-announce-list@python.org
@@ -59,7 +60,7 @@ Note: this assumes you have already registered on pypi.
   And announce the release on Twitter, making sure to add the hashtag ``#pytest``.

-12. **After the release**
+#. **After the release**

   a. **patch release (2.8.3)**:
@@ -81,5 +82,3 @@ Note: this assumes you have already registered on pypi.
       9. Push ``master`` and ``features``.

   c.
**major release (3.0.0)**: same steps as that of a **minor release** - - diff --git a/MANIFEST.in b/MANIFEST.in index c57cbd911..51041f0c9 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -20,6 +20,7 @@ recursive-include extra *.py graft testing graft doc prune doc/en/_build +graft tasks exclude _pytest/impl diff --git a/_pytest/__init__.py b/_pytest/__init__.py index dd7876046..6e41f0504 100644 --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,8 @@ -# -__version__ = '3.0.8.dev' +__all__ = ['__version__'] + +try: + from ._version import version as __version__ +except ImportError: + # broken installation, we don't even try + # unknown only works because we do poor mans version compare + __version__ = 'unknown' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py index 3ab679d8b..8c93e4c92 100644 --- a/_pytest/_argcomplete.py +++ b/_pytest/_argcomplete.py @@ -57,7 +57,7 @@ If things do not work right away: which should throw a KeyError: 'COMPLINE' (which is properly set by the global argcomplete script). """ - +from __future__ import absolute_import, division, print_function import sys import os from glob import glob diff --git a/_pytest/_code/__init__.py b/_pytest/_code/__init__.py index 3463c11ea..815c13b42 100644 --- a/_pytest/_code/__init__.py +++ b/_pytest/_code/__init__.py @@ -1,4 +1,5 @@ """ python inspection/code generation API """ +from __future__ import absolute_import, division, print_function from .code import Code # noqa from .code import ExceptionInfo # noqa from .code import Frame # noqa diff --git a/_pytest/_code/_py2traceback.py b/_pytest/_code/_py2traceback.py index a830d9899..d45ee01fa 100644 --- a/_pytest/_code/_py2traceback.py +++ b/_pytest/_code/_py2traceback.py @@ -2,6 +2,7 @@ # CHANGES: # - some_str is replaced, trying to create unicode strings # +from __future__ import absolute_import, division, print_function import types def format_exception_only(etype, value): diff --git a/_pytest/_code/code.py b/_pytest/_code/code.py index 2b26e4a24..f872dba0b 100644 --- a/_pytest/_code/code.py +++ b/_pytest/_code/code.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import sys from inspect import CO_VARARGS, CO_VARKEYWORDS import re diff --git a/_pytest/_code/source.py b/_pytest/_code/source.py index fcec0f5ca..8e6148410 100644 --- a/_pytest/_code/source.py +++ b/_pytest/_code/source.py @@ -1,4 +1,4 @@ -from __future__ import generators +from __future__ import absolute_import, division, generators, print_function from bisect import bisect_right import sys diff --git a/_pytest/_pluggy.py b/_pytest/_pluggy.py index 87d32cf8d..6cc1d3d54 100644 --- a/_pytest/_pluggy.py +++ b/_pytest/_pluggy.py @@ -2,7 +2,7 @@ imports symbols from vendored "pluggy" if available, otherwise falls back to importing "pluggy" from the default namespace. """ - +from __future__ import absolute_import, division, print_function try: from _pytest.vendored_packages.pluggy import * # noqa from _pytest.vendored_packages.pluggy import __version__ # noqa diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py index 3f14a7ae7..acb034d86 100644 --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -1,12 +1,13 @@ """ support for presenting detailed information in failing assertions. 
""" +from __future__ import absolute_import, division, print_function import py -import os import sys from _pytest.assertion import util from _pytest.assertion import rewrite +from _pytest.assertion import truncate def pytest_addoption(parser): @@ -24,9 +25,6 @@ def pytest_addoption(parser): expression information.""") -def pytest_namespace(): - return {'register_assert_rewrite': register_assert_rewrite} - def register_assert_rewrite(*names): """Register one or more module names to be rewritten on import. @@ -100,12 +98,6 @@ def pytest_collection(session): assertstate.hook.set_session(session) -def _running_on_ci(): - """Check if we're currently running on a CI system.""" - env_vars = ['CI', 'BUILD_NUMBER'] - return any(var in os.environ for var in env_vars) - - def pytest_runtest_setup(item): """Setup the pytest_assertrepr_compare hook @@ -119,8 +111,8 @@ def pytest_runtest_setup(item): This uses the first result from the hook and then ensures the following: - * Overly verbose explanations are dropped unless -vv was used or - running on a CI. + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). * Embedded newlines are escaped to help util.format_explanation() later. * If the rewrite mode is used embedded %-characters are replaced @@ -133,14 +125,7 @@ def pytest_runtest_setup(item): config=item.config, op=op, left=left, right=right) for new_expl in hook_result: if new_expl: - if (sum(len(p) for p in new_expl[1:]) > 80*8 and - item.config.option.verbose < 2 and - not _running_on_ci()): - show_max = 10 - truncated_lines = len(new_expl) - show_max - new_expl[show_max:] = [py.builtin._totext( - 'Detailed information truncated (%d more lines)' - ', use "-vv" to show' % truncated_lines)] + new_expl = truncate.truncate_if_required(new_expl, item) new_expl = [line.replace("\n", "\\n") for line in new_expl] res = py.builtin._totext("\n~").join(new_expl) if item.config.getvalue("assertmode") == "rewrite": diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py index 7408c4746..f7e255efd 100644 --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -1,5 +1,5 @@ """Rewrite assertion AST to produce nice error messages""" - +from __future__ import absolute_import, division, print_function import ast import _ast import errno @@ -337,7 +337,7 @@ def _rewrite_test(config, fn): return None, None rewrite_asserts(tree, fn, config) try: - co = compile(tree, fn.strpath, "exec") + co = compile(tree, fn.strpath, "exec", dont_inherit=True) except SyntaxError: # It's possible that this error is from some bug in the # assertion rewriting, but I don't know of a fast way to tell. diff --git a/_pytest/assertion/truncate.py b/_pytest/assertion/truncate.py new file mode 100644 index 000000000..1e1306356 --- /dev/null +++ b/_pytest/assertion/truncate.py @@ -0,0 +1,102 @@ +""" +Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +~8 terminal lines, unless running in "-vv" mode or running on CI. +""" +from __future__ import absolute_import, division, print_function +import os + +import py + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required(explanation, item, max_length=None): + """ + Truncate this assertion explanation if the given test item is eligible. 
+ """ + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item): + """ + Whether or not this test item is eligible for truncation. + """ + verbose = item.config.option.verbose + return verbose < 2 and not _running_on_ci() + + +def _running_on_ci(): + """Check if we're currently running on a CI system.""" + env_vars = ['CI', 'BUILD_NUMBER'] + return any(var in os.environ for var in env_vars) + + +def _truncate_explanation(input_lines, max_lines=None, max_chars=None): + """ + Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first. The remaining lines will be replaced by a usage message. + """ + + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + if len(input_lines) <= max_lines and input_char_count <= max_chars: + return input_lines + + # Truncate first to max_lines, and then truncate to max_chars if max_chars + # is exceeded. + truncated_explanation = input_lines[:max_lines] + truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) + + # Add ellipsis to final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + + # Append useful message to explanation + truncated_line_count = len(input_lines) - len(truncated_explanation) + truncated_line_count += 1 # Account for the part-truncated final line + msg = '...Full output truncated' + if truncated_line_count == 1: + msg += ' ({0} line hidden)'.format(truncated_line_count) + else: + msg += ' ({0} lines hidden)'.format(truncated_line_count) + msg += ", {0}" .format(USAGE_MSG) + truncated_explanation.extend([ + py.builtin._totext(""), + py.builtin._totext(msg), + ]) + return truncated_explanation + + +def _truncate_by_char_count(input_lines, max_chars): + # Check if truncation required + if len("".join(input_lines)) <= max_chars: + return input_lines + + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py index 4a0a4e431..06eda8d91 100644 --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,4 +1,5 @@ """Utilities for assertion debugging""" +from __future__ import absolute_import, division, print_function import pprint import _pytest._code @@ -8,7 +9,7 @@ try: except ImportError: Sequence = list -BuiltinAssertionError = py.builtin.builtins.AssertionError + u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion @@ -256,8 +257,8 @@ def _compare_eq_dict(left, right, verbose=False): explanation = [] common = set(left).intersection(set(right)) same = dict((k, left[k]) for k in common if left[k] == right[k]) - if same and not verbose: - explanation += [u('Omitting %s identical items, use -v to show') % + if same and verbose 
< 2: + explanation += [u('Omitting %s identical items, use -vv to show') % len(same)] elif same: explanation += [u('Common items:')] diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py index 893f0eae5..ab08362ee 100755 --- a/_pytest/cacheprovider.py +++ b/_pytest/cacheprovider.py @@ -4,7 +4,7 @@ merged implementation of the cache provider the name cache was not chosen to ensure pluggy automatically ignores the external pytest-cache """ - +from __future__ import absolute_import, division, print_function import py import pytest import json @@ -139,11 +139,11 @@ class LFPlugin: # running a subset of all tests with recorded failures outside # of the set of tests currently executing pass - elif self.config.getvalue("failedfirst"): - items[:] = previously_failed + previously_passed - else: + elif self.config.getvalue("lf"): items[:] = previously_failed config.hook.pytest_deselected(items=previously_passed) + else: + items[:] = previously_failed + previously_passed def pytest_sessionfinish(self, session): config = self.config diff --git a/_pytest/capture.py b/_pytest/capture.py index 07ec662b6..6bc3fc1f0 100644 --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -2,18 +2,18 @@ per-test stdout/stderr capturing mechanism. """ -from __future__ import with_statement +from __future__ import absolute_import, division, print_function import contextlib import sys import os +from io import UnsupportedOperation from tempfile import TemporaryFile import py import pytest +from _pytest.compat import CaptureIO -from py.io import TextIO -from io import UnsupportedOperation unicode = py.builtin.text patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} @@ -403,7 +403,7 @@ class SysCapture: if name == "stdin": tmpfile = DontReadFromInput() else: - tmpfile = TextIO() + tmpfile = CaptureIO() self.tmpfile = tmpfile def start(self): @@ -449,7 +449,8 @@ class DontReadFromInput: __iter__ = read def fileno(self): - raise UnsupportedOperation("redirected Stdin is pseudofile, has no fileno()") + raise UnsupportedOperation("redirected stdin is pseudofile, " + "has no fileno()") def isatty(self): return False diff --git a/_pytest/compat.py b/_pytest/compat.py index d37fe1a48..25610b645 100644 --- a/_pytest/compat.py +++ b/_pytest/compat.py @@ -1,6 +1,7 @@ """ python version compatibility code """ +from __future__ import absolute_import, division, print_function import sys import inspect import types @@ -123,7 +124,7 @@ if sys.version_info[:2] == (2, 6): if _PY3: import codecs - + imap = map STRING_TYPES = bytes, str def _escape_strings(val): @@ -157,6 +158,8 @@ if _PY3: else: STRING_TYPES = bytes, str, unicode + from itertools import imap # NOQA + def _escape_strings(val): """In py2 bytes and str are the same type, so return if it's a bytes object, return it unchanged if it is a full ascii string, @@ -179,8 +182,18 @@ def get_real_func(obj): """ gets the real function object of the (possibly) wrapped object by functools.wraps or functools.partial. """ - while hasattr(obj, "__wrapped__"): - obj = obj.__wrapped__ + start_obj = obj + for i in range(100): + new_obj = getattr(obj, '__wrapped__', None) + if new_obj is None: + break + obj = new_obj + else: + raise ValueError( + ("could not find real function of {start}" + "\nstopped at {current}").format( + start=py.io.saferepr(start_obj), + current=py.io.saferepr(obj))) if isinstance(obj, functools.partial): obj = obj.func return obj @@ -210,7 +223,7 @@ def safe_getattr(object, name, default): """ Like getattr but return default upon any Exception. 
Attribute access can potentially fail for 'evil' Python objects. - See issue214 + See issue #214. """ try: return getattr(object, name, default) @@ -242,3 +255,51 @@ else: v = unicode(v) errors = 'replace' return v.encode('utf-8', errors) + + +COLLECT_FAKEMODULE_ATTRIBUTES = ( + 'Collector', + 'Module', + 'Generator', + 'Function', + 'Instance', + 'Session', + 'Item', + 'Class', + 'File', + '_fillfuncargs', +) + + +def _setup_collect_fakemodule(): + from types import ModuleType + import pytest + pytest.collect = ModuleType('pytest.collect') + pytest.collect.__all__ = [] # used for setns + for attr in COLLECT_FAKEMODULE_ATTRIBUTES: + setattr(pytest.collect, attr, getattr(pytest, attr)) + + +if _PY2: + from py.io import TextIO as CaptureIO +else: + import io + + class CaptureIO(io.TextIOWrapper): + def __init__(self): + super(CaptureIO, self).__init__( + io.BytesIO(), + encoding='UTF-8', newline='', write_through=True, + ) + + def getvalue(self): + return self.buffer.getvalue().decode('UTF-8') + +class FuncargnamesCompatAttr(object): + """ helper class so that Metafunc, Function and FixtureRequest + don't need to each define the "funcargnames" compatibility attribute. + """ + @property + def funcargnames(self): + """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" + return self.fixturenames diff --git a/_pytest/config.py b/_pytest/config.py index 36288170c..c687e3df9 100644 --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,4 +1,5 @@ """ command line options, ini-file and conftest.py processing. """ +from __future__ import absolute_import, division, print_function import argparse import shlex import traceback @@ -7,7 +8,8 @@ import warnings import py # DON't import pytest here because it causes import cycle troubles -import sys, os +import sys +import os import _pytest._code import _pytest.hookspec # the extension point definitions import _pytest.assertion @@ -53,7 +55,6 @@ def main(args=None, plugins=None): return 4 else: try: - config.pluginmanager.check_pending() return config.hook.pytest_cmdline_main(config=config) finally: config._ensure_unconfigure() @@ -98,7 +99,8 @@ default_plugins = ( "mark main terminal runner python fixtures debugging unittest capture skipping " "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion " "junitxml resultlog doctest cacheprovider freeze_support " - "setuponly setupplan").split() + "setuponly setupplan warnings").split() + builtin_plugins = set(default_plugins) builtin_plugins.add("pytester") @@ -251,6 +253,9 @@ class PytestPluginManager(PluginManager): if ret: self.hook.pytest_plugin_registered.call_historic( kwargs=dict(plugin=plugin, manager=self)) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) return ret def getplugin(self, name): @@ -395,8 +400,7 @@ class PytestPluginManager(PluginManager): self.import_plugin(arg) def consider_conftest(self, conftestmodule): - if self.register(conftestmodule, name=conftestmodule.__file__): - self.consider_module(conftestmodule) + self.register(conftestmodule, name=conftestmodule.__file__) def consider_env(self): self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) @@ -414,7 +418,8 @@ class PytestPluginManager(PluginManager): # "terminal" or "capture". Those plugins are registered under their # basename for historic purposes but must be imported with the # _pytest prefix. 
- assert isinstance(modname, str), "module name as string required, got %r" % modname + assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname + modname = str(modname) if self.get_plugin(modname) is not None: return if modname in builtin_plugins: @@ -439,7 +444,6 @@ class PytestPluginManager(PluginManager): else: mod = sys.modules[importspec] self.register(mod, modname) - self.consider_module(mod) def _get_plugin_specs_as_list(specs): @@ -910,11 +914,11 @@ class Config(object): fin = self._cleanup.pop() fin() - def warn(self, code, message, fslocation=None): + def warn(self, code, message, fslocation=None, nodeid=None): """ generate a warning for this test session. """ self.hook.pytest_logwarning.call_historic(kwargs=dict( code=code, message=message, - fslocation=fslocation, nodeid=None)) + fslocation=fslocation, nodeid=nodeid)) def get_terminal_writer(self): return self.pluginmanager.get_plugin("terminalreporter")._tw diff --git a/_pytest/debugging.py b/_pytest/debugging.py index d96170bd8..73a0a2ef5 100644 --- a/_pytest/debugging.py +++ b/_pytest/debugging.py @@ -1,9 +1,8 @@ """ interactive debugging with PDB, the Python Debugger. """ -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import pdb import sys -import pytest def pytest_addoption(parser): @@ -16,19 +15,17 @@ def pytest_addoption(parser): help="start a custom interactive Python debugger on errors. " "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb") -def pytest_namespace(): - return {'set_trace': pytestPDB().set_trace} def pytest_configure(config): - if config.getvalue("usepdb") or config.getvalue("usepdb_cls"): + if config.getvalue("usepdb_cls"): + modname, classname = config.getvalue("usepdb_cls").split(":") + __import__(modname) + pdb_cls = getattr(sys.modules[modname], classname) + else: + pdb_cls = pdb.Pdb + + if config.getvalue("usepdb"): config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') - if config.getvalue("usepdb_cls"): - modname, classname = config.getvalue("usepdb_cls").split(":") - __import__(modname) - pdb_cls = getattr(sys.modules[modname], classname) - else: - pdb_cls = pdb.Pdb - pytestPDB._pdb_cls = pdb_cls old = (pdb.set_trace, pytestPDB._pluginmanager) @@ -37,9 +34,10 @@ def pytest_configure(config): pytestPDB._config = None pytestPDB._pdb_cls = pdb.Pdb - pdb.set_trace = pytest.set_trace + pdb.set_trace = pytestPDB.set_trace pytestPDB._pluginmanager = config.pluginmanager pytestPDB._config = config + pytestPDB._pdb_cls = pdb_cls config._cleanup.append(fin) class pytestPDB: @@ -48,19 +46,20 @@ class pytestPDB: _config = None _pdb_cls = pdb.Pdb - def set_trace(self): + @classmethod + def set_trace(cls): """ invoke PDB set_trace debugging, dropping any IO capturing. 
""" import _pytest.config frame = sys._getframe().f_back - if self._pluginmanager is not None: - capman = self._pluginmanager.getplugin("capturemanager") + if cls._pluginmanager is not None: + capman = cls._pluginmanager.getplugin("capturemanager") if capman: capman.suspendcapture(in_=True) - tw = _pytest.config.create_terminal_writer(self._config) + tw = _pytest.config.create_terminal_writer(cls._config) tw.line() tw.sep(">", "PDB set_trace (IO-capturing turned off)") - self._pluginmanager.hook.pytest_enter_pdb(config=self._config) - self._pdb_cls().set_trace(frame) + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config) + cls._pdb_cls().set_trace(frame) class PdbInvoke: @@ -74,7 +73,7 @@ class PdbInvoke: def pytest_internalerror(self, excrepr, excinfo): for line in str(excrepr).split("\n"): - sys.stderr.write("INTERNALERROR> %s\n" %line) + sys.stderr.write("INTERNALERROR> %s\n" % line) sys.stderr.flush() tb = _postmortem_traceback(excinfo) post_mortem(tb) diff --git a/_pytest/deprecated.py b/_pytest/deprecated.py index 6edc475f6..e75ff099e 100644 --- a/_pytest/deprecated.py +++ b/_pytest/deprecated.py @@ -5,7 +5,7 @@ that is planned to be removed in the next pytest release. Keeping it in a central location makes it easy to track what is deprecated and should be removed when the time comes. """ - +from __future__ import absolute_import, division, print_function MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \ 'pass a list of arguments instead.' diff --git a/_pytest/doctest.py b/_pytest/doctest.py index f4782dded..f9299be72 100644 --- a/_pytest/doctest.py +++ b/_pytest/doctest.py @@ -1,5 +1,5 @@ """ discover and run doctests in modules and test files.""" -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import traceback @@ -25,6 +25,7 @@ DOCTEST_REPORT_CHOICES = ( def pytest_addoption(parser): parser.addini('doctest_optionflags', 'option flags for doctests', type="args", default=["ELLIPSIS"]) + parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8") group = parser.getgroup("collect") group.addoption("--doctest-modules", action="store_true", default=False, @@ -162,7 +163,6 @@ def get_optionflags(parent): flag_acc |= flag_lookup_table[flag] return flag_acc - class DoctestTextfile(pytest.Module): obj = None @@ -171,7 +171,8 @@ class DoctestTextfile(pytest.Module): # inspired by doctest.testfile; ideally we would use it directly, # but it doesn't support passing a custom checker - text = self.fspath.read() + encoding = self.config.getini("doctest_encoding") + text = self.fspath.read_text(encoding) filename = str(self.fspath) name = self.fspath.basename globs = {'__name__': '__main__'} diff --git a/_pytest/fixtures.py b/_pytest/fixtures.py index 59315751d..1a6e245c7 100644 --- a/_pytest/fixtures.py +++ b/_pytest/fixtures.py @@ -1,9 +1,9 @@ +from __future__ import absolute_import, division, print_function import sys from py._code.code import FormattedExcinfo import py -import pytest import warnings import inspect @@ -16,8 +16,16 @@ from _pytest.compat import ( getlocation, getfuncargnames, safe_getattr, ) +from _pytest.runner import fail +from _pytest.compat import FuncargnamesCompatAttr def pytest_sessionstart(session): + import _pytest.python + scopename2class.update({ + 'class': _pytest.python.Class, + 'module': _pytest.python.Module, + 'function': _pytest.main.Item, + }) session._fixturemanager = FixtureManager(session) @@ -44,19 +52,6 @@ def scopeproperty(name=None, doc=None): 
return decoratescope -def pytest_namespace(): - scopename2class.update({ - 'class': pytest.Class, - 'module': pytest.Module, - 'function': pytest.Item, - }) - return { - 'fixture': fixture, - 'yield_fixture': yield_fixture, - 'collect': {'_fillfuncargs': fillfixtures} - } - - def get_scope_node(node, scope): cls = scopename2class.get(scope) if cls is None: @@ -104,7 +99,7 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): if scope != "function": node = get_scope_node(collector, scope) if node is None: - assert scope == "class" and isinstance(collector, pytest.Module) + assert scope == "class" and isinstance(collector, _pytest.python.Module) # use module-level collector for class-scope (for now) node = collector if node and argname in node._name2pseudofixturedef: @@ -220,17 +215,6 @@ def slice_items(items, ignore, scoped_argkeys_cache): return items, None, None, None - -class FuncargnamesCompatAttr: - """ helper class so that Metafunc, Function and FixtureRequest - don't need to each define the "funcargnames" compatibility attribute. - """ - @property - def funcargnames(self): - """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" - return self.fixturenames - - def fillfixtures(function): """ fill missing funcargs for a test function. """ try: @@ -326,7 +310,7 @@ class FixtureRequest(FuncargnamesCompatAttr): @scopeproperty("class") def cls(self): """ class (can be None) where the test function was collected. """ - clscol = self._pyfuncitem.getparent(pytest.Class) + clscol = self._pyfuncitem.getparent(_pytest.python.Class) if clscol: return clscol.obj @@ -344,7 +328,7 @@ class FixtureRequest(FuncargnamesCompatAttr): @scopeproperty() def module(self): """ python module object where the test function was collected. 
""" - return self._pyfuncitem.getparent(pytest.Module).obj + return self._pyfuncitem.getparent(_pytest.python.Module).obj @scopeproperty() def fspath(self): @@ -507,7 +491,7 @@ class FixtureRequest(FuncargnamesCompatAttr): source_lineno, ) ) - pytest.fail(msg) + fail(msg) else: # indices might not be set if old-style metafunc.addcall() was used param_index = funcitem.callspec.indices.get(argname, 0) @@ -540,10 +524,10 @@ class FixtureRequest(FuncargnamesCompatAttr): if scopemismatch(invoking_scope, requested_scope): # try to report something helpful lines = self._factorytraceback() - pytest.fail("ScopeMismatch: You tried to access the %r scoped " - "fixture %r with a %r scoped request object, " - "involved factories\n%s" %( - (requested_scope, argname, invoking_scope, "\n".join(lines))), + fail("ScopeMismatch: You tried to access the %r scoped " + "fixture %r with a %r scoped request object, " + "involved factories\n%s" % ( + (requested_scope, argname, invoking_scope, "\n".join(lines))), pytrace=False) def _factorytraceback(self): @@ -553,7 +537,7 @@ class FixtureRequest(FuncargnamesCompatAttr): fs, lineno = getfslineno(factory) p = self._pyfuncitem.session.fspath.bestrelpath(fs) args = _format_args(factory) - lines.append("%s:%d: def %s%s" %( + lines.append("%s:%d: def %s%s" % ( p, lineno, factory.__name__, args)) return lines @@ -698,8 +682,9 @@ def fail_fixturefunc(fixturefunc, msg): fs, lineno = getfslineno(fixturefunc) location = "%s:%s" % (fs, lineno+1) source = _pytest._code.Source(fixturefunc) - pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, - pytrace=False) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, + pytrace=False) + def call_fixture_func(fixturefunc, request, kwargs): yieldctx = is_generator(fixturefunc) @@ -1080,7 +1065,7 @@ class FixtureManager: continue marker = defaultfuncargprefixmarker from _pytest import deprecated - self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name)) + self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid) name = name[len(self._argprefix):] elif not isinstance(marker, FixtureFunctionMarker): # magic globals with __getattr__ might have got us a wrong diff --git a/_pytest/freeze_support.py b/_pytest/freeze_support.py index f78ccd298..52f86087f 100644 --- a/_pytest/freeze_support.py +++ b/_pytest/freeze_support.py @@ -2,9 +2,8 @@ Provides a function to report all internal modules for using freezing tools pytest """ +from __future__ import absolute_import, division, print_function -def pytest_namespace(): - return {'freeze_includes': freeze_includes} def freeze_includes(): @@ -42,4 +41,4 @@ def _iter_all_modules(package, prefix=''): for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'): yield prefix + m else: - yield prefix + name \ No newline at end of file + yield prefix + name diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py index 6e66b11c4..abc792f7e 100644 --- a/_pytest/helpconfig.py +++ b/_pytest/helpconfig.py @@ -1,4 +1,6 @@ """ version info, help messages, tracing configuration. 
""" +from __future__ import absolute_import, division, print_function + import py import pytest import os, sys diff --git a/_pytest/hookspec.py b/_pytest/hookspec.py index 552a06575..917632669 100644 --- a/_pytest/hookspec.py +++ b/_pytest/hookspec.py @@ -16,7 +16,9 @@ def pytest_addhooks(pluginmanager): @hookspec(historic=True) def pytest_namespace(): - """return dict of name->object to be made globally available in + """ + DEPRECATED: this hook causes direct monkeypatching on pytest, its use is strongly discouraged + return dict of name->object to be made globally available in the pytest namespace. This hook is called at plugin registration time. """ @@ -157,9 +159,10 @@ def pytest_generate_tests(metafunc): """ generate (multiple) parametrized calls to a test function.""" @hookspec(firstresult=True) -def pytest_make_parametrize_id(config, val): +def pytest_make_parametrize_id(config, val, argname): """Return a user-friendly string representation of the given ``val`` that will be used by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. + The parameter name is available as ``argname``, if required. """ # ------------------------------------------------------------------------- diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py index 3f371c9d3..301633706 100644 --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -4,9 +4,11 @@ Based on initial code from Ross Lawley. + +Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ +src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd """ -# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ -# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +from __future__ import absolute_import, division, print_function import functools import py @@ -105,6 +107,8 @@ class _NodeReporter(object): } if testreport.location[1] is not None: attrs["line"] = testreport.location[1] + if hasattr(testreport, "url"): + attrs["url"] = testreport.url self.attrs = attrs def to_xml(self): @@ -222,13 +226,14 @@ def pytest_addoption(parser): metavar="str", default=None, help="prepend prefix to classnames in junit-xml output") + parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest") def pytest_configure(config): xmlpath = config.option.xmlpath # prevent opening xmllog on slave nodes (xdist) if xmlpath and not hasattr(config, 'slaveinput'): - config._xml = LogXML(xmlpath, config.option.junitprefix) + config._xml = LogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name")) config.pluginmanager.register(config._xml) @@ -255,10 +260,11 @@ def mangle_test_address(address): class LogXML(object): - def __init__(self, logfile, prefix): + def __init__(self, logfile, prefix, suite_name="pytest"): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) self.prefix = prefix + self.suite_name = suite_name self.stats = dict.fromkeys([ 'error', 'passed', @@ -268,6 +274,9 @@ class LogXML(object): self.node_reporters = {} # nodeid -> _NodeReporter self.node_reporters_ordered = [] self.global_properties = [] + # List of reports that failed on call but teardown is pending. 
+ self.open_reports = [] + self.cnt_double_fail_tests = 0 def finalize(self, report): nodeid = getattr(report, 'nodeid', report) @@ -327,14 +336,33 @@ class LogXML(object): -> teardown node2 -> teardown node1 """ + close_report = None if report.passed: if report.when == "call": # ignore setup/teardown reporter = self._opentestcase(report) reporter.append_pass(report) elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + (rep for rep in self.open_reports + if (rep.nodeid == report.nodeid and + getattr(rep, "item_index", None) == report_ii and + getattr(rep, "worker_id", None) == report_wid + ) + ), None) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema + self.finalize(close_report) + self.cnt_double_fail_tests += 1 reporter = self._opentestcase(report) if report.when == "call": reporter.append_failure(report) + self.open_reports.append(report) else: reporter.append_error(report) elif report.skipped: @@ -345,6 +373,17 @@ class LogXML(object): reporter = self._opentestcase(report) reporter.write_captured_output(report) self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + (rep for rep in self.open_reports + if (rep.nodeid == report.nodeid and + getattr(rep, "item_index", None) == report_ii and + getattr(rep, "worker_id", None) == report_wid + ) + ), None) + if close_report: + self.open_reports.remove(close_report) def update_testcase_duration(self, report): """accumulates total duration for nodeid from given report and updates @@ -377,14 +416,15 @@ class LogXML(object): suite_stop_time = time.time() suite_time_delta = suite_stop_time - self.suite_start_time - numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error'] - + numtests = (self.stats['passed'] + self.stats['failure'] + + self.stats['skipped'] + self.stats['error'] - + self.cnt_double_fail_tests) logfile.write('') logfile.write(Junit.testsuite( self._get_global_properties_node(), [x.to_xml() for x in self.node_reporters_ordered], - name="pytest", + name=self.suite_name, errors=self.stats['error'], failures=self.stats['failure'], skips=self.stats['skipped'], diff --git a/_pytest/main.py b/_pytest/main.py index b66b661c8..480810cc8 100644 --- a/_pytest/main.py +++ b/_pytest/main.py @@ -1,4 +1,6 @@ """ core implementation of testing process: init, session, runtest loop. 
""" +from __future__ import absolute_import, division, print_function + import functools import os import sys @@ -6,14 +8,13 @@ import sys import _pytest import _pytest._code import py -import pytest try: from collections import MutableMapping as MappingMixin except ImportError: from UserDict import DictMixin as MappingMixin -from _pytest.config import directory_arg -from _pytest.runner import collect_one_node +from _pytest.config import directory_arg, UsageError, hookimpl +from _pytest.runner import collect_one_node, exit tracebackcutdir = py.path.local(_pytest.__file__).dirpath() @@ -25,9 +26,10 @@ EXIT_INTERNALERROR = 3 EXIT_USAGEERROR = 4 EXIT_NOTESTSCOLLECTED = 5 + def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", - type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg']) + type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv']) parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.", type="args", default=[]) #parser.addini("dirpatterns", @@ -75,13 +77,18 @@ def pytest_addoption(parser): help="base temporary directory for this test run.") + def pytest_namespace(): - collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) - return dict(collect=collect) + """keeping this one works around a deeper startup issue in pytest + + i tried to find it for a while but the amount of time turned unsustainable, + so i put a hack in to revisit later + """ + return {} def pytest_configure(config): - pytest.config = config # compatibility + __import__('pytest').config = config # compatibiltiy def wrap_session(config, doit): @@ -96,12 +103,11 @@ def wrap_session(config, doit): config.hook.pytest_sessionstart(session=session) initstate = 2 session.exitstatus = doit(config, session) or 0 - except pytest.UsageError: + except UsageError: raise except KeyboardInterrupt: excinfo = _pytest._code.ExceptionInfo() - if initstate < 2 and isinstance( - excinfo.value, pytest.exit.Exception): + if initstate < 2 and isinstance(excinfo.value, exit.Exception): sys.stderr.write('{0}: {1}\n'.format( excinfo.typename, excinfo.value.msg)) config.hook.pytest_keyboard_interrupt(excinfo=excinfo) @@ -123,9 +129,11 @@ def wrap_session(config, doit): config._ensure_unconfigure() return session.exitstatus + def pytest_cmdline_main(config): return wrap_session(config, _main) + def _main(config, session): """ default command line protocol for initialization, session, running tests and reporting. 
""" @@ -137,9 +145,11 @@ def _main(config, session): elif session.testscollected == 0: return EXIT_NOTESTSCOLLECTED + def pytest_collection(session): return session.perform_collect() + def pytest_runtestloop(session): if (session.testsfailed and not session.config.option.continue_on_collection_errors): @@ -156,6 +166,7 @@ def pytest_runtestloop(session): raise session.Interrupted(session.shouldstop) return True + def pytest_ignore_collect(path, config): p = path.dirpath() ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) @@ -203,7 +214,7 @@ class _CompatProperty(object): # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( # name=self.name, owner=type(owner).__name__), # PendingDeprecationWarning, stacklevel=2) - return getattr(pytest, self.name) + return getattr(__import__('pytest'), self.name) @@ -287,7 +298,7 @@ class Node(object): def _getcustomclass(self, name): maybe_compatprop = getattr(type(self), name) if isinstance(maybe_compatprop, _CompatProperty): - return getattr(pytest, name) + return getattr(__import__('pytest'), name) else: cls = getattr(self, name) # TODO: reenable in the features branch @@ -307,9 +318,6 @@ class Node(object): fslocation = getattr(self, "location", None) if fslocation is None: fslocation = getattr(self, "fspath", None) - else: - fslocation = "%s:%s" % (fslocation[0], fslocation[1] + 1) - self.ihook.pytest_logwarning.call_historic(kwargs=dict( code=code, message=message, nodeid=self.nodeid, fslocation=fslocation)) @@ -370,9 +378,9 @@ class Node(object): ``marker`` can be a string or pytest.mark.* instance. """ - from _pytest.mark import MarkDecorator + from _pytest.mark import MarkDecorator, MARK_GEN if isinstance(marker, py.builtin._basestring): - marker = MarkDecorator(marker) + marker = getattr(MARK_GEN, marker) elif not isinstance(marker, MarkDecorator): raise ValueError("is not a string or pytest.mark.* Marker") self.keywords[marker.name] = marker @@ -471,10 +479,6 @@ class Collector(Node): return str(exc.args[0]) return self._repr_failure_py(excinfo, style="short") - def _memocollect(self): - """ internal helper method to cache results of calling collect(). 
""" - return self._memoizedcall('_collected', lambda: list(self.collect())) - def _prunetraceback(self, excinfo): if hasattr(self, 'fspath'): traceback = excinfo.traceback @@ -562,12 +566,12 @@ class Session(FSCollector): def _makeid(self): return "" - @pytest.hookimpl(tryfirst=True) + @hookimpl(tryfirst=True) def pytest_collectstart(self): if self.shouldstop: raise self.Interrupted(self.shouldstop) - @pytest.hookimpl(tryfirst=True) + @hookimpl(tryfirst=True) def pytest_runtest_logreport(self, report): if report.failed and not hasattr(report, 'wasxfail'): self.testsfailed += 1 @@ -598,6 +602,7 @@ class Session(FSCollector): hook = self.config.hook try: items = self._perform_collect(args, genitems) + self.config.pluginmanager.check_pending() hook.pytest_collection_modifyitems(session=self, config=self.config, items=items) finally: @@ -626,8 +631,8 @@ class Session(FSCollector): for arg, exc in self._notfound: line = "(no name %r in any of %r)" % (arg, exc.args[0]) errors.append("not found: %s\n%s" % (arg, line)) - #XXX: test this - raise pytest.UsageError(*errors) + # XXX: test this + raise UsageError(*errors) if not genitems: return rep.result else: @@ -655,7 +660,7 @@ class Session(FSCollector): names = self._parsearg(arg) path = names.pop(0) if path.check(dir=1): - assert not names, "invalid arg %r" %(arg,) + assert not names, "invalid arg %r" % (arg,) for path in path.visit(fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True): for x in self._collectfile(path): @@ -714,9 +719,11 @@ class Session(FSCollector): path = self.config.invocation_dir.join(relpath, abs=True) if not path.check(): if self.config.option.pyargs: - raise pytest.UsageError("file or package not found: " + arg + " (missing __init__.py?)") + raise UsageError( + "file or package not found: " + arg + + " (missing __init__.py?)") else: - raise pytest.UsageError("file not found: " + arg) + raise UsageError("file not found: " + arg) parts[0] = path return parts @@ -739,11 +746,11 @@ class Session(FSCollector): nextnames = names[1:] resultnodes = [] for node in matching: - if isinstance(node, pytest.Item): + if isinstance(node, Item): if not names: resultnodes.append(node) continue - assert isinstance(node, pytest.Collector) + assert isinstance(node, Collector) rep = collect_one_node(node) if rep.passed: has_matched = False @@ -761,11 +768,11 @@ class Session(FSCollector): def genitems(self, node): self.trace("genitems", node) - if isinstance(node, pytest.Item): + if isinstance(node, Item): node.ihook.pytest_itemcollected(item=node) yield node else: - assert isinstance(node, pytest.Collector) + assert isinstance(node, Collector) rep = collect_one_node(node) if rep.passed: for subnode in rep.result: diff --git a/_pytest/mark.py b/_pytest/mark.py index d406bca6b..8b40a4f6e 100644 --- a/_pytest/mark.py +++ b/_pytest/mark.py @@ -1,5 +1,64 @@ """ generic mechanism for marking and selecting python functions. 
""" +from __future__ import absolute_import, division, print_function + import inspect +from collections import namedtuple +from operator import attrgetter +from .compat import imap + + +def alias(name): + return property(attrgetter(name), doc='alias for ' + name) + + +class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): + @classmethod + def param(cls, *values, **kw): + marks = kw.pop('marks', ()) + if isinstance(marks, MarkDecorator): + marks = marks, + else: + assert isinstance(marks, (tuple, list, set)) + + def param_extract_id(id=None): + return id + + id = param_extract_id(**kw) + return cls(values, marks, id) + + @classmethod + def extract_from(cls, parameterset, legacy_force_tuple=False): + """ + :param parameterset: + a legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects + + :param legacy_force_tuple: + enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests + + """ + + if isinstance(parameterset, cls): + return parameterset + if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple: + return cls.param(parameterset) + + newmarks = [] + argval = parameterset + while isinstance(argval, MarkDecorator): + newmarks.append(MarkDecorator(Mark( + argval.markname, argval.args[:-1], argval.kwargs))) + argval = argval.args[-1] + assert not isinstance(argval, ParameterSet) + if legacy_force_tuple: + argval = argval, + + return cls(argval, marks=newmarks, id=None) + + @property + def deprecated_arg_dict(self): + return dict((mark.name, mark) for mark in self.marks) class MarkerError(Exception): @@ -7,8 +66,8 @@ class MarkerError(Exception): """Error in use of a pytest marker/attribute.""" -def pytest_namespace(): - return {'mark': MarkGenerator()} +def param(*values, **kw): + return ParameterSet.param(*values, **kw) def pytest_addoption(parser): @@ -162,9 +221,13 @@ def matchkeyword(colitem, keywordexpr): def pytest_configure(config): - import pytest + config._old_mark_config = MARK_GEN._config if config.option.strict: - pytest.mark._config = config + MARK_GEN._config = config + + +def pytest_unconfigure(config): + MARK_GEN._config = getattr(config, '_old_mark_config', None) class MarkGenerator: @@ -178,13 +241,15 @@ class MarkGenerator: will set a 'slowtest' :class:`MarkInfo` object on the ``test_function`` object. """ + _config = None + def __getattr__(self, name): if name[0] == "_": raise AttributeError("Marker name must NOT start with underscore") - if hasattr(self, '_config'): + if self._config is not None: self._check(name) - return MarkDecorator(name) + return MarkDecorator(Mark(name, (), {})) def _check(self, name): try: @@ -200,6 +265,7 @@ class MarkGenerator: if name not in self._markers: raise AttributeError("%r not a registered marker" % (name,)) + def istestfunc(func): return hasattr(func, "__call__") and \ getattr(func, "__name__", "") != "" @@ -237,19 +303,23 @@ class MarkDecorator: additional keyword or positional arguments. 
""" - def __init__(self, name, args=None, kwargs=None): - self.name = name - self.args = args or () - self.kwargs = kwargs or {} + def __init__(self, mark): + assert isinstance(mark, Mark), repr(mark) + self.mark = mark + + name = alias('mark.name') + args = alias('mark.args') + kwargs = alias('mark.kwargs') @property def markname(self): return self.name # for backward-compat (2.4.1 had this attr) + def __eq__(self, other): + return self.mark == other.mark + def __repr__(self): - d = self.__dict__.copy() - name = d.pop('name') - return "" % (name, d) + return "" % (self.mark,) def __call__(self, *args, **kwargs): """ if passed a single callable argument: decorate it with mark info. @@ -272,57 +342,50 @@ class MarkDecorator: else: holder = getattr(func, self.name, None) if holder is None: - holder = MarkInfo( - self.name, self.args, self.kwargs - ) + holder = MarkInfo(self.mark) setattr(func, self.name, holder) else: - holder.add(self.args, self.kwargs) + holder.add_mark(self.mark) return func - kw = self.kwargs.copy() - kw.update(kwargs) - args = self.args + args - return self.__class__(self.name, args=args, kwargs=kw) + + mark = Mark(self.name, args, kwargs) + return self.__class__(self.mark.combined_with(mark)) -def extract_argvalue(maybe_marked_args): - # TODO: incorrect mark data, the old code wanst able to collect lists - # individual parametrized argument sets can be wrapped in a series - # of markers in which case we unwrap the values and apply the mark - # at Function init - newmarks = {} - argval = maybe_marked_args - while isinstance(argval, MarkDecorator): - newmark = MarkDecorator(argval.markname, - argval.args[:-1], argval.kwargs) - newmarks[newmark.markname] = newmark - argval = argval.args[-1] - return argval, newmarks -class MarkInfo: + +class Mark(namedtuple('Mark', 'name, args, kwargs')): + + def combined_with(self, other): + assert self.name == other.name + return Mark( + self.name, self.args + other.args, + dict(self.kwargs, **other.kwargs)) + + +class MarkInfo(object): """ Marking object created by :class:`MarkDecorator` instances. """ - def __init__(self, name, args, kwargs): - #: name of attribute - self.name = name - #: positional argument list, empty if none specified - self.args = args - #: keyword argument dictionary, empty if nothing specified - self.kwargs = kwargs.copy() - self._arglist = [(args, kwargs.copy())] + def __init__(self, mark): + assert isinstance(mark, Mark), repr(mark) + self.combined = mark + self._marks = [mark] + + name = alias('combined.name') + args = alias('combined.args') + kwargs = alias('combined.kwargs') def __repr__(self): - return "" % ( - self.name, self.args, self.kwargs - ) + return "".format(self.combined) - def add(self, args, kwargs): + def add_mark(self, mark): """ add a MarkInfo with the given args and kwargs. """ - self._arglist.append((args, kwargs)) - self.args += args - self.kwargs.update(kwargs) + self._marks.append(mark) + self.combined = self.combined.combined_with(mark) def __iter__(self): """ yield MarkInfo objects each relating to a marking-call. """ - for args, kwargs in self._arglist: - yield MarkInfo(self.name, args, kwargs) + return imap(MarkInfo, self._marks) + + +MARK_GEN = MarkGenerator() diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py index 2651cf044..a70b23dda 100644 --- a/_pytest/monkeypatch.py +++ b/_pytest/monkeypatch.py @@ -1,16 +1,17 @@ """ monkeypatching and mocking functionality. 
""" +from __future__ import absolute_import, division, print_function -import os, sys +import os +import sys import re from py.builtin import _basestring - -import pytest +from _pytest.fixtures import fixture RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$") -@pytest.fixture +@fixture def monkeypatch(): """The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: diff --git a/_pytest/nose.py b/_pytest/nose.py index 038746868..9d4fc0b6e 100644 --- a/_pytest/nose.py +++ b/_pytest/nose.py @@ -1,10 +1,11 @@ """ run test suites written for nose. """ +from __future__ import absolute_import, division, print_function import sys import py -import pytest -from _pytest import unittest +from _pytest import unittest, runner, python +from _pytest.config import hookimpl def get_skip_exceptions(): @@ -19,19 +20,19 @@ def get_skip_exceptions(): def pytest_runtest_makereport(item, call): if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): # let's substitute the excinfo with a pytest.skip one - call2 = call.__class__(lambda: - pytest.skip(str(call.excinfo.value)), call.when) + call2 = call.__class__( + lambda: runner.skip(str(call.excinfo.value)), call.when) call.excinfo = call2.excinfo -@pytest.hookimpl(trylast=True) +@hookimpl(trylast=True) def pytest_runtest_setup(item): if is_potential_nosetest(item): - if isinstance(item.parent, pytest.Generator): + if isinstance(item.parent, python.Generator): gen = item.parent if not hasattr(gen, '_nosegensetup'): call_optional(gen.obj, 'setup') - if isinstance(gen.parent, pytest.Instance): + if isinstance(gen.parent, python.Instance): call_optional(gen.parent.obj, 'setup') gen._nosegensetup = True if not call_optional(item.obj, 'setup'): @@ -50,14 +51,14 @@ def teardown_nose(item): def pytest_make_collect_report(collector): - if isinstance(collector, pytest.Generator): + if isinstance(collector, python.Generator): call_optional(collector.obj, 'setup') def is_potential_nosetest(item): # extra check needed since we do not do nose style setup/teardown # on direct unittest style classes - return isinstance(item, pytest.Function) and \ + return isinstance(item, python.Function) and \ not isinstance(item, unittest.TestCaseFunction) diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py index 9f1cf9063..6f3ce8fed 100644 --- a/_pytest/pastebin.py +++ b/_pytest/pastebin.py @@ -1,4 +1,6 @@ """ submit failure or test session information to a pastebin service. """ +from __future__ import absolute_import, division, print_function + import pytest import sys import tempfile diff --git a/_pytest/pytester.py b/_pytest/pytester.py index 6ad26c918..901caa340 100644 --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -1,4 +1,6 @@ """ (disabled by default) support for testing pytest and pytest plugins. 
""" +from __future__ import absolute_import, division, print_function + import codecs import gc import os @@ -10,8 +12,9 @@ import time import traceback from fnmatch import fnmatch -from py.builtin import print_ +from weakref import WeakKeyDictionary +from _pytest.capture import MultiCapture, SysCapture from _pytest._code import Source import py import pytest @@ -85,7 +88,7 @@ class LsofFdLeakChecker(object): return True @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_runtest_item(self, item): + def pytest_runtest_protocol(self, item): lines1 = self.get_open_files() yield if hasattr(sys, "pypy_version_info"): @@ -104,7 +107,8 @@ class LsofFdLeakChecker(object): error.extend([str(f) for f in lines2]) error.append(error[0]) error.append("*** function %s:%s: %s " % item.location) - pytest.fail("\n".join(error), pytrace=False) + error.append("See issue #2366") + item.warn('', "\n".join(error)) # XXX copied from execnet's conftest.py - needs to be merged @@ -226,15 +230,15 @@ class HookRecorder: name, check = entries.pop(0) for ind, call in enumerate(self.calls[i:]): if call._name == name: - print_("NAMEMATCH", name, call) + print("NAMEMATCH", name, call) if eval(check, backlocals, call.__dict__): - print_("CHECKERMATCH", repr(check), "->", call) + print("CHECKERMATCH", repr(check), "->", call) else: - print_("NOCHECKERMATCH", repr(check), "-", call) + print("NOCHECKERMATCH", repr(check), "-", call) continue i += ind + 1 break - print_("NONAMEMATCH", name, "with", call) + print("NONAMEMATCH", name, "with", call) else: pytest.fail("could not find %r check %r" % (name, check)) @@ -402,6 +406,7 @@ class Testdir: def __init__(self, request, tmpdir_factory): self.request = request + self._mod_collections = WeakKeyDictionary() # XXX remove duplication with tmpdir plugin basetmp = tmpdir_factory.ensuretemp("testdir") name = request.function.__name__ @@ -470,7 +475,7 @@ class Testdir: if not hasattr(self, '_olddir'): self._olddir = old - def _makefile(self, ext, args, kwargs): + def _makefile(self, ext, args, kwargs, encoding="utf-8"): items = list(kwargs.items()) if args: source = py.builtin._totext("\n").join( @@ -490,7 +495,7 @@ class Testdir: source_unicode = "\n".join([my_totext(line) for line in source.lines]) source = py.builtin._totext(source_unicode) - content = source.strip().encode("utf-8") # + "\n" + content = source.strip().encode(encoding) # + "\n" #content = content.rstrip() + "\n" p.write(content, "wb") if ret is None: @@ -735,7 +740,8 @@ class Testdir: if kwargs.get("syspathinsert"): self.syspathinsert() now = time.time() - capture = py.io.StdCapture() + capture = MultiCapture(Capture=SysCapture) + capture.start_capturing() try: try: reprec = self.inline_run(*args, **kwargs) @@ -750,7 +756,8 @@ class Testdir: class reprec: ret = 3 finally: - out, err = capture.reset() + out, err = capture.readouterr() + capture.stop_capturing() sys.stdout.write(out) sys.stderr.write(err) @@ -867,6 +874,7 @@ class Testdir: self.makepyfile(__init__ = "#") self.config = config = self.parseconfigure(path, *configargs) node = self.getnode(config, path) + return node def collect_by_name(self, modcol, name): @@ -881,7 +889,9 @@ class Testdir: :param name: The name of the node to return. 
""" - for colitem in modcol._memocollect(): + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: if colitem.name == name: return colitem @@ -916,8 +926,8 @@ class Testdir: cmdargs = [str(x) for x in cmdargs] p1 = self.tmpdir.join("stdout") p2 = self.tmpdir.join("stderr") - print_("running:", ' '.join(cmdargs)) - print_(" in:", str(py.path.local())) + print("running:", ' '.join(cmdargs)) + print(" in:", str(py.path.local())) f1 = codecs.open(str(p1), "w", encoding="utf8") f2 = codecs.open(str(p2), "w", encoding="utf8") try: @@ -943,7 +953,7 @@ class Testdir: def _dump_lines(self, lines, fp): try: for line in lines: - py.builtin.print_(line, file=fp) + print(line, file=fp) except UnicodeEncodeError: print("couldn't print to %s because of encoding" % (fp,)) @@ -1000,7 +1010,7 @@ class Testdir: The pexpect child is returned. """ - basetemp = self.tmpdir.mkdir("pexpect") + basetemp = self.tmpdir.mkdir("temp-pexpect") invoke = " ".join(map(str, self._getpytestargs())) cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) return self.spawn(cmd, expect_timeout=expect_timeout) diff --git a/_pytest/python.py b/_pytest/python.py index 5a381fae3..06f74ce4b 100644 --- a/_pytest/python.py +++ b/_pytest/python.py @@ -1,4 +1,5 @@ """ Python test discovery, setup and run of test functions. """ +from __future__ import absolute_import, division, print_function import fnmatch import inspect @@ -9,19 +10,20 @@ import math from itertools import count import py -import pytest from _pytest.mark import MarkerError - +from _pytest.config import hookimpl import _pytest import _pytest._pluggy as pluggy from _pytest import fixtures +from _pytest import main from _pytest.compat import ( isclass, isfunction, is_generator, _escape_strings, REGEX_TYPE, STRING_TYPES, NoneType, NOTSET, get_real_func, getfslineno, safe_getattr, safe_str, getlocation, enum, ) +from _pytest.runner import fail cutdir1 = py.path.local(pluggy.__file__.rstrip("oc")) cutdir2 = py.path.local(_pytest.__file__).dirpath() @@ -49,7 +51,7 @@ def filter_traceback(entry): def pyobj_property(name): def get(self): - node = self.getparent(getattr(pytest, name)) + node = self.getparent(getattr(__import__('pytest'), name)) if node is not None: return node.obj doc = "python %s object this node was collected from (can be None)." % ( @@ -126,23 +128,8 @@ def pytest_configure(config): "all of the specified fixtures. 
see http://pytest.org/latest/fixture.html#usefixtures " ) -@pytest.hookimpl(trylast=True) -def pytest_namespace(): - raises.Exception = pytest.fail.Exception - return { - 'raises': raises, - 'approx': approx, - 'collect': { - 'Module': Module, - 'Class': Class, - 'Instance': Instance, - 'Function': Function, - 'Generator': Generator, - } - } - -@pytest.hookimpl(trylast=True) +@hookimpl(trylast=True) def pytest_pyfunc_call(pyfuncitem): testfunction = pyfuncitem.obj if pyfuncitem._isyieldedfunction(): @@ -155,6 +142,7 @@ def pytest_pyfunc_call(pyfuncitem): testfunction(**testargs) return True + def pytest_collect_file(path, parent): ext = path.ext if ext == ".py": @@ -170,7 +158,7 @@ def pytest_collect_file(path, parent): def pytest_pycollect_makemodule(path, parent): return Module(path, parent) -@pytest.hookimpl(hookwrapper=True) +@hookimpl(hookwrapper=True) def pytest_pycollect_makeitem(collector, name, obj): outcome = yield res = outcome.get_result() @@ -198,7 +186,7 @@ def pytest_pycollect_makeitem(collector, name, obj): res = list(collector._genfunctions(name, obj)) outcome.force_result(res) -def pytest_make_parametrize_id(config, val): +def pytest_make_parametrize_id(config, val, argname=None): return None @@ -265,7 +253,7 @@ class PyobjMixin(PyobjContext): assert isinstance(lineno, int) return fspath, lineno, modpath -class PyCollector(PyobjMixin, pytest.Collector): +class PyCollector(PyobjMixin, main.Collector): def funcnamefilter(self, name): return self._matches_prefix_or_glob_option('python_functions', name) @@ -402,10 +390,12 @@ def transfer_markers(funcobj, cls, mod): if not _marked(funcobj, pytestmark): pytestmark(funcobj) -class Module(pytest.File, PyCollector): + +class Module(main.File, PyCollector): """ Collector for test classes and functions. """ + def _getobj(self): - return self._memoizedcall('_obj', self._importtestmodule) + return self._importtestmodule() def collect(self): self.session._fixturemanager.parsefactories(self) @@ -502,6 +492,8 @@ def _get_xunit_func(obj, name): class Class(PyCollector): """ Collector for test methods. """ def collect(self): + if not safe_getattr(self.obj, "__test__", True): + return [] if hasinit(self.obj): self.warn("C1", "cannot collect test class %r because it has a " "__init__ constructor" % self.obj.__name__) @@ -586,7 +578,7 @@ class FunctionMixin(PyobjMixin): entry.set_repr_style('short') def _repr_failure_py(self, excinfo, style="long"): - if excinfo.errisinstance(pytest.fail.Exception): + if excinfo.errisinstance(fail.Exception): if not excinfo.value.pytrace: return py._builtin._totext(excinfo.value) return super(FunctionMixin, self)._repr_failure_py(excinfo, @@ -784,36 +776,34 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): to set a dynamic scope using test context or configuration. 
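``Class.collect`` above now honours a ``__test__ = False`` attribute, so a class that matches the test naming convention can opt out of collection. A small illustrative sketch::

    class TestHelpers(object):
        # Class.collect() above returns early because of the
        # safe_getattr(self.obj, "__test__", True) check.
        __test__ = False

        def test_never_collected(self):
            assert False  # never runs; the class is skipped at collection time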
""" from _pytest.fixtures import scope2index - from _pytest.mark import extract_argvalue + from _pytest.mark import MARK_GEN, ParameterSet from py.io import saferepr - unwrapped_argvalues = [] - newkeywords = [] - for maybe_marked_args in argvalues: - argval, newmarks = extract_argvalue(maybe_marked_args) - unwrapped_argvalues.append(argval) - newkeywords.append(newmarks) - argvalues = unwrapped_argvalues - if not isinstance(argnames, (tuple, list)): argnames = [x.strip() for x in argnames.split(",") if x.strip()] - if len(argnames) == 1: - argvalues = [(val,) for val in argvalues] - if not argvalues: - argvalues = [(NOTSET,) * len(argnames)] - # we passed a empty list to parameterize, skip that test - # + force_tuple = len(argnames) == 1 + else: + force_tuple = False + parameters = [ + ParameterSet.extract_from(x, legacy_force_tuple=force_tuple) + for x in argvalues] + del argvalues + + if not parameters: fs, lineno = getfslineno(self.function) - newmark = pytest.mark.skip( - reason="got empty parameter set %r, function %s at %s:%d" % ( - argnames, self.function.__name__, fs, lineno)) - newkeywords = [{newmark.markname: newmark}] + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, self.function.__name__, fs, lineno) + mark = MARK_GEN.skip(reason=reason) + parameters.append(ParameterSet( + values=(NOTSET,) * len(argnames), + marks=[mark], + id=None, + )) if scope is None: scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) - scopenum = scope2index( - scope, descr='call to {0}'.format(self.parametrize)) + scopenum = scope2index(scope, descr='call to {0}'.format(self.parametrize)) valtypes = {} for arg in argnames: if arg not in self.fixturenames: @@ -841,26 +831,26 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): idfn = ids ids = None if ids: - if len(ids) != len(argvalues): - raise ValueError('%d tests specified with %d ids' %( - len(argvalues), len(ids))) + if len(ids) != len(parameters): + raise ValueError('%d tests specified with %d ids' % ( + len(parameters), len(ids))) for id_value in ids: if id_value is not None and not isinstance(id_value, py.builtin._basestring): msg = 'ids must be list of strings, found: %s (type: %s)' raise ValueError(msg % (saferepr(id_value), type(id_value).__name__)) - ids = idmaker(argnames, argvalues, idfn, ids, self.config) + ids = idmaker(argnames, parameters, idfn, ids, self.config) newcalls = [] for callspec in self._calls or [CallSpec2(self)]: - elements = zip(ids, argvalues, newkeywords, count()) - for a_id, valset, keywords, param_index in elements: - if len(valset) != len(argnames): + elements = zip(ids, parameters, count()) + for a_id, param, param_index in elements: + if len(param.values) != len(argnames): raise ValueError( 'In "parametrize" the number of values ({0}) must be ' 'equal to the number of names ({1})'.format( - valset, argnames)) + param.values, argnames)) newcallspec = callspec.copy(self) - newcallspec.setmulti(valtypes, argnames, valset, a_id, - keywords, scopenum, param_index) + newcallspec.setmulti(valtypes, argnames, param.values, a_id, + param.deprecated_arg_dict, scopenum, param_index) newcalls.append(newcallspec) self._calls = newcalls @@ -884,7 +874,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): if funcargs is not None: for name in funcargs: if name not in self.fixturenames: - pytest.fail("funcarg %r not used in this function." % name) + fail("funcarg %r not used in this function." 
% name) else: funcargs = {} if id is None: @@ -929,15 +919,21 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): def _idval(val, argname, idx, idfn, config=None): if idfn: + s = None try: s = idfn(val) - if s: - return _escape_strings(s) except Exception: - pass + # See issue https://github.com/pytest-dev/pytest/issues/2169 + import warnings + msg = "Raised while trying to determine id of parameter %s at position %d." % (argname, idx) + msg += '\nUpdate your code as this will raise an error in pytest-4.0.' + warnings.warn(msg, DeprecationWarning) + if s: + return _escape_strings(s) if config: - hook_id = config.hook.pytest_make_parametrize_id(config=config, val=val) + hook_id = config.hook.pytest_make_parametrize_id( + config=config, val=val, argname=argname) if hook_id: return hook_id @@ -953,17 +949,21 @@ def _idval(val, argname, idx, idfn, config=None): return val.__name__ return str(argname)+str(idx) -def _idvalset(idx, valset, argnames, idfn, ids, config=None): + +def _idvalset(idx, parameterset, argnames, idfn, ids, config=None): + if parameterset.id is not None: + return parameterset.id if ids is None or (idx >= len(ids) or ids[idx] is None): this_id = [_idval(val, argname, idx, idfn, config) - for val, argname in zip(valset, argnames)] + for val, argname in zip(parameterset.values, argnames)] return "-".join(this_id) else: return _escape_strings(ids[idx]) -def idmaker(argnames, argvalues, idfn=None, ids=None, config=None): - ids = [_idvalset(valindex, valset, argnames, idfn, ids, config) - for valindex, valset in enumerate(argvalues)] + +def idmaker(argnames, parametersets, idfn=None, ids=None, config=None): + ids = [_idvalset(valindex, parameterset, argnames, idfn, ids, config) + for valindex, parameterset in enumerate(parametersets)] if len(set(ids)) != len(ids): # The ids are not unique duplicates = [testid for testid in ids if ids.count(testid) > 1] @@ -1039,6 +1039,7 @@ def showfixtures(config): from _pytest.main import wrap_session return wrap_session(config, _showfixtures_main) + def _showfixtures_main(config, session): import _pytest.config session.perform_collect() @@ -1129,7 +1130,7 @@ def raises(expected_exception, *args, **kwargs): >>> with raises(ValueError) as exc_info: ... if value > 10: ... raise ValueError("value must be <= 10") - ... assert str(exc_info.value) == "value must be <= 10" # this will not execute + ... assert exc_info.type == ValueError # this will not execute Instead, the following approach must be taken (note the difference in scope):: @@ -1138,7 +1139,16 @@ def raises(expected_exception, *args, **kwargs): ... if value > 10: ... raise ValueError("value must be <= 10") ... - >>> assert str(exc_info.value) == "value must be <= 10" + >>> assert exc_info.type == ValueError + + Or you can use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with raises(ValueError, match=r'must be \d+$'): + ... 
raise ValueError("value must be 42") Or you can specify a callable by passing a to-be-called lambda:: @@ -1179,12 +1189,6 @@ def raises(expected_exception, *args, **kwargs): """ __tracebackhide__ = True - if expected_exception is AssertionError: - # we want to catch a AssertionError - # replace our subclass with the builtin one - # see https://github.com/pytest-dev/pytest/issues/176 - from _pytest.assertion.util import BuiltinAssertionError \ - as expected_exception msg = ("exceptions must be old-style classes or" " derived from BaseException, not %s") if isinstance(expected_exception, tuple): @@ -1195,11 +1199,15 @@ def raises(expected_exception, *args, **kwargs): raise TypeError(msg % type(expected_exception)) message = "DID NOT RAISE {0}".format(expected_exception) + match_expr = None if not args: if "message" in kwargs: message = kwargs.pop("message") - return RaisesContext(expected_exception, message) + if "match" in kwargs: + match_expr = kwargs.pop("match") + message += " matching '{0}'".format(match_expr) + return RaisesContext(expected_exception, message, match_expr) elif isinstance(args[0], str): code, = args assert isinstance(code, str) @@ -1220,12 +1228,17 @@ def raises(expected_exception, *args, **kwargs): func(*args[1:], **kwargs) except expected_exception: return _pytest._code.ExceptionInfo() - pytest.fail(message) + fail(message) + + +raises.Exception = fail.Exception + class RaisesContext(object): - def __init__(self, expected_exception, message): + def __init__(self, expected_exception, message, match_expr): self.expected_exception = expected_exception self.message = message + self.match_expr = match_expr self.excinfo = None def __enter__(self): @@ -1235,7 +1248,7 @@ class RaisesContext(object): def __exit__(self, *tp): __tracebackhide__ = True if tp[0] is None: - pytest.fail(self.message) + fail(self.message) if sys.version_info < (2, 7): # py26: on __exit__() exc_value often does not contain the # exception value. @@ -1247,6 +1260,8 @@ class RaisesContext(object): suppress_exception = issubclass(self.excinfo.type, self.expected_exception) if sys.version_info[0] == 2 and suppress_exception: sys.exc_clear() + if self.match_expr: + self.excinfo.match(self.match_expr) return suppress_exception @@ -1504,7 +1519,7 @@ class ApproxNonIterable(object): # the basic pytest Function item # -class Function(FunctionMixin, pytest.Item, fixtures.FuncargnamesCompatAttr): +class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr): """ a Function Item is responsible for setting up and executing a Python test function. """ diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py index d66f87e72..7dce842f6 100644 --- a/_pytest/recwarn.py +++ b/_pytest/recwarn.py @@ -1,4 +1,5 @@ """ recording warnings during test function execution. """ +from __future__ import absolute_import, division, print_function import inspect @@ -6,11 +7,11 @@ import _pytest._code import py import sys import warnings -import pytest +from _pytest.fixtures import yield_fixture -@pytest.yield_fixture -def recwarn(request): +@yield_fixture +def recwarn(): """Return a WarningsRecorder instance that provides these methods: * ``pop(category=None)``: return last warning matching the category. @@ -25,11 +26,6 @@ def recwarn(request): yield wrec -def pytest_namespace(): - return {'deprecated_call': deprecated_call, - 'warns': warns} - - def deprecated_call(func=None, *args, **kwargs): """ assert that calling ``func(*args, **kwargs)`` triggers a ``DeprecationWarning`` or ``PendingDeprecationWarning``. 
@@ -55,14 +51,12 @@ def deprecated_call(func=None, *args, **kwargs): def warn_explicit(message, category, *args, **kwargs): categories.append(category) - old_warn_explicit(message, category, *args, **kwargs) def warn(message, category=None, *args, **kwargs): if isinstance(message, Warning): categories.append(message.__class__) else: categories.append(category) - old_warn(message, category, *args, **kwargs) old_warn = warnings.warn old_warn_explicit = warnings.warn_explicit @@ -115,24 +109,14 @@ def warns(expected_warning, *args, **kwargs): return func(*args[1:], **kwargs) -class RecordedWarning(object): - def __init__(self, message, category, filename, lineno, file, line): - self.message = message - self.category = category - self.filename = filename - self.lineno = lineno - self.file = file - self.line = line - - -class WarningsRecorder(object): +class WarningsRecorder(warnings.catch_warnings): """A context manager to record raised warnings. Adapted from `warnings.catch_warnings`. """ - def __init__(self, module=None): - self._module = sys.modules['warnings'] if module is None else module + def __init__(self): + super(WarningsRecorder, self).__init__(record=True) self._entered = False self._list = [] @@ -169,38 +153,20 @@ class WarningsRecorder(object): if self._entered: __tracebackhide__ = True raise RuntimeError("Cannot enter %r twice" % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._showwarning = self._module.showwarning - - def showwarning(message, category, filename, lineno, - file=None, line=None): - self._list.append(RecordedWarning( - message, category, filename, lineno, file, line)) - - # still perform old showwarning functionality - self._showwarning( - message, category, filename, lineno, file=file, line=line) - - self._module.showwarning = showwarning - - # allow the same warning to be raised more than once - - self._module.simplefilter('always') + self._list = super(WarningsRecorder, self).__enter__() + warnings.simplefilter('always') return self def __exit__(self, *exc_info): if not self._entered: __tracebackhide__ = True raise RuntimeError("Cannot exit %r without entering first" % self) - self._module.filters = self._filters - self._module.showwarning = self._showwarning + super(WarningsRecorder, self).__exit__(*exc_info) class WarningsChecker(WarningsRecorder): - def __init__(self, expected_warning=None, module=None): - super(WarningsChecker, self).__init__(module=module) + def __init__(self, expected_warning=None): + super(WarningsChecker, self).__init__() msg = ("exceptions must be old-style classes or " "derived from Warning, not %s") @@ -221,9 +187,11 @@ class WarningsChecker(WarningsRecorder): # only check if we're not currently handling an exception if all(a is None for a in exc_info): if self.expected_warning is not None: - if not any(r.category in self.expected_warning for r in self): + if not any(issubclass(r.category, self.expected_warning) + for r in self): __tracebackhide__ = True - pytest.fail("DID NOT WARN. No warnings of type {0} was emitted. " - "The list of emitted warnings is: {1}.".format( - self.expected_warning, - [each.message for each in self])) + from _pytest.runner import fail + fail("DID NOT WARN. No warnings of type {0} was emitted. 
" + "The list of emitted warnings is: {1}.".format( + self.expected_warning, + [each.message for each in self])) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py index fc0025983..3e4b00cf9 100644 --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -1,6 +1,7 @@ """ log machine-parseable test session result information in a plain text file. """ +from __future__ import absolute_import, division, print_function import py import os @@ -61,9 +62,9 @@ class ResultLog(object): self.logfile = logfile # preferably line buffered def write_log_entry(self, testpath, lettercode, longrepr): - py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) + print("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): - py.builtin.print_(" %s" % line, file=self.logfile) + print(" %s" % line, file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) diff --git a/_pytest/runner.py b/_pytest/runner.py index eb29e7370..fd0b549a9 100644 --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -1,20 +1,14 @@ """ basic collect and runtest protocol implementations """ +from __future__ import absolute_import, division, print_function + import bdb import sys from time import time import py -import pytest from _pytest._code.code import TerminalRepr, ExceptionInfo -def pytest_namespace(): - return { - 'fail' : fail, - 'skip' : skip, - 'importorskip' : importorskip, - 'exit' : exit, - } # # pytest plugin hooks @@ -262,7 +256,7 @@ def pytest_runtest_makereport(item, call): if not isinstance(excinfo, ExceptionInfo): outcome = "failed" longrepr = excinfo - elif excinfo.errisinstance(pytest.skip.Exception): + elif excinfo.errisinstance(skip.Exception): outcome = "skipped" r = excinfo._getreprcrash() longrepr = (str(r.path), r.lineno, r.message) @@ -330,7 +324,9 @@ class TeardownErrorReport(BaseReport): self.__dict__.update(extra) def pytest_make_collect_report(collector): - call = CallInfo(collector._memocollect, "memocollect") + call = CallInfo( + lambda: list(collector.collect()), + 'collect') longrepr = None if not call.excinfo: outcome = "passed" @@ -550,14 +546,21 @@ def importorskip(modname, minversion=None): __version__ attribute. If no minversion is specified the a skip is only triggered if the module can not be imported. 
""" + import warnings __tracebackhide__ = True compile(modname, '', 'eval') # to catch syntaxerrors should_skip = False - try: - __import__(modname) - except ImportError: - # Do not raise chained exception here(#1485) - should_skip = True + + with warnings.catch_warnings(): + # make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file + warnings.simplefilter('ignore') + try: + __import__(modname) + except ImportError: + # Do not raise chained exception here(#1485) + should_skip = True if should_skip: raise Skipped("could not import %r" %(modname,), allow_module_level=True) mod = sys.modules[modname] @@ -575,4 +578,3 @@ def importorskip(modname, minversion=None): raise Skipped("module %r has __version__ %r, required is: %r" %( modname, verattr, minversion), allow_module_level=True) return mod - diff --git a/_pytest/setuponly.py b/_pytest/setuponly.py index 1752c575f..15e195ad5 100644 --- a/_pytest/setuponly.py +++ b/_pytest/setuponly.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import, division, print_function + import pytest import sys diff --git a/_pytest/setupplan.py b/_pytest/setupplan.py index f0853dee5..e11bd4069 100644 --- a/_pytest/setupplan.py +++ b/_pytest/setupplan.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import, division, print_function + import pytest diff --git a/_pytest/skipping.py b/_pytest/skipping.py index 0af5573ac..5af1ca404 100644 --- a/_pytest/skipping.py +++ b/_pytest/skipping.py @@ -1,12 +1,14 @@ """ support for skip/xfail functions and markers. """ +from __future__ import absolute_import, division, print_function + import os import sys import traceback import py -import pytest +from _pytest.config import hookimpl from _pytest.mark import MarkInfo, MarkDecorator - +from _pytest.runner import fail, skip def pytest_addoption(parser): group = parser.getgroup("general") @@ -23,6 +25,8 @@ def pytest_addoption(parser): def pytest_configure(config): if config.option.runxfail: + # yay a hack + import pytest old = pytest.xfail config._cleanup.append(lambda: setattr(pytest, "xfail", old)) @@ -55,11 +59,7 @@ def pytest_configure(config): ) -def pytest_namespace(): - return dict(xfail=xfail) - - -class XFailed(pytest.fail.Exception): +class XFailed(fail.Exception): """ raised from an explicit call to pytest.xfail() """ @@ -100,15 +100,15 @@ class MarkEvaluator: except Exception: self.exc = sys.exc_info() if isinstance(self.exc[1], SyntaxError): - msg = [" " * (self.exc[1].offset + 4) + "^",] + msg = [" " * (self.exc[1].offset + 4) + "^", ] msg.append("SyntaxError: invalid syntax") else: msg = traceback.format_exception_only(*self.exc[:2]) - pytest.fail("Error evaluating %r expression\n" - " %s\n" - "%s" - %(self.name, self.expr, "\n".join(msg)), - pytrace=False) + fail("Error evaluating %r expression\n" + " %s\n" + "%s" + % (self.name, self.expr, "\n".join(msg)), + pytrace=False) def _getglobals(self): d = {'os': os, 'sys': sys, 'config': self.item.config} @@ -125,11 +125,9 @@ class MarkEvaluator: # "holder" might be a MarkInfo or a MarkDecorator; only # MarkInfo keeps track of all parameters it received in an # _arglist attribute - if hasattr(self.holder, '_arglist'): - arglist = self.holder._arglist - else: - arglist = [(self.holder.args, self.holder.kwargs)] - for args, kwargs in arglist: + marks = getattr(self.holder, '_marks', None) \ + or [self.holder.mark] + for _, args, kwargs in marks: if 'condition' in kwargs: args = (kwargs['condition'],) for 
expr in args: @@ -142,7 +140,7 @@ class MarkEvaluator: # XXX better be checked at collection time msg = "you need to specify reason=STRING " \ "when using booleans as conditions." - pytest.fail(msg) + fail(msg) result = bool(expr) if result: self.result = True @@ -166,7 +164,7 @@ class MarkEvaluator: return expl -@pytest.hookimpl(tryfirst=True) +@hookimpl(tryfirst=True) def pytest_runtest_setup(item): # Check if skip or skipif are specified as pytest marks @@ -175,23 +173,23 @@ def pytest_runtest_setup(item): eval_skipif = MarkEvaluator(item, 'skipif') if eval_skipif.istrue(): item._evalskip = eval_skipif - pytest.skip(eval_skipif.getexplanation()) + skip(eval_skipif.getexplanation()) skip_info = item.keywords.get('skip') if isinstance(skip_info, (MarkInfo, MarkDecorator)): item._evalskip = True if 'reason' in skip_info.kwargs: - pytest.skip(skip_info.kwargs['reason']) + skip(skip_info.kwargs['reason']) elif skip_info.args: - pytest.skip(skip_info.args[0]) + skip(skip_info.args[0]) else: - pytest.skip("unconditional skip") + skip("unconditional skip") item._evalxfail = MarkEvaluator(item, 'xfail') check_xfail_no_run(item) -@pytest.mark.hookwrapper +@hookimpl(hookwrapper=True) def pytest_pyfunc_call(pyfuncitem): check_xfail_no_run(pyfuncitem) outcome = yield @@ -206,7 +204,7 @@ def check_xfail_no_run(item): evalxfail = item._evalxfail if evalxfail.istrue(): if not evalxfail.get('run', True): - pytest.xfail("[NOTRUN] " + evalxfail.getexplanation()) + xfail("[NOTRUN] " + evalxfail.getexplanation()) def check_strict_xfail(pyfuncitem): @@ -218,10 +216,10 @@ def check_strict_xfail(pyfuncitem): if is_strict_xfail: del pyfuncitem._evalxfail explanation = evalxfail.getexplanation() - pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False) + fail('[XPASS(strict)] ' + explanation, pytrace=False) -@pytest.hookimpl(hookwrapper=True) +@hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() @@ -241,7 +239,7 @@ def pytest_runtest_makereport(item, call): rep.wasxfail = rep.longrepr elif item.config.option.runxfail: pass # don't interefere - elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception): + elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = "skipped" elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \ @@ -309,12 +307,14 @@ def pytest_terminal_summary(terminalreporter): for line in lines: tr._tw.line(line) + def show_simple(terminalreporter, lines, stat, format): failed = terminalreporter.stats.get(stat) if failed: for rep in failed: pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) - lines.append(format %(pos,)) + lines.append(format % (pos,)) + def show_xfailed(terminalreporter, lines): xfailed = terminalreporter.stats.get("xfailed") @@ -326,13 +326,15 @@ def show_xfailed(terminalreporter, lines): if reason: lines.append(" " + str(reason)) + def show_xpassed(terminalreporter, lines): xpassed = terminalreporter.stats.get("xpassed") if xpassed: for rep in xpassed: pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) reason = rep.wasxfail - lines.append("XPASS %s %s" %(pos, reason)) + lines.append("XPASS %s %s" % (pos, reason)) + def cached_eval(config, expr, d): if not hasattr(config, '_evalcache'): @@ -357,6 +359,7 @@ def folded_skips(skipped): l.append((len(events),) + key) return l + def show_skipped(terminalreporter, lines): tr = terminalreporter skipped = tr.stats.get('skipped', []) @@ -372,5 +375,6 @@ 
def show_skipped(terminalreporter, lines): for num, fspath, lineno, reason in fskips: if reason.startswith("Skipped: "): reason = reason[9:] - lines.append("SKIP [%d] %s:%d: %s" % + lines.append( + "SKIP [%d] %s:%d: %s" % (num, fspath, lineno, reason)) diff --git a/_pytest/terminal.py b/_pytest/terminal.py index 79e065329..e226d607b 100644 --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -2,6 +2,9 @@ This is a good source for looking at the various reporting hooks. """ +from __future__ import absolute_import, division, print_function + +import itertools from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \ EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED import pytest @@ -24,11 +27,11 @@ def pytest_addoption(parser): help="show extra test summary info as specified by chars (f)ailed, " "(E)error, (s)skipped, (x)failed, (X)passed, " "(p)passed, (P)passed with output, (a)all except pP. " - "The pytest warnings are displayed at all times except when " - "--disable-pytest-warnings is set") - group._addoption('--disable-pytest-warnings', default=False, - dest='disablepytestwarnings', action='store_true', - help='disable warnings summary, overrides -r w flag') + "Warnings are displayed at all times except when " + "--disable-warnings is set") + group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False, + dest='disable_warnings', action='store_true', + help='disable warnings summary') group._addoption('-l', '--showlocals', action="store_true", dest="showlocals", default=False, help="show locals in tracebacks (disabled by default).") @@ -57,9 +60,9 @@ def pytest_configure(config): def getreportopt(config): reportopts = "" reportchars = config.option.reportchars - if not config.option.disablepytestwarnings and 'w' not in reportchars: + if not config.option.disable_warnings and 'w' not in reportchars: reportchars += 'w' - elif config.option.disablepytestwarnings and 'w' in reportchars: + elif config.option.disable_warnings and 'w' in reportchars: reportchars = reportchars.replace('w', '') if reportchars: for char in reportchars: @@ -80,13 +83,40 @@ def pytest_report_teststatus(report): letter = "f" return report.outcome, letter, report.outcome.upper() + class WarningReport: + """ + Simple structure to hold warnings information captured by ``pytest_logwarning``. + """ def __init__(self, code, message, nodeid=None, fslocation=None): + """ + :param code: unused + :param str message: user friendly message about the warning + :param str|None nodeid: node id that generated the warning (see ``get_location``). + :param tuple|py.path.local fslocation: + file system location of the source of the warning (see ``get_location``). + """ self.code = code self.message = message self.nodeid = nodeid self.fslocation = fslocation + def get_location(self, config): + """ + Returns the more user-friendly information about the location + of a warning, or None. 
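``check_strict_xfail`` further up turns an unexpected pass of a ``strict`` xfail into a hard failure. A minimal sketch of the marker it reacts to (the reason text is illustrative)::

    import pytest

    @pytest.mark.xfail(strict=True, reason="illustrative known bug")
    def test_known_bug():
        # Expected to fail; if this assertion ever passed, strict=True would
        # report "[XPASS(strict)]" as a failure via check_strict_xfail.
        assert 2 + 2 == 5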
+ """ + if self.nodeid: + return self.nodeid + if self.fslocation: + if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: + filename, linenum = self.fslocation[:2] + relpath = py.path.local(filename).relto(config.invocation_dir) + return '%s:%s' % (relpath, linenum) + else: + return str(self.fslocation) + return None + class TerminalReporter: def __init__(self, config, file=None): @@ -166,8 +196,6 @@ class TerminalReporter: def pytest_logwarning(self, code, fslocation, message, nodeid): warnings = self.stats.setdefault("warnings", []) - if isinstance(fslocation, tuple): - fslocation = "%s:%d" % fslocation warning = WarningReport(code=code, fslocation=fslocation, message=message, nodeid=nodeid) warnings.append(warning) @@ -438,13 +466,21 @@ class TerminalReporter: def summary_warnings(self): if self.hasopt("w"): - warnings = self.stats.get("warnings") - if not warnings: + all_warnings = self.stats.get("warnings") + if not all_warnings: return - self.write_sep("=", "pytest-warning summary") - for w in warnings: - self._tw.line("W%s %s %s" % (w.code, - w.fslocation, w.message)) + + grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config)) + + self.write_sep("=", "warnings summary", yellow=True, bold=False) + for location, warnings in grouped: + self._tw.line(str(location) or '') + for w in warnings: + lines = w.message.splitlines() + indented = '\n'.join(' ' + x for x in lines) + self._tw.line(indented) + self._tw.line() + self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html') def summary_passes(self): if self.config.option.tbstyle != "no": @@ -546,8 +582,7 @@ def flatten(l): def build_summary_stats_line(stats): keys = ("failed passed skipped deselected " - "xfailed xpassed warnings error").split() - key_translation = {'warnings': 'pytest-warnings'} + "xfailed xpassed warnings error").split() unknown_key_seen = False for key in stats.keys(): if key not in keys: @@ -558,8 +593,7 @@ def build_summary_stats_line(stats): for key in keys: val = stats.get(key, None) if val: - key_name = key_translation.get(key, key) - parts.append("%d %s" % (len(val), key_name)) + parts.append("%d %s" % (len(val), key)) if parts: line = ", ".join(parts) diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py index 0f878ad01..596014059 100644 --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -1,4 +1,6 @@ """ support for providing temporary directories to test functions. """ +from __future__ import absolute_import, division, print_function + import re import pytest diff --git a/_pytest/unittest.py b/_pytest/unittest.py index 276b9ba16..0cf0f1726 100644 --- a/_pytest/unittest.py +++ b/_pytest/unittest.py @@ -1,14 +1,15 @@ """ discovery and running of std-library "unittest" style tests. 
""" -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import sys import traceback -import pytest # for transferring markers import _pytest._code -from _pytest.python import transfer_markers -from _pytest.skipping import MarkEvaluator +from _pytest.config import hookimpl +from _pytest.runner import fail, skip +from _pytest.python import transfer_markers, Class, Module, Function +from _pytest.skipping import MarkEvaluator, xfail def pytest_pycollect_makeitem(collector, name, obj): @@ -22,11 +23,11 @@ def pytest_pycollect_makeitem(collector, name, obj): return UnitTestCase(name, parent=collector) -class UnitTestCase(pytest.Class): +class UnitTestCase(Class): # marker for fixturemanger.getfixtureinfo() # to declare that our children do not support funcargs nofuncargs = True - + def setup(self): cls = self.obj if getattr(cls, '__unittest_skip__', False): @@ -46,7 +47,7 @@ class UnitTestCase(pytest.Class): return self.session._fixturemanager.parsefactories(self, unittest=True) loader = TestLoader() - module = self.getparent(pytest.Module).obj + module = self.getparent(Module).obj foundsomething = False for name in loader.getTestCaseNames(self.obj): x = getattr(self.obj, name) @@ -65,7 +66,7 @@ class UnitTestCase(pytest.Class): yield TestCaseFunction('runTest', parent=self) -class TestCaseFunction(pytest.Function): +class TestCaseFunction(Function): _excinfo = None def setup(self): @@ -110,36 +111,37 @@ class TestCaseFunction(pytest.Function): try: l = traceback.format_exception(*rawexcinfo) l.insert(0, "NOTE: Incompatible Exception Representation, " - "displaying natively:\n\n") - pytest.fail("".join(l), pytrace=False) - except (pytest.fail.Exception, KeyboardInterrupt): + "displaying natively:\n\n") + fail("".join(l), pytrace=False) + except (fail.Exception, KeyboardInterrupt): raise except: - pytest.fail("ERROR: Unknown Incompatible Exception " - "representation:\n%r" %(rawexcinfo,), pytrace=False) + fail("ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), pytrace=False) except KeyboardInterrupt: raise - except pytest.fail.Exception: + except fail.Exception: excinfo = _pytest._code.ExceptionInfo() self.__dict__.setdefault('_excinfo', []).append(excinfo) def addError(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) + def addFailure(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) def addSkip(self, testcase, reason): try: - pytest.skip(reason) - except pytest.skip.Exception: + skip(reason) + except skip.Exception: self._evalskip = MarkEvaluator(self, 'SkipTest') self._evalskip.result = True self._addexcinfo(sys.exc_info()) def addExpectedFailure(self, testcase, rawexcinfo, reason=""): try: - pytest.xfail(str(reason)) - except pytest.xfail.Exception: + xfail(str(reason)) + except xfail.Exception: self._addexcinfo(sys.exc_info()) def addUnexpectedSuccess(self, testcase, reason=""): @@ -179,13 +181,14 @@ class TestCaseFunction(pytest.Function): self._testcase.debug() def _prunetraceback(self, excinfo): - pytest.Function._prunetraceback(self, excinfo) + Function._prunetraceback(self, excinfo) traceback = excinfo.traceback.filter( - lambda x:not x.frame.f_globals.get('__unittest')) + lambda x: not x.frame.f_globals.get('__unittest')) if traceback: excinfo.traceback = traceback -@pytest.hookimpl(tryfirst=True) + +@hookimpl(tryfirst=True) def pytest_runtest_makereport(item, call): if isinstance(item, TestCaseFunction): if item._excinfo: @@ -197,7 +200,8 @@ def pytest_runtest_makereport(item, call): # 
twisted trial support -@pytest.hookimpl(hookwrapper=True) + +@hookimpl(hookwrapper=True) def pytest_runtest_protocol(item): if isinstance(item, TestCaseFunction) and \ 'twisted.trial.unittest' in sys.modules: diff --git a/_pytest/warnings.py b/_pytest/warnings.py new file mode 100644 index 000000000..bfa2b0087 --- /dev/null +++ b/_pytest/warnings.py @@ -0,0 +1,73 @@ +from __future__ import absolute_import, division, print_function + +import warnings +from contextlib import contextmanager + +import pytest + + +def _setoption(wmod, arg): + """ + Copy of the warning._setoption function but does not escape arguments. + """ + parts = arg.split(':') + if len(parts) > 5: + raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) + while len(parts) < 5: + parts.append('') + action, message, category, module, lineno = [s.strip() + for s in parts] + action = wmod._getaction(action) + category = wmod._getcategory(category) + if lineno: + try: + lineno = int(lineno) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError): + raise wmod._OptionError("invalid lineno %r" % (lineno,)) + else: + lineno = 0 + wmod.filterwarnings(action, message, category, module, lineno) + + +def pytest_addoption(parser): + group = parser.getgroup("pytest-warnings") + group.addoption( + '-W', '--pythonwarnings', action='append', + help="set which warnings to report, see -W option of python itself.") + parser.addini("filterwarnings", type="linelist", + help="Each line specifies warning filter pattern which would be passed" + "to warnings.filterwarnings. Process after -W and --pythonwarnings.") + + +@contextmanager +def catch_warnings_for_item(item): + """ + catches the warnings generated during setup/call/teardown execution + of the given item and after it is done posts them as warnings to this + item. 
+ """ + args = item.config.getoption('pythonwarnings') or [] + inifilters = item.config.getini("filterwarnings") + with warnings.catch_warnings(record=True) as log: + warnings.simplefilter('once') + for arg in args: + warnings._setoption(arg) + + for arg in inifilters: + _setoption(warnings, arg) + + yield + + for warning in log: + msg = warnings.formatwarning( + warning.message, warning.category, + warning.filename, warning.lineno, warning.line) + item.warn("unused", msg) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item): + with catch_warnings_for_item(item): + yield diff --git a/doc/en/Makefile b/doc/en/Makefile index 5499c405e..286bbd8e7 100644 --- a/doc/en/Makefile +++ b/doc/en/Makefile @@ -17,7 +17,12 @@ REGENDOC_ARGS := \ --normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \ --normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \ --normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \ - + --normalize "@pytest-(\d+)\\.[^ ,]+@pytest-\1.x.y@" \ + --normalize "@(This is pytest version )(\d+)\\.[^ ,]+@\1\2.x.y@" \ + --normalize "@py-(\d+)\\.[^ ,]+@py-\1.x.y@" \ + --normalize "@pluggy-(\d+)\\.[.\d,]+@pluggy-\1.x.y@" \ + --normalize "@hypothesis-(\d+)\\.[.\d,]+@hypothesis-\1.x.y@" \ + --normalize "@Python (\d+)\\.[^ ,]+@Python \1.x.y@" .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest @@ -36,7 +41,7 @@ clean: -rm -rf $(BUILDDIR)/* regen: - PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS} + PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPT=-pno:hypothesis COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS} html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index 52dd90de0..5eadc9bf1 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -6,6 +6,7 @@ Release announcements :maxdepth: 2 + release-3.1.0 release-3.0.7 release-3.0.6 release-3.0.5 diff --git a/doc/en/announce/release-3.1.0.rst b/doc/en/announce/release-3.1.0.rst new file mode 100644 index 000000000..99cc6bdbe --- /dev/null +++ b/doc/en/announce/release-3.1.0.rst @@ -0,0 +1,61 @@ +pytest-3.1.0 +======================================= + +The pytest team is proud to announce the 3.1.0 release! + +pytest is a mature Python testing tool with more than a 1600 tests +against itself, passing on many different interpreters and platforms. 
+ +This release contains a bugs fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + +http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Anthony Sottile +* Ben Lloyd +* Bruno Oliveira +* David Giese +* David Szotten +* Dmitri Pribysh +* Florian Bruhin +* Florian Schulze +* Floris Bruynooghe +* John Towler +* Jonas Obrist +* Katerina Koukiou +* Kodi Arfer +* Krzysztof Szularz +* Lev Maximov +* Loïc Estève +* Luke Murphy +* Manuel Krebber +* Matthew Duck +* Matthias Bussonnier +* Michael Howitz +* Michal Wajszczuk +* Paweł Adamczak +* Rafael Bertoldi +* Ravi Chandra +* Ronny Pfannschmidt +* Skylar Downes +* Thomas Kriechbaumer +* Vitaly Lashmanov +* Vlad Dragos +* Wheerd +* Xander Johnson +* mandeep +* reut + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/assert.rst b/doc/en/assert.rst index 1d5def87b..d3d06804e 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -26,7 +26,7 @@ you will see the return value of the function call:: $ pytest test_assert1.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items @@ -170,7 +170,7 @@ if you run this module:: $ pytest test_assert2.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items @@ -223,7 +223,7 @@ provides an alternative explanation for ``Foo`` objects:: now, given this test module:: # content of test_foocompare.py - class Foo: + class Foo(object): def __init__(self, val): self.val = val diff --git a/doc/en/cache.rst b/doc/en/cache.rst index b3b992507..9672562af 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -80,7 +80,7 @@ If you then run it with ``--lf``:: $ pytest --lf ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y run-last-failure: rerun last 2 failures rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items @@ -122,7 +122,7 @@ of ``FF`` and dots):: $ pytest --ff ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y run-last-failure: rerun last 2 failures first rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items @@ -227,14 +227,14 @@ You can always peek at the content of the cache using the $ py.test --cache-show ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: cachedir: $REGENDOC_TMPDIR/.cache ------------------------------- cache values ------------------------------- - cache/lastfailed contains: - {'test_caching.py::test_function': True} example/value contains: 42 + cache/lastfailed contains: + {'test_caching.py::test_function': True} ======= no tests ran in 0.12 seconds ======== diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 2f666b7bc..58ebdf840 100644 --- a/doc/en/capture.rst +++ 
b/doc/en/capture.rst @@ -64,7 +64,7 @@ of the failing function and hide the other one:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items diff --git a/doc/en/contents.rst b/doc/en/contents.rst index ce6396e47..9f4a9a1be 100644 --- a/doc/en/contents.rst +++ b/doc/en/contents.rst @@ -19,7 +19,7 @@ Full pytest documentation monkeypatch tmpdir capture - recwarn + warnings doctest mark skipping diff --git a/doc/en/customize.rst b/doc/en/customize.rst index e1d8fc571..ce0a36c11 100644 --- a/doc/en/customize.rst +++ b/doc/en/customize.rst @@ -160,7 +160,7 @@ Builtin configuration file options [seq] matches any character in seq [!seq] matches any char not in seq - Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg'``. + Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'``. Setting a ``norecursedirs`` replaces the default. Here is an example of how to avoid certain directories: @@ -242,3 +242,23 @@ Builtin configuration file options By default, pytest will stop searching for ``conftest.py`` files upwards from ``pytest.ini``/``tox.ini``/``setup.cfg`` of the project if any, or up to the file-system root. + + +.. confval:: filterwarnings + + .. versionadded:: 3.1 + + Sets a list of filters and actions that should be taken for matched + warnings. By default all warnings emitted during the test session + will be displayed in a summary at the end of the test session. + + .. code-block:: ini + + # content of pytest.ini + [pytest] + filterwarnings = + error + ignore::DeprecationWarning + + This tells pytest to ignore deprecation warnings and turn all other warnings + into errors. For more information please refer to :ref:`warnings`. diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index fd92eb7be..24c068a86 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -11,6 +11,19 @@ can change the pattern by issuing:: on the command line. Since version ``2.9``, ``--doctest-glob`` can be given multiple times in the command-line. +.. versionadded:: 3.1 + + You can specify the encoding that will be used for those doctest files + using the ``doctest_encoding`` ini option: + + .. code-block:: ini + + # content of pytest.ini + [pytest] + doctest_encoding = latin1 + + The default encoding is UTF-8. 
+ You can also trigger running of doctests from docstrings in all python modules (including regular python test modules):: @@ -49,7 +62,7 @@ then you can just invoke ``pytest`` without command line options:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 1 items diff --git a/doc/en/example/assertion/failure_demo.py b/doc/en/example/assertion/failure_demo.py index a4ff758b1..d31fba2ad 100644 --- a/doc/en/example/assertion/failure_demo.py +++ b/doc/en/example/assertion/failure_demo.py @@ -128,7 +128,7 @@ def test_attribute_multiple(): def globf(x): return x+1 -class TestRaises: +class TestRaises(object): def test_raises(self): s = 'qwe' raises(TypeError, "int(s)") @@ -167,7 +167,7 @@ def test_dynamic_compile_shows_nicely(): -class TestMoreErrors: +class TestMoreErrors(object): def test_complex_error(self): def f(): return 44 @@ -213,23 +213,23 @@ class TestMoreErrors: x = 0 -class TestCustomAssertMsg: +class TestCustomAssertMsg(object): def test_single_line(self): - class A: + class A(object): a = 1 b = 2 assert A.a == b, "A.a appears not to be b" def test_multiline(self): - class A: + class A(object): a = 1 b = 2 assert A.a == b, "A.a appears not to be b\n" \ "or does not appear to be b\none of those" def test_custom_repr(self): - class JSON: + class JSON(object): a = 1 def __repr__(self): return "This is JSON\n{\n 'foo': 'bar'\n}" diff --git a/doc/en/example/assertion/test_setup_flow_example.py b/doc/en/example/assertion/test_setup_flow_example.py index 512330cb4..100effa49 100644 --- a/doc/en/example/assertion/test_setup_flow_example.py +++ b/doc/en/example/assertion/test_setup_flow_example.py @@ -1,7 +1,7 @@ def setup_module(module): module.TestStateFullThing.classcount = 0 -class TestStateFullThing: +class TestStateFullThing(object): def setup_class(cls): cls.classcount += 1 diff --git a/doc/en/example/attic.rst b/doc/en/example/attic.rst index 1bc32b283..6004ebb8f 100644 --- a/doc/en/example/attic.rst +++ b/doc/en/example/attic.rst @@ -15,7 +15,7 @@ example: specifying and selecting acceptance tests def pytest_funcarg__accept(request): return AcceptFixture(request) - class AcceptFixture: + class AcceptFixture(object): def __init__(self, request): if not request.config.option.acceptance: pytest.skip("specify -A to run acceptance tests") @@ -61,7 +61,7 @@ extend the `accept example`_ by putting this in our test module: arg.tmpdir.mkdir("special") return arg - class TestSpecialAcceptance: + class TestSpecialAcceptance(object): def test_sometest(self, accept): assert accept.tmpdir.join("special").check() diff --git a/doc/en/example/costlysetup/conftest.py b/doc/en/example/costlysetup/conftest.py index c8b9a257e..ea3c1cffb 100644 --- a/doc/en/example/costlysetup/conftest.py +++ b/doc/en/example/costlysetup/conftest.py @@ -7,7 +7,7 @@ def setup(request): yield setup setup.finalize() -class CostlySetup: +class CostlySetup(object): def __init__(self): import time print ("performing costly setup") diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index 0c96d408c..338f707a5 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -21,7 +21,7 @@ You can "mark" a test function with custom metadata like this:: pass def test_another(): pass - class TestClass: + class TestClass(object): def test_method(self): pass @@ -31,7 +31,7 @@ You can then restrict a test run to 
only run tests marked with ``webtest``:: $ pytest -v -m webtest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items @@ -45,7 +45,7 @@ Or the inverse, running all tests except the webtest ones:: $ pytest -v -m "not webtest" ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items @@ -66,7 +66,7 @@ tests based on their module, class, method, or function name:: $ pytest -v test_server.py::TestClass::test_method ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 5 items @@ -79,7 +79,7 @@ You can also select on the class:: $ pytest -v test_server.py::TestClass ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items @@ -92,7 +92,7 @@ Or select multiple nodes:: $ pytest -v test_server.py::TestClass test_server.py::test_send_http ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items @@ -130,7 +130,7 @@ select tests based on their names:: $ pytest -v -k http # running with the above defined example module ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items @@ -144,7 +144,7 @@ And you can also run all tests except the ones that match the keyword:: $ pytest -k "not send_http" -v ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items @@ -160,7 +160,7 @@ Or to select "http" and "quick" tests:: $ pytest -k "http or quick" -v ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items @@ -242,7 +242,7 @@ its test methods:: # content of test_mark_classlevel.py import pytest @pytest.mark.webtest - class TestClass: + class TestClass(object): def test_startup(self): pass def test_startup_and_more(self): @@ -256,14 +256,14 @@ To remain backward-compatible with Python 2.4 you can also set a import pytest - class TestClass: + class TestClass(object): pytestmark = pytest.mark.webtest or if you need to use multiple markers you can use a list:: import pytest - class TestClass: + class TestClass(object): pytestmark = [pytest.mark.webtest, pytest.mark.slowtest] You can also set a module level marker:: @@ -352,7 +352,7 @@ the test needs:: $ pytest -E stage2 ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items @@ -364,7 +364,7 @@ and here is one that specifies exactly the environment needed:: $ pytest -E stage1 ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items @@ -407,7 +407,7 @@ code you can read over all such settings. Example:: pytestmark = pytest.mark.glob("module", x=1) @pytest.mark.glob("class", x=2) - class TestClass: + class TestClass(object): @pytest.mark.glob("function", x=3) def test_something(self): pass @@ -485,7 +485,7 @@ then you will see two tests skipped and two executed tests as expected:: $ pytest -rs # this option reports skip reasons ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -499,7 +499,7 @@ Note that if you specify a platform via the marker-command line option like this $ pytest -m linux ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -551,7 +551,7 @@ We can now use the ``-m option`` to select one set:: $ pytest -m interface --tb=short ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -573,7 +573,7 @@ or to select both "event" and "interface" tests:: $ pytest -m "interface or event" --tb=short ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items diff --git a/doc/en/example/multipython.py b/doc/en/example/multipython.py index 1f5e976ef..586f44184 100644 --- a/doc/en/example/multipython.py +++ b/doc/en/example/multipython.py @@ -16,7 +16,7 @@ def python1(request, tmpdir): def python2(request, python1): return Python(request.param, python1.picklefile) -class Python: +class Python(object): def __init__(self, version, picklefile): self.pythonpath = py.path.local.sysfind(version) if not self.pythonpath: diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 38918dba4..5784f6ed6 100644 --- a/doc/en/example/nonpython.rst +++ 
b/doc/en/example/nonpython.rst @@ -27,7 +27,7 @@ now execute the test specification:: nonpython $ pytest test_simple.yml ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items @@ -59,7 +59,7 @@ consulted when reporting in ``verbose`` mode:: nonpython $ pytest -v ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collecting ... collected 2 items @@ -81,7 +81,7 @@ interesting to just look at the collection tree:: nonpython $ pytest --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 9dd2829de..7a9992ca7 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -130,7 +130,7 @@ objects, they are still using the default pytest representation:: $ pytest test_time.py --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 6 items @@ -168,7 +168,7 @@ only have to work a bit to construct the correct arguments for pytest's scenario1 = ('basic', {'attribute': 'value'}) scenario2 = ('advanced', {'attribute': 'value2'}) - class TestSampleWithScenarios: + class TestSampleWithScenarios(object): scenarios = [scenario1, scenario2] def test_demo1(self, attribute): @@ -181,7 +181,7 @@ this is a fully self-contained example which you can run with:: $ pytest test_scenarios.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -194,7 +194,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia $ pytest --collect-only test_scenarios.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -241,9 +241,9 @@ creates a database object for the actual test invocations:: if 'db' in metafunc.fixturenames: metafunc.parametrize("db", ['d1', 'd2'], indirect=True) - class DB1: + class DB1(object): "one database object" - class DB2: + class DB2(object): "alternative database object" @pytest.fixture @@ -259,7 +259,7 @@ Let's first see how it looks like at collection time:: $ pytest test_backends.py --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items @@ -320,7 +320,7 @@ The result of this test will be successful:: $ pytest test_indirect_list.py --collect-only ======= test session starts ======== - platform linux 
-- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items @@ -350,7 +350,7 @@ parametrizer`_ but in a lot less code:: metafunc.parametrize(argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]) - class TestClass: + class TestClass(object): # a map specifying multiple argument sets for a test method params = { 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ], @@ -447,7 +447,7 @@ If you run this with reporting for skips enabled:: $ pytest -rs test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items diff --git a/doc/en/example/pythoncollection.py b/doc/en/example/pythoncollection.py index 0b9e35df4..9c4bd31ce 100644 --- a/doc/en/example/pythoncollection.py +++ b/doc/en/example/pythoncollection.py @@ -4,7 +4,7 @@ def test_function(): pass -class TestClass: +class TestClass(object): def test_method(self): pass def test_anothermethod(self): diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 428b832f7..8d36c2e37 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -107,7 +107,7 @@ This would make ``pytest`` look for tests in files that match the ``check_* that match ``*_check``. For example, if we have:: # content of check_myapp.py - class CheckMyApp: + class CheckMyApp(object): def simple_check(self): pass def complex_check(self): @@ -117,7 +117,7 @@ then the test collection looks like this:: $ pytest --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 2 items @@ -163,7 +163,7 @@ You can always peek at the collection tree without running tests like this:: . $ pytest --collect-only pythoncollection.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 3 items @@ -230,7 +230,7 @@ will be left out:: $ pytest --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 0 items diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 1323f1933..47c18851d 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -11,7 +11,7 @@ get on the terminal - we are working on that):: assertion $ pytest failure_demo.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/assertion, inifile: collected 42 items @@ -144,13 +144,9 @@ get on the terminal - we are working on that):: E 1 E 1 E 1 - E 1 - E - a2 - E + b2 - E 2 - E 2 - E 2 - E 2 + E 1... 
+ E + E ...Full output truncated (7 lines hidden), use '-vv' to show failure_demo.py:59: AssertionError _______ TestSpecialisedExplanations.test_eq_list ________ @@ -184,14 +180,15 @@ get on the terminal - we are working on that):: def test_eq_dict(self): > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} E AssertionError: assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} - E Omitting 1 identical items, use -v to show + E Omitting 1 identical items, use -vv to show E Differing items: E {'b': 1} != {'b': 2} E Left contains more items: E {'c': 0} E Right contains more items: - E {'d': 0} - E Use -v to get the full diff + E {'d': 0}... + E + E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:70: AssertionError _______ TestSpecialisedExplanations.test_eq_set ________ @@ -200,15 +197,16 @@ get on the terminal - we are working on that):: def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) - E assert {0, 10, 11, 12} == {0, 20, 21} + E AssertionError: assert {0, 10, 11, 12} == {0, 20, 21} E Extra items in the left set: E 10 E 11 E 12 E Extra items in the right set: E 20 - E 21 - E Use -v to get the full diff + E 21... + E + E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:73: AssertionError _______ TestSpecialisedExplanations.test_eq_longer_list ________ @@ -245,8 +243,9 @@ get on the terminal - we are working on that):: E which E includes foo E ? +++ - E and a - E tail + E and a... + E + E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:83: AssertionError _______ TestSpecialisedExplanations.test_not_in_text_single ________ @@ -359,7 +358,7 @@ get on the terminal - we are working on that):: > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python.py:1207>:1: ValueError + <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python.py:1219>:1: ValueError _______ TestRaises.test_raises_doesnt ________ self = @@ -550,7 +549,7 @@ get on the terminal - we are working on that):: self = def test_single_line(self): - class A: + class A(object): a = 1 b = 2 > assert A.a == b, "A.a appears not to be b" @@ -564,7 +563,7 @@ get on the terminal - we are working on that):: self = def test_multiline(self): - class A: + class A(object): a = 1 b = 2 > assert A.a == b, "A.a appears not to be b\n" \ @@ -581,7 +580,7 @@ get on the terminal - we are working on that):: self = def test_custom_repr(self): - class JSON: + class JSON(object): a = 1 def __repr__(self): return "This is JSON\n{\n 'foo': 'bar'\n}" diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 5e5134c70..da831244b 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -113,7 +113,7 @@ directory with the above conftest.py:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items @@ -164,7 +164,7 @@ and when running it will see a skipped "slow" test:: $ pytest -rs # "-rs" means report details on the little 's' ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items @@ -178,7 +178,7 @@ Or run it including the ``slow`` marked test:: $ pytest 
--runslow ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items @@ -303,7 +303,7 @@ which will add the string to the test header accordingly:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y project deps: mylib-1.1 rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items @@ -328,7 +328,7 @@ which will add info only when run with "--v":: $ pytest -v ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache info1: did you know that ... did you? @@ -341,7 +341,7 @@ and nothing when run plainly:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items @@ -375,7 +375,7 @@ Now we can profile which test functions execute the slowest:: $ pytest --durations=3 ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items @@ -426,7 +426,7 @@ tests in a class. Here is a test module example: import pytest @pytest.mark.incremental - class TestUserHandling: + class TestUserHandling(object): def test_login(self): pass def test_modification(self): @@ -441,7 +441,7 @@ If we run this:: $ pytest -rx ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -484,7 +484,7 @@ Here is an example for making a ``db`` fixture available in a directory: # content of a/conftest.py import pytest - class DB: + class DB(object): pass @pytest.fixture(scope="session") @@ -520,7 +520,7 @@ We can run this:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items @@ -628,7 +628,7 @@ and run them:: $ pytest test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items @@ -722,7 +722,7 @@ and run it:: $ pytest -s test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items diff --git a/doc/en/example/special.rst b/doc/en/example/special.rst index fdffef089..1fc32f6c8 100644 --- a/doc/en/example/special.rst +++ b/doc/en/example/special.rst @@ -28,7 +28,7 @@ will be called ahead of running any tests:: # content of test_module.py - class TestHello: + class TestHello(object): @classmethod def callme(cls): print ("callme called!") @@ -39,7 
+39,7 @@ will be called ahead of running any tests:: def test_method2(self): print ("test_method1 called") - class TestOther: + class TestOther(object): @classmethod def callme(cls): print ("callme other called") diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index 9641d6aac..f760c423e 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -70,7 +70,7 @@ marked ``smtp`` fixture function. Running the test looks like this:: $ pytest test_smtpsimple.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items @@ -188,7 +188,7 @@ inspect what is going on and can now run the tests:: $ pytest test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items @@ -523,7 +523,7 @@ Running the above tests results in the following test IDs being used:: $ pytest --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 10 items @@ -558,7 +558,7 @@ and instantiate an object ``app`` where we stick the already defined import pytest - class App: + class App(object): def __init__(self, smtp): self.smtp = smtp @@ -574,7 +574,7 @@ Here we declare an ``app`` fixture which receives the previously defined $ pytest -v test_appsetup.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items @@ -643,7 +643,7 @@ Let's run the tests in verbose mode and with looking at the print-output:: $ pytest -v -s test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items @@ -729,7 +729,7 @@ and declare its use in a test module via a ``usefixtures`` marker:: import pytest @pytest.mark.usefixtures("cleandir") - class TestDirectoryInit: + class TestDirectoryInit(object): def test_cwd_starts_empty(self): assert os.listdir(os.getcwd()) == [] with open("myfile", "w") as f: @@ -792,7 +792,7 @@ self-contained implementation of this idea:: import pytest - class DB: + class DB(object): def __init__(self): self.intransaction = [] def begin(self, name): @@ -804,7 +804,7 @@ self-contained implementation of this idea:: def db(): return DB() - class TestClass: + class TestClass(object): @pytest.fixture(autouse=True) def transact(self, request, db): db.begin(request.function.__name__) @@ -862,7 +862,7 @@ into a conftest.py file **without** using ``autouse``:: and then e.g. have a TestClass using it by declaring the need:: @pytest.mark.usefixtures("transact") - class TestClass: + class TestClass(object): def test_method1(self): ... 
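
To make the conftest.py variant just described easier to follow, here is a
minimal self-contained sketch; the module name ``test_api.py`` and the exact
fixture bodies are illustrative assumptions rather than text taken verbatim
from the documentation above::

    # content of conftest.py -- sketch only
    import pytest

    class DB(object):
        def __init__(self):
            self.intransaction = []
        def begin(self, name):
            self.intransaction.append(name)
        def rollback(self):
            self.intransaction.pop()

    @pytest.fixture(scope="module")
    def db():
        return DB()

    @pytest.fixture
    def transact(request, db):
        # not autouse: tests opt in by requesting the "transact" fixture
        db.begin(request.function.__name__)
        yield
        db.rollback()

    # content of test_api.py -- sketch only
    import pytest

    @pytest.mark.usefixtures("transact")
    class TestClass(object):
        def test_method1(self, db):
            assert db.intransaction == ["test_method1"]
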
diff --git a/doc/en/funcarg_compare.rst b/doc/en/funcarg_compare.rst index 3d121b944..b857a014d 100644 --- a/doc/en/funcarg_compare.rst +++ b/doc/en/funcarg_compare.rst @@ -24,7 +24,7 @@ resources. Here is a basic example how we could implement a per-session Database object:: # content of conftest.py - class Database: + class Database(object): def __init__(self): print ("database instance created") def destroy(self): diff --git a/doc/en/genapi.py b/doc/en/genapi.py index 89ddc8731..0ede44fa2 100644 --- a/doc/en/genapi.py +++ b/doc/en/genapi.py @@ -1,7 +1,7 @@ import textwrap import inspect -class Writer: +class Writer(object): def __init__(self, clsname): self.clsname = clsname diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index 9f2aa2cda..59abd4c79 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -26,7 +26,7 @@ Installation:: To check your installation has installed the correct version:: $ pytest --version - This is pytest version 3.0.7, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py + This is pytest version 3.x.y, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py .. _`simpletest`: @@ -46,20 +46,20 @@ That's it. You can execute the test function now:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items - + test_sample.py F - + ======= FAILURES ======== _______ test_answer ________ - + def test_answer(): > assert func(3) == 5 E assert 4 == 5 E + where 4 = func(3) - + test_sample.py:5: AssertionError ======= 1 failed in 0.12 seconds ======== @@ -111,7 +111,7 @@ to group tests logically, in classes and modules. Let's write a class containing two tests:: # content of test_class.py - class TestClass: + class TestClass(object): def test_one(self): x = "this" assert 'h' in x @@ -128,15 +128,15 @@ run the module by passing its filename:: .F ======= FAILURES ======== _______ TestClass.test_two ________ - + self = - + def test_two(self): x = "hello" > assert hasattr(x, 'check') E AssertionError: assert False E + where False = hasattr('hello', 'check') - + test_class.py:8: AssertionError 1 failed, 1 passed in 0.12 seconds @@ -165,14 +165,14 @@ before performing the test function call. 
Let's just run it:: F ======= FAILURES ======== _______ test_needsfiles ________ - + tmpdir = local('PYTEST_TMPDIR/test_needsfiles0') - + def test_needsfiles(tmpdir): print (tmpdir) > assert 0 E assert 0 - + test_tmpdir.py:3: AssertionError --------------------------- Captured stdout call --------------------------- PYTEST_TMPDIR/test_needsfiles0 diff --git a/doc/en/index.rst b/doc/en/index.rst index 24ae67957..cb901b8d5 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -25,7 +25,7 @@ To execute it:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index c261fb0ed..fdd963b1d 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -54,7 +54,7 @@ them in turn:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items @@ -93,16 +93,37 @@ for example with the builtin ``mark.xfail``:: @pytest.mark.parametrize("test_input,expected", [ ("3+5", 8), ("2+4", 6), - pytest.mark.xfail(("6*9", 42)), + pytest.param("6*9", 42, + marks=pytest.mark.xfail), ]) def test_eval(test_input, expected): assert eval(test_input) == expected +.. note:: + + prior to version 3.1 the supported mechanism for marking values + used the syntax:: + + import pytest + @pytest.mark.parametrize("test_input,expected", [ + ("3+5", 8), + ("2+4", 6), + pytest.mark.xfail(("6*9", 42),), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + + This was an initial hack to support the feature but soon was demonstrated to be incomplete, + broken for passing functions or applying multiple marks with the same name but different parameters. + The old syntax will be removed in pytest-4.0. + + Let's run this:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items diff --git a/doc/en/recwarn.rst b/doc/en/recwarn.rst index 7bb193c99..513af0d45 100644 --- a/doc/en/recwarn.rst +++ b/doc/en/recwarn.rst @@ -1,139 +1,3 @@ -.. _`asserting warnings`: +:orphan: -.. _assertwarnings: - -Asserting Warnings -===================================================== - -.. _`asserting warnings with the warns function`: - -.. _warns: - -Asserting warnings with the warns function ------------------------------------------------ - -.. versionadded:: 2.8 - -You can check that code raises a particular warning using ``pytest.warns``, -which works in a similar manner to :ref:`raises `:: - - import warnings - import pytest - - def test_warning(): - with pytest.warns(UserWarning): - warnings.warn("my warning", UserWarning) - -The test will fail if the warning in question is not raised. 
- -You can also call ``pytest.warns`` on a function or code string:: - - pytest.warns(expected_warning, func, *args, **kwargs) - pytest.warns(expected_warning, "func(*args, **kwargs)") - -The function also returns a list of all raised warnings (as -``warnings.WarningMessage`` objects), which you can query for -additional information:: - - with pytest.warns(RuntimeWarning) as record: - warnings.warn("another warning", RuntimeWarning) - - # check that only one warning was raised - assert len(record) == 1 - # check that the message matches - assert record[0].message.args[0] == "another warning" - -Alternatively, you can examine raised warnings in detail using the -:ref:`recwarn ` fixture (see below). - -.. note:: - ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated - differently; see :ref:`ensuring_function_triggers`. - -.. _`recording warnings`: - -.. _recwarn: - -Recording warnings ------------------------- - -You can record raised warnings either using ``pytest.warns`` or with -the ``recwarn`` fixture. - -To record with ``pytest.warns`` without asserting anything about the warnings, -pass ``None`` as the expected warning type:: - - with pytest.warns(None) as record: - warnings.warn("user", UserWarning) - warnings.warn("runtime", RuntimeWarning) - - assert len(record) == 2 - assert str(record[0].message) == "user" - assert str(record[1].message) == "runtime" - -The ``recwarn`` fixture will record warnings for the whole function:: - - import warnings - - def test_hello(recwarn): - warnings.warn("hello", UserWarning) - assert len(recwarn) == 1 - w = recwarn.pop(UserWarning) - assert issubclass(w.category, UserWarning) - assert str(w.message) == "hello" - assert w.filename - assert w.lineno - -Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded -warnings: a WarningsRecorder instance. To view the recorded warnings, you can -iterate over this instance, call ``len`` on it to get the number of recorded -warnings, or index into it to get a particular recorded warning. It also -provides these methods: - -.. autoclass:: _pytest.recwarn.WarningsRecorder() - :members: - -Each recorded warning has the attributes ``message``, ``category``, -``filename``, ``lineno``, ``file``, and ``line``. The ``category`` is the -class of the warning. The ``message`` is the warning itself; calling -``str(message)`` will return the actual message of the warning. - -.. note:: - ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated - differently; see :ref:`ensuring_function_triggers`. - -.. _`ensuring a function triggers a deprecation warning`: - -.. _ensuring_function_triggers: - -Ensuring a function triggers a deprecation warning -------------------------------------------------------- - -You can also call a global helper for checking -that a certain function call triggers a ``DeprecationWarning`` or -``PendingDeprecationWarning``:: - - import pytest - - def test_global(): - pytest.deprecated_call(myfunction, 17) - -By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be -caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide -them. 
If you wish to record them in your own code, use the -command ``warnings.simplefilter('always')``:: - - import warnings - import pytest - - def test_deprecation(recwarn): - warnings.simplefilter('always') - warnings.warn("deprecated", DeprecationWarning) - assert len(recwarn) == 1 - assert recwarn.pop(DeprecationWarning) - -You can also use it as a contextmanager:: - - def test_global(): - with pytest.deprecated_call(): - myobject.deprecated_method() +This page has been moved, please see :ref:`assertwarnings`. diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index 0597c76e7..4ee8feb0c 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -98,7 +98,7 @@ You can use the ``skipif`` decorator (and any other marker) on classes:: @pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows") - class TestPosixCalls: + class TestPosixCalls(object): def test_function(self): "will not be setup or run under 'win32' platform" @@ -224,7 +224,7 @@ Running it with the report-on-xfail option gives this output:: example $ pytest -rx xfail_demo.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/example, inifile: collected 7 items diff --git a/doc/en/test/attic.rst b/doc/en/test/attic.rst index 11140db2c..06944661c 100644 --- a/doc/en/test/attic.rst +++ b/doc/en/test/attic.rst @@ -110,7 +110,7 @@ If you want to disable a complete test class you can set the class-level attribute ``disabled``. For example, in order to avoid running some tests on Win32:: - class TestPosixOnly: + class TestPosixOnly(object): disabled = sys.platform == 'win32' def test_xxx(self): diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 637047195..642bb0814 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -29,7 +29,7 @@ Running this would result in a passed test except for the last $ pytest test_tmpdir.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index 6809f68fd..06180f19d 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -71,7 +71,7 @@ it from a unittest-style test:: @pytest.fixture(scope="class") def db_class(request): - class DummyDB: + class DummyDB(object): pass # set a class attribute on the invoking test context request.cls.db = DummyDB() @@ -108,7 +108,7 @@ the ``self.db`` values in the traceback:: $ pytest test_unittest_db.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items @@ -171,7 +171,8 @@ creation of a per-test temporary directory:: tmpdir.join("samplefile.ini").write("# testdata") def test_method(self): - s = open("samplefile.ini").read() + with open("samplefile.ini") as f: + s = f.read() assert "testdata" in s Due to the ``autouse`` flag the ``initdir`` fixture function will be diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 8cc682787..763328f5a 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -177,6 +177,15 @@ integration servers, use this invocation:: to create an XML file at ``path``. +.. 
versionadded:: 3.1 + +To set the name of the root test suite xml item, you can configure the ``junit_suite_name`` option in your config file: + +.. code-block:: ini + + [pytest] + junit_suite_name = my_suite + record_xml_property ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -238,7 +247,7 @@ to all testcases you can use ``LogXML.add_global_properties`` def start_and_prepare_env(): pass - class TestMe: + class TestMe(object): def test_foo(self): assert True @@ -326,7 +335,7 @@ You can specify additional plugins to ``pytest.main``:: # content of myinvoke.py import pytest - class MyPlugin: + class MyPlugin(object): def pytest_sessionfinish(self): print("*** test run reporting finishing") diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst new file mode 100644 index 000000000..1766f997c --- /dev/null +++ b/doc/en/warnings.rst @@ -0,0 +1,224 @@ +.. _`warnings`: + +Warnings Capture +================ + +.. versionadded:: 3.1 + +Starting from version ``3.1``, pytest now automatically catches all warnings during test execution +and displays them at the end of the session:: + + # content of test_show_warnings.py + import warnings + + def deprecated_function(): + warnings.warn("this function is deprecated, use another_function()", DeprecationWarning) + return 1 + + def test_one(): + assert deprecated_function() == 1 + +Running pytest now produces this output:: + + $ pytest test_show_warnings.py + ======= test session starts ======== + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 items + + test_show_warnings.py . + + ======= warnings summary ======== + test_show_warnings.py::test_one + $REGENDOC_TMPDIR/test_show_warnings.py:4: DeprecationWarning: this function is deprecated, use another_function() + warnings.warn("this function is deprecated, use another_function()", DeprecationWarning) + + -- Docs: http://doc.pytest.org/en/latest/warnings.html + ======= 1 passed, 1 warnings in 0.12 seconds ======== + +The ``-W`` flag can be passed to control which warnings will be displayed or even turn +them into errors:: + + $ pytest -q test_show_warnings.py -W error::DeprecationWarning + F + ======= FAILURES ======== + _______ test_one ________ + + def test_one(): + > assert deprecated_function() == 1 + + test_show_warnings.py:8: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + + def deprecated_function(): + > warnings.warn("this function is deprecated, use another_function()", DeprecationWarning) + E DeprecationWarning: this function is deprecated, use another_function() + + test_show_warnings.py:4: DeprecationWarning + 1 failed in 0.12 seconds + +The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option. +For example, the configuration below will ignore all deprecation warnings, but will transform +all other warnings into errors. + +.. code-block:: ini + + [pytest] + filterwarnings = + error + ignore::DeprecationWarning + + +When a warning matches more than one option in the list, the action for the last matching option +is performed. + +Both ``-W`` command-line option and ``filterwarnings`` ini option are based on Python's own +`-W option`_ and `warnings.simplefilter`_, so please refer to those sections in the Python +documentation for other examples and advanced usage. + +*Credits go to Florian Schulze for the reference implementation in the* `pytest-warnings`_ +*plugin.* + +.. _`-W option`: https://docs.python.org/3/using/cmdline.html?highlight=#cmdoption-W +.. 
_warnings.simplefilter: https://docs.python.org/3/library/warnings.html#warnings.simplefilter +.. _`pytest-warnings`: https://github.com/fschulze/pytest-warnings + +.. _`asserting warnings`: + +.. _assertwarnings: + +.. _`asserting warnings with the warns function`: + +.. _warns: + +Asserting warnings with the warns function +----------------------------------------------- + +.. versionadded:: 2.8 + +You can check that code raises a particular warning using ``pytest.warns``, +which works in a similar manner to :ref:`raises `:: + + import warnings + import pytest + + def test_warning(): + with pytest.warns(UserWarning): + warnings.warn("my warning", UserWarning) + +The test will fail if the warning in question is not raised. + +You can also call ``pytest.warns`` on a function or code string:: + + pytest.warns(expected_warning, func, *args, **kwargs) + pytest.warns(expected_warning, "func(*args, **kwargs)") + +The function also returns a list of all raised warnings (as +``warnings.WarningMessage`` objects), which you can query for +additional information:: + + with pytest.warns(RuntimeWarning) as record: + warnings.warn("another warning", RuntimeWarning) + + # check that only one warning was raised + assert len(record) == 1 + # check that the message matches + assert record[0].message.args[0] == "another warning" + +Alternatively, you can examine raised warnings in detail using the +:ref:`recwarn ` fixture (see below). + +.. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. + +.. _`recording warnings`: + +.. _recwarn: + +Recording warnings +------------------------ + +You can record raised warnings either using ``pytest.warns`` or with +the ``recwarn`` fixture. + +To record with ``pytest.warns`` without asserting anything about the warnings, +pass ``None`` as the expected warning type:: + + with pytest.warns(None) as record: + warnings.warn("user", UserWarning) + warnings.warn("runtime", RuntimeWarning) + + assert len(record) == 2 + assert str(record[0].message) == "user" + assert str(record[1].message) == "runtime" + +The ``recwarn`` fixture will record warnings for the whole function:: + + import warnings + + def test_hello(recwarn): + warnings.warn("hello", UserWarning) + assert len(recwarn) == 1 + w = recwarn.pop(UserWarning) + assert issubclass(w.category, UserWarning) + assert str(w.message) == "hello" + assert w.filename + assert w.lineno + +Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded +warnings: a WarningsRecorder instance. To view the recorded warnings, you can +iterate over this instance, call ``len`` on it to get the number of recorded +warnings, or index into it to get a particular recorded warning. It also +provides these methods: + +.. autoclass:: _pytest.recwarn.WarningsRecorder() + :members: + +Each recorded warning has the attributes ``message``, ``category``, +``filename``, ``lineno``, ``file``, and ``line``. The ``category`` is the +class of the warning. The ``message`` is the warning itself; calling +``str(message)`` will return the actual message of the warning. + +.. note:: + :class:`RecordedWarning` was changed from a plain class to a namedtuple in pytest 3.1 + +.. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. + +.. _`ensuring a function triggers a deprecation warning`: + +.. 
_ensuring_function_triggers: + +Ensuring a function triggers a deprecation warning +------------------------------------------------------- + +You can also call a global helper for checking +that a certain function call triggers a ``DeprecationWarning`` or +``PendingDeprecationWarning``:: + + import pytest + + def test_global(): + pytest.deprecated_call(myfunction, 17) + +By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be +caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide +them. If you wish to record them in your own code, use the +command ``warnings.simplefilter('always')``:: + + import warnings + import pytest + + def test_deprecation(recwarn): + warnings.simplefilter('always') + warnings.warn("deprecated", DeprecationWarning) + assert len(recwarn) == 1 + assert recwarn.pop(DeprecationWarning) + +You can also use it as a contextmanager:: + + def test_global(): + with pytest.deprecated_call(): + myobject.deprecated_method() diff --git a/doc/en/writing_plugins.rst b/doc/en/writing_plugins.rst index f6ed6e4e3..bb07ba0df 100644 --- a/doc/en/writing_plugins.rst +++ b/doc/en/writing_plugins.rst @@ -517,7 +517,6 @@ Initialization, command line and configuration hooks .. autofunction:: pytest_load_initial_conftests .. autofunction:: pytest_cmdline_preparse .. autofunction:: pytest_cmdline_parse -.. autofunction:: pytest_namespace .. autofunction:: pytest_addoption .. autofunction:: pytest_cmdline_main .. autofunction:: pytest_configure diff --git a/pytest.py b/pytest.py index e376e417e..4e4ccb32d 100644 --- a/pytest.py +++ b/pytest.py @@ -2,19 +2,7 @@ """ pytest: unit and functional testing with Python. """ -__all__ = [ - 'main', - 'UsageError', - 'cmdline', - 'hookspec', - 'hookimpl', - '__version__', -] -if __name__ == '__main__': # if run as a script or by 'python -m pytest' - # we trigger the below "else" condition by the following import - import pytest - raise SystemExit(pytest.main()) # else we are imported @@ -22,7 +10,69 @@ from _pytest.config import ( main, UsageError, _preloadplugins, cmdline, hookspec, hookimpl ) +from _pytest.fixtures import fixture, yield_fixture +from _pytest.assertion import register_assert_rewrite +from _pytest.freeze_support import freeze_includes from _pytest import __version__ +from _pytest.debugging import pytestPDB as __pytestPDB +from _pytest.recwarn import warns, deprecated_call +from _pytest.runner import fail, skip, importorskip, exit +from _pytest.mark import MARK_GEN as mark, param +from _pytest.skipping import xfail +from _pytest.main import Item, Collector, File, Session +from _pytest.fixtures import fillfixtures as _fillfuncargs +from _pytest.python import ( + raises, approx, + Module, Class, Instance, Function, Generator, +) -_preloadplugins() # to populate pytest.* namespace so help(pytest) works +set_trace = __pytestPDB.set_trace +__all__ = [ + 'main', + 'UsageError', + 'cmdline', + 'hookspec', + 'hookimpl', + '__version__', + 'register_assert_rewrite', + 'freeze_includes', + 'set_trace', + 'warns', + 'deprecated_call', + 'fixture', + 'yield_fixture', + 'fail', + 'skip', + 'xfail', + 'importorskip', + 'exit', + 'mark', + 'param', + 'approx', + '_fillfuncargs', + + 'Item', + 'File', + 'Collector', + 'Session', + 'Module', + 'Class', + 'Instance', + 'Function', + 'Generator', + 'raises', + + +] + +if __name__ == '__main__': + # if run as a script or by 'python -m pytest' + # we trigger the below "else" condition by the following import + import pytest + raise 
SystemExit(pytest.main()) +else: + + from _pytest.compat import _setup_collect_fakemodule + _preloadplugins() # to populate pytest.* namespace so help(pytest) works + _setup_collect_fakemodule() diff --git a/scripts/check-manifest.py b/scripts/check-manifest.py index 5911a84fe..909e7519b 100644 --- a/scripts/check-manifest.py +++ b/scripts/check-manifest.py @@ -18,4 +18,3 @@ if os.path.isdir('.git'): else: print('No .git directory found, skipping checking the manifest file') sys.exit(0) - diff --git a/setup.cfg b/setup.cfg index f3299af5b..816539e2e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,6 +9,10 @@ upload-dir = doc/en/build/html [bdist_wheel] universal = 1 +[check-manifest] +ignore = + _pytest/_version.py + [metadata] license_file = LICENSE diff --git a/setup.py b/setup.py index 1d0630cd2..a71692c25 100644 --- a/setup.py +++ b/setup.py @@ -1,32 +1,27 @@ -import os, sys +import os +import sys import setuptools import pkg_resources from setuptools import setup, Command -classifiers = ['Development Status :: 6 - Mature', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: POSIX', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: MacOS :: MacOS X', - 'Topic :: Software Development :: Testing', - 'Topic :: Software Development :: Libraries', - 'Topic :: Utilities'] + [ - ('Programming Language :: Python :: %s' % x) for x in - '2 2.6 2.7 3 3.3 3.4 3.5 3.6'.split()] +classifiers = [ + 'Development Status :: 6 - Mature', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: POSIX', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: MacOS :: MacOS X', + 'Topic :: Software Development :: Testing', + 'Topic :: Software Development :: Libraries', + 'Topic :: Utilities', +] + [ + ('Programming Language :: Python :: %s' % x) + for x in '2 2.6 2.7 3 3.3 3.4 3.5 3.6'.split() +] with open('README.rst') as fd: long_description = fd.read() -def get_version(): - p = os.path.join(os.path.dirname( - os.path.abspath(__file__)), "_pytest", "__init__.py") - with open(p) as f: - for line in f.readlines(): - if "__version__" in line: - return line.strip().split("=")[-1].strip(" '") - raise ValueError("could not read version") - def has_environment_marker_support(): """ @@ -63,7 +58,9 @@ def main(): name='pytest', description='pytest: simple powerful testing with Python', long_description=long_description, - version=get_version(), + use_scm_version={ + 'write_to': '_pytest/_version.py', + }, url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], @@ -74,6 +71,7 @@ def main(): keywords="test unittest", cmdclass={'test': PyTest}, # the following should be enabled for release + setup_requires=['setuptools-scm'], install_requires=install_requires, extras_require=extras_require, packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.vendored_packages'], diff --git a/tasks/__init__.py b/tasks/__init__.py new file mode 100644 index 000000000..9551ff059 --- /dev/null +++ b/tasks/__init__.py @@ -0,0 +1,9 @@ +""" +Invoke tasks to help with pytest development and release process. +""" + +import invoke + +from . 
import generate + +ns = invoke.Collection(generate) diff --git a/tasks/generate.py b/tasks/generate.py new file mode 100644 index 000000000..deb77ed4f --- /dev/null +++ b/tasks/generate.py @@ -0,0 +1,112 @@ +import os +from pathlib import Path +from subprocess import check_output, check_call + +import invoke + + +@invoke.task(help={ + 'version': 'version being released', +}) +def announce(ctx, version): + """Generates a new release announcement entry in the docs.""" + # Get our list of authors + stdout = check_output(["git", "describe", "--abbrev=0", '--tags']) + stdout = stdout.decode('utf-8') + last_version = stdout.strip() + + stdout = check_output(["git", "log", "{}..HEAD".format(last_version), "--format=%aN"]) + stdout = stdout.decode('utf-8') + + contributors = set(stdout.splitlines()) + + template_name = 'release.minor.rst' if version.endswith('.0') else 'release.patch.rst' + template_text = Path(__file__).parent.joinpath(template_name).read_text(encoding='UTF-8') + + contributors_text = '\n'.join('* {}'.format(name) for name in sorted(contributors)) + '\n' + text = template_text.format(version=version, contributors=contributors_text) + + target = Path(__file__).parent.joinpath('../doc/en/announce/release-{}.rst'.format(version)) + target.write_text(text, encoding='UTF-8') + print("[generate.announce] Generated {}".format(target.name)) + + # Update index with the new release entry + index_path = Path(__file__).parent.joinpath('../doc/en/announce/index.rst') + lines = index_path.read_text(encoding='UTF-8').splitlines() + indent = ' ' + for index, line in enumerate(lines): + if line.startswith('{}release-'.format(indent)): + new_line = indent + target.stem + if line != new_line: + lines.insert(index, new_line) + index_path.write_text('\n'.join(lines) + '\n', encoding='UTF-8') + print("[generate.announce] Updated {}".format(index_path.name)) + else: + print("[generate.announce] Skip {} (already contains release)".format(index_path.name)) + break + + check_call(['git', 'add', str(target)]) + + +@invoke.task() +def regen(ctx): + """Call regendoc tool to update examples and pytest output in the docs.""" + print("[generate.regen] Updating docs") + check_call(['tox', '-e', 'regen']) + + +@invoke.task() +def make_tag(ctx, version): + """Create a new (local) tag for the release, only if the repository is clean.""" + from git import Repo + + repo = Repo('.') + if repo.is_dirty(): + print('Current repository is dirty. 
Please commit any changes and try again.') + raise invoke.Exit(code=2) + + tag_names = [x.name for x in repo.tags] + if version in tag_names: + print("[generate.make_tag] Delete existing tag {}".format(version)) + repo.delete_tag(version) + + print("[generate.make_tag] Create tag {}".format(version)) + repo.create_tag(version) + + +@invoke.task() +def devpi_upload(ctx, version, user, password=None): + """Creates and uploads a package to devpi for testing.""" + if password: + print("[generate.devpi_upload] devpi login {}".format(user)) + check_call(['devpi', 'login', user, '--password', password]) + + check_call(['devpi', 'use', 'https://devpi.net/{}/dev'.format(user)]) + + env = os.environ.copy() + env['SETUPTOOLS_SCM_PRETEND_VERSION'] = version + check_call(['devpi', 'upload', '--formats', 'sdist,bdist_wheel'], env=env) + print("[generate.devpi_upload] package uploaded") + + +@invoke.task(help={ + 'version': 'version being released', + 'user': 'name of the user on devpi to stage the generated package', + 'password': 'user password on devpi to stage the generated package ' + '(if not given assumed logged in)', +}) +def pre_release(ctx, version, user, password=None): + """Generates new docs, release announcements and uploads a new release to devpi for testing.""" + announce(ctx, version) + regen(ctx) + + msg = 'Preparing release version {}'.format(version) + check_call(['git', 'commit', '-a', '-m', msg]) + + make_tag(ctx, version) + + devpi_upload(ctx, version=version, user=user, password=password) + + print() + print('[generate.pre_release] Please push your branch and open a PR.') + diff --git a/tasks/release.minor.rst b/tasks/release.minor.rst new file mode 100644 index 000000000..4bbce5a82 --- /dev/null +++ b/tasks/release.minor.rst @@ -0,0 +1,27 @@ +pytest-{version} +======================================= + +The pytest team is proud to announce the {version} release! + +pytest is a mature Python testing tool with more than a 1600 tests +against itself, passing on many different interpreters and platforms. + +This release contains a bugs fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + +http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +{contributors} + +Happy testing, +The Pytest Development Team diff --git a/tasks/release.patch.rst b/tasks/release.patch.rst new file mode 100644 index 000000000..56764b913 --- /dev/null +++ b/tasks/release.patch.rst @@ -0,0 +1,17 @@ +pytest-{version} +======================================= + +pytest {version} has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. 
+ +Thanks to all who contributed to this release, among them: + +{contributors} + +Happy testing, +The pytest Development Team diff --git a/tasks/requirements.txt b/tasks/requirements.txt new file mode 100644 index 000000000..35afc29c8 --- /dev/null +++ b/tasks/requirements.txt @@ -0,0 +1,3 @@ +invoke +tox +gitpython diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 88e3fa449..00abfc38d 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function import os import sys @@ -8,7 +9,7 @@ import pytest from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR -class TestGeneralUsage: +class TestGeneralUsage(object): def test_config_error(self, testdir): testdir.makeconftest(""" def pytest_configure(config): @@ -338,10 +339,16 @@ class TestGeneralUsage: "*ERROR*test_b.py::b*", ]) + @pytest.mark.usefixtures('recwarn') def test_namespace_import_doesnt_confuse_import_hook(self, testdir): - # Ref #383. Python 3.3's namespace package messed with our import hooks - # Importing a module that didn't exist, even if the ImportError was - # gracefully handled, would make our test crash. + """ + Ref #383. Python 3.3's namespace package messed with our import hooks + Importing a module that didn't exist, even if the ImportError was + gracefully handled, would make our test crash. + + Use recwarn here to silence this warning in Python 2.6 and 2.7: + ImportWarning: Not importing directory '...\not_a_package': missing __init__.py + """ testdir.mkdir('not_a_package') p = testdir.makepyfile(""" try: @@ -410,7 +417,7 @@ class TestGeneralUsage: ]) -class TestInvocationVariants: +class TestInvocationVariants(object): def test_earlyinit(self, testdir): p = testdir.makepyfile(""" import pytest @@ -502,7 +509,7 @@ class TestInvocationVariants: out, err = capsys.readouterr() def test_invoke_plugin_api(self, testdir, capsys): - class MyPlugin: + class MyPlugin(object): def pytest_addoption(self, parser): parser.addoption("--myopt") @@ -523,6 +530,7 @@ class TestInvocationVariants: ]) def test_cmdline_python_package(self, testdir, monkeypatch): + import warnings monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False) path = testdir.mkpydir("tpkg") path.join("test_hello.py").write("def test_hello(): pass") @@ -545,7 +553,11 @@ class TestInvocationVariants: return what empty_package = testdir.mkpydir("empty_package") monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package)) - result = testdir.runpytest("--pyargs", ".") + # the path which is not a package raises a warning on pypy; + # no idea why only pypy and not normal python warn about it here + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ImportWarning) + result = testdir.runpytest("--pyargs", ".") assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*" @@ -670,7 +682,7 @@ class TestInvocationVariants: assert request.config.pluginmanager.hasplugin('python') -class TestDurations: +class TestDurations(object): source = """ import time frag = 0.002 @@ -741,7 +753,7 @@ class TestDurations: assert result.ret == 0 -class TestDurationWithFixture: +class TestDurationWithFixture(object): source = """ import time frag = 0.001 @@ -781,3 +793,45 @@ def test_zipimport_hook(testdir, tmpdir): assert result.ret == 0 result.stderr.fnmatch_lines(['*not found*foo*']) assert 'INTERNALERROR>' not in result.stdout.str() + + +def test_import_plugin_unicode_name(testdir): + testdir.makepyfile( + myplugin='', + ) + 
testdir.makepyfile(""" + def test(): pass + """) + testdir.makeconftest(""" + pytest_plugins = [u'myplugin'] + """) + r = testdir.runpytest() + assert r.ret == 0 + + +def test_deferred_hook_checking(testdir): + """ + Check hooks as late as possible (#1821). + """ + testdir.syspathinsert() + testdir.makepyfile(**{ + 'plugin.py': """ + class Hooks: + def pytest_my_hook(self, config): + pass + + def pytest_configure(config): + config.pluginmanager.add_hookspecs(Hooks) + """, + 'conftest.py': """ + pytest_plugins = ['plugin'] + def pytest_my_hook(config): + return 40 + """, + 'test_foo.py': """ + def test(request): + assert request.config.hook.pytest_my_hook(config=request.config) == [40] + """ + }) + result = testdir.runpytest() + result.stdout.fnmatch_lines(['* 1 passed *']) diff --git a/testing/code/test_code.py b/testing/code/test_code.py index ad9db6d2e..479a2e7cc 100644 --- a/testing/code/test_code.py +++ b/testing/code/test_code.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import sys import _pytest._code @@ -20,7 +21,7 @@ def test_code_gives_back_name_for_not_existing_file(): assert code.fullsource is None def test_code_with_class(): - class A: + class A(object): pass pytest.raises(TypeError, "_pytest._code.Code(A)") @@ -136,7 +137,7 @@ def test_frame_getargs(): ('z', {'c': 'd'})] -class TestExceptionInfo: +class TestExceptionInfo(object): def test_bad_getsource(self): try: @@ -147,7 +148,7 @@ class TestExceptionInfo: assert exci.getrepr() -class TestTracebackEntry: +class TestTracebackEntry(object): def test_getsource(self): try: diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py index 23b0a985e..b7dafdb46 100644 --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function import operator import _pytest @@ -25,7 +26,7 @@ else: import pytest pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3])) -class TWMock: +class TWMock(object): WRITE = object() def __init__(self): @@ -89,7 +90,7 @@ def h(): g() # -class TestTraceback_f_g_h: +class TestTraceback_f_g_h(object): def setup_method(self, method): try: h() @@ -369,7 +370,7 @@ def test_codepath_Queue_example(): def test_match_succeeds(): with pytest.raises(ZeroDivisionError) as excinfo: - 0 / 0 + 0 // 0 excinfo.match(r'.*zero.*') def test_match_raises_error(testdir): @@ -386,7 +387,7 @@ def test_match_raises_error(testdir): "*AssertionError*Pattern*[123]*not found*", ]) -class TestFormattedExcinfo: +class TestFormattedExcinfo(object): @pytest.fixture def importasmod(self, request): @@ -472,7 +473,7 @@ raise ValueError() pr = FormattedExcinfo() class FakeCode(object): - class raw: + class raw(object): co_filename = '?' path = '?' 
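The match tests in test_excinfo.py above are the most compact illustration of the match keyword accepted by pytest.raises and of the ExceptionInfo.match() helper; both apply re.search to the string form of the raised exception. A minimal usage sketch (the test names here are illustrative)::

    import pytest

    def test_match_keyword():
        # match is applied with re.search, so a plain substring or a regex works;
        # int("asdf") raises "invalid literal for int() with base 10: 'asdf'".
        with pytest.raises(ValueError, match=r"base \d+"):
            int("asdf")

    def test_match_after_the_fact():
        with pytest.raises(ZeroDivisionError) as excinfo:
            1 // 0
        excinfo.match(r".*zero.*")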
diff --git a/testing/code/test_source.py b/testing/code/test_source.py index 13bfccd54..bdbc00d19 100644 --- a/testing/code/test_source.py +++ b/testing/code/test_source.py @@ -1,6 +1,7 @@ # flake8: noqa # disable flake check on this file because some constructs are strange # or redundant on purpose and can't be disable on a line-by-line basis +from __future__ import absolute_import, division, print_function import sys import _pytest._code @@ -49,7 +50,7 @@ def test_source_from_function(): assert str(source).startswith('def test_source_str_function():') def test_source_from_method(): - class TestClass: + class TestClass(object): def test_method(self): pass source = _pytest._code.Source(TestClass().test_method) @@ -119,7 +120,7 @@ def test_isparseable(): assert not Source(" \nif 1:\npass").isparseable() assert not Source(chr(0)).isparseable() -class TestAccesses: +class TestAccesses(object): source = Source("""\ def f(x): pass @@ -143,7 +144,7 @@ class TestAccesses: l = [x for x in self.source] assert len(l) == 4 -class TestSourceParsingAndCompiling: +class TestSourceParsingAndCompiling(object): source = Source("""\ def f(x): assert (x == @@ -307,7 +308,7 @@ class TestSourceParsingAndCompiling: pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval') def test_getstartingblock_singleline(): - class A: + class A(object): def __init__(self, *args): frame = sys._getframe(1) self.source = _pytest._code.Frame(frame).statement @@ -318,7 +319,7 @@ def test_getstartingblock_singleline(): assert len(l) == 1 def test_getstartingblock_multiline(): - class A: + class A(object): def __init__(self, *args): frame = sys._getframe(1) self.source = _pytest._code.Frame(frame).statement @@ -461,16 +462,16 @@ def test_getfslineno(): assert lineno == A_lineno assert getfslineno(3) == ("", -1) - class B: + class B(object): pass B.__name__ = "B2" assert getfslineno(B)[1] == -1 def test_code_of_object_instance_with_call(): - class A: + class A(object): pass pytest.raises(TypeError, lambda: _pytest._code.Source(A())) - class WithCall: + class WithCall(object): def __call__(self): pass @@ -559,7 +560,7 @@ x = 3 """) assert str(source) == "raise ValueError(\n 23\n)" -class TestTry: +class TestTry(object): pytestmark = astonly source = """\ try: @@ -586,7 +587,7 @@ else: source = getstatement(5, self.source) assert str(source) == " raise KeyError()" -class TestTryFinally: +class TestTryFinally(object): source = """\ try: raise ValueError @@ -604,7 +605,7 @@ finally: -class TestIf: +class TestIf(object): pytestmark = astonly source = """\ if 1: diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index e610458e0..0c41a71bf 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pytest @@ -26,7 +27,7 @@ def test_funcarg_prefix_deprecation(testdir): """) result = testdir.runpytest('-ra') result.stdout.fnmatch_lines([ - ('WC1 None pytest_funcarg__value: ' + ('*pytest_funcarg__value: ' 'declaring fixtures using "pytest_funcarg__" prefix is deprecated ' 'and scheduled to be removed in pytest 4.0. 
' 'Please remove the prefix and use the @pytest.fixture decorator instead.'), @@ -48,7 +49,7 @@ def test_str_args_deprecated(tmpdir, testdir): from _pytest.main import EXIT_NOTESTSCOLLECTED warnings = [] - class Collect: + class Collect(object): def pytest_logwarning(self, message): warnings.append(message) diff --git a/testing/python/approx.py b/testing/python/approx.py index fc1cbf9ab..d7063e215 100644 --- a/testing/python/approx.py +++ b/testing/python/approx.py @@ -20,7 +20,7 @@ class MyDocTestRunner(doctest.DocTestRunner): example.source.strip(), got.strip(), example.want.strip())) -class TestApprox: +class TestApprox(object): def test_repr_string(self): # for some reason in Python 2.6 it is not displaying the tolerance representation correctly diff --git a/testing/python/collect.py b/testing/python/collect.py index ca8eb30a5..236421f1c 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -12,7 +12,7 @@ from _pytest.main import ( ) -class TestModule: +class TestModule(object): def test_failing_import(self, testdir): modcol = testdir.getmodulecol("import alksdjalskdjalkjals") pytest.raises(Collector.CollectError, modcol.collect) @@ -104,7 +104,6 @@ class TestModule: else: assert name not in stdout - def test_show_traceback_import_error_unicode(self, testdir): """Check test modules collected which raise ImportError with unicode messages are handled properly (#2336). @@ -122,17 +121,17 @@ class TestModule: assert result.ret == 2 -class TestClass: +class TestClass(object): def test_class_with_init_warning(self, testdir): testdir.makepyfile(""" - class TestClass1: + class TestClass1(object): def __init__(self): pass """) result = testdir.runpytest("-rw") - result.stdout.fnmatch_lines_random(""" - WC1*test_class_with_init_warning.py*__init__* - """) + result.stdout.fnmatch_lines([ + "*cannot collect test class 'TestClass1' because it has a __init__ constructor", + ]) def test_class_subclassobject(self, testdir): testdir.getmodulecol(""" @@ -146,7 +145,7 @@ class TestClass: def test_setup_teardown_class_as_classmethod(self, testdir): testdir.makepyfile(test_mod1=""" - class TestClassMethod: + class TestClassMethod(object): @classmethod def setup_class(cls): pass @@ -194,7 +193,7 @@ class TestClass: assert result.ret == EXIT_NOTESTSCOLLECTED -class TestGenerator: +class TestGenerator(object): def test_generative_functions(self, testdir): modcol = testdir.getmodulecol(""" def func1(arg, arg2): @@ -219,7 +218,7 @@ class TestGenerator: modcol = testdir.getmodulecol(""" def func1(arg, arg2): assert arg == arg2 - class TestGenMethods: + class TestGenMethods(object): def test_gen(self): yield func1, 17, 3*5 yield func1, 42, 6*7 @@ -273,7 +272,7 @@ class TestGenerator: modcol = testdir.getmodulecol(""" def func1(arg, arg2): assert arg == arg2 - class TestGenMethods: + class TestGenMethods(object): def test_gen(self): yield "m1", func1, 17, 3*5 yield "m2", func1, 42, 6*7 @@ -291,6 +290,7 @@ class TestGenerator: def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir): o = testdir.makepyfile(""" + from __future__ import print_function def test_generative_order_of_execution(): import py, pytest test_list = [] @@ -300,8 +300,8 @@ class TestGenerator: test_list.append(item) def assert_order_of_execution(): - py.builtin.print_('expected order', expected_list) - py.builtin.print_('but got ', test_list) + print('expected order', expected_list) + print('but got ', test_list) assert test_list == expected_list for i in expected_list: @@ -315,6 +315,7 @@ class 
TestGenerator: def test_order_of_execution_generator_different_codeline(self, testdir): o = testdir.makepyfile(""" + from __future__ import print_function def test_generative_tests_different_codeline(): import py, pytest test_list = [] @@ -330,8 +331,8 @@ class TestGenerator: test_list.append(0) def assert_order_of_execution(): - py.builtin.print_('expected order', expected_list) - py.builtin.print_('but got ', test_list) + print('expected order', expected_list) + print('but got ', test_list) assert test_list == expected_list yield list_append_0 @@ -353,7 +354,7 @@ class TestGenerator: # has been used during collection. o = testdir.makepyfile(""" setuplist = [] - class TestClass: + class TestClass(object): def setup_method(self, func): #print "setup_method", self, func setuplist.append(self) @@ -387,7 +388,7 @@ class TestGenerator: assert not skipped and not failed -class TestFunction: +class TestFunction(object): def test_getmodulecollector(self, testdir): item = testdir.getitem("def test_func(): pass") modcol = item.getparent(pytest.Module) @@ -396,7 +397,7 @@ class TestFunction: def test_function_as_object_instance_ignored(self, testdir): testdir.makepyfile(""" - class A: + class A(object): def __call__(self, tmpdir): 0/0 @@ -447,7 +448,7 @@ class TestFunction: def test_issue213_parametrize_value_no_equal(self, testdir): testdir.makepyfile(""" import pytest - class A: + class A(object): def __eq__(self, other): raise ValueError("not possible") @pytest.mark.parametrize('arg', [A()]) @@ -578,11 +579,11 @@ class TestFunction: item = testdir.getitem("def test_func(): raise ValueError") config = item.config - class MyPlugin1: + class MyPlugin1(object): def pytest_pyfunc_call(self, pyfuncitem): raise ValueError - class MyPlugin2: + class MyPlugin2(object): def pytest_pyfunc_call(self, pyfuncitem): return True @@ -710,7 +711,7 @@ class TestFunction: assert [x.originalname for x in items] == ['test_func', 'test_func'] -class TestSorting: +class TestSorting(object): def test_check_equality(self, testdir): modcol = testdir.getmodulecol(""" def test_pass(): pass @@ -760,7 +761,7 @@ class TestSorting: assert [item.name for item in colitems] == ['test_b', 'test_a'] -class TestConftestCustomization: +class TestConftestCustomization(object): def test_pytest_pycollect_module(self, testdir): testdir.makeconftest(""" import pytest @@ -902,7 +903,7 @@ def test_modulecol_roundtrip(testdir): assert modcol.name == newcol.name -class TestTracebackCutting: +class TestTracebackCutting(object): def test_skip_simple(self): excinfo = pytest.raises(pytest.skip.Exception, 'pytest.skip("xxx")') assert excinfo.traceback[-1].frame.code.name == "skip" @@ -1028,7 +1029,7 @@ class TestTracebackCutting: assert filter_traceback(tb[-1]) -class TestReportInfo: +class TestReportInfo(object): def test_itemreport_reportinfo(self, testdir, linecomp): testdir.makeconftest(""" import pytest @@ -1053,7 +1054,7 @@ class TestReportInfo: def test_class_reportinfo(self, testdir): modcol = testdir.getmodulecol(""" # lineno 0 - class TestClass: + class TestClass(object): def test_hello(self): pass """) classcol = testdir.collect_by_name(modcol, "TestClass") @@ -1088,7 +1089,7 @@ class TestReportInfo: def check(x): pass yield check, 3 - class TestClass: + class TestClass(object): def test_method(self): pass """ @@ -1097,7 +1098,7 @@ class TestReportInfo: # https://github.com/pytest-dev/pytest/issues/1204 modcol = testdir.getmodulecol(""" # lineno 0 - class TestClass: + class TestClass(object): def __getattr__(self, name): return "this is 
not an int" @@ -1119,7 +1120,7 @@ def test_customized_python_discovery(testdir): p = testdir.makepyfile(""" def check_simple(): pass - class CheckMyApp: + class CheckMyApp(object): def check_meth(self): pass """) @@ -1194,7 +1195,7 @@ def test_customize_through_attributes(testdir): return MyClass(name, parent=collector) """) testdir.makepyfile(""" - class MyTestClass: + class MyTestClass(object): def test_hello(self): pass """) @@ -1208,11 +1209,11 @@ def test_customize_through_attributes(testdir): def test_unorderable_types(testdir): testdir.makepyfile(""" - class TestJoinEmpty: + class TestJoinEmpty(object): pass def make_test(): - class Test: + class Test(object): pass Test.__name__ = "TestFoo" return Test @@ -1286,8 +1287,8 @@ def test_dont_collect_non_function_callable(testdir): result = testdir.runpytest('-rw') result.stdout.fnmatch_lines([ '*collected 1 item*', - 'WC2 *', - '*1 passed, 1 pytest-warnings in *', + "*cannot collect 'test_a' because it is not a function*", + '*1 passed, 1 warnings in *', ]) diff --git a/testing/python/fixture.py b/testing/python/fixture.py index be99ed833..4c9ad7a91 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -20,7 +20,7 @@ def test_getfuncargnames(): def h(arg1, arg2, arg3="hello"): pass assert fixtures.getfuncargnames(h) == ('arg1', 'arg2') - class A: + class A(object): def f(self, arg1, arg2="hello"): pass @@ -28,7 +28,7 @@ def test_getfuncargnames(): if sys.version_info < (3,0): assert fixtures.getfuncargnames(A.f) == ('arg1',) -class TestFillFixtures: +class TestFillFixtures(object): def test_fillfuncargs_exposed(self): # used by oejskit, kept for compatibility assert pytest._fillfuncargs == fixtures.fillfixtures @@ -79,7 +79,7 @@ class TestFillFixtures: def something(request): return request.function.__name__ - class TestClass: + class TestClass(object): def test_method(self, something): assert something == "test_method" def test_func(something): @@ -91,7 +91,7 @@ class TestFillFixtures: def test_funcarg_lookup_classlevel(self, testdir): p = testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): @pytest.fixture def something(self, request): @@ -134,7 +134,7 @@ class TestFillFixtures: def spam(): return 'spam' - class TestSpam: + class TestSpam(object): @pytest.fixture def spam(self, spam): @@ -463,7 +463,7 @@ class TestFillFixtures: assert result.ret == 0 -class TestRequestBasic: +class TestRequestBasic(object): def test_request_attributes(self, testdir): item = testdir.getitem(""" import pytest @@ -484,7 +484,7 @@ class TestRequestBasic: def test_request_attributes_method(self, testdir): item, = testdir.getitems(""" import pytest - class TestB: + class TestB(object): @pytest.fixture def something(self, request): @@ -502,7 +502,7 @@ class TestRequestBasic: @pytest.fixture def something(request): pass - class TestClass: + class TestClass(object): def test_method(self, something): pass """) @@ -545,22 +545,33 @@ class TestRequestBasic: return l.pop() def test_func(something): pass """) + import contextlib + if getfixmethod == 'getfuncargvalue': + warning_expectation = pytest.warns(DeprecationWarning) + else: + # see #1830 for a cleaner way to accomplish this + @contextlib.contextmanager + def expecting_no_warning(): yield + + warning_expectation = expecting_no_warning() + req = item._request - fixture_fetcher = getattr(req, getfixmethod) - pytest.raises(FixtureLookupError, fixture_fetcher, "notexists") - val = fixture_fetcher("something") - assert val == 1 - val = fixture_fetcher("something") 
- assert val == 1 - val2 = fixture_fetcher("other") - assert val2 == 2 - val2 = fixture_fetcher("other") # see about caching - assert val2 == 2 - pytest._fillfuncargs(item) - assert item.funcargs["something"] == 1 - assert len(get_public_names(item.funcargs)) == 2 - assert "request" in item.funcargs - #assert item.funcargs == {'something': 1, "other": 2} + with warning_expectation: + fixture_fetcher = getattr(req, getfixmethod) + with pytest.raises(FixtureLookupError): + fixture_fetcher("notexists") + val = fixture_fetcher("something") + assert val == 1 + val = fixture_fetcher("something") + assert val == 1 + val2 = fixture_fetcher("other") + assert val2 == 2 + val2 = fixture_fetcher("other") # see about caching + assert val2 == 2 + pytest._fillfuncargs(item) + assert item.funcargs["something"] == 1 + assert len(get_public_names(item.funcargs)) == 2 + assert "request" in item.funcargs def test_request_addfinalizer(self, testdir): item = testdir.getitem(""" @@ -704,7 +715,7 @@ class TestRequestBasic: def test_func(): pass - class TestClass: + class TestClass(object): @pytest.fixture(scope="class", autouse=True) def setup_class(self): l.append("class") @@ -771,7 +782,7 @@ class TestRequestBasic: reprec = testdir.inline_run() reprec.assertoutcome(passed=2) -class TestRequestMarking: +class TestRequestMarking(object): def test_applymarker(self, testdir): item1,item2 = testdir.getitems(""" import pytest @@ -779,7 +790,7 @@ class TestRequestMarking: @pytest.fixture def something(request): pass - class TestClass: + class TestClass(object): def test_func1(self, something): pass def test_func2(self, something): @@ -831,7 +842,7 @@ class TestRequestMarking: reprec = testdir.inline_run() reprec.assertoutcome(passed=2) -class TestRequestCachedSetup: +class TestRequestCachedSetup(object): def test_request_cachedsetup_defaultmodule(self, testdir): reprec = testdir.inline_runsource(""" mysetup = ["hello",].pop @@ -844,7 +855,7 @@ class TestRequestCachedSetup: def test_func1(something): assert something == "hello" - class TestClass: + class TestClass(object): def test_func1a(self, something): assert something == "hello" """) @@ -862,7 +873,7 @@ class TestRequestCachedSetup: assert something == "hello3" def test_func2(something): assert something == "hello2" - class TestClass: + class TestClass(object): def test_func1a(self, something): assert something == "hello" def test_func2b(self, something): @@ -996,7 +1007,7 @@ class TestRequestCachedSetup: "*ZeroDivisionError*", ]) -class TestFixtureUsages: +class TestFixtureUsages(object): def test_noargfixturedec(self, testdir): testdir.makepyfile(""" import pytest @@ -1138,7 +1149,7 @@ class TestFixtureUsages: def test_factory_setup_as_classes_fails(self, testdir): testdir.makepyfile(""" import pytest - class arg1: + class arg1(object): def __init__(self, request): self.x = 1 arg1 = pytest.fixture()(arg1) @@ -1172,7 +1183,7 @@ class TestFixtureUsages: request.cls.hello = "world" l.append(1) - class TestClass: + class TestClass(object): def test_one(self): assert self.hello == "world" assert len(l) == 1 @@ -1198,7 +1209,7 @@ class TestFixtureUsages: """) testdir.makepyfile(""" - class TestClass: + class TestClass(object): def test_one(self): assert self.hello == "world" def test_two(self): @@ -1217,7 +1228,7 @@ class TestFixtureUsages: testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): @pytest.fixture def setup1(self, request): assert self == request.instance @@ -1256,7 +1267,7 @@ class TestFixtureUsages: assert l == [1,2, 10,20] 
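Earlier in this file's diff, the fixture-fetcher test is parametrized over both the current and the deprecated spelling of dynamic fixture lookup. In user code the supported form looks like this (a minimal sketch; the fixture name number is made up)::

    import pytest

    @pytest.fixture
    def number():
        return 41

    def test_dynamic_lookup(request):
        # request.getfixturevalue is the current API; request.getfuncargvalue
        # still works but emits a DeprecationWarning, as the test above checks.
        assert request.getfixturevalue("number") + 1 == 42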
-class TestFixtureManagerParseFactories: +class TestFixtureManagerParseFactories(object): @pytest.fixture def testdir(self, request): @@ -1280,7 +1291,7 @@ class TestFixtureManagerParseFactories: def test_parsefactories_evil_objects_issue214(self, testdir): testdir.makepyfile(""" - class A: + class A(object): def __call__(self): pass def __getattr__(self, name): @@ -1311,7 +1322,7 @@ class TestFixtureManagerParseFactories: @pytest.fixture def hello(request): return "module" - class TestClass: + class TestClass(object): @pytest.fixture def hello(self, request): return "class" @@ -1360,7 +1371,7 @@ class TestFixtureManagerParseFactories: reprec.assertoutcome(passed=2) -class TestAutouseDiscovery: +class TestAutouseDiscovery(object): @pytest.fixture def testdir(self, testdir): @@ -1402,14 +1413,14 @@ class TestAutouseDiscovery: def test_two_classes_separated_autouse(self, testdir): testdir.makepyfile(""" import pytest - class TestA: + class TestA(object): l = [] @pytest.fixture(autouse=True) def setup1(self): self.l.append(1) def test_setup1(self): assert self.l == [1] - class TestB: + class TestB(object): l = [] @pytest.fixture(autouse=True) def setup2(self): @@ -1423,7 +1434,7 @@ class TestAutouseDiscovery: def test_setup_at_classlevel(self, testdir): testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): @pytest.fixture(autouse=True) def permethod(self, request): request.instance.funcname = request.function.__name__ @@ -1505,13 +1516,13 @@ class TestAutouseDiscovery: def test_x(): assert l == ["module"] - class TestA: + class TestA(object): @pytest.fixture(autouse=True) def append2(self): l.append("A") def test_hello(self): assert l == ["module", "module", "A"], l - class TestA2: + class TestA2(object): def test_world(self): assert l == ["module", "module", "A", "module"], l """) @@ -1519,7 +1530,7 @@ class TestAutouseDiscovery: reprec.assertoutcome(passed=3) -class TestAutouseManagement: +class TestAutouseManagement(object): def test_autouse_conftest_mid_directory(self, testdir): pkgdir = testdir.mkpydir("xyz123") pkgdir.join("conftest.py").write(_pytest._code.Source(""" @@ -1654,10 +1665,10 @@ class TestAutouseManagement: testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): def test_1(self): pass - class TestClass2: + class TestClass2(object): def test_2(self): pass """) @@ -1682,7 +1693,7 @@ class TestAutouseManagement: def mappend(): l.append(1) - class TestHallo: + class TestHallo(object): def test_method(self): assert l == [1,3,2] """) @@ -1696,7 +1707,7 @@ class TestAutouseManagement: def pytest_generate_tests(metafunc): if metafunc.cls is not None: metafunc.parametrize("item", [1,2], scope="class") - class TestClass: + class TestClass(object): @pytest.fixture(scope="class", autouse=True) def addteardown(self, item, request): l.append("setup-%d" % item) @@ -1756,7 +1767,7 @@ class TestAutouseManagement: reprec.assertoutcome(passed=2) -class TestFixtureMarker: +class TestFixtureMarker(object): def test_parametrize(self, testdir): testdir.makepyfile(""" import pytest @@ -1822,7 +1833,7 @@ class TestFixtureMarker: def test_2(arg): assert arg == 1 assert len(l) == 1 - class TestClass: + class TestClass(object): def test3(self, arg): assert arg == 1 assert len(l) == 1 @@ -1916,7 +1927,7 @@ class TestFixtureMarker: def test_2(arg): assert arg == 1 assert len(l) == 1 - class TestClass: + class TestClass(object): def test3(self, arg): assert arg == 1 assert len(l) == 1 @@ -2135,12 +2146,12 @@ class TestFixtureMarker: 
testdir.makepyfile(""" import pytest - class TestClass2: + class TestClass2(object): def test_1(self): pass def test_2(self): pass - class TestClass: + class TestClass(object): def test_3(self): pass """) @@ -2213,7 +2224,7 @@ class TestFixtureMarker: l = [] - class TestClass: + class TestClass(object): @classmethod @pytest.fixture(scope="class", autouse=True) def setup1(self, request, param1): @@ -2273,7 +2284,7 @@ class TestFixtureMarker: testpath = testdir.makepyfile(""" import pytest - class Box: + class Box(object): value = 0 @pytest.fixture(scope='class') @@ -2284,11 +2295,11 @@ class TestFixtureMarker: def test_a(a): assert a == 1 - class Test1: + class Test1(object): def test_b(self, a): assert a == 2 - class Test2: + class Test2(object): def test_c(self, a): assert a == 3""") reprec = testdir.inline_run(testpath) @@ -2402,11 +2413,11 @@ class TestFixtureMarker: request.addfinalizer(lambda: l.append("fin %s" % request.param)) return request.param - class TestGreetings: + class TestGreetings(object): def test_hello(self, human): l.append("test_hello") - class TestMetrics: + class TestMetrics(object): def test_name(self, human): l.append("test_name") @@ -2499,7 +2510,7 @@ class TestFixtureMarker: '*test_foo*beta*']) -class TestRequestScopeAccess: +class TestRequestScopeAccess(object): pytestmark = pytest.mark.parametrize(("scope", "ok", "error"),[ ["session", "", "fspath class function module"], ["module", "module fspath", "cls function"], @@ -2543,7 +2554,7 @@ class TestRequestScopeAccess: reprec = testdir.inline_run() reprec.assertoutcome(passed=1) -class TestErrors: +class TestErrors(object): def test_subfactory_missing_funcarg(self, testdir): testdir.makepyfile(""" import pytest @@ -2607,7 +2618,7 @@ class TestErrors: "*1 error*", ]) -class TestShowFixtures: +class TestShowFixtures(object): def test_funcarg_compat(self, testdir): config = testdir.parseconfigure("--funcargs") assert config.option.showfixtures @@ -2770,7 +2781,7 @@ class TestShowFixtures: @pytest.mark.parametrize('flavor', ['fixture', 'yield_fixture']) -class TestContextManagerFixtureFuncs: +class TestContextManagerFixtureFuncs(object): def test_simple(self, testdir, flavor): testdir.makepyfile(""" @@ -2877,7 +2888,7 @@ class TestContextManagerFixtureFuncs: result = testdir.runpytest("-s") result.stdout.fnmatch_lines("*mew*") -class TestParameterizedSubRequest: +class TestParameterizedSubRequest(object): def test_call_from_fixture(self, testdir): testfile = testdir.makepyfile(""" import pytest diff --git a/testing/python/integration.py b/testing/python/integration.py index 6697342ea..4f888276b 100644 --- a/testing/python/integration.py +++ b/testing/python/integration.py @@ -3,7 +3,7 @@ from _pytest import python from _pytest import runner -class TestOEJSKITSpecials: +class TestOEJSKITSpecials(object): def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage testdir.makeconftest(""" import pytest @@ -19,7 +19,7 @@ class TestOEJSKITSpecials: @pytest.fixture def arg1(request): return 42 - class MyClass: + class MyClass(object): pass """) # this hook finds funcarg factories @@ -48,7 +48,7 @@ class TestOEJSKITSpecials: @pytest.fixture def arg1(request): return 42 - class MyClass: + class MyClass(object): pass """) # this hook finds funcarg factories @@ -76,7 +76,7 @@ def test_wrapped_getfslineno(): fs2, lineno2 = python.getfslineno(wrap) assert lineno > lineno2, "getfslineno does not unwrap correctly" -class TestMockDecoration: +class TestMockDecoration(object): def test_wrapped_getfuncargnames(self): 
from _pytest.compat import getfuncargnames @@ -207,7 +207,7 @@ class TestMockDecoration: @patch('os.getcwd') @patch('os.path') @mark.slow - class TestSimple: + class TestSimple(object): def test_simple_thing(self, mock_path, mock_getcwd): pass """) @@ -215,7 +215,7 @@ class TestMockDecoration: reprec.assertoutcome(passed=1) -class TestReRunTests: +class TestReRunTests(object): def test_rerun(self, testdir): testdir.makeconftest(""" from _pytest.runner import runtestprotocol @@ -251,7 +251,7 @@ def test_pytestconfig_is_session_scoped(): assert pytestconfig._pytestfixturefunction.scope == "session" -class TestNoselikeTestAttribute: +class TestNoselikeTestAttribute(object): def test_module_with_global_test(self, testdir): testdir.makepyfile(""" __test__ = False @@ -270,7 +270,7 @@ class TestNoselikeTestAttribute: pass test_func.__test__ = False - class TestSome: + class TestSome(object): __test__ = False def test_method(self): pass @@ -328,7 +328,7 @@ class TestNoselikeTestAttribute: @pytest.mark.issue351 -class TestParameterize: +class TestParameterize(object): def test_idfn_marker(self, testdir): testdir.makepyfile(""" diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index a7e1d5699..380dbf0e6 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -13,12 +13,12 @@ from hypothesis import strategies PY3 = sys.version_info >= (3, 0) -class TestMetafunc: +class TestMetafunc(object): def Metafunc(self, func): # the unit tests of this class check if things work correctly # on the funcarg level, so we don't need a full blown # initiliazation - class FixtureInfo: + class FixtureInfo(object): name2fixturedefs = None def __init__(self, names): @@ -68,7 +68,7 @@ class TestMetafunc: def func(arg1): pass metafunc = self.Metafunc(func) - class obj: pass + class obj(object): pass metafunc.addcall(param=obj) metafunc.addcall(param=obj) @@ -83,7 +83,7 @@ class TestMetafunc: metafunc = self.Metafunc(func) - class obj: pass + class obj(object): pass metafunc.addcall(funcargs={"x": 2}) metafunc.addcall(funcargs={"x": 3}) @@ -150,7 +150,7 @@ class TestMetafunc: def func(x, y): pass metafunc = self.Metafunc(func) - class A: + class A(object): pass metafunc.parametrize("x", [A(), A()]) @@ -207,37 +207,40 @@ class TestMetafunc: @pytest.mark.issue250 def test_idmaker_autoname(self): from _pytest.python import idmaker - result = idmaker(("a", "b"), [("string", 1.0), - ("st-ring", 2.0)]) + result = idmaker(("a", "b"), [pytest.param("string", 1.0), + pytest.param("st-ring", 2.0)]) assert result == ["string-1.0", "st-ring-2.0"] - result = idmaker(("a", "b"), [(object(), 1.0), - (object(), object())]) + result = idmaker(("a", "b"), [pytest.param(object(), 1.0), + pytest.param(object(), object())]) assert result == ["a0-1.0", "a1-b1"] # unicode mixing, issue250 - result = idmaker((py.builtin._totext("a"), "b"), [({}, b'\xc3\xb4')]) + result = idmaker( + (py.builtin._totext("a"), "b"), + [pytest.param({}, b'\xc3\xb4')]) assert result == ['a0-\\xc3\\xb4'] def test_idmaker_with_bytes_regex(self): from _pytest.python import idmaker - result = idmaker(("a"), [(re.compile(b'foo'), 1.0)]) + result = idmaker(("a"), [pytest.param(re.compile(b'foo'), 1.0)]) assert result == ["foo"] def test_idmaker_native_strings(self): from _pytest.python import idmaker totext = py.builtin._totext - result = idmaker(("a", "b"), [(1.0, -1.1), - (2, -202), - ("three", "three hundred"), - (True, False), - (None, None), - (re.compile('foo'), re.compile('bar')), - (str, int), - (list("six"), [66, 66]), - 
(set([7]), set("seven")), - (tuple("eight"), (8, -8, 8)), - (b'\xc3\xb4', b"name"), - (b'\xc3\xb4', totext("other")), + result = idmaker(("a", "b"), [ + pytest.param(1.0, -1.1), + pytest.param(2, -202), + pytest.param("three", "three hundred"), + pytest.param(True, False), + pytest.param(None, None), + pytest.param(re.compile('foo'), re.compile('bar')), + pytest.param(str, int), + pytest.param(list("six"), [66, 66]), + pytest.param(set([7]), set("seven")), + pytest.param(tuple("eight"), (8, -8, 8)), + pytest.param(b'\xc3\xb4', b"name"), + pytest.param(b'\xc3\xb4', totext("other")), ]) assert result == ["1.0--1.1", "2--202", @@ -257,7 +260,7 @@ class TestMetafunc: from _pytest.python import idmaker enum = pytest.importorskip("enum") e = enum.Enum("Foo", "one, two") - result = idmaker(("a", "b"), [(e.one, e.two)]) + result = idmaker(("a", "b"), [pytest.param(e.one, e.two)]) assert result == ["Foo.one-Foo.two"] @pytest.mark.issue351 @@ -268,9 +271,10 @@ class TestMetafunc: if isinstance(val, Exception): return repr(val) - result = idmaker(("a", "b"), [(10.0, IndexError()), - (20, KeyError()), - ("three", [1, 2, 3]), + result = idmaker(("a", "b"), [ + pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), ], idfn=ids) assert result == ["10.0-IndexError()", "20-KeyError()", @@ -284,9 +288,9 @@ class TestMetafunc: def ids(val): return 'a' - result = idmaker(("a", "b"), [(10.0, IndexError()), - (20, KeyError()), - ("three", [1, 2, 3]), + result = idmaker(("a", "b"), [pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), ], idfn=ids) assert result == ["a-a0", "a-a1", @@ -296,29 +300,78 @@ class TestMetafunc: @pytest.mark.issue351 def test_idmaker_idfn_exception(self): from _pytest.python import idmaker + from _pytest.recwarn import WarningsRecorder + + class BadIdsException(Exception): + pass def ids(val): - raise Exception("bad code") + raise BadIdsException("ids raised") - result = idmaker(("a", "b"), [(10.0, IndexError()), - (20, KeyError()), - ("three", [1, 2, 3]), - ], idfn=ids) - assert result == ["10.0-b0", - "20-b1", - "three-b2", - ] + rec = WarningsRecorder() + with rec: + idmaker(("a", "b"), [ + pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), + ], idfn=ids) + + assert [str(i.message) for i in rec.list] == [ + "Raised while trying to determine id of parameter a at position 0." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 0." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter a at position 1." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 1." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter a at position 2." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 2." + "\nUpdate your code as this will raise an error in pytest-4.0.", + ] + + + def test_parametrize_ids_exception(self, testdir): + """ + :param testdir: the instance of Testdir class, a temporary + test directory. 
+ """ + testdir.makepyfile(""" + import pytest + + def ids(arg): + raise Exception("bad ids") + + @pytest.mark.parametrize("arg", ["a", "b"], ids=ids) + def test_foo(arg): + pass + """) + with pytest.warns(DeprecationWarning): + result = testdir.runpytest("--collect-only") + result.stdout.fnmatch_lines([ + "", + " ", + " ", + ]) def test_idmaker_with_ids(self): from _pytest.python import idmaker - result = idmaker(("a", "b"), [(1, 2), - (3, 4)], + result = idmaker(("a", "b"), [pytest.param(1, 2), + pytest.param(3, 4)], ids=["a", None]) assert result == ["a", "3-4"] + def test_idmaker_with_paramset_id(self): + from _pytest.python import idmaker + result = idmaker(("a", "b"), [pytest.param(1, 2, id="me"), + pytest.param(3, 4, id="you")], + ids=["a", None]) + assert result == ["me", "you"] + def test_idmaker_with_ids_unique_names(self): from _pytest.python import idmaker - result = idmaker(("a"), [1,2,3,4,5], + result = idmaker(("a"), map(pytest.param, [1,2,3,4,5]), ids=["a", "a", "b", "c", "b"]) assert result == ["a0", "a1", "b0", "c", "b1"] @@ -561,7 +614,7 @@ class TestMetafunc: pytestmark = pytest.mark.parametrize("x", [1,2]) def test_func(x): assert 0, x - class TestClass: + class TestClass(object): pytestmark = pytest.mark.parametrize("y", [3,4]) def test_meth(self, x, y): assert 0, x @@ -632,7 +685,7 @@ class TestMetafunc: assert fixtures._format_args(function4) == "(arg1, *args, **kwargs)" -class TestMetafuncFunctional: +class TestMetafuncFunctional(object): def test_attributes(self, testdir): p = testdir.makepyfile(""" # assumes that generate/provide runs in the same process @@ -651,7 +704,7 @@ class TestMetafuncFunctional: assert metafunc.function == test_function assert metafunc.cls is None - class TestClass: + class TestClass(object): def test_method(self, metafunc, pytestconfig): assert metafunc.config == pytestconfig assert metafunc.module.__name__ == __name__ @@ -676,7 +729,7 @@ class TestMetafuncFunctional: def pytest_generate_tests(metafunc): metafunc.addcall(funcargs=dict(arg1=1, arg2=1)) - class TestClass: + class TestClass(object): def test_myfunc(self, arg1, arg2): assert arg1 == arg2 """) @@ -716,7 +769,7 @@ class TestMetafuncFunctional: def pytest_generate_tests(metafunc): assert 'xyz' not in metafunc.fixturenames - class TestHello: + class TestHello(object): def test_hello(xyz): pass """) @@ -742,7 +795,7 @@ class TestMetafuncFunctional: def arg2(request): return request.param[1] - class TestClass: + class TestClass(object): def test_myfunc(self, arg1, arg2): assert arg1 == arg2 """) @@ -755,7 +808,7 @@ class TestMetafuncFunctional: def test_generate_tests_in_class(self, testdir): p = testdir.makepyfile(""" - class TestClass: + class TestClass(object): def pytest_generate_tests(self, metafunc): metafunc.addcall(funcargs={'hello': 'world'}, id="hello") @@ -774,7 +827,7 @@ class TestMetafuncFunctional: metafunc.addcall({'arg1': 10}) metafunc.addcall({'arg1': 20}) - class TestClass: + class TestClass(object): def test_func(self, arg1): assert not hasattr(self, 'x') self.x = 1 @@ -791,7 +844,7 @@ class TestMetafuncFunctional: def pytest_generate_tests(metafunc): metafunc.addcall({'arg1': 1}) - class TestClass: + class TestClass(object): def test_method(self, arg1): assert arg1 == self.val def setup_method(self, func): @@ -1077,7 +1130,7 @@ class TestMetafuncFunctional: assert expectederror in failures[0].longrepr.reprcrash.message -class TestMetafuncFunctionalAuto: +class TestMetafuncFunctionalAuto(object): """ Tests related to automatically find out the correct scope for 
parametrized tests (#1832). """ @@ -1196,7 +1249,7 @@ class TestMetafuncFunctionalAuto: assert output.count('preparing foo-3') == 1 -class TestMarkersWithParametrization: +class TestMarkersWithParametrization(object): pytestmark = pytest.mark.issue308 def test_simple_mark(self, testdir): s = """ @@ -1398,6 +1451,31 @@ class TestMarkersWithParametrization: reprec = testdir.inline_run() reprec.assertoutcome(passed=2) + + @pytest.mark.parametrize('strict', [True, False]) + def test_parametrize_marked_value(self, testdir, strict): + s = """ + import pytest + + @pytest.mark.parametrize(("n", "expected"), [ + pytest.param( + 2,3, + marks=pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict}), + ), + pytest.param( + 2,3, + marks=[pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})], + ), + ]) + def test_increment(n, expected): + assert n + 1 == expected + """.format(strict=strict) + testdir.makepyfile(s) + reprec = testdir.inline_run() + passed, failed = (0, 2) if strict else (2, 0) + reprec.assertoutcome(passed=passed, failed=failed) + + def test_pytest_make_parametrize_id(self, testdir): testdir.makeconftest(""" def pytest_make_parametrize_id(config, val): @@ -1415,3 +1493,26 @@ class TestMarkersWithParametrization: "*test_func*0*PASS*", "*test_func*2*PASS*", ]) + + def test_pytest_make_parametrize_id_with_argname(self, testdir): + testdir.makeconftest(""" + def pytest_make_parametrize_id(config, val, argname): + return str(val * 2 if argname == 'x' else val * 10) + """) + testdir.makepyfile(""" + import pytest + + @pytest.mark.parametrize("x", range(2)) + def test_func_a(x): + pass + + @pytest.mark.parametrize("y", [1]) + def test_func_b(y): + pass + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*test_func_a*0*PASS*", + "*test_func_a*2*PASS*", + "*test_func_b*10*PASS*", + ]) diff --git a/testing/python/raises.py b/testing/python/raises.py index 8f141cfa1..21a6f808c 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -2,7 +2,7 @@ import pytest import sys -class TestRaises: +class TestRaises(object): def test_raises(self): source = "int('qwe')" excinfo = pytest.raises(ValueError, source) @@ -20,7 +20,7 @@ class TestRaises: pytest.raises(ValueError, int, 'hello') def test_raises_callable_no_exception(self): - class A: + class A(object): def __call__(self): pass try: @@ -28,14 +28,6 @@ class TestRaises: except pytest.raises.Exception: pass - def test_raises_flip_builtin_AssertionError(self): - # we replace AssertionError on python level - # however c code might still raise the builtin one - from _pytest.assertion.util import BuiltinAssertionError # noqa - pytest.raises(AssertionError,""" - raise BuiltinAssertionError - """) - def test_raises_as_contextmanager(self, testdir): testdir.makepyfile(""" from __future__ import with_statement @@ -126,3 +118,18 @@ class TestRaises: for o in gc.get_objects(): assert type(o) is not T + + def test_raises_match(self): + msg = r"with base \d+" + with pytest.raises(ValueError, match=msg): + int('asdf') + + msg = "with base 10" + with pytest.raises(ValueError, match=msg): + int('asdf') + + msg = "with base 16" + expr = r"Pattern '{0}' not found in 'invalid literal for int\(\) with base 10: 'asdf''".format(msg) + with pytest.raises(AssertionError, match=expr): + with pytest.raises(ValueError, match=msg): + int('asdf', base=10) diff --git a/testing/test_argcomplete.py b/testing/test_argcomplete.py index ace7d8ceb..6887c419c 100644 --- 
a/testing/test_argcomplete.py +++ b/testing/test_argcomplete.py @@ -1,4 +1,4 @@ -from __future__ import with_statement +from __future__ import absolute_import, division, print_function import py, pytest # test for _argcomplete but not specific for any application @@ -69,7 +69,7 @@ class FilesCompleter(object): completion += [f + '/' for f in anticomp] return completion -class TestArgComplete: +class TestArgComplete(object): @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") def test_compare_with_compgen(self): from _pytest._argcomplete import FastFilesCompleter diff --git a/testing/test_assertion.py b/testing/test_assertion.py index bc814590a..c385f6aa1 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function import sys import textwrap @@ -6,6 +7,7 @@ import _pytest.assertion as plugin import py import pytest from _pytest.assertion import util +from _pytest.assertion import truncate PY3 = sys.version_info >= (3, 0) @@ -24,7 +26,7 @@ def mock_config(): return Config() -class TestImportHookInstallation: +class TestImportHookInstallation(object): @pytest.mark.parametrize('initial_conftest', [True, False]) @pytest.mark.parametrize('mode', ['plain', 'rewrite']) @@ -158,7 +160,7 @@ class TestImportHookInstallation: plugin_state = "{plugin_state}" - class DummyDistInfo: + class DummyDistInfo(object): project_name = 'spam' version = '1.0' @@ -173,7 +175,7 @@ class TestImportHookInstallation: 'hampkg/__init__.py'] return [] - class DummyEntryPoint: + class DummyEntryPoint(object): name = 'spam' module_name = 'spam.py' attrs = () @@ -256,7 +258,7 @@ class TestImportHookInstallation: 'pytest_tests_internal_non_existing2') -class TestBinReprIntegration: +class TestBinReprIntegration(object): def test_pytest_assertrepr_compare_called(self, testdir): testdir.makeconftest(""" @@ -287,7 +289,7 @@ def callequal(left, right, verbose=False): return plugin.pytest_assertrepr_compare(config, '==', left, right) -class TestAssert_reprcompare: +class TestAssert_reprcompare(object): def test_different_types(self): assert callequal([0, 1], 'foo') is None @@ -381,8 +383,16 @@ class TestAssert_reprcompare: for line in lines[1:]: assert 'b' not in line - def test_dict_omitting_verbose(self): - lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=True) + def test_dict_omitting_with_verbosity_1(self): + """ Ensure differing items are visible for verbosity=1 (#1512) """ + lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=1) + assert lines[1].startswith('Omitting 1 identical item') + assert lines[2].startswith('Differing items') + assert lines[3] == "{'a': 0} != {'a': 1}" + assert 'Common items' not in lines + + def test_dict_omitting_with_verbosity_2(self): + lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=2) assert lines[1].startswith('Common items:') assert 'Omitting' not in lines[1] assert lines[2] == "{'b': 1}" @@ -433,7 +443,7 @@ class TestAssert_reprcompare: assert len(expl) > 1 def test_list_bad_repr(self): - class A: + class A(object): def __repr__(self): raise ValueError(42) expl = callequal([], [A()]) @@ -492,7 +502,7 @@ class TestAssert_reprcompare: assert msg -class TestFormatExplanation: +class TestFormatExplanation(object): def test_special_chars_full(self, testdir): # Issue 453, for the bug this would raise IndexError @@ -584,6 +594,111 @@ class TestFormatExplanation: assert util.format_explanation(expl) == res +class 
TestTruncateExplanation(object): + + """ Confirm assertion output is truncated as expected """ + + # The number of lines in the truncation explanation message. Used + # to calculate that results have the expected length. + LINES_IN_TRUNCATION_MSG = 2 + + def test_doesnt_truncate_when_input_is_empty_list(self): + expl = [] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100) + assert result == expl + + def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self): + expl = ['a' * 100 for x in range(5)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80) + assert result == expl + + def test_truncates_at_8_lines_when_given_list_of_empty_strings(self): + expl = ['' for x in range(50)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100) + assert result != expl + assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "43 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + assert last_line_before_trunc_msg.endswith("...") + + def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self): + expl = ['a' for x in range(100)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80) + assert result != expl + assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "93 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + assert last_line_before_trunc_msg.endswith("...") + + def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self): + expl = ['a' * 80 for x in range(16)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8*80) + assert result != expl + assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "9 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + assert last_line_before_trunc_msg.endswith("...") + + def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self): + expl = ['a' * 250 for x in range(10)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=999) + assert result != expl + assert len(result) == 4 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "7 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + assert last_line_before_trunc_msg.endswith("...") + + def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self): + expl = ['a' * 250 for x in range(1000)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100) + assert result != expl + assert len(result) == 1 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "1000 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG -1] + assert last_line_before_trunc_msg.endswith("...") + + def test_full_output_truncated(self, monkeypatch, testdir): + """ Test against full runpytest() output. 
""" + + line_count = 7 + line_len = 100 + expected_truncated_lines = 2 + testdir.makepyfile(r""" + def test_many_lines(): + a = list([str(i)[0] * %d for i in range(%d)]) + b = a[::2] + a = '\n'.join(map(str, a)) + b = '\n'.join(map(str, b)) + assert a == b + """ % (line_len, line_count)) + monkeypatch.delenv('CI', raising=False) + + result = testdir.runpytest() + # without -vv, truncate the message showing a few diff lines only + result.stdout.fnmatch_lines([ + "*- 1*", + "*- 3*", + "*- 5*", + "*truncated (%d lines hidden)*use*-vv*" % expected_truncated_lines, + ]) + + result = testdir.runpytest('-vv') + result.stdout.fnmatch_lines([ + "* 6*", + ]) + + monkeypatch.setenv('CI', '1') + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "* 6*", + ]) + + def test_python25_compile_issue257(testdir): testdir.makepyfile(""" def test_rewritten(): @@ -643,40 +758,6 @@ def test_sequence_comparison_uses_repr(testdir): ]) -def test_assert_compare_truncate_longmessage(monkeypatch, testdir): - testdir.makepyfile(r""" - def test_long(): - a = list(range(200)) - b = a[::2] - a = '\n'.join(map(str, a)) - b = '\n'.join(map(str, b)) - assert a == b - """) - monkeypatch.delenv('CI', raising=False) - - result = testdir.runpytest() - # without -vv, truncate the message showing a few diff lines only - result.stdout.fnmatch_lines([ - "*- 1", - "*- 3", - "*- 5", - "*- 7", - "*truncated (193 more lines)*use*-vv*", - ]) - - - result = testdir.runpytest('-vv') - result.stdout.fnmatch_lines([ - "*- 197", - ]) - - monkeypatch.setenv('CI', '1') - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*- 197", - ]) - - def test_assertrepr_loaded_per_dir(testdir): testdir.makepyfile(test_base=['def test_base(): assert 1 == 2']) a = testdir.mkdir('a') @@ -895,7 +976,10 @@ def test_assert_tuple_warning(testdir): assert(False, 'you shall not pass') """) result = testdir.runpytest('-rw') - result.stdout.fnmatch_lines('WR1*:2 assertion is always true*') + result.stdout.fnmatch_lines([ + '*test_assert_tuple_warning.py:2', + '*assertion is always true*', + ]) def test_assert_indirect_tuple_no_warning(testdir): testdir.makepyfile(""" @@ -945,4 +1029,3 @@ def test_issue_1944(testdir): result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 error*"]) assert "AttributeError: 'Module' object has no attribute '_obj'" not in result.stdout.str() - diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index 7cc58e8a8..11b5ce051 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import glob import os import py_compile @@ -57,7 +58,7 @@ def getmsg(f, extra_ns=None, must_pass=False): pytest.fail("function didn't raise at all") -class TestAssertionRewrite: +class TestAssertionRewrite(object): def test_place_initial_imports(self): s = """'Doc string'\nother = stuff""" @@ -333,7 +334,7 @@ class TestAssertionRewrite: @pytest.mark.skipif("sys.version_info < (3,5)") def test_at_operator_issue1290(self, testdir): testdir.makepyfile(""" - class Matrix: + class Matrix(object): def __init__(self, num): self.num = num def __matmul__(self, other): @@ -515,7 +516,7 @@ class TestAssertionRewrite: assert r"where 1 = \n{ \n~ \n}.a" in util._format_lines([getmsg(f)])[0] -class TestRewriteOnImport: +class TestRewriteOnImport(object): def test_pycache_is_a_file(self, testdir): testdir.tmpdir.join("__pycache__").write("Hello") @@ -711,6 +712,24 @@ def test_rewritten(): result.stdout.fnmatch_lines(['*= 1 
passed in *=*']) assert 'pytest-warning summary' not in result.stdout.str() + @pytest.mark.skipif(sys.version_info[0] > 2, reason='python 2 only') + def test_rewrite_future_imports(self, testdir): + """Test that rewritten modules don't inherit the __future__ flags + from the assertrewrite module. + + assertion.rewrite imports __future__.division (and others), so + ensure rewritten modules don't inherit those flags. + + The test below will fail if __future__.division is enabled + """ + testdir.makepyfile(''' + def test(): + x = 1 / 2 + assert type(x) is int + ''') + result = testdir.runpytest() + assert result.ret == 0 + class TestAssertionRewriteHookDetails(object): def test_loader_is_package_false_for_module(self, testdir): @@ -884,7 +903,7 @@ class TestAssertionRewriteHookDetails(object): """ path = testdir.mkpydir("foo") path.join("test_foo.py").write(_pytest._code.Source(""" - class Test: + class Test(object): def test_foo(self): import pkgutil data = pkgutil.get_data('foo.test_foo', 'data.txt') @@ -912,7 +931,7 @@ def test_issue731(testdir): assert 'unbalanced braces' not in result.stdout.str() -class TestIssue925(): +class TestIssue925(object): def test_simple_case(self, testdir): testdir.makepyfile(""" def test_ternary_display(): diff --git a/testing/test_cache.py b/testing/test_cache.py index 98053f869..600b5e6d9 100755 --- a/testing/test_cache.py +++ b/testing/test_cache.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import sys import _pytest @@ -7,7 +8,7 @@ import shutil pytest_plugins = "pytester", -class TestNewAPI: +class TestNewAPI(object): def test_config_cache_makedir(self, testdir): testdir.makeini("[pytest]") config = testdir.parseconfigure() @@ -54,7 +55,7 @@ class TestNewAPI: assert result.ret == 1 result.stdout.fnmatch_lines([ "*could not create cache path*", - "*1 pytest-warnings*", + "*1 warnings*", ]) def test_config_cache(self, testdir): @@ -129,7 +130,7 @@ def test_cache_show(testdir): ]) -class TestLastFailed: +class TestLastFailed(object): def test_lastfailed_usecase(self, testdir, monkeypatch): monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) @@ -191,13 +192,37 @@ class TestLastFailed: "test_a.py*", "test_b.py*", ]) - result = testdir.runpytest("--lf", "--ff") + result = testdir.runpytest("--ff") # Test order will be failing tests firs result.stdout.fnmatch_lines([ "test_b.py*", "test_a.py*", ]) + def test_lastfailed_failedfirst_order(self, testdir): + testdir.makepyfile(**{ + 'test_a.py': """ + def test_always_passes(): + assert 1 + """, + 'test_b.py': """ + def test_always_fails(): + assert 0 + """, + }) + result = testdir.runpytest() + # Test order will be collection order; alphabetical + result.stdout.fnmatch_lines([ + "test_a.py*", + "test_b.py*", + ]) + result = testdir.runpytest("--lf", "--ff") + # Test order will be failing tests firs + result.stdout.fnmatch_lines([ + "test_b.py*", + ]) + assert 'test_a.py' not in result.stdout.str() + def test_lastfailed_difference_invocations(self, testdir, monkeypatch): monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) testdir.makepyfile(test_a=""" diff --git a/testing/test_capture.py b/testing/test_capture.py index 978e67b7e..8f6f2ccb2 100644 --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function # note: py.io capture tests where copied from # pylib 1.4.20.dev2 (rev 13d9af95547e) from __future__ import with_statement @@ -14,7 +15,7 @@ import contextlib from _pytest import capture from 
_pytest.capture import CaptureManager from _pytest.main import EXIT_NOTESTSCOLLECTED -from py.builtin import print_ + needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')") @@ -56,7 +57,7 @@ def StdCapture(out=True, err=True, in_=True): return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture) -class TestCaptureManager: +class TestCaptureManager(object): def test_getmethod_default_no_fd(self, monkeypatch): from _pytest.capture import pytest_addoption from _pytest.config import Parser @@ -155,7 +156,7 @@ def test_collect_capturing(testdir): ]) -class TestPerTestCapturing: +class TestPerTestCapturing(object): def test_capture_and_fixtures(self, testdir): p = testdir.makepyfile(""" def setup_module(mod): @@ -276,13 +277,13 @@ class TestPerTestCapturing: ]) -class TestLoggingInteraction: +class TestLoggingInteraction(object): def test_logging_stream_ownership(self, testdir): p = testdir.makepyfile(""" def test_logging(): import logging import pytest - stream = capture.TextIO() + stream = capture.CaptureIO() logging.basicConfig(stream=stream) stream.close() # to free memory/release resources """) @@ -396,7 +397,7 @@ class TestLoggingInteraction: assert 'operation on closed file' not in result.stderr.str() -class TestCaptureFixture: +class TestCaptureFixture(object): @pytest.mark.parametrize("opt", [[], ["-s"]]) def test_std_functional(self, testdir, opt): reprec = testdir.inline_runsource(""" @@ -623,16 +624,16 @@ def test_error_during_readouterr(testdir): ]) -class TestTextIO: +class TestCaptureIO(object): def test_text(self): - f = capture.TextIO() + f = capture.CaptureIO() f.write("hello") s = f.getvalue() assert s == "hello" f.close() def test_unicode_and_str_mixture(self): - f = capture.TextIO() + f = capture.CaptureIO() if sys.version_info >= (3, 0): f.write("\u00f6") pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))") @@ -643,6 +644,18 @@ class TestTextIO: f.close() assert isinstance(s, unicode) + @pytest.mark.skipif( + sys.version_info[0] == 2, + reason='python 3 only behaviour', + ) + def test_write_bytes_to_buffer(self): + """In python3, stdout / stderr are text io wrappers (exposing a buffer + property of the underlying bytestream). 
See issue #1407 + """ + f = capture.CaptureIO() + f.buffer.write(b'foo\r\n') + assert f.getvalue() == 'foo\r\n' + def test_bytes_io(): f = py.io.BytesIO() @@ -700,7 +713,7 @@ def test_dupfile(tmpfile): assert nf != tmpfile assert nf.fileno() != tmpfile.fileno() assert nf not in flist - print_(i, end="", file=nf) + print(i, end="", file=nf) flist.append(nf) for i in range(5): f = flist[i] @@ -738,7 +751,7 @@ def lsof_check(): assert len2 < len1 + 3, out2 -class TestFDCapture: +class TestFDCapture(object): pytestmark = needsosdup def test_simple(self, tmpfile): @@ -774,7 +787,7 @@ class TestFDCapture: def test_stderr(self): cap = capture.FDCapture(2) cap.start() - print_("hello", file=sys.stderr) + print("hello", file=sys.stderr) s = cap.snap() cap.done() assert s == "hello\n" @@ -833,7 +846,7 @@ def saved_fd(fd): os.close(new_fd) -class TestStdCapture: +class TestStdCapture(object): captureclass = staticmethod(StdCapture) @contextlib.contextmanager @@ -901,8 +914,8 @@ class TestStdCapture: with self.getcapture() as cap: sys.stdout.write("hello") sys.stderr.write("world") - sys.stdout = capture.TextIO() - sys.stderr = capture.TextIO() + sys.stdout = capture.CaptureIO() + sys.stderr = capture.CaptureIO() print ("not seen") sys.stderr.write("not seen\n") out, err = cap.readouterr() @@ -991,7 +1004,7 @@ class TestStdCaptureFD(TestStdCapture): cap.stop_capturing() -class TestStdCaptureFDinvalidFD: +class TestStdCaptureFDinvalidFD(object): pytestmark = needsosdup def test_stdcapture_fd_invalid_fd(self, testdir): diff --git a/testing/test_collection.py b/testing/test_collection.py index 9cf4de895..c19fc0e72 100644 --- a/testing/test_collection.py +++ b/testing/test_collection.py @@ -1,8 +1,9 @@ +from __future__ import absolute_import, division, print_function import pytest, py from _pytest.main import Session, EXIT_NOTESTSCOLLECTED -class TestCollector: +class TestCollector(object): def test_collect_versus_item(self): from pytest import Collector, Item assert not issubclass(Collector, Item) @@ -50,7 +51,7 @@ class TestCollector: def test_getparent(self, testdir): modcol = testdir.getmodulecol(""" - class TestClass: + class TestClass(object): def test_foo(): pass """) @@ -85,7 +86,23 @@ class TestCollector: assert len(nodes) == 1 assert isinstance(nodes[0], pytest.File) -class TestCollectFS: + def test_can_skip_class_with_test_attr(self, testdir): + """Assure test class is skipped when using `__test__=False` (See #2007).""" + testdir.makepyfile(""" + class TestFoo(object): + __test__ = False + def __init__(self): + pass + def test_foo(): + assert True + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + 'collected 0 items', + '*no tests ran in*', + ]) + +class TestCollectFS(object): def test_ignored_certain_directories(self, testdir): tmpdir = testdir.tmpdir tmpdir.ensure("build", 'test_notfound.py') @@ -147,11 +164,11 @@ class TestCollectFS: assert [x.name for x in items] == ['test_%s' % dirname] -class TestCollectPluginHookRelay: +class TestCollectPluginHookRelay(object): def test_pytest_collect_file(self, testdir): wascalled = [] - class Plugin: + class Plugin(object): def pytest_collect_file(self, path, parent): if not path.basename.startswith("."): # Ignore hidden files, e.g. .testmondata. 
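The new ``test_can_skip_class_with_test_attr`` in this hunk exercises the ``__test__ = False`` opt-out from #2007. A minimal sketch of the behaviour from a project's point of view (file and class names below are illustrative, not taken from the patch):

# test_example.py -- hypothetical file, not part of this patch
class TestHelperMixin(object):
    __test__ = False              # pytest skips this class during collection

    def __init__(self):           # an __init__ would normally draw a collection warning,
        self.state = None         # but the class is never collected at all

    def test_template(self):
        assert True


class TestReal(object):
    def test_runs(self):
        assert True               # only this test is collected and run
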
@@ -165,7 +182,7 @@ class TestCollectPluginHookRelay: def test_pytest_collect_directory(self, testdir): wascalled = [] - class Plugin: + class Plugin(object): def pytest_collect_directory(self, path, parent): wascalled.append(path.basename) @@ -176,7 +193,7 @@ class TestCollectPluginHookRelay: assert "world" in wascalled -class TestPrunetraceback: +class TestPrunetraceback(object): def test_custom_repr_failure(self, testdir): p = testdir.makepyfile(""" @@ -222,7 +239,7 @@ class TestPrunetraceback: ]) -class TestCustomConftests: +class TestCustomConftests(object): def test_ignore_collect_path(self, testdir): testdir.makeconftest(""" def pytest_ignore_collect(path, config): @@ -317,7 +334,7 @@ class TestCustomConftests: "*test_x*" ]) -class TestSession: +class TestSession(object): def test_parsearg(self, testdir): p = testdir.makepyfile("def test_func(): pass") subdir = testdir.mkdir("sub") @@ -375,7 +392,7 @@ class TestSession: def test_collect_protocol_method(self, testdir): p = testdir.makepyfile(""" - class TestClass: + class TestClass(object): def test_method(self): pass """) @@ -474,7 +491,7 @@ class TestSession: def test_find_byid_without_instance_parents(self, testdir): p = testdir.makepyfile(""" - class TestClass: + class TestClass(object): def test_method(self): pass """) @@ -484,7 +501,7 @@ class TestSession: item, = items assert item.nodeid.endswith("TestClass::()::test_method") -class Test_getinitialnodes: +class Test_getinitialnodes(object): def test_global_file(self, testdir, tmpdir): x = tmpdir.ensure("x.py") with tmpdir.as_cwd(): @@ -511,7 +528,7 @@ class Test_getinitialnodes: for col in col.listchain(): assert col.config is config -class Test_genitems: +class Test_genitems(object): def test_check_collect_hashes(self, testdir): p = testdir.makepyfile(""" def test_1(): @@ -534,7 +551,7 @@ class Test_genitems: def testone(): pass - class TestX: + class TestX(object): def testmethod_one(self): pass @@ -567,11 +584,11 @@ class Test_genitems: python_functions = *_test test """) p = testdir.makepyfile(''' - class MyTestSuite: + class MyTestSuite(object): def x_test(self): pass - class TestCase: + class TestCase(object): def test_y(self): pass ''') @@ -586,7 +603,7 @@ def test_matchnodes_two_collections_same_file(testdir): def pytest_configure(config): config.pluginmanager.register(Plugin2()) - class Plugin2: + class Plugin2(object): def pytest_collect_file(self, path, parent): if path.ext == ".abc": return MyFile2(path, parent) @@ -618,7 +635,7 @@ def test_matchnodes_two_collections_same_file(testdir): ]) -class TestNodekeywords: +class TestNodekeywords(object): def test_no_under(self, testdir): modcol = testdir.getmodulecol(""" def test_pass(): pass diff --git a/testing/test_compat.py b/testing/test_compat.py index 1fdd07e29..7b2251ef6 100644 --- a/testing/test_compat.py +++ b/testing/test_compat.py @@ -1,7 +1,8 @@ +from __future__ import absolute_import, division, print_function import sys import pytest -from _pytest.compat import is_generator +from _pytest.compat import is_generator, get_real_func def test_is_generator(): @@ -15,7 +16,30 @@ def test_is_generator(): assert not is_generator(foo) -@pytest.mark.skipif(sys.version_info < (3, 4), reason='asyncio available in Python 3.4+') +def test_real_func_loop_limit(): + + class Evil(object): + def __init__(self): + self.left = 1000 + + def __repr__(self): + return "".format(left=self.left) + + def __getattr__(self, attr): + if not self.left: + raise RuntimeError('its over') + self.left -= 1 + return self + + evil = Evil() + + 
with pytest.raises(ValueError): + res = get_real_func(evil) + print(res) + + +@pytest.mark.skipif(sys.version_info < (3, 4), + reason='asyncio available in Python 3.4+') def test_is_generator_asyncio(testdir): testdir.makepyfile(""" from _pytest.compat import is_generator @@ -27,12 +51,14 @@ def test_is_generator_asyncio(testdir): def test_is_generator_asyncio(): assert not is_generator(baz) """) - # avoid importing asyncio into pytest's own process, which in turn imports logging (#8) + # avoid importing asyncio into pytest's own process, + # which in turn imports logging (#8) result = testdir.runpytest_subprocess() result.stdout.fnmatch_lines(['*1 passed*']) -@pytest.mark.skipif(sys.version_info < (3, 5), reason='async syntax available in Python 3.5+') +@pytest.mark.skipif(sys.version_info < (3, 5), + reason='async syntax available in Python 3.5+') def test_is_generator_async_syntax(testdir): testdir.makepyfile(""" from _pytest.compat import is_generator diff --git a/testing/test_config.py b/testing/test_config.py index b6ccd7085..0d8e6abfc 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1,10 +1,11 @@ +from __future__ import absolute_import, division, print_function import py, pytest import _pytest._code from _pytest.config import getcfg, get_common_ancestor, determine_setup from _pytest.main import EXIT_NOTESTSCOLLECTED -class TestParseIni: +class TestParseIni(object): @pytest.mark.parametrize('section, filename', [('pytest', 'pytest.ini'), ('tool:pytest', 'setup.cfg')]) @@ -84,7 +85,7 @@ class TestParseIni: result = testdir.inline_run("--confcutdir=.") assert result.ret == 0 -class TestConfigCmdlineParsing: +class TestConfigCmdlineParsing(object): def test_parsing_again_fails(self, testdir): config = testdir.parseconfig() pytest.raises(AssertionError, lambda: config.parse([])) @@ -115,7 +116,7 @@ class TestConfigCmdlineParsing: ret = pytest.main("-c " + temp_cfg_file) assert ret == _pytest.main.EXIT_OK -class TestConfigAPI: +class TestConfigAPI(object): def test_config_trace(self, testdir): config = testdir.parseconfig() l = [] @@ -140,7 +141,7 @@ class TestConfigAPI: from __future__ import unicode_literals def pytest_addoption(parser): - parser.addoption('--hello', type='string') + parser.addoption('--hello', type=str) """) config = testdir.parseconfig('--hello=this') assert config.getoption('hello') == 'this' @@ -304,7 +305,7 @@ class TestConfigAPI: assert config.getoption('confcutdir') == str(testdir.tmpdir.join('dir')) -class TestConfigFromdictargs: +class TestConfigFromdictargs(object): def test_basic_behavior(self): from _pytest.config import Config option_dict = { @@ -389,19 +390,19 @@ def test_preparse_ordering_with_setuptools(testdir, monkeypatch): def my_iter(name): assert name == "pytest11" - class Dist: + class Dist(object): project_name = 'spam' version = '1.0' def _get_metadata(self, name): return ['foo.txt,sha256=abc,123'] - class EntryPoint: + class EntryPoint(object): name = "mytestplugin" dist = Dist() def load(self): - class PseudoPlugin: + class PseudoPlugin(object): x = 42 return PseudoPlugin() @@ -423,14 +424,14 @@ def test_setuptools_importerror_issue1479(testdir, monkeypatch): def my_iter(name): assert name == "pytest11" - class Dist: + class Dist(object): project_name = 'spam' version = '1.0' def _get_metadata(self, name): return ['foo.txt,sha256=abc,123'] - class EntryPoint: + class EntryPoint(object): name = "mytestplugin" dist = Dist() @@ -450,14 +451,14 @@ def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch): def 
my_iter(name): assert name == "pytest11" - class Dist: + class Dist(object): project_name = 'spam' version = '1.0' def _get_metadata(self, name): return ['foo.txt,sha256=abc,123'] - class EntryPoint: + class EntryPoint(object): name = "mytestplugin" dist = Dist() @@ -557,7 +558,7 @@ def test_notify_exception(testdir, capfd): out, err = capfd.readouterr() assert "ValueError" in err - class A: + class A(object): def pytest_internalerror(self, excrepr): return True @@ -571,7 +572,7 @@ def test_load_initial_conftest_last_ordering(testdir): from _pytest.config import get_config pm = get_config().pluginmanager - class My: + class My(object): def pytest_load_initial_conftests(self): pass @@ -602,7 +603,7 @@ def test_get_plugin_specs_as_list(): assert _get_plugin_specs_as_list(('foo', 'bar')) == ['foo', 'bar'] -class TestWarning: +class TestWarning(object): def test_warn_config(self, testdir): testdir.makeconftest(""" l = [] @@ -632,16 +633,17 @@ class TestWarning: pass """) result = testdir.runpytest("--disable-pytest-warnings") - assert result.parseoutcomes()["pytest-warnings"] > 0 + assert result.parseoutcomes()["warnings"] > 0 assert "hello" not in result.stdout.str() result = testdir.runpytest() result.stdout.fnmatch_lines(""" - ===*pytest-warning summary*=== - *WT1*test_warn_on_test_item*:7 hello* + ===*warnings summary*=== + *test_warn_on_test_item_from_request.py::test_hello* + *hello* """) -class TestRootdir: +class TestRootdir(object): def test_simple_noini(self, tmpdir): assert get_common_ancestor([tmpdir]) == tmpdir a = tmpdir.mkdir("a") @@ -699,7 +701,7 @@ class TestRootdir: assert rootdir == tmpdir -class TestOverrideIniArgs: +class TestOverrideIniArgs(object): @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split()) def test_override_ini_names(self, testdir, name): testdir.tmpdir.join(name).write(py.std.textwrap.dedent(""" diff --git a/testing/test_conftest.py b/testing/test_conftest.py index c0fa74701..db67a0cc8 100644 --- a/testing/test_conftest.py +++ b/testing/test_conftest.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function from textwrap import dedent import _pytest._code @@ -24,14 +25,14 @@ def ConftestWithSetinitial(path): return conftest def conftest_setinitial(conftest, args, confcutdir=None): - class Namespace: + class Namespace(object): def __init__(self): self.file_or_dir = args self.confcutdir = str(confcutdir) self.noconftest = False conftest._set_initial_conftests(Namespace()) -class TestConftestValueAccessGlobal: +class TestConftestValueAccessGlobal(object): def test_basic_init(self, basedir): conftest = PytestPluginManager() p = basedir.join("adir") @@ -265,7 +266,7 @@ def test_conftest_found_with_double_dash(testdir): """) -class TestConftestVisibility: +class TestConftestVisibility(object): def _setup_tree(self, testdir): # for issue616 # example mostly taken from: # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html @@ -398,7 +399,7 @@ def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error): def test_issue1073_conftest_special_objects(testdir): testdir.makeconftest(""" - class DontTouchMe: + class DontTouchMe(object): def __getattr__(self, x): raise Exception('cant touch me') diff --git a/testing/test_doctest.py b/testing/test_doctest.py index faf75ef33..82597b477 100644 --- a/testing/test_doctest.py +++ b/testing/test_doctest.py @@ -1,4 +1,5 @@ # encoding: utf-8 +from __future__ import absolute_import, division, print_function import sys import _pytest._code from 
_pytest.compat import MODULE_NOT_FOUND_ERROR @@ -6,7 +7,7 @@ from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile import pytest -class TestDoctests: +class TestDoctests(object): def test_collect_testtextfile(self, testdir): w = testdir.maketxtfile(whatever="") @@ -131,6 +132,33 @@ class TestDoctests: '*1 passed*', ]) + @pytest.mark.parametrize( + ' test_string, encoding', + [ + (u'foo', 'ascii'), + (u'öäü', 'latin1'), + (u'öäü', 'utf-8') + ] + ) + def test_encoding(self, testdir, test_string, encoding): + """Test support for doctest_encoding ini option. + """ + testdir.makeini(""" + [pytest] + doctest_encoding={0} + """.format(encoding)) + doctest = u""" + >>> u"{0}" + {1} + """.format(test_string, repr(test_string)) + testdir._makefile(".txt", [doctest], {}, encoding=encoding) + + result = testdir.runpytest() + + result.stdout.fnmatch_lines([ + '*1 passed*', + ]) + def test_doctest_unexpected_exception(self, testdir): testdir.maketxtfile(""" >>> i = 0 @@ -351,7 +379,7 @@ class TestDoctests: def test_doctestmodule_two_tests_one_fail(self, testdir): p = testdir.makepyfile(""" - class MyClass: + class MyClass(object): def bad_meth(self): ''' >>> magic = 42 @@ -374,7 +402,7 @@ class TestDoctests: doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE """) p = testdir.makepyfile(""" - class MyClass: + class MyClass(object): ''' >>> a = "foo " >>> print(a) @@ -391,7 +419,7 @@ class TestDoctests: doctest_optionflags = ELLIPSIS """) p = testdir.makepyfile(""" - class MyClass: + class MyClass(object): ''' >>> a = "foo " >>> print(a) @@ -478,7 +506,7 @@ class TestDoctests: reprec.assertoutcome(failed=1) -class TestLiterals: +class TestLiterals(object): @pytest.mark.parametrize('config_mode', ['ini', 'comment']) def test_allow_unicode(self, testdir, config_mode): @@ -565,7 +593,7 @@ class TestLiterals: reprec.assertoutcome(passed=passed, failed=int(not passed)) -class TestDoctestSkips: +class TestDoctestSkips(object): """ If all examples in a doctest are skipped due to the SKIP option, then the tests should be SKIPPED rather than PASSED. 
(#957) @@ -619,7 +647,7 @@ class TestDoctestSkips: reprec.assertoutcome(passed=0, skipped=0) -class TestDoctestAutoUseFixtures: +class TestDoctestAutoUseFixtures(object): SCOPES = ['module', 'session', 'class', 'function'] @@ -738,7 +766,7 @@ class TestDoctestAutoUseFixtures: result.stdout.fnmatch_lines(['*=== 1 passed in *']) -class TestDoctestNamespaceFixture: +class TestDoctestNamespaceFixture(object): SCOPES = ['module', 'session', 'class', 'function'] @@ -788,7 +816,7 @@ class TestDoctestNamespaceFixture: reprec.assertoutcome(passed=1) -class TestDoctestReportingOption: +class TestDoctestReportingOption(object): def _run_doctest_report(self, testdir, format): testdir.makepyfile(""" def foo(): diff --git a/testing/test_entry_points.py b/testing/test_entry_points.py index 370b93129..6ca68b481 100644 --- a/testing/test_entry_points.py +++ b/testing/test_entry_points.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pkg_resources import pytest diff --git a/testing/test_helpconfig.py b/testing/test_helpconfig.py index fc3c8fdf6..41fa953ad 100644 --- a/testing/test_helpconfig.py +++ b/testing/test_helpconfig.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function from _pytest.main import EXIT_NOTESTSCOLLECTED import pytest @@ -32,7 +33,7 @@ def test_hookvalidation_unknown(testdir): """) result = testdir.runpytest() assert result.ret != 0 - result.stderr.fnmatch_lines([ + result.stdout.fnmatch_lines([ '*unknown hook*pytest_hello*' ]) diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index d167f735d..bc637b035 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- - +from __future__ import absolute_import, division, print_function from xml.dom import minidom import py import sys @@ -79,7 +79,7 @@ class DomNode(object): return type(self)(self.__node.nextSibling) -class TestPython: +class TestPython(object): def test_summing_simple(self, testdir): testdir.makepyfile(""" import pytest @@ -189,6 +189,29 @@ class TestPython: fnode.assert_attr(message="test teardown failure") assert "ValueError" in fnode.toxml() + def test_call_failure_teardown_error(self, testdir): + testdir.makepyfile(""" + import pytest + + @pytest.fixture + def arg(): + yield + raise Exception("Teardown Exception") + def test_function(arg): + raise Exception("Call Exception") + """) + result, dom = runandparse(testdir) + assert result.ret + node = dom.find_first_by_tag("testsuite") + node.assert_attr(errors=1, failures=1, tests=1) + first, second = dom.find_by_tag("testcase") + if not first or not second or first == second: + assert 0 + fnode = first.find_first_by_tag("failure") + fnode.assert_attr(message="Exception: Call Exception") + snode = second.find_first_by_tag("error") + snode.assert_attr(message="test teardown failure") + def test_skip_contains_name_reason(self, testdir): testdir.makepyfile(""" import pytest @@ -263,7 +286,7 @@ class TestPython: def test_classname_instance(self, testdir): testdir.makepyfile(""" - class TestClass: + class TestClass(object): def test_method(self): assert 0 """) @@ -376,7 +399,7 @@ class TestPython: testdir.makepyfile(""" def test_func(): assert 0 - class TestHello: + class TestHello(object): def test_hello(self): pass """) @@ -588,11 +611,14 @@ def test_mangle_test_address(): def test_dont_configure_on_slaves(tmpdir): gotten = [] - class FakeConfig: + class FakeConfig(object): def __init__(self): self.pluginmanager = self self.option = self + def 
getini(self, name): + return "pytest" + junitprefix = None # XXX: shouldnt need tmpdir ? xmlpath = str(tmpdir.join('junix.xml')) @@ -607,7 +633,7 @@ def test_dont_configure_on_slaves(tmpdir): assert len(gotten) == 1 -class TestNonPython: +class TestNonPython(object): def test_summing_simple(self, testdir): testdir.makeconftest(""" import pytest @@ -769,7 +795,7 @@ def test_double_colon_split_function_issue469(testdir): def test_double_colon_split_method_issue469(testdir): testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): @pytest.mark.parametrize('param', ["double::colon"]) def test_func(self, param): pass @@ -829,7 +855,10 @@ def test_record_property(testdir): pnodes = psnode.find_by_tag('property') pnodes[0].assert_attr(name="bar", value="1") pnodes[1].assert_attr(name="foo", value="<1") - result.stdout.fnmatch_lines('*C3*test_record_property.py*experimental*') + result.stdout.fnmatch_lines([ + 'test_record_property.py::test_record', + '*record_xml_property*experimental*', + ]) def test_record_property_same_name(testdir): @@ -981,3 +1010,51 @@ def test_global_properties(testdir): actual[k] = v assert actual == expected + + +def test_url_property(testdir): + test_url = "http://www.github.com/pytest-dev" + path = testdir.tmpdir.join("test_url_property.xml") + log = LogXML(str(path), None) + from _pytest.runner import BaseReport + + class Report(BaseReport): + longrepr = "FooBarBaz" + sections = [] + nodeid = "something" + location = 'tests/filename.py', 42, 'TestClass.method' + url = test_url + + test_report = Report() + + log.pytest_sessionstart() + node_reporter = log._opentestcase(test_report) + node_reporter.append_failure(test_report) + log.pytest_sessionfinish() + + test_case = minidom.parse(str(path)).getElementsByTagName('testcase')[0] + + assert (test_case.getAttribute('url') == test_url), "The URL did not get written to the xml" + + +@pytest.mark.parametrize('suite_name', ['my_suite', '']) +def test_set_suite_name(testdir, suite_name): + if suite_name: + testdir.makeini(""" + [pytest] + junit_suite_name={0} + """.format(suite_name)) + expected = suite_name + else: + expected = 'pytest' + testdir.makepyfile(""" + import pytest + + def test_func(): + pass + """) + result, dom = runandparse(testdir) + assert result.ret == 0 + node = dom.find_first_by_tag("testsuite") + node.assert_attr(name=expected) + diff --git a/testing/test_mark.py b/testing/test_mark.py index 8f460586b..0792b04fd 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -1,17 +1,21 @@ +from __future__ import absolute_import, division, print_function import os +import sys -import py, pytest -from _pytest.mark import MarkGenerator as Mark +import pytest +from _pytest.mark import MarkGenerator as Mark, ParameterSet -class TestMark: +class TestMark(object): def test_markinfo_repr(self): - from _pytest.mark import MarkInfo - m = MarkInfo("hello", (1,2), {}) + from _pytest.mark import MarkInfo, Mark + m = MarkInfo(Mark("hello", (1,2), {})) repr(m) - def test_pytest_exists_in_namespace_all(self): - assert 'mark' in py.test.__all__ - assert 'mark' in pytest.__all__ + @pytest.mark.parametrize('attr', ['mark', 'param']) + @pytest.mark.parametrize('modulename', ['py.test', 'pytest']) + def test_pytest_exists_in_namespace_all(self, attr, modulename): + module = sys.modules[modulename] + assert attr in module.__all__ def test_pytest_mark_notcallable(self): mark = Mark() @@ -318,7 +322,7 @@ def test_parametrized_collect_with_wrong_args(testdir): ]) -class TestFunctional: +class 
TestFunctional(object): def test_mark_per_function(self, testdir): p = testdir.makepyfile(""" @@ -343,7 +347,7 @@ class TestFunctional: def test_marklist_per_class(self, testdir): item = testdir.getitem(""" import pytest - class TestClass: + class TestClass(object): pytestmark = [pytest.mark.hello, pytest.mark.world] def test_func(self): assert TestClass.test_func.hello @@ -356,7 +360,7 @@ class TestFunctional: item = testdir.getitem(""" import pytest pytestmark = [pytest.mark.hello, pytest.mark.world] - class TestClass: + class TestClass(object): def test_func(self): assert TestClass.test_func.hello assert TestClass.test_func.world @@ -369,7 +373,7 @@ class TestFunctional: item = testdir.getitem(""" import pytest @pytest.mark.hello - class TestClass: + class TestClass(object): def test_func(self): assert TestClass.test_func.hello """) @@ -380,7 +384,7 @@ class TestFunctional: item = testdir.getitem(""" import pytest @pytest.mark.hello - class TestClass: + class TestClass(object): pytestmark = pytest.mark.world def test_func(self): assert TestClass.test_func.hello @@ -394,7 +398,7 @@ class TestFunctional: p = testdir.makepyfile(""" import pytest pytestmark = pytest.mark.hello("pos1", x=1, y=2) - class TestClass: + class TestClass(object): # classlevel overrides module level pytestmark = pytest.mark.hello(x=3) @pytest.mark.hello("pos0", z=4) @@ -420,18 +424,18 @@ class TestFunctional: # issue 199 - propagate markers into nested classes p = testdir.makepyfile(""" import pytest - class TestA: + class TestA(object): pytestmark = pytest.mark.a def test_b(self): assert True - class TestC: + class TestC(object): # this one didnt get marked def test_d(self): assert True """) items, rec = testdir.inline_genitems(p) for item in items: - print (item, item.keywords) + print(item, item.keywords) assert 'a' in item.keywords def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir): @@ -439,7 +443,7 @@ class TestFunctional: import pytest @pytest.mark.a - class Base: pass + class Base(object): pass @pytest.mark.b class Test1(Base): @@ -458,7 +462,7 @@ class TestFunctional: p = testdir.makepyfile(""" import pytest - class TestBase: + class TestBase(object): def test_foo(self): pass @@ -482,7 +486,7 @@ class TestFunctional: import pytest @pytest.mark.a - class Base: pass + class Base(object): pass @pytest.mark.b class Base2(Base): pass @@ -502,7 +506,7 @@ class TestFunctional: def test_mark_with_wrong_marker(self, testdir): reprec = testdir.inline_runsource(""" import pytest - class pytestmark: + class pytestmark(object): pass def test_func(): pass @@ -647,7 +651,7 @@ class TestFunctional: reprec.assertoutcome(skipped=1) -class TestKeywordSelection: +class TestKeywordSelection(object): def test_select_simple(self, testdir): file_test = testdir.makepyfile(""" @@ -676,7 +680,7 @@ class TestKeywordSelection: p = testdir.makepyfile(test_select=""" def test_1(): pass - class TestClass: + class TestClass(object): def test_2(self): pass """) @@ -690,7 +694,7 @@ class TestKeywordSelection: item.extra_keyword_matches.add("xxx") """) reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword) - py.builtin.print_("keyword", repr(keyword)) + print("keyword", repr(keyword)) passed, skipped, failed = reprec.listoutcomes() assert len(passed) == 1 assert passed[0].nodeid.endswith("test_2") @@ -755,3 +759,16 @@ class TestKeywordSelection: assert_test_is_not_selected("__") assert_test_is_not_selected("()") + + +@pytest.mark.parametrize('argval, expected', [ + (pytest.mark.skip()((1, 2)), + 
ParameterSet(values=(1, 2), marks=[pytest.mark.skip], id=None)), + (pytest.mark.xfail(pytest.mark.skip()((1, 2))), + ParameterSet(values=(1, 2), + marks=[pytest.mark.xfail, pytest.mark.skip], id=None)), + +]) +def test_parameterset_extractfrom(argval, expected): + extracted = ParameterSet.extract_from(argval) + assert extracted == expected diff --git a/testing/test_modimport.py b/testing/test_modimport.py new file mode 100644 index 000000000..2ab86bf7a --- /dev/null +++ b/testing/test_modimport.py @@ -0,0 +1,25 @@ +import py +import subprocess +import sys +import pytest +import _pytest + +MODSET = [ + x for x in py.path.local(_pytest.__file__).dirpath().visit('*.py') + if x.purebasename != '__init__' +] + + +@pytest.mark.parametrize('modfile', MODSET, ids=lambda x: x.purebasename) +def test_fileimport(modfile): + # this test ensures all internal packages can import + # without needing the pytest namespace being set + # this is critical for the initialization of xdist + + res = subprocess.call([ + sys.executable, + '-c', 'import sys, py; py.path.local(sys.argv[1]).pyimport()', + modfile.strpath, + ]) + if res: + pytest.fail("command result %s" % res) diff --git a/testing/test_monkeypatch.py b/testing/test_monkeypatch.py index 9d02e2cc0..1efcf7f95 100644 --- a/testing/test_monkeypatch.py +++ b/testing/test_monkeypatch.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import os import sys import textwrap @@ -16,7 +17,7 @@ def mp(): def test_setattr(): - class A: + class A(object): x = 1 monkeypatch = MonkeyPatch() @@ -39,7 +40,7 @@ def test_setattr(): assert A.x == 5 -class TestSetattrWithImportPath: +class TestSetattrWithImportPath(object): def test_string_expression(self, monkeypatch): monkeypatch.setattr("os.path.abspath", lambda x: "hello2") assert os.path.abspath("123") == "hello2" @@ -79,7 +80,7 @@ class TestSetattrWithImportPath: def test_delattr(): - class A: + class A(object): x = 1 monkeypatch = MonkeyPatch() @@ -294,7 +295,7 @@ class SampleNewInherit(SampleNew): pass -class SampleOld: +class SampleOld(object): # oldstyle on python2 @staticmethod def hello(): diff --git a/testing/test_nose.py b/testing/test_nose.py index f54246111..798badc1c 100644 --- a/testing/test_nose.py +++ b/testing/test_nose.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pytest def setup_module(mod): @@ -26,7 +27,7 @@ def test_setup_func_with_setup_decorator(): from _pytest.nose import call_optional l = [] - class A: + class A(object): @pytest.fixture(autouse=True) def f(self): l.append(1) @@ -38,7 +39,7 @@ def test_setup_func_with_setup_decorator(): def test_setup_func_not_callable(): from _pytest.nose import call_optional - class A: + class A(object): f = 1 call_optional(A(), "f") @@ -270,7 +271,7 @@ def test_nose_setup_ordering(testdir): def setup_module(mod): mod.visited = True - class TestClass: + class TestClass(object): def setup(self): assert visited def test_first(self): @@ -377,7 +378,7 @@ def test_istest_class_decorator(testdir): p = testdir.makepyfile(""" import nose.tools @nose.tools.istest - class NotTestPrefix: + class NotTestPrefix(object): def test_method(self): pass """) @@ -388,7 +389,7 @@ def test_nottest_class_decorator(testdir): testdir.makepyfile(""" import nose.tools @nose.tools.nottest - class TestPrefix: + class TestPrefix(object): def test_method(self): pass """) diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index e933dbb8d..38542783a 100644 --- a/testing/test_parseopt.py +++ 
b/testing/test_parseopt.py @@ -1,4 +1,4 @@ -from __future__ import with_statement +from __future__ import absolute_import, division, print_function import sys import os import py, pytest @@ -8,7 +8,7 @@ from _pytest import config as parseopt def parser(): return parseopt.Parser() -class TestParser: +class TestParser(object): def test_no_help_by_default(self, capsys): parser = parseopt.Parser(usage="xyz") pytest.raises(SystemExit, lambda: parser.parse(["-h"])) @@ -34,15 +34,16 @@ class TestParser: ) def test_argument_type(self): - argument = parseopt.Argument('-t', dest='abc', type='int') + argument = parseopt.Argument('-t', dest='abc', type=int) assert argument.type is int - argument = parseopt.Argument('-t', dest='abc', type='string') + argument = parseopt.Argument('-t', dest='abc', type=str) assert argument.type is str argument = parseopt.Argument('-t', dest='abc', type=float) assert argument.type is float - with pytest.raises(KeyError): - argument = parseopt.Argument('-t', dest='abc', type='choice') - argument = parseopt.Argument('-t', dest='abc', type='choice', + with pytest.warns(DeprecationWarning): + with pytest.raises(KeyError): + argument = parseopt.Argument('-t', dest='abc', type='choice') + argument = parseopt.Argument('-t', dest='abc', type=str, choices=['red', 'blue']) assert argument.type is str @@ -139,7 +140,7 @@ class TestParser: parser.addoption("--hello", dest="hello", action="store") parser.addoption("--world", dest="world", default=42) - class A: + class A(object): pass option = A() @@ -176,8 +177,8 @@ class TestParser: elif option.type is str: option.default = "world" parser = parseopt.Parser(processopt=defaultget) - parser.addoption("--this", dest="this", type="int", action="store") - parser.addoption("--hello", dest="hello", type="string", action="store") + parser.addoption("--this", dest="this", type=int, action="store") + parser.addoption("--hello", dest="hello", type=str, action="store") parser.addoption("--no", dest="no", action="store_true") option = parser.parse([]) assert option.hello == "world" diff --git a/testing/test_pastebin.py b/testing/test_pastebin.py index 8123424ca..3fe66e972 100644 --- a/testing/test_pastebin.py +++ b/testing/test_pastebin.py @@ -1,8 +1,9 @@ # encoding: utf-8 +from __future__ import absolute_import, division, print_function import sys import pytest -class TestPasteCapture: +class TestPasteCapture(object): @pytest.fixture def pastebinlist(self, monkeypatch, request): @@ -71,7 +72,7 @@ class TestPasteCapture: ]) -class TestPaste: +class TestPaste(object): @pytest.fixture def pastebin(self, request): @@ -88,7 +89,7 @@ class TestPaste: def mocked(url, data): calls.append((url, data)) - class DummyFile: + class DummyFile(object): def read(self): # part of html of a normal response return b'View raw.' 
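The ``test_parseopt.py`` hunks above switch option declarations from the old optparse-style type names ('int', 'string') to real Python callables; the string form now only triggers a DeprecationWarning (the ``type argument to addoption`` warning also ignored in tox.ini below). A sketch of what that looks like in a plugin's ``pytest_addoption`` hook (option names are illustrative, not from the patch):

# conftest.py -- illustrative sketch, not part of this patch
def pytest_addoption(parser):
    # pass the type object itself; type='int' / type='string' now warn
    parser.addoption("--iterations", type=int, default=1,
                     help="example option: repeat count")
    parser.addoption("--label", type=str, default="",
                     help="example option: free-form label")
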
diff --git a/testing/test_pdb.py b/testing/test_pdb.py index 52a75d916..ec5862082 100644 --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import sys import platform @@ -13,7 +14,27 @@ def runpdb_and_get_report(testdir, source): return reports[1] -class TestPDB: +@pytest.fixture +def custom_pdb_calls(): + called = [] + + # install dummy debugger class and track which methods were called on it + class _CustomPdb(object): + def __init__(self, *args, **kwargs): + called.append("init") + + def reset(self): + called.append("reset") + + def interaction(self, *args): + called.append("interaction") + + _pytest._CustomPdb = _CustomPdb + return called + + + +class TestPDB(object): @pytest.fixture def pdblist(self, request): @@ -346,22 +367,18 @@ class TestPDB: child.sendeof() self.flush(child) - def test_pdb_custom_cls(self, testdir): - called = [] + def test_pdb_custom_cls(self, testdir, custom_pdb_calls): + p1 = testdir.makepyfile("""xxx """) + result = testdir.runpytest_inprocess( + "--pdb", "--pdbcls=_pytest:_CustomPdb", p1) + result.stdout.fnmatch_lines([ + "*NameError*xxx*", + "*1 error*", + ]) + assert custom_pdb_calls == ["init", "reset", "interaction"] - # install dummy debugger class and track which methods were called on it - class _CustomPdb: - def __init__(self, *args, **kwargs): - called.append("init") - - def reset(self): - called.append("reset") - - def interaction(self, *args): - called.append("interaction") - - _pytest._CustomPdb = _CustomPdb + def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls): p1 = testdir.makepyfile("""xxx """) result = testdir.runpytest_inprocess( "--pdbcls=_pytest:_CustomPdb", p1) @@ -369,4 +386,23 @@ class TestPDB: "*NameError*xxx*", "*1 error*", ]) - assert called == ["init", "reset", "interaction"] + assert custom_pdb_calls == [] + + def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch): + testdir.makepyfile(custom_pdb=""" + class CustomPdb(object): + def set_trace(*args, **kwargs): + print 'custom set_trace>' + """) + p1 = testdir.makepyfile(""" + import pytest + + def test_foo(): + pytest.set_trace() + """) + monkeypatch.setenv('PYTHONPATH', str(testdir.tmpdir)) + child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1)) + + child.expect('custom set_trace>') + if child.isalive(): + child.wait() diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index 45ad321a3..1f0f4f602 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -1,4 +1,5 @@ # encoding: UTF-8 +from __future__ import absolute_import, division, print_function import pytest import py import os @@ -11,7 +12,7 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED, Session def pytestpm(): return PytestPluginManager() -class TestPytestPluginInteractions: +class TestPytestPluginInteractions(object): def test_addhooks_conftestplugin(self, testdir): testdir.makepyfile(newhooks=""" def pytest_myhook(xyz): @@ -85,7 +86,7 @@ class TestPytestPluginInteractions: config = testdir.parseconfig() l = [] - class A: + class A(object): def pytest_configure(self, config): l.append(self) @@ -105,11 +106,11 @@ class TestPytestPluginInteractions: pytestpm = get_config().pluginmanager # fully initialized with plugins saveindent = [] - class api1: + class api1(object): def pytest_plugin_registered(self): saveindent.append(pytestpm.trace.root.indent) - class api2: + class api2(object): def pytest_plugin_registered(self): 
saveindent.append(pytestpm.trace.root.indent) raise ValueError() @@ -156,11 +157,11 @@ class TestPytestPluginInteractions: def test_warn_on_deprecated_multicall(self, pytestpm): warnings = [] - class get_warnings: + class get_warnings(object): def pytest_logwarning(self, message): warnings.append(message) - class Plugin: + class Plugin(object): def pytest_configure(self, __multicall__): pass @@ -173,11 +174,11 @@ class TestPytestPluginInteractions: def test_warn_on_deprecated_addhooks(self, pytestpm): warnings = [] - class get_warnings: + class get_warnings(object): def pytest_logwarning(self, code, fslocation, message, nodeid): warnings.append(message) - class Plugin: + class Plugin(object): def pytest_testhook(): pass @@ -221,7 +222,7 @@ def test_importplugin_error_message(testdir, pytestpm): assert py.std.re.match(expected, str(excinfo.value)) -class TestPytestPluginManager: +class TestPytestPluginManager(object): def test_register_imported_modules(self): pm = PytestPluginManager() mod = py.std.types.ModuleType("x.y.pytest_hello") @@ -284,8 +285,8 @@ class TestPytestPluginManager: result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True) assert result.ret == EXIT_NOTESTSCOLLECTED result.stdout.fnmatch_lines([ - "WI1*skipped plugin*skipping1*hello*", - "WI1*skipped plugin*skipping2*hello*", + "*skipped plugin*skipping1*hello*", + "*skipped plugin*skipping2*hello*", ]) def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm): @@ -348,7 +349,7 @@ class TestPytestPluginManager: pytestpm.consider_conftest(mod) -class TestPytestPluginManagerBootstrapming: +class TestPytestPluginManagerBootstrapming(object): def test_preparse_args(self, pytestpm): pytest.raises(ImportError, lambda: pytestpm.consider_preparse(["xyz", "-p", "hello123"])) diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 49cf43a3e..932427ad3 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pytest import os from _pytest.pytester import HookRecorder @@ -12,7 +13,7 @@ def test_make_hook_recorder(testdir): pytest.xfail("internal reportrecorder tests need refactoring") - class rep: + class rep(object): excinfo = None passed = False failed = True @@ -25,7 +26,7 @@ def test_make_hook_recorder(testdir): failures = recorder.getfailures() assert failures == [rep] - class rep: + class rep(object): excinfo = None passed = False failed = False @@ -74,7 +75,7 @@ def test_testdir_runs_with_plugin(testdir): def make_holder(): - class apiclass: + class apiclass(object): def pytest_xyz(self, arg): "x" def pytest_xyz_noarg(self): diff --git a/testing/test_recwarn.py b/testing/test_recwarn.py index 0f3bf6a14..0f921f057 100644 --- a/testing/test_recwarn.py +++ b/testing/test_recwarn.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import warnings import re import py @@ -8,25 +9,19 @@ from _pytest.recwarn import WarningsRecorder def test_recwarn_functional(testdir): reprec = testdir.inline_runsource(""" import warnings - oldwarn = warnings.showwarning def test_method(recwarn): - assert warnings.showwarning != oldwarn warnings.warn("hello") warn = recwarn.pop() assert isinstance(warn.message, UserWarning) - def test_finalized(): - assert warnings.showwarning == oldwarn """) res = reprec.countoutcomes() - assert tuple(res) == (2, 0, 0), res + assert tuple(res) == (1, 0, 0), res class TestWarningsRecorderChecker(object): - def test_recording(self, 
recwarn): - showwarning = py.std.warnings.showwarning + def test_recording(self): rec = WarningsRecorder() with rec: - assert py.std.warnings.showwarning != showwarning assert not rec.list py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13) assert len(rec.list) == 1 @@ -40,8 +35,6 @@ class TestWarningsRecorderChecker(object): assert l is rec.list pytest.raises(AssertionError, "rec.pop()") - assert showwarning == py.std.warnings.showwarning - def test_typechecking(self): from _pytest.recwarn import WarningsChecker with pytest.raises(TypeError): @@ -112,10 +105,9 @@ class TestDeprecatedCall(object): pytest.deprecated_call(self.dep_explicit, 0) def test_deprecated_call_as_context_manager_no_warning(self): - with pytest.raises(pytest.fail.Exception) as ex: + with pytest.raises(pytest.fail.Exception, matches='^DID NOT WARN'): with pytest.deprecated_call(): self.dep(1) - assert str(ex.value).startswith("DID NOT WARN") def test_deprecated_call_as_context_manager(self): with pytest.deprecated_call(): @@ -154,7 +146,9 @@ class TestDeprecatedCall(object): pytest.deprecated_call(deprecated_function) """) result = testdir.runpytest() - result.stdout.fnmatch_lines('*=== 2 passed in *===') + # the 2 tests must pass, but the call to test_one() will generate a warning + # in pytest's summary + result.stdout.fnmatch_lines('*=== 2 passed, 1 warnings in *===') class TestWarns(object): @@ -218,7 +212,6 @@ class TestWarns(object): excinfo.match(re.escape(message_template.format(warning_classes, [each.message for each in warninfo]))) - def test_record(self): with pytest.warns(UserWarning) as record: warnings.warn("user", UserWarning) @@ -235,6 +228,28 @@ class TestWarns(object): assert str(record[0].message) == "user" assert str(record[1].message) == "runtime" + def test_record_by_subclass(self): + with pytest.warns(Warning) as record: + warnings.warn("user", UserWarning) + warnings.warn("runtime", RuntimeWarning) + + assert len(record) == 2 + assert str(record[0].message) == "user" + assert str(record[1].message) == "runtime" + + class MyUserWarning(UserWarning): pass + + class MyRuntimeWarning(RuntimeWarning): pass + + with pytest.warns((UserWarning, RuntimeWarning)) as record: + warnings.warn("user", MyUserWarning) + warnings.warn("runtime", MyRuntimeWarning) + + assert len(record) == 2 + assert str(record[0].message) == "user" + assert str(record[1].message) == "runtime" + + def test_double_test(self, testdir): """If a test is run again, the warning should still be raised""" testdir.makepyfile(''' diff --git a/testing/test_resultlog.py b/testing/test_resultlog.py index e2d4fc263..cb083225c 100644 --- a/testing/test_resultlog.py +++ b/testing/test_resultlog.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import os import _pytest._code @@ -70,7 +71,7 @@ def test_write_log_entry(): assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()] -class TestWithFunctionIntegration: +class TestWithFunctionIntegration(object): # XXX (hpk) i think that the resultlog plugin should # provide a Parser object so that one can remain # ignorant regarding formatting details. 
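``test_record_by_subclass`` in the ``test_recwarn.py`` hunks above pins down the matching rule: ``pytest.warns`` is satisfied when the emitted warning is a subclass of the expected category. A short user-level sketch (the warning class is illustrative, not from the patch):

import warnings
import pytest


class FlakyNetworkWarning(UserWarning):
    """Illustrative project-specific warning, not part of this patch."""


def test_subclass_satisfies_base_category():
    # expecting the base UserWarning is enough; the subclass matches it
    with pytest.warns(UserWarning) as record:
        warnings.warn("retrying request", FlakyNetworkWarning)
    assert record[0].category is FlakyNetworkWarning
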
diff --git a/testing/test_runner.py b/testing/test_runner.py index 727defa92..51d430fc8 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from __future__ import with_statement +from __future__ import absolute_import, division, print_function import _pytest._code import os @@ -8,7 +8,7 @@ import pytest import sys from _pytest import runner, main -class TestSetupState: +class TestSetupState(object): def test_setup(self, testdir): ss = runner.SetupState() item = testdir.getitem("def test_func(): pass") @@ -71,7 +71,7 @@ class TestSetupState: assert err.value.args == ('oops2',) -class BaseFunctionalTests: +class BaseFunctionalTests(object): def test_passfunction(self, testdir): reports = testdir.runitem(""" def test_func(): @@ -200,7 +200,7 @@ class BaseFunctionalTests: rec = testdir.inline_runsource(""" import pytest - class TestClass: + class TestClass(object): def test_method(self): pass def teardown_class(cls): @@ -239,7 +239,7 @@ class BaseFunctionalTests: rec = testdir.inline_runsource(""" import pytest - class TestClass: + class TestClass(object): def teardown_method(self, x, y, z): pass @@ -351,12 +351,12 @@ class TestExecutionForked(BaseFunctionalTests): assert rep.failed assert rep.when == "???" -class TestSessionReports: +class TestSessionReports(object): def test_collect_result(self, testdir): col = testdir.getmodulecol(""" def test_func1(): pass - class TestClass: + class TestClass(object): pass """) rep = runner.collect_one_node(col) @@ -409,7 +409,7 @@ def test_runtest_in_module_ordering(testdir): import pytest def pytest_runtest_setup(item): # runs after class-level! item.function.mylist.append("module") - class TestClass: + class TestClass(object): def pytest_runtest_setup(self, item): assert not hasattr(item.function, 'mylist') item.function.mylist = ['class'] @@ -680,7 +680,7 @@ def test_store_except_info_on_eror(): sys.last_traceback and friends. """ # Simulate item that raises a specific exception - class ItemThatRaises: + class ItemThatRaises(object): def runtest(self): raise IndexError('TEST') try: @@ -693,7 +693,7 @@ def test_store_except_info_on_eror(): assert sys.last_traceback -class TestReportContents: +class TestReportContents(object): """ Test user-level API of ``TestReport`` objects. 
""" diff --git a/testing/test_runner_xunit.py b/testing/test_runner_xunit.py index e1f0924c6..92ba97202 100644 --- a/testing/test_runner_xunit.py +++ b/testing/test_runner_xunit.py @@ -1,6 +1,8 @@ -# -# test correct setup/teardowns at -# module, class, and instance level +""" + test correct setup/teardowns at + module, class, and instance level +""" +from __future__ import absolute_import, division, print_function import pytest @@ -24,7 +26,7 @@ def test_module_and_function_setup(testdir): assert modlevel[0] == 42 assert test_modlevel.answer == 17 - class TestFromClass: + class TestFromClass(object): def test_module(self): assert modlevel[0] == 42 assert not hasattr(test_modlevel, 'answer') @@ -69,7 +71,7 @@ def test_setup_function_failure_no_teardown(testdir): def test_class_setup(testdir): reprec = testdir.inline_runsource(""" - class TestSimpleClassSetup: + class TestSimpleClassSetup(object): clslevel = [] def setup_class(cls): cls.clslevel.append(23) @@ -92,7 +94,7 @@ def test_class_setup(testdir): def test_class_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" - class TestSimpleClassSetup: + class TestSimpleClassSetup(object): clslevel = [] def setup_class(cls): 0/0 @@ -110,7 +112,7 @@ def test_class_setup_failure_no_teardown(testdir): def test_method_setup(testdir): reprec = testdir.inline_runsource(""" - class TestSetupMethod: + class TestSetupMethod(object): def setup_method(self, meth): self.methsetup = meth def teardown_method(self, meth): @@ -126,7 +128,7 @@ def test_method_setup(testdir): def test_method_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" - class TestMethodSetup: + class TestMethodSetup(object): clslevel = [] def setup_method(self, method): self.clslevel.append(1) @@ -145,7 +147,7 @@ def test_method_setup_failure_no_teardown(testdir): def test_method_generator_setup(testdir): reprec = testdir.inline_runsource(""" - class TestSetupTeardownOnInstance: + class TestSetupTeardownOnInstance(object): def setup_class(cls): cls.classsetup = True @@ -195,7 +197,7 @@ def test_func_generator_setup(testdir): def test_method_setup_uses_fresh_instances(testdir): reprec = testdir.inline_runsource(""" - class TestSelfState1: + class TestSelfState1(object): memory = [] def test_hello(self): self.memory.append(self) @@ -276,7 +278,7 @@ def test_setup_teardown_function_level_with_optional_argument(testdir, monkeypat def test_function_1(): pass def test_function_2(): pass - class Test: + class Test(object): def setup_method(self, {arg}): trace('setup_method') def teardown_method(self, {arg}): trace('teardown_method') diff --git a/testing/test_session.py b/testing/test_session.py index f494dbc11..d08f7b3e2 100644 --- a/testing/test_session.py +++ b/testing/test_session.py @@ -1,8 +1,9 @@ +from __future__ import absolute_import, division, print_function import pytest from _pytest.main import EXIT_NOTESTSCOLLECTED -class SessionTests: +class SessionTests(object): def test_basic_testitem_events(self, testdir): tfile = testdir.makepyfile(""" def test_one(): @@ -11,7 +12,7 @@ class SessionTests: assert 0 def test_other(): raise ValueError(23) - class TestClass: + class TestClass(object): def test_two(self, someargs): pass """) @@ -97,12 +98,12 @@ class SessionTests: def test_broken_repr(self, testdir): p = testdir.makepyfile(""" import pytest - class BrokenRepr1: + class BrokenRepr1(object): foo=0 def __repr__(self): raise Exception("Ha Ha fooled you, I'm a broken repr().") - class TestBrokenClass: + class TestBrokenClass(object): def 
test_explicit_bad_repr(self): t = BrokenRepr1() pytest.raises(Exception, 'repr(t)') @@ -145,7 +146,7 @@ class TestNewSession(SessionTests): l.append(2) def test_3(): assert l == [1,2] - class Testmygroup: + class Testmygroup(object): reslist = l def test_1(self): self.reslist.append(1) @@ -167,7 +168,7 @@ class TestNewSession(SessionTests): def test_one(): raise ValueError() - class TestX: + class TestX(object): def test_method_one(self): pass diff --git a/testing/test_skipping.py b/testing/test_skipping.py index ac4412fcb..5f25c3e6e 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pytest import sys @@ -5,7 +6,7 @@ from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup from _pytest.runner import runtestprotocol -class TestEvaluator: +class TestEvaluator(object): def test_no_marker(self, testdir): item = testdir.getitem("def test_func(): pass") evalskipif = MarkEvaluator(item, 'skipif') @@ -114,7 +115,7 @@ class TestEvaluator: def test_skipif_class(self, testdir): item, = testdir.getitems(""" import pytest - class TestClass: + class TestClass(object): pytestmark = pytest.mark.skipif("config._hackxyz") def test_func(self): pass @@ -126,7 +127,7 @@ class TestEvaluator: assert expl == "condition: config._hackxyz" -class TestXFail: +class TestXFail(object): @pytest.mark.parametrize('strict', [True, False]) def test_xfail_simple(self, testdir, strict): @@ -452,7 +453,7 @@ class TestXFail: assert result.ret == (1 if strict else 0) -class TestXFailwithSetupTeardown: +class TestXFailwithSetupTeardown(object): def test_failing_setup_issue9(self, testdir): testdir.makepyfile(""" import pytest @@ -484,7 +485,7 @@ class TestXFailwithSetupTeardown: ]) -class TestSkip: +class TestSkip(object): def test_skip_class(self, testdir): testdir.makepyfile(""" import pytest @@ -581,7 +582,7 @@ class TestSkip: "*1 skipped*", ]) -class TestSkipif: +class TestSkipif(object): def test_skipif_conditional(self, testdir): item = testdir.getitem(""" import pytest @@ -648,7 +649,7 @@ def test_skipif_class(testdir): p = testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): pytestmark = pytest.mark.skipif("True") def test_that(self): assert 0 @@ -667,7 +668,7 @@ def test_skip_reasons_folding(): message = "justso" longrepr = (path, lineno, message) - class X: + class X(object): pass ev1 = X() ev1.when = "execute" @@ -694,7 +695,7 @@ def test_skipped_reasons_functional(testdir): doskip() def test_func(): pass - class TestClass: + class TestClass(object): def test_method(self): doskip() """, @@ -892,7 +893,7 @@ def test_imperativeskip_on_xfail_test(testdir): *2 skipped* """) -class TestBooleanCondition: +class TestBooleanCondition(object): def test_skipif(self, testdir): testdir.makepyfile(""" import pytest diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 0f919b5ed..5a90b3dd4 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -1,6 +1,7 @@ """ terminal reporting of the full testing process. 
""" +from __future__ import absolute_import, division, print_function import collections import sys @@ -16,7 +17,7 @@ from _pytest.terminal import build_summary_stats_line, _plugin_nameversions DistInfo = collections.namedtuple('DistInfo', ['project_name', 'version']) -class Option: +class Option(object): def __init__(self, verbose=False, fulltrace=False): self.verbose = verbose self.fulltrace = fulltrace @@ -56,7 +57,7 @@ def test_plugin_nameversion(input, expected): assert result == expected -class TestTerminal: +class TestTerminal(object): def test_pass_skip_fail(self, testdir, option): testdir.makepyfile(""" import pytest @@ -127,7 +128,7 @@ class TestTerminal: def test_itemreport_subclasses_show_subclassed_file(self, testdir): testdir.makepyfile(test_p1=""" - class BaseTests: + class BaseTests(object): def test_p1(self): pass class TestClass(BaseTests): @@ -151,7 +152,7 @@ class TestTerminal: def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir): a = testdir.mkpydir("a123") a.join("test_hello123.py").write(_pytest._code.Source(""" - class TestClass: + class TestClass(object): def test_method(self): pass """)) @@ -204,7 +205,7 @@ class TestTerminal: result.stdout.fnmatch_lines(['*KeyboardInterrupt*']) -class TestCollectonly: +class TestCollectonly(object): def test_collectonly_basic(self, testdir): testdir.makepyfile(""" def test_func(): @@ -249,7 +250,7 @@ class TestCollectonly: p = testdir.makepyfile(""" def test_func1(): pass - class TestClass: + class TestClass(object): def test_method(self): pass """) @@ -310,7 +311,7 @@ def test_repr_python_version(monkeypatch): finally: monkeypatch.undo() # do this early as pytest can get confused -class TestFixtureReporting: +class TestFixtureReporting(object): def test_setup_fixture_error(self, testdir): testdir.makepyfile(""" def setup_function(function): @@ -395,7 +396,7 @@ class TestFixtureReporting: "*1 failed*", ]) -class TestTerminalFunctional: +class TestTerminalFunctional(object): def test_deselected(self, testdir): testpath = testdir.makepyfile(""" def test_one(): @@ -431,7 +432,7 @@ class TestTerminalFunctional: p1 = testdir.makepyfile(""" def test_passes(): pass - class TestClass: + class TestClass(object): def test_method(self): pass """) @@ -487,7 +488,7 @@ class TestTerminalFunctional: raise ValueError() def test_pass(): pass - class TestClass: + class TestClass(object): def test_skip(self): pytest.skip("hello") def test_gen(): @@ -612,10 +613,10 @@ def test_color_yes_collection_on_non_atty(testdir, verbose): def test_getreportopt(): - class config: - class option: + class config(object): + class option(object): reportchars = "" - disablepytestwarnings = True + disable_warnings = True config.option.reportchars = "sf" assert getreportopt(config) == "sf" @@ -624,11 +625,11 @@ def test_getreportopt(): assert getreportopt(config) == "sfx" config.option.reportchars = "sfx" - config.option.disablepytestwarnings = False + config.option.disable_warnings = False assert getreportopt(config) == "sfxw" config.option.reportchars = "sfxw" - config.option.disablepytestwarnings = False + config.option.disable_warnings = False assert getreportopt(config) == "sfxw" @@ -683,7 +684,7 @@ def test_traceconfig(testdir, monkeypatch): assert result.ret == EXIT_NOTESTSCOLLECTED -class TestGenericReporting: +class TestGenericReporting(object): """ this test class can be subclassed with a different option provider to run e.g. distributed tests. 
""" @@ -837,8 +838,8 @@ def test_terminal_summary_warnings_are_displayed(testdir): """) result = testdir.runpytest('-rw') result.stdout.fnmatch_lines([ - '*C1*internal warning', - '*== 1 pytest-warnings in *', + '*internal warning', + '*== 1 warnings in *', ]) @@ -858,9 +859,9 @@ def test_terminal_summary_warnings_are_displayed(testdir): ("yellow", "1 weird", {"weird": (1,)}), ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}), - ("yellow", "1 pytest-warnings", {"warnings": (1,)}), - ("yellow", "1 passed, 1 pytest-warnings", {"warnings": (1,), - "passed": (1,)}), + ("yellow", "1 warnings", {"warnings": (1,)}), + ("yellow", "1 passed, 1 warnings", {"warnings": (1,), + "passed": (1,)}), ("green", "5 passed", {"passed": (1,2,3,4,5)}), diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py index 232acb6d2..ccd70ed8b 100644 --- a/testing/test_tmpdir.py +++ b/testing/test_tmpdir.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import sys import py import pytest @@ -34,7 +35,7 @@ def test_ensuretemp(recwarn): assert d1 == d2 assert d1.check(dir=1) -class TestTempdirHandler: +class TestTempdirHandler(object): def test_mktemp(self, testdir): from _pytest.tmpdir import TempdirFactory config = testdir.parseconfig() @@ -48,7 +49,7 @@ class TestTempdirHandler: assert tmp2.relto(t.getbasetemp()).startswith("this") assert tmp2 != tmp -class TestConfigTmpdir: +class TestConfigTmpdir(object): def test_getbasetemp_custom_removes_old(self, testdir): mytemp = testdir.tmpdir.join("xyz") p = testdir.makepyfile(""" diff --git a/testing/test_unittest.py b/testing/test_unittest.py index 9625ae0f8..af9851997 100644 --- a/testing/test_unittest.py +++ b/testing/test_unittest.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function from _pytest.main import EXIT_NOTESTSCOLLECTED import pytest import gc @@ -385,7 +386,7 @@ def test_trial_testfunction_todo_property(testdir): reprec.assertoutcome(skipped=1) -class TestTrialUnittest: +class TestTrialUnittest(object): def setup_class(cls): cls.ut = pytest.importorskip("twisted.trial.unittest") @@ -704,7 +705,7 @@ def test_unittest_setup_interaction(testdir, fix_type, stmt): def test_non_unittest_no_setupclass_support(testdir): testpath = testdir.makepyfile(""" - class TestFoo: + class TestFoo(object): x = 0 @classmethod diff --git a/testing/test_warnings.py b/testing/test_warnings.py new file mode 100644 index 000000000..e0baed8d1 --- /dev/null +++ b/testing/test_warnings.py @@ -0,0 +1,108 @@ +import pytest + + +WARNINGS_SUMMARY_HEADER = 'warnings summary' + +@pytest.fixture +def pyfile_with_warnings(testdir, request): + """ + Create a test file which calls a function in a module which generates warnings. + """ + testdir.syspathinsert() + test_name = request.function.__name__ + module_name = test_name.lstrip('test_') + '_module' + testdir.makepyfile(**{ + module_name: ''' + import warnings + def foo(): + warnings.warn(PendingDeprecationWarning("functionality is pending deprecation")) + warnings.warn(DeprecationWarning("functionality is deprecated")) + return 1 + ''', + test_name: ''' + import {module_name} + def test_func(): + assert {module_name}.foo() == 1 + '''.format(module_name=module_name) + }) + + +def test_normal_flow(testdir, pyfile_with_warnings): + """ + Check that the warnings section is displayed, containing test node ids followed by + all warnings generated by that test node. 
+ """ + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*== %s ==*' % WARNINGS_SUMMARY_HEADER, + + '*test_normal_flow.py::test_func', + + '*normal_flow_module.py:3: PendingDeprecationWarning: functionality is pending deprecation', + '* warnings.warn(PendingDeprecationWarning("functionality is pending deprecation"))', + + '*normal_flow_module.py:4: DeprecationWarning: functionality is deprecated', + '* warnings.warn(DeprecationWarning("functionality is deprecated"))', + '* 1 passed, 2 warnings*', + ]) + assert result.stdout.str().count('test_normal_flow.py::test_func') == 1 + + +def test_setup_teardown_warnings(testdir, pyfile_with_warnings): + testdir.makepyfile(''' + import warnings + import pytest + + @pytest.fixture + def fix(): + warnings.warn(UserWarning("warning during setup")) + yield + warnings.warn(UserWarning("warning during teardown")) + + def test_func(fix): + pass + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*== %s ==*' % WARNINGS_SUMMARY_HEADER, + + '*test_setup_teardown_warnings.py:6: UserWarning: warning during setup', + '*warnings.warn(UserWarning("warning during setup"))', + + '*test_setup_teardown_warnings.py:8: UserWarning: warning during teardown', + '*warnings.warn(UserWarning("warning during teardown"))', + '* 1 passed, 2 warnings*', + ]) + + +@pytest.mark.parametrize('method', ['cmdline', 'ini']) +def test_as_errors(testdir, pyfile_with_warnings, method): + args = ('-W', 'error') if method == 'cmdline' else () + if method == 'ini': + testdir.makeini(''' + [pytest] + filterwarnings= error + ''') + result = testdir.runpytest(*args) + result.stdout.fnmatch_lines([ + 'E PendingDeprecationWarning: functionality is pending deprecation', + 'as_errors_module.py:3: PendingDeprecationWarning', + '* 1 failed in *', + ]) + + +@pytest.mark.parametrize('method', ['cmdline', 'ini']) +def test_ignore(testdir, pyfile_with_warnings, method): + args = ('-W', 'ignore') if method == 'cmdline' else () + if method == 'ini': + testdir.makeini(''' + [pytest] + filterwarnings= ignore + ''') + + result = testdir.runpytest(*args) + result.stdout.fnmatch_lines([ + '* 1 passed in *', + ]) + assert WARNINGS_SUMMARY_HEADER not in result.stdout.str() + diff --git a/tox.ini b/tox.ini index a47fc5132..a2d4af9ed 100644 --- a/tox.ini +++ b/tox.ini @@ -47,6 +47,9 @@ commands= [testenv:linting] basepython = python2.7 +# needed to keep check-manifest working +setenv = + SETUPTOOLS_SCM_PRETEND_VERSION=2.0.1 deps = flake8 # pygments required by rst-lint @@ -56,7 +59,7 @@ deps = commands = {envpython} scripts/check-manifest.py flake8 pytest.py _pytest testing - rst-lint CHANGELOG.rst HOWTORELEASE.rst README.rst + rst-lint CHANGELOG.rst HOWTORELEASE.rst README.rst --encoding utf-8 [testenv:py27-xdist] deps=pytest-xdist>=1.13 @@ -131,6 +134,7 @@ commands= [testenv:regen] changedir=doc/en +skipsdist=True basepython = python3.5 deps=sphinx PyYAML @@ -178,7 +182,15 @@ python_files=test_*.py *_test.py testing/*/*.py python_classes=Test Acceptance python_functions=test norecursedirs = .tox ja .hg cx_freeze_source - +filterwarnings= error + # produced by path.local + ignore:bad escape.*:DeprecationWarning:re + # produced by path.readlines + ignore:.*U.*mode is deprecated:DeprecationWarning + # produced by pytest-xdist + ignore:.*type argument to addoption.*:DeprecationWarning + # produced by python >=3.5 on execnet (pytest-xdist) + ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning [flake8] ignore 
=E401,E225,E261,E128,E124,E301,E302,E121,E303,W391,E501,E231,E126,E701,E265,E241,E251,E226,E101,W191,E131,E203,E122,E123,E271,E712,E222,E127,E125,E221,W292,E111,E113,E293,E262,W293,E129,E702,E201,E272,E202,E704,E731,E402
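
For reference, a minimal sketch of the behaviour exercised by the new testing/test_warnings.py above (the file and function names here are illustrative, not part of the patch): warnings emitted while a test runs are collected and reported in the "warnings summary" section, and they can be turned into errors or silenced via the -W command-line option or the filterwarnings ini option, as configured for pytest's own test suite in the tox.ini hunk above.

    # test_deprecation_example.py -- illustrative only, assuming pytest >= 3.1
    import warnings

    def deprecated_helper():
        # stand-in for a library function that still works but warns
        warnings.warn(DeprecationWarning("deprecated functionality"))
        return 1

    def test_uses_deprecated_helper():
        # Default run: passes, and the DeprecationWarning is listed in the
        # "warnings summary" section ("1 passed, 1 warnings").
        # With "pytest -W error" (or "filterwarnings = error" under [pytest]):
        # the warning is raised as an exception and the test fails.
        # With "pytest -W ignore": the warning and the summary are suppressed.
        assert deprecated_helper() == 1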
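
A note on the recurring "class Foo:" -> "class Foo(object):" change in the test files above: on Python 2, a class that does not inherit from object is an old-style class, whereas on Python 3 every class is new-style, so inheriting from object explicitly keeps the test classes behaving the same on both interpreters. A minimal sketch (illustrative, not from the patch):

    class OldStyle:             # old-style on Python 2, new-style on Python 3
        pass

    class NewStyle(object):     # new-style on both Python 2 and Python 3
        pass

    # On Python 2, type(OldStyle()) is <type 'instance'> rather than OldStyle,
    # and features such as super() and descriptors behave differently; on
    # Python 3 the two definitions are equivalent.
    print(type(OldStyle()), type(NewStyle()))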