commit 6a43c8cd94

AUTHORS
							|  | @ -194,6 +194,7 @@ Paweł Adamczak | |||
| Pedro Algarvio | ||||
| Pieter Mulder | ||||
| Piotr Banaszkiewicz | ||||
| Pulkit Goyal | ||||
| Punyashloka Biswal | ||||
| Quentin Pradet | ||||
| Ralf Schmitt | ||||
|  | @ -211,6 +212,7 @@ Ross Lawley | |||
| Russel Winder | ||||
| Ryan Wooden | ||||
| Samuel Dion-Girardeau | ||||
| Samuel Searles-Bryant | ||||
| Samuele Pedroni | ||||
| Sankt Petersbug | ||||
| Segev Finer | ||||
CHANGELOG.rst
							|  | @ -18,6 +18,139 @@ with advance notice in the **Deprecations** section of releases. | |||
| 
 | ||||
| .. towncrier release notes start | ||||
| 
 | ||||
| pytest 4.5.0 (2019-05-11) | ||||
| ========================= | ||||
| 
 | ||||
| Features | ||||
| -------- | ||||
| 
 | ||||
| - `#4826 <https://github.com/pytest-dev/pytest/issues/4826>`_: A warning is now emitted when unknown marks are used as a decorator. | ||||
|   This is often due to a typo, which can lead to silently broken tests. | ||||
| 
 | ||||
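|   For illustration (a sketch; the misspelled mark name below is hypothetical), a | ||||
|   decorator like this now produces a ``PytestUnknownMarkWarning``: | ||||
| 
|   .. code-block:: python | ||||
| 
|       import pytest | ||||
| 
|       @pytest.mark.slwo  # typo of "slow": pytest warns about the unknown mark | ||||
|       def test_typo_in_mark(): | ||||
|           assert True | ||||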
| 
 | ||||
| - `#4907 <https://github.com/pytest-dev/pytest/issues/4907>`_: Show XFail reason as part of JUnitXML message field. | ||||
| 
 | ||||
| 
 | ||||
| - `#5013 <https://github.com/pytest-dev/pytest/issues/5013>`_: Messages from crash reports are displayed within test summaries now, truncated to the terminal width. | ||||
| 
 | ||||
| 
 | ||||
| - `#5023 <https://github.com/pytest-dev/pytest/issues/5023>`_: New flag ``--strict-markers`` that triggers an error when unknown markers (e.g. those not registered using the `markers option`_ in the configuration file) are used in the test suite. | ||||
| 
 | ||||
|   The existing ``--strict`` option has the same behavior currently, but can be augmented in the future for additional checks. | ||||
| 
 | ||||
|   .. _`markers option`: https://docs.pytest.org/en/latest/reference.html#confval-markers | ||||
| 
 | ||||
| 
 | ||||
| - `#5026 <https://github.com/pytest-dev/pytest/issues/5026>`_: Assertion failure messages for sequences and dicts now include the number of differing items. | ||||
| 
 | ||||
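|   For example (hypothetical values), the failing assertion below is now reported | ||||
|   with ``Right contains 1 more item:`` followed by the extra item, instead of the | ||||
|   older uncounted ``Right contains more items:``: | ||||
| 
|   .. code-block:: python | ||||
| 
|       def test_dict_counts(): | ||||
|           assert {"a": 0, "b": 1} == {"a": 0, "b": 2, "c": 0} | ||||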
| 
 | ||||
| - `#5034 <https://github.com/pytest-dev/pytest/issues/5034>`_: Improve reporting with ``--lf`` and ``--ff`` (run-last-failure). | ||||
| 
 | ||||
| 
 | ||||
| - `#5035 <https://github.com/pytest-dev/pytest/issues/5035>`_: The ``--cache-show`` option/action accepts an optional glob to show only matching cache entries. | ||||
| 
 | ||||
| 
 | ||||
| - `#5059 <https://github.com/pytest-dev/pytest/issues/5059>`_: Standard input (stdin) can be given to pytester's ``Testdir.run()`` and ``Testdir.popen()``. | ||||
| 
 | ||||
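|   A minimal sketch of the new capability (assumes the ``pytester`` plugin is | ||||
|   enabled and a ``cat`` binary is on ``PATH``): | ||||
| 
|   .. code-block:: python | ||||
| 
|       def test_run_with_stdin(testdir): | ||||
|           # bytes passed via ``stdin`` are written to the child process | ||||
|           result = testdir.run("cat", stdin=b"hello\n") | ||||
|           assert result.ret == 0 | ||||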
| 
 | ||||
| - `#5068 <https://github.com/pytest-dev/pytest/issues/5068>`_: The ``-r`` option learnt about ``A`` to display all reports (including passed ones) in the short test summary. | ||||
| 
 | ||||
| 
 | ||||
| - `#5108 <https://github.com/pytest-dev/pytest/issues/5108>`_: The short test summary is displayed after passes with output (``-rP``). | ||||
| 
 | ||||
| 
 | ||||
| - `#5172 <https://github.com/pytest-dev/pytest/issues/5172>`_: The ``--last-failed`` (``--lf``) option got smarter and will now skip entire files if all tests | ||||
|   of that test file have passed in previous runs, greatly speeding up collection. | ||||
| 
 | ||||
| 
 | ||||
| - `#5177 <https://github.com/pytest-dev/pytest/issues/5177>`_: Introduce new specific warning ``PytestWarning`` subclasses to make it easier to filter warnings based on the class, rather than on the message. The new subclasses are (see the sketch after this list): | ||||
| 
 | ||||
| 
 | ||||
|   * ``PytestAssertRewriteWarning`` | ||||
| 
 | ||||
|   * ``PytestCacheWarning`` | ||||
| 
 | ||||
|   * ``PytestCollectionWarning`` | ||||
| 
 | ||||
|   * ``PytestConfigWarning`` | ||||
| 
 | ||||
|   * ``PytestUnhandledCoroutineWarning`` | ||||
| 
 | ||||
|   * ``PytestUnknownMarkWarning`` | ||||
| 
 | ||||
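|   As a sketch of the intended use, a suite can now silence a single subclass while | ||||
|   keeping every other pytest warning visible.  This uses the stdlib ``warnings`` | ||||
|   API; the ``filterwarnings`` ini option or the ``-W`` flag work the same way, as | ||||
|   the ``Makefile`` change below does with ``-Wignore::pytest.PytestUnknownMarkWarning``: | ||||
| 
|   .. code-block:: python | ||||
| 
|       import warnings | ||||
| 
|       import pytest | ||||
| 
|       # ignore only unknown-mark warnings; other PytestWarning subclasses still show | ||||
|       warnings.filterwarnings("ignore", category=pytest.PytestUnknownMarkWarning) | ||||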
| 
 | ||||
| - `#5202 <https://github.com/pytest-dev/pytest/issues/5202>`_: New ``record_testsuite_property`` session-scoped fixture allows users to log ``<property>`` tags at the ``testsuite`` | ||||
|   level with the ``junitxml`` plugin. | ||||
| 
 | ||||
|   The generated XML is compatible with the latest xunit standard, contrary to | ||||
|   the properties recorded by ``record_property`` and ``record_xml_attribute``. | ||||
| 
 | ||||
| 
 | ||||
| - `#5214 <https://github.com/pytest-dev/pytest/issues/5214>`_: The default logging format has been changed to improve readability. Here is an | ||||
|   example of a previous logging message:: | ||||
| 
 | ||||
|       test_log_cli_enabled_disabled.py    3 CRITICAL critical message logged by test | ||||
| 
 | ||||
|   This has now become:: | ||||
| 
 | ||||
|       CRITICAL root:test_log_cli_enabled_disabled.py:3 critical message logged by test | ||||
| 
 | ||||
|   The formatting can be changed through the `log_format <https://docs.pytest.org/en/latest/reference.html#confval-log_format>`__ configuration option. | ||||
| 
 | ||||
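|   The new default corresponds to the following stdlib format string (a sketch; | ||||
|   ``_pytest.logging.DEFAULT_LOG_FORMAT`` holds the authoritative value): | ||||
| 
|   .. code-block:: python | ||||
| 
|       import logging | ||||
| 
|       # "LEVEL    logger:file.py:lineno message" | ||||
|       logging.basicConfig( | ||||
|           format="%(levelname)-8s %(name)s:%(filename)s:%(lineno)s %(message)s" | ||||
|       ) | ||||
|       logging.critical("critical message logged by test") | ||||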
| 
 | ||||
| - `#5220 <https://github.com/pytest-dev/pytest/issues/5220>`_: ``--fixtures`` now also shows fixture scope for scopes other than ``"function"``. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| Bug Fixes | ||||
| --------- | ||||
| 
 | ||||
| - `#5113 <https://github.com/pytest-dev/pytest/issues/5113>`_: Deselected items from plugins using ``pytest_collection_modifyitems`` as a hookwrapper are now reported correctly. | ||||
| 
 | ||||
| 
 | ||||
| - `#5144 <https://github.com/pytest-dev/pytest/issues/5144>`_: On usage errors, ``exitstatus`` is now set to ``EXIT_USAGEERROR`` in the ``pytest_sessionfinish`` hook, as expected. | ||||
| 
 | ||||
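|   A minimal way to observe this from a ``conftest.py`` (a sketch; ``EXIT_USAGEERROR`` | ||||
|   is the constant ``4`` from ``_pytest.main``): | ||||
| 
|   .. code-block:: python | ||||
| 
|       def pytest_sessionfinish(session, exitstatus): | ||||
|           # on a usage error (e.g. an unknown command-line flag), | ||||
|           # exitstatus now arrives as EXIT_USAGEERROR (4) | ||||
|           print("pytest finished with exit status", exitstatus) | ||||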
| 
 | ||||
| - `#5235 <https://github.com/pytest-dev/pytest/issues/5235>`_: ``outcome.exit`` is not used with ``EOF`` in the pdb wrapper anymore, but only with ``quit``. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| Improved Documentation | ||||
| ---------------------- | ||||
| 
 | ||||
| - `#4935 <https://github.com/pytest-dev/pytest/issues/4935>`_: Expand docs on registering marks and the effect of ``--strict``. | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| Trivial/Internal Changes | ||||
| ------------------------ | ||||
| 
 | ||||
| - `#4942 <https://github.com/pytest-dev/pytest/issues/4942>`_: ``logging.raiseExceptions`` is not set to ``False`` anymore. | ||||
| 
 | ||||
| 
 | ||||
| - `#5013 <https://github.com/pytest-dev/pytest/issues/5013>`_: pytest now depends on `wcwidth <https://pypi.org/project/wcwidth>`__ to properly track unicode character sizes for more precise terminal output. | ||||
| 
 | ||||
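|   ``wcwidth`` reports how many terminal cells a string occupies, which plain | ||||
|   ``len()`` gets wrong for wide characters; a quick sketch: | ||||
| 
|   .. code-block:: python | ||||
| 
|       from wcwidth import wcswidth | ||||
| 
|       assert wcswidth("pytest") == 6 | ||||
|       assert wcswidth(u"你好") == 4  # two CJK characters, two cells each | ||||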
| 
 | ||||
| - `#5059 <https://github.com/pytest-dev/pytest/issues/5059>`_: pytester's ``Testdir.popen()`` uses ``stdout`` and ``stderr`` via keyword arguments with defaults now (``subprocess.PIPE``). | ||||
| 
 | ||||
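|   In practice ``communicate()`` now returns the captured streams without extra | ||||
|   setup (a sketch using ``sys.executable`` for portability; assumes the | ||||
|   ``pytester`` plugin): | ||||
| 
|   .. code-block:: python | ||||
| 
|       import sys | ||||
| 
|       def test_popen_defaults(testdir): | ||||
|           # stdout/stderr now default to subprocess.PIPE | ||||
|           proc = testdir.popen([sys.executable, "-c", "print('hi')"]) | ||||
|           out, err = proc.communicate() | ||||
|           assert out.rstrip() == b"hi" | ||||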
| 
 | ||||
| - `#5069 <https://github.com/pytest-dev/pytest/issues/5069>`_: The code for the short test summary in the terminal was moved to the terminal plugin. | ||||
| 
 | ||||
| 
 | ||||
| - `#5082 <https://github.com/pytest-dev/pytest/issues/5082>`_: Improved validation of kwargs for various methods in the pytester plugin. | ||||
| 
 | ||||
| 
 | ||||
| - `#5202 <https://github.com/pytest-dev/pytest/issues/5202>`_: ``record_property`` now emits a ``PytestWarning`` when used with ``junit_family=xunit2``: the fixture generates | ||||
|   ``property`` tags as children of ``testcase``, which is not permitted according to the most | ||||
|   `recent schema <https://github.com/jenkinsci/xunit-plugin/blob/master/ | ||||
|   src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd>`__. | ||||
| 
 | ||||
| 
 | ||||
| - `#5239 <https://github.com/pytest-dev/pytest/issues/5239>`_: Pin ``pluggy`` to ``< 1.0`` so we don't update to ``1.0`` automatically when | ||||
|   it gets released: there are planned breaking changes, and we want to ensure | ||||
|   pytest properly supports ``pluggy 1.0``. | ||||
| 
 | ||||
| 
 | ||||
| pytest 4.4.2 (2019-05-08) | ||||
| ========================= | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,3 +0,0 @@ | |||
| Pin ``pluggy`` to ``< 1.0`` so we don't update to ``1.0`` automatically when | ||||
| it gets released: there are planned breaking changes, and we want to ensure | ||||
| pytest properly supports ``pluggy 1.0``. | ||||
|  | @ -43,7 +43,7 @@ clean: | |||
| 
 | ||||
| regen: REGENDOC_FILES:=*.rst */*.rst | ||||
| regen: | ||||
| 	PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPTS=-pno:hypothesis COLUMNS=76 regendoc --update ${REGENDOC_FILES} ${REGENDOC_ARGS} | ||||
| 	PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPTS="-pno:hypothesis -Wignore::pytest.PytestUnknownMarkWarning" COLUMNS=76 regendoc --update ${REGENDOC_FILES} ${REGENDOC_ARGS} | ||||
| 
 | ||||
| html: | ||||
| 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html | ||||
|  |  | |||
|  | @ -6,6 +6,7 @@ Release announcements | |||
|    :maxdepth: 2 | ||||
| 
 | ||||
| 
 | ||||
|    release-4.5.0 | ||||
|    release-4.4.2 | ||||
|    release-4.4.1 | ||||
|    release-4.4.0 | ||||
|  |  | |||
|  | @ -0,0 +1,35 @@ | |||
| pytest-4.5.0 | ||||
| ============ | ||||
| 
 | ||||
| The pytest team is proud to announce the 4.5.0 release! | ||||
| 
 | ||||
| pytest is a mature Python testing tool with more than 2000 tests | ||||
| against itself, passing on many different interpreters and platforms. | ||||
| 
 | ||||
| This release contains a number of bug fixes and improvements, so users are encouraged | ||||
| to take a look at the CHANGELOG: | ||||
| 
 | ||||
|     https://docs.pytest.org/en/latest/changelog.html | ||||
| 
 | ||||
| For complete documentation, please visit: | ||||
| 
 | ||||
|     https://docs.pytest.org/en/latest/ | ||||
| 
 | ||||
| As usual, you can upgrade from PyPI via: | ||||
| 
 | ||||
|     pip install -U pytest | ||||
| 
 | ||||
| Thanks to all who contributed to this release, among them: | ||||
| 
 | ||||
| * Anthony Sottile | ||||
| * Bruno Oliveira | ||||
| * Daniel Hahler | ||||
| * Floris Bruynooghe | ||||
| * Pulkit Goyal | ||||
| * Samuel Searles-Bryant | ||||
| * Zac Hatfield-Dodds | ||||
| * Zac-HD | ||||
| 
 | ||||
| 
 | ||||
| Happy testing, | ||||
| The Pytest Development Team | ||||
|  | @ -27,33 +27,39 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a | |||
|         name of your plugin or application to avoid clashes with other cache users. | ||||
| 
 | ||||
|         Values can be any object handled by the json stdlib module. | ||||
| 
 | ||||
|     capsys | ||||
|         Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. | ||||
| 
 | ||||
|         The captured output is made available via ``capsys.readouterr()`` method | ||||
|         calls, which return a ``(out, err)`` namedtuple. | ||||
|         ``out`` and ``err`` will be ``text`` objects. | ||||
| 
 | ||||
|     capsysbinary | ||||
|         Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. | ||||
| 
 | ||||
|         The captured output is made available via ``capsysbinary.readouterr()`` | ||||
|         method calls, which return a ``(out, err)`` namedtuple. | ||||
|         ``out`` and ``err`` will be ``bytes`` objects. | ||||
| 
 | ||||
|     capfd | ||||
|         Enable text capturing of writes to file descriptors ``1`` and ``2``. | ||||
| 
 | ||||
|         The captured output is made available via ``capfd.readouterr()`` method | ||||
|         calls, which return a ``(out, err)`` namedtuple. | ||||
|         ``out`` and ``err`` will be ``text`` objects. | ||||
| 
 | ||||
|     capfdbinary | ||||
|         Enable bytes capturing of writes to file descriptors ``1`` and ``2``. | ||||
| 
 | ||||
|         The captured output is made available via ``capfd.readouterr()`` method | ||||
|         calls, which return a ``(out, err)`` namedtuple. | ||||
|         ``out`` and ``err`` will be ``bytes`` objects. | ||||
|     doctest_namespace | ||||
| 
 | ||||
|     doctest_namespace [session scope] | ||||
|         Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. | ||||
|     pytestconfig | ||||
| 
 | ||||
|     pytestconfig [session scope] | ||||
|         Session-scoped fixture that returns the :class:`_pytest.config.Config` object. | ||||
| 
 | ||||
|         Example:: | ||||
|  | @ -61,6 +67,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a | |||
|             def test_foo(pytestconfig): | ||||
|                 if pytestconfig.getoption("verbose") > 0: | ||||
|                     ... | ||||
| 
 | ||||
|     record_property | ||||
|         Add extra properties to the calling test. | ||||
|         User properties become part of the test report and are available to the | ||||
|  | @ -72,10 +79,26 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a | |||
| 
 | ||||
|             def test_function(record_property): | ||||
|                 record_property("example_key", 1) | ||||
| 
 | ||||
|     record_xml_attribute | ||||
|         Add extra xml attributes to the tag for the calling test. | ||||
|         The fixture is callable with ``(name, value)``, with value being | ||||
|         automatically xml-encoded. | ||||
| 
 | ||||
|     record_testsuite_property [session scope] | ||||
|         Records a new ``<property>`` tag as a child of the root ``<testsuite>``. This is suitable for | ||||
|         writing global information about the entire test suite, and is compatible with the ``xunit2`` JUnit family. | ||||
| 
 | ||||
|         This is a ``session``-scoped fixture which is called with ``(name, value)``. Example: | ||||
| 
 | ||||
|         .. code-block:: python | ||||
| 
 | ||||
|             def test_foo(record_testsuite_property): | ||||
|                 record_testsuite_property("ARCH", "PPC") | ||||
|                 record_testsuite_property("STORAGE_TYPE", "CEPH") | ||||
| 
 | ||||
|         ``name`` must be a string; ``value`` will be converted to a string and properly xml-escaped. | ||||
| 
 | ||||
|     caplog | ||||
|         Access and control log capturing. | ||||
| 
 | ||||
|  | @ -85,6 +108,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a | |||
|         * caplog.records         -> list of logging.LogRecord instances | ||||
|         * caplog.record_tuples   -> list of (logger_name, level, message) tuples | ||||
|         * caplog.clear()         -> clear captured records and formatted log output string | ||||
| 
 | ||||
|     monkeypatch | ||||
|         The returned ``monkeypatch`` fixture provides these | ||||
|         helper methods to modify objects, dictionaries or os.environ:: | ||||
|  | @ -102,15 +126,19 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a | |||
|         test function or fixture has finished. The ``raising`` | ||||
|         parameter determines if a KeyError or AttributeError | ||||
|         will be raised if the set/deletion operation has no target. | ||||
| 
 | ||||
|     recwarn | ||||
|         Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. | ||||
| 
 | ||||
|         See http://docs.python.org/library/warnings.html for information | ||||
|         on warning categories. | ||||
|     tmpdir_factory | ||||
| 
 | ||||
|     tmpdir_factory [session scope] | ||||
|         Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session. | ||||
|     tmp_path_factory | ||||
| 
 | ||||
|     tmp_path_factory [session scope] | ||||
|         Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session. | ||||
| 
 | ||||
|     tmpdir | ||||
|         Return a temporary directory path object | ||||
|         which is unique to each test function invocation, | ||||
|  | @ -119,6 +147,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a | |||
|         path object. | ||||
| 
 | ||||
|         .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html | ||||
| 
 | ||||
|     tmp_path | ||||
|         Return a temporary directory path object | ||||
|         which is unique to each test function invocation, | ||||
|  | @ -130,6 +159,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a | |||
| 
 | ||||
|             in python < 3.6 this is a pathlib2.Path | ||||
| 
 | ||||
| 
 | ||||
|     no tests ran in 0.12 seconds | ||||
| 
 | ||||
| You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:: | ||||
|  |  | |||
|  | @ -247,7 +247,7 @@ See the :ref:`cache-api` for more details. | |||
| 
 | ||||
| 
 | ||||
| Inspecting Cache content | ||||
| ------------------------------- | ||||
| ------------------------ | ||||
| 
 | ||||
| You can always peek at the content of the cache using the | ||||
| ``--cache-show`` command line option: | ||||
|  | @ -260,7 +260,7 @@ You can always peek at the content of the cache using the | |||
|     cachedir: $PYTHON_PREFIX/.pytest_cache | ||||
|     rootdir: $REGENDOC_TMPDIR | ||||
|     cachedir: $PYTHON_PREFIX/.pytest_cache | ||||
|     ------------------------------- cache values ------------------------------- | ||||
|     --------------------------- cache values for '*' --------------------------- | ||||
|     cache/lastfailed contains: | ||||
|       {'test_50.py::test_num[17]': True, | ||||
|        'test_50.py::test_num[25]': True, | ||||
|  | @ -277,8 +277,25 @@ You can always peek at the content of the cache using the | |||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
| 
 | ||||
| ``--cache-show`` takes an optional argument to specify a glob pattern for | ||||
| filtering: | ||||
| 
 | ||||
| .. code-block:: pytest | ||||
| 
 | ||||
|     $ pytest --cache-show example/* | ||||
|     =========================== test session starts ============================ | ||||
|     platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y | ||||
|     cachedir: $PYTHON_PREFIX/.pytest_cache | ||||
|     rootdir: $REGENDOC_TMPDIR | ||||
|     cachedir: $PYTHON_PREFIX/.pytest_cache | ||||
|     ----------------------- cache values for 'example/*' ----------------------- | ||||
|     example/value contains: | ||||
|       42 | ||||
| 
 | ||||
|     ======================= no tests ran in 0.12 seconds ======================= | ||||
| 
 | ||||
| Clearing Cache content | ||||
| ------------------------------- | ||||
| ---------------------- | ||||
| 
 | ||||
| You can instruct pytest to clear all cache files and values | ||||
| by adding the ``--cache-clear`` option like this: | ||||
|  |  | |||
|  | @ -259,7 +259,7 @@ For an example on how to add and work with markers from a plugin, see | |||
|     * Asking for existing markers via ``pytest --markers`` gives good output | ||||
| 
 | ||||
|     * Typos in function markers are treated as an error if you use | ||||
|       the ``--strict`` option. | ||||
|       the ``--strict-markers`` option. | ||||
| 
 | ||||
| .. _`scoped-marking`: | ||||
| 
 | ||||
|  | @ -619,9 +619,9 @@ then you will see two tests skipped and two executed tests as expected: | |||
|     collected 4 items | ||||
| 
 | ||||
|     test_plat.py s.s.                                                    [100%] | ||||
| 
 | ||||
|     ========================= short test summary info ========================== | ||||
|     SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux | ||||
| 
 | ||||
|     =================== 2 passed, 2 skipped in 0.12 seconds ==================== | ||||
| 
 | ||||
| Note that if you specify a platform via the marker-command line option like this: | ||||
|  |  | |||
|  | @ -492,9 +492,9 @@ If you run this with reporting for skips enabled: | |||
|     collected 2 items | ||||
| 
 | ||||
|     test_module.py .s                                                    [100%] | ||||
| 
 | ||||
|     ========================= short test summary info ========================== | ||||
|     SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' | ||||
| 
 | ||||
|     =================== 1 passed, 1 skipped in 0.12 seconds ==================== | ||||
| 
 | ||||
| You'll see that we don't have an ``opt2`` module and thus the second test run | ||||
|  |  | |||
|  | @ -182,9 +182,9 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|     E         Omitting 1 identical items, use -vv to show | ||||
|     E         Differing items: | ||||
|     E         {'b': 1} != {'b': 2} | ||||
|     E         Left contains more items: | ||||
|     E         Left contains 1 more item: | ||||
|     E         {'c': 0} | ||||
|     E         Right contains more items: | ||||
|     E         Right contains 1 more item: | ||||
|     E         {'d': 0}... | ||||
|     E | ||||
|     E         ...Full output truncated (2 lines hidden), use '-vv' to show | ||||
|  | @ -215,7 +215,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: | |||
|         def test_eq_longer_list(self): | ||||
|     >       assert [1, 2] == [1, 2, 3] | ||||
|     E       assert [1, 2] == [1, 2, 3] | ||||
|     E         Right contains more items, first extra item: 3 | ||||
|     E         Right contains one more item: 3 | ||||
|     E         Use -v to get the full diff | ||||
| 
 | ||||
|     failure_demo.py:78: AssertionError | ||||
|  |  | |||
|  | @ -194,9 +194,9 @@ and when running it will see a skipped "slow" test: | |||
|     collected 2 items | ||||
| 
 | ||||
|     test_module.py .s                                                    [100%] | ||||
| 
 | ||||
|     ========================= short test summary info ========================== | ||||
|     SKIPPED [1] test_module.py:8: need --runslow option to run | ||||
| 
 | ||||
|     =================== 1 passed, 1 skipped in 0.12 seconds ==================== | ||||
| 
 | ||||
| Or run it including the ``slow`` marked test: | ||||
|  | @ -606,7 +606,7 @@ We can run this: | |||
|     file $REGENDOC_TMPDIR/b/test_error.py, line 1 | ||||
|       def test_root(db):  # no db here, will error out | ||||
|     E       fixture 'db' not found | ||||
|     >       available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory | ||||
|     >       available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_testsuite_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory | ||||
|     >       use 'pytest --fixtures [testpath]' for help on them. | ||||
| 
 | ||||
|     $REGENDOC_TMPDIR/b/test_error.py:1 | ||||
|  |  | |||
|  | @ -1,9 +1,7 @@ | |||
| 
 | ||||
| .. _mark: | ||||
| 
 | ||||
| Marking test functions with attributes | ||||
| ================================================================= | ||||
| 
 | ||||
| ====================================== | ||||
| 
 | ||||
| By using the ``pytest.mark`` helper you can easily set | ||||
| metadata on your test functions. There are | ||||
|  | @ -26,32 +24,38 @@ which also serve as documentation. | |||
|     :ref:`fixtures <fixtures>`. | ||||
| 
 | ||||
| 
 | ||||
| Raising errors on unknown marks: --strict | ||||
| ----------------------------------------- | ||||
| .. _unknown-marks: | ||||
| 
 | ||||
| When the ``--strict`` command-line flag is passed, any unknown marks applied | ||||
| Raising errors on unknown marks | ||||
| ------------------------------- | ||||
| 
 | ||||
| Unknown marks applied with the ``@pytest.mark.name_of_the_mark`` decorator | ||||
| will always emit a warning, in order to avoid silently doing something | ||||
| surprising due to mis-typed names.  You can disable the warning for custom | ||||
| marks by registering them in ``pytest.ini`` like this: | ||||
| 
 | ||||
| .. code-block:: ini | ||||
| 
 | ||||
|     [pytest] | ||||
|     markers = | ||||
|         slow | ||||
|         serial | ||||
| 
 | ||||
| When the ``--strict-markers`` command-line flag is passed, any unknown marks applied | ||||
| with the ``@pytest.mark.name_of_the_mark`` decorator will trigger an error. | ||||
| Marks defined or added by pytest or by a plugin will not trigger an error. | ||||
| 
 | ||||
| Marks can be registered in ``pytest.ini`` like this: | ||||
| Marks added by pytest or by a plugin instead of the decorator will not trigger | ||||
| this error.  To enforce validation of markers, add ``--strict-markers`` to ``addopts``: | ||||
| 
 | ||||
| .. code-block:: ini | ||||
| 
 | ||||
|     [pytest] | ||||
|     addopts = --strict-markers | ||||
|     markers = | ||||
|         slow | ||||
|         serial | ||||
| 
 | ||||
| This can be used to prevent users mistyping mark names by accident. Test suites that want to enforce this | ||||
| should add ``--strict`` to ``addopts``: | ||||
| 
 | ||||
| .. code-block:: ini | ||||
| 
 | ||||
|     [pytest] | ||||
|     addopts = --strict | ||||
|     markers = | ||||
|         slow | ||||
|         serial | ||||
| Third-party plugins should always :ref:`register their markers <registering-markers>` | ||||
| so that they appear in pytest's help text and do not emit warnings. | ||||
| 
 | ||||
| 
 | ||||
| .. _marker-revamp: | ||||
|  | @ -158,4 +162,4 @@ More details can be found in the `original PR <https://github.com/pytest-dev/pyt | |||
| .. note:: | ||||
| 
 | ||||
|     in a future major release of pytest we will introduce class-based markers, | ||||
|     at which point markers will no longer be limited to instances of :py:class:`Mark` | ||||
|     at which point markers will no longer be limited to instances of :py:class:`Mark`. | ||||
|  |  | |||
|  | @ -424,6 +424,14 @@ record_property | |||
| 
 | ||||
| .. autofunction:: _pytest.junitxml.record_property() | ||||
| 
 | ||||
| 
 | ||||
| record_testsuite_property | ||||
| ~~~~~~~~~~~~~~~~~~~~~~~~~ | ||||
| 
 | ||||
| **Tutorial**: :ref:`record_testsuite_property example`. | ||||
| 
 | ||||
| .. autofunction:: _pytest.junitxml.record_testsuite_property() | ||||
| 
 | ||||
| caplog | ||||
| ~~~~~~ | ||||
| 
 | ||||
|  | @ -1261,15 +1269,17 @@ passed multiple times. The expected format is ``name=value``. For example:: | |||
| 
 | ||||
| .. confval:: markers | ||||
| 
 | ||||
|     When the ``--strict`` command-line argument is used, only known markers - | ||||
|     defined in code by core pytest or some plugin - are allowed. | ||||
|     You can list additional markers in this setting to add them to the whitelist. | ||||
|     When the ``--strict-markers`` or ``--strict`` command-line arguments are used, | ||||
|     only known markers - defined in code by core pytest or some plugin - are allowed. | ||||
| 
 | ||||
|     You can list one marker name per line, indented from the option name. | ||||
|     You can list additional markers in this setting to add them to the whitelist, | ||||
|     in which case you probably want to add ``--strict-markers`` to ``addopts`` | ||||
|     to avoid future regressions: | ||||
| 
 | ||||
|     .. code-block:: ini | ||||
| 
 | ||||
|         [pytest] | ||||
|         addopts = --strict-markers | ||||
|         markers = | ||||
|             slow | ||||
|             serial | ||||
|  |  | |||
|  | @ -352,6 +352,7 @@ Running it with the report-on-xfail option gives this output: | |||
|     collected 7 items | ||||
| 
 | ||||
|     xfail_demo.py xxxxxxx                                                [100%] | ||||
| 
 | ||||
|     ========================= short test summary info ========================== | ||||
|     XFAIL xfail_demo.py::test_hello | ||||
|     XFAIL xfail_demo.py::test_hello2 | ||||
|  | @ -365,7 +366,6 @@ Running it with the report-on-xfail option gives this output: | |||
|     XFAIL xfail_demo.py::test_hello6 | ||||
|       reason: reason | ||||
|     XFAIL xfail_demo.py::test_hello7 | ||||
| 
 | ||||
|     ======================== 7 xfailed in 0.12 seconds ========================= | ||||
| 
 | ||||
| .. _`skip/xfail with parametrize`: | ||||
|  |  | |||
|  | @ -231,11 +231,12 @@ Example: | |||
|     XFAIL test_example.py::test_xfail | ||||
|       reason: xfailing this test | ||||
|     XPASS test_example.py::test_xpass always xfail | ||||
|     ERROR test_example.py::test_error | ||||
|     FAILED test_example.py::test_fail | ||||
|     ERROR test_example.py::test_error - assert 0 | ||||
|     FAILED test_example.py::test_fail - assert 0 | ||||
|     = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = | ||||
| 
 | ||||
| The ``-r`` options accepts a number of characters after it, with ``a`` used above meaning "all except passes". | ||||
| The ``-r`` option accepts a number of characters after it, with ``a`` used | ||||
| above meaning "all except passes". | ||||
| 
 | ||||
| Here is the full list of available characters that can be used: | ||||
| 
 | ||||
|  | @ -247,6 +248,7 @@ Here is the full list of available characters that can be used: | |||
|  - ``p`` - passed | ||||
|  - ``P`` - passed with output | ||||
|  - ``a`` - all except ``pP`` | ||||
|  - ``A`` - all | ||||
| 
 | ||||
| More than one character can be used, so for example to only see failed and skipped tests, you can execute: | ||||
| 
 | ||||
|  | @ -279,7 +281,7 @@ More than one character can be used, so for example to only see failed and skipp | |||
| 
 | ||||
|     test_example.py:14: AssertionError | ||||
|     ========================= short test summary info ========================== | ||||
|     FAILED test_example.py::test_fail | ||||
|     FAILED test_example.py::test_fail - assert 0 | ||||
|     SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test | ||||
|     = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = | ||||
| 
 | ||||
|  | @ -314,12 +316,12 @@ captured output: | |||
|     E       assert 0 | ||||
| 
 | ||||
|     test_example.py:14: AssertionError | ||||
|     ========================= short test summary info ========================== | ||||
|     PASSED test_example.py::test_ok | ||||
|     ================================== PASSES ================================== | ||||
|     _________________________________ test_ok __________________________________ | ||||
|     --------------------------- Captured stdout call --------------------------- | ||||
|     ok | ||||
|     ========================= short test summary info ========================== | ||||
|     PASSED test_example.py::test_ok | ||||
|     = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = | ||||
| 
 | ||||
| .. _pdb-option: | ||||
|  | @ -456,13 +458,6 @@ instead, configure the ``junit_duration_report`` option like this: | |||
| record_property | ||||
| ^^^^^^^^^^^^^^^ | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
|    Fixture renamed from ``record_xml_property`` to ``record_property`` as user | ||||
|    properties are now available to all reporters. | ||||
|    ``record_xml_property`` is now deprecated. | ||||
| 
 | ||||
| If you want to log additional information for a test, you can use the | ||||
| ``record_property`` fixture: | ||||
| 
 | ||||
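| A minimal usage sketch (mirroring the builtin-fixtures listing earlier in this diff): | ||||
| 
| .. code-block:: python | ||||
| 
|     def test_function(record_property): | ||||
|         record_property("example_key", 1) | ||||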
|  | @ -520,9 +515,7 @@ Will result in: | |||
| 
 | ||||
| .. warning:: | ||||
| 
 | ||||
|     ``record_property`` is an experimental feature and may change in the future. | ||||
| 
 | ||||
|     Also please note that using this feature will break any schema verification. | ||||
|     Please note that using this feature will break schema verifications for the latest JUnitXML schema. | ||||
|     This might be a problem when used with some CI servers. | ||||
| 
 | ||||
| record_xml_attribute | ||||
|  | @ -585,43 +578,45 @@ Instead, this will add an attribute ``assertions="REQ-1234"`` inside the generat | |||
|             </xs:complexType> | ||||
|         </xs:element> | ||||
| 
 | ||||
| LogXML: add_global_property | ||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | ||||
| .. warning:: | ||||
| 
 | ||||
|     Please note that using this feature will break schema verifications for the latest JUnitXML schema. | ||||
|     This might be a problem when used with some CI servers. | ||||
| 
 | ||||
| .. _record_testsuite_property example: | ||||
| 
 | ||||
| If you want to add a properties node in the testsuite level, which may contains properties that are relevant | ||||
| to all testcases you can use ``LogXML.add_global_properties`` | ||||
| record_testsuite_property | ||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^ | ||||
| 
 | ||||
| .. versionadded:: 4.5 | ||||
| 
 | ||||
| If you want to add a properties node at the test-suite level, which may contain properties | ||||
| that are relevant to all tests, you can use the ``record_testsuite_property`` session-scoped fixture: | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     import pytest | ||||
| 
 | ||||
| 
 | ||||
|     @pytest.fixture(scope="session") | ||||
|     def log_global_env_facts(f): | ||||
| 
 | ||||
|         if pytest.config.pluginmanager.hasplugin("junitxml"): | ||||
|             my_junit = getattr(pytest.config, "_xml", None) | ||||
| 
 | ||||
|         my_junit.add_global_property("ARCH", "PPC") | ||||
|         my_junit.add_global_property("STORAGE_TYPE", "CEPH") | ||||
| 
 | ||||
| 
 | ||||
|     @pytest.mark.usefixtures(log_global_env_facts.__name__) | ||||
|     def start_and_prepare_env(): | ||||
|         pass | ||||
|     @pytest.fixture(scope="session", autouse=True) | ||||
|     def log_global_env_facts(record_testsuite_property): | ||||
|         record_testsuite_property("ARCH", "PPC") | ||||
|         record_testsuite_property("STORAGE_TYPE", "CEPH") | ||||
| 
 | ||||
| 
 | ||||
|     class TestMe(object): | ||||
|         def test_foo(self): | ||||
|             assert True | ||||
| 
 | ||||
| This will add a property node below the testsuite node to the generated xml: | ||||
| The fixture is a callable which receives ``name`` and ``value`` of a ``<property>`` tag | ||||
| added at the test-suite level of the generated xml: | ||||
| 
 | ||||
| .. code-block:: xml | ||||
| 
 | ||||
|     <testsuite errors="0" failures="0" name="pytest" skips="0" tests="1" time="0.006"> | ||||
|     <testsuite errors="0" failures="0" name="pytest" skipped="0" tests="1" time="0.006"> | ||||
|       <properties> | ||||
|         <property name="ARCH" value="PPC"/> | ||||
|         <property name="STORAGE_TYPE" value="CEPH"/> | ||||
|  | @ -629,11 +624,11 @@ This will add a property node below the testsuite node to the generated xml: | |||
|       <testcase classname="test_me.TestMe" file="test_me.py" line="16" name="test_foo" time="0.000243663787842"/> | ||||
|     </testsuite> | ||||
| 
 | ||||
| .. warning:: | ||||
| ``name`` must be a string; ``value`` will be converted to a string and properly xml-escaped. | ||||
| 
 | ||||
| The generated XML is compatible with the latest ``xunit`` standard, contrary to `record_property`_ | ||||
| and `record_xml_attribute`_. | ||||
| 
 | ||||
|     This is an experimental feature, and its interface might be replaced | ||||
|     by something more powerful and general in future versions. The | ||||
|     functionality per-se will be kept. | ||||
| 
 | ||||
| Creating resultlog format files | ||||
| ---------------------------------------------------- | ||||
|  |  | |||
|  | @ -400,7 +400,7 @@ defines an ``__init__`` constructor, as this prevents the class from being insta | |||
| 
 | ||||
|     ============================= warnings summary ============================= | ||||
|     test_pytest_warnings.py:1 | ||||
|       $REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestWarning: cannot collect test class 'Test' because it has a __init__ constructor | ||||
|       $REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestCollectionWarning: cannot collect test class 'Test' because it has a __init__ constructor | ||||
|         class Test: | ||||
| 
 | ||||
|     -- Docs: https://docs.pytest.org/en/latest/warnings.html | ||||
|  | @ -415,8 +415,20 @@ The following warning types are used by pytest and are part of the public API: | |||
| 
 | ||||
| .. autoclass:: pytest.PytestWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.PytestAssertRewriteWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.PytestCacheWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.PytestCollectionWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.PytestConfigWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.PytestDeprecationWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.RemovedInPytest4Warning | ||||
| 
 | ||||
| .. autoclass:: pytest.PytestExperimentalApiWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.PytestUnhandledCoroutineWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.PytestUnknownMarkWarning | ||||
| 
 | ||||
| .. autoclass:: pytest.RemovedInPytest4Warning | ||||
|  |  | |||
|  | @ -223,7 +223,6 @@ import ``helper.py`` normally.  The contents of | |||
|    pytest.register_assert_rewrite("pytest_foo.helper") | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| Requiring/Loading plugins in a test module or conftest file | ||||
| ----------------------------------------------------------- | ||||
| 
 | ||||
|  | @ -286,6 +285,26 @@ the plugin manager like this: | |||
| If you want to look at the names of existing plugins, use | ||||
| the ``--trace-config`` option. | ||||
| 
 | ||||
| 
 | ||||
| .. _registering-markers: | ||||
| 
 | ||||
| Registering custom markers | ||||
| -------------------------- | ||||
| 
 | ||||
| If your plugin uses any markers, you should register them so that they appear in | ||||
| pytest's help text and do not :ref:`cause spurious warnings <unknown-marks>`. | ||||
| For example, the following plugin would register ``cool_marker`` and | ||||
| ``mark_with`` for all users: | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     def pytest_configure(config): | ||||
|         config.addinivalue_line("markers", "cool_marker: this one is for cool tests.") | ||||
|         config.addinivalue_line( | ||||
|             "markers", "mark_with(arg, arg2): this marker takes arguments." | ||||
|         ) | ||||
| 
 | ||||
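| Tests can then apply the registered markers without triggering the unknown-mark | ||||
| warning, for example (a sketch): | ||||
| 
| .. code-block:: python | ||||
| 
|     import pytest | ||||
| 
|     @pytest.mark.cool_marker | ||||
|     def test_something_cool(): | ||||
|         assert True | ||||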
| 
 | ||||
| Testing plugins | ||||
| --------------- | ||||
| 
 | ||||
setup.py
							|  | @ -14,6 +14,7 @@ INSTALL_REQUIRES = [ | |||
|     'pathlib2>=2.2.0;python_version<"3.6"', | ||||
|     'colorama;sys_platform=="win32"', | ||||
|     "pluggy>=0.9,!=0.10,<1.0", | ||||
|     "wcwidth", | ||||
| ] | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -268,11 +268,13 @@ class AssertionRewritingHook(object): | |||
|         self._marked_for_rewrite_cache.clear() | ||||
| 
 | ||||
|     def _warn_already_imported(self, name): | ||||
|         from _pytest.warning_types import PytestWarning | ||||
|         from _pytest.warning_types import PytestAssertRewriteWarning | ||||
|         from _pytest.warnings import _issue_warning_captured | ||||
| 
 | ||||
|         _issue_warning_captured( | ||||
|             PytestWarning("Module already imported so cannot be rewritten: %s" % name), | ||||
|             PytestAssertRewriteWarning( | ||||
|                 "Module already imported so cannot be rewritten: %s" % name | ||||
|             ), | ||||
|             self.config.hook, | ||||
|             stacklevel=5, | ||||
|         ) | ||||
|  | @ -744,12 +746,12 @@ class AssertionRewriter(ast.NodeVisitor): | |||
| 
 | ||||
|     def display(self, expr): | ||||
|         """Call saferepr on the expression.""" | ||||
|         return self.helper("saferepr", expr) | ||||
|         return self.helper("_saferepr", expr) | ||||
| 
 | ||||
|     def helper(self, name, *args): | ||||
|         """Call a helper in this module.""" | ||||
|         py_name = ast.Name("@pytest_ar", ast.Load()) | ||||
|         attr = ast.Attribute(py_name, "_" + name, ast.Load()) | ||||
|         attr = ast.Attribute(py_name, name, ast.Load()) | ||||
|         return ast_Call(attr, list(args), []) | ||||
| 
 | ||||
|     def builtin(self, name): | ||||
|  | @ -819,11 +821,13 @@ class AssertionRewriter(ast.NodeVisitor): | |||
| 
 | ||||
|         """ | ||||
|         if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: | ||||
|             from _pytest.warning_types import PytestWarning | ||||
|             from _pytest.warning_types import PytestAssertRewriteWarning | ||||
|             import warnings | ||||
| 
 | ||||
|             warnings.warn_explicit( | ||||
|                 PytestWarning("assertion is always true, perhaps remove parentheses?"), | ||||
|                 PytestAssertRewriteWarning( | ||||
|                     "assertion is always true, perhaps remove parentheses?" | ||||
|                 ), | ||||
|                 category=None, | ||||
|                 filename=str(self.module_path), | ||||
|                 lineno=assert_.lineno, | ||||
|  | @ -849,14 +853,14 @@ class AssertionRewriter(ast.NodeVisitor): | |||
|         negation = ast.UnaryOp(ast.Not(), top_condition) | ||||
|         self.statements.append(ast.If(negation, body, [])) | ||||
|         if assert_.msg: | ||||
|             assertmsg = self.helper("format_assertmsg", assert_.msg) | ||||
|             assertmsg = self.helper("_format_assertmsg", assert_.msg) | ||||
|             explanation = "\n>assert " + explanation | ||||
|         else: | ||||
|             assertmsg = ast.Str("") | ||||
|             explanation = "assert " + explanation | ||||
|         template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) | ||||
|         msg = self.pop_format_context(template) | ||||
|         fmt = self.helper("format_explanation", msg) | ||||
|         fmt = self.helper("_format_explanation", msg) | ||||
|         err_name = ast.Name("AssertionError", ast.Load()) | ||||
|         exc = ast_Call(err_name, [fmt], []) | ||||
|         if sys.version_info[0] >= 3: | ||||
|  | @ -887,10 +891,10 @@ class AssertionRewriter(ast.NodeVisitor): | |||
|         val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE]) | ||||
|         send_warning = ast.parse( | ||||
|             """ | ||||
| from _pytest.warning_types import PytestWarning | ||||
| from _pytest.warning_types import PytestAssertRewriteWarning | ||||
| from warnings import warn_explicit | ||||
| warn_explicit( | ||||
|     PytestWarning('asserting the value None, please use "assert is None"'), | ||||
|     PytestAssertRewriteWarning('asserting the value None, please use "assert is None"'), | ||||
|     category=None, | ||||
|     filename={filename!r}, | ||||
|     lineno={lineno}, | ||||
|  | @ -906,7 +910,7 @@ warn_explicit( | |||
|         # _should_repr_global_name() thinks it's acceptable. | ||||
|         locs = ast_Call(self.builtin("locals"), [], []) | ||||
|         inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) | ||||
|         dorepr = self.helper("should_repr_global_name", name) | ||||
|         dorepr = self.helper("_should_repr_global_name", name) | ||||
|         test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) | ||||
|         expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) | ||||
|         return name, self.explanation_param(expr) | ||||
|  | @ -942,7 +946,7 @@ warn_explicit( | |||
|                 self.statements = body = inner | ||||
|         self.statements = save | ||||
|         self.on_failure = fail_save | ||||
|         expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or)) | ||||
|         expl_template = self.helper("_format_boolop", expl_list, ast.Num(is_or)) | ||||
|         expl = self.pop_format_context(expl_template) | ||||
|         return ast.Name(res_var, ast.Load()), self.explanation_param(expl) | ||||
| 
 | ||||
|  | @ -1067,7 +1071,7 @@ warn_explicit( | |||
|             left_res, left_expl = next_res, next_expl | ||||
|         # Use pytest.assertion.util._reprcompare if that's available. | ||||
|         expl_call = self.helper( | ||||
|             "call_reprcompare", | ||||
|             "_call_reprcompare", | ||||
|             ast.Tuple(syms, ast.Load()), | ||||
|             ast.Tuple(load_names, ast.Load()), | ||||
|             ast.Tuple(expls, ast.Load()), | ||||
|  |  | |||
|  | @ -285,19 +285,29 @@ def _compare_eq_iterable(left, right, verbose=0): | |||
| 
 | ||||
| def _compare_eq_sequence(left, right, verbose=0): | ||||
|     explanation = [] | ||||
|     for i in range(min(len(left), len(right))): | ||||
|     len_left = len(left) | ||||
|     len_right = len(right) | ||||
|     for i in range(min(len_left, len_right)): | ||||
|         if left[i] != right[i]: | ||||
|             explanation += [u"At index %s diff: %r != %r" % (i, left[i], right[i])] | ||||
|             break | ||||
|     if len(left) > len(right): | ||||
|     len_diff = len_left - len_right | ||||
| 
 | ||||
|     if len_diff: | ||||
|         if len_diff > 0: | ||||
|             dir_with_more = "Left" | ||||
|             extra = saferepr(left[len_right]) | ||||
|         else: | ||||
|             len_diff = 0 - len_diff | ||||
|             dir_with_more = "Right" | ||||
|             extra = saferepr(right[len_left]) | ||||
| 
 | ||||
|         if len_diff == 1: | ||||
|             explanation += [u"%s contains one more item: %s" % (dir_with_more, extra)] | ||||
|         else: | ||||
|             explanation += [ | ||||
|             u"Left contains more items, first extra item: %s" | ||||
|             % saferepr(left[len(right)]) | ||||
|         ] | ||||
|     elif len(left) < len(right): | ||||
|         explanation += [ | ||||
|             u"Right contains more items, first extra item: %s" | ||||
|             % saferepr(right[len(left)]) | ||||
|                 u"%s contains %d more items, first extra item: %s" | ||||
|                 % (dir_with_more, len_diff, extra) | ||||
|             ] | ||||
|     return explanation | ||||
| 
 | ||||
|  | @ -319,7 +329,9 @@ def _compare_eq_set(left, right, verbose=0): | |||
| 
 | ||||
| def _compare_eq_dict(left, right, verbose=0): | ||||
|     explanation = [] | ||||
|     common = set(left).intersection(set(right)) | ||||
|     set_left = set(left) | ||||
|     set_right = set(right) | ||||
|     common = set_left.intersection(set_right) | ||||
|     same = {k: left[k] for k in common if left[k] == right[k]} | ||||
|     if same and verbose < 2: | ||||
|         explanation += [u"Omitting %s identical items, use -vv to show" % len(same)] | ||||
|  | @ -331,15 +343,23 @@ def _compare_eq_dict(left, right, verbose=0): | |||
|         explanation += [u"Differing items:"] | ||||
|         for k in diff: | ||||
|             explanation += [saferepr({k: left[k]}) + " != " + saferepr({k: right[k]})] | ||||
|     extra_left = set(left) - set(right) | ||||
|     if extra_left: | ||||
|         explanation.append(u"Left contains more items:") | ||||
|     extra_left = set_left - set_right | ||||
|     len_extra_left = len(extra_left) | ||||
|     if len_extra_left: | ||||
|         explanation.append( | ||||
|             u"Left contains %d more item%s:" | ||||
|             % (len_extra_left, "" if len_extra_left == 1 else "s") | ||||
|         ) | ||||
|         explanation.extend( | ||||
|             pprint.pformat({k: left[k] for k in extra_left}).splitlines() | ||||
|         ) | ||||
|     extra_right = set(right) - set(left) | ||||
|     if extra_right: | ||||
|         explanation.append(u"Right contains more items:") | ||||
|     extra_right = set_right - set_left | ||||
|     len_extra_right = len(extra_right) | ||||
|     if len_extra_right: | ||||
|         explanation.append( | ||||
|             u"Right contains %d more item%s:" | ||||
|             % (len_extra_right, "" if len_extra_right == 1 else "s") | ||||
|         ) | ||||
|         explanation.extend( | ||||
|             pprint.pformat({k: right[k] for k in extra_right}).splitlines() | ||||
|         ) | ||||
|  |  | |||
|  | @ -60,10 +60,10 @@ class Cache(object): | |||
| 
 | ||||
|     def warn(self, fmt, **args): | ||||
|         from _pytest.warnings import _issue_warning_captured | ||||
|         from _pytest.warning_types import PytestWarning | ||||
|         from _pytest.warning_types import PytestCacheWarning | ||||
| 
 | ||||
|         _issue_warning_captured( | ||||
|             PytestWarning(fmt.format(**args) if args else fmt), | ||||
|             PytestCacheWarning(fmt.format(**args) if args else fmt), | ||||
|             self._config.hook, | ||||
|             stacklevel=3, | ||||
|         ) | ||||
|  | @ -157,18 +157,38 @@ class LFPlugin(object): | |||
|         self.active = any(config.getoption(key) for key in active_keys) | ||||
|         self.lastfailed = config.cache.get("cache/lastfailed", {}) | ||||
|         self._previously_failed_count = None | ||||
|         self._no_failures_behavior = self.config.getoption("last_failed_no_failures") | ||||
|         self._report_status = None | ||||
|         self._skipped_files = 0  # count skipped files during collection due to --lf | ||||
| 
 | ||||
|     def last_failed_paths(self): | ||||
|         """Returns a set with all Paths()s of the previously failed nodeids (cached). | ||||
|         """ | ||||
|         result = getattr(self, "_last_failed_paths", None) | ||||
|         if result is None: | ||||
|             rootpath = Path(self.config.rootdir) | ||||
|             result = {rootpath / nodeid.split("::")[0] for nodeid in self.lastfailed} | ||||
|             self._last_failed_paths = result | ||||
|         return result | ||||
| 
 | ||||
|     def pytest_ignore_collect(self, path): | ||||
|         """ | ||||
|         Ignore this file path if we are in --lf mode and it is not in the list of | ||||
|         previously failed files. | ||||
|         """ | ||||
|         if ( | ||||
|             self.active | ||||
|             and self.config.getoption("lf") | ||||
|             and path.isfile() | ||||
|             and self.lastfailed | ||||
|         ): | ||||
|             skip_it = Path(path) not in self.last_failed_paths() | ||||
|             if skip_it: | ||||
|                 self._skipped_files += 1 | ||||
|             return skip_it | ||||
| 
 | ||||
|     def pytest_report_collectionfinish(self): | ||||
|         if self.active and self.config.getoption("verbose") >= 0: | ||||
|             if not self._previously_failed_count: | ||||
|                 return None | ||||
|             noun = "failure" if self._previously_failed_count == 1 else "failures" | ||||
|             suffix = " first" if self.config.getoption("failedfirst") else "" | ||||
|             mode = "rerun previous {count} {noun}{suffix}".format( | ||||
|                 count=self._previously_failed_count, suffix=suffix, noun=noun | ||||
|             ) | ||||
|             return "run-last-failure: %s" % mode | ||||
|             return "run-last-failure: %s" % self._report_status | ||||
| 
 | ||||
|     def pytest_runtest_logreport(self, report): | ||||
|         if (report.when == "call" and report.passed) or report.skipped: | ||||
|  | @ -186,7 +206,9 @@ class LFPlugin(object): | |||
|             self.lastfailed[report.nodeid] = True | ||||
| 
 | ||||
|     def pytest_collection_modifyitems(self, session, config, items): | ||||
|         if self.active: | ||||
|         if not self.active: | ||||
|             return | ||||
| 
 | ||||
|         if self.lastfailed: | ||||
|             previously_failed = [] | ||||
|             previously_passed = [] | ||||
|  | @ -196,18 +218,43 @@ class LFPlugin(object): | |||
|                 else: | ||||
|                     previously_passed.append(item) | ||||
|             self._previously_failed_count = len(previously_failed) | ||||
| 
 | ||||
|             if not previously_failed: | ||||
|                     # running a subset of all tests with recorded failures outside | ||||
|                     # of the set of tests currently executing | ||||
|                     return | ||||
|                 # Running a subset of all tests with recorded failures | ||||
|                 # only outside of it. | ||||
|                 self._report_status = "%d known failures not in selected tests" % ( | ||||
|                     len(self.lastfailed), | ||||
|                 ) | ||||
|             else: | ||||
|                 if self.config.getoption("lf"): | ||||
|                     items[:] = previously_failed | ||||
|                     config.hook.pytest_deselected(items=previously_passed) | ||||
|                 else: | ||||
|                 else:  # --failedfirst | ||||
|                     items[:] = previously_failed + previously_passed | ||||
|             elif self._no_failures_behavior == "none": | ||||
| 
 | ||||
|                 noun = "failure" if self._previously_failed_count == 1 else "failures" | ||||
|                 if self._skipped_files > 0: | ||||
|                     files_noun = "file" if self._skipped_files == 1 else "files" | ||||
|                     skipped_files_msg = " (skipped {files} {files_noun})".format( | ||||
|                         files=self._skipped_files, files_noun=files_noun | ||||
|                     ) | ||||
|                 else: | ||||
|                     skipped_files_msg = "" | ||||
|                 suffix = " first" if self.config.getoption("failedfirst") else "" | ||||
|                 self._report_status = "rerun previous {count} {noun}{suffix}{skipped_files}".format( | ||||
|                     count=self._previously_failed_count, | ||||
|                     suffix=suffix, | ||||
|                     noun=noun, | ||||
|                     skipped_files=skipped_files_msg, | ||||
|                 ) | ||||
|         else: | ||||
|             self._report_status = "no previously failed tests, " | ||||
|             if self.config.getoption("last_failed_no_failures") == "none": | ||||
|                 self._report_status += "deselecting all items." | ||||
|                 config.hook.pytest_deselected(items=items) | ||||
|                 items[:] = [] | ||||
|             else: | ||||
|                 self._report_status += "not deselecting items." | ||||
| 
 | ||||
|     def pytest_sessionfinish(self, session): | ||||
|         config = self.config | ||||
|  | @ -282,9 +329,13 @@ def pytest_addoption(parser): | |||
|     ) | ||||
|     group.addoption( | ||||
|         "--cache-show", | ||||
|         action="store_true", | ||||
|         action="append", | ||||
|         nargs="?", | ||||
|         dest="cacheshow", | ||||
|         help="show cache contents, don't perform collection or tests", | ||||
|         help=( | ||||
|             "show cache contents, don't perform collection or tests. " | ||||
|             "Optional argument: glob (default: '*')." | ||||
|         ), | ||||
|     ) | ||||
|     group.addoption( | ||||
|         "--cache-clear", | ||||
|  | @ -303,8 +354,7 @@ def pytest_addoption(parser): | |||
|         dest="last_failed_no_failures", | ||||
|         choices=("all", "none"), | ||||
|         default="all", | ||||
|         help="change the behavior when no test failed in the last run or no " | ||||
|         "information about the last failures was found in the cache", | ||||
|         help="which tests to run with no previously (known) failures.", | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -360,11 +410,16 @@ def cacheshow(config, session): | |||
|     if not config.cache._cachedir.is_dir(): | ||||
|         tw.line("cache is empty") | ||||
|         return 0 | ||||
| 
 | ||||
|     glob = config.option.cacheshow[0] | ||||
|     if glob is None: | ||||
|         glob = "*" | ||||
| 
 | ||||
|     dummy = object() | ||||
|     basedir = config.cache._cachedir | ||||
|     vdir = basedir / "v" | ||||
|     tw.sep("-", "cache values") | ||||
|     for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()): | ||||
|     tw.sep("-", "cache values for %r" % glob) | ||||
|     for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): | ||||
|         key = valpath.relative_to(vdir) | ||||
|         val = config.cache.get(key, dummy) | ||||
|         if val is dummy: | ||||
|  | @ -376,8 +431,8 @@ def cacheshow(config, session): | |||
| 
 | ||||
|     ddir = basedir / "d" | ||||
|     if ddir.is_dir(): | ||||
|         contents = sorted(ddir.rglob("*")) | ||||
|         tw.sep("-", "cache directories") | ||||
|         contents = sorted(ddir.rglob(glob)) | ||||
|         tw.sep("-", "cache directories for %r" % glob) | ||||
|         for p in contents: | ||||
|             # if p.check(dir=1): | ||||
|             #    print("%s/" % p.relto(basedir)) | ||||
|  |  | |||
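| A minimal sketch of the matching the new optional glob performs, assuming | ||||
| the standard ``.pytest_cache`` layout (the ``cache/lastfailed`` key is just | ||||
| an example): | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     # roughly what `pytest --cache-show 'cache/*'` enumerates | ||||
|     from pathlib import Path | ||||
| 
 | ||||
|     vdir = Path(".pytest_cache") / "v" | ||||
|     for valpath in sorted(p for p in vdir.rglob("cache/*") if p.is_file()): | ||||
|         print(valpath.relative_to(vdir))  # e.g. cache/lastfailed | ||||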
|  | @ -56,13 +56,6 @@ def pytest_load_initial_conftests(early_config, parser, args): | |||
|     # make sure that capturemanager is properly reset at final shutdown | ||||
|     early_config.add_cleanup(capman.stop_global_capturing) | ||||
| 
 | ||||
|     # make sure logging does not raise exceptions at the end | ||||
|     def silence_logging_at_shutdown(): | ||||
|         if "logging" in sys.modules: | ||||
|             sys.modules["logging"].raiseExceptions = False | ||||
| 
 | ||||
|     early_config.add_cleanup(silence_logging_at_shutdown) | ||||
| 
 | ||||
|     # finally trigger conftest loading but while capturing (issue93) | ||||
|     capman.start_global_capturing() | ||||
|     outcome = yield | ||||
|  | @ -463,6 +456,7 @@ CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"]) | |||
| 
 | ||||
| class MultiCapture(object): | ||||
|     out = err = in_ = None | ||||
|     _state = None | ||||
| 
 | ||||
|     def __init__(self, out=True, err=True, in_=True, Capture=None): | ||||
|         if in_: | ||||
|  | @ -473,9 +467,16 @@ class MultiCapture(object): | |||
|             self.err = Capture(2) | ||||
| 
 | ||||
|     def __repr__(self): | ||||
|         return "<MultiCapture out=%r err=%r in_=%r>" % (self.out, self.err, self.in_) | ||||
|         return "<MultiCapture out=%r err=%r in_=%r _state=%r _in_suspended=%r>" % ( | ||||
|             self.out, | ||||
|             self.err, | ||||
|             self.in_, | ||||
|             self._state, | ||||
|             getattr(self, "_in_suspended", "<UNSET>"), | ||||
|         ) | ||||
| 
 | ||||
|     def start_capturing(self): | ||||
|         self._state = "started" | ||||
|         if self.in_: | ||||
|             self.in_.start() | ||||
|         if self.out: | ||||
|  | @ -493,6 +494,7 @@ class MultiCapture(object): | |||
|         return out, err | ||||
| 
 | ||||
|     def suspend_capturing(self, in_=False): | ||||
|         self._state = "suspended" | ||||
|         if self.out: | ||||
|             self.out.suspend() | ||||
|         if self.err: | ||||
|  | @ -502,6 +504,7 @@ class MultiCapture(object): | |||
|             self._in_suspended = True | ||||
| 
 | ||||
|     def resume_capturing(self): | ||||
|         self._state = "resumed" | ||||
|         if self.out: | ||||
|             self.out.resume() | ||||
|         if self.err: | ||||
|  | @ -512,9 +515,9 @@ class MultiCapture(object): | |||
| 
 | ||||
|     def stop_capturing(self): | ||||
|         """ stop capturing and reset capturing streams """ | ||||
|         if hasattr(self, "_reset"): | ||||
|         if self._state == "stopped": | ||||
|             raise ValueError("was already stopped") | ||||
|         self._reset = True | ||||
|         self._state = "stopped" | ||||
|         if self.out: | ||||
|             self.out.done() | ||||
|         if self.err: | ||||
|  | @ -542,6 +545,7 @@ class FDCaptureBinary(object): | |||
|     """ | ||||
| 
 | ||||
|     EMPTY_BUFFER = b"" | ||||
|     _state = None | ||||
| 
 | ||||
|     def __init__(self, targetfd, tmpfile=None): | ||||
|         self.targetfd = targetfd | ||||
|  | @ -568,9 +572,10 @@ class FDCaptureBinary(object): | |||
|             self.tmpfile_fd = tmpfile.fileno() | ||||
| 
 | ||||
|     def __repr__(self): | ||||
|         return "<FDCapture %s oldfd=%s>" % ( | ||||
|         return "<FDCapture %s oldfd=%s _state=%r>" % ( | ||||
|             self.targetfd, | ||||
|             getattr(self, "targetfd_save", None), | ||||
|             self._state, | ||||
|         ) | ||||
| 
 | ||||
|     def start(self): | ||||
|  | @ -581,6 +586,7 @@ class FDCaptureBinary(object): | |||
|             raise ValueError("saved filedescriptor not valid anymore") | ||||
|         os.dup2(self.tmpfile_fd, self.targetfd) | ||||
|         self.syscapture.start() | ||||
|         self._state = "started" | ||||
| 
 | ||||
|     def snap(self): | ||||
|         self.tmpfile.seek(0) | ||||
|  | @ -597,14 +603,17 @@ class FDCaptureBinary(object): | |||
|         os.close(targetfd_save) | ||||
|         self.syscapture.done() | ||||
|         _attempt_to_close_capture_file(self.tmpfile) | ||||
|         self._state = "done" | ||||
| 
 | ||||
|     def suspend(self): | ||||
|         self.syscapture.suspend() | ||||
|         os.dup2(self.targetfd_save, self.targetfd) | ||||
|         self._state = "suspended" | ||||
| 
 | ||||
|     def resume(self): | ||||
|         self.syscapture.resume() | ||||
|         os.dup2(self.tmpfile_fd, self.targetfd) | ||||
|         self._state = "resumed" | ||||
| 
 | ||||
|     def writeorg(self, data): | ||||
|         """ write to original file descriptor. """ | ||||
|  | @ -632,6 +641,7 @@ class FDCapture(FDCaptureBinary): | |||
| class SysCapture(object): | ||||
| 
 | ||||
|     EMPTY_BUFFER = str() | ||||
|     _state = None | ||||
| 
 | ||||
|     def __init__(self, fd, tmpfile=None): | ||||
|         name = patchsysdict[fd] | ||||
|  | @ -644,8 +654,17 @@ class SysCapture(object): | |||
|                 tmpfile = CaptureIO() | ||||
|         self.tmpfile = tmpfile | ||||
| 
 | ||||
|     def __repr__(self): | ||||
|         return "<SysCapture %s _old=%r, tmpfile=%r _state=%r>" % ( | ||||
|             self.name, | ||||
|             self._old, | ||||
|             self.tmpfile, | ||||
|             self._state, | ||||
|         ) | ||||
| 
 | ||||
|     def start(self): | ||||
|         setattr(sys, self.name, self.tmpfile) | ||||
|         self._state = "started" | ||||
| 
 | ||||
|     def snap(self): | ||||
|         res = self.tmpfile.getvalue() | ||||
|  | @ -657,12 +676,15 @@ class SysCapture(object): | |||
|         setattr(sys, self.name, self._old) | ||||
|         del self._old | ||||
|         _attempt_to_close_capture_file(self.tmpfile) | ||||
|         self._state = "done" | ||||
| 
 | ||||
|     def suspend(self): | ||||
|         setattr(sys, self.name, self._old) | ||||
|         self._state = "suspended" | ||||
| 
 | ||||
|     def resume(self): | ||||
|         setattr(sys, self.name, self.tmpfile) | ||||
|         self._state = "resumed" | ||||
| 
 | ||||
|     def writeorg(self, data): | ||||
|         self._old.write(data) | ||||
|  |  | |||
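| A minimal sketch of the lifecycle the new ``_state`` attribute records, | ||||
| using the internal ``MultiCapture``/``SysCapture`` API (internal, so | ||||
| subject to change; the captured text is illustrative): | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     from _pytest.capture import MultiCapture, SysCapture | ||||
| 
 | ||||
|     cap = MultiCapture(Capture=SysCapture) | ||||
|     cap.start_capturing()    # _state == "started" | ||||
|     print("captured") | ||||
|     cap.suspend_capturing()  # _state == "suspended" | ||||
|     cap.resume_capturing()   # _state == "resumed" | ||||
|     out, err = cap.readouterr() | ||||
|     assert out == "captured\n" | ||||
|     cap.stop_capturing()     # _state == "stopped" | ||||
|     # a second stop_capturing() would now raise ValueError("was already stopped") | ||||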
|  | @ -32,7 +32,7 @@ from _pytest.compat import lru_cache | |||
| from _pytest.compat import safe_str | ||||
| from _pytest.outcomes import fail | ||||
| from _pytest.outcomes import Skipped | ||||
| from _pytest.warning_types import PytestWarning | ||||
| from _pytest.warning_types import PytestConfigWarning | ||||
| 
 | ||||
| hookimpl = HookimplMarker("pytest") | ||||
| hookspec = HookspecMarker("pytest") | ||||
|  | @ -112,13 +112,18 @@ def directory_arg(path, optname): | |||
|     return path | ||||
| 
 | ||||
| 
 | ||||
| default_plugins = ( | ||||
| # Plugins that cannot be disabled via "-p no:X" currently. | ||||
| essential_plugins = ( | ||||
|     "mark", | ||||
|     "main", | ||||
|     "terminal", | ||||
|     "runner", | ||||
|     "python", | ||||
|     "fixtures", | ||||
|     "helpconfig",  # Provides -p. | ||||
| ) | ||||
| 
 | ||||
| default_plugins = essential_plugins + ( | ||||
|     "terminal", | ||||
|     "debugging", | ||||
|     "unittest", | ||||
|     "capture", | ||||
|  | @ -127,7 +132,6 @@ default_plugins = ( | |||
|     "monkeypatch", | ||||
|     "recwarn", | ||||
|     "pastebin", | ||||
|     "helpconfig", | ||||
|     "nose", | ||||
|     "assertion", | ||||
|     "junitxml", | ||||
|  | @ -143,7 +147,6 @@ default_plugins = ( | |||
|     "reports", | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| builtin_plugins = set(default_plugins) | ||||
| builtin_plugins.add("pytester") | ||||
| 
 | ||||
|  | @ -279,7 +282,6 @@ class PytestPluginManager(PluginManager): | |||
|             known_marks = {m.name for m in getattr(method, "pytestmark", [])} | ||||
| 
 | ||||
|             for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): | ||||
| 
 | ||||
|                 opts.setdefault(name, hasattr(method, name) or name in known_marks) | ||||
|         return opts | ||||
| 
 | ||||
|  | @ -305,7 +307,7 @@ class PytestPluginManager(PluginManager): | |||
|     def register(self, plugin, name=None): | ||||
|         if name in ["pytest_catchlog", "pytest_capturelog"]: | ||||
|             warnings.warn( | ||||
|                 PytestWarning( | ||||
|                 PytestConfigWarning( | ||||
|                     "{} plugin has been merged into the core, " | ||||
|                     "please remove it from your requirements.".format( | ||||
|                         name.replace("_", "-") | ||||
|  | @ -496,6 +498,9 @@ class PytestPluginManager(PluginManager): | |||
|     def consider_pluginarg(self, arg): | ||||
|         if arg.startswith("no:"): | ||||
|             name = arg[3:] | ||||
|             if name in essential_plugins: | ||||
|                 raise UsageError("plugin %s cannot be disabled" % name) | ||||
| 
 | ||||
|             # PR #4304 : remove stepwise if cacheprovider is blocked | ||||
|             if name == "cacheprovider": | ||||
|                 self.set_blocked("stepwise") | ||||
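| The effect of the split, sketched with ``pytest.main()`` (exit code 4 is | ||||
| ``EXIT_USAGEERROR``; 0 or 5 depends on whether any tests are collected): | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     import pytest | ||||
| 
 | ||||
|     # pastebin is only in default_plugins, so it can still be disabled: | ||||
|     assert pytest.main(["-p", "no:pastebin", "--collect-only", "-q"]) in (0, 5) | ||||
|     # mark is in essential_plugins, so blocking it is now a usage error: | ||||
|     assert pytest.main(["-p", "no:mark"]) == 4 | ||||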
|  | @ -569,7 +574,7 @@ class PytestPluginManager(PluginManager): | |||
|             from _pytest.warnings import _issue_warning_captured | ||||
| 
 | ||||
|             _issue_warning_captured( | ||||
|                 PytestWarning("skipped plugin %r: %s" % (modname, e.msg)), | ||||
|                 PytestConfigWarning("skipped plugin %r: %s" % (modname, e.msg)), | ||||
|                 self.hook, | ||||
|                 stacklevel=1, | ||||
|             ) | ||||
|  | @ -858,7 +863,7 @@ class Config(object): | |||
|                 from _pytest.warnings import _issue_warning_captured | ||||
| 
 | ||||
|                 _issue_warning_captured( | ||||
|                     PytestWarning( | ||||
|                     PytestConfigWarning( | ||||
|                         "could not load initial conftests: {}".format(e.path) | ||||
|                     ), | ||||
|                     self.hook, | ||||
|  |  | |||
|  | @ -143,18 +143,18 @@ class pytestPDB(object): | |||
|                     else: | ||||
|                         tw.sep(">", "PDB set_trace") | ||||
| 
 | ||||
|             class _PdbWrapper(cls._pdb_cls, object): | ||||
|             class PytestPdbWrapper(cls._pdb_cls, object): | ||||
|                 _pytest_capman = capman | ||||
|                 _continued = False | ||||
| 
 | ||||
|                 def do_debug(self, arg): | ||||
|                     cls._recursive_debug += 1 | ||||
|                     ret = super(_PdbWrapper, self).do_debug(arg) | ||||
|                     ret = super(PytestPdbWrapper, self).do_debug(arg) | ||||
|                     cls._recursive_debug -= 1 | ||||
|                     return ret | ||||
| 
 | ||||
|                 def do_continue(self, arg): | ||||
|                     ret = super(_PdbWrapper, self).do_continue(arg) | ||||
|                     ret = super(PytestPdbWrapper, self).do_continue(arg) | ||||
|                     if cls._recursive_debug == 0: | ||||
|                         tw = _pytest.config.create_terminal_writer(cls._config) | ||||
|                         tw.line() | ||||
|  | @ -181,24 +181,30 @@ class pytestPDB(object): | |||
| 
 | ||||
|                 do_c = do_cont = do_continue | ||||
| 
 | ||||
|                 def set_quit(self): | ||||
|                 def do_quit(self, arg): | ||||
|                     """Raise Exit outcome when the quit command is used in pdb. | ||||
| 
 | ||||
|                     This is a bit of a hack - it would be better if BdbQuit | ||||
|                     could be handled, but this would require wrapping the | ||||
|                     whole pytest run and adjusting the report etc. | ||||
|                     """ | ||||
|                     super(_PdbWrapper, self).set_quit() | ||||
|                     ret = super(PytestPdbWrapper, self).do_quit(arg) | ||||
| 
 | ||||
|                     if cls._recursive_debug == 0: | ||||
|                         outcomes.exit("Quitting debugger") | ||||
| 
 | ||||
|                     return ret | ||||
| 
 | ||||
|                 do_q = do_quit | ||||
|                 do_exit = do_quit | ||||
| 
 | ||||
|                 def setup(self, f, tb): | ||||
|                     """Suspend on setup(). | ||||
| 
 | ||||
|                     Needed after do_continue resumed, when entering | ||||
|                     another breakpoint again. | ||||
|                     """ | ||||
|                     ret = super(_PdbWrapper, self).setup(f, tb) | ||||
|                     ret = super(PytestPdbWrapper, self).setup(f, tb) | ||||
|                     if not ret and self._continued: | ||||
|                         # pdb.setup() returns True if the command wants to exit | ||||
|                         # from the interaction: do not suspend capturing then. | ||||
|  | @ -206,7 +212,7 @@ class pytestPDB(object): | |||
|                             self._pytest_capman.suspend_global_capture(in_=True) | ||||
|                     return ret | ||||
| 
 | ||||
|             _pdb = _PdbWrapper(**kwargs) | ||||
|             _pdb = PytestPdbWrapper(**kwargs) | ||||
|             cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) | ||||
|         else: | ||||
|             _pdb = cls._pdb_cls(**kwargs) | ||||
|  | @ -246,7 +252,7 @@ def _test_pytest_function(pyfuncitem): | |||
|     _pdb = pytestPDB._init_pdb() | ||||
|     testfunction = pyfuncitem.obj | ||||
|     pyfuncitem.obj = _pdb.runcall | ||||
|     if "func" in pyfuncitem._fixtureinfo.argnames:  # noqa | ||||
|     if "func" in pyfuncitem._fixtureinfo.argnames:  # pragma: no branch | ||||
|         raise ValueError("--trace can't be used with a fixture named func!") | ||||
|     pyfuncitem.funcargs["func"] = testfunction | ||||
|     new_list = list(pyfuncitem._fixtureinfo.argnames) | ||||
|  |  | |||
|  | @ -1023,6 +1023,7 @@ def fixture(scope="function", params=None, autouse=False, ids=None, name=None): | |||
|     :arg params: an optional list of parameters which will cause multiple | ||||
|                 invocations of the fixture function and all of the tests | ||||
|                 using it. | ||||
|                 The current parameter is available in ``request.param``. | ||||
| 
 | ||||
|     :arg autouse: if True, the fixture func is activated for all tests that | ||||
|                 can see it.  If False (the default) then an explicit | ||||
|  |  | |||
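| The documented behaviour in one canonical example (fixture and parameter | ||||
| names are illustrative): each value in ``params`` produces one invocation | ||||
| of the fixture, with the active value available on ``request.param``: | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     import pytest | ||||
| 
 | ||||
|     @pytest.fixture(params=["mysql", "postgres"]) | ||||
|     def backend(request): | ||||
|         return request.param  # the current parameter | ||||
| 
 | ||||
|     def test_backend(backend): | ||||
|         assert backend in ("mysql", "postgres") | ||||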
|  | @ -151,13 +151,14 @@ def showhelp(config): | |||
|     ) | ||||
|     tw.line() | ||||
| 
 | ||||
|     columns = tw.fullwidth  # costly call | ||||
|     for name in config._parser._ininames: | ||||
|         help, type, default = config._parser._inidict[name] | ||||
|         if type is None: | ||||
|             type = "string" | ||||
|         spec = "%s (%s)" % (name, type) | ||||
|         line = "  %-24s %s" % (spec, help) | ||||
|         tw.line(line[: tw.fullwidth]) | ||||
|         tw.line(line[:columns]) | ||||
| 
 | ||||
|     tw.line() | ||||
|     tw.line("environment variables:") | ||||
|  |  | |||
|  | @ -227,7 +227,7 @@ def pytest_collectreport(report): | |||
| 
 | ||||
| 
 | ||||
| def pytest_deselected(items): | ||||
|     """ called for test items deselected by keyword. """ | ||||
|     """ called for test items deselected, e.g. by keyword. """ | ||||
| 
 | ||||
| 
 | ||||
| @hookspec(firstresult=True) | ||||
|  |  | |||
|  | @ -252,7 +252,14 @@ class _NodeReporter(object): | |||
| 
 | ||||
|     def append_skipped(self, report): | ||||
|         if hasattr(report, "wasxfail"): | ||||
|             self._add_simple(Junit.skipped, "expected test failure", report.wasxfail) | ||||
|             xfailreason = report.wasxfail | ||||
|             if xfailreason.startswith("reason: "): | ||||
|                 xfailreason = xfailreason[8:] | ||||
|             self.append( | ||||
|                 Junit.skipped( | ||||
|                     "", type="pytest.xfail", message=bin_xml_escape(xfailreason) | ||||
|                 ) | ||||
|             ) | ||||
|         else: | ||||
|             filename, lineno, skipreason = report.longrepr | ||||
|             if skipreason.startswith("Skipped: "): | ||||
|  | @ -274,6 +281,21 @@ class _NodeReporter(object): | |||
|         self.to_xml = lambda: py.xml.raw(data) | ||||
| 
 | ||||
| 
 | ||||
| def _warn_incompatibility_with_xunit2(request, fixture_name): | ||||
|     """Emits a PytestWarning about the given fixture being incompatible with newer xunit revisions""" | ||||
|     from _pytest.warning_types import PytestWarning | ||||
| 
 | ||||
|     xml = getattr(request.config, "_xml", None) | ||||
|     if xml is not None and xml.family not in ("xunit1", "legacy"): | ||||
|         request.node.warn( | ||||
|             PytestWarning( | ||||
|                 "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format( | ||||
|                     fixture_name=fixture_name, family=xml.family | ||||
|                 ) | ||||
|             ) | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture | ||||
| def record_property(request): | ||||
|     """Add an extra properties the calling test. | ||||
|  | @ -287,6 +309,7 @@ def record_property(request): | |||
|         def test_function(record_property): | ||||
|             record_property("example_key", 1) | ||||
|     """ | ||||
|     _warn_incompatibility_with_xunit2(request, "record_property") | ||||
| 
 | ||||
|     def append_property(name, value): | ||||
|         request.node.user_properties.append((name, value)) | ||||
|  | @ -300,31 +323,67 @@ def record_xml_attribute(request): | |||
|     The fixture is callable with ``(name, value)``, with value being | ||||
|     automatically xml-encoded | ||||
|     """ | ||||
|     from _pytest.warning_types import PytestWarning | ||||
|     from _pytest.warning_types import PytestExperimentalApiWarning | ||||
| 
 | ||||
|     request.node.warn(PytestWarning("record_xml_attribute is an experimental feature")) | ||||
|     request.node.warn( | ||||
|         PytestExperimentalApiWarning("record_xml_attribute is an experimental feature") | ||||
|     ) | ||||
| 
 | ||||
|     _warn_incompatibility_with_xunit2(request, "record_xml_attribute") | ||||
| 
 | ||||
|     # Declare noop | ||||
|     def add_attr_noop(name, value): | ||||
|         pass | ||||
| 
 | ||||
|     attr_func = add_attr_noop | ||||
|     xml = getattr(request.config, "_xml", None) | ||||
| 
 | ||||
|     if xml is not None and xml.family != "xunit1": | ||||
|         request.node.warn( | ||||
|             PytestWarning( | ||||
|                 "record_xml_attribute is incompatible with junit_family: " | ||||
|                 "%s (use: legacy|xunit1)" % xml.family | ||||
|             ) | ||||
|         ) | ||||
|     elif xml is not None: | ||||
|     xml = getattr(request.config, "_xml", None) | ||||
|     if xml is not None: | ||||
|         node_reporter = xml.node_reporter(request.node.nodeid) | ||||
|         attr_func = node_reporter.add_attribute | ||||
| 
 | ||||
|     return attr_func | ||||
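| To keep using these fixtures without the new warning, the JUnit family can | ||||
| be pinned to a compatible value via the existing ``junit_family`` ini | ||||
| option checked above: | ||||
| 
 | ||||
| .. code-block:: ini | ||||
| 
 | ||||
|     [pytest] | ||||
|     junit_family = xunit1 | ||||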
| 
 | ||||
| 
 | ||||
| def _check_record_param_type(param, v): | ||||
|     """Used by record_testsuite_property to check that the given parameter name is of the proper | ||||
|     type""" | ||||
|     __tracebackhide__ = True | ||||
|     if not isinstance(v, six.string_types): | ||||
|         msg = "{param} parameter needs to be a string, but {g} given" | ||||
|         raise TypeError(msg.format(param=param, g=type(v).__name__)) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope="session") | ||||
| def record_testsuite_property(request): | ||||
|     """ | ||||
|     Records a new ``<property>`` tag as a child of the root ``<testsuite>``. This is suitable for | ||||
|     writing global information regarding the entire test suite, and is compatible with the ``xunit2`` JUnit family. | ||||
| 
 | ||||
|     This is a ``session``-scoped fixture which is called with ``(name, value)``. Example: | ||||
| 
 | ||||
|     .. code-block:: python | ||||
| 
 | ||||
|         def test_foo(record_testsuite_property): | ||||
|             record_testsuite_property("ARCH", "PPC") | ||||
|             record_testsuite_property("STORAGE_TYPE", "CEPH") | ||||
| 
 | ||||
|     ``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped. | ||||
|     """ | ||||
| 
 | ||||
|     __tracebackhide__ = True | ||||
| 
 | ||||
|     def record_func(name, value): | ||||
|         """noop function in case --junitxml was not passed in the command-line""" | ||||
|         __tracebackhide__ = True | ||||
|         _check_record_param_type("name", name) | ||||
| 
 | ||||
|     xml = getattr(request.config, "_xml", None) | ||||
|     if xml is not None: | ||||
|         record_func = xml.add_global_property  # noqa | ||||
|     return record_func | ||||
| 
 | ||||
| 
 | ||||
| def pytest_addoption(parser): | ||||
|     group = parser.getgroup("terminal reporting") | ||||
|     group.addoption( | ||||
|  | @ -424,6 +483,7 @@ class LogXML(object): | |||
|         self.node_reporters = {}  # nodeid -> _NodeReporter | ||||
|         self.node_reporters_ordered = [] | ||||
|         self.global_properties = [] | ||||
| 
 | ||||
|         # List of reports that failed on call but teardown is pending. | ||||
|         self.open_reports = [] | ||||
|         self.cnt_double_fail_tests = 0 | ||||
|  | @ -612,7 +672,9 @@ class LogXML(object): | |||
|         terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile)) | ||||
| 
 | ||||
|     def add_global_property(self, name, value): | ||||
|         self.global_properties.append((str(name), bin_xml_escape(value))) | ||||
|         __tracebackhide__ = True | ||||
|         _check_record_param_type("name", name) | ||||
|         self.global_properties.append((name, bin_xml_escape(value))) | ||||
| 
 | ||||
|     def _get_global_properties_node(self): | ||||
|         """Return a Junit node containing custom properties, if any. | ||||
|  |  | |||
|  | @ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager | |||
| from _pytest.config import create_terminal_writer | ||||
| from _pytest.pathlib import Path | ||||
| 
 | ||||
| DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s" | ||||
| DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" | ||||
| DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
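| A sketch of the visible difference (logger name, file and message are made | ||||
| up); the new default leads with the level and includes the logger name: | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     import logging | ||||
| 
 | ||||
|     logging.basicConfig( | ||||
|         format="%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" | ||||
|     ) | ||||
|     logging.getLogger("app.db").warning("retrying connection") | ||||
|     # old default: app.py                     7 WARNING  retrying connection | ||||
|     # new default: WARNING  app.db:app.py:7 retrying connection | ||||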
|  | @ -47,11 +47,6 @@ def pytest_addoption(parser): | |||
|         type="args", | ||||
|         default=[], | ||||
|     ) | ||||
|     # parser.addini("dirpatterns", | ||||
|     #    "patterns specifying possible locations of test files", | ||||
|     #    type="linelist", default=["**/test_*.txt", | ||||
|     #            "**/test_*.py", "**/*_test.py"] | ||||
|     # ) | ||||
|     group = parser.getgroup("general", "running and selection options") | ||||
|     group._addoption( | ||||
|         "-x", | ||||
|  | @ -71,9 +66,10 @@ def pytest_addoption(parser): | |||
|         help="exit after first num failures or errors.", | ||||
|     ) | ||||
|     group._addoption( | ||||
|         "--strict-markers", | ||||
|         "--strict", | ||||
|         action="store_true", | ||||
|         help="marks not registered in configuration file raise errors.", | ||||
|         help="markers not registered in the `markers` section of the configuration file raise errors.", | ||||
|     ) | ||||
|     group._addoption( | ||||
|         "-c", | ||||
|  | @ -208,16 +204,20 @@ def wrap_session(config, doit): | |||
|             initstate = 2 | ||||
|             session.exitstatus = doit(config, session) or 0 | ||||
|         except UsageError: | ||||
|             session.exitstatus = EXIT_USAGEERROR | ||||
|             raise | ||||
|         except Failed: | ||||
|             session.exitstatus = EXIT_TESTSFAILED | ||||
|         except (KeyboardInterrupt, exit.Exception): | ||||
|             excinfo = _pytest._code.ExceptionInfo.from_current() | ||||
|             exitstatus = EXIT_INTERRUPTED | ||||
|             if initstate <= 2 and isinstance(excinfo.value, exit.Exception): | ||||
|                 sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg)) | ||||
|             if isinstance(excinfo.value, exit.Exception): | ||||
|                 if excinfo.value.returncode is not None: | ||||
|                     exitstatus = excinfo.value.returncode | ||||
|                 if initstate < 2: | ||||
|                     sys.stderr.write( | ||||
|                         "{}: {}\n".format(excinfo.typename, excinfo.value.msg) | ||||
|                     ) | ||||
|             config.hook.pytest_keyboard_interrupt(excinfo=excinfo) | ||||
|             session.exitstatus = exitstatus | ||||
|         except:  # noqa | ||||
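| With the handling above, an explicit ``returncode`` passed to | ||||
| ``pytest.exit()`` becomes the session's exit status; a hedged sketch: | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     import pytest | ||||
| 
 | ||||
|     def test_abort_run(): | ||||
|         # stops the whole session; the process exits with status 3 | ||||
|         # instead of the generic EXIT_INTERRUPTED | ||||
|         pytest.exit("aborting: unrecoverable environment problem", returncode=3) | ||||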
|  | @ -431,7 +431,7 @@ class Session(nodes.FSCollector): | |||
|         self.shouldfail = False | ||||
|         self.trace = config.trace.root.get("collection") | ||||
|         self._norecursepatterns = config.getini("norecursedirs") | ||||
|         self.startdir = py.path.local() | ||||
|         self.startdir = config.invocation_dir | ||||
|         self._initialpaths = frozenset() | ||||
|         # Keep track of any collected nodes in here, so we don't duplicate fixtures | ||||
|         self._node_cache = {} | ||||
|  |  | |||
|  | @ -100,6 +100,9 @@ pytest_cmdline_main.tryfirst = True | |||
| 
 | ||||
| def deselect_by_keyword(items, config): | ||||
|     keywordexpr = config.option.keyword.lstrip() | ||||
|     if not keywordexpr: | ||||
|         return | ||||
| 
 | ||||
|     if keywordexpr.startswith("-"): | ||||
|         keywordexpr = "not " + keywordexpr[1:] | ||||
|     selectuntil = False | ||||
|  | @ -147,7 +150,6 @@ def pytest_collection_modifyitems(items, config): | |||
| 
 | ||||
| def pytest_configure(config): | ||||
|     config._old_mark_config = MARK_GEN._config | ||||
|     if config.option.strict: | ||||
|     MARK_GEN._config = config | ||||
| 
 | ||||
|     empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) | ||||
|  |  | |||
|  | @ -12,6 +12,7 @@ from ..compat import MappingMixin | |||
| from ..compat import NOTSET | ||||
| from _pytest.deprecated import PYTEST_PARAM_UNKNOWN_KWARGS | ||||
| from _pytest.outcomes import fail | ||||
| from _pytest.warning_types import PytestUnknownMarkWarning | ||||
| 
 | ||||
| EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" | ||||
| 
 | ||||
|  | @ -135,7 +136,7 @@ class ParameterSet(namedtuple("ParameterSet", "values, marks, id")): | |||
|                     ) | ||||
|         else: | ||||
|             # empty parameter set (likely computed at runtime): create a single | ||||
|             # parameter set with NOSET values, with the "empty parameter set" mark applied to it | ||||
|             # parameter set with NOTSET values, with the "empty parameter set" mark applied to it | ||||
|             mark = get_empty_parameterset_mark(config, argnames, func) | ||||
|             parameters.append( | ||||
|                 ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) | ||||
|  | @ -158,7 +159,7 @@ class Mark(object): | |||
|         :type other: Mark | ||||
|         :rtype: Mark | ||||
| 
 | ||||
|         combines by appending aargs and merging the mappings | ||||
|         combines by appending args and merging the mappings | ||||
|         """ | ||||
|         assert self.name == other.name | ||||
|         return Mark( | ||||
|  | @ -289,28 +290,41 @@ class MarkGenerator(object): | |||
|     on the ``test_function`` object. """ | ||||
| 
 | ||||
|     _config = None | ||||
|     _markers = set() | ||||
| 
 | ||||
|     def __getattr__(self, name): | ||||
|         if name[0] == "_": | ||||
|             raise AttributeError("Marker name must NOT start with underscore") | ||||
|         if self._config is not None: | ||||
|             self._check(name) | ||||
|         return MarkDecorator(Mark(name, (), {})) | ||||
| 
 | ||||
|     def _check(self, name): | ||||
|         try: | ||||
|             if name in self._markers: | ||||
|                 return | ||||
|         except AttributeError: | ||||
|             pass | ||||
|         self._markers = values = set() | ||||
|         for line in self._config.getini("markers"): | ||||
|             marker = line.split(":", 1)[0] | ||||
|             marker = marker.rstrip() | ||||
|             x = marker.split("(", 1)[0] | ||||
|             values.add(x) | ||||
|         if self._config is not None: | ||||
|             # We store a set of markers as a performance optimisation - if a mark | ||||
|             # name is in the set we definitely know it, but a mark may be known and | ||||
|             # not in the set.  We therefore start by updating the set! | ||||
|             if name not in self._markers: | ||||
|             fail("{!r} not a registered marker".format(name), pytrace=False) | ||||
|                 for line in self._config.getini("markers"): | ||||
|                     # example lines: "skipif(condition): skip the given test if..." | ||||
|                     # or "hypothesis: tests which use Hypothesis", so to get the | ||||
|                     # marker name we split on both `:` and `(`. | ||||
|                     marker = line.split(":")[0].split("(")[0].strip() | ||||
|                     self._markers.add(marker) | ||||
| 
 | ||||
|             # If the name is not in the set of known marks after updating, | ||||
|             # then it really is time to issue a warning or an error. | ||||
|             if name not in self._markers: | ||||
|                 if self._config.option.strict_markers: | ||||
|                     fail( | ||||
|                         "{!r} not found in `markers` configuration option".format(name), | ||||
|                         pytrace=False, | ||||
|                     ) | ||||
|                 else: | ||||
|                     warnings.warn( | ||||
|                         "Unknown pytest.mark.%s - is this a typo?  You can register " | ||||
|                         "custom marks to avoid this warning - for details, see " | ||||
|                         "https://docs.pytest.org/en/latest/mark.html" % name, | ||||
|                         PytestUnknownMarkWarning, | ||||
|                     ) | ||||
| 
 | ||||
|         return MarkDecorator(Mark(name, (), {})) | ||||
| 
 | ||||
| 
 | ||||
| MARK_GEN = MarkGenerator() | ||||
|  |  | |||
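| How this plays out in practice, assuming markers registered through the | ||||
| ``markers`` ini option (lines are split on ``:`` and ``(`` exactly as in | ||||
| ``_check`` above, so both forms below register cleanly): | ||||
| 
 | ||||
| .. code-block:: ini | ||||
| 
 | ||||
|     [pytest] | ||||
|     markers = | ||||
|         slow: marks tests as slow | ||||
|         serial(reason): must not run in parallel | ||||
| 
 | ||||
| With this in place ``@pytest.mark.slow`` and ``@pytest.mark.serial`` pass | ||||
| silently, while a typo such as ``@pytest.mark.slwo`` emits | ||||
| ``PytestUnknownMarkWarning`` - or fails outright when ``--strict-markers`` | ||||
| is given. | ||||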
|  | @ -7,6 +7,7 @@ import sys | |||
| 
 | ||||
| import six | ||||
| 
 | ||||
| import pytest | ||||
| from _pytest import python | ||||
| from _pytest import runner | ||||
| from _pytest import unittest | ||||
|  | @ -26,7 +27,7 @@ def pytest_runtest_makereport(item, call): | |||
|     if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): | ||||
|         # let's substitute the excinfo with a pytest.skip one | ||||
|         call2 = runner.CallInfo.from_call( | ||||
|             lambda: runner.skip(six.text_type(call.excinfo.value)), call.when | ||||
|             lambda: pytest.skip(six.text_type(call.excinfo.value)), call.when | ||||
|         ) | ||||
|         call.excinfo = call2.excinfo | ||||
| 
 | ||||
|  |  | |||
|  | @ -68,10 +68,19 @@ def pytest_configure(config): | |||
|         if checker.matching_platform(): | ||||
|             config.pluginmanager.register(checker) | ||||
| 
 | ||||
|     config.addinivalue_line( | ||||
|         "markers", | ||||
|         "pytester_example_path(*path_segments): join the given path " | ||||
|         "segments to `pytester_example_dir` for this test.", | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| def raise_on_kwargs(kwargs): | ||||
|     if kwargs: | ||||
|         raise TypeError("Unexpected arguments: {}".format(", ".join(sorted(kwargs)))) | ||||
|     __tracebackhide__ = True | ||||
|     if kwargs:  # pragma: no branch | ||||
|         raise TypeError( | ||||
|             "Unexpected keyword arguments: {}".format(", ".join(sorted(kwargs))) | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| class LsofFdLeakChecker(object): | ||||
|  | @ -303,7 +312,8 @@ class HookRecorder(object): | |||
|                     passed.append(rep) | ||||
|             elif rep.skipped: | ||||
|                 skipped.append(rep) | ||||
|             elif rep.failed: | ||||
|             else: | ||||
|                 assert rep.failed, "Unexpected outcome: {!r}".format(rep) | ||||
|                 failed.append(rep) | ||||
|         return passed, skipped, failed | ||||
| 
 | ||||
|  | @ -476,6 +486,8 @@ class Testdir(object): | |||
| 
 | ||||
|     """ | ||||
| 
 | ||||
|     CLOSE_STDIN = object | ||||
| 
 | ||||
|     class TimeoutExpired(Exception): | ||||
|         pass | ||||
| 
 | ||||
|  | @ -787,12 +799,15 @@ class Testdir(object): | |||
| 
 | ||||
|         :param args: command line arguments to pass to :py:func:`pytest.main` | ||||
| 
 | ||||
|         :param plugin: (keyword-only) extra plugin instances the | ||||
|         :param plugins: (keyword-only) extra plugin instances the | ||||
|            ``pytest.main()`` instance should use | ||||
| 
 | ||||
|         :return: a :py:class:`HookRecorder` instance | ||||
| 
 | ||||
|         """ | ||||
|         plugins = kwargs.pop("plugins", []) | ||||
|         no_reraise_ctrlc = kwargs.pop("no_reraise_ctrlc", None) | ||||
|         raise_on_kwargs(kwargs) | ||||
| 
 | ||||
|         finalizers = [] | ||||
|         try: | ||||
|             # Do not load user config (during runs only). | ||||
|  | @ -832,7 +847,6 @@ class Testdir(object): | |||
|                 def pytest_configure(x, config): | ||||
|                     rec.append(self.make_hook_recorder(config.pluginmanager)) | ||||
| 
 | ||||
|             plugins = kwargs.get("plugins") or [] | ||||
|             plugins.append(Collect()) | ||||
|             ret = pytest.main(list(args), plugins=plugins) | ||||
|             if len(rec) == 1: | ||||
|  | @ -846,7 +860,7 @@ class Testdir(object): | |||
| 
 | ||||
|             # typically we reraise keyboard interrupts from the child run | ||||
|             # because it's our user requesting interruption of the testing | ||||
|             if ret == EXIT_INTERRUPTED and not kwargs.get("no_reraise_ctrlc"): | ||||
|             if ret == EXIT_INTERRUPTED and not no_reraise_ctrlc: | ||||
|                 calls = reprec.getcalls("pytest_keyboard_interrupt") | ||||
|                 if calls and calls[-1].excinfo.type == KeyboardInterrupt: | ||||
|                     raise KeyboardInterrupt() | ||||
|  | @ -858,9 +872,10 @@ class Testdir(object): | |||
|     def runpytest_inprocess(self, *args, **kwargs): | ||||
|         """Return result of running pytest in-process, providing a similar | ||||
|         interface to what self.runpytest() provides. | ||||
| 
 | ||||
|         """ | ||||
|         if kwargs.get("syspathinsert"): | ||||
|         syspathinsert = kwargs.pop("syspathinsert", False) | ||||
| 
 | ||||
|         if syspathinsert: | ||||
|             self.syspathinsert() | ||||
|         now = time.time() | ||||
|         capture = MultiCapture(Capture=SysCapture) | ||||
|  | @ -1018,7 +1033,14 @@ class Testdir(object): | |||
|             if colitem.name == name: | ||||
|                 return colitem | ||||
| 
 | ||||
|     def popen(self, cmdargs, stdout, stderr, **kw): | ||||
|     def popen( | ||||
|         self, | ||||
|         cmdargs, | ||||
|         stdout=subprocess.PIPE, | ||||
|         stderr=subprocess.PIPE, | ||||
|         stdin=CLOSE_STDIN, | ||||
|         **kw | ||||
|     ): | ||||
|         """Invoke subprocess.Popen. | ||||
| 
 | ||||
|         This calls subprocess.Popen making sure the current working directory | ||||
|  | @ -1036,10 +1058,18 @@ class Testdir(object): | |||
|         env["USERPROFILE"] = env["HOME"] | ||||
|         kw["env"] = env | ||||
| 
 | ||||
|         popen = subprocess.Popen( | ||||
|             cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw | ||||
|         ) | ||||
|         if stdin is Testdir.CLOSE_STDIN: | ||||
|             kw["stdin"] = subprocess.PIPE | ||||
|         elif isinstance(stdin, bytes): | ||||
|             kw["stdin"] = subprocess.PIPE | ||||
|         else: | ||||
|             kw["stdin"] = stdin | ||||
| 
 | ||||
|         popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) | ||||
|         if stdin is Testdir.CLOSE_STDIN: | ||||
|             popen.stdin.close() | ||||
|         elif isinstance(stdin, bytes): | ||||
|             popen.stdin.write(stdin) | ||||
| 
 | ||||
|         return popen | ||||
| 
 | ||||
|  | @ -1051,6 +1081,10 @@ class Testdir(object): | |||
|         :param args: the sequence of arguments to pass to `subprocess.Popen()` | ||||
|         :param timeout: the period in seconds after which to timeout and raise | ||||
|             :py:class:`Testdir.TimeoutExpired` | ||||
|         :param stdin: optional standard input.  If bytes are given, they | ||||
|             are written to the pipe, which is then closed; any other value | ||||
|             is passed through to ``popen``.  Defaults to ``CLOSE_STDIN``, | ||||
|             which translates to using a pipe (``subprocess.PIPE``) that | ||||
|             gets closed. | ||||
| 
 | ||||
|         Returns a :py:class:`RunResult`. | ||||
| 
 | ||||
|  | @ -1058,6 +1092,7 @@ class Testdir(object): | |||
|         __tracebackhide__ = True | ||||
| 
 | ||||
|         timeout = kwargs.pop("timeout", None) | ||||
|         stdin = kwargs.pop("stdin", Testdir.CLOSE_STDIN) | ||||
|         raise_on_kwargs(kwargs) | ||||
| 
 | ||||
|         cmdargs = [ | ||||
|  | @ -1072,8 +1107,14 @@ class Testdir(object): | |||
|         try: | ||||
|             now = time.time() | ||||
|             popen = self.popen( | ||||
|                 cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32") | ||||
|                 cmdargs, | ||||
|                 stdin=stdin, | ||||
|                 stdout=f1, | ||||
|                 stderr=f2, | ||||
|                 close_fds=(sys.platform != "win32"), | ||||
|             ) | ||||
|             if isinstance(stdin, bytes): | ||||
|                 popen.stdin.close() | ||||
| 
 | ||||
|             def handle_timeout(): | ||||
|                 __tracebackhide__ = True | ||||
|  | @ -1159,9 +1200,10 @@ class Testdir(object): | |||
|             :py:class:`Testdir.TimeoutExpired` | ||||
| 
 | ||||
|         Returns a :py:class:`RunResult`. | ||||
| 
 | ||||
|         """ | ||||
|         __tracebackhide__ = True | ||||
|         timeout = kwargs.pop("timeout", None) | ||||
|         raise_on_kwargs(kwargs) | ||||
| 
 | ||||
|         p = py.path.local.make_numbered_dir( | ||||
|             prefix="runpytest-", keep=None, rootdir=self.tmpdir | ||||
|  | @ -1171,7 +1213,7 @@ class Testdir(object): | |||
|         if plugins: | ||||
|             args = ("-p", plugins[0]) + args | ||||
|         args = self._getpytestargs() + args | ||||
|         return self.run(*args, timeout=kwargs.get("timeout")) | ||||
|         return self.run(*args, timeout=timeout) | ||||
| 
 | ||||
|     def spawn_pytest(self, string, expect_timeout=10.0): | ||||
|         """Run pytest using pexpect. | ||||
|  |  | |||
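| A hedged sketch of the new ``stdin`` support, modelled on the | ||||
| ``Testdir.popen`` changes above (requires the ``pytester`` plugin; the | ||||
| child command is illustrative): | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     import sys | ||||
| 
 | ||||
|     pytest_plugins = "pytester" | ||||
| 
 | ||||
|     def test_popen_stdin_bytes(testdir): | ||||
|         proc = testdir.popen( | ||||
|             [sys.executable, "-c", "import sys; print(sys.stdin.read())"], | ||||
|             stdin=b"spam\neggs",  # bytes: written to the pipe, then closed | ||||
|         ) | ||||
|         out, err = proc.communicate() | ||||
|         assert out.decode().splitlines() == ["spam", "eggs"] | ||||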
|  | @ -45,7 +45,8 @@ from _pytest.mark.structures import normalize_mark_list | |||
| from _pytest.outcomes import fail | ||||
| from _pytest.outcomes import skip | ||||
| from _pytest.pathlib import parts | ||||
| from _pytest.warning_types import PytestWarning | ||||
| from _pytest.warning_types import PytestCollectionWarning | ||||
| from _pytest.warning_types import PytestUnhandledCoroutineWarning | ||||
| 
 | ||||
| 
 | ||||
| def pyobj_property(name): | ||||
|  | @ -171,7 +172,7 @@ def pytest_pyfunc_call(pyfuncitem): | |||
|         msg += "  - pytest-asyncio\n" | ||||
|         msg += "  - pytest-trio\n" | ||||
|         msg += "  - pytest-tornasync" | ||||
|         warnings.warn(PytestWarning(msg.format(pyfuncitem.nodeid))) | ||||
|         warnings.warn(PytestUnhandledCoroutineWarning(msg.format(pyfuncitem.nodeid))) | ||||
|         skip(msg="coroutine function and no async plugin installed (see warnings)") | ||||
|     funcargs = pyfuncitem.funcargs | ||||
|     testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} | ||||
|  | @ -221,7 +222,7 @@ def pytest_pycollect_makeitem(collector, name, obj): | |||
|         if not (isfunction(obj) or isfunction(get_real_func(obj))): | ||||
|             filename, lineno = getfslineno(obj) | ||||
|             warnings.warn_explicit( | ||||
|                 message=PytestWarning( | ||||
|                 message=PytestCollectionWarning( | ||||
|                     "cannot collect %r because it is not a function." % name | ||||
|                 ), | ||||
|                 category=None, | ||||
|  | @ -233,7 +234,7 @@ def pytest_pycollect_makeitem(collector, name, obj): | |||
|                 res = Function(name, parent=collector) | ||||
|                 reason = deprecated.YIELD_TESTS.format(name=name) | ||||
|                 res.add_marker(MARK_GEN.xfail(run=False, reason=reason)) | ||||
|                 res.warn(PytestWarning(reason)) | ||||
|                 res.warn(PytestCollectionWarning(reason)) | ||||
|             else: | ||||
|                 res = list(collector._genfunctions(name, obj)) | ||||
|             outcome.force_result(res) | ||||
|  | @ -721,7 +722,7 @@ class Class(PyCollector): | |||
|             return [] | ||||
|         if hasinit(self.obj): | ||||
|             self.warn( | ||||
|                 PytestWarning( | ||||
|                 PytestCollectionWarning( | ||||
|                     "cannot collect test class %r because it has a " | ||||
|                     "__init__ constructor" % self.obj.__name__ | ||||
|                 ) | ||||
|  | @ -729,7 +730,7 @@ class Class(PyCollector): | |||
|             return [] | ||||
|         elif hasnew(self.obj): | ||||
|             self.warn( | ||||
|                 PytestWarning( | ||||
|                 PytestCollectionWarning( | ||||
|                     "cannot collect test class %r because it has a " | ||||
|                     "__new__ constructor" % self.obj.__name__ | ||||
|                 ) | ||||
|  | @ -1341,17 +1342,19 @@ def _showfixtures_main(config, session): | |||
|                 currentmodule = module | ||||
|         if verbose <= 0 and argname[0] == "_": | ||||
|             continue | ||||
|         tw.write(argname, green=True) | ||||
|         if fixturedef.scope != "function": | ||||
|             tw.write(" [%s scope]" % fixturedef.scope, cyan=True) | ||||
|         if verbose > 0: | ||||
|             funcargspec = "%s -- %s" % (argname, bestrel) | ||||
|         else: | ||||
|             funcargspec = argname | ||||
|         tw.line(funcargspec, green=True) | ||||
|             tw.write(" -- %s" % bestrel, yellow=True) | ||||
|         tw.write("\n") | ||||
|         loc = getlocation(fixturedef.func, curdir) | ||||
|         doc = fixturedef.func.__doc__ or "" | ||||
|         if doc: | ||||
|             write_docstring(tw, doc) | ||||
|         else: | ||||
|             tw.line("    %s: no docstring available" % (loc,), red=True) | ||||
|         tw.line() | ||||
| 
 | ||||
| 
 | ||||
| def write_docstring(tw, doc, indent="    "): | ||||
|  |  | |||
|  | @ -148,6 +148,12 @@ class BaseReport(object): | |||
|             fspath, lineno, domain = self.location | ||||
|             return domain | ||||
| 
 | ||||
|     def _get_verbose_word(self, config): | ||||
|         _category, _short, verbose = config.hook.pytest_report_teststatus( | ||||
|             report=self, config=config | ||||
|         ) | ||||
|         return verbose | ||||
| 
 | ||||
|     def _to_json(self): | ||||
|         """ | ||||
|         This was originally the serialize_report() function from xdist (ca03269). | ||||
|  | @ -328,7 +334,8 @@ class TestReport(BaseReport): | |||
|         self.__dict__.update(extra) | ||||
| 
 | ||||
|     def __repr__(self): | ||||
|         return "<TestReport %r when=%r outcome=%r>" % ( | ||||
|         return "<%s %r when=%r outcome=%r>" % ( | ||||
|             self.__class__.__name__, | ||||
|             self.nodeid, | ||||
|             self.when, | ||||
|             self.outcome, | ||||
|  |  | |||
|  | @ -16,7 +16,6 @@ from .reports import CollectReport | |||
| from .reports import TestReport | ||||
| from _pytest._code.code import ExceptionInfo | ||||
| from _pytest.outcomes import Exit | ||||
| from _pytest.outcomes import skip | ||||
| from _pytest.outcomes import Skipped | ||||
| from _pytest.outcomes import TEST_OUTCOME | ||||
| 
 | ||||
|  | @ -183,7 +182,7 @@ def call_and_report(item, when, log=True, **kwds): | |||
| def check_interactive_exception(call, report): | ||||
|     return call.excinfo and not ( | ||||
|         hasattr(report, "wasxfail") | ||||
|         or call.excinfo.errisinstance(skip.Exception) | ||||
|         or call.excinfo.errisinstance(Skipped) | ||||
|         or call.excinfo.errisinstance(bdb.BdbQuit) | ||||
|     ) | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| # coding=utf8 | ||||
| """ support for skip/xfail functions and markers. """ | ||||
| from __future__ import absolute_import | ||||
| from __future__ import division | ||||
|  | @ -183,128 +184,3 @@ def pytest_report_teststatus(report): | |||
|             return "xfailed", "x", "XFAIL" | ||||
|         elif report.passed: | ||||
|             return "xpassed", "X", "XPASS" | ||||
| 
 | ||||
| 
 | ||||
| # called by the terminalreporter instance/plugin | ||||
| 
 | ||||
| 
 | ||||
| def pytest_terminal_summary(terminalreporter): | ||||
|     tr = terminalreporter | ||||
|     if not tr.reportchars: | ||||
|         return | ||||
| 
 | ||||
|     lines = [] | ||||
|     for char in tr.reportchars: | ||||
|         action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None) | ||||
|         action(terminalreporter, lines) | ||||
| 
 | ||||
|     if lines: | ||||
|         tr._tw.sep("=", "short test summary info") | ||||
|         for line in lines: | ||||
|             tr._tw.line(line) | ||||
| 
 | ||||
| 
 | ||||
| def show_simple(terminalreporter, lines, stat): | ||||
|     failed = terminalreporter.stats.get(stat) | ||||
|     if failed: | ||||
|         config = terminalreporter.config | ||||
|         for rep in failed: | ||||
|             verbose_word = _get_report_str(config, rep) | ||||
|             pos = _get_pos(config, rep) | ||||
|             lines.append("%s %s" % (verbose_word, pos)) | ||||
| 
 | ||||
| 
 | ||||
| def show_xfailed(terminalreporter, lines): | ||||
|     xfailed = terminalreporter.stats.get("xfailed") | ||||
|     if xfailed: | ||||
|         config = terminalreporter.config | ||||
|         for rep in xfailed: | ||||
|             verbose_word = _get_report_str(config, rep) | ||||
|             pos = _get_pos(config, rep) | ||||
|             lines.append("%s %s" % (verbose_word, pos)) | ||||
|             reason = rep.wasxfail | ||||
|             if reason: | ||||
|                 lines.append("  " + str(reason)) | ||||
| 
 | ||||
| 
 | ||||
| def show_xpassed(terminalreporter, lines): | ||||
|     xpassed = terminalreporter.stats.get("xpassed") | ||||
|     if xpassed: | ||||
|         config = terminalreporter.config | ||||
|         for rep in xpassed: | ||||
|             verbose_word = _get_report_str(config, rep) | ||||
|             pos = _get_pos(config, rep) | ||||
|             reason = rep.wasxfail | ||||
|             lines.append("%s %s %s" % (verbose_word, pos, reason)) | ||||
| 
 | ||||
| 
 | ||||
| def folded_skips(skipped): | ||||
|     d = {} | ||||
|     for event in skipped: | ||||
|         key = event.longrepr | ||||
|         assert len(key) == 3, (event, key) | ||||
|         keywords = getattr(event, "keywords", {}) | ||||
|         # folding reports with global pytestmark variable | ||||
|         # this is workaround, because for now we cannot identify the scope of a skip marker | ||||
|         # TODO: revisit after marks scope would be fixed | ||||
|         if ( | ||||
|             event.when == "setup" | ||||
|             and "skip" in keywords | ||||
|             and "pytestmark" not in keywords | ||||
|         ): | ||||
|             key = (key[0], None, key[2]) | ||||
|         d.setdefault(key, []).append(event) | ||||
|     values = [] | ||||
|     for key, events in d.items(): | ||||
|         values.append((len(events),) + key) | ||||
|     return values | ||||
| 
 | ||||
| 
 | ||||
| def show_skipped(terminalreporter, lines): | ||||
|     tr = terminalreporter | ||||
|     skipped = tr.stats.get("skipped", []) | ||||
|     if skipped: | ||||
|         fskips = folded_skips(skipped) | ||||
|         if fskips: | ||||
|             verbose_word = _get_report_str(terminalreporter.config, report=skipped[0]) | ||||
|             for num, fspath, lineno, reason in fskips: | ||||
|                 if reason.startswith("Skipped: "): | ||||
|                     reason = reason[9:] | ||||
|                 if lineno is not None: | ||||
|                     lines.append( | ||||
|                         "%s [%d] %s:%d: %s" | ||||
|                         % (verbose_word, num, fspath, lineno + 1, reason) | ||||
|                     ) | ||||
|                 else: | ||||
|                     lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason)) | ||||
| 
 | ||||
| 
 | ||||
| def shower(stat): | ||||
|     def show_(terminalreporter, lines): | ||||
|         return show_simple(terminalreporter, lines, stat) | ||||
| 
 | ||||
|     return show_ | ||||
| 
 | ||||
| 
 | ||||
| def _get_report_str(config, report): | ||||
|     _category, _short, verbose = config.hook.pytest_report_teststatus( | ||||
|         report=report, config=config | ||||
|     ) | ||||
|     return verbose | ||||
| 
 | ||||
| 
 | ||||
| def _get_pos(config, rep): | ||||
|     nodeid = config.cwd_relative_nodeid(rep.nodeid) | ||||
|     return nodeid | ||||
| 
 | ||||
| 
 | ||||
| REPORTCHAR_ACTIONS = { | ||||
|     "x": show_xfailed, | ||||
|     "X": show_xpassed, | ||||
|     "f": shower("failed"), | ||||
|     "F": shower("failed"), | ||||
|     "s": show_skipped, | ||||
|     "S": show_skipped, | ||||
|     "p": shower("passed"), | ||||
|     "E": shower("error"), | ||||
| } | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| # encoding: utf-8 | ||||
| """ terminal reporting of the full testing process. | ||||
| 
 | ||||
| This is a good source for looking at the various reporting hooks. | ||||
|  | @ -11,6 +12,7 @@ import collections | |||
| import platform | ||||
| import sys | ||||
| import time | ||||
| from functools import partial | ||||
| 
 | ||||
| import attr | ||||
| import pluggy | ||||
|  | @ -81,11 +83,11 @@ def pytest_addoption(parser): | |||
|         dest="reportchars", | ||||
|         default="", | ||||
|         metavar="chars", | ||||
|         help="show extra test summary info as specified by chars (f)ailed, " | ||||
|         "(E)error, (s)skipped, (x)failed, (X)passed, " | ||||
|         "(p)passed, (P)passed with output, (a)all except pP. " | ||||
|         help="show extra test summary info as specified by chars: (f)ailed, " | ||||
|         "(E)rror, (s)kipped, (x)failed, (X)passed, " | ||||
|         "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " | ||||
|         "Warnings are displayed at all times except when " | ||||
|         "--disable-warnings is set", | ||||
|         "--disable-warnings is set.", | ||||
|     ) | ||||
|     group._addoption( | ||||
|         "--disable-warnings", | ||||
|  | @ -140,7 +142,7 @@ def pytest_addoption(parser): | |||
| 
 | ||||
|     parser.addini( | ||||
|         "console_output_style", | ||||
|         help="console output: classic or with additional progress information (classic|progress).", | ||||
|         help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").', | ||||
|         default="progress", | ||||
|     ) | ||||
| 
 | ||||
|  | @ -164,15 +166,18 @@ def getreportopt(config): | |||
|         reportchars += "w" | ||||
|     elif config.option.disable_warnings and "w" in reportchars: | ||||
|         reportchars = reportchars.replace("w", "") | ||||
|     if reportchars: | ||||
|     for char in reportchars: | ||||
|             if char not in reportopts and char != "a": | ||||
|                 reportopts += char | ||||
|             elif char == "a": | ||||
|         if char == "a": | ||||
|             reportopts = "sxXwEf" | ||||
|         elif char == "A": | ||||
|             reportopts = "sxXwEfpP" | ||||
|             break | ||||
|         elif char not in reportopts: | ||||
|             reportopts += char | ||||
|     return reportopts | ||||
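| The precedence implemented above, re-stated standalone for illustration: | ||||
| ``a`` resets the selection to everything except passed, and ``A`` selects | ||||
| everything and stops further processing: | ||||
| 
 | ||||
| .. code-block:: python | ||||
| 
 | ||||
|     def expand_reportchars(reportchars): | ||||
|         # standalone re-statement of the loop in getreportopt() | ||||
|         reportopts = "" | ||||
|         for char in reportchars: | ||||
|             if char == "a": | ||||
|                 reportopts = "sxXwEf" | ||||
|             elif char == "A": | ||||
|                 reportopts = "sxXwEfpP" | ||||
|                 break | ||||
|             elif char not in reportopts: | ||||
|                 reportopts += char | ||||
|         return reportopts | ||||
| 
 | ||||
|     assert expand_reportchars("fE") == "fE" | ||||
|     assert expand_reportchars("fa") == "sxXwEf"    # 'a' overrides earlier chars | ||||
|     assert expand_reportchars("Af") == "sxXwEfpP"  # 'A' wins and stops the loop | ||||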
| 
 | ||||
| 
 | ||||
| @pytest.hookimpl(trylast=True)  # after _pytest.runner | ||||
| def pytest_report_teststatus(report): | ||||
|     if report.passed: | ||||
|         letter = "." | ||||
|  | @ -230,7 +235,7 @@ class TerminalReporter(object): | |||
|         self._showfspath = None | ||||
| 
 | ||||
|         self.stats = {} | ||||
|         self.startdir = py.path.local() | ||||
|         self.startdir = config.invocation_dir | ||||
|         if file is None: | ||||
|             file = sys.stdout | ||||
|         self._tw = _pytest.config.create_terminal_writer(config, file) | ||||
|  | @ -253,7 +258,10 @@ class TerminalReporter(object): | |||
|         # do not show progress if we are showing fixture setup/teardown | ||||
|         if self.config.getoption("setupshow", False): | ||||
|             return False | ||||
|         return self.config.getini("console_output_style") in ("progress", "count") | ||||
|         cfg = self.config.getini("console_output_style") | ||||
|         if cfg in ("progress", "count"): | ||||
|             return cfg | ||||
|         return False | ||||
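| The property now returns the configured style itself rather than a bare | ||||
| boolean, so callers below can branch on ``"count"``.  For example, with: | ||||
| 
 | ||||
| .. code-block:: ini | ||||
| 
 | ||||
|     [pytest] | ||||
|     console_output_style = count | ||||
| 
 | ||||
| a run of ten tests renders its progress as `` [ 4/10]`` instead of the | ||||
| percentage form `` [ 40%]``. | ||||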
| 
 | ||||
|     @property | ||||
|     def verbosity(self): | ||||
|  | @ -437,18 +445,18 @@ class TerminalReporter(object): | |||
|                 self.currentfspath = -2 | ||||
| 
 | ||||
|     def pytest_runtest_logfinish(self, nodeid): | ||||
|         if self.config.getini("console_output_style") == "count": | ||||
|         if self.verbosity <= 0 and self._show_progress_info: | ||||
|             if self._show_progress_info == "count": | ||||
|                 num_tests = self._session.testscollected | ||||
|                 progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests))) | ||||
|             else: | ||||
|                 progress_length = len(" [100%]") | ||||
| 
 | ||||
|         if self.verbosity <= 0 and self._show_progress_info: | ||||
|             self._progress_nodeids_reported.add(nodeid) | ||||
|             last_item = ( | ||||
|             is_last_item = ( | ||||
|                 len(self._progress_nodeids_reported) == self._session.testscollected | ||||
|             ) | ||||
|             if last_item: | ||||
|             if is_last_item: | ||||
|                 self._write_progress_information_filling_space() | ||||
|             else: | ||||
|                 w = self._width_of_current_line | ||||
|  | @ -459,7 +467,7 @@ class TerminalReporter(object): | |||
| 
 | ||||
|     def _get_progress_information_message(self): | ||||
|         collected = self._session.testscollected | ||||
|         if self.config.getini("console_output_style") == "count": | ||||
|         if self._show_progress_info == "count": | ||||
|             if collected: | ||||
|                 progress = self._progress_nodeids_reported | ||||
|                 counter_format = "{{:{}d}}".format(len(str(collected))) | ||||
|  | @ -545,10 +553,6 @@ class TerminalReporter(object): | |||
|         else: | ||||
|             self.write_line(line) | ||||
| 
 | ||||
|     @pytest.hookimpl(trylast=True) | ||||
|     def pytest_sessionstart(self, session): | ||||
|         self._session = session | ||||
|  | @ -601,6 +605,8 @@ class TerminalReporter(object): | |||
|         return result | ||||
| 
 | ||||
|     def pytest_collection_finish(self, session): | ||||
|         self.report_collect(True) | ||||
| 
 | ||||
|         if self.config.getoption("collectonly"): | ||||
|             self._printcollecteditems(session.items) | ||||
| 
 | ||||
|  | @ -676,8 +682,9 @@ class TerminalReporter(object): | |||
|         self.summary_errors() | ||||
|         self.summary_failures() | ||||
|         self.summary_warnings() | ||||
|         self.summary_passes() | ||||
|         yield | ||||
|         self.short_test_summary() | ||||
|         # Display any extra warnings from teardown here (if any). | ||||
|         self.summary_warnings() | ||||
| 
 | ||||
|  | @ -725,9 +732,9 @@ class TerminalReporter(object): | |||
|         return res + " " | ||||
| 
 | ||||
|     def _getfailureheadline(self, rep): | ||||
|         head_line = rep.head_line | ||||
|         if head_line: | ||||
|             return head_line | ||||
|         return "test session"  # XXX? | ||||
| 
 | ||||
|     def _getcrashline(self, rep): | ||||
|  | @ -797,7 +804,7 @@ class TerminalReporter(object): | |||
|                 for rep in reports: | ||||
|                     if rep.sections: | ||||
|                         msg = self._getfailureheadline(rep) | ||||
|                         self.write_sep("_", msg) | ||||
|                         self.write_sep("_", msg, green=True, bold=True) | ||||
|                         self._outrep_summary(rep) | ||||
| 
 | ||||
|     def print_teardown_sections(self, rep): | ||||
|  | @ -819,16 +826,21 @@ class TerminalReporter(object): | |||
|             if not reports: | ||||
|                 return | ||||
|             self.write_sep("=", "FAILURES") | ||||
|             if self.config.option.tbstyle == "line": | ||||
|                 for rep in reports: | ||||
|                     line = self._getcrashline(rep) | ||||
|                     self.write_line(line) | ||||
|             else: | ||||
|                 teardown_sections = {} | ||||
|                 for report in self.getreports(""): | ||||
|                     if report.when == "teardown": | ||||
|                         teardown_sections.setdefault(report.nodeid, []).append(report) | ||||
| 
 | ||||
|                 for rep in reports: | ||||
|                     msg = self._getfailureheadline(rep) | ||||
|                     self.write_sep("_", msg, red=True, bold=True) | ||||
|                     self._outrep_summary(rep) | ||||
|                     for report in teardown_sections.get(rep.nodeid, []): | ||||
|                         self.print_teardown_sections(report) | ||||
| 
 | ||||
|     def summary_errors(self): | ||||
|  | @ -841,10 +853,8 @@ class TerminalReporter(object): | |||
|                 msg = self._getfailureheadline(rep) | ||||
|                 if rep.when == "collect": | ||||
|                     msg = "ERROR collecting " + msg | ||||
|                 else: | ||||
|                     msg = "ERROR at %s of %s" % (rep.when, msg) | ||||
|                 self.write_sep("_", msg, red=True, bold=True) | ||||
|                 self._outrep_summary(rep) | ||||
| 
 | ||||
|  | @ -872,6 +882,149 @@ class TerminalReporter(object): | |||
|         if self.verbosity == -1: | ||||
|             self.write_line(msg, **markup) | ||||
| 
 | ||||
|     def short_test_summary(self): | ||||
|         if not self.reportchars: | ||||
|             return | ||||
| 
 | ||||
|         def show_simple(stat, lines): | ||||
|             failed = self.stats.get(stat, []) | ||||
|             if not failed: | ||||
|                 return | ||||
|             termwidth = self.writer.fullwidth | ||||
|             config = self.config | ||||
|             for rep in failed: | ||||
|                 line = _get_line_with_reprcrash_message(config, rep, termwidth) | ||||
|                 lines.append(line) | ||||
| 
 | ||||
|         def show_xfailed(lines): | ||||
|             xfailed = self.stats.get("xfailed", []) | ||||
|             for rep in xfailed: | ||||
|                 verbose_word = rep._get_verbose_word(self.config) | ||||
|                 pos = _get_pos(self.config, rep) | ||||
|                 lines.append("%s %s" % (verbose_word, pos)) | ||||
|                 reason = rep.wasxfail | ||||
|                 if reason: | ||||
|                     lines.append("  " + str(reason)) | ||||
| 
 | ||||
|         def show_xpassed(lines): | ||||
|             xpassed = self.stats.get("xpassed", []) | ||||
|             for rep in xpassed: | ||||
|                 verbose_word = rep._get_verbose_word(self.config) | ||||
|                 pos = _get_pos(self.config, rep) | ||||
|                 reason = rep.wasxfail | ||||
|                 lines.append("%s %s %s" % (verbose_word, pos, reason)) | ||||
| 
 | ||||
|         def show_skipped(lines): | ||||
|             skipped = self.stats.get("skipped", []) | ||||
|             fskips = _folded_skips(skipped) if skipped else [] | ||||
|             if not fskips: | ||||
|                 return | ||||
|             verbose_word = skipped[0]._get_verbose_word(self.config) | ||||
|             for num, fspath, lineno, reason in fskips: | ||||
|                 if reason.startswith("Skipped: "): | ||||
|                     reason = reason[9:] | ||||
|                 if lineno is not None: | ||||
|                     lines.append( | ||||
|                         "%s [%d] %s:%d: %s" | ||||
|                         % (verbose_word, num, fspath, lineno + 1, reason) | ||||
|                     ) | ||||
|                 else: | ||||
|                     lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason)) | ||||
| 
 | ||||
|         REPORTCHAR_ACTIONS = { | ||||
|             "x": show_xfailed, | ||||
|             "X": show_xpassed, | ||||
|             "f": partial(show_simple, "failed"), | ||||
|             "F": partial(show_simple, "failed"), | ||||
|             "s": show_skipped, | ||||
|             "S": show_skipped, | ||||
|             "p": partial(show_simple, "passed"), | ||||
|             "E": partial(show_simple, "error"), | ||||
|         } | ||||
| 
 | ||||
|         lines = [] | ||||
|         for char in self.reportchars: | ||||
|             action = REPORTCHAR_ACTIONS.get(char) | ||||
|             if action:  # skipping e.g. "P" (passed with output) here. | ||||
|                 action(lines) | ||||
| 
 | ||||
|         if lines: | ||||
|             self.write_sep("=", "short test summary info") | ||||
|             for line in lines: | ||||
|                 self.write_line(line) | ||||
| 
 | ||||
| 
 | ||||
| def _get_pos(config, rep): | ||||
|     nodeid = config.cwd_relative_nodeid(rep.nodeid) | ||||
|     return nodeid | ||||
| 
 | ||||
| 
 | ||||
| def _get_line_with_reprcrash_message(config, rep, termwidth): | ||||
|     """Get summary line for a report, trying to add reprcrash message.""" | ||||
|     from wcwidth import wcswidth | ||||
| 
 | ||||
|     verbose_word = rep._get_verbose_word(config) | ||||
|     pos = _get_pos(config, rep) | ||||
| 
 | ||||
|     line = "%s %s" % (verbose_word, pos) | ||||
|     len_line = wcswidth(line) | ||||
|     ellipsis, len_ellipsis = "...", 3 | ||||
|     if len_line > termwidth - len_ellipsis: | ||||
|         # No space for an additional message. | ||||
|         return line | ||||
| 
 | ||||
|     try: | ||||
|         msg = rep.longrepr.reprcrash.message | ||||
|     except AttributeError: | ||||
|         pass | ||||
|     else: | ||||
|         # Only use the first line. | ||||
|         i = msg.find("\n") | ||||
|         if i != -1: | ||||
|             msg = msg[:i] | ||||
|         len_msg = wcswidth(msg) | ||||
| 
 | ||||
|         sep, len_sep = " - ", 3 | ||||
|         max_len_msg = termwidth - len_line - len_sep | ||||
|         if max_len_msg >= len_ellipsis: | ||||
|             if len_msg > max_len_msg: | ||||
|                 max_len_msg -= len_ellipsis | ||||
|                 msg = msg[:max_len_msg] | ||||
|                 while wcswidth(msg) > max_len_msg: | ||||
|                     msg = msg[:-1] | ||||
|                 if six.PY2: | ||||
|                     # on python 2 systems with narrow unicode compilation, trying to | ||||
|                     # get a single character out of a multi-byte unicode character such as | ||||
|                     # u'😄' will result in a High Surrogate (U+D83D) character, which is | ||||
|                     # rendered as u'<27>'; in this case we just strip that character out as it | ||||
|                     # serves no purpose being rendered | ||||
|                     msg = msg.rstrip(u"\uD83D") | ||||
|                 msg += ellipsis | ||||
|             line += sep + msg | ||||
|     return line | ||||
| 
 | ||||
| 
 | ||||
| def _folded_skips(skipped): | ||||
|     d = {} | ||||
|     for event in skipped: | ||||
|         key = event.longrepr | ||||
|         assert len(key) == 3, (event, key) | ||||
|         keywords = getattr(event, "keywords", {}) | ||||
|         # folding reports with global pytestmark variable | ||||
|         # this is a workaround, because for now we cannot identify the scope of a skip marker | ||||
|         # TODO: revisit once the scope of marks is fixed | ||||
|         if ( | ||||
|             event.when == "setup" | ||||
|             and "skip" in keywords | ||||
|             and "pytestmark" not in keywords | ||||
|         ): | ||||
|             key = (key[0], None, key[2]) | ||||
|         d.setdefault(key, []).append(event) | ||||
|     values = [] | ||||
|     for key, events in d.items(): | ||||
|         values.append((len(events),) + key) | ||||
|     return values | ||||
| 
 | ||||
| 
 | ||||
| def build_summary_stats_line(stats): | ||||
|     known_types = ( | ||||
|  |  | |||
|  | @ -9,6 +9,38 @@ class PytestWarning(UserWarning): | |||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class PytestAssertRewriteWarning(PytestWarning): | ||||
|     """ | ||||
|     Bases: :class:`PytestWarning`. | ||||
| 
 | ||||
|     Warning emitted by the pytest assert rewrite module. | ||||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class PytestCacheWarning(PytestWarning): | ||||
|     """ | ||||
|     Bases: :class:`PytestWarning`. | ||||
| 
 | ||||
|     Warning emitted by the cache plugin in various situations. | ||||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class PytestConfigWarning(PytestWarning): | ||||
|     """ | ||||
|     Bases: :class:`PytestWarning`. | ||||
| 
 | ||||
|     Warning emitted for configuration issues. | ||||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class PytestCollectionWarning(PytestWarning): | ||||
|     """ | ||||
|     Bases: :class:`PytestWarning`. | ||||
| 
 | ||||
|     Warning emitted when pytest is not able to collect a file or symbol in a module. | ||||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class PytestDeprecationWarning(PytestWarning, DeprecationWarning): | ||||
|     """ | ||||
|     Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`. | ||||
|  | @ -17,14 +49,6 @@ class PytestDeprecationWarning(PytestWarning, DeprecationWarning): | |||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class RemovedInPytest4Warning(PytestDeprecationWarning): | ||||
|     """ | ||||
|     Bases: :class:`pytest.PytestDeprecationWarning`. | ||||
| 
 | ||||
|     Warning class for features scheduled to be removed in pytest 4.0. | ||||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class PytestExperimentalApiWarning(PytestWarning, FutureWarning): | ||||
|     """ | ||||
|     Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`. | ||||
|  | @ -42,6 +66,33 @@ class PytestExperimentalApiWarning(PytestWarning, FutureWarning): | |||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| class PytestUnhandledCoroutineWarning(PytestWarning): | ||||
|     """ | ||||
|     Bases: :class:`PytestWarning`. | ||||
| 
 | ||||
|     Warning emitted when pytest encounters a test function which is a coroutine, | ||||
|     but it was not handled by any async-aware plugin. Coroutine test functions | ||||
|     are not natively supported. | ||||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class PytestUnknownMarkWarning(PytestWarning): | ||||
|     """ | ||||
|     Bases: :class:`PytestWarning`. | ||||
| 
 | ||||
|     Warning emitted on use of unknown markers. | ||||
|     See https://docs.pytest.org/en/latest/mark.html for details. | ||||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class RemovedInPytest4Warning(PytestDeprecationWarning): | ||||
|     """ | ||||
|     Bases: :class:`pytest.PytestDeprecationWarning`. | ||||
| 
 | ||||
|     Warning class for features scheduled to be removed in pytest 4.0. | ||||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| @attr.s | ||||
| class UnformattedWarning(object): | ||||
|     """Used to hold warnings that need to format their message at runtime, as opposed to a direct message. | ||||
|  |  | |||
|  | @ -35,8 +35,14 @@ from _pytest.python_api import approx | |||
| from _pytest.python_api import raises | ||||
| from _pytest.recwarn import deprecated_call | ||||
| from _pytest.recwarn import warns | ||||
| from _pytest.warning_types import PytestAssertRewriteWarning | ||||
| from _pytest.warning_types import PytestCacheWarning | ||||
| from _pytest.warning_types import PytestCollectionWarning | ||||
| from _pytest.warning_types import PytestConfigWarning | ||||
| from _pytest.warning_types import PytestDeprecationWarning | ||||
| from _pytest.warning_types import PytestExperimentalApiWarning | ||||
| from _pytest.warning_types import PytestUnhandledCoroutineWarning | ||||
| from _pytest.warning_types import PytestUnknownMarkWarning | ||||
| from _pytest.warning_types import PytestWarning | ||||
| from _pytest.warning_types import RemovedInPytest4Warning | ||||
| 
 | ||||
|  | @ -66,8 +72,14 @@ __all__ = [ | |||
|     "Module", | ||||
|     "Package", | ||||
|     "param", | ||||
|     "PytestAssertRewriteWarning", | ||||
|     "PytestCacheWarning", | ||||
|     "PytestCollectionWarning", | ||||
|     "PytestConfigWarning", | ||||
|     "PytestDeprecationWarning", | ||||
|     "PytestExperimentalApiWarning", | ||||
|     "PytestUnhandledCoroutineWarning", | ||||
|     "PytestUnknownMarkWarning", | ||||
|     "PytestWarning", | ||||
|     "raises", | ||||
|     "register_assert_rewrite", | ||||
|  |  | |||
|  | @ -428,9 +428,20 @@ class TestGeneralUsage(object): | |||
|             assert result.ret == 4  # usage error only if item not found | ||||
| 
 | ||||
|     def test_report_all_failed_collections_initargs(self, testdir): | ||||
|         testdir.makeconftest( | ||||
|             """ | ||||
|             from _pytest.main import EXIT_USAGEERROR | ||||
| 
 | ||||
|             def pytest_sessionfinish(exitstatus): | ||||
|                 assert exitstatus == EXIT_USAGEERROR | ||||
|                 print("pytest_sessionfinish_called") | ||||
|             """ | ||||
|         ) | ||||
|         testdir.makepyfile(test_a="def", test_b="def") | ||||
|         result = testdir.runpytest("test_a.py::a", "test_b.py::b") | ||||
|         result.stderr.fnmatch_lines(["*ERROR*test_a.py::a*", "*ERROR*test_b.py::b*"]) | ||||
|         result.stdout.fnmatch_lines(["pytest_sessionfinish_called"]) | ||||
|         assert result.ret == EXIT_USAGEERROR | ||||
| 
 | ||||
|     @pytest.mark.usefixtures("recwarn") | ||||
|     def test_namespace_import_doesnt_confuse_import_hook(self, testdir): | ||||
|  | @ -865,7 +876,9 @@ class TestInvocationVariants(object): | |||
|                 _fail, _sep, testid = line.partition(" ") | ||||
|                 break | ||||
|         result = testdir.runpytest(testid, "-rf") | ||||
|         result.stdout.fnmatch_lines( | ||||
|             ["FAILED test_doctest_id.txt::test_doctest_id.txt", "*1 failed*"] | ||||
|         ) | ||||
| 
 | ||||
|     def test_core_backward_compatibility(self): | ||||
|         """Test backward compatibility for get_plugin_manager function. See #787.""" | ||||
|  |  | |||
|  | @ -11,7 +11,6 @@ import textwrap | |||
| import py | ||||
| import six | ||||
| from six.moves import queue | ||||
| 
 | ||||
| import _pytest | ||||
| import pytest | ||||
|  | @ -147,7 +146,6 @@ class TestTraceback_f_g_h(object): | |||
|         assert s.startswith("def f():") | ||||
|         assert s.endswith("raise ValueError") | ||||
| 
 | ||||
|     @failsonjython | ||||
|     def test_traceback_entry_getsource_in_construct(self): | ||||
|         source = _pytest._code.Source( | ||||
|  |  | |||
|  | @ -16,7 +16,6 @@ import _pytest._code | |||
| import pytest | ||||
| from _pytest._code import Source | ||||
| 
 | ||||
| failsonjython = pytest.mark.xfail("sys.platform.startswith('java')") | ||||
| 
 | ||||
| 
 | ||||
|  | @ -227,7 +226,6 @@ class TestSourceParsingAndCompiling(object): | |||
|         s = source.getstatement(1) | ||||
|         assert s == str(source) | ||||
| 
 | ||||
|     def test_getstatementrange_within_constructs(self): | ||||
|         source = Source( | ||||
|             """\ | ||||
|  | @ -630,7 +628,6 @@ x = 3 | |||
| 
 | ||||
| 
 | ||||
| class TestTry(object): | ||||
|     source = """\ | ||||
| try: | ||||
|     raise ValueError | ||||
|  | @ -675,7 +672,6 @@ finally: | |||
| 
 | ||||
| 
 | ||||
| class TestIf(object): | ||||
|     source = """\ | ||||
| if 1: | ||||
|     y = 3 | ||||
|  |  | |||
|  | @ -0,0 +1,36 @@ | |||
| import pytest | ||||
| 
 | ||||
| 
 | ||||
| @pytest.hookimpl(hookwrapper=True, tryfirst=True) | ||||
| def pytest_collection_modifyitems(config, items): | ||||
|     """Prefer faster tests. | ||||
| 
 | ||||
|     Use a hookwrapper to do this in the beginning, so e.g. --ff still works | ||||
|     correctly. | ||||
|     """ | ||||
|     fast_items = [] | ||||
|     slow_items = [] | ||||
|     neutral_items = [] | ||||
| 
 | ||||
|     slow_fixturenames = ("testdir",) | ||||
| 
 | ||||
|     for item in items: | ||||
|         try: | ||||
|             fixtures = item.fixturenames | ||||
|         except AttributeError: | ||||
|             # doctest at least | ||||
|             # (https://github.com/pytest-dev/pytest/issues/5070) | ||||
|             neutral_items.append(item) | ||||
|         else: | ||||
|             if any(x for x in fixtures if x in slow_fixturenames): | ||||
|                 slow_items.append(item) | ||||
|             else: | ||||
|                 marker = item.get_closest_marker("slow") | ||||
|                 if marker: | ||||
|                     slow_items.append(item) | ||||
|                 else: | ||||
|                     fast_items.append(item) | ||||
| 
 | ||||
|     items[:] = fast_items + neutral_items + slow_items | ||||
| 
 | ||||
|     yield | ||||
|  | @ -248,7 +248,7 @@ def test_log_cli_enabled_disabled(testdir, enabled): | |||
|             [ | ||||
|                 "test_log_cli_enabled_disabled.py::test_log_cli ", | ||||
|                 "*-- live log call --*", | ||||
|                 "test_log_cli_enabled_disabled.py* CRITICAL critical message logged by test", | ||||
|                 "CRITICAL *test_log_cli_enabled_disabled.py* critical message logged by test", | ||||
|                 "PASSED*", | ||||
|             ] | ||||
|         ) | ||||
|  | @ -282,7 +282,7 @@ def test_log_cli_default_level(testdir): | |||
|     result.stdout.fnmatch_lines( | ||||
|         [ | ||||
|             "test_log_cli_default_level.py::test_log_cli ", | ||||
|             "test_log_cli_default_level.py*WARNING message will be shown*", | ||||
|             "WARNING*test_log_cli_default_level.py* message will be shown*", | ||||
|         ] | ||||
|     ) | ||||
|     assert "INFO message won't be shown" not in result.stdout.str() | ||||
|  | @ -523,7 +523,7 @@ def test_sections_single_new_line_after_test_outcome(testdir, request): | |||
|     ) | ||||
|     assert ( | ||||
|         re.search( | ||||
|             r"(.+)live log teardown(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)", | ||||
|             r"(.+)live log teardown(.+)\nWARNING(.+)\nWARNING(.+)", | ||||
|             result.stdout.str(), | ||||
|             re.MULTILINE, | ||||
|         ) | ||||
|  | @ -531,7 +531,7 @@ def test_sections_single_new_line_after_test_outcome(testdir, request): | |||
|     ) | ||||
|     assert ( | ||||
|         re.search( | ||||
|             r"(.+)live log finish(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)", | ||||
|             r"(.+)live log finish(.+)\nWARNING(.+)\nWARNING(.+)", | ||||
|             result.stdout.str(), | ||||
|             re.MULTILINE, | ||||
|         ) | ||||
|  | @ -565,7 +565,7 @@ def test_log_cli_level(testdir): | |||
|     # fnmatch_lines does an assertion internally | ||||
|     result.stdout.fnmatch_lines( | ||||
|         [ | ||||
|             "test_log_cli_level.py*This log message will be shown", | ||||
|             "*test_log_cli_level.py*This log message will be shown", | ||||
|             "PASSED",  # 'PASSED' on its own line because the log message prints a new line | ||||
|         ] | ||||
|     ) | ||||
|  | @ -579,7 +579,7 @@ def test_log_cli_level(testdir): | |||
|     # fnmatch_lines does an assertion internally | ||||
|     result.stdout.fnmatch_lines( | ||||
|         [ | ||||
|             "test_log_cli_level.py* This log message will be shown", | ||||
|             "*test_log_cli_level.py* This log message will be shown", | ||||
|             "PASSED",  # 'PASSED' on its own line because the log message prints a new line | ||||
|         ] | ||||
|     ) | ||||
|  | @ -615,7 +615,7 @@ def test_log_cli_ini_level(testdir): | |||
|     # fnmatch_lines does an assertion internally | ||||
|     result.stdout.fnmatch_lines( | ||||
|         [ | ||||
|             "test_log_cli_ini_level.py* This log message will be shown", | ||||
|             "*test_log_cli_ini_level.py* This log message will be shown", | ||||
|             "PASSED",  # 'PASSED' on its own line because the log message prints a new line | ||||
|         ] | ||||
|     ) | ||||
|  |  | |||
|  | @ -1925,10 +1925,10 @@ class TestAutouseManagement(object): | |||
|         reprec = testdir.inline_run() | ||||
|         reprec.assertoutcome(passed=1) | ||||
| 
 | ||||
|     @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00", "p01"]) | ||||
|     @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10", "p11"]) | ||||
|     def test_ordering_dependencies_torndown_first(self, testdir, param1, param2): | ||||
|         """#226""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -2707,9 +2707,9 @@ class TestFixtureMarker(object): | |||
|         reprec = testdir.inline_run("-v") | ||||
|         reprec.assertoutcome(passed=5) | ||||
| 
 | ||||
|     @pytest.mark.parametrize("scope", ["session", "function", "module"]) | ||||
|     def test_finalizer_order_on_parametrization(self, scope, testdir): | ||||
|         """#246""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -2744,8 +2744,8 @@ class TestFixtureMarker(object): | |||
|         reprec = testdir.inline_run("-lvs") | ||||
|         reprec.assertoutcome(passed=3) | ||||
| 
 | ||||
|     def test_class_scope_parametrization_ordering(self, testdir): | ||||
|         """#396""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -2865,8 +2865,8 @@ class TestFixtureMarker(object): | |||
|         res = testdir.runpytest("-v") | ||||
|         res.stdout.fnmatch_lines(["*test_foo*alpha*", "*test_foo*beta*"]) | ||||
| 
 | ||||
|     def test_deterministic_fixture_collection(self, testdir, monkeypatch): | ||||
|         """#920""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -3037,11 +3037,25 @@ class TestShowFixtures(object): | |||
| 
 | ||||
|     def test_show_fixtures(self, testdir): | ||||
|         result = testdir.runpytest("--fixtures") | ||||
|         result.stdout.fnmatch_lines(["*tmpdir*", "*temporary directory*"]) | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "tmpdir_factory [[]session scope[]]", | ||||
|                 "*for the test session*", | ||||
|                 "tmpdir", | ||||
|                 "*temporary directory*", | ||||
|             ] | ||||
|         ) | ||||
| 
 | ||||
|     def test_show_fixtures_verbose(self, testdir): | ||||
|         result = testdir.runpytest("--fixtures", "-v") | ||||
|         result.stdout.fnmatch_lines(["*tmpdir*--*tmpdir.py*", "*temporary directory*"]) | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "tmpdir_factory [[]session scope[]] -- *tmpdir.py*", | ||||
|                 "*for the test session*", | ||||
|                 "tmpdir -- *tmpdir.py*", | ||||
|                 "*temporary directory*", | ||||
|             ] | ||||
|         ) | ||||
| 
 | ||||
|     def test_show_fixtures_testmodule(self, testdir): | ||||
|         p = testdir.makepyfile( | ||||
|  | @ -3635,7 +3649,6 @@ class TestScopeOrdering(object): | |||
|     """Class of tests that ensure fixtures are ordered based on their scopes (#2405)""" | ||||
| 
 | ||||
|     @pytest.mark.parametrize("variant", ["mark", "autouse"]) | ||||
|     def test_func_closure_module_auto(self, testdir, variant, monkeypatch): | ||||
|         """Semantically identical to the example posted in #2405 when ``use_mark=True``""" | ||||
|         monkeypatch.setenv("FIXTURE_ACTIVATION_VARIANT", variant) | ||||
|  |  | |||
|  | @ -393,8 +393,9 @@ class TestNoselikeTestAttribute(object): | |||
|         assert not call.items | ||||
| 
 | ||||
| 
 | ||||
| class TestParameterize(object): | ||||
|     """#351""" | ||||
| 
 | ||||
|     def test_idfn_marker(self, testdir): | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|  |  | |||
|  | @ -159,8 +159,9 @@ class TestMetafunc(object): | |||
|                 ("x", "y"), [("abc", "def"), ("ghi", "jkl")], ids=["one"] | ||||
|             ) | ||||
| 
 | ||||
|     def test_parametrize_empty_list(self): | ||||
|         """#510""" | ||||
| 
 | ||||
|         def func(y): | ||||
|             pass | ||||
| 
 | ||||
|  | @ -262,8 +263,8 @@ class TestMetafunc(object): | |||
|         for val, expected in values: | ||||
|             assert _idval(val, "a", 6, None, item=None, config=None) == expected | ||||
| 
 | ||||
|     def test_idmaker_autoname(self): | ||||
|         """#250""" | ||||
|         from _pytest.python import idmaker | ||||
| 
 | ||||
|         result = idmaker( | ||||
|  | @ -356,8 +357,8 @@ class TestMetafunc(object): | |||
|         result = idmaker(("a", "b"), [pytest.param(e.one, e.two)]) | ||||
|         assert result == ["Foo.one-Foo.two"] | ||||
| 
 | ||||
|     def test_idmaker_idfn(self): | ||||
|         """#351""" | ||||
|         from _pytest.python import idmaker | ||||
| 
 | ||||
|         def ids(val): | ||||
|  | @ -375,8 +376,8 @@ class TestMetafunc(object): | |||
|         ) | ||||
|         assert result == ["10.0-IndexError()", "20-KeyError()", "three-b2"] | ||||
| 
 | ||||
|     def test_idmaker_idfn_unique_names(self): | ||||
|         """#351""" | ||||
|         from _pytest.python import idmaker | ||||
| 
 | ||||
|         def ids(val): | ||||
|  | @ -459,8 +460,9 @@ class TestMetafunc(object): | |||
|         ) | ||||
|         assert result == ["a0", "a1", "b0", "c", "b1"] | ||||
| 
 | ||||
|     def test_parametrize_indirect(self): | ||||
|         """#714""" | ||||
| 
 | ||||
|         def func(x, y): | ||||
|             pass | ||||
| 
 | ||||
|  | @ -473,8 +475,9 @@ class TestMetafunc(object): | |||
|         assert metafunc._calls[0].params == dict(x=1, y=2) | ||||
|         assert metafunc._calls[1].params == dict(x=1, y=3) | ||||
| 
 | ||||
|     def test_parametrize_indirect_list(self): | ||||
|         """#714""" | ||||
| 
 | ||||
|         def func(x, y): | ||||
|             pass | ||||
| 
 | ||||
|  | @ -483,8 +486,9 @@ class TestMetafunc(object): | |||
|         assert metafunc._calls[0].funcargs == dict(y="b") | ||||
|         assert metafunc._calls[0].params == dict(x="a") | ||||
| 
 | ||||
|     def test_parametrize_indirect_list_all(self): | ||||
|         """#714""" | ||||
| 
 | ||||
|         def func(x, y): | ||||
|             pass | ||||
| 
 | ||||
|  | @ -493,8 +497,9 @@ class TestMetafunc(object): | |||
|         assert metafunc._calls[0].funcargs == {} | ||||
|         assert metafunc._calls[0].params == dict(x="a", y="b") | ||||
| 
 | ||||
|     def test_parametrize_indirect_list_empty(self): | ||||
|         """#714""" | ||||
| 
 | ||||
|         def func(x, y): | ||||
|             pass | ||||
| 
 | ||||
|  | @ -503,9 +508,9 @@ class TestMetafunc(object): | |||
|         assert metafunc._calls[0].funcargs == dict(x="a", y="b") | ||||
|         assert metafunc._calls[0].params == {} | ||||
| 
 | ||||
|     def test_parametrize_indirect_list_functional(self, testdir): | ||||
|         """ | ||||
|         #714 | ||||
|         Test parametrization with 'indirect' parameter applied on | ||||
|         particular arguments. As y is direct, its value should | ||||
|         be used directly rather than being passed to the fixture | ||||
|  | @ -532,8 +537,9 @@ class TestMetafunc(object): | |||
|         result = testdir.runpytest("-v") | ||||
|         result.stdout.fnmatch_lines(["*test_simple*a-b*", "*1 passed*"]) | ||||
| 
 | ||||
|     def test_parametrize_indirect_list_error(self, testdir): | ||||
|         """#714""" | ||||
| 
 | ||||
|         def func(x, y): | ||||
|             pass | ||||
| 
 | ||||
|  | @ -541,12 +547,13 @@ class TestMetafunc(object): | |||
|         with pytest.raises(pytest.fail.Exception): | ||||
|             metafunc.parametrize("x, y", [("a", "b")], indirect=["x", "z"]) | ||||
| 
 | ||||
|     def test_parametrize_uses_no_fixture_error_indirect_false(self, testdir): | ||||
|         """The 'uses no fixture' error tells the user at collection time | ||||
|         that the parametrize data they've set up doesn't correspond to the | ||||
|         fixtures in their test function, rather than silently ignoring this | ||||
|         and letting the test potentially pass. | ||||
| 
 | ||||
|         #714 | ||||
|         """ | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|  | @ -560,8 +567,8 @@ class TestMetafunc(object): | |||
|         result = testdir.runpytest("--collect-only") | ||||
|         result.stdout.fnmatch_lines(["*uses no argument 'y'*"]) | ||||
| 
 | ||||
|     def test_parametrize_uses_no_fixture_error_indirect_true(self, testdir): | ||||
|         """#714""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -580,8 +587,8 @@ class TestMetafunc(object): | |||
|         result = testdir.runpytest("--collect-only") | ||||
|         result.stdout.fnmatch_lines(["*uses no fixture 'y'*"]) | ||||
| 
 | ||||
|     def test_parametrize_indirect_uses_no_fixture_error_indirect_string(self, testdir): | ||||
|         """#714""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -597,8 +604,8 @@ class TestMetafunc(object): | |||
|         result = testdir.runpytest("--collect-only") | ||||
|         result.stdout.fnmatch_lines(["*uses no fixture 'y'*"]) | ||||
| 
 | ||||
|     def test_parametrize_indirect_uses_no_fixture_error_indirect_list(self, testdir): | ||||
|         """#714""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -614,8 +621,8 @@ class TestMetafunc(object): | |||
|         result = testdir.runpytest("--collect-only") | ||||
|         result.stdout.fnmatch_lines(["*uses no fixture 'y'*"]) | ||||
| 
 | ||||
|     def test_parametrize_argument_not_in_indirect_list(self, testdir): | ||||
|         """#714""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -1201,9 +1208,9 @@ class TestMetafuncFunctional(object): | |||
|         reprec = testdir.runpytest() | ||||
|         reprec.assert_outcomes(passed=4) | ||||
| 
 | ||||
|     @pytest.mark.parametrize("attr", ["parametrise", "parameterize", "parameterise"]) | ||||
|     def test_parametrize_misspelling(self, testdir, attr): | ||||
|         """#463""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -1386,8 +1393,9 @@ class TestMetafuncFunctionalAuto(object): | |||
|         assert output.count("preparing foo-3") == 1 | ||||
| 
 | ||||
| 
 | ||||
| class TestMarkersWithParametrization(object): | ||||
|     """#308""" | ||||
| 
 | ||||
|     def test_simple_mark(self, testdir): | ||||
|         s = """ | ||||
|             import pytest | ||||
|  | @ -1575,8 +1583,8 @@ class TestMarkersWithParametrization(object): | |||
|         reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG) | ||||
|         reprec.assertoutcome(passed=2, skipped=2) | ||||
| 
 | ||||
|     def test_parametrize_ID_generation_string_int_works(self, testdir): | ||||
|         """#290""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  |  | |||
|  | @ -446,6 +446,50 @@ class TestAssert_reprcompare(object): | |||
|         assert "Omitting" not in lines[1] | ||||
|         assert lines[2] == "{'b': 1}" | ||||
| 
 | ||||
|     def test_dict_different_items(self): | ||||
|         lines = callequal({"a": 0}, {"b": 1, "c": 2}, verbose=2) | ||||
|         assert lines == [ | ||||
|             "{'a': 0} == {'b': 1, 'c': 2}", | ||||
|             "Left contains 1 more item:", | ||||
|             "{'a': 0}", | ||||
|             "Right contains 2 more items:", | ||||
|             "{'b': 1, 'c': 2}", | ||||
|             "Full diff:", | ||||
|             "- {'a': 0}", | ||||
|             "+ {'b': 1, 'c': 2}", | ||||
|         ] | ||||
|         lines = callequal({"b": 1, "c": 2}, {"a": 0}, verbose=2) | ||||
|         assert lines == [ | ||||
|             "{'b': 1, 'c': 2} == {'a': 0}", | ||||
|             "Left contains 2 more items:", | ||||
|             "{'b': 1, 'c': 2}", | ||||
|             "Right contains 1 more item:", | ||||
|             "{'a': 0}", | ||||
|             "Full diff:", | ||||
|             "- {'b': 1, 'c': 2}", | ||||
|             "+ {'a': 0}", | ||||
|         ] | ||||
| 
 | ||||
|     def test_sequence_different_items(self): | ||||
|         lines = callequal((1, 2), (3, 4, 5), verbose=2) | ||||
|         assert lines == [ | ||||
|             "(1, 2) == (3, 4, 5)", | ||||
|             "At index 0 diff: 1 != 3", | ||||
|             "Right contains one more item: 5", | ||||
|             "Full diff:", | ||||
|             "- (1, 2)", | ||||
|             "+ (3, 4, 5)", | ||||
|         ] | ||||
|         lines = callequal((1, 2, 3), (4,), verbose=2) | ||||
|         assert lines == [ | ||||
|             "(1, 2, 3) == (4,)", | ||||
|             "At index 0 diff: 1 != 4", | ||||
|             "Left contains 2 more items, first extra item: 2", | ||||
|             "Full diff:", | ||||
|             "- (1, 2, 3)", | ||||
|             "+ (4,)", | ||||
|         ] | ||||
| 
 | ||||
|     def test_set(self): | ||||
|         expl = callequal({0, 1}, {0, 2}) | ||||
|         assert len(expl) > 1 | ||||
|  |  | |||
|  | @ -10,6 +10,7 @@ import textwrap | |||
| import py | ||||
| 
 | ||||
| import pytest | ||||
| from _pytest.main import EXIT_NOTESTSCOLLECTED | ||||
| 
 | ||||
| pytest_plugins = ("pytester",) | ||||
| 
 | ||||
|  | @ -195,6 +196,7 @@ def test_cache_show(testdir): | |||
|         """ | ||||
|         def pytest_configure(config): | ||||
|             config.cache.set("my/name", [1,2,3]) | ||||
|             config.cache.set("my/hello", "world") | ||||
|             config.cache.set("other/some", {1:2}) | ||||
|             dp = config.cache.makedir("mydb") | ||||
|             dp.ensure("hello") | ||||
|  | @ -203,20 +205,39 @@ def test_cache_show(testdir): | |||
|     ) | ||||
|     result = testdir.runpytest() | ||||
|     assert result.ret == 5  # no tests executed | ||||
| 
 | ||||
|     result = testdir.runpytest("--cache-show") | ||||
|     result.stdout.fnmatch_lines( | ||||
|         [ | ||||
|             "*cachedir:*", | ||||
|             "-*cache values*-", | ||||
|             "*my/name contains:", | ||||
|             "*- cache values for '[*]' -*", | ||||
|             "cache/nodeids contains:", | ||||
|             "my/name contains:", | ||||
|             "  [1, 2, 3]", | ||||
|             "*other/some contains*", | ||||
|             "  {*1*: 2}", | ||||
|             "-*cache directories*-", | ||||
|             "other/some contains:", | ||||
|             "  {*'1': 2}", | ||||
|             "*- cache directories for '[*]' -*", | ||||
|             "*mydb/hello*length 0*", | ||||
|             "*mydb/world*length 0*", | ||||
|         ] | ||||
|     ) | ||||
|     assert result.ret == 0 | ||||
| 
 | ||||
|     result = testdir.runpytest("--cache-show", "*/hello") | ||||
|     result.stdout.fnmatch_lines( | ||||
|         [ | ||||
|             "*cachedir:*", | ||||
|             "*- cache values for '[*]/hello' -*", | ||||
|             "my/hello contains:", | ||||
|             "  *'world'", | ||||
|             "*- cache directories for '[*]/hello' -*", | ||||
|             "d/mydb/hello*length 0*", | ||||
|         ] | ||||
|     ) | ||||
|     stdout = result.stdout.str() | ||||
|     assert "other/some" not in stdout | ||||
|     assert "d/mydb/world" not in stdout | ||||
|     assert result.ret == 0 | ||||
| 
 | ||||
| 
 | ||||
| class TestLastFailed(object): | ||||
|  | @ -251,7 +272,13 @@ class TestLastFailed(object): | |||
|         result = testdir.runpytest("--lf") | ||||
|         result.stdout.fnmatch_lines(["*2 passed*1 desel*"]) | ||||
|         result = testdir.runpytest("--lf") | ||||
|         result.stdout.fnmatch_lines(["*1 failed*2 passed*"]) | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "collected 3 items", | ||||
|                 "run-last-failure: no previously failed tests, not deselecting items.", | ||||
|                 "*1 failed*2 passed*", | ||||
|             ] | ||||
|         ) | ||||
|         result = testdir.runpytest("--lf", "--cache-clear") | ||||
|         result.stdout.fnmatch_lines(["*1 failed*2 passed*"]) | ||||
| 
 | ||||
|  | @ -418,14 +445,20 @@ class TestLastFailed(object): | |||
|         result = testdir.runpytest("--lf") | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "collected 4 items / 2 deselected / 2 selected", | ||||
|                 "run-last-failure: rerun previous 2 failures", | ||||
|                 "*2 failed, 2 deselected in*", | ||||
|                 "collected 2 items", | ||||
|                 "run-last-failure: rerun previous 2 failures (skipped 1 file)", | ||||
|                 "*2 failed in*", | ||||
|             ] | ||||
|         ) | ||||
| 
 | ||||
|         result = testdir.runpytest(test_a, "--lf") | ||||
|         result.stdout.fnmatch_lines(["collected 2 items", "*2 passed in*"]) | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "collected 2 items", | ||||
|                 "run-last-failure: 2 known failures not in selected tests", | ||||
|                 "*2 passed in*", | ||||
|             ] | ||||
|         ) | ||||
| 
 | ||||
|         result = testdir.runpytest(test_b, "--lf") | ||||
|         result.stdout.fnmatch_lines( | ||||
|  | @ -685,7 +718,7 @@ class TestLastFailed(object): | |||
|         assert self.get_cached_last_failed(testdir) == ["test_foo.py::test_foo_4"] | ||||
| 
 | ||||
|         result = testdir.runpytest("--last-failed") | ||||
|         result.stdout.fnmatch_lines(["*1 failed, 3 deselected*"]) | ||||
|         result.stdout.fnmatch_lines(["*1 failed, 1 deselected*"]) | ||||
|         assert self.get_cached_last_failed(testdir) == ["test_foo.py::test_foo_4"] | ||||
| 
 | ||||
|         # 3. fix test_foo_4, run only test_foo.py | ||||
|  | @ -721,7 +754,14 @@ class TestLastFailed(object): | |||
|         result = testdir.runpytest("--lf", "--lfnf", "all") | ||||
|         result.stdout.fnmatch_lines(["*2 passed*"]) | ||||
|         result = testdir.runpytest("--lf", "--lfnf", "none") | ||||
|         result.stdout.fnmatch_lines(["*2 desel*"]) | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "collected 2 items / 2 deselected", | ||||
|                 "run-last-failure: no previously failed tests, deselecting all items.", | ||||
|                 "* 2 deselected in *", | ||||
|             ] | ||||
|         ) | ||||
|         assert result.ret == EXIT_NOTESTSCOLLECTED | ||||
| 
 | ||||
|     def test_lastfailed_no_failures_behavior_empty_cache(self, testdir): | ||||
|         testdir.makepyfile( | ||||
|  | @ -739,6 +779,58 @@ class TestLastFailed(object): | |||
|         result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "none") | ||||
|         result.stdout.fnmatch_lines(["*2 desel*"]) | ||||
| 
 | ||||
|     def test_lastfailed_skip_collection(self, testdir): | ||||
|         """ | ||||
|         Test --lf behavior regarding skipping collection of files that are not marked as | ||||
|         failed in the cache (#5172). | ||||
|         """ | ||||
|         testdir.makepyfile( | ||||
|             **{ | ||||
|                 "pkg1/test_1.py": """ | ||||
|                 import pytest | ||||
| 
 | ||||
|                 @pytest.mark.parametrize('i', range(3)) | ||||
|                 def test_1(i): pass | ||||
|             """, | ||||
|                 "pkg2/test_2.py": """ | ||||
|                 import pytest | ||||
| 
 | ||||
|                 @pytest.mark.parametrize('i', range(5)) | ||||
|                 def test_1(i): | ||||
|                     assert i not in (1, 3) | ||||
|             """, | ||||
|             } | ||||
|         ) | ||||
|         # first run: collects 8 items (test_1: 3, test_2: 5) | ||||
|         result = testdir.runpytest() | ||||
|         result.stdout.fnmatch_lines(["collected 8 items", "*2 failed*6 passed*"]) | ||||
|         # second run: collects only 5 items from test_2, because all tests from test_1 have passed | ||||
|         result = testdir.runpytest("--lf") | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "collected 5 items / 3 deselected / 2 selected", | ||||
|                 "run-last-failure: rerun previous 2 failures (skipped 1 file)", | ||||
|                 "*2 failed*3 deselected*", | ||||
|             ] | ||||
|         ) | ||||
| 
 | ||||
|         # add another file and check if message is correct when skipping more than 1 file | ||||
|         testdir.makepyfile( | ||||
|             **{ | ||||
|                 "pkg1/test_3.py": """ | ||||
|                 def test_3(): pass | ||||
|             """ | ||||
|             } | ||||
|         ) | ||||
|         result = testdir.runpytest("--lf") | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "collected 5 items / 3 deselected / 2 selected", | ||||
|                 "run-last-failure: rerun previous 2 failures (skipped 2 files)", | ||||
|                 "*2 failed*3 deselected*", | ||||
|             ] | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| class TestNewFirst(object): | ||||
|     def test_newfirst_usecase(self, testdir): | ||||
|  |  | |||
|  | @ -605,8 +605,8 @@ class TestCaptureFixture(object): | |||
|         result.stdout.fnmatch_lines(["*KeyboardInterrupt*"]) | ||||
|         assert result.ret == 2 | ||||
| 
 | ||||
|     def test_capture_and_logging(self, testdir): | ||||
|         """#14""" | ||||
|         p = testdir.makepyfile( | ||||
|             """\ | ||||
|             import logging | ||||
|  | @ -819,15 +819,15 @@ def test_error_during_readouterr(testdir): | |||
|     testdir.makepyfile( | ||||
|         pytest_xyz=""" | ||||
|         from _pytest.capture import FDCapture | ||||
| 
 | ||||
|         def bad_snap(self): | ||||
|             raise Exception('boom') | ||||
| 
 | ||||
|         assert FDCapture.snap | ||||
|         FDCapture.snap = bad_snap | ||||
|     """ | ||||
|     ) | ||||
|     result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version") | ||||
|     result.stderr.fnmatch_lines( | ||||
|         ["*in bad_snap", "    raise Exception('boom')", "Exception: boom"] | ||||
|     ) | ||||
|  | @ -1243,25 +1243,24 @@ class TestStdCaptureFDinvalidFD(object): | |||
|             from _pytest import capture | ||||
| 
 | ||||
|             def StdCaptureFD(out=True, err=True, in_=True): | ||||
|                 return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture) | ||||
| 
 | ||||
|             def test_stdout(): | ||||
|                 os.close(1) | ||||
|                 cap = StdCaptureFD(out=True, err=False, in_=False) | ||||
|                 assert repr(cap.out) == "<FDCapture 1 oldfd=None>" | ||||
|                 assert repr(cap.out) == "<FDCapture 1 oldfd=None _state=None>" | ||||
|                 cap.stop_capturing() | ||||
| 
 | ||||
|             def test_stderr(): | ||||
|                 os.close(2) | ||||
|                 cap = StdCaptureFD(out=False, err=True, in_=False) | ||||
|                 assert repr(cap.err) == "<FDCapture 2 oldfd=None>" | ||||
|                 assert repr(cap.err) == "<FDCapture 2 oldfd=None _state=None>" | ||||
|                 cap.stop_capturing() | ||||
| 
 | ||||
|             def test_stdin(): | ||||
|                 os.close(0) | ||||
|                 cap = StdCaptureFD(out=False, err=False, in_=True) | ||||
|                 assert repr(cap.in_) == "<FDCapture 0 oldfd=None>" | ||||
|                 assert repr(cap.in_) == "<FDCapture 0 oldfd=None _state=None>" | ||||
|                 cap.stop_capturing() | ||||
|         """ | ||||
|         ) | ||||
|  |  | |||
|  | @ -1212,20 +1212,12 @@ def test_config_does_not_load_blocked_plugin_from_args(testdir): | |||
|     [ | ||||
|         x | ||||
|         for x in _pytest.config.default_plugins | ||||
|         if x not in _pytest.config.essential_plugins | ||||
|     ], | ||||
| ) | ||||
| def test_config_blocked_default_plugins(testdir, plugin): | ||||
|     if plugin == "debugging": | ||||
|         # Fixed in xdist master (after 1.27.0). | ||||
|         # https://github.com/pytest-dev/pytest-xdist/pull/422 | ||||
|         try: | ||||
|             import xdist  # noqa: F401 | ||||
|  | @ -1237,9 +1229,13 @@ def test_config_blocked_default_plugins(testdir, plugin): | |||
|     p = testdir.makepyfile("def test(): pass") | ||||
|     result = testdir.runpytest(str(p), "-pno:%s" % plugin) | ||||
|     assert result.ret == EXIT_OK | ||||
|     if plugin != "terminal": | ||||
|         result.stdout.fnmatch_lines(["* 1 passed in *"]) | ||||
| 
 | ||||
|     p = testdir.makepyfile("def test(): assert 0") | ||||
|     result = testdir.runpytest(str(p), "-pno:%s" % plugin) | ||||
|     assert result.ret == EXIT_TESTSFAILED | ||||
|     if plugin != "terminal": | ||||
|         result.stdout.fnmatch_lines(["* 1 failed in *"]) | ||||
|     else: | ||||
|         assert result.stdout.lines == [""] | ||||
|  |  | |||
|  | @ -491,10 +491,10 @@ class TestConftestVisibility(object): | |||
|             ("snc", ".", 1), | ||||
|         ], | ||||
|     ) | ||||
|     @pytest.mark.issue616 | ||||
|     def test_parsefactories_relative_node_ids( | ||||
|         self, testdir, chdir, testarg, expect_ntests_passed | ||||
|     ): | ||||
|         """#616""" | ||||
|         dirs = self._setup_tree(testdir) | ||||
|         print("pytest run in cwd: %s" % (dirs[chdir].relto(testdir.tmpdir))) | ||||
|         print("pytestarg        : %s" % (testarg)) | ||||
|  |  | |||
|  | @ -485,9 +485,27 @@ class TestPython(object): | |||
|         tnode = node.find_first_by_tag("testcase") | ||||
|         tnode.assert_attr(classname="test_xfailure_function", name="test_xfail") | ||||
|         fnode = tnode.find_first_by_tag("skipped") | ||||
|         fnode.assert_attr(message="expected test failure") | ||||
|         fnode.assert_attr(type="pytest.xfail", message="42") | ||||
|         # assert "ValueError" in fnode.toxml() | ||||
| 
 | ||||
|     def test_xfailure_marker(self, testdir): | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|             @pytest.mark.xfail(reason="42") | ||||
|             def test_xfail(): | ||||
|                 assert False | ||||
|         """ | ||||
|         ) | ||||
|         result, dom = runandparse(testdir) | ||||
|         assert not result.ret | ||||
|         node = dom.find_first_by_tag("testsuite") | ||||
|         node.assert_attr(skipped=1, tests=1) | ||||
|         tnode = node.find_first_by_tag("testcase") | ||||
|         tnode.assert_attr(classname="test_xfailure_marker", name="test_xfail") | ||||
|         fnode = tnode.find_first_by_tag("skipped") | ||||
|         fnode.assert_attr(type="pytest.xfail", message="42") | ||||
| 
 | ||||
|     def test_xfail_captures_output_once(self, testdir): | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|  | @ -975,6 +993,20 @@ def test_record_property_same_name(testdir): | |||
|     pnodes[1].assert_attr(name="foo", value="baz") | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize("fixture_name", ["record_property", "record_xml_attribute"]) | ||||
| def test_record_fixtures_without_junitxml(testdir, fixture_name): | ||||
|     testdir.makepyfile( | ||||
|         """ | ||||
|         def test_record({fixture_name}): | ||||
|             {fixture_name}("foo", "bar") | ||||
|     """.format( | ||||
|             fixture_name=fixture_name | ||||
|         ) | ||||
|     ) | ||||
|     result = testdir.runpytest() | ||||
|     assert result.ret == 0 | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.filterwarnings("default") | ||||
| def test_record_attribute(testdir): | ||||
|     testdir.makeini( | ||||
|  | @ -1005,8 +1037,9 @@ def test_record_attribute(testdir): | |||
| 
 | ||||
| 
 | ||||
| @pytest.mark.filterwarnings("default") | ||||
| @pytest.mark.parametrize("fixture_name", ["record_xml_attribute", "record_property"]) | ||||
| def test_record_fixtures_xunit2(testdir, fixture_name): | ||||
|     """Ensure record_xml_attribute and record_property drop values when outside of legacy family | ||||
|     """ | ||||
|     testdir.makeini( | ||||
|         """ | ||||
|  | @ -1019,21 +1052,28 @@ def test_record_attribute_xunit2(testdir): | |||
|         import pytest | ||||
| 
 | ||||
|         @pytest.fixture | ||||
|         def other({fixture_name}): | ||||
|             {fixture_name}("bar", 1) | ||||
|         def test_record({fixture_name}, other): | ||||
|             {fixture_name}("foo", "<1"); | ||||
|     """.format( | ||||
|             fixture_name=fixture_name | ||||
|         ) | ||||
|     ) | ||||
| 
 | ||||
|     result, dom = runandparse(testdir, "-rw") | ||||
|     expected_lines = [] | ||||
|     if fixture_name == "record_xml_attribute": | ||||
|         expected_lines.append( | ||||
|             "*test_record_fixtures_xunit2.py:6:*record_xml_attribute is an experimental feature" | ||||
|         ) | ||||
|     expected_lines += [ | ||||
|         "*test_record_fixtures_xunit2.py:6:*{fixture_name} is incompatible " | ||||
|         "with junit_family 'xunit2' (use 'legacy' or 'xunit1')".format( | ||||
|             fixture_name=fixture_name | ||||
|         ) | ||||
|     ] | ||||
|     result.stdout.fnmatch_lines(expected_lines) | ||||
| 
 | ||||
| 
 | ||||
| def test_random_report_log_xdist(testdir, monkeypatch): | ||||
|  | @ -1203,6 +1243,53 @@ def test_url_property(testdir): | |||
|     ), "The URL did not get written to the xml" | ||||
| 
 | ||||
| 
 | ||||
| def test_record_testsuite_property(testdir): | ||||
|     testdir.makepyfile( | ||||
|         """ | ||||
|         def test_func1(record_testsuite_property): | ||||
|             record_testsuite_property("stats", "all good") | ||||
| 
 | ||||
|         def test_func2(record_testsuite_property): | ||||
|             record_testsuite_property("stats", 10) | ||||
|     """ | ||||
|     ) | ||||
|     result, dom = runandparse(testdir) | ||||
|     assert result.ret == 0 | ||||
|     node = dom.find_first_by_tag("testsuite") | ||||
|     properties_node = node.find_first_by_tag("properties") | ||||
|     p1_node = properties_node.find_nth_by_tag("property", 0) | ||||
|     p2_node = properties_node.find_nth_by_tag("property", 1) | ||||
|     p1_node.assert_attr(name="stats", value="all good") | ||||
|     p2_node.assert_attr(name="stats", value="10") | ||||
| 
 | ||||
| 
 | ||||
| def test_record_testsuite_property_junit_disabled(testdir): | ||||
|     testdir.makepyfile( | ||||
|         """ | ||||
|         def test_func1(record_testsuite_property): | ||||
|             record_testsuite_property("stats", "all good") | ||||
|     """ | ||||
|     ) | ||||
|     result = testdir.runpytest() | ||||
|     assert result.ret == 0 | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize("junit", [True, False]) | ||||
| def test_record_testsuite_property_type_checking(testdir, junit): | ||||
|     testdir.makepyfile( | ||||
|         """ | ||||
|         def test_func1(record_testsuite_property): | ||||
|             record_testsuite_property(1, 2) | ||||
|     """ | ||||
|     ) | ||||
|     args = ("--junitxml=tests.xml",) if junit else () | ||||
|     result = testdir.runpytest(*args) | ||||
|     assert result.ret == 1 | ||||
|     result.stdout.fnmatch_lines( | ||||
|         ["*TypeError: name parameter needs to be a string, but int given"] | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize("suite_name", ["my_suite", ""]) | ||||
| def test_set_suite_name(testdir, suite_name): | ||||
|     if suite_name: | ||||
|  |  | |||
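The record_testsuite_property fixture exercised above adds <property> tags at the <testsuite> level of the XML report. A minimal usage sketch (hypothetical test module; the names follow the pytest documentation's example):

    def test_hello(record_testsuite_property):
        # Each call becomes a suite-level <property> when --junitxml is used;
        # the name must be a string, the value is converted to one.
        record_testsuite_property("ARCH", "PPC")
        record_testsuite_property("STORAGE_TYPE", "CEPH")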
|  | @ -44,11 +44,11 @@ class TestMark(object): | |||
|         class SomeClass(object): | ||||
|             pass | ||||
| 
 | ||||
|         assert pytest.mark.fun(some_function) is some_function | ||||
|         assert pytest.mark.fun.with_args(some_function) is not some_function | ||||
|         assert pytest.mark.foo(some_function) is some_function | ||||
|         assert pytest.mark.foo.with_args(some_function) is not some_function | ||||
| 
 | ||||
|         assert pytest.mark.fun(SomeClass) is SomeClass | ||||
|         assert pytest.mark.fun.with_args(SomeClass) is not SomeClass | ||||
|         assert pytest.mark.foo(SomeClass) is SomeClass | ||||
|         assert pytest.mark.foo.with_args(SomeClass) is not SomeClass | ||||
| 
 | ||||
|     def test_pytest_mark_name_starts_with_underscore(self): | ||||
|         mark = Mark() | ||||
|  | @ -130,7 +130,7 @@ def test_ini_markers_whitespace(testdir): | |||
|             assert True | ||||
|     """ | ||||
|     ) | ||||
|     rec = testdir.inline_run("--strict", "-m", "a1") | ||||
|     rec = testdir.inline_run("--strict-markers", "-m", "a1") | ||||
|     rec.assertoutcome(passed=1) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -150,7 +150,7 @@ def test_marker_without_description(testdir): | |||
|     ) | ||||
|     ftdir = testdir.mkdir("ft1_dummy") | ||||
|     testdir.tmpdir.join("conftest.py").move(ftdir.join("conftest.py")) | ||||
|     rec = testdir.runpytest("--strict") | ||||
|     rec = testdir.runpytest("--strict-markers") | ||||
|     rec.assert_outcomes() | ||||
| 
 | ||||
| 
 | ||||
|  | @ -194,7 +194,8 @@ def test_mark_on_pseudo_function(testdir): | |||
|     reprec.assertoutcome(passed=1) | ||||
| 
 | ||||
| 
 | ||||
| def test_strict_prohibits_unregistered_markers(testdir): | ||||
| @pytest.mark.parametrize("option_name", ["--strict-markers", "--strict"]) | ||||
| def test_strict_prohibits_unregistered_markers(testdir, option_name): | ||||
|     testdir.makepyfile( | ||||
|         """ | ||||
|         import pytest | ||||
|  | @ -203,9 +204,11 @@ def test_strict_prohibits_unregistered_markers(testdir): | |||
|             pass | ||||
|     """ | ||||
|     ) | ||||
|     result = testdir.runpytest("--strict") | ||||
|     result = testdir.runpytest(option_name) | ||||
|     assert result.ret != 0 | ||||
|     result.stdout.fnmatch_lines(["'unregisteredmark' not a registered marker"]) | ||||
|     result.stdout.fnmatch_lines( | ||||
|         ["'unregisteredmark' not found in `markers` configuration option"] | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
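The parametrization above runs the same check for ``--strict-markers`` and ``--strict``; marks registered through the ``markers`` ini option remain allowed. A minimal sketch of the passing case (hypothetical pytester test):

    def test_registered_marker_ok(testdir):
        testdir.makeini(
            """
            [pytest]
            markers =
                slow: marks tests as slow
            """
        )
        testdir.makepyfile(
            """
            import pytest

            @pytest.mark.slow
            def test_ok():
                pass
            """
        )
        # Registered markers do not trigger the strict-markers error.
        result = testdir.runpytest("--strict-markers")
        assert result.ret == 0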
| @pytest.mark.parametrize( | ||||
|  | @ -449,8 +452,8 @@ class TestFunctional(object): | |||
|         items, rec = testdir.inline_genitems(p) | ||||
|         self.assert_markers(items, test_foo=("a", "b"), test_bar=("a",)) | ||||
| 
 | ||||
|     @pytest.mark.issue568 | ||||
|     def test_mark_should_not_pass_to_siebling_class(self, testdir): | ||||
|         """#568""" | ||||
|         p = testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -652,9 +655,9 @@ class TestFunctional(object): | |||
|             markers = {m.name for m in items[name].iter_markers()} | ||||
|             assert markers == set(expected_markers) | ||||
| 
 | ||||
|     @pytest.mark.issue1540 | ||||
|     @pytest.mark.filterwarnings("ignore") | ||||
|     def test_mark_from_parameters(self, testdir): | ||||
|         """#1540""" | ||||
|         testdir.makepyfile( | ||||
|             """ | ||||
|             import pytest | ||||
|  | @ -933,16 +936,16 @@ def test_mark_expressions_no_smear(testdir): | |||
| 
 | ||||
| def test_addmarker_order(): | ||||
|     node = Node("Test", config=mock.Mock(), session=mock.Mock(), nodeid="Test") | ||||
|     node.add_marker("a") | ||||
|     node.add_marker("b") | ||||
|     node.add_marker("c", append=False) | ||||
|     node.add_marker("foo") | ||||
|     node.add_marker("bar") | ||||
|     node.add_marker("baz", append=False) | ||||
|     extracted = [x.name for x in node.iter_markers()] | ||||
|     assert extracted == ["c", "a", "b"] | ||||
|     assert extracted == ["baz", "foo", "bar"] | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.issue("https://github.com/pytest-dev/pytest/issues/3605") | ||||
| @pytest.mark.filterwarnings("ignore") | ||||
| def test_markers_from_parametrize(testdir): | ||||
|     """#3605""" | ||||
|     testdir.makepyfile( | ||||
|         """ | ||||
|         from __future__ import print_function | ||||
|  |  | |||
|  | @ -6,6 +6,8 @@ import py | |||
| import _pytest | ||||
| import pytest | ||||
| 
 | ||||
| pytestmark = pytest.mark.slow | ||||
| 
 | ||||
| MODSET = [ | ||||
|     x | ||||
|     for x in py.path.local(_pytest.__file__).dirpath().visit("*.py") | ||||
|  |  | |||
|  | @ -6,6 +6,8 @@ import os | |||
| import platform | ||||
| import sys | ||||
| 
 | ||||
| import six | ||||
| 
 | ||||
| import _pytest._code | ||||
| import pytest | ||||
| from _pytest.debugging import _validate_usepdb_cls | ||||
|  | @ -395,7 +397,7 @@ class TestPDB(object): | |||
|         child = testdir.spawn_pytest(str(p1)) | ||||
|         child.expect("test_1") | ||||
|         child.expect("Pdb") | ||||
|         child.sendeof() | ||||
|         child.sendline("q") | ||||
|         rest = child.read().decode("utf8") | ||||
|         assert "no tests ran" in rest | ||||
|         assert "reading from stdin while output" not in rest | ||||
|  | @ -957,7 +959,7 @@ class TestDebuggingBreakpoints(object): | |||
|         child = testdir.spawn_pytest(str(p1)) | ||||
|         child.expect("test_1") | ||||
|         child.expect("Pdb") | ||||
|         child.sendeof() | ||||
|         child.sendline("quit") | ||||
|         rest = child.read().decode("utf8") | ||||
|         assert "Quitting debugger" in rest | ||||
|         assert "reading from stdin while output" not in rest | ||||
|  | @ -1013,7 +1015,8 @@ class TestTraceOption: | |||
|         rest = child.read().decode("utf8") | ||||
|         assert "2 passed in" in rest | ||||
|         assert "reading from stdin while output" not in rest | ||||
|         assert "Exit: Quitting debugger" in child.before.decode("utf8") | ||||
|         # Only printed once - not on stderr. | ||||
|         assert "Exit: Quitting debugger" not in child.before.decode("utf8") | ||||
|         TestPDB.flush(child) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -1133,7 +1136,11 @@ def test_pdbcls_via_local_module(testdir): | |||
|         class Wrapped: | ||||
|             class MyPdb: | ||||
|                 def set_trace(self, *args): | ||||
|                     print("mypdb_called", args) | ||||
|                     print("settrace_called", args) | ||||
| 
 | ||||
|                 def runcall(self, *args, **kwds): | ||||
|                     print("runcall_called", args, kwds) | ||||
|                     assert "func" in kwds | ||||
|         """, | ||||
|     ) | ||||
|     result = testdir.runpytest( | ||||
|  | @ -1150,4 +1157,37 @@ def test_pdbcls_via_local_module(testdir): | |||
|         str(p1), "--pdbcls=mypdb:Wrapped.MyPdb", syspathinsert=True | ||||
|     ) | ||||
|     assert result.ret == 0 | ||||
|     result.stdout.fnmatch_lines(["*mypdb_called*", "* 1 passed in *"]) | ||||
|     result.stdout.fnmatch_lines(["*settrace_called*", "* 1 passed in *"]) | ||||
| 
 | ||||
|     # Ensure that it also works with --trace. | ||||
|     result = testdir.runpytest( | ||||
|         str(p1), "--pdbcls=mypdb:Wrapped.MyPdb", "--trace", syspathinsert=True | ||||
|     ) | ||||
|     assert result.ret == 0 | ||||
|     result.stdout.fnmatch_lines(["*runcall_called*", "* 1 passed in *"]) | ||||
| 
 | ||||
| 
 | ||||
| def test_raises_bdbquit_with_eoferror(testdir): | ||||
|     """It is not guaranteed that DontReadFromInput's read is called.""" | ||||
|     if six.PY2: | ||||
|         builtin_module = "__builtin__" | ||||
|         input_func = "raw_input" | ||||
|     else: | ||||
|         builtin_module = "builtins" | ||||
|         input_func = "input" | ||||
|     p1 = testdir.makepyfile( | ||||
|         """ | ||||
|         def input_without_read(*args, **kwargs): | ||||
|             raise EOFError() | ||||
| 
 | ||||
|         def test(monkeypatch): | ||||
|             import {builtin_module} | ||||
|             monkeypatch.setattr({builtin_module}, {input_func!r}, input_without_read) | ||||
|             __import__('pdb').set_trace() | ||||
|         """.format( | ||||
|             builtin_module=builtin_module, input_func=input_func | ||||
|         ) | ||||
|     ) | ||||
|     result = testdir.runpytest(str(p1)) | ||||
|     result.stdout.fnmatch_lines(["E *BdbQuit", "*= 1 failed in*"]) | ||||
|     assert result.ret == 1 | ||||
|  |  | |||
|  | @ -9,6 +9,7 @@ import types | |||
| 
 | ||||
| import pytest | ||||
| from _pytest.config import PytestPluginManager | ||||
| from _pytest.config.exceptions import UsageError | ||||
| from _pytest.main import EXIT_NOTESTSCOLLECTED | ||||
| from _pytest.main import Session | ||||
| 
 | ||||
|  | @ -314,6 +315,9 @@ class TestPytestPluginManagerBootstrapming(object): | |||
|         # Handles -p without following arg (when used without argparse). | ||||
|         pytestpm.consider_preparse(["-p"]) | ||||
| 
 | ||||
|         with pytest.raises(UsageError, match="^plugin main cannot be disabled$"): | ||||
|             pytestpm.consider_preparse(["-p", "no:main"]) | ||||
| 
 | ||||
|     def test_plugin_prevent_register(self, pytestpm): | ||||
|         pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) | ||||
|         l1 = pytestpm.get_plugins() | ||||
|  |  | |||
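The new guard above turns ``-p no:main`` into a hard UsageError because the core plugin cannot be unloaded; other plugins can still be blocked this way. A hedged sketch (hypothetical pytester test; cacheprovider is just one example of a blockable built-in):

    def test_disable_cacheprovider(testdir):
        testdir.makepyfile("def test_ok(): pass")
        # Blocking a non-essential built-in plugin is still permitted.
        result = testdir.runpytest("-p", "no:cacheprovider")
        assert result.ret == 0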
|  | @ -4,6 +4,7 @@ from __future__ import division | |||
| from __future__ import print_function | ||||
| 
 | ||||
| import os | ||||
| import subprocess | ||||
| import sys | ||||
| import time | ||||
| 
 | ||||
|  | @ -482,3 +483,79 @@ def test_pytester_addopts(request, monkeypatch): | |||
|         testdir.finalize() | ||||
| 
 | ||||
|     assert os.environ["PYTEST_ADDOPTS"] == "--orig-unused" | ||||
| 
 | ||||
| 
 | ||||
| def test_run_stdin(testdir): | ||||
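|     # The child sleeps for 1s before touching stdin, so both invocations | ||||
|     # below exceed the 0.1s timeout regardless of what stdin carries. | ||||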
|     with pytest.raises(testdir.TimeoutExpired): | ||||
|         testdir.run( | ||||
|             sys.executable, | ||||
|             "-c", | ||||
|             "import sys, time; time.sleep(1); print(sys.stdin.read())", | ||||
|             stdin=subprocess.PIPE, | ||||
|             timeout=0.1, | ||||
|         ) | ||||
| 
 | ||||
|     with pytest.raises(testdir.TimeoutExpired): | ||||
|         result = testdir.run( | ||||
|             sys.executable, | ||||
|             "-c", | ||||
|             "import sys, time; time.sleep(1); print(sys.stdin.read())", | ||||
|             stdin=b"input\n2ndline", | ||||
|             timeout=0.1, | ||||
|         ) | ||||
| 
 | ||||
|     result = testdir.run( | ||||
|         sys.executable, | ||||
|         "-c", | ||||
|         "import sys; print(sys.stdin.read())", | ||||
|         stdin=b"input\n2ndline", | ||||
|     ) | ||||
|     assert result.stdout.lines == ["input", "2ndline"] | ||||
|     assert result.stderr.str() == "" | ||||
|     assert result.ret == 0 | ||||
| 
 | ||||
| 
 | ||||
| def test_popen_stdin_pipe(testdir): | ||||
|     proc = testdir.popen( | ||||
|         [sys.executable, "-c", "import sys; print(sys.stdin.read())"], | ||||
|         stdout=subprocess.PIPE, | ||||
|         stderr=subprocess.PIPE, | ||||
|         stdin=subprocess.PIPE, | ||||
|     ) | ||||
|     stdin = b"input\n2ndline" | ||||
|     stdout, stderr = proc.communicate(input=stdin) | ||||
|     assert stdout.decode("utf8").splitlines() == ["input", "2ndline"] | ||||
|     assert stderr == b"" | ||||
|     assert proc.returncode == 0 | ||||
| 
 | ||||
| 
 | ||||
| def test_popen_stdin_bytes(testdir): | ||||
|     proc = testdir.popen( | ||||
|         [sys.executable, "-c", "import sys; print(sys.stdin.read())"], | ||||
|         stdout=subprocess.PIPE, | ||||
|         stderr=subprocess.PIPE, | ||||
|         stdin=b"input\n2ndline", | ||||
|     ) | ||||
|     stdout, stderr = proc.communicate() | ||||
|     assert stdout.decode("utf8").splitlines() == ["input", "2ndline"] | ||||
|     assert stderr == b"" | ||||
|     assert proc.returncode == 0 | ||||
| 
 | ||||
| 
 | ||||
| def test_popen_default_stdin_stderr_and_stdin_None(testdir): | ||||
|     # stdout and stderr default to pipes; stdin may be None so that the | ||||
|     # pipe is not closed, avoiding "ValueError: flush of closed file" | ||||
|     # from `communicate()`. | ||||
|     p1 = testdir.makepyfile( | ||||
|         """ | ||||
|         import sys | ||||
|         print(sys.stdin.read())  # empty | ||||
|         print('stdout') | ||||
|         sys.stderr.write('stderr') | ||||
|         """ | ||||
|     ) | ||||
|     proc = testdir.popen([sys.executable, str(p1)], stdin=None) | ||||
|     stdout, stderr = proc.communicate(b"ignored") | ||||
|     assert stdout.splitlines() == [b"", b"stdout"] | ||||
|     assert stderr.splitlines() == [b"stderr"] | ||||
|     assert proc.returncode == 0 | ||||
|  |  | |||
|  | @ -47,8 +47,8 @@ class TestWarningsRecorderChecker(object): | |||
|             assert values is rec.list | ||||
|             pytest.raises(AssertionError, rec.pop) | ||||
| 
 | ||||
|     @pytest.mark.issue(4243) | ||||
|     def test_warn_stacklevel(self): | ||||
|         """#4243""" | ||||
|         rec = WarningsRecorder() | ||||
|         with rec: | ||||
|             warnings.warn("test", DeprecationWarning, 2) | ||||
|  |  | |||
|  | @ -580,8 +580,31 @@ def test_pytest_exit_returncode(testdir): | |||
|     """ | ||||
|     ) | ||||
|     result = testdir.runpytest() | ||||
|     result.stdout.fnmatch_lines(["*! *Exit: some exit msg !*"]) | ||||
|     # Assert no output on stderr, except for unreliable ResourceWarnings. | ||||
|     # (https://github.com/pytest-dev/pytest/issues/5088) | ||||
|     assert [ | ||||
|         x | ||||
|         for x in result.stderr.lines | ||||
|         if not x.startswith("Exception ignored in:") | ||||
|         and not x.startswith("ResourceWarning") | ||||
|     ] == [""] | ||||
|     assert result.ret == 99 | ||||
| 
 | ||||
|     # It prints to stderr also in case of exit during pytest_sessionstart. | ||||
|     testdir.makeconftest( | ||||
|         """ | ||||
|         import pytest | ||||
| 
 | ||||
|         def pytest_sessionstart(): | ||||
|             pytest.exit("during_sessionstart", 98) | ||||
|         """ | ||||
|     ) | ||||
|     result = testdir.runpytest() | ||||
|     result.stdout.fnmatch_lines(["*! *Exit: during_sessionstart !*"]) | ||||
|     assert result.stderr.lines == ["Exit: during_sessionstart", ""] | ||||
|     assert result.ret == 98 | ||||
| 
 | ||||
| 
 | ||||
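The returncodes asserted above come straight from pytest.exit(). A minimal sketch (hypothetical test module):

    import pytest

    def test_bail_out():
        # Ends the whole session; the pytest process exits with status 99.
        pytest.exit("some exit msg", returncode=99)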
| def test_pytest_fail_notrace_runtest(testdir): | ||||
|     """Test pytest.fail(..., pytrace=False) does not show tracebacks during test run.""" | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| # coding=utf8 | ||||
| from __future__ import absolute_import | ||||
| from __future__ import division | ||||
| from __future__ import print_function | ||||
|  | @ -6,7 +7,6 @@ import sys | |||
| 
 | ||||
| import pytest | ||||
| from _pytest.runner import runtestprotocol | ||||
| from _pytest.skipping import folded_skips | ||||
| from _pytest.skipping import MarkEvaluator | ||||
| from _pytest.skipping import pytest_runtest_setup | ||||
| 
 | ||||
|  | @ -749,40 +749,6 @@ def test_skipif_class(testdir): | |||
|     result.stdout.fnmatch_lines(["*2 skipped*"]) | ||||
| 
 | ||||
| 
 | ||||
| def test_skip_reasons_folding(): | ||||
|     path = "xyz" | ||||
|     lineno = 3 | ||||
|     message = "justso" | ||||
|     longrepr = (path, lineno, message) | ||||
| 
 | ||||
|     class X(object): | ||||
|         pass | ||||
| 
 | ||||
|     ev1 = X() | ||||
|     ev1.when = "execute" | ||||
|     ev1.skipped = True | ||||
|     ev1.longrepr = longrepr | ||||
| 
 | ||||
|     ev2 = X() | ||||
|     ev2.when = "execute" | ||||
|     ev2.longrepr = longrepr | ||||
|     ev2.skipped = True | ||||
| 
 | ||||
|     # ev3 might be a collection report | ||||
|     ev3 = X() | ||||
|     ev3.when = "collect" | ||||
|     ev3.longrepr = longrepr | ||||
|     ev3.skipped = True | ||||
| 
 | ||||
|     values = folded_skips([ev1, ev2, ev3]) | ||||
|     assert len(values) == 1 | ||||
|     num, fspath, lineno, reason = values[0] | ||||
|     assert num == 3 | ||||
|     assert fspath == path | ||||
|     assert lineno == lineno | ||||
|     assert reason == message | ||||
| 
 | ||||
| 
 | ||||
| def test_skipped_reasons_functional(testdir): | ||||
|     testdir.makepyfile( | ||||
|         test_one=""" | ||||
|  | @ -1208,6 +1174,6 @@ def test_summary_list_after_errors(testdir): | |||
|         [ | ||||
|             "=* FAILURES *=", | ||||
|             "*= short test summary info =*", | ||||
|             "FAILED test_summary_list_after_errors.py::test_fail", | ||||
|             "FAILED test_summary_list_after_errors.py::test_fail - assert 0", | ||||
|         ] | ||||
|     ) | ||||
|  |  | |||
|  | @ -76,7 +76,7 @@ def broken_testdir(testdir): | |||
| 
 | ||||
| 
 | ||||
| def test_run_without_stepwise(stepwise_testdir): | ||||
|     result = stepwise_testdir.runpytest("-v", "--strict", "--fail") | ||||
|     result = stepwise_testdir.runpytest("-v", "--strict-markers", "--fail") | ||||
| 
 | ||||
|     result.stdout.fnmatch_lines(["*test_success_before_fail PASSED*"]) | ||||
|     result.stdout.fnmatch_lines(["*test_fail_on_flag FAILED*"]) | ||||
|  | @ -85,7 +85,9 @@ def test_run_without_stepwise(stepwise_testdir): | |||
| 
 | ||||
| def test_fail_and_continue_with_stepwise(stepwise_testdir): | ||||
|     # Run the tests with a failing second test. | ||||
|     result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise", "--fail") | ||||
|     result = stepwise_testdir.runpytest( | ||||
|         "-v", "--strict-markers", "--stepwise", "--fail" | ||||
|     ) | ||||
|     assert not result.stderr.str() | ||||
| 
 | ||||
|     stdout = result.stdout.str() | ||||
|  | @ -95,7 +97,7 @@ def test_fail_and_continue_with_stepwise(stepwise_testdir): | |||
|     assert "test_success_after_fail" not in stdout | ||||
| 
 | ||||
|     # "Fix" the test that failed in the last run and run it again. | ||||
|     result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise") | ||||
|     result = stepwise_testdir.runpytest("-v", "--strict-markers", "--stepwise") | ||||
|     assert not result.stderr.str() | ||||
| 
 | ||||
|     stdout = result.stdout.str() | ||||
|  | @ -107,7 +109,12 @@ def test_fail_and_continue_with_stepwise(stepwise_testdir): | |||
| 
 | ||||
| def test_run_with_skip_option(stepwise_testdir): | ||||
|     result = stepwise_testdir.runpytest( | ||||
|         "-v", "--strict", "--stepwise", "--stepwise-skip", "--fail", "--fail-last" | ||||
|         "-v", | ||||
|         "--strict-markers", | ||||
|         "--stepwise", | ||||
|         "--stepwise-skip", | ||||
|         "--fail", | ||||
|         "--fail-last", | ||||
|     ) | ||||
|     assert not result.stderr.str() | ||||
| 
 | ||||
|  | @ -120,7 +127,7 @@ def test_run_with_skip_option(stepwise_testdir): | |||
| 
 | ||||
| 
 | ||||
| def test_fail_on_errors(error_testdir): | ||||
|     result = error_testdir.runpytest("-v", "--strict", "--stepwise") | ||||
|     result = error_testdir.runpytest("-v", "--strict-markers", "--stepwise") | ||||
| 
 | ||||
|     assert not result.stderr.str() | ||||
|     stdout = result.stdout.str() | ||||
|  | @ -131,7 +138,7 @@ def test_fail_on_errors(error_testdir): | |||
| 
 | ||||
| def test_change_testfile(stepwise_testdir): | ||||
|     result = stepwise_testdir.runpytest( | ||||
|         "-v", "--strict", "--stepwise", "--fail", "test_a.py" | ||||
|         "-v", "--strict-markers", "--stepwise", "--fail", "test_a.py" | ||||
|     ) | ||||
|     assert not result.stderr.str() | ||||
| 
 | ||||
|  | @ -140,7 +147,9 @@ def test_change_testfile(stepwise_testdir): | |||
| 
 | ||||
|     # Make sure the second test run starts from the beginning, since the | ||||
|     # test to continue from does not exist in testfile_b. | ||||
|     result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise", "test_b.py") | ||||
|     result = stepwise_testdir.runpytest( | ||||
|         "-v", "--strict-markers", "--stepwise", "test_b.py" | ||||
|     ) | ||||
|     assert not result.stderr.str() | ||||
| 
 | ||||
|     stdout = result.stdout.str() | ||||
|  | @ -149,7 +158,11 @@ def test_change_testfile(stepwise_testdir): | |||
| 
 | ||||
| def test_stop_on_collection_errors(broken_testdir): | ||||
|     result = broken_testdir.runpytest( | ||||
|         "-v", "--strict", "--stepwise", "working_testfile.py", "broken_testfile.py" | ||||
|         "-v", | ||||
|         "--strict-markers", | ||||
|         "--stepwise", | ||||
|         "working_testfile.py", | ||||
|         "broken_testfile.py", | ||||
|     ) | ||||
| 
 | ||||
|     stdout = result.stdout.str() | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| # encoding: utf-8 | ||||
| """ | ||||
| terminal reporting of the full testing process. | ||||
| """ | ||||
|  | @ -16,6 +17,8 @@ import py | |||
| import pytest | ||||
| from _pytest.main import EXIT_NOTESTSCOLLECTED | ||||
| from _pytest.reports import BaseReport | ||||
| from _pytest.terminal import _folded_skips | ||||
| from _pytest.terminal import _get_line_with_reprcrash_message | ||||
| from _pytest.terminal import _plugin_nameversions | ||||
| from _pytest.terminal import build_summary_stats_line | ||||
| from _pytest.terminal import getreportopt | ||||
|  | @ -505,6 +508,37 @@ class TestTerminalFunctional(object): | |||
|         ) | ||||
|         assert result.ret == 0 | ||||
| 
 | ||||
|     def test_deselected_with_hookwrapper(self, testdir): | ||||
|         testdir.makeconftest( | ||||
|             """ | ||||
|             import pytest | ||||
| 
 | ||||
|             @pytest.hookimpl(hookwrapper=True) | ||||
|             def pytest_collection_modifyitems(config, items): | ||||
|                 yield | ||||
|                 deselected = items.pop() | ||||
|                 config.hook.pytest_deselected(items=[deselected]) | ||||
|             """ | ||||
|         ) | ||||
|         testpath = testdir.makepyfile( | ||||
|             """ | ||||
|             def test_one(): | ||||
|                 pass | ||||
|             def test_two(): | ||||
|                 pass | ||||
|             def test_three(): | ||||
|                 pass | ||||
|             """ | ||||
|         ) | ||||
|         result = testdir.runpytest(testpath) | ||||
|         result.stdout.fnmatch_lines( | ||||
|             [ | ||||
|                 "collected 3 items / 1 deselected / 2 selected", | ||||
|                 "*= 2 passed, 1 deselected in*", | ||||
|             ] | ||||
|         ) | ||||
|         assert result.ret == 0 | ||||
| 
 | ||||
|     def test_show_deselected_items_using_markexpr_before_test_execution(self, testdir): | ||||
|         testdir.makepyfile( | ||||
|             test_show_deselected=""" | ||||
|  | @ -726,12 +760,18 @@ class TestTerminalFunctional(object): | |||
|         result.stdout.fnmatch_lines(["collected 3 items", "hello from hook: 3 items"]) | ||||
| 
 | ||||
| 
 | ||||
| def test_fail_extra_reporting(testdir): | ||||
|     testdir.makepyfile("def test_this(): assert 0") | ||||
| def test_fail_extra_reporting(testdir, monkeypatch): | ||||
|     monkeypatch.setenv("COLUMNS", "80") | ||||
|     testdir.makepyfile("def test_this(): assert 0, 'this_failed' * 100") | ||||
|     result = testdir.runpytest() | ||||
|     assert "short test summary" not in result.stdout.str() | ||||
|     result = testdir.runpytest("-rf") | ||||
|     result.stdout.fnmatch_lines(["*test summary*", "FAIL*test_fail_extra_reporting*"]) | ||||
|     result.stdout.fnmatch_lines( | ||||
|         [ | ||||
|             "*test summary*", | ||||
|             "FAILED test_fail_extra_reporting.py::test_this - AssertionError: this_failedt...", | ||||
|         ] | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| def test_fail_reporting_on_pass(testdir): | ||||
|  | @ -768,11 +808,19 @@ def test_pass_output_reporting(testdir): | |||
|     assert "test_pass_has_output" not in s | ||||
|     assert "Four score and seven years ago..." not in s | ||||
|     assert "test_pass_no_output" not in s | ||||
|     result = testdir.runpytest("-rP") | ||||
|     result = testdir.runpytest("-rPp") | ||||
|     result.stdout.fnmatch_lines( | ||||
|         ["*test_pass_has_output*", "Four score and seven years ago..."] | ||||
|         [ | ||||
|             "*= PASSES =*", | ||||
|             "*_ test_pass_has_output _*", | ||||
|             "*- Captured stdout call -*", | ||||
|             "Four score and seven years ago...", | ||||
|             "*= short test summary info =*", | ||||
|             "PASSED test_pass_output_reporting.py::test_pass_has_output", | ||||
|             "PASSED test_pass_output_reporting.py::test_pass_no_output", | ||||
|             "*= 2 passed in *", | ||||
|         ] | ||||
|     ) | ||||
|     assert "test_pass_no_output" not in result.stdout.str() | ||||
| 
 | ||||
| 
 | ||||
| def test_color_yes(testdir): | ||||
|  | @ -830,14 +878,23 @@ def test_getreportopt(): | |||
|     config.option.reportchars = "sfxw" | ||||
|     assert getreportopt(config) == "sfx" | ||||
| 
 | ||||
|     config.option.reportchars = "sfx" | ||||
|     # Now without --disable-warnings (warnings enabled): "w" is kept. | ||||
|     config.option.disable_warnings = False | ||||
|     config.option.reportchars = "a" | ||||
|     assert getreportopt(config) == "sxXwEf"  # NOTE: "w" included! | ||||
| 
 | ||||
|     config.option.reportchars = "sfx" | ||||
|     assert getreportopt(config) == "sfxw" | ||||
| 
 | ||||
|     config.option.reportchars = "sfxw" | ||||
|     config.option.disable_warnings = False | ||||
|     assert getreportopt(config) == "sfxw" | ||||
| 
 | ||||
|     config.option.reportchars = "a" | ||||
|     assert getreportopt(config) == "sxXwEf"  # NOTE: "w" included! | ||||
| 
 | ||||
|     config.option.reportchars = "A" | ||||
|     assert getreportopt(config) == "sxXwEfpP" | ||||
| 
 | ||||
| 
 | ||||
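A hedged illustration of the new ``A`` report character asserted above (hypothetical pytester test): ``-rA`` selects every summary category, including passed tests.

    def test_summary_shows_everything(testdir):
        testdir.makepyfile(
            """
            def test_pass():
                pass
            def test_fail():
                assert 0
            """
        )
        result = testdir.runpytest("-rA")
        # Each call scans the whole output, so category order does not matter.
        result.stdout.fnmatch_lines(["*short test summary info*"])
        result.stdout.fnmatch_lines(["FAILED*test_fail*"])
        result.stdout.fnmatch_lines(["PASSED*test_pass*"])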
| def test_terminalreporter_reportopt_addopts(testdir): | ||||
|     testdir.makeini("[pytest]\naddopts=-rs") | ||||
|  | @ -1524,3 +1581,106 @@ class TestProgressWithTeardown(object): | |||
|         monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False) | ||||
|         output = testdir.runpytest("-n2") | ||||
|         output.stdout.re_match_lines([r"[\.E]{40} \s+ \[100%\]"]) | ||||
| 
 | ||||
| 
 | ||||
| def test_skip_reasons_folding(): | ||||
|     path = "xyz" | ||||
|     lineno = 3 | ||||
|     message = "justso" | ||||
|     longrepr = (path, lineno, message) | ||||
| 
 | ||||
|     class X(object): | ||||
|         pass | ||||
| 
 | ||||
|     ev1 = X() | ||||
|     ev1.when = "execute" | ||||
|     ev1.skipped = True | ||||
|     ev1.longrepr = longrepr | ||||
| 
 | ||||
|     ev2 = X() | ||||
|     ev2.when = "execute" | ||||
|     ev2.longrepr = longrepr | ||||
|     ev2.skipped = True | ||||
| 
 | ||||
|     # ev3 might be a collection report | ||||
|     ev3 = X() | ||||
|     ev3.when = "collect" | ||||
|     ev3.longrepr = longrepr | ||||
|     ev3.skipped = True | ||||
| 
 | ||||
|     values = _folded_skips([ev1, ev2, ev3]) | ||||
|     assert len(values) == 1 | ||||
|     num, fspath, found_lineno, reason = values[0] | ||||
|     assert num == 3 | ||||
|     assert fspath == path | ||||
|     assert found_lineno == lineno | ||||
|     assert reason == message | ||||
| 
 | ||||
| 
 | ||||
| def test_line_with_reprcrash(monkeypatch): | ||||
|     import _pytest.terminal | ||||
|     from wcwidth import wcswidth | ||||
| 
 | ||||
|     mocked_verbose_word = "FAILED" | ||||
| 
 | ||||
|     mocked_pos = "some::nodeid" | ||||
| 
 | ||||
|     def mock_get_pos(*args): | ||||
|         return mocked_pos | ||||
| 
 | ||||
|     monkeypatch.setattr(_pytest.terminal, "_get_pos", mock_get_pos) | ||||
| 
 | ||||
|     class config(object): | ||||
|         pass | ||||
| 
 | ||||
|     class rep(object): | ||||
|         def _get_verbose_word(self, *args): | ||||
|             return mocked_verbose_word | ||||
| 
 | ||||
|         class longrepr: | ||||
|             class reprcrash: | ||||
|                 pass | ||||
| 
 | ||||
|     def check(msg, width, expected): | ||||
|         __tracebackhide__ = True | ||||
|         if msg: | ||||
|             rep.longrepr.reprcrash.message = msg | ||||
|         actual = _get_line_with_reprcrash_message(config, rep(), width) | ||||
| 
 | ||||
|         assert actual == expected | ||||
|         if actual != "%s %s" % (mocked_verbose_word, mocked_pos): | ||||
|             assert len(actual) <= width | ||||
|             assert wcswidth(actual) <= width | ||||
| 
 | ||||
|     # AttributeError with message | ||||
|     check(None, 80, "FAILED some::nodeid") | ||||
| 
 | ||||
|     check("msg", 80, "FAILED some::nodeid - msg") | ||||
|     check("msg", 3, "FAILED some::nodeid") | ||||
| 
 | ||||
|     check("msg", 24, "FAILED some::nodeid") | ||||
|     check("msg", 25, "FAILED some::nodeid - msg") | ||||
| 
 | ||||
|     check("some longer msg", 24, "FAILED some::nodeid") | ||||
|     check("some longer msg", 25, "FAILED some::nodeid - ...") | ||||
|     check("some longer msg", 26, "FAILED some::nodeid - s...") | ||||
| 
 | ||||
|     check("some\nmessage", 25, "FAILED some::nodeid - ...") | ||||
|     check("some\nmessage", 26, "FAILED some::nodeid - some") | ||||
|     check("some\nmessage", 80, "FAILED some::nodeid - some") | ||||
| 
 | ||||
|     # Test unicode safety. | ||||
|     check(u"😄😄😄😄😄\n2nd line", 25, u"FAILED some::nodeid - ...") | ||||
|     check(u"😄😄😄😄😄\n2nd line", 26, u"FAILED some::nodeid - ...") | ||||
|     check(u"😄😄😄😄😄\n2nd line", 27, u"FAILED some::nodeid - 😄...") | ||||
|     check(u"😄😄😄😄😄\n2nd line", 28, u"FAILED some::nodeid - 😄...") | ||||
|     check(u"😄😄😄😄😄\n2nd line", 29, u"FAILED some::nodeid - 😄😄...") | ||||
| 
 | ||||
|     # NOTE: constructed, not sure if this is supported. | ||||
|     # It would fail if not using u"" in Python 2 for mocked_pos. | ||||
|     mocked_pos = u"nodeid::😄::withunicode" | ||||
|     check(u"😄😄😄😄😄\n2nd line", 29, u"FAILED nodeid::😄::withunicode") | ||||
|     check(u"😄😄😄😄😄\n2nd line", 40, u"FAILED nodeid::😄::withunicode - 😄😄...") | ||||
|     check(u"😄😄😄😄😄\n2nd line", 41, u"FAILED nodeid::😄::withunicode - 😄😄...") | ||||
|     check(u"😄😄😄😄😄\n2nd line", 42, u"FAILED nodeid::😄::withunicode - 😄😄😄...") | ||||
|     check(u"😄😄😄😄😄\n2nd line", 80, u"FAILED nodeid::😄::withunicode - 😄😄😄😄😄") | ||||
|  |  | |||
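The width bookkeeping above counts terminal cells rather than characters, which is why each emoji accounts for two cells. An illustrative helper (a sketch only, not pytest's actual implementation):

    from wcwidth import wcswidth

    def fit_to_width(line, width):
        # Trim from the end until the rendered width (in cells) fits,
        # then append the ellipsis marker used by the summary line.
        if wcswidth(line) <= width:
            return line
        while line and wcswidth(line + "...") > width:
            line = line[:-1]
        return line + "..."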
|  | @ -58,8 +58,8 @@ class TestTempdirHandler(object): | |||
|         assert tmp2.relto(t.getbasetemp()).startswith("this") | ||||
|         assert tmp2 != tmp | ||||
| 
 | ||||
|     @pytest.mark.issue(4425) | ||||
|     def test_tmppath_relative_basetemp_absolute(self, tmp_path, monkeypatch): | ||||
|         """#4425""" | ||||
|         from _pytest.tmpdir import TempPathFactory | ||||
| 
 | ||||
|         monkeypatch.chdir(tmp_path) | ||||
|  |  | |||
|  | @ -930,11 +930,11 @@ def test_class_method_containing_test_issue1558(testdir): | |||
|     reprec.assertoutcome(passed=1) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.issue(3498) | ||||
| @pytest.mark.parametrize( | ||||
|     "base", ["six.moves.builtins.object", "unittest.TestCase", "unittest2.TestCase"] | ||||
| ) | ||||
| def test_usefixtures_marker_on_unittest(base, testdir): | ||||
|     """#3498""" | ||||
|     module = base.rsplit(".", 1)[0] | ||||
|     pytest.importorskip(module) | ||||
|     testdir.makepyfile( | ||||
|  |  | |||
|  | @ -302,7 +302,7 @@ def test_filterwarnings_mark_registration(testdir): | |||
|             pass | ||||
|     """ | ||||
|     ) | ||||
|     result = testdir.runpytest("--strict") | ||||
|     result = testdir.runpytest("--strict-markers") | ||||
|     assert result.ret == 0 | ||||
| 
 | ||||
| 
 | ||||
|  | @ -630,7 +630,7 @@ def test_removed_in_pytest4_warning_as_error(testdir, change_default): | |||
| class TestAssertionWarnings: | ||||
|     @staticmethod | ||||
|     def assert_result_warns(result, msg): | ||||
|         result.stdout.fnmatch_lines(["*PytestWarning: %s*" % msg]) | ||||
|         result.stdout.fnmatch_lines(["*PytestAssertRewriteWarning: %s*" % msg]) | ||||
| 
 | ||||
|     def test_tuple_warning(self, testdir): | ||||
|         testdir.makepyfile( | ||||
|  |  | |||
11  tox.ini
							|  | @ -139,7 +139,7 @@ commands = python scripts/release.py {posargs} | |||
| 
 | ||||
| [pytest] | ||||
| minversion = 2.0 | ||||
| addopts = -ra -p pytester | ||||
| addopts = -ra -p pytester --strict-markers | ||||
| rsyncdirs = tox.ini doc src testing | ||||
| python_files = test_*.py *_test.py testing/*/*.py | ||||
| python_classes = Test Acceptance | ||||
|  | @ -166,7 +166,16 @@ filterwarnings = | |||
|     ignore::pytest.PytestExperimentalApiWarning | ||||
|     # Do not cause SyntaxError for invalid escape sequences in py37. | ||||
|     default:invalid escape sequence:DeprecationWarning | ||||
|     # ignore use of unregistered marks, because we use many to test the implementation | ||||
|     ignore::_pytest.warning_types.PytestUnknownMarkWarning | ||||
| pytester_example_dir = testing/example_scripts | ||||
| markers = | ||||
|     # dummy markers for testing | ||||
|     foo | ||||
|     bar | ||||
|     baz | ||||
|     # conftest.py reorders tests, moving slow ones to the end of the list | ||||
|     slow | ||||
| 
 | ||||
| [flake8] | ||||
| max-line-length = 120 | ||||
|  |  | |||
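The ``slow`` marker registered above pairs with the module-wide ``pytestmark = pytest.mark.slow`` hunk earlier in this diff; the reordering itself lives in a conftest hook. A hedged sketch of such a hook (hypothetical conftest.py, not shown in this commit):

    def pytest_collection_modifyitems(items):
        # Keep relative order but move tests marked "slow" to the end.
        slow = [item for item in items if item.get_closest_marker("slow")]
        fast = [item for item in items if not item.get_closest_marker("slow")]
        items[:] = fast + slow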