diff --git a/CHANGELOG b/CHANGELOG
index adc1b95df..0ed06fc2f 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,7 +1,8 @@
 Changes between 2.0.1 and 2.0.2
 ----------------------------------------------
 
-- tackle issue32 - half the overhead for running test functions
+- tackle issue32 - speed up test runs of very quick test functions
+  by reducing the relative overhead
 
 - fix issue30 - extended xfail/skipif handling and improved reporting.
   If you have a syntax error in your skip/xfail
@@ -9,7 +10,8 @@ Changes between 2.0.1 and 2.0.2
   Also you can now access module globals from xfail/skipif
   expressions so that this for example works now::
 
-
+
+    import pytest
     import mymodule
     @pytest.mark.skipif("mymodule.__version__[0] != '1'")
     def test_function():
diff --git a/doc/announce/index.txt b/doc/announce/index.txt
index f4ddc19ef..82e58e467 100644
--- a/doc/announce/index.txt
+++ b/doc/announce/index.txt
@@ -5,6 +5,7 @@ Release announcements
 .. toctree::
    :maxdepth: 2
 
+   release-2.0.2
    release-2.0.1
    release-2.0.0
diff --git a/doc/announce/release-2.0.2.txt b/doc/announce/release-2.0.2.txt
new file mode 100644
index 000000000..faf56347f
--- /dev/null
+++ b/doc/announce/release-2.0.2.txt
@@ -0,0 +1,73 @@
+py.test 2.0.2: bug fixes, improved xfail/skip expressions, speedups
+===========================================================================
+
+Welcome to pytest-2.0.2, a maintenance and bug fix release of pytest,
+a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
+and the latest PyPy interpreters. See the extensive docs with tested
+examples here:
+
+    http://pytest.org/
+
+If you want to install or upgrade pytest, just type one of::
+
+    pip install -U pytest # or
+    easy_install -U pytest
+
+Many thanks to all issue reporters and people asking questions
+or complaining, particularly Jurko for his insistence,
+Laura, Victor and Brianna for helping with improvements
+and Ronny for his general advice.
+
+best,
+holger krekel
+
+Changes between 2.0.1 and 2.0.2
+----------------------------------------------
+
+- tackle issue32 - speed up test runs of very quick test functions
+  by reducing the relative overhead
+
+- fix issue30 - extended xfail/skipif handling and improved reporting.
+  If you have a syntax error in your skip/xfail
+  expressions you now get nice error reports.
+
+  Also you can now access module globals from xfail/skipif
+  expressions so that this for example works now::
+
+    import pytest
+    import mymodule
+    @pytest.mark.skipif("mymodule.__version__[0] != '1'")
+    def test_function():
+        pass
+
+  This will not run the test function if the module's version string
+  does not start with a "1". Note that specifying a string instead
+  of a boolean expression allows py.test to report meaningful information
+  when summarizing a test run, namely which conditions led to skipping
+  (or xfail-ing) tests.
+
+- fix issue28 - setup_method and pytest_generate_tests now work together:
+  the setup_method fixture method also gets called for test function
+  invocations generated from the pytest_generate_tests hook.
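+
+  A minimal sketch of such a combination (class, method and argument
+  names are made up for this illustration)::
+
+    def pytest_generate_tests(metafunc):
+        if "arg" in metafunc.funcargnames:
+            for value in (1, 2):
+                metafunc.addcall(funcargs=dict(arg=value))
+
+    class TestSetup:
+        def setup_method(self, method):
+            # with this fix, runs before *each* generated invocation
+            self.resource = "prepared"
+        def test_arg(self, arg):
+            assert self.resource == "prepared"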
+
+- fix issue27 - collectonly and keyword-selection (-k) now work together.
+  Also, if you do "py.test --collectonly -q" you now get a flat list
+  of test ids that you can use to paste to the py.test commandline
+  in order to execute a particular test.
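+
+  For example (the test id below is made up; the exact format is
+  whatever your "--collectonly -q" run prints)::
+
+    py.test --collectonly -q                 # flat list of test ids
+    py.test "test_module.py::test_function"  # paste one id to run just it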
+
+- fix issue25 - avoid reported problems with --pdb and python3.2/encodings output
+
+- fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP.
+  Starting with Python3.2, os.symlink may be supported. By requiring
+  a newer py lib version the py.path.local() implementation acknowledges
+  this.
+
+- fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and
+  particular thanks to Laura Creighton who also reviewed parts of the
+  documentation.
+
+- fix slightly wrong output of verbose progress reporting for classes
+  (thanks Amaury)
+
+- more precise avoidance of deprecation warnings for node.Class|Function
+  accesses
+
+- avoid std unittest assertion helper code in tracebacks (thanks Ronny)
diff --git a/doc/assert.txt b/doc/assert.txt
index f60e0684a..e1a991b88 100644
--- a/doc/assert.txt
+++ b/doc/assert.txt
@@ -23,7 +23,7 @@ assertion fails you will see the value of ``x``::
 
     $ py.test test_assert1.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 1 items
 
     test_assert1.py F
@@ -37,7 +37,7 @@ assertion fails you will see the value of ``x``::
     E        +  where 3 = f()
 
     test_assert1.py:5: AssertionError
-    ========================= 1 failed in 0.03 seconds =========================
+    ========================= 1 failed in 0.07 seconds =========================
 
 Reporting details about the failing assertion is achieved by re-evaluating
 the assert expression and recording the intermediate values.
@@ -108,7 +108,7 @@ if you run this module::
 
     $ py.test test_assert2.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 1 items
 
     test_assert2.py F
diff --git a/doc/capture.txt b/doc/capture.txt
index 1434e6f9a..74f72eb1c 100644
--- a/doc/capture.txt
+++ b/doc/capture.txt
@@ -64,7 +64,7 @@ of the failing function and hide the other one::
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 2 items
 
     test_module.py .F
@@ -78,7 +78,7 @@ of the failing function and hide the other one::
 
     test_module.py:9: AssertionError
     ----------------------------- Captured stdout ------------------------------
-    setting up
+    setting up
     ==================== 1 failed, 1 passed in 0.02 seconds ====================
 
 Accessing captured output from a test function
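The capture.txt hunk above ends at the heading "Accessing captured output
from a test function", which refers to the ``capsys`` function argument; a
minimal sketch of its use (the test name is invented here)::

    def test_output(capsys):
        print ("hello")
        out, err = capsys.readouterr()  # snapshot of captured stdout/stderr
        assert out == "hello\n"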
diff --git a/doc/doctest.txt b/doc/doctest.txt
index 3db3430d7..84c2bcbd5 100644
--- a/doc/doctest.txt
+++ b/doc/doctest.txt
@@ -44,9 +44,9 @@ then you can just invoke ``py.test`` without command line options::
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 1 items
 
     mymodule.py .
 
-    ========================= 1 passed in 0.02 seconds =========================
+    ========================= 1 passed in 0.01 seconds =========================
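The ``mymodule.py .`` line above means one doctest inside the module was
collected and passed. A module of roughly this shape would produce it,
assuming module-level doctest collection is enabled (for instance via the
``--doctest-modules`` option; the ``add`` function is invented)::

    def add(a, b):
        """Return the sum of a and b.

        >>> add(2, 3)
        5
        """
        return a + b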
diff --git a/doc/example/mysetup.txt b/doc/example/mysetup.txt
index 460d4018c..fff91748b 100644
--- a/doc/example/mysetup.txt
+++ b/doc/example/mysetup.txt
@@ -49,7 +49,7 @@ You can now run the test::
 
     $ py.test test_sample.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 1 items
 
     test_sample.py F
@@ -57,7 +57,7 @@ You can now run the test::
     ================================= FAILURES =================================
     _______________________________ test_answer ________________________________
 
-    mysetup =
+    mysetup =
 
     def test_answer(mysetup):
         app = mysetup.myapp()
@@ -122,12 +122,12 @@ Running it yields::
 
     $ py.test test_ssh.py -rs
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 1 items
 
     test_ssh.py s
     ========================= short test summary info ==========================
-    SKIP [1] /tmp/doc-exec-35/conftest.py:22: specify ssh host with --ssh
+    SKIP [1] /tmp/doc-exec-36/conftest.py:22: specify ssh host with --ssh
 
     ======================== 1 skipped in 0.02 seconds =========================
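The ``mysetup`` argument in the traceback above comes from a funcarg
factory in ``conftest.py``; a sketch of the pattern this document builds
up (class internals are illustrative, and ``MyApp`` is assumed to be
defined in the project)::

    class MySetup(object):
        def myapp(self):
            return MyApp()

    def pytest_funcarg__mysetup(request):
        return MySetup()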
diff --git a/doc/example/nonpython.txt b/doc/example/nonpython.txt
index ae304bb8a..9692400df 100644
--- a/doc/example/nonpython.txt
+++ b/doc/example/nonpython.txt
@@ -27,7 +27,7 @@ now execute the test specification::
 
     nonpython $ py.test test_simple.yml
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 2 items
 
     test_simple.yml .F
@@ -37,7 +37,7 @@ now execute the test specification::
     usecase execution failed
        spec failed: 'some': 'other'
        no further details known at this point.
-    ==================== 1 failed, 1 passed in 0.36 seconds ====================
+    ==================== 1 failed, 1 passed in 0.51 seconds ====================
 
 You get one dot for the passing ``sub1: sub1`` check and one failure.
 Obviously in the above ``conftest.py`` you'll want to implement a more
@@ -56,7 +56,7 @@ reporting in ``verbose`` mode::
 
     nonpython $ py.test -v
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2 -- /home/hpk/venv/0/bin/python
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2 -- /home/hpk/venv/0/bin/python
     collecting ... collected 2 items
 
     test_simple.yml:1: usecase: ok PASSED
@@ -67,12 +67,17 @@ reporting in ``verbose`` mode::
     usecase execution failed
        spec failed: 'some': 'other'
        no further details known at this point.
-    ==================== 1 failed, 1 passed in 0.08 seconds ====================
+    ==================== 1 failed, 1 passed in 0.06 seconds ====================
 
 While developing your custom test collection and execution it's also
 interesting to just look at the collection tree::
 
     nonpython $ py.test --collectonly
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
+    collecting ... collected 2 items
+
+    ============================= in 0.06 seconds =============================
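The ``test_simple.yml`` runs above rely on a ``conftest.py`` that teaches
py.test to collect YAML files; an abridged sketch of that hook (class
bodies simplified, PyYAML assumed to be installed)::

    import pytest

    def pytest_collect_file(path, parent):
        if path.ext == ".yml" and path.basename.startswith("test"):
            return YamlFile(path, parent)

    class YamlFile(pytest.File):
        def collect(self):
            import yaml
            raw = yaml.load(self.fspath.open())
            for name, spec in raw.items():
                yield YamlItem(name, self, spec)

    class YamlItem(pytest.Item):
        def __init__(self, name, parent, spec):
            super(YamlItem, self).__init__(name, parent)
            self.spec = spec
        def runtest(self):
            for key, value in self.spec.items():
                assert key == value  # the "usecase" check being demonstrated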
diff --git a/doc/example/parametrize.txt b/doc/example/parametrize.txt
index f28996bbd..e396b728b 100644
--- a/doc/example/parametrize.txt
+++ b/doc/example/parametrize.txt
@@ -62,7 +62,7 @@ let's run the full monty::
     E       assert 4 < 4
 
     test_compute.py:3: AssertionError
-    1 failed, 4 passed in 0.03 seconds
+    1 failed, 4 passed in 0.02 seconds
 
 As expected when running the full range of ``param1`` values
 we'll get an error on the last one.
@@ -113,9 +113,14 @@ each test is actually run::
 Let's first see what it looks like at collection time::
 
     $ py.test test_backends.py --collectonly
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
+    collecting ... collected 2 items
+
+    ============================= in 0.00 seconds =============================
 
 And then when we run the test::
 
@@ -125,7 +130,7 @@ And then when we run the test::
     ================================= FAILURES =================================
     __________________________ test_db_initialized[1] __________________________
 
-    db =
+    db =
 
     def test_db_initialized(db):
         # a dummy test
@@ -179,7 +184,7 @@ the respective settings::
     ================================= FAILURES =================================
     __________________________ test_db_initialized[1] __________________________
 
-    db =
+    db =
 
     def test_db_initialized(db):
         # a dummy test
@@ -190,7 +195,7 @@ the respective settings::
     test_backends.py:6: Failed
     _________________________ TestClass.test_equals[0] _________________________
 
-    self = , a = 1, b = 2
+    self = , a = 1, b = 2
 
     def test_equals(self, a, b):
     >       assert a == b
@@ -199,14 +204,14 @@ the respective settings::
     test_parametrize.py:17: AssertionError
     ______________________ TestClass.test_zerodivision[1] ______________________
 
-    self = , a = 3, b = 2
+    self = , a = 3, b = 2
 
     def test_zerodivision(self, a, b):
     >       pytest.raises(ZeroDivisionError, "a/b")
     E       Failed: DID NOT RAISE
 
     test_parametrize.py:20: Failed
-    3 failed, 3 passed in 0.04 seconds
+    3 failed, 3 passed in 0.03 seconds
 
 Parametrizing test methods through a decorator
 --------------------------------------------------------------
@@ -247,7 +252,7 @@ Running it gives similar results as before::
     ================================= FAILURES =================================
     _________________________ TestClass.test_equals[0] _________________________
 
-    self = , a = 1, b = 2
+    self = , a = 1, b = 2
 
     @params([dict(a=1, b=2), dict(a=3, b=3), ])
     def test_equals(self, a, b):
@@ -257,7 +262,7 @@ Running it gives similar results as before::
     test_parametrize2.py:19: AssertionError
     ______________________ TestClass.test_zerodivision[1] ______________________
 
-    self = , a = 3, b = 2
+    self = , a = 3, b = 2
 
     @params([dict(a=1, b=0), dict(a=3, b=2)])
     def test_zerodivision(self, a, b):
@@ -265,7 +270,7 @@ Running it gives similar results as before::
     E       Failed: DID NOT RAISE
 
     test_parametrize2.py:23: Failed
-    2 failed, 2 passed in 0.03 seconds
+    2 failed, 2 passed in 0.02 seconds
 
 checking serialization between Python interpreters
 --------------------------------------------------------------
@@ -286,4 +291,4 @@ Running it (with Python-2.4 through to Python2.7 installed)::
     . $ py.test -q multipython.py
     collecting ... collected 75 items
     ....s....s....s....ssssss....s....s....s....ssssss....s....s....s....ssssss
-    48 passed, 27 skipped in 1.92 seconds
+    48 passed, 27 skipped in 3.74 seconds
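The ``@params`` decorator applied in the tracebacks above is defined by the
document itself; a reconstructed sketch of how such a decorator-plus-hook
pair works (details may differ from the document's own version)::

    def params(funcarglist):
        def wrapper(function):
            function.funcarglist = funcarglist
            return function
        return wrapper

    def pytest_generate_tests(metafunc):
        # generate one test call per dict of function arguments
        for funcargs in getattr(metafunc.function, 'funcarglist', ()):
            metafunc.addcall(funcargs=funcargs)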
diff --git a/doc/example/pythoncollection.txt b/doc/example/pythoncollection.txt
index e5527662f..614d82310 100644
--- a/doc/example/pythoncollection.txt
+++ b/doc/example/pythoncollection.txt
@@ -42,11 +42,16 @@ in functions and classes. For example, if we have::
 
 then the test collection looks like this::
 
     $ py.test --collectonly
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
+    collecting ... collected 2 items
+
+    ============================= in 0.01 seconds =============================
 
 interpret cmdline arguments as Python packages
 -----------------------------------------------------
 
@@ -76,9 +81,14 @@ finding out what is collected
 
 You can always peek at the collection tree without running tests like this::
 
     . $ py.test --collectonly pythoncollection.py
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
+    collecting ... collected 3 items
+
+    ============================= in 0.06 seconds =============================
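The "interpret cmdline arguments as Python packages" heading above refers
to addressing test modules by dotted import name; assuming the standard
``--pyargs`` option is the mechanism meant, usage looks like this (the
package name is made up)::

    py.test --pyargs mypkg.tests.test_something -q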
diff --git a/doc/example/reportingdemo.txt b/doc/example/reportingdemo.txt
index ffdb34dcc..2a7876494 100644
--- a/doc/example/reportingdemo.txt
+++ b/doc/example/reportingdemo.txt
@@ -13,7 +13,7 @@ get on the terminal - we are working on that):
 
     assertion $ py.test failure_demo.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 39 items
 
     failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
@@ -30,7 +30,7 @@ get on the terminal - we are working on that):
     failure_demo.py:15: AssertionError
     _________________________ TestFailing.test_simple __________________________
 
-    self =
+    self =
 
     def test_simple(self):
         def f():
@@ -40,13 +40,13 @@ get on the terminal - we are working on that):
 
     >       assert f() == g()
     E       assert 42 == 43
-    E        +  where 42 = ()
-    E        +  and   43 = ()
+    E        +  where 42 = ()
+    E        +  and   43 = ()
 
     failure_demo.py:28: AssertionError
     ____________________ TestFailing.test_simple_multiline _____________________
 
-    self =
+    self =
 
     def test_simple_multiline(self):
         otherfunc_multi(
@@ -66,19 +66,19 @@ get on the terminal - we are working on that):
     failure_demo.py:12: AssertionError
     ___________________________ TestFailing.test_not ___________________________
 
-    self =
+    self =
 
     def test_not(self):
         def f():
             return 42
     >       assert not f()
     E       assert not 42
-    E        +  where 42 = ()
+    E        +  where 42 = ()
 
     failure_demo.py:38: AssertionError
     _________________ TestSpecialisedExplanations.test_eq_text _________________
 
-    self =
+    self =
 
     def test_eq_text(self):
     >       assert 'spam' == 'eggs'
@@ -89,7 +89,7 @@ get on the terminal - we are working on that):
     failure_demo.py:42: AssertionError
     _____________ TestSpecialisedExplanations.test_eq_similar_text _____________
 
-    self =
+    self =
 
     def test_eq_similar_text(self):
     >       assert 'foo 1 bar' == 'foo 2 bar'
@@ -102,7 +102,7 @@ get on the terminal - we are working on that):
     failure_demo.py:45: AssertionError
     ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________
 
-    self =
+    self =
 
     def test_eq_multiline_text(self):
     >       assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
@@ -115,7 +115,7 @@ get on the terminal - we are working on that):
     failure_demo.py:48: AssertionError
     ______________ TestSpecialisedExplanations.test_eq_long_text _______________
 
-    self =
+    self =
 
     def test_eq_long_text(self):
         a = '1'*100 + 'a' + '2'*100
@@ -132,7 +132,7 @@ get on the terminal - we are working on that):
     failure_demo.py:53: AssertionError
     _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________
 
-    self =
+    self =
 
     def test_eq_long_text_multiline(self):
         a = '1\n'*100 + 'a' + '2\n'*100
@@ -156,7 +156,7 @@ get on the terminal - we are working on that):
     failure_demo.py:58: AssertionError
     _________________ TestSpecialisedExplanations.test_eq_list _________________
 
-    self =
+    self =
 
     def test_eq_list(self):
     >       assert [0, 1, 2] == [0, 1, 3]
@@ -166,7 +166,7 @@ get on the terminal - we are working on that):
     failure_demo.py:61: AssertionError
     ______________ TestSpecialisedExplanations.test_eq_list_long _______________
 
-    self =
+    self =
 
     def test_eq_list_long(self):
         a = [0]*100 + [1] + [3]*100
@@ -178,7 +178,7 @@ get on the terminal - we are working on that):
     failure_demo.py:66: AssertionError
     _________________ TestSpecialisedExplanations.test_eq_dict _________________
 
-    self =
+    self =
 
     def test_eq_dict(self):
     >       assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
@@ -191,7 +191,7 @@ get on the terminal - we are working on that):
     failure_demo.py:69: AssertionError
     _________________ TestSpecialisedExplanations.test_eq_set __________________
 
-    self =
+    self =
 
     def test_eq_set(self):
     >       assert set([0, 10, 11, 12]) == set([0, 20, 21])
@@ -207,7 +207,7 @@ get on the terminal - we are working on that):
     failure_demo.py:72: AssertionError
     _____________ TestSpecialisedExplanations.test_eq_longer_list ______________
 
-    self =
+    self =
 
    def test_eq_longer_list(self):
     >       assert [1,2] == [1,2,3]
@@ -217,7 +217,7 @@ get on the terminal - we are working on that):
     failure_demo.py:75: AssertionError
     _________________ TestSpecialisedExplanations.test_in_list _________________
 
-    self =
+    self =
 
     def test_in_list(self):
     >       assert 1 in [0, 2, 3, 4, 5]
@@ -226,7 +226,7 @@ get on the terminal - we are working on that):
     failure_demo.py:78: AssertionError
     __________ TestSpecialisedExplanations.test_not_in_text_multiline __________
 
-    self =
+    self =
 
     def test_not_in_text_multiline(self):
         text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
@@ -244,7 +244,7 @@ get on the terminal - we are working on that):
     failure_demo.py:82: AssertionError
     ___________ TestSpecialisedExplanations.test_not_in_text_single ____________
 
-    self =
+    self =
 
     def test_not_in_text_single(self):
         text = 'single foo line'
@@ -257,7 +257,7 @@ get on the terminal - we are working on that):
     failure_demo.py:86: AssertionError
     _________ TestSpecialisedExplanations.test_not_in_text_single_long _________
 
-    self =
+    self =
 
     def test_not_in_text_single_long(self):
         text = 'head ' * 50 + 'foo ' + 'tail ' * 20
@@ -270,7 +270,7 @@ get on the terminal - we are working on that):
     failure_demo.py:90: AssertionError
     ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
 
-    self =
+    self =
 
     def test_not_in_text_single_long_term(self):
         text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
@@ -289,7 +289,7 @@ get on the terminal - we are working on that):
     i = Foo()
     >       assert i.b == 2
     E       assert 1 == 2
-    E        +  where 1 = .b
+    E        +  where 1 = .b
 
     failure_demo.py:101: AssertionError
     _________________________ test_attribute_instance __________________________
@@ -299,8 +299,8 @@ get on the terminal - we are working on that):
         b = 1
     >       assert Foo().b == 2
     E       assert 1 == 2
-    E        +  where 1 = .b
-    E        +  where = ()
+    E        +  where 1 = .b
+    E        +  where = ()
 
     failure_demo.py:107: AssertionError
     __________________________ test_attribute_failure __________________________
@@ -316,7 +316,7 @@ get on the terminal - we are working on that):
 
     failure_demo.py:116:
     _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
 
-    self =
+    self =
 
     def _get_b(self):
     >       raise Exception('Failed to get attrib')
@@ -332,15 +332,15 @@ get on the terminal - we are working on that):
         b = 2
     >       assert Foo().b == Bar().b
     E       assert 1 == 2
-    E        +  where 1 = .b
-    E        +  where = ()
-    E        +  and   2 = .b
-    E        +  where = ()
+    E        +  where 1 = .b
+    E        +  where = ()
+    E        +  and   2 = .b
+    E        +  where = ()
 
     failure_demo.py:124: AssertionError
     __________________________ TestRaises.test_raises __________________________
 
-    self =
+    self =
 
     def test_raises(self):
         s = 'qwe'
@@ -352,10 +352,10 @@ get on the terminal - we are working on that):
     >       int(s)
     E       ValueError: invalid literal for int() with base 10: 'qwe'
 
-    <0-codegen /home/hpk/p/pytest/_pytest/python.py:825>:1: ValueError
+    <0-codegen /home/hpk/p/pytest/_pytest/python.py:837>:1: ValueError
     ______________________ TestRaises.test_raises_doesnt _______________________
 
-    self =
+    self =
 
     def test_raises_doesnt(self):
     >       raises(IOError, "int('3')")
@@ -364,7 +364,7 @@ get on the terminal - we are working on that):
     failure_demo.py:136: Failed
     __________________________ TestRaises.test_raise ___________________________
 
-    self =
+    self =
 
     def test_raise(self):
     >       raise ValueError("demo error")
@@ -373,7 +373,7 @@ get on the terminal - we are working on that):
     failure_demo.py:139: ValueError
     ________________________ TestRaises.test_tupleerror ________________________
 
-    self =
+    self =
 
     def test_tupleerror(self):
     >       a,b = [1]
@@ -382,7 +382,7 @@ get on the terminal - we are working on that):
     failure_demo.py:142: ValueError
     ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
 
-    self =
+    self =
 
     def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
         l = [1,2,3]
@@ -395,7 +395,7 @@ get on the terminal - we are working on that):
     l is [1, 2, 3]
     ________________________ TestRaises.test_some_error ________________________
 
-    self =
+    self =
 
     def test_some_error(self):
     >       if namenotexi:
@@ -423,7 +423,7 @@ get on the terminal - we are working on that):
     <2-codegen 'abc-123' /home/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError
     ____________________ TestMoreErrors.test_complex_error _____________________
 
-    self =
+    self =
 
     def test_complex_error(self):
         def f():
@@ -452,7 +452,7 @@ get on the terminal - we are working on that):
     failure_demo.py:5: AssertionError
     ___________________ TestMoreErrors.test_z1_unpack_error ____________________
 
-    self =
+    self =
 
     def test_z1_unpack_error(self):
         l = []
@@ -462,7 +462,7 @@ get on the terminal - we are working on that):
     failure_demo.py:179: ValueError
     ____________________ TestMoreErrors.test_z2_type_error _____________________
 
-    self =
+    self =
 
     def test_z2_type_error(self):
         l = 3
@@ -472,20 +472,20 @@ get on the terminal - we are working on that):
     failure_demo.py:183: TypeError
     ______________________ TestMoreErrors.test_startswith ______________________
 
-    self =
+    self =
 
     def test_startswith(self):
         s = "123"
         g = "456"
     >       assert s.startswith(g)
     E       assert False
-    E        +  where False = ('456')
-    E        +  where = '123'.startswith
+    E        +  where False = ('456')
+    E        +  where = '123'.startswith
 
     failure_demo.py:188: AssertionError
     __________________ TestMoreErrors.test_startswith_nested ___________________
 
-    self =
+    self =
 
     def test_startswith_nested(self):
         def f():
@@ -494,15 +494,15 @@ get on the terminal - we are working on that):
             return "456"
     >       assert f().startswith(g())
     E       assert False
-    E        +  where False = ('456')
-    E        +  where = '123'.startswith
-    E        +  where '123' = ()
-    E        +  and   '456' = ()
+    E        +  where False = ('456')
+    E        +  where = '123'.startswith
+    E        +  where '123' = ()
+    E        +  and   '456' = ()
 
     failure_demo.py:195: AssertionError
     _____________________ TestMoreErrors.test_global_func ______________________
 
-    self =
+    self =
 
     def test_global_func(self):
     >       assert isinstance(globf(42), float)
@@ -513,19 +513,19 @@ get on the terminal - we are working on that):
     failure_demo.py:198: AssertionError
     _______________________ TestMoreErrors.test_instance _______________________
 
-    self =
+    self =
 
     def test_instance(self):
         self.x = 6*7
     >       assert self.x != 42
     E       assert 42 != 42
     E        +  where 42 = 42
-    E        +  where 42 = .x
+    E        +  where 42 = .x
 
     failure_demo.py:202: AssertionError
     _______________________ TestMoreErrors.test_compare ________________________
 
-    self =
+    self =
 
     def test_compare(self):
     >       assert globf(10) < 5
@@ -535,7 +535,7 @@ get on the terminal - we are working on that):
     failure_demo.py:205: AssertionError
     _____________________ TestMoreErrors.test_try_finally ______________________
 
-    self =
+    self =
 
     def test_try_finally(self):
         x = 1
@@ -544,4 +544,4 @@ get on the terminal - we are working on that):
     E       assert 1 == 0
 
     failure_demo.py:210: AssertionError
-    ======================== 39 failed in 0.26 seconds =========================
+    ======================== 39 failed in 0.19 seconds =========================
diff --git a/doc/example/simple.txt b/doc/example/simple.txt
index 4efb3795f..ae0c4323c 100644
--- a/doc/example/simple.txt
+++ b/doc/example/simple.txt
@@ -109,13 +109,13 @@ directory with the above conftest.py::
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     gw0 I / gw1 I / gw2 I / gw3 I
     gw0 [0] / gw1 [0] / gw2 [0] / gw3 [0]
 
     scheduling tests via LoadScheduling
 
-    ============================= in 0.35 seconds =============================
+    ============================= in 0.51 seconds =============================
 
 .. _`excontrolskip`:
 
@@ -156,12 +156,12 @@ and when running it will see a skipped "slow" test::
 
     $ py.test -rs    # "-rs" means report details on the little 's'
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 2 items
 
     test_module.py .s
     ========================= short test summary info ==========================
-    SKIP [1] /tmp/doc-exec-40/conftest.py:9: need --runslow option to run
+    SKIP [1] /tmp/doc-exec-41/conftest.py:9: need --runslow option to run
 
     =================== 1 passed, 1 skipped in 0.02 seconds ====================
 
@@ -169,7 +169,7 @@ Or run it including the ``slow`` marked test::
 
     $ py.test --runslow
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 2 items
 
     test_module.py ..
@@ -213,7 +213,7 @@ Let's run our little function::
     E       Failed: not configured: 42
 
     test_checkconfig.py:8: Failed
-    1 failed in 0.03 seconds
+    1 failed in 0.01 seconds
 
 Detect if running from within a py.test run
 --------------------------------------------------------------
 
@@ -261,7 +261,7 @@ which will add the string to the test header accordingly::
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     project deps: mylib-1.1
     collecting ... collected 0 items
 
@@ -284,7 +284,7 @@ which will add info only when run with "-v"::
 
     $ py.test -v
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2 -- /home/hpk/venv/0/bin/python
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2 -- /home/hpk/venv/0/bin/python
     info1: did you know that ...
     did you?
     collecting ... collected 0 items
 
@@ -295,7 +295,7 @@ and nothing when run plainly::
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 0 items
 
     ============================= in 0.00 seconds =============================
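The skip summary above points at ``conftest.py:9`` with the message
"need --runslow option to run"; a reconstructed sketch of that conftest
(this is the documented recipe, though the exact file may differ)::

    import pytest

    def pytest_addoption(parser):
        parser.addoption("--runslow", action="store_true",
                         help="run slow tests")

    def pytest_runtest_setup(item):
        if 'slow' in item.keywords and not item.config.getvalue("runslow"):
            pytest.skip("need --runslow option to run")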
diff --git a/doc/funcargs.txt b/doc/funcargs.txt
index 68516f7a7..6b04a4b19 100644
--- a/doc/funcargs.txt
+++ b/doc/funcargs.txt
@@ -61,7 +61,7 @@ Running the test looks like this::
 
     $ py.test test_simplefactory.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 1 items
 
     test_simplefactory.py F
@@ -167,7 +167,7 @@ Running this::
 
     $ py.test test_example.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 10 items
 
     test_example.py .........F
@@ -182,13 +182,16 @@ Running this::
     E       assert 9 < 9
 
     test_example.py:7: AssertionError
-    ==================== 1 failed, 9 passed in 0.05 seconds ====================
+    ==================== 1 failed, 9 passed in 0.02 seconds ====================
 
 Note that the ``pytest_generate_tests(metafunc)`` hook is called during
 the test collection phase which is separate from the actual test running.
 Let's just look at what is collected::
 
     $ py.test --collectonly test_example.py
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
+    collecting ... collected 10 items
 
@@ -200,12 +203,14 @@ Let's just look at what is collected::
 
+
+    ============================= in 0.00 seconds =============================
 
 If you want to select only the run with the value ``7`` you could do::
 
     $ py.test -v -k 7 test_example.py  # or -k test_func[7]
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2 -- /home/hpk/venv/0/bin/python
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2 -- /home/hpk/venv/0/bin/python
     collecting ... collected 10 items
 
     test_example.py:6: test_func[7] PASSED
diff --git a/doc/getting-started.txt b/doc/getting-started.txt
index 1e7cb5f57..d967236dc 100644
--- a/doc/getting-started.txt
+++ b/doc/getting-started.txt
@@ -16,10 +16,9 @@ Installation options::
 
 To check your installation has installed the correct version::
 
     $ py.test --version
-    This is py.test version 2.0.2.dev2, imported from /home/hpk/p/pytest/pytest.py
+    This is py.test version 2.0.2, imported from /home/hpk/p/pytest/pytest.py
     setuptools registered plugins:
       pytest-xdist-1.6.dev2 at /home/hpk/p/pytest-xdist/xdist/plugin.pyc
-      pytest-pep8-0.7 at /home/hpk/p/pytest-pep8/pytest_pep8.pyc
 
 If you get an error checkout :ref:`installation issues`.
 
@@ -41,7 +40,7 @@ That's it. You can execute the test function now::
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 1 items
 
     test_sample.py F
@@ -100,7 +99,7 @@ Running it, this time in "quiet" reporting mode::
 
     $ py.test -q test_sysexit.py
     collecting ... collected 1 items
     .
-    1 passed in 0.01 seconds
+    1 passed in 0.00 seconds
 
 .. todo:: For further ways to assert exceptions see the `raises`
 
@@ -131,7 +130,7 @@ run the module by passing its filename::
     ================================= FAILURES =================================
     ____________________________ TestClass.test_two ____________________________
 
-    self =
+    self =
 
     def test_two(self):
         x = "hello"
@@ -140,7 +139,7 @@ run the module by passing its filename::
     E        +  where False = hasattr('hello', 'check')
 
     test_class.py:8: AssertionError
-    1 failed, 1 passed in 0.04 seconds
+    1 failed, 1 passed in 0.02 seconds
 
 The first test passed, the second failed. Again we can easily see
 the intermediate values used in the assertion, helping us to
@@ -169,7 +168,7 @@ before performing the test function call. Let's just run it::
     ================================= FAILURES =================================
     _____________________________ test_needsfiles ______________________________
 
-    tmpdir = local('/tmp/pytest-92/test_needsfiles0')
+    tmpdir = local('/tmp/pytest-0/test_needsfiles0')
 
     def test_needsfiles(tmpdir):
         print tmpdir
@@ -178,8 +177,8 @@ before performing the test function call. Let's just run it::
 
     test_tmpdir.py:3: AssertionError
     ----------------------------- Captured stdout ------------------------------
-    /tmp/pytest-92/test_needsfiles0
-    1 failed in 0.14 seconds
+    /tmp/pytest-0/test_needsfiles0
+    1 failed in 0.02 seconds
 
 Before the test runs, a unique-per-test-invocation temporary
 directory was created. More info at :ref:`tmpdir handling`.
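The ``test_sysexit.py`` run above exercises ``pytest.raises``, which the
".. todo::" note refers to; a minimal sketch of its call form (the
function names are invented)::

    import pytest

    def f():
        raise SystemExit(1)

    def test_mytest():
        pytest.raises(SystemExit, f)  # passes because f() raises SystemExit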
diff --git a/doc/mark.txt b/doc/mark.txt
index bf8707c15..614921822 100644
--- a/doc/mark.txt
+++ b/doc/mark.txt
@@ -88,19 +88,19 @@ You can use the ``-k`` command line option to select tests::
 
     $ py.test -k webtest  # running with the above defined examples yields
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 4 items
 
     test_mark.py ..
     test_mark_classlevel.py ..
 
-    ========================= 4 passed in 0.02 seconds =========================
+    ========================= 4 passed in 0.01 seconds =========================
 
 And you can also run all tests except the ones that match the keyword::
 
     $ py.test -k-webtest
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 4 items
 
     ===================== 4 tests deselected by '-webtest' =====================
@@ -110,7 +110,7 @@ Or to only select the class::
 
     $ py.test -kTestClass
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 4 items
 
     test_mark_classlevel.py ..
diff --git a/doc/monkeypatch.txt b/doc/monkeypatch.txt
index c5f42adbd..c2a620474 100644
--- a/doc/monkeypatch.txt
+++ b/doc/monkeypatch.txt
@@ -39,7 +39,7 @@ will be undone.
 
 .. background check:
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.6.6 -- pytest-2.0.2.dev2
+    platform linux2 -- Python 2.6.6 -- pytest-2.0.2
     collecting ... collected 0 items
 
     ============================= in 0.00 seconds =============================
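The hunk above sits in the document's discussion of how every
``monkeypatch`` change "will be undone" after the test; a minimal sketch
of the funcarg in use (the patched target is just an illustration)::

    import os.path

    def test_mytest(monkeypatch):
        monkeypatch.setattr(os.path, 'abspath', lambda name: '/abc')
        assert os.path.abspath('anything') == '/abc'
        # the original os.path.abspath is restored once the test finishes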
- """ - if not first == second: - raise self.failureException, \ - > (msg or '%r != %r' % (first, second)) - E AssertionError: 1 != 3 - - /usr/lib/python2.6/unittest.py:350: AssertionError + test_unittest.py:8: AssertionError ----------------------------- Captured stdout ------------------------------ hello - ========================= 1 failed in 0.03 seconds ========================= + ========================= 1 failed in 0.02 seconds ========================= .. _`unittest.py style`: http://docs.python.org/library/unittest.html diff --git a/pytest.py b/pytest.py index 8947e365e..a56501d73 100644 --- a/pytest.py +++ b/pytest.py @@ -1,7 +1,7 @@ """ unit and functional testing with Python. """ -__version__ = '2.0.2.dev7' +__version__ = '2.0.2' __all__ = ['main'] from _pytest.core import main, UsageError, _preloadplugins diff --git a/setup.py b/setup.py index aa0503013..b543afe45 100644 --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ def main(): name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.0.2.dev7', + version='2.0.2', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],