Question
I have some kind of test data and want to create a unit test for each item. My first idea was to do it like this:
import unittest

l = [["foo", "a", "a"], ["bar", "a", "b"], ["lee", "b", "b"]]

class TestSequence(unittest.TestCase):
    def testsample(self):
        for name, a, b in l:
            print "test", name
            self.assertEqual(a, b)

if __name__ == '__main__':
    unittest.main()
The downside of this is that it handles all data in one test. I would like to generate one test for each item on the fly. Any suggestions?
Answer 1:
I use something like this:
import unittest

l = [["foo", "a", "a"], ["bar", "a", "b"], ["lee", "b", "b"]]

class TestSequense(unittest.TestCase):
    pass

def test_generator(a, b):
    def test(self):
        self.assertEqual(a, b)
    return test

if __name__ == '__main__':
    for t in l:
        test_name = 'test_%s' % t[0]
        test = test_generator(t[1], t[2])
        setattr(TestSequense, test_name, test)
    unittest.main()
The parameterized package can be used to automate this process:
import unittest
from parameterized import parameterized

class TestSequence(unittest.TestCase):
    @parameterized.expand([
        ["foo", "a", "a"],
        ["bar", "a", "b"],
        ["lee", "b", "b"],
    ])
    def test_sequence(self, name, a, b):
        self.assertEqual(a, b)
Which will generate the tests:
test_sequence_0_foo (__main__.TestSequence) ... ok
test_sequence_1_bar (__main__.TestSequence) ... FAIL
test_sequence_2_lee (__main__.TestSequence) ... ok
======================================================================
FAIL: test_sequence_1_bar (__main__.TestSequence)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/parameterized/parameterized.py", line 233, in <lambda>
standalone_func = lambda *a: func(*(a + p.args), **p.kwargs)
File "x.py", line 12, in test_sequence
self.assertEqual(a,b)
AssertionError: 'a' != 'b'
Answer 2:
Using unittest (since 3.4)
Since Python 3.4, the standard library unittest package has the subTest context manager.
See the documentation:
- 26.4.7. Distinguishing test iterations using subtests
- subTest
Example:
from unittest import TestCase

param_list = [('a', 'a'), ('a', 'b'), ('b', 'b')]

class TestDemonstrateSubtest(TestCase):
    def test_works_as_expected(self):
        for p1, p2 in param_list:
            with self.subTest():
                self.assertEqual(p1, p2)
You can also specify a custom message and parameter values to subTest():
with self.subTest(msg="Checking if p1 equals p2", p1=p1, p2=p2):
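For reference, here is the example above assembled with the custom message and parameters, as a minimal runnable sketch (the message text is just illustrative):

from unittest import TestCase

param_list = [('a', 'a'), ('a', 'b'), ('b', 'b')]

class TestDemonstrateSubtest(TestCase):
    def test_works_as_expected(self):
        for p1, p2 in param_list:
            # A failing subtest is reported with msg and the p1/p2 values,
            # so you can tell which parameter combination failed.
            with self.subTest(msg="Checking if p1 equals p2", p1=p1, p2=p2):
                self.assertEqual(p1, p2)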
Using nose
The nose testing framework supports this.
Example (the code below is the entire contents of the file containing the test):
param_list = [('a', 'a'), ('a', 'b'), ('b', 'b')]

def test_generator():
    for params in param_list:
        yield check_em, params[0], params[1]

def check_em(a, b):
    assert a == b
The output of the nosetests command:
> nosetests -v
testgen.test_generator('a', 'a') ... ok
testgen.test_generator('a', 'b') ... FAIL
testgen.test_generator('b', 'b') ... ok
======================================================================
FAIL: testgen.test_generator('a', 'b')
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/lib/python2.5/site-packages/nose-0.10.1-py2.5.egg/nose/case.py", line 203, in runTest
self.test(*self.arg)
File "testgen.py", line 7, in check_em
assert a == b
AssertionError
----------------------------------------------------------------------
Ran 3 tests in 0.006s
FAILED (failures=1)
Answer 3:
This can be solved elegantly using Metaclasses:
import unittest

l = [["foo", "a", "a"], ["bar", "a", "b"], ["lee", "b", "b"]]

class TestSequenceMeta(type):
    def __new__(mcs, name, bases, dict):
        def gen_test(a, b):
            def test(self):
                self.assertEqual(a, b)
            return test
        for tname, a, b in l:
            test_name = "test_%s" % tname
            dict[test_name] = gen_test(a, b)
        return type.__new__(mcs, name, bases, dict)

class TestSequence(unittest.TestCase):
    __metaclass__ = TestSequenceMeta

if __name__ == '__main__':
    unittest.main()
Answer 4:
As of Python 3.4, subtests have been introduced to unittest for this purpose. See the documentation for details. TestCase.subTest is a context manager which allows one to isolate asserts in a test, so that a failure is reported with parameter information but does not stop the test execution. Here's the example from the documentation:
import unittest

class NumbersTest(unittest.TestCase):
    def test_even(self):
        """
        Test that numbers between 0 and 5 are all even.
        """
        for i in range(0, 6):
            with self.subTest(i=i):
                self.assertEqual(i % 2, 0)
The output of a test run would be:
======================================================================
FAIL: test_even (__main__.NumbersTest) (i=1)
----------------------------------------------------------------------
Traceback (most recent call last):
File "subtests.py", line 32, in test_even
self.assertEqual(i % 2, 0)
AssertionError: 1 != 0
======================================================================
FAIL: test_even (__main__.NumbersTest) (i=3)
----------------------------------------------------------------------
Traceback (most recent call last):
File "subtests.py", line 32, in test_even
self.assertEqual(i % 2, 0)
AssertionError: 1 != 0
======================================================================
FAIL: test_even (__main__.NumbersTest) (i=5)
----------------------------------------------------------------------
Traceback (most recent call last):
File "subtests.py", line 32, in test_even
self.assertEqual(i % 2, 0)
AssertionError: 1 != 0
This is also part of unittest2, so it is available for earlier versions of Python.
Answer 5:
load_tests is a little-known mechanism, introduced in Python 2.7, for dynamically creating a TestSuite. With it, you can easily create parametrized tests.
For example:
import unittest

class GeneralTestCase(unittest.TestCase):
    def __init__(self, methodName, param1=None, param2=None):
        super(GeneralTestCase, self).__init__(methodName)
        self.param1 = param1
        self.param2 = param2

    def runTest(self):
        pass  # Test that depends on param1 and param2.

def load_tests(loader, tests, pattern):
    test_cases = unittest.TestSuite()
    for p1, p2 in [(1, 2), (3, 4)]:
        test_cases.addTest(GeneralTestCase('runTest', p1, p2))
    return test_cases
That code will run all the TestCases in the TestSuite returned by load_tests. No other tests are automatically run by the discovery mechanism.
Alternatively, you can also use inheritance as shown in this ticket: http://bugs.python.org/msg151444
Answer 6:
This can be done with pytest. Just write the file test_me.py with this content:
import pytest

@pytest.mark.parametrize('name, left, right', [['foo', 'a', 'a'],
                                               ['bar', 'a', 'b'],
                                               ['baz', 'b', 'b']])
def test_me(name, left, right):
    assert left == right, name
Then run your test with the command py.test --tb=short test_me.py. The output will look like:
=========================== test session starts ============================
platform darwin -- Python 2.7.6 -- py-1.4.23 -- pytest-2.6.1
collected 3 items
test_me.py .F.
================================= FAILURES =================================
_____________________________ test_me[bar-a-b] _____________________________
test_me.py:8: in test_me
assert left == right, name
E AssertionError: bar
==================== 1 failed, 2 passed in 0.01 seconds ====================
It's simple! pytest also has more features, like fixtures, marks, asserts, etc.
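As a small illustration of the fixtures mentioned above, a hedged sketch (the numbers fixture is made up for this example):

import pytest

@pytest.fixture
def numbers():
    # pytest calls this and injects the return value into any
    # test that takes an argument with the same name.
    return [1, 2, 3]

def test_sum(numbers):
    assert sum(numbers) == 6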
Answer 7:
Use the ddt library. It adds simple decorators for the test methods:
import unittest
from ddt import ddt, data
from mycode import larger_than_two

@ddt
class FooTestCase(unittest.TestCase):
    @data(3, 4, 12, 23)
    def test_larger_than_two(self, value):
        self.assertTrue(larger_than_two(value))

    @data(1, -3, 2, 0)
    def test_not_larger_than_two(self, value):
        self.assertFalse(larger_than_two(value))
This library can be installed with pip. It doesn't require nose, and works well with the standard library unittest module.
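The mycode module imported above is the answerer's own code; a minimal stand-in so the example runs could look like this (an assumption, not the real module):

# mycode.py -- hypothetical stand-in for the module imported in the ddt example
def larger_than_two(value):
    return value > 2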
Answer 8:
You would benefit from trying the TestScenarios library.
testscenarios provides clean dependency injection for python unittest style tests. This can be used for interface testing (testing many implementations via a single test suite) or for classic dependency injection (provide tests with dependencies externally to the test code itself, allowing easy testing in different situations).
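A minimal sketch of what that looks like with the question's data, assuming the TestWithScenarios base class and scenarios attribute from the testscenarios documentation:

import testscenarios

class TestSequence(testscenarios.TestWithScenarios):
    # Each scenario is a (name, attributes) pair; the attributes
    # are set on self before the test methods run.
    scenarios = [
        ('foo', {'a': 'a', 'b': 'a'}),
        ('bar', {'a': 'a', 'b': 'b'}),
        ('lee', {'a': 'b', 'b': 'b'}),
    ]

    def test_sequence(self):
        self.assertEqual(self.a, self.b)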
Answer 9:
There's also Hypothesis, which adds fuzz or property-based testing: https://pypi.python.org/pypi/hypothesis
This is a very powerful testing method.
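A small taste of the style, as a sketch (the round-trip property is just an illustration):

import unittest
from hypothesis import given
import hypothesis.strategies as st

class TestEncoding(unittest.TestCase):
    # Hypothesis generates many text inputs and shrinks any
    # failure down to a minimal falsifying example.
    @given(st.text())
    def test_encode_decode_round_trip(self, s):
        self.assertEqual(s.encode('utf-8').decode('utf-8'), s)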
Answer 10:
You can use the nose-ittr plugin (pip install nose-ittr).
It's very easy to integrate with existing tests; minimal changes (if any) are required. It also supports the nose multiprocessing plugin.
Note that you can also have a customized setup function per test.
@ittr(number=[1, 2, 3, 4])
def test_even(self):
    assert_equal(self.number % 2, 0)
It is also possible to pass nosetests parameters like with their built-in plugin attrib; this way you can run only a specific test with a specific parameter:
nosetests -a number=2
Answer 11:
I came across ParamUnittest the other day when looking at the source code of radon (example usage in the GitHub repo). It should work with other frameworks that extend TestCase (like Nose).
Here is an example:
import unittest
import paramunittest

@paramunittest.parametrized(
    ('1', '2'),
    #(4, 3),    # <---- uncomment to have a failing test
    ('2', '3'),
    (('4', ), {'b': '5'}),
    ((), {'a': 5, 'b': 6}),
    {'a': 5, 'b': 6},
)
class TestBar(unittest.TestCase):
    def setParameters(self, a, b):
        self.a = a
        self.b = b

    def testLess(self):
        self.assertLess(self.a, self.b)
Answer 12:
I use metaclasses and decorators to generate tests. You can check my implementation, python_wrap_cases. This library doesn't require any test framework.
Your example:
import unittest
from python_wrap_cases import wrap_case

@wrap_case
class TestSequence(unittest.TestCase):
    @wrap_case("foo", "a", "a")
    @wrap_case("bar", "a", "b")
    @wrap_case("lee", "b", "b")
    def testsample(self, name, a, b):
        print "test", name
        self.assertEqual(a, b)
Console output:
testsample_u'bar'_u'a'_u'b' (tests.example.test_stackoverflow.TestSequence) ... test bar
FAIL
testsample_u'foo'_u'a'_u'a' (tests.example.test_stackoverflow.TestSequence) ... test foo
ok
testsample_u'lee'_u'b'_u'b' (tests.example.test_stackoverflow.TestSequence) ... test lee
ok
Also, you may use generators. For example, this code generates all possible combinations of tests with the arguments a__list and b__list:
import unittest
from python_wrap_cases import wrap_case

@wrap_case
class TestSequence(unittest.TestCase):
    @wrap_case(a__list=["a", "b"], b__list=["a", "b"])
    def testsample(self, a, b):
        self.assertEqual(a, b)
Console output:
testsample_a(u'a')_b(u'a') (tests.example.test_stackoverflow.TestSequence) ... ok
testsample_a(u'a')_b(u'b') (tests.example.test_stackoverflow.TestSequence) ... FAIL
testsample_a(u'b')_b(u'a') (tests.example.test_stackoverflow.TestSequence) ... FAIL
testsample_a(u'b')_b(u'b') (tests.example.test_stackoverflow.TestSequence) ... ok
Answer 13:
Just use metaclasses, as seen here:
from unittest import TestCase

class DocTestMeta(type):
    """
    Test functions are generated in the metaclass due to the way some
    test loaders work. For example, setUpClass() won't get called
    unless there are other existing test methods, and it will also
    prevent unit test loader logic being called before the test
    methods have been defined.
    """
    def __init__(self, name, bases, attrs):
        super(DocTestMeta, self).__init__(name, bases, attrs)

    def __new__(cls, name, bases, attrs):
        def func(self):
            """Inner test method goes here"""
            self.assertTrue(1)
        func.__name__ = 'test_sample'
        attrs[func.__name__] = func
        return super(DocTestMeta, cls).__new__(cls, name, bases, attrs)

class ExampleTestCase(TestCase):
    """Our example test case, with no methods defined"""
    __metaclass__ = DocTestMeta
Output:
test_sample (ExampleTestCase) ... OK
Answer 14:
import unittest

def generator(test_class, a, b):
    def test(self):
        self.assertEqual(a, b)
    return test

def _add_test_methods(test_class):
    # First element of the list is variable "a", then variable "b",
    # then the name of the test case that will be used as a suffix.
    test_list = [[2, 3, 'one'], [5, 5, 'two'], [0, 0, 'three']]
    for case in test_list:
        test = generator(test_class, case[0], case[1])
        setattr(test_class, "test_%s" % case[2], test)

class TestAuto(unittest.TestCase):
    def setUp(self):
        print 'Setup'

    def tearDown(self):
        print 'TearDown'

_add_test_methods(TestAuto)  # It's better to start with an underscore so it is not detected as a test itself

if __name__ == '__main__':
    unittest.main(verbosity=1)
RESULT:
>>>
Setup
FTearDown
Setup
TearDown
.Setup
TearDown
.
======================================================================
FAIL: test_one (__main__.TestAuto)
----------------------------------------------------------------------
Traceback (most recent call last):
File "D:/inchowar/Desktop/PyTrash/test_auto_3.py", line 5, in test
self.assertEqual(a, b)
AssertionError: 2 != 3
----------------------------------------------------------------------
Ran 3 tests in 0.019s
FAILED (failures=1)
Answer 15:
You can use TestSuite and custom TestCase classes.
import unittest

class CustomTest(unittest.TestCase):
    def __init__(self, name, a, b):
        super().__init__()
        self.name = name
        self.a = a
        self.b = b

    def runTest(self):
        print("test", self.name)
        self.assertEqual(self.a, self.b)

if __name__ == '__main__':
    suite = unittest.TestSuite()
    suite.addTest(CustomTest("Foo", 1337, 1337))
    suite.addTest(CustomTest("Bar", 0xDEAD, 0xC0DE))
    unittest.TextTestRunner().run(suite)
Answer 16:
I'd been having trouble with a very particular style of parameterized tests. All our Selenium tests can run locally, but they also should be able to be run remotely against several platforms on SauceLabs. Basically, I wanted to take a large number of already-written test cases and parameterize them with the fewest changes to code possible. Furthermore, I needed to be able to pass the parameters into the setUp method, something which I haven't seen any solutions for elsewhere.
Here's what I've come up with:
import inspect
import types

test_platforms = [
    {'browserName': "internet explorer", 'platform': "Windows 7", 'version': "10.0"},
    {'browserName': "internet explorer", 'platform': "Windows 7", 'version': "11.0"},
    {'browserName': "firefox", 'platform': "Linux", 'version': "43.0"},
]

def sauce_labs():
    def wrapper(cls):
        return test_on_platforms(cls)
    return wrapper

def test_on_platforms(base_class):
    for name, function in inspect.getmembers(base_class, inspect.isfunction):
        if name.startswith('test_'):
            for platform in test_platforms:
                new_name = '_'.join([name, ''.join(platform['browserName'].title().split()), platform['version']])
                new_function = types.FunctionType(function.__code__, function.__globals__, new_name,
                                                  function.__defaults__, function.__closure__)
                setattr(new_function, 'platform', platform)
                setattr(base_class, new_name, new_function)
            delattr(base_class, name)
    return base_class
With this, all I had to do was add a simple decorator @sauce_labs() to each regular old TestCase, and now when running them, they're wrapped up and rewritten, so that all the test methods are parameterized and renamed. LoginTests.test_login(self) runs as LoginTests.test_login_internet_explorer_10.0(self), LoginTests.test_login_internet_explorer_11.0(self), and LoginTests.test_login_firefox_43.0(self), and each one has the parameter self.platform to decide what browser/platform to run against, even in LoginTests.setUp, which is crucial for my task since that's where the connection to SauceLabs is initialized.
Anyway, I hope this might be of help to someone looking to do a similar "global" parameterization of their tests!
Answer 17:
This solution works with unittest and nose:
#!/usr/bin/env python
import unittest

def make_function(description, a, b):
    def ghost(self):
        self.assertEqual(a, b, description)
        print description
    ghost.__name__ = 'test_{0}'.format(description)
    return ghost

class TestsContainer(unittest.TestCase):
    pass

testsmap = {
    'foo': [1, 1],
    'bar': [1, 2],
    'baz': [5, 5]}

def generator():
    for name, params in testsmap.iteritems():
        test_func = make_function(name, params[0], params[1])
        setattr(TestsContainer, 'test_{0}'.format(name), test_func)

generator()

if __name__ == '__main__':
    unittest.main()
Answer 18:
The metaclass-based answers still work in Python 3, but instead of the __metaclass__ attribute one has to use the metaclass parameter, as in:

class ExampleTestCase(TestCase, metaclass=DocTestMeta):
    pass
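For instance, the metaclass from Answer 3 ported to Python 3 in full (a sketch using the same data as the question):

import unittest

l = [["foo", "a", "a"], ["bar", "a", "b"], ["lee", "b", "b"]]

class TestSequenceMeta(type):
    def __new__(mcs, name, bases, dct):
        def gen_test(a, b):
            def test(self):
                self.assertEqual(a, b)
            return test
        for tname, a, b in l:
            dct["test_%s" % tname] = gen_test(a, b)
        return super().__new__(mcs, name, bases, dct)

# In Python 3 the metaclass is passed as a keyword argument instead
# of being set via the __metaclass__ attribute.
class TestSequence(unittest.TestCase, metaclass=TestSequenceMeta):
    pass

if __name__ == '__main__':
    unittest.main()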
Answer 19:
Meta-programming is fun, but it can get in the way. Most solutions here make it difficult to:
- selectively launch a test
- point back to the code given test's name
So, my first suggestion is to follow the simple/explicit path (works with any test runner):
import unittest

class TestSequence(unittest.TestCase):
    def _test_complex_property(self, a, b):
        self.assertEqual(a, b)

    def test_foo(self):
        self._test_complex_property("a", "a")

    def test_bar(self):
        self._test_complex_property("a", "b")

    def test_lee(self):
        self._test_complex_property("b", "b")

if __name__ == '__main__':
    unittest.main()
Since we shouldn't repeat ourselves, my second suggestion builds on @Javier's answer: embrace property-based testing. The Hypothesis library:
- is "more relentlessly devious about test case generation than us mere humans"
- will provide simple counterexamples
- works with any test runner
- has many more interesting features (statistics, additional test output, ...)
from hypothesis import given
import hypothesis.strategies as st

class TestSequence(unittest.TestCase):
    @given(st.text(), st.text())
    def test_complex_property(self, a, b):
        self.assertEqual(a, b)
To test your specific examples, just add:
@example("a", "a")
@example("a", "b")
@example("b", "b")
To run only one particular example, you can comment out the other examples (the provided examples will be run first). You may want to use @given(st.nothing()). Another option is to replace the whole block by:
@given(st.just("a"), st.just("b"))
Ok, you don't have distinct test names. But maybe you just need:
- a descriptive name of the property under test.
- which input leads to failure (falsifying example).
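Assembled into one runnable file, the pieces above look like this (a sketch; @example adds the listed inputs on top of the generated ones):

import unittest
from hypothesis import given, example
import hypothesis.strategies as st

class TestSequence(unittest.TestCase):
    @given(st.text(), st.text())
    @example("a", "a")
    @example("a", "b")
    @example("b", "b")
    def test_complex_property(self, a, b):
        self.assertEqual(a, b)

if __name__ == '__main__':
    unittest.main()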
Answer 20:
Super late to the party, but I had trouble making these work for setUpClass.
Here's a version of @Javier's answer that gives setUpClass access to dynamically allocated attributes.
import unittest

class GeneralTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        print ''
        print cls.p1
        print cls.p2

    def runTest1(self):
        self.assertTrue((self.p2 - self.p1) == 1)

    def runTest2(self):
        self.assertFalse((self.p2 - self.p1) == 2)

def load_tests(loader, tests, pattern):
    test_cases = unittest.TestSuite()
    for p1, p2 in [(1, 2), (3, 4)]:
        clsname = 'TestCase_{}_{}'.format(p1, p2)
        dct = {
            'p1': p1,
            'p2': p2,
        }
        cls = type(clsname, (GeneralTestCase,), dct)
        test_cases.addTest(cls('runTest1'))
        test_cases.addTest(cls('runTest2'))
    return test_cases
Outputs
1
2
..
3
4
..
----------------------------------------------------------------------
Ran 4 tests in 0.000s
OK
Answer 21:
I have found that this works well for my purposes, especially if I need to generate tests that do slightly different processing on a collection of data.
import unittest

def rename(newName):
    def renamingFunc(func):
        func.__name__ = newName
        return func
    return renamingFunc

class TestGenerator(unittest.TestCase):
    TEST_DATA = {}

    @classmethod
    def generateTests(cls):
        for dataName, dataValue in TestGenerator.TEST_DATA.items():
            for func in cls.getTests(dataName, dataValue):
                setattr(cls, "test_{:s}_{:s}".format(func.__name__, dataName), func)

    @classmethod
    def getTests(cls, dataName, dataValue):
        raise NotImplementedError("This must be implemented")

class TestCluster(TestGenerator):
    TEST_CASES = []

    @staticmethod
    def getTests(dataName, dataValue):
        def makeTest(case):
            @rename("{:s}".format(case["name"]))
            def test(self):
                # Do things with self, case, data
                pass
            return test
        return [makeTest(c) for c in TestCluster.TEST_CASES]

TestCluster.generateTests()
The TestGenerator class can be used to spawn different sets of test cases, like TestCluster. TestCluster can be thought of as an implementation of the TestGenerator interface.
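A hypothetical usage sketch, following the data shapes the code above expects (TEST_DATA maps names to values, each TEST_CASES entry is a dict with at least a "name" key; all the names here are invented):

# Hypothetical data, named for illustration only.
TestGenerator.TEST_DATA = {"small": 1, "large": 100}                 # read by generateTests()
TestCluster.TEST_CASES = [{"name": "caseOne"}, {"name": "caseTwo"}]
TestCluster.generateTests()   # must be called after the data is populated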
Answer 22:
import unittest

def generator(test_class, a, b, c, d, name):
    def test(self):
        print('Testexecution=', name)
        print('a=', a)
        print('b=', b)
        print('c=', c)
        print('d=', d)
    return test

def add_test_methods(test_class):
    test_list = [[3, 3, 5, 6, 'one'], [5, 5, 8, 9, 'two'], [0, 0, 5, 6, 'three'], [0, 0, 2, 3, 'Four']]
    for case in test_list:
        print('case=', case[0], case[1], case[2], case[3], case[4])
        test = generator(test_class, case[0], case[1], case[2], case[3], case[4])
        setattr(test_class, "test_%s" % case[4], test)

class TestAuto(unittest.TestCase):
    def setUp(self):
        print('Setup')

    def tearDown(self):
        print('TearDown')

add_test_methods(TestAuto)

if __name__ == '__main__':
    unittest.main(verbosity=1)
Answer 23:
Besides using setattr, we can use load_tests since Python 3.2. Please refer to the blog post blog.livreuro.com/en/coding/python/how-to-generate-discoverable-unit-tests-in-python-dynamically/
import os
import unittest
from unittest import TestSuite

class Test(unittest.TestCase):
    pass

def _test(self, file_name):
    with open(file_name, 'r') as f:
        self.assertEqual('test result', f.read())

def _generate_test(file_name):
    def test(self):
        _test(self, file_name)
    return test

def _generate_tests():
    for file in files:  # 'files' is assumed to be defined elsewhere in the module
        file_name = os.path.splitext(os.path.basename(file))[0]
        setattr(Test, 'test_%s' % file_name, _generate_test(file))

test_cases = (Test,)

def load_tests(loader, tests, pattern):
    _generate_tests()
    suite = TestSuite()
    for test_class in test_cases:
        tests = loader.loadTestsFromTestCase(test_class)
        suite.addTests(tests)
    return suite

if __name__ == '__main__':
    _generate_tests()
    unittest.main()
Answer 24:
Following is my solution. I find it useful when:
1. It should work with unittest.TestCase and unittest discover.
2. There is a set of tests to be run with different parameter settings.
3. It is very simple, with no dependency on other packages.

import unittest
class BaseClass(unittest.TestCase):
    def setUp(self):
        self.param = 2
        self.base = 2

    def test_me(self):
        self.assertGreaterEqual(5, self.param + self.base)

    def test_me_too(self):
        self.assertLessEqual(3, self.param + self.base)

class Child_One(BaseClass):
    def setUp(self):
        BaseClass.setUp(self)
        self.param = 4

class Child_Two(BaseClass):
    def setUp(self):
        BaseClass.setUp(self)
        self.param = 1
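One caveat worth noting (my observation, not the answerer's): test discovery will also collect BaseClass itself and run its tests with the default parameters. A common workaround is to remove the base class from the module namespace after the children are defined:

# Prevent discovery from also running BaseClass directly;
# the child classes still inherit its test methods.
del BaseClass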
Source: https://stackoverflow.com/questions/32899/how-do-you-generate-dynamic-parameterized-unit-tests-in-python