Testing in Python
Overview
Testing is essential for writing reliable, maintainable code. Python provides excellent testing tools, from the built-in unittest module to the third-party pytest framework. This module covers testing from the fundamentals through advanced practices.
1. Why Test?
Benefits of Testing
- Catch bugs early - Find issues before production
- Documentation - Tests show how code should work
- Refactoring confidence - Change code safely
- Better design - Testable code is often better designed
- Collaboration - Tests help team members understand code
Types of Tests
Unit Tests - Test individual functions/classes
Integration Tests - Test components working together
End-to-End Tests - Test complete user workflows
Performance Tests - Test speed and resource usage
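To make the first two concrete, here is a small sketch contrasting a unit test with an integration test. The slugify function and Database class are hypothetical; tmp_path is pytest's built-in temporary-directory fixture.
```python
# Unit test: exercises a single pure function in isolation (hypothetical slugify)
def slugify(title):
    return title.lower().replace(" ", "-")

def test_slugify_lowercases_and_dashes():
    assert slugify("Hello World") == "hello-world"

# Integration test: exercises components working together (hypothetical Database)
def test_save_and_load_user(tmp_path):
    db = Database(path=tmp_path / "users.db")
    db.save_user("john@example.com")
    assert db.load_user("john@example.com") is not None
```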
Test Pyramid
```
        /\
       /E2E\          Few, slow, expensive
      /------\
     /Integr- \
    /  ation   \      Some tests
   /------------\
  /  Unit Tests  \    Many, fast, cheap
 /________________\
```
2. unittest - Built-in Testing Framework
Basic Structure
```python
import unittest

class TestMyFunction(unittest.TestCase):
    def setUp(self):
        """Run before each test method."""
        self.data = [1, 2, 3]

    def tearDown(self):
        """Run after each test method."""
        self.data = None

    def test_something(self):
        """Test names must start with 'test_'."""
        result = sum(self.data)
        self.assertEqual(result, 6)

    def test_another_thing(self):
        """Each test should test one thing."""
        self.assertEqual(len(self.data), 3)

if __name__ == '__main__':
    unittest.main()
```
Assertion Methods
```python
# Equality
self.assertEqual(a, b)          # a == b
self.assertNotEqual(a, b)       # a != b

# Boolean
self.assertTrue(x)              # bool(x) is True
self.assertFalse(x)             # bool(x) is False

# Identity
self.assertIs(a, b)             # a is b
self.assertIsNot(a, b)          # a is not b
self.assertIsNone(x)            # x is None
self.assertIsNotNone(x)         # x is not None

# Membership
self.assertIn(a, b)             # a in b
self.assertNotIn(a, b)          # a not in b

# Types
self.assertIsInstance(a, b)     # isinstance(a, b)
self.assertNotIsInstance(a, b)

# Comparisons
self.assertGreater(a, b)        # a > b
self.assertGreaterEqual(a, b)   # a >= b
self.assertLess(a, b)           # a < b
self.assertLessEqual(a, b)      # a <= b

# Floating point
self.assertAlmostEqual(a, b, places=7)

# Exceptions
self.assertRaises(ExceptionType, func, args)

# With context manager
with self.assertRaises(ValueError):
    int("not a number")

# Check the exception message
with self.assertRaises(ValueError) as context:
    int("abc")
self.assertIn("invalid literal", str(context.exception))
```
setUp and tearDown Levels
```python
class TestWithSetup(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Run once before all tests in the class."""
        cls.connection = create_database_connection()

    @classmethod
    def tearDownClass(cls):
        """Run once after all tests in the class."""
        cls.connection.close()

    def setUp(self):
        """Run before each test method."""
        self.transaction = self.connection.begin()

    def tearDown(self):
        """Run after each test method."""
        self.transaction.rollback()
```
Running Tests
```bash
# Run all tests in a file
python -m unittest test_module.py

# Run a specific test class
python -m unittest test_module.TestClassName

# Run a specific test method
python -m unittest test_module.TestClassName.test_method

# Discover and run all tests
python -m unittest discover

# Verbose output
python -m unittest -v test_module.py
```
3. pytest - Modern Testing Framework
Installation
```bash
pip install pytest
```
Basic pytest
```python
# test_example.py

def test_addition():
    assert 1 + 1 == 2

def test_string():
    assert "hello".upper() == "HELLO"

def test_list():
    items = [1, 2, 3]
    assert len(items) == 3
    assert 2 in items
```
Running pytest
```bash
# Run all tests
pytest

# Verbose output
pytest -v

# Run a specific file
pytest test_example.py

# Run a specific test
pytest test_example.py::test_addition

# Show print statements
pytest -s

# Stop on first failure
pytest -x

# Run last failed tests
pytest --lf

# Run tests matching a pattern
pytest -k "addition"
```
pytest Fixtures
```python
import pytest

@pytest.fixture
def sample_list():
    """Fixture to provide test data."""
    return [1, 2, 3, 4, 5]

@pytest.fixture
def database():
    """Fixture with setup and teardown."""
    db = create_database()
    yield db      # the test runs here
    db.close()    # cleanup after the test

def test_list_sum(sample_list):
    """Use the fixture as a parameter."""
    assert sum(sample_list) == 15

def test_list_length(sample_list):
    """Same fixture, fresh data."""
    assert len(sample_list) == 5
```
Fixture Scopes
```python
@pytest.fixture(scope="function")   # Default: new instance for each test
def per_test_fixture():
    return create_resource()

@pytest.fixture(scope="class")      # Shared within a test class
def per_class_fixture():
    return create_resource()

@pytest.fixture(scope="module")     # Shared within a module
def per_module_fixture():
    return create_resource()

@pytest.fixture(scope="session")    # Shared across the whole test session
def per_session_fixture():
    return create_resource()
```
Parametrized Tests
```python
import pytest

@pytest.mark.parametrize("input,expected", [
    (1, 2),
    (2, 4),
    (3, 6),
    (4, 8),
])
def test_double(input, expected):
    assert input * 2 == expected

@pytest.mark.parametrize("word,reversed_word", [
    ("hello", "olleh"),
    ("python", "nohtyp"),
    ("", ""),
])
def test_reverse_string(word, reversed_word):
    assert word[::-1] == reversed_word
```
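Individual cases can also be given readable ids with pytest.param, which makes failure output easier to scan. A small sketch:
```python
import pytest

@pytest.mark.parametrize("word,reversed_word", [
    pytest.param("hello", "olleh", id="ascii"),
    pytest.param("", "", id="empty"),
])
def test_reverse_with_ids(word, reversed_word):
    assert word[::-1] == reversed_word
```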
Markers
```python
import sys
import time

import pytest

@pytest.mark.slow
def test_slow_operation():
    """This test is slow."""
    time.sleep(5)

@pytest.mark.skip(reason="Not implemented yet")
def test_future_feature():
    pass

@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires Python 3.10+")
def test_new_feature():
    pass

@pytest.mark.xfail(reason="Known bug")
def test_known_bug():
    assert False  # Expected to fail

# Run with markers:
#   pytest -m slow          # Run only slow tests
#   pytest -m "not slow"    # Skip slow tests
```
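Custom markers such as slow should be registered so pytest does not warn about unknown marks. A minimal sketch in pyproject.toml (a pytest.ini with the same keys works too):
```toml
[tool.pytest.ini_options]
markers = [
    "slow: marks tests as slow (deselect with -m 'not slow')",
]
```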
conftest.py
```python
# conftest.py - shared fixtures and configuration
import pytest

@pytest.fixture(scope="session")
def app():
    """Create the application instance."""
    from myapp import create_app
    return create_app(testing=True)

@pytest.fixture
def client(app):
    """Create a test client."""
    return app.test_client()

# Fixtures defined here are automatically available to all tests in this directory
```
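For example, a test module in the same directory can use these fixtures without importing anything. A sketch assuming a Flask-style test client; the route and status code are illustrative:
```python
# test_routes.py - hypothetical test using the shared conftest.py fixtures
def test_homepage(client):
    response = client.get("/")
    assert response.status_code == 200
```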
4. Mocking
What is Mocking?
Mocking replaces real objects with fake ones to:
- Isolate the code under test
- Avoid external dependencies (databases, APIs)
- Control behavior for edge cases
- Speed up tests
unittest.mock
```python
from unittest.mock import Mock, patch, MagicMock

# Create a mock object; any attribute access or call just works
mock = Mock()
mock.method.return_value = 42
assert mock.method(1, "a") == 42

# Check if and how it was called
mock.method.assert_called()
mock.method.assert_called_once()
mock.method.assert_called_with(1, "a")   # arguments of the most recent call

# Track calls on a fresh mock
mock = Mock()
mock.method("hello")
mock.method("world")
print(mock.method.call_count)      # 2
print(mock.method.call_args_list)  # [call('hello'), call('world')]
```
Patching
```python
import requests
from unittest.mock import patch

# Function to test (assume it lives in module.py)
def get_user_data(user_id):
    response = requests.get(f"https://api.example.com/users/{user_id}")
    return response.json()

# Test with patch - patch the name where it is looked up
@patch('module.requests.get')
def test_get_user_data(mock_get):
    # Configure the mock
    mock_get.return_value.json.return_value = {"name": "John", "id": 1}

    # Call the function
    result = get_user_data(1)

    # Assertions
    assert result["name"] == "John"
    mock_get.assert_called_once_with("https://api.example.com/users/1")

# Context manager form
def test_with_context():
    with patch('module.requests.get') as mock_get:
        mock_get.return_value.json.return_value = {"name": "Jane"}
        result = get_user_data(2)
        assert result["name"] == "Jane"
```
MagicMock
```python
from unittest.mock import MagicMock

# MagicMock supports magic methods
magic = MagicMock()
magic.__len__.return_value = 5
assert len(magic) == 5

magic.__getitem__.return_value = "value"
assert magic[0] == "value"
assert magic["key"] == "value"
```
Side Effects
```python
from unittest.mock import Mock

# Raise an exception
mock = Mock()
mock.side_effect = ValueError("Something went wrong")
# mock()  # Raises ValueError

# Return different values on successive calls
mock.side_effect = [1, 2, 3]
assert mock() == 1
assert mock() == 2
assert mock() == 3

# Custom function
def custom_side_effect(arg):
    if arg < 0:
        raise ValueError("Negative!")
    return arg * 2

mock.side_effect = custom_side_effect
assert mock(5) == 10
```
Patching Classes
```python
from unittest.mock import patch

class Database:
    def connect(self):
        return "real connection"

    def query(self, sql):
        return "real results"

@patch.object(Database, 'connect')
def test_database(mock_connect):
    mock_connect.return_value = "mock connection"
    db = Database()
    assert db.connect() == "mock connection"

# Patch the entire class where it is looked up (assume code under test uses module.Database)
@patch('module.Database')
def test_with_mock_class(MockDatabase):
    instance = MockDatabase.return_value
    instance.query.return_value = ["mocked", "results"]
    db = MockDatabase()   # code that calls module.Database() receives this mock
    assert db.query("SELECT *") == ["mocked", "results"]
```
5. Test-Driven Development (TDD)
TDD Cycle
1. RED - Write a failing test
2. GREEN - Write minimal code to pass
3. REFACTOR - Improve code while tests pass
Example TDD Workflow
```python
# Step 1: Write a failing test
def test_add():
    from calculator import add
    assert add(2, 3) == 5

# Step 2: Run the test (FAILS - no calculator module yet)

# Step 3: Write minimal code
# calculator.py
def add(a, b):
    return a + b

# Step 4: Run the test (PASSES)

# Step 5: Write another failing test
def test_add_negative():
    from calculator import add
    assert add(-1, 1) == 0

# Step 6: Run the tests (should pass with the current implementation)
# Step 7: Refactor if needed, keeping the tests green
```
6. Testing Patterns
Arrange-Act-Assert (AAA)
```python
def test_user_creation():
    # Arrange - set up test data
    name = "John Doe"
    email = "john@example.com"

    # Act - perform the action
    user = User(name=name, email=email)

    # Assert - verify the result
    assert user.name == name
    assert user.email == email
    assert user.id is not None
```
Given-When-Then (BDD Style)
```python
def test_login_success():
    # Given: a registered user
    user = create_user("john@example.com", "password123")

    # When: they attempt to log in with correct credentials
    result = login("john@example.com", "password123")

    # Then: they should be authenticated
    assert result.success
    assert result.user == user
```
Test Isolation
```python
import pytest

class TestUserService:
    def test_create_user_success(self, db_session):
        """Each test gets a fresh database."""
        service = UserService(db_session)
        user = service.create("John", "john@example.com")
        assert user.id is not None

    def test_create_user_duplicate_email(self, db_session):
        """This test does not see users created by other tests."""
        service = UserService(db_session)
        service.create("John", "john@example.com")
        with pytest.raises(DuplicateEmailError):
            service.create("Jane", "john@example.com")
```
7. Code Coverage
Using coverage.py
```bash
# Install
pip install coverage

# Run tests with coverage
coverage run -m pytest

# Generate a report
coverage report

# Generate an HTML report
coverage html
# Open htmlcov/index.html in a browser

# Show missing lines
coverage report -m
```
With pytest-cov
```bash
pip install pytest-cov

# Run with coverage
pytest --cov=mymodule

# With an HTML report
pytest --cov=mymodule --cov-report=html

# Fail if coverage drops below a minimum
pytest --cov=mymodule --cov-fail-under=80
```
Coverage Configuration (pyproject.toml)
```toml
[tool.coverage.run]
source = ["src"]
branch = true

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "if TYPE_CHECKING:",
    "raise NotImplementedError",
]
```
8. Testing Best Practices
1. Test One Thing
```python
# Bad - tests multiple things
def test_user():
    user = User("John", 30)
    assert user.name == "John"
    assert user.age == 30
    assert user.is_adult()
    assert user.format() == "John (30)"

# Good - separate concerns
def test_user_creation():
    user = User("John", 30)
    assert user.name == "John"
    assert user.age == 30

def test_user_is_adult():
    assert User("John", 30).is_adult()
    assert not User("Kid", 10).is_adult()
```
2. Clear Test Names
```python
# Bad
def test_1():
    pass

# Good
def test_calculate_discount_applies_10_percent_for_orders_over_100():
    pass
```
3. Use Factories for Test Data
```python
import factory

class UserFactory(factory.Factory):
    class Meta:
        model = User

    name = factory.Faker('name')
    email = factory.Faker('email')
    age = factory.Faker('random_int', min=18, max=80)

def test_user_service():
    user = UserFactory()
    # or UserFactory(name="Specific Name")
```
4. Don't Test Implementation Details
```python
# Bad - tests internal implementation
def test_user_internal_state():
    user = User("John")
    assert user._internal_list == []

# Good - tests behavior
def test_user_has_no_orders_initially():
    user = User("John")
    assert user.order_count() == 0
```
9. Testing Async Code
pytest-asyncio
```bash
pip install pytest-asyncio
```
```python
import pytest
import pytest_asyncio

@pytest.mark.asyncio
async def test_async_function():
    result = await async_fetch_data()
    assert result == expected

# In pytest-asyncio's default (strict) mode, async fixtures use this decorator
@pytest_asyncio.fixture
async def async_client():
    client = await create_async_client()
    yield client
    await client.close()

@pytest.mark.asyncio
async def test_with_async_fixture(async_client):
    response = await async_client.get("/")
    assert response.status == 200
```
10. Property-Based Testing
Hypothesis
```bash
pip install hypothesis
```
```python
from hypothesis import given
import hypothesis.strategies as st

@given(st.integers(), st.integers())
def test_addition_commutative(a, b):
    """a + b should equal b + a for all integers."""
    assert a + b == b + a

@given(st.lists(st.integers()))
def test_sorted_is_sorted(lst):
    """sorted() should return a sorted list."""
    result = sorted(lst)
    assert all(result[i] <= result[i + 1] for i in range(len(result) - 1))

@given(st.text())
def test_encode_decode(s):
    """Encoding then decoding should return the original string."""
    assert s.encode('utf-8').decode('utf-8') == s
```
Summary
| Tool | Use Case |
|---|---|
| unittest | Built-in, good for simple tests |
| pytest | Modern, powerful, recommended |
| mock | Replace dependencies |
| coverage | Measure test coverage |
| hypothesis | Property-based testing |
| factory_boy | Generate test data |
| pytest-asyncio | Test async code |
Next Steps
After mastering testing:
- Practice TDD on a small project
- Set up CI/CD with automated testing (see the sketch below)
- Explore behavior-driven development (BDD)
- Learn about mutation testing
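As a starting point for the CI/CD item, here is a minimal sketch of a GitHub Actions workflow that runs the test suite on every push. GitHub Actions is just one option; the file path, Python version, and mymodule package name are assumptions.
```yaml
# .github/workflows/tests.yml (assumed path)
name: tests
on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - run: pip install pytest pytest-cov
      - run: pytest --cov=mymodule
```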