22 changes: 21 additions & 1 deletion .gitignore
@@ -48,6 +48,7 @@ nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
@@ -101,4 +102,23 @@ ENV/
/site

# mypy
.mypy_cache/

# Poetry - DO NOT ignore lock file
# poetry.lock is intentionally not ignored
dist/

# IDE files
.vscode/
.idea/
*.swp
*.swo
*~

# OS files
.DS_Store
Thumbs.db

# Testing artifacts
test-results/
.benchmarks/
320 changes: 320 additions & 0 deletions poetry.lock

Large diffs are not rendered by default.

86 changes: 86 additions & 0 deletions pyproject.toml
@@ -0,0 +1,86 @@
[tool.poetry]
name = "stn"
version = "1.0.1"
description = "Spatial Transformer Networks."
authors = ["Kevin Zakka <kevinarmandzakka@gmail.com>"]
license = "MIT"
readme = "README.md"
homepage = "https://github.com/kevinzakka/spatial-transformer-network"
repository = "https://github.com/kevinzakka/spatial-transformer-network"
keywords = ["ai", "neural networks", "machine learning", "ml", "deep learning", "dl", "spatial transformer networks"]
packages = [{include = "stn"}]

[tool.poetry.dependencies]
python = "^3.8"
numpy = "*"


[tool.poetry.group.dev.dependencies]
pytest = "^7.4.0"
pytest-cov = "^4.1.0"
pytest-mock = "^3.11.1"

[tool.poetry.scripts]
test = "pytest:main"
tests = "pytest:main"

[tool.pytest.ini_options]
minversion = "7.0"
addopts = [
    "-ra",
    "--strict-markers",
    "--strict-config",
    "--cov=stn",
    "--cov-branch",
    "--cov-report=term-missing:skip-covered",
    "--cov-report=html",
    "--cov-report=xml",
    "--cov-fail-under=80",
]
testpaths = ["tests"]
python_files = "test_*.py"
python_classes = "Test*"
python_functions = "test_*"
markers = [
    "unit: marks tests as unit tests (fast, isolated)",
    "integration: marks tests as integration tests (slower, may have dependencies)",
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
]

[tool.coverage.run]
source = ["stn"]
branch = true
parallel = true
omit = [
    "*/tests/*",
    "*/test_*.py",
    "*/__pycache__/*",
    "*/site-packages/*",
]

[tool.coverage.report]
precision = 2
show_missing = true
skip_covered = false
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "def __str__",
    "raise AssertionError",
    "raise NotImplementedError",
    "if __name__ == .__main__.:",
    "if TYPE_CHECKING:",
    "if typing.TYPE_CHECKING:",
    "@overload",
    "@abstractmethod",
]

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
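
Note on the test and tests entries under [tool.poetry.scripts]: they are console-script entry points that resolve to pytest's main function, so after poetry install the generated wrapper behaves roughly like the sketch below. The wrapper file itself is auto-generated by the installer; this is only an illustration of the mechanism.

import sys

import pytest

if __name__ == "__main__":
    # pytest.main() falls back to sys.argv[1:] when called with no args,
    # so extra flags pass straight through, e.g. `poetry run test -m "not slow"`.
    sys.exit(pytest.main())
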
Empty file added tests/__init__.py
145 changes: 145 additions & 0 deletions tests/conftest.py
@@ -0,0 +1,145 @@
import os
import tempfile
import shutil
from pathlib import Path
from typing import Generator, Dict, Any

import pytest
import numpy as np


@pytest.fixture
def temp_dir() -> Generator[Path, None, None]:
    """Create a temporary directory for test files."""
    temp_path = tempfile.mkdtemp()
    yield Path(temp_path)
    shutil.rmtree(temp_path)


@pytest.fixture
def mock_config() -> Dict[str, Any]:
    """Provide a mock configuration dictionary for testing."""
    return {
        "batch_size": 32,
        "learning_rate": 0.001,
        "epochs": 10,
        "image_size": (224, 224),
        "num_classes": 10,
        "device": "cpu",
    }


@pytest.fixture
def sample_numpy_array() -> np.ndarray:
    """Generate a sample numpy array for testing."""
    np.random.seed(42)
    return np.random.randn(10, 224, 224, 3).astype(np.float32)


@pytest.fixture
def sample_batch() -> Dict[str, np.ndarray]:
    """Generate a sample batch of data for testing."""
    np.random.seed(42)
    batch_size = 4
    height, width, channels = 28, 28, 1

    return {
        "images": np.random.randn(batch_size, height, width, channels).astype(np.float32),
        "labels": np.random.randint(0, 10, size=(batch_size,)),
        "theta": np.array([[1, 0, 0, 0, 1, 0]] * batch_size).astype(np.float32),
    }


@pytest.fixture
def identity_theta() -> np.ndarray:
    """Return identity transformation parameters."""
    return np.array([1, 0, 0, 0, 1, 0], dtype=np.float32)


@pytest.fixture
def mock_tensorflow_session():
    """Mock TensorFlow session for testing without GPU."""
    try:
        import tensorflow as tf
        # Configure TensorFlow to use CPU only for tests
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
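        # Note: TensorFlow reads CUDA_VISIBLE_DEVICES when it first initializes
        # CUDA, which happens lazily, so setting it here (after import but
        # before any GPU query) still hides the GPUs from the test process.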

        if hasattr(tf, 'config'):
            # TF 2.x: memory growth is configured via the tf.config.experimental API
            physical_devices = tf.config.list_physical_devices('GPU')
            for device in physical_devices:
                tf.config.experimental.set_memory_growth(device, True)

        return tf.Session() if hasattr(tf, 'Session') else None
    except ImportError:
        pytest.skip("TensorFlow not installed")


@pytest.fixture(autouse=True)
def cleanup_tensorflow():
    """Clean up TensorFlow resources after each test."""
    yield
    try:
        import tensorflow as tf
        if hasattr(tf, 'keras'):
            tf.keras.backend.clear_session()
        elif hasattr(tf, 'reset_default_graph'):
            tf.reset_default_graph()
    except ImportError:
        pass


@pytest.fixture
def sample_image_path(temp_dir: Path) -> Path:
    """Create a sample image file for testing."""
    image_path = temp_dir / "test_image.npy"
    np.save(image_path, np.random.randn(224, 224, 3))
    return image_path


@pytest.fixture
def mock_model_weights(temp_dir: Path) -> Path:
    """Create mock model weights file."""
    weights_path = temp_dir / "model_weights.npy"
    weights = {
        "conv1": np.random.randn(3, 3, 3, 32),
        "conv2": np.random.randn(3, 3, 32, 64),
        "fc1": np.random.randn(1024, 256),
        "fc2": np.random.randn(256, 6),
    }
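    # np.save pickles the dict into a 0-d object array; read it back with
    # np.load(weights_path, allow_pickle=True).item()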
    np.save(weights_path, weights)
    return weights_path


@pytest.fixture(scope="session")
def test_data_dir() -> Path:
    """Return the path to the test data directory."""
    return Path(__file__).parent / "test_data"


def pytest_configure(config):
    """Configure pytest with custom settings."""
    # Add custom marker descriptions
    config.addinivalue_line(
        "markers", "unit: mark test as a unit test"
    )
    config.addinivalue_line(
        "markers", "integration: mark test as an integration test"
    )
    config.addinivalue_line(
        "markers", "slow: mark test as slow running"
    )


def pytest_collection_modifyitems(config, items):
    """Modify test collection to add markers based on test location."""
    for item in items:
        # Add markers based on test file location
        if "unit" in str(item.fspath):
            item.add_marker(pytest.mark.unit)
        elif "integration" in str(item.fspath):
            item.add_marker(pytest.mark.integration)

        # Mark tests that have 'slow' in their name
        if "slow" in item.nodeid:
            item.add_marker(pytest.mark.slow)
Empty file added tests/integration/__init__.py
125 changes: 125 additions & 0 deletions tests/test_setup_validation.py
@@ -0,0 +1,125 @@
"""Validation tests to ensure the testing infrastructure is properly set up."""
import sys
from pathlib import Path

import pytest
import numpy as np


class TestSetupValidation:
    """Test class to validate the testing infrastructure."""

    @pytest.mark.unit
    def test_pytest_installed(self):
        """Verify pytest is installed and importable."""
        assert pytest.__version__

    @pytest.mark.unit
    def test_coverage_plugin_available(self):
        """Verify pytest-cov plugin is available."""
        import pytest_cov
        assert pytest_cov

    @pytest.mark.unit
    def test_mock_plugin_available(self):
        """Verify pytest-mock plugin is available."""
        import pytest_mock
        assert pytest_mock

    @pytest.mark.unit
    def test_project_importable(self):
        """Verify the stn package can be imported."""
        try:
            import stn
            assert stn
        except ImportError as e:
            if "tensorflow" in str(e):
                pytest.skip("TensorFlow not installed (optional dependency)")
            else:
                raise

    @pytest.mark.unit
    def test_fixtures_available(self, temp_dir, mock_config, sample_numpy_array):
        """Verify custom fixtures are available and working."""
        assert temp_dir.exists()
        assert isinstance(mock_config, dict)
        assert isinstance(sample_numpy_array, np.ndarray)

    @pytest.mark.unit
    def test_markers_configured(self, request):
        """Verify custom markers are properly configured."""
        markers = [marker.name for marker in request.node.iter_markers()]
        assert 'unit' in markers

    @pytest.mark.integration
    def test_integration_marker(self, request):
        """Test that the integration marker works."""
        markers = [marker.name for marker in request.node.iter_markers()]
        assert 'integration' in markers

    @pytest.mark.slow
    @pytest.mark.unit
    def test_slow_marker(self, request):
        """Test that the slow marker works."""
        markers = [marker.name for marker in request.node.iter_markers()]
        assert 'slow' in markers
        assert 'unit' in markers

    @pytest.mark.unit
    def test_temp_dir_fixture(self, temp_dir):
        """Test temporary directory fixture creates and cleans up properly."""
        test_file = temp_dir / "test.txt"
        test_file.write_text("test content")
        assert test_file.exists()
        assert test_file.read_text() == "test content"

    @pytest.mark.unit
    def test_sample_batch_fixture(self, sample_batch):
        """Test sample batch fixture provides correct data structure."""
        assert 'images' in sample_batch
        assert 'labels' in sample_batch
        assert 'theta' in sample_batch

        assert sample_batch['images'].shape == (4, 28, 28, 1)
        assert sample_batch['labels'].shape == (4,)
        assert sample_batch['theta'].shape == (4, 6)

    @pytest.mark.unit
    def test_identity_theta_fixture(self, identity_theta):
        """Test identity theta fixture returns correct transformation."""
        expected = np.array([1, 0, 0, 0, 1, 0], dtype=np.float32)
        np.testing.assert_array_equal(identity_theta, expected)

    @pytest.mark.unit
    def test_coverage_configured(self):
        """Verify coverage is properly configured."""
        # This test will pass if coverage is running;
        # the actual verification happens when running with coverage.
        assert True

    @pytest.mark.unit
    @pytest.mark.parametrize("value,expected", [
        (1, 1),
        (2, 4),
        (3, 9),
    ])
    def test_parametrize_works(self, value, expected):
        """Verify the pytest parametrize decorator works."""
        assert value ** 2 == expected


@pytest.mark.unit
def test_module_level_test():
"""Test that module-level tests work."""
assert True


def test_poetry_scripts_configured():
"""Test that poetry run test and poetry run tests commands are configured."""
pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
assert pyproject_path.exists()

content = pyproject_path.read_text()
assert "[tool.poetry.scripts]" in content
assert 'test = "pytest:main"' in content
assert 'tests = "pytest:main"' in content
Empty file added tests/unit/__init__.py