#!/bin/bash
#
# Setup pytest for CLI Testing (Python)
#
# This script installs and configures pytest for testing Click-based CLI applications
# Includes coverage reporting, fixtures, and CLI testing utilities
set -e
echo "🔧 Setting up pytest for CLI testing..."
# Check if Python is available
if ! command -v python3 &> /dev/null; then
    echo "❌ Error: python3 is not installed"
    exit 1
fi

# Check if pip is available
if ! command -v pip3 &> /dev/null; then
    echo "❌ Error: pip3 is not installed"
    exit 1
fi
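# Optional: for isolation, you may prefer to create and activate a virtualenv
# before installing (a common convention, not required by this script):
#   python3 -m venv .venv && source .venv/bin/activate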
# Install pytest and related dependencies
echo "📦 Installing pytest and dependencies..."
pip3 install --upgrade \
    pytest \
    pytest-cov \
    pytest-mock \
    click
# Create pytest configuration
echo "⚙️ Creating pytest configuration..."
cat > pytest.ini << 'EOF'
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts =
    -v
    --strict-markers
    --tb=short
    --cov=src
    --cov-report=term-missing
    --cov-report=html
    --cov-report=xml
markers =
    unit: Unit tests
    integration: Integration tests
    slow: Slow running tests
    cli: CLI command tests
filterwarnings =
    ignore::DeprecationWarning
EOF
# Create tests directory structure
echo "📁 Creating test directory structure..."
mkdir -p tests/{unit,integration,fixtures}
# Create conftest.py with common fixtures
echo "📝 Creating pytest fixtures..."
cat > tests/conftest.py << 'EOF'
"""
Pytest configuration and fixtures for CLI testing
"""
import pytest
from click.testing import CliRunner

from src.cli import cli  # Adjust import based on your CLI module


@pytest.fixture
def runner():
    """Create a CliRunner instance for testing"""
    return CliRunner()


@pytest.fixture
def isolated_runner():
    """Create a CliRunner with isolated filesystem"""
    runner = CliRunner()
    with runner.isolated_filesystem():
        yield runner


@pytest.fixture
def temp_workspace(tmp_path):
    """Create a temporary workspace directory"""
    workspace = tmp_path / 'workspace'
    workspace.mkdir()
    yield workspace
    # Cleanup handled by tmp_path fixture


@pytest.fixture
def mock_config(temp_workspace):
    """Create a mock configuration file"""
    config_file = temp_workspace / '.clirc'
    config_content = """
api_key: your_test_key_here
environment: development
verbose: false
"""
    config_file.write_text(config_content)
    return config_file


@pytest.fixture
def cli_harness(runner):
    """Create CLI test harness with helper methods"""
    class CLIHarness:
        def __init__(self, runner):
            self.runner = runner

        def run(self, args, input_data=None):
            """Run CLI command and return result"""
            return self.runner.invoke(cli, args, input=input_data)

        def assert_success(self, args, expected_in_output=None):
            """Assert command succeeds"""
            result = self.run(args)
            assert result.exit_code == 0, f"Command failed: {result.output}"
            if expected_in_output:
                assert expected_in_output in result.output
            return result

        def assert_failure(self, args, expected_in_output=None):
            """Assert command fails"""
            result = self.run(args)
            assert result.exit_code != 0, f"Command should have failed: {result.output}"
            if expected_in_output:
                assert expected_in_output in result.output
            return result

    return CLIHarness(runner)
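

# A minimal sketch of an environment-isolation fixture. The variable names
# (CLI_API_KEY, CLI_ENV) are illustrative -- swap in whatever your CLI
# actually reads from the environment.
@pytest.fixture
def clean_env(monkeypatch):
    """Remove CLI-related environment variables for a known starting state"""
    monkeypatch.delenv('CLI_API_KEY', raising=False)  # hypothetical variable
    monkeypatch.delenv('CLI_ENV', raising=False)      # hypothetical variable
    return monkeypatch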
EOF
# Create __init__.py files
touch tests/__init__.py
touch tests/unit/__init__.py
touch tests/integration/__init__.py
touch tests/fixtures/__init__.py
# Create sample test file
echo "📝 Creating sample test file..."
cat > tests/unit/test_cli.py << 'EOF'
"""
Unit tests for CLI commands
"""
from src.cli import cli  # Adjust import based on your CLI module


class TestVersionCommand:
    """Test version command"""

    def test_version_flag(self, runner):
        """Should display version with --version"""
        result = runner.invoke(cli, ['--version'])
        assert result.exit_code == 0
        # Adjust assertion based on your version format

    def test_version_output_format(self, runner):
        """Should display version in correct format"""
        result = runner.invoke(cli, ['--version'])
        assert result.output.count('.') >= 2  # X.Y.Z format


class TestHelpCommand:
    """Test help command"""

    def test_help_flag(self, runner):
        """Should display help with --help"""
        result = runner.invoke(cli, ['--help'])
        assert result.exit_code == 0
        assert 'Usage:' in result.output

    def test_help_shows_commands(self, runner):
        """Should list available commands"""
        result = runner.invoke(cli, ['--help'])
        assert 'Commands:' in result.output


class TestErrorHandling:
    """Test error handling"""

    def test_unknown_command(self, runner):
        """Should handle unknown commands gracefully"""
        result = runner.invoke(cli, ['unknown-command'])
        assert result.exit_code != 0
        assert 'no such command' in result.output.lower()

    def test_invalid_option(self, runner):
        """Should handle invalid options"""
        result = runner.invoke(cli, ['--invalid-option'])
        assert result.exit_code != 0
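

# A hedged sketch of testing interactive input: it assumes a command that
# prompts via click.prompt(). The 'greet' command and its output are
# illustrative, not part of the CLI scaffolding above.
class TestPromptInput:
    """Test commands that read from stdin"""

    def test_prompt_receives_piped_input(self, runner):
        """Should feed piped input to interactive prompts"""
        result = runner.invoke(cli, ['greet'], input='Ada\n')
        # Adjust the assertion to your command's actual behavior.
        assert 'Ada' in result.output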
EOF
# Create sample integration test
echo "📝 Creating sample integration test..."
cat > tests/integration/test_workflow.py << 'EOF'
"""
Integration tests for CLI workflows
"""
import pytest

from src.cli import cli  # Adjust import based on your CLI module


@pytest.mark.integration
class TestCompleteWorkflow:
    """Test complete CLI workflows"""

    def test_init_and_config_workflow(self, isolated_runner):
        """Should complete init -> config workflow"""
        runner = isolated_runner

        # Initialize project
        result = runner.invoke(cli, ['init', 'test-project'])
        assert result.exit_code == 0

        # Configure project
        result = runner.invoke(cli, ['config', 'set', 'key', 'value'])
        assert result.exit_code == 0

        # Verify configuration
        result = runner.invoke(cli, ['config', 'get', 'key'])
        assert result.exit_code == 0
        assert 'value' in result.output
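
    # Sketch: asserting on files the CLI writes, using the isolated
    # filesystem. The 'init' command and 'test-project' directory are the
    # same assumptions as the workflow test above.
    def test_init_creates_project_dir(self, isolated_runner):
        """Should create the project directory on disk"""
        from pathlib import Path  # local import keeps this sketch self-contained
        result = isolated_runner.invoke(cli, ['init', 'test-project'])
        assert result.exit_code == 0
        assert Path('test-project').exists()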
EOF
# Create requirements file for testing
echo "📝 Creating requirements-test.txt..."
cat > requirements-test.txt << 'EOF'
pytest>=7.0.0
pytest-cov>=4.0.0
pytest-mock>=3.10.0
click>=8.0.0
EOF
# Create .coveragerc for coverage configuration
echo "⚙️ Creating coverage configuration..."
cat > .coveragerc << 'EOF'
[run]
source = src
omit =
    tests/*
    */venv/*
    */virtualenv/*
    */__pycache__/*

[report]
exclude_lines =
    pragma: no cover
    def __repr__
    raise AssertionError
    raise NotImplementedError
    if __name__ == .__main__.:
    if TYPE_CHECKING:
    @abstractmethod
precision = 2
show_missing = True

[html]
directory = htmlcov
EOF
# Update .gitignore
echo "📝 Updating .gitignore..."
if [ -f .gitignore ]; then
    grep -qxF '__pycache__/' .gitignore || echo '__pycache__/' >> .gitignore
    grep -qxF '*.pyc' .gitignore || echo '*.pyc' >> .gitignore
    grep -qxF '.pytest_cache/' .gitignore || echo '.pytest_cache/' >> .gitignore
    grep -qxF 'htmlcov/' .gitignore || echo 'htmlcov/' >> .gitignore
    grep -qxF '.coverage' .gitignore || echo '.coverage' >> .gitignore
    grep -qxF 'coverage.xml' .gitignore || echo 'coverage.xml' >> .gitignore
else
    cat > .gitignore << 'EOF'
__pycache__/
*.pyc
*.pyo
*.pyd
.pytest_cache/
htmlcov/
.coverage
coverage.xml
*.log
.env
.env.local
EOF
fi
# Create Makefile for convenient test commands
echo "📝 Creating Makefile..."
cat > Makefile << 'EOF'
.PHONY: test test-unit test-integration test-cov test-watch clean

test:
	pytest

test-unit:
	pytest tests/unit -v

test-integration:
	pytest tests/integration -v

test-cov:
	pytest --cov --cov-report=html --cov-report=term

# test-watch requires the pytest-watch package (pip install pytest-watch),
# which provides the ptw command; pytest itself has no --watch flag.
test-watch:
	ptw

clean:
	rm -rf .pytest_cache htmlcov .coverage coverage.xml
	find . -type d -name __pycache__ -exec rm -rf {} +
	find . -type f -name "*.pyc" -delete
EOF
# Create README for tests
echo "📝 Creating test documentation..."
cat > tests/README.md << 'EOF'
# CLI Tests
## Running Tests
```bash
# Run all tests
pytest

# Run unit tests only
pytest tests/unit

# Run integration tests only
pytest tests/integration

# Run with coverage
pytest --cov --cov-report=html

# Run specific test file
pytest tests/unit/test_cli.py

# Run specific test function
pytest tests/unit/test_cli.py::TestVersionCommand::test_version_flag

# Run with verbose output
pytest -v

# Run and show print statements
pytest -s
```
## Using Makefile
```bash
# Run all tests
make test

# Run unit tests
make test-unit

# Run integration tests
make test-integration

# Run with coverage report
make test-cov

# Clean test artifacts
make clean
```
## Test Structure
- `unit/` - Unit tests for individual functions and commands
- `integration/` - Integration tests for complete workflows
- `fixtures/` - Shared test fixtures and utilities (see the sketch below)
- `conftest.py` - Pytest configuration and common fixtures
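
For example, a shared helper module in `fixtures/` might look like this (the module name and contents are illustrative):

```python
# tests/fixtures/helpers.py (hypothetical module)
def write_config(path, **settings):
    """Write a throwaway config file for a test and return its path"""
    cfg = path / '.clirc'
    cfg.write_text('\n'.join(f'{key}: {value}' for key, value in settings.items()))
    return cfg
```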
## Writing Tests
Use the fixtures from `conftest.py`:
```python
def test_example(runner):
    """Test using CliRunner fixture"""
    result = runner.invoke(cli, ['command', '--flag'])
    assert result.exit_code == 0
    assert 'expected' in result.output


def test_with_harness(cli_harness):
    """Test using CLI harness"""
    result = cli_harness.assert_success(['command'], 'expected output')
```
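
Commands that touch the filesystem can use the `isolated_runner` fixture, which runs inside a throwaway working directory (the `export` command below is illustrative):

```python
def test_writes_file(isolated_runner):
    """Test a command that writes a file, without polluting the repo"""
    result = isolated_runner.invoke(cli, ['export', 'out.txt'])
    assert result.exit_code == 0
```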
## Test Markers
Use markers to categorize tests:
```python
@pytest.mark.unit
def test_unit_example():
    pass


@pytest.mark.integration
def test_integration_example():
    pass


@pytest.mark.slow
def test_slow_operation():
    pass
```
Run specific markers:
```bash
pytest -m unit
pytest -m "not slow"
```
## Coverage
Coverage reports are generated in the `htmlcov/` directory.
Open `htmlcov/index.html` to view the detailed coverage report.

Target: 80%+ coverage for all modules.
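
To enforce the target in CI, pytest-cov can fail the run when coverage drops below a threshold:

```bash
pytest --cov --cov-fail-under=80
```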
EOF
echo "✅ pytest setup complete!"
echo ""
echo "Next steps:"
echo " 1. Run 'pytest' to execute tests"
echo " 2. Run 'make test-cov' to see coverage report"
echo " 3. Add more tests in tests/unit/ and tests/integration/"
echo ""
echo "📚 Test files created:"
echo " - pytest.ini"
echo " - .coveragerc"
echo " - tests/conftest.py"
echo " - tests/unit/test_cli.py"
echo " - tests/integration/test_workflow.py"
echo " - tests/README.md"
echo " - Makefile"