Initial commit

Commit 70c36b5eff by Zhongwei Li, 2025-11-30 09:04:14 +08:00
248 changed files with 47482 additions and 0 deletions

@@ -0,0 +1,82 @@
#!/bin/bash
#
# Run CLI Tests
#
# Detects the project type and runs appropriate tests with coverage
set -e
echo "🧪 Running CLI tests..."
# Detect project type
if [ -f "package.json" ]; then
    PROJECT_TYPE="node"
elif [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then
    PROJECT_TYPE="python"
else
    echo "❌ Error: Could not detect project type"
    echo "   Expected package.json (Node.js) or setup.py/pyproject.toml (Python)"
    exit 1
fi

# Run tests based on project type
if [ "$PROJECT_TYPE" == "node" ]; then
    echo "📦 Node.js project detected"

    # Check if npm test is configured
    if ! grep -q '"test"' package.json 2>/dev/null; then
        echo "❌ Error: No test script found in package.json"
        echo "   Run setup-jest-testing.sh first"
        exit 1
    fi

    # Install dependencies if needed
    if [ ! -d "node_modules" ]; then
        echo "📦 Installing dependencies..."
        npm install
    fi

    # Run tests with coverage
    echo "🧪 Running Jest tests..."
    npm run test:coverage

    # Display coverage summary
    if [ -f "coverage/lcov-report/index.html" ]; then
        echo ""
        echo "✅ Tests complete!"
        echo "📊 Coverage report: coverage/lcov-report/index.html"
    fi

elif [ "$PROJECT_TYPE" == "python" ]; then
    echo "🐍 Python project detected"

    # Check if pytest is installed
    if ! command -v pytest &> /dev/null; then
        echo "❌ Error: pytest is not installed"
        echo "   Run setup-pytest-testing.sh first"
        exit 1
    fi
    # Activate a virtual environment if one exists (this script does not create one)
    if [ -d "venv" ]; then
        echo "🔧 Activating virtual environment..."
        source venv/bin/activate
    elif [ -d ".venv" ]; then
        echo "🔧 Activating virtual environment..."
        source .venv/bin/activate
    fi

    # Run tests with coverage
    echo "🧪 Running pytest tests..."
    pytest --cov --cov-report=term-missing --cov-report=html

    # Display coverage summary
    if [ -d "htmlcov" ]; then
        echo ""
        echo "✅ Tests complete!"
        echo "📊 Coverage report: htmlcov/index.html"
    fi
fi
echo ""
echo "🎉 All tests passed!"
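
One thing worth noting about the Python branch above: the runner only activates an existing `venv/` or `.venv/`, it never creates one. A minimal preparation sketch follows; the filename `./run-cli-tests.sh` is an assumed name for the script above, not one stated in this commit.

```bash
# Create the virtual environment the runner will later activate
python3 -m venv .venv
source .venv/bin/activate

# Install the test dependencies listed by the pytest setup script further down
pip install -r requirements-test.txt

# From then on, test runs from the project root pick up .venv automatically
./run-cli-tests.sh   # assumed filename for the script above
```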

@@ -0,0 +1,235 @@
#!/bin/bash
#
# Setup Jest for CLI Testing (Node.js/TypeScript)
#
# This script installs and configures Jest for testing CLI applications
# Includes TypeScript support, coverage reporting, and CLI testing utilities
set -e
echo "🔧 Setting up Jest for CLI testing..."
# Check if npm is available
if ! command -v npm &> /dev/null; then
    echo "❌ Error: npm is not installed"
    exit 1
fi
# Install Jest and related dependencies
echo "📦 Installing Jest and dependencies..."
npm install --save-dev \
    jest \
    @types/jest \
    ts-jest \
    @types/node
# Create Jest configuration
echo "⚙️ Creating Jest configuration..."
cat > jest.config.js << 'EOF'
module.exports = {
  preset: 'ts-jest',
  testEnvironment: 'node',
  roots: ['<rootDir>/tests'],
  testMatch: [
    '**/__tests__/**/*.ts',
    '**/?(*.)+(spec|test).ts'
  ],
  collectCoverageFrom: [
    'src/**/*.{ts,js}',
    '!src/**/*.d.ts',
    '!src/**/*.test.ts',
    '!src/**/__tests__/**'
  ],
  coverageDirectory: 'coverage',
  // 'json-summary' writes coverage/coverage-summary.json, which the validation script reads
  coverageReporters: ['text', 'lcov', 'html', 'json-summary'],
  coverageThreshold: {
    global: {
      branches: 70,
      functions: 70,
      lines: 70,
      statements: 70
    }
  },
  verbose: true,
  testTimeout: 10000
};
EOF
# Create tests directory structure
echo "📁 Creating test directory structure..."
mkdir -p tests/{unit,integration,helpers}
# Create test helper file
echo "📝 Creating test helpers..."
cat > tests/helpers/cli-helpers.ts << 'EOF'
import { execSync } from 'child_process';
import path from 'path';

export interface CLIResult {
  stdout: string;
  stderr: string;
  code: number;
}

export const CLI_PATH = path.join(__dirname, '../../bin/cli');

export function runCLI(args: string): CLIResult {
  try {
    const stdout = execSync(`${CLI_PATH} ${args}`, {
      encoding: 'utf8',
      stdio: 'pipe',
    });
    return { stdout, stderr: '', code: 0 };
  } catch (error: any) {
    return {
      stdout: error.stdout || '',
      stderr: error.stderr || '',
      code: error.status || 1,
    };
  }
}
EOF
# Create sample test file
echo "📝 Creating sample test file..."
cat > tests/unit/cli.test.ts << 'EOF'
import { runCLI } from '../helpers/cli-helpers';

describe('CLI Tests', () => {
  test('should display version', () => {
    const { stdout, code } = runCLI('--version');
    expect(code).toBe(0);
    expect(stdout).toMatch(/\d+\.\d+\.\d+/);
  });

  test('should display help', () => {
    const { stdout, code } = runCLI('--help');
    expect(code).toBe(0);
    expect(stdout).toContain('Usage:');
  });
});
EOF
# Create TypeScript configuration for tests
echo "⚙️ Creating TypeScript configuration..."
if [ ! -f tsconfig.json ]; then
    cat > tsconfig.json << 'EOF'
{
  "compilerOptions": {
    "target": "ES2020",
    "module": "commonjs",
    "lib": ["ES2020"],
    "outDir": "./dist",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "resolveJsonModule": true,
    "declaration": true,
    "declarationMap": true,
    "sourceMap": true
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules", "dist", "tests"]
}
EOF
fi
# Update package.json scripts
echo "⚙️ Updating package.json scripts..."
if [ -f package.json ]; then
    # Check if jq is available for JSON manipulation
    if command -v jq &> /dev/null; then
        # Add test scripts using jq
        tmp=$(mktemp)
        jq '.scripts.test = "jest" |
            .scripts["test:watch"] = "jest --watch" |
            .scripts["test:coverage"] = "jest --coverage" |
            .scripts["test:ci"] = "jest --ci --coverage --maxWorkers=2"' \
            package.json > "$tmp"
        mv "$tmp" package.json
    else
        echo "⚠️ jq not found. Please manually add test scripts to package.json:"
        echo '  "test": "jest"'
        echo '  "test:watch": "jest --watch"'
        echo '  "test:coverage": "jest --coverage"'
        echo '  "test:ci": "jest --ci --coverage --maxWorkers=2"'
    fi
fi
# Create .gitignore entries
echo "📝 Updating .gitignore..."
if [ -f .gitignore ]; then
    grep -qxF 'coverage/' .gitignore || echo 'coverage/' >> .gitignore
    grep -qxF '*.log' .gitignore || echo '*.log' >> .gitignore
else
    cat > .gitignore << 'EOF'
node_modules/
dist/
coverage/
*.log
.env
.env.local
EOF
fi
# Create README for tests
echo "📝 Creating test documentation..."
cat > tests/README.md << 'EOF'
# CLI Tests

## Running Tests

```bash
# Run all tests
npm test

# Run tests in watch mode
npm run test:watch

# Run tests with coverage
npm run test:coverage

# Run tests in CI mode
npm run test:ci
```

## Test Structure

- `unit/` - Unit tests for individual functions
- `integration/` - Integration tests for complete workflows
- `helpers/` - Test helper functions and utilities

## Writing Tests

Use the `runCLI` helper to execute CLI commands:

```typescript
import { runCLI } from '../helpers/cli-helpers';

test('should execute command', () => {
  const { stdout, stderr, code } = runCLI('command --flag');
  expect(code).toBe(0);
  expect(stdout).toContain('expected output');
});
```

## Coverage

Coverage reports are generated in the `coverage/` directory.
Target: 70% coverage for branches, functions, lines, and statements.
EOF
echo "✅ Jest setup complete!"
echo ""
echo "Next steps:"
echo " 1. Run 'npm test' to execute tests"
echo " 2. Add more tests in tests/unit/ and tests/integration/"
echo " 3. Run 'npm run test:coverage' to see coverage report"
echo ""
echo "📚 Test files created:"
echo " - jest.config.js"
echo " - tests/helpers/cli-helpers.ts"
echo " - tests/unit/cli.test.ts"
echo " - tests/README.md"
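
A few optional smoke checks after the setup script finishes can confirm the generated configuration is picked up. This is a sketch, not part of the script; note that the sample tests shell out to `bin/cli`, which this setup script does not create, so `npm test` is expected to fail until that entry point exists and answers `--version`/`--help`.

```bash
# Jest should discover the generated sample test file
npx jest --listTests

# The generated tsconfig.json should type-check the sources without emitting output
npx tsc --noEmit -p tsconfig.json

# Runs the sample tests; expect failures until bin/cli exists
npm test
```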

@@ -0,0 +1,448 @@
#!/bin/bash
#
# Setup pytest for CLI Testing (Python)
#
# This script installs and configures pytest for testing Click-based CLI applications
# Includes coverage reporting, fixtures, and CLI testing utilities
set -e
echo "🔧 Setting up pytest for CLI testing..."
# Check if Python is available
if ! command -v python3 &> /dev/null; then
    echo "❌ Error: python3 is not installed"
    exit 1
fi

# Check if pip is available
if ! command -v pip3 &> /dev/null; then
    echo "❌ Error: pip3 is not installed"
    exit 1
fi
# Install pytest and related dependencies
echo "📦 Installing pytest and dependencies..."
pip3 install --upgrade \
    pytest \
    pytest-cov \
    pytest-mock \
    click
# Create pytest configuration
echo "⚙️ Creating pytest configuration..."
cat > pytest.ini << 'EOF'
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts =
    -v
    --strict-markers
    --tb=short
    --cov=src
    --cov-report=term-missing
    --cov-report=html
    --cov-report=xml
markers =
    unit: Unit tests
    integration: Integration tests
    slow: Slow running tests
    cli: CLI command tests
filterwarnings =
    ignore::DeprecationWarning
EOF
# Create tests directory structure
echo "📁 Creating test directory structure..."
mkdir -p tests/{unit,integration,fixtures}
# Create conftest.py with common fixtures
echo "📝 Creating pytest fixtures..."
cat > tests/conftest.py << 'EOF'
"""
Pytest configuration and fixtures for CLI testing
"""
import pytest
import tempfile
import shutil
from pathlib import Path
from click.testing import CliRunner

from src.cli import cli  # Adjust import based on your CLI module


@pytest.fixture
def runner():
    """Create a CliRunner instance for testing"""
    return CliRunner()


@pytest.fixture
def isolated_runner():
    """Create a CliRunner with isolated filesystem"""
    runner = CliRunner()
    with runner.isolated_filesystem():
        yield runner


@pytest.fixture
def temp_workspace(tmp_path):
    """Create a temporary workspace directory"""
    workspace = tmp_path / 'workspace'
    workspace.mkdir()
    yield workspace
    # Cleanup handled by tmp_path fixture


@pytest.fixture
def mock_config(temp_workspace):
    """Create a mock configuration file"""
    config_file = temp_workspace / '.clirc'
    config_content = """
api_key: your_test_key_here
environment: development
verbose: false
"""
    config_file.write_text(config_content)
    return config_file


@pytest.fixture
def cli_harness(runner):
    """Create CLI test harness with helper methods"""
    class CLIHarness:
        def __init__(self, runner):
            self.runner = runner

        def run(self, args, input_data=None):
            """Run CLI command and return result"""
            return self.runner.invoke(cli, args, input=input_data)

        def assert_success(self, args, expected_in_output=None):
            """Assert command succeeds"""
            result = self.run(args)
            assert result.exit_code == 0, f"Command failed: {result.output}"
            if expected_in_output:
                assert expected_in_output in result.output
            return result

        def assert_failure(self, args, expected_in_output=None):
            """Assert command fails"""
            result = self.run(args)
            assert result.exit_code != 0, f"Command should have failed: {result.output}"
            if expected_in_output:
                assert expected_in_output in result.output
            return result

    return CLIHarness(runner)
EOF
# Create __init__.py files
touch tests/__init__.py
touch tests/unit/__init__.py
touch tests/integration/__init__.py
touch tests/fixtures/__init__.py
# Create sample test file
echo "📝 Creating sample test file..."
cat > tests/unit/test_cli.py << 'EOF'
"""
Unit tests for CLI commands
"""
import pytest
from click.testing import CliRunner

from src.cli import cli  # Adjust import based on your CLI module


class TestVersionCommand:
    """Test version command"""

    def test_version_flag(self, runner):
        """Should display version with --version"""
        result = runner.invoke(cli, ['--version'])
        assert result.exit_code == 0
        # Adjust assertion based on your version format

    def test_version_output_format(self, runner):
        """Should display version in correct format"""
        result = runner.invoke(cli, ['--version'])
        assert result.output.count('.') >= 2  # X.Y.Z format


class TestHelpCommand:
    """Test help command"""

    def test_help_flag(self, runner):
        """Should display help with --help"""
        result = runner.invoke(cli, ['--help'])
        assert result.exit_code == 0
        assert 'Usage:' in result.output

    def test_help_shows_commands(self, runner):
        """Should list available commands"""
        result = runner.invoke(cli, ['--help'])
        assert 'Commands:' in result.output


class TestErrorHandling:
    """Test error handling"""

    def test_unknown_command(self, runner):
        """Should handle unknown commands gracefully"""
        result = runner.invoke(cli, ['unknown-command'])
        assert result.exit_code != 0
        assert 'no such command' in result.output.lower()

    def test_invalid_option(self, runner):
        """Should handle invalid options"""
        result = runner.invoke(cli, ['--invalid-option'])
        assert result.exit_code != 0
EOF
# Create sample integration test
echo "📝 Creating sample integration test..."
cat > tests/integration/test_workflow.py << 'EOF'
"""
Integration tests for CLI workflows
"""
import pytest
from click.testing import CliRunner

from src.cli import cli  # Adjust import based on your CLI module


@pytest.mark.integration
class TestCompleteWorkflow:
    """Test complete CLI workflows"""

    def test_init_and_config_workflow(self, isolated_runner):
        """Should complete init -> config workflow"""
        runner = isolated_runner

        # Initialize project
        result = runner.invoke(cli, ['init', 'test-project'])
        assert result.exit_code == 0

        # Configure project
        result = runner.invoke(cli, ['config', 'set', 'key', 'value'])
        assert result.exit_code == 0

        # Verify configuration
        result = runner.invoke(cli, ['config', 'get', 'key'])
        assert result.exit_code == 0
        assert 'value' in result.output
EOF
# Create requirements file for testing
echo "📝 Creating requirements-test.txt..."
cat > requirements-test.txt << 'EOF'
pytest>=7.0.0
pytest-cov>=4.0.0
pytest-mock>=3.10.0
click>=8.0.0
EOF
# Create .coveragerc for coverage configuration
echo "⚙️ Creating coverage configuration..."
cat > .coveragerc << 'EOF'
[run]
source = src
omit =
    tests/*
    */venv/*
    */virtualenv/*
    */__pycache__/*

[report]
exclude_lines =
    pragma: no cover
    def __repr__
    raise AssertionError
    raise NotImplementedError
    if __name__ == .__main__.:
    if TYPE_CHECKING:
    @abstractmethod
precision = 2
show_missing = True

[html]
directory = htmlcov
EOF
# Update .gitignore
echo "📝 Updating .gitignore..."
if [ -f .gitignore ]; then
    grep -qxF '__pycache__/' .gitignore || echo '__pycache__/' >> .gitignore
    grep -qxF '*.pyc' .gitignore || echo '*.pyc' >> .gitignore
    grep -qxF '.pytest_cache/' .gitignore || echo '.pytest_cache/' >> .gitignore
    grep -qxF 'htmlcov/' .gitignore || echo 'htmlcov/' >> .gitignore
    grep -qxF '.coverage' .gitignore || echo '.coverage' >> .gitignore
    grep -qxF 'coverage.xml' .gitignore || echo 'coverage.xml' >> .gitignore
else
    cat > .gitignore << 'EOF'
__pycache__/
*.pyc
*.pyo
*.pyd
.pytest_cache/
htmlcov/
.coverage
coverage.xml
*.log
.env
.env.local
EOF
fi
# Create Makefile for convenient test commands
echo "📝 Creating Makefile..."
cat > Makefile << 'EOF'
# Note: recipe lines must be indented with tabs.
.PHONY: test test-unit test-integration test-cov test-watch clean

test:
	pytest

test-unit:
	pytest tests/unit -v

test-integration:
	pytest tests/integration -v

test-cov:
	pytest --cov --cov-report=html --cov-report=term

test-watch:
	ptw  # requires pytest-watch (pip install pytest-watch); plain pytest has no --watch flag

clean:
	rm -rf .pytest_cache htmlcov .coverage coverage.xml
	find . -type d -name __pycache__ -exec rm -rf {} +
	find . -type f -name "*.pyc" -delete
EOF
# Create README for tests
echo "📝 Creating test documentation..."
cat > tests/README.md << 'EOF'
# CLI Tests

## Running Tests

```bash
# Run all tests
pytest

# Run unit tests only
pytest tests/unit

# Run integration tests only
pytest tests/integration

# Run with coverage
pytest --cov --cov-report=html

# Run specific test file
pytest tests/unit/test_cli.py

# Run specific test function
pytest tests/unit/test_cli.py::TestVersionCommand::test_version_flag

# Run with verbose output
pytest -v

# Run and show print statements
pytest -s
```

## Using Makefile

```bash
# Run all tests
make test

# Run unit tests
make test-unit

# Run integration tests
make test-integration

# Run with coverage report
make test-cov

# Clean test artifacts
make clean
```

## Test Structure

- `unit/` - Unit tests for individual functions and commands
- `integration/` - Integration tests for complete workflows
- `fixtures/` - Shared test fixtures and utilities
- `conftest.py` - Pytest configuration and common fixtures

## Writing Tests

Use the fixtures from `conftest.py`:

```python
def test_example(runner):
    """Test using CliRunner fixture"""
    result = runner.invoke(cli, ['command', '--flag'])
    assert result.exit_code == 0
    assert 'expected' in result.output


def test_with_harness(cli_harness):
    """Test using CLI harness"""
    result = cli_harness.assert_success(['command'], 'expected output')
```

## Test Markers

Use markers to categorize tests:

```python
@pytest.mark.unit
def test_unit_example():
    pass


@pytest.mark.integration
def test_integration_example():
    pass


@pytest.mark.slow
def test_slow_operation():
    pass
```

Run specific markers:

```bash
pytest -m unit
pytest -m "not slow"
```

## Coverage

Coverage reports are generated in the `htmlcov/` directory.
Open `htmlcov/index.html` to view a detailed coverage report.
Target: 80%+ coverage for all modules.
EOF
echo "✅ pytest setup complete!"
echo ""
echo "Next steps:"
echo " 1. Run 'pytest' to execute tests"
echo " 2. Run 'make test-cov' to see coverage report"
echo " 3. Add more tests in tests/unit/ and tests/integration/"
echo ""
echo "📚 Test files created:"
echo " - pytest.ini"
echo " - .coveragerc"
echo " - tests/conftest.py"
echo " - tests/unit/test_cli.py"
echo " - tests/integration/test_workflow.py"
echo " - tests/README.md"
echo " - Makefile"
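
Similar optional checks apply after the pytest setup. The generated samples import `src.cli` (flagged in the files themselves as "Adjust import based on your CLI module"), so collection will fail until that module exposes a `cli` group; the marker names below come from the generated pytest.ini.

```bash
# Collection should succeed once src/cli.py (or the adjusted module) exposes `cli`
pytest --collect-only -q

# Run only tests carrying a given marker
pytest -m unit -q
pytest -m "not slow" -q
```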

@@ -0,0 +1,127 @@
#!/bin/bash
#
# Validate Test Coverage
#
# Checks that test coverage meets minimum thresholds
set -e
# Default thresholds
MIN_COVERAGE=${MIN_COVERAGE:-70}
echo "📊 Validating test coverage..."
# Detect project type
if [ -f "package.json" ]; then
    PROJECT_TYPE="node"
elif [ -f "setup.py" ] || [ -f "pyproject.toml" ]; then
    PROJECT_TYPE="python"
else
    echo "❌ Error: Could not detect project type"
    exit 1
fi
# Check coverage for Node.js projects
if [ "$PROJECT_TYPE" == "node" ]; then
    echo "📦 Node.js project detected"

    # Check if coverage data exists
    if [ ! -d "coverage" ]; then
        echo "❌ Error: No coverage data found"
        echo "   Run 'npm run test:coverage' first"
        exit 1
    fi

    # Check if coverage summary exists
    if [ ! -f "coverage/coverage-summary.json" ]; then
        echo "❌ Error: coverage-summary.json not found"
        exit 1
    fi

    # Extract coverage percentages using jq if available
    if command -v jq &> /dev/null; then
        LINES=$(jq '.total.lines.pct' coverage/coverage-summary.json)
        STATEMENTS=$(jq '.total.statements.pct' coverage/coverage-summary.json)
        FUNCTIONS=$(jq '.total.functions.pct' coverage/coverage-summary.json)
        BRANCHES=$(jq '.total.branches.pct' coverage/coverage-summary.json)

        echo ""
        echo "Coverage Summary:"
        echo "  Lines: ${LINES}%"
        echo "  Statements: ${STATEMENTS}%"
        echo "  Functions: ${FUNCTIONS}%"
        echo "  Branches: ${BRANCHES}%"
        echo ""

        # Check thresholds
        FAILED=0
        if (( $(echo "$LINES < $MIN_COVERAGE" | bc -l) )); then
            echo "❌ Lines coverage (${LINES}%) below threshold (${MIN_COVERAGE}%)"
            FAILED=1
        fi
        if (( $(echo "$STATEMENTS < $MIN_COVERAGE" | bc -l) )); then
            echo "❌ Statements coverage (${STATEMENTS}%) below threshold (${MIN_COVERAGE}%)"
            FAILED=1
        fi
        if (( $(echo "$FUNCTIONS < $MIN_COVERAGE" | bc -l) )); then
            echo "❌ Functions coverage (${FUNCTIONS}%) below threshold (${MIN_COVERAGE}%)"
            FAILED=1
        fi
        if (( $(echo "$BRANCHES < $MIN_COVERAGE" | bc -l) )); then
            echo "❌ Branches coverage (${BRANCHES}%) below threshold (${MIN_COVERAGE}%)"
            FAILED=1
        fi

        if [ $FAILED -eq 1 ]; then
            echo ""
            echo "❌ Coverage validation failed"
            exit 1
        fi

        echo "✅ Coverage thresholds met!"
    else
        echo "⚠️ jq not installed, skipping detailed validation"
        echo "   Install jq for detailed coverage validation"
    fi

# Check coverage for Python projects
elif [ "$PROJECT_TYPE" == "python" ]; then
    echo "🐍 Python project detected"

    # Check if coverage data exists
    if [ ! -f ".coverage" ]; then
        echo "❌ Error: No coverage data found"
        echo "   Run 'pytest --cov' first"
        exit 1
    fi

    # Generate coverage report
    if command -v coverage &> /dev/null; then
        echo ""
        coverage report

        # Get total coverage percentage
        TOTAL_COVERAGE=$(coverage report | tail -1 | awk '{print $NF}' | sed 's/%//')

        echo ""
        echo "Total Coverage: ${TOTAL_COVERAGE}%"
        echo "Minimum Required: ${MIN_COVERAGE}%"

        # Compare coverage
        if (( $(echo "$TOTAL_COVERAGE < $MIN_COVERAGE" | bc -l) )); then
            echo ""
            echo "❌ Coverage (${TOTAL_COVERAGE}%) below threshold (${MIN_COVERAGE}%)"
            exit 1
        fi

        echo ""
        echo "✅ Coverage thresholds met!"
    else
        echo "❌ Error: coverage tool not installed"
        echo "   Install with: pip install coverage"
        exit 1
    fi
fi
echo ""
echo "🎉 Coverage validation passed!"
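
Taken together, the four scripts can back a single CI job: run the setup once, execute the suite, then gate on coverage. A minimal sketch follows, assuming the scripts sit in the repository root; only `setup-jest-testing.sh` and `setup-pytest-testing.sh` are named in this commit's messages, so the other two filenames are assumptions.

```bash
#!/bin/bash
set -e

# One-time setup (pick the script matching the project type)
./setup-jest-testing.sh        # or ./setup-pytest-testing.sh for Python

# Run tests with coverage (assumed filename for the first script in this commit)
./run-cli-tests.sh

# Gate on a stricter threshold than the default 70% (assumed filename for the last script)
MIN_COVERAGE=80 ./validate-coverage.sh
```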