Initial commit

1011 skills/testing-strategy/EXAMPLES.md (new file; diff suppressed because it is too large)

946 skills/testing-strategy/REFERENCE.md (new file)
@@ -0,0 +1,946 @@

# Testing Reference

Complete configurations, project structures, and setup guides for Grey Haven testing infrastructure.

## Table of Contents

- [TypeScript Configuration](#typescript-configuration)
- [Python Configuration](#python-configuration)
- [Project Structures](#project-structures)
- [Doppler Configuration](#doppler-configuration)
- [GitHub Actions Configuration](#github-actions-configuration)
- [Coverage Configuration](#coverage-configuration)

## TypeScript Configuration

### Complete vitest.config.ts

```typescript
// vitest.config.ts
import { defineConfig } from "vitest/config";
import react from "@vitejs/plugin-react";
import path from "path";

export default defineConfig({
  plugins: [react()],
  test: {
    // Enable global test APIs (describe, it, expect)
    globals: true,

    // Use jsdom for browser-like environment
    environment: "jsdom",

    // Run setup file before tests
    setupFiles: ["./tests/setup.ts"],

    // Coverage configuration
    coverage: {
      // Use V8 coverage provider (faster than Istanbul)
      provider: "v8",

      // Coverage reporters ("json-summary" feeds scripts/coverage_check.py)
      reporter: ["text", "json", "json-summary", "html"],

      // Exclude from coverage
      exclude: [
        "node_modules/",
        "tests/",
        "**/*.config.ts",
        "**/*.d.ts",
        "**/types/",
        "**/__mocks__/",
      ],

      // Minimum coverage thresholds (enforced in CI)
      thresholds: {
        lines: 80,
        functions: 80,
        branches: 80,
        statements: 80,
      },
    },

    // Environment variables for tests
    env: {
      // Doppler provides these at runtime
      DATABASE_URL_ADMIN: process.env.DATABASE_URL_ADMIN || "postgresql://localhost/test",
      REDIS_URL: process.env.REDIS_URL || "redis://localhost:6379",
      VITE_API_URL: process.env.VITE_API_URL || "http://localhost:3000",
    },

    // Test timeout (ms)
    testTimeout: 10000,

    // Hook timeouts
    hookTimeout: 10000,

    // Retry failed tests
    retry: 0,

    // Run tests in parallel worker threads (Vitest 1.x pool API)
    pool: "threads",
    poolOptions: {
      threads: {
        // Maximum concurrent threads
        maxThreads: 4,
        // Minimum concurrent threads
        minThreads: 1,
      },
    },
  },

  // Path aliases
  resolve: {
    alias: {
      "~": path.resolve(__dirname, "./src"),
    },
  },
});
```

**Field Explanations:**

- `globals: true` - Makes test APIs available without imports
- `environment: "jsdom"` - Simulates a browser environment for React components
- `setupFiles` - Runs before each test file
- `coverage.provider: "v8"` - Fast coverage using the V8 engine
- `coverage.thresholds` - Enforces minimum coverage percentages
- `testTimeout: 10000` - Each test must complete within 10 seconds
- `pool: "threads"` - Runs tests in parallel worker threads for speed
- `retry: 0` - Don't retry failed tests (fail fast)

### Test Setup File (tests/setup.ts)

```typescript
// tests/setup.ts
import { afterEach, beforeAll, afterAll, vi } from "vitest";
import { cleanup } from "@testing-library/react";
import "@testing-library/jest-dom/vitest";

// Cleanup after each test case
afterEach(() => {
  cleanup();
  vi.clearAllMocks();
});

// Setup before all tests
beforeAll(() => {
  // Mock environment variables
  process.env.VITE_API_URL = "http://localhost:3000";
  process.env.DATABASE_URL_ADMIN = "postgresql://localhost/test";

  // Mock window.matchMedia (for responsive components)
  Object.defineProperty(window, "matchMedia", {
    writable: true,
    value: vi.fn().mockImplementation((query) => ({
      matches: false,
      media: query,
      onchange: null,
      addListener: vi.fn(),
      removeListener: vi.fn(),
      addEventListener: vi.fn(),
      removeEventListener: vi.fn(),
      dispatchEvent: vi.fn(),
    })),
  });

  // Mock IntersectionObserver
  global.IntersectionObserver = vi.fn().mockImplementation(() => ({
    observe: vi.fn(),
    unobserve: vi.fn(),
    disconnect: vi.fn(),
  }));
});

// Cleanup after all tests
afterAll(async () => {
  // Close database connections
  // Clean up any resources
});
```

### Package.json Scripts

```json
{
  "scripts": {
    "test": "vitest run",
    "test:watch": "vitest",
    "test:ui": "vitest --ui",
    "test:coverage": "vitest run --coverage",
    "test:unit": "vitest run tests/unit",
    "test:integration": "vitest run tests/integration",
    "test:e2e": "playwright test",
    "test:e2e:ui": "playwright test --ui"
  },
  "devDependencies": {
    "@playwright/test": "^1.40.0",
    "@testing-library/jest-dom": "^6.1.5",
    "@testing-library/react": "^14.1.2",
    "@testing-library/user-event": "^14.5.1",
    "@vitest/ui": "^1.0.4",
    "@faker-js/faker": "^8.3.1",
    "vitest": "^1.0.4",
    "@vitest/coverage-v8": "^1.0.4"
  }
}
```

### Playwright Configuration

```typescript
// playwright.config.ts
import { defineConfig, devices } from "@playwright/test";

export default defineConfig({
  testDir: "./tests/e2e",
  fullyParallel: true,
  forbidOnly: !!process.env.CI,
  retries: process.env.CI ? 2 : 0,
  workers: process.env.CI ? 1 : undefined,
  reporter: "html",

  use: {
    baseURL: process.env.PLAYWRIGHT_BASE_URL || "http://localhost:3000",
    trace: "on-first-retry",
  },

  projects: [
    {
      name: "chromium",
      use: { ...devices["Desktop Chrome"] },
    },
    {
      name: "firefox",
      use: { ...devices["Desktop Firefox"] },
    },
    {
      name: "webkit",
      use: { ...devices["Desktop Safari"] },
    },
  ],

  webServer: {
    command: "bun run dev",
    url: "http://localhost:3000",
    reuseExistingServer: !process.env.CI,
  },
});
```
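
A minimal spec that runs against the `baseURL` above (a sketch — the assertions are deliberately loose; adjust to your app):

```typescript
// tests/e2e/smoke.spec.ts — illustrative smoke test
import { test, expect } from "@playwright/test";

test("home page renders", async ({ page }) => {
  await page.goto("/"); // resolved against baseURL
  await expect(page).toHaveTitle(/.+/); // any non-empty title
});
```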

## Python Configuration

### Complete pyproject.toml

```toml
# pyproject.toml

[tool.pytest.ini_options]
# Test discovery
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]

# Command line options
addopts = [
    "--strict-markers",           # Error on unknown markers
    "--strict-config",            # Error on config errors
    "-ra",                        # Show extra test summary
    "--cov=app",                  # Measure coverage of app/ directory
    "--cov-report=term-missing",  # Show missing lines in terminal
    "--cov-report=html",          # Generate HTML coverage report
    "--cov-report=xml",           # Generate XML for CI tools
    "--cov-fail-under=80",        # Fail if coverage < 80%
    "-v",                         # Verbose output
]

# Test markers (use with @pytest.mark.unit, etc.)
markers = [
    "unit: Fast, isolated unit tests",
    "integration: Tests involving multiple components",
    "e2e: End-to-end tests through full flows",
    "benchmark: Performance tests",
    "slow: Tests that take >5 seconds",
]

# Async support
asyncio_mode = "auto"

# Test output
console_output_style = "progress"

# Warnings
filterwarnings = [
    "error",                              # Treat warnings as errors
    "ignore::DeprecationWarning",         # Ignore deprecation warnings
    "ignore::PendingDeprecationWarning",  # Ignore pending deprecations
]

# Coverage configuration
[tool.coverage.run]
source = ["app"]
omit = [
    "*/tests/*",
    "*/conftest.py",
    "*/__init__.py",
    "*/migrations/*",
    "*/config/*",
]
branch = true
parallel = true

[tool.coverage.report]
precision = 2
show_missing = true
skip_covered = false
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "def __str__",
    "raise AssertionError",
    "raise NotImplementedError",
    "if __name__ == .__main__.:",
    "if TYPE_CHECKING:",
    "class .*\\bProtocol\\):",
    "@(abc\\.)?abstractmethod",
]

[tool.coverage.html]
directory = "htmlcov"

[tool.coverage.xml]
output = "coverage.xml"
```

**Configuration Explanations:**

- `testpaths = ["tests"]` - Only look for tests in the tests/ directory
- `--strict-markers` - Fail if a test uses an undefined marker
- `--cov=app` - Measure coverage of the app/ directory
- `--cov-fail-under=80` - CI fails if coverage < 80%
- `asyncio_mode = "auto"` - Auto-detect async tests
- `branch = true` - Measure branch coverage (more thorough)
- `parallel = true` - Support parallel test execution

### Development Dependencies

```txt
# requirements-dev.txt

# Testing
pytest==8.0.0
pytest-asyncio==0.23.3
pytest-cov==4.1.0
pytest-mock==3.12.0
pytest-benchmark==4.0.0

# Test utilities
faker==22.0.0
factory-boy==3.3.0
httpx==0.26.0

# Type checking
mypy==1.8.0

# Linting
ruff==0.1.9

# Task runner
taskipy==1.12.2
```
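
Install alongside the production requirements inside a virtual environment (mirrors the CI steps below):

```bash
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt -r requirements-dev.txt
```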

### Taskfile Configuration

```toml
# pyproject.toml (continued)

[tool.taskipy.tasks]
# Testing tasks
test = "doppler run -- pytest"
test-unit = "doppler run -- pytest -m unit"
test-integration = "doppler run -- pytest -m integration"
test-e2e = "doppler run -- pytest -m e2e"
test-benchmark = "doppler run -- pytest -m benchmark"
test-coverage = "doppler run -- pytest --cov=app --cov-report=html"
test-watch = "doppler run -- pytest-watch"

# Linting and formatting
lint = "ruff check app tests"
format = "ruff format app tests"
typecheck = "mypy app"

# Combined checks
check = "task lint && task typecheck && task test"
```
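
With the dev dependencies installed, taskipy provides the `task` entry point and each task runs by name:

```bash
task test-unit   # doppler run -- pytest -m unit
task check       # lint + typecheck + full test run
```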

## Project Structures

### TypeScript Project Structure

```plaintext
project-root/
├── src/
│   ├── routes/                  # TanStack Router pages
│   │   ├── index.tsx
│   │   ├── settings/
│   │   │   ├── profile.tsx
│   │   │   └── account.tsx
│   │   └── __root.tsx
│   ├── lib/
│   │   ├── components/          # React components
│   │   │   ├── auth/
│   │   │   │   ├── provider.tsx
│   │   │   │   └── login-form.tsx
│   │   │   ├── ui/              # UI primitives (shadcn)
│   │   │   │   ├── button.tsx
│   │   │   │   └── input.tsx
│   │   │   └── UserProfile.tsx
│   │   ├── server/              # Server-side code
│   │   │   ├── db/
│   │   │   │   ├── schema.ts    # Drizzle schema
│   │   │   │   └── index.ts     # DB connection
│   │   │   └── functions/       # Server functions
│   │   │       ├── users.ts
│   │   │       └── auth.ts
│   │   ├── hooks/               # Custom React hooks
│   │   │   ├── use-auth.ts
│   │   │   └── use-users.ts
│   │   ├── utils/               # Utility functions
│   │   │   ├── format.ts
│   │   │   └── validation.ts
│   │   └── types/               # TypeScript types
│   │       ├── user.ts
│   │       └── api.ts
│   └── public/                  # Static assets
│       └── favicon.ico
├── tests/
│   ├── setup.ts                 # Test setup
│   ├── unit/                    # Unit tests
│   │   ├── lib/
│   │   │   ├── components/
│   │   │   │   └── UserProfile.test.tsx
│   │   │   └── utils/
│   │   │       └── format.test.ts
│   │   └── server/
│   │       └── functions/
│   │           └── users.test.ts
│   ├── integration/             # Integration tests
│   │   ├── auth-flow.test.ts
│   │   └── user-repository.test.ts
│   ├── e2e/                     # Playwright E2E tests
│   │   ├── user-registration.spec.ts
│   │   └── user-workflow.spec.ts
│   └── factories/               # Test data factories
│       ├── user.factory.ts
│       └── tenant.factory.ts
├── vitest.config.ts             # Vitest configuration
├── playwright.config.ts         # Playwright configuration
├── package.json
└── tsconfig.json
```

### Python Project Structure

```plaintext
project-root/
├── app/
│   ├── __init__.py
│   ├── main.py                  # FastAPI application
│   ├── config/
│   │   ├── __init__.py
│   │   └── settings.py          # Application settings
│   ├── db/
│   │   ├── __init__.py
│   │   ├── base.py              # Database connection
│   │   ├── models/              # SQLModel entities
│   │   │   ├── __init__.py
│   │   │   ├── base.py          # Base model
│   │   │   ├── user.py
│   │   │   └── tenant.py
│   │   └── repositories/        # Repository pattern
│   │       ├── __init__.py
│   │       ├── base.py          # Base repository
│   │       └── user_repository.py
│   ├── routers/                 # FastAPI endpoints
│   │   ├── __init__.py
│   │   ├── users.py
│   │   └── auth.py
│   ├── services/                # Business logic
│   │   ├── __init__.py
│   │   ├── user_service.py
│   │   └── auth_service.py
│   ├── schemas/                 # Pydantic schemas (API contracts)
│   │   ├── __init__.py
│   │   ├── user.py
│   │   └── auth.py
│   └── utils/                   # Utilities
│       ├── __init__.py
│       ├── security.py
│       └── validation.py
├── tests/
│   ├── __init__.py
│   ├── conftest.py              # Shared fixtures
│   ├── unit/                    # Unit tests (@pytest.mark.unit)
│   │   ├── __init__.py
│   │   ├── repositories/
│   │   │   └── test_user_repository.py
│   │   └── services/
│   │       └── test_user_service.py
│   ├── integration/             # Integration tests
│   │   ├── __init__.py
│   │   └── test_user_api.py
│   ├── e2e/                     # E2E tests
│   │   ├── __init__.py
│   │   └── test_full_user_flow.py
│   ├── benchmark/               # Benchmark tests
│   │   ├── __init__.py
│   │   └── test_repository_performance.py
│   └── factories/               # Test data factories
│       ├── __init__.py
│       └── user_factory.py
├── pyproject.toml               # Python project config
├── requirements.txt             # Production dependencies
├── requirements-dev.txt         # Development dependencies
└── .python-version              # Python version (3.12)
```

## Doppler Configuration

### Doppler Setup

```bash
# Install Doppler CLI
brew install dopplerhq/cli/doppler                 # macOS
# or
curl -Ls https://cli.doppler.com/install.sh | sh   # Linux

# Authenticate with Doppler
doppler login

# Setup Doppler in project
doppler setup

# Select project and config
# Project: your-project-name
# Config: test (or dev, staging, production)
```

### Doppler Environment Configs

Grey Haven projects use these Doppler configs (see the commands below for inspecting and switching between them):

1. **dev** - Local development environment
2. **test** - Running tests (CI and local)
3. **staging** - Staging environment
4. **production** - Production environment
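
To inspect or switch configs non-interactively (a sketch — the project name is a placeholder):

```bash
# List the configs available in the current project
doppler configs

# Point this directory at the test config without the interactive prompt
doppler setup --project your-project-name --config test
```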

### Test Environment Variables

**Database URLs:**

```bash
# PostgreSQL connection URLs (Doppler managed)
DATABASE_URL_ADMIN=postgresql+asyncpg://admin_user:password@localhost:5432/app_db
DATABASE_URL_AUTHENTICATED=postgresql+asyncpg://authenticated_user:password@localhost:5432/app_db
DATABASE_URL_ANON=postgresql+asyncpg://anon_user:password@localhost:5432/app_db

# Test database (separate from dev)
DATABASE_URL_TEST=postgresql+asyncpg://test_user:password@localhost:5432/test_db
```

**Redis:**

```bash
# Use a separate Redis DB for tests (DBs 0-15 are available)
REDIS_URL=redis://localhost:6379/1  # DB 1 for tests (dev uses 0)
```

**Authentication:**

```bash
# Better Auth secrets
BETTER_AUTH_SECRET=test-secret-key-min-32-chars-long
BETTER_AUTH_URL=http://localhost:3000

# JWT secrets
JWT_SECRET_KEY=test-jwt-secret-key
```

**External Services (use test/sandbox keys):**

```bash
# Stripe (test mode)
STRIPE_SECRET_KEY=sk_test_51AbCdEfGhIjKlMnOpQrStUv
STRIPE_PUBLISHABLE_KEY=pk_test_51AbCdEfGhIjKlMnOpQrStUv

# Resend (test mode)
RESEND_API_KEY=re_test_1234567890abcdef

# OpenAI (separate test key)
OPENAI_API_KEY=sk-test-1234567890abcdef
```

**E2E Testing:**

```bash
# Playwright base URL
PLAYWRIGHT_BASE_URL=http://localhost:3000

# Email testing service (for E2E tests)
MAILTRAP_API_TOKEN=your_mailtrap_token
```

### Running Tests with Doppler

**TypeScript:**

```bash
# Run all tests with Doppler
doppler run -- bun run test

# Run with a specific config
doppler run --config test -- bun run test

# Run coverage
doppler run -- bun run test:coverage

# Run E2E
doppler run -- bun run test:e2e
```

**Python:**

```bash
# Activate the virtual environment first!
source .venv/bin/activate

# Run all tests with Doppler
doppler run -- pytest

# Run with a specific config
doppler run --config test -- pytest

# Run specific markers
doppler run -- pytest -m unit
doppler run -- pytest -m integration
```

### Doppler in CI/CD

**GitHub Actions:**

```yaml
- name: Install Doppler CLI
  uses: dopplerhq/cli-action@v3

- name: Run tests with Doppler
  env:
    DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
  run: doppler run --config test -- bun run test:coverage
```

**Get a Doppler Service Token** (dashboard flow; a CLI equivalent follows the steps):

1. Go to the Doppler dashboard
2. Select your project
3. Go to Access → Service Tokens
4. Create a token for the `test` config
5. Add it as the `DOPPLER_TOKEN_TEST` secret in GitHub
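
The same token can be created from the CLI (a sketch — the token and project names are placeholders):

```bash
doppler configs tokens create ci-test \
  --project your-project-name \
  --config test \
  --plain
```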

## GitHub Actions Configuration

### TypeScript CI Workflow

```yaml
# .github/workflows/test-typescript.yml
name: TypeScript Tests

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

jobs:
  test:
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:16
        env:
          POSTGRES_DB: test_db
          POSTGRES_USER: test_user
          POSTGRES_PASSWORD: test_password
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432

      redis:
        image: redis:7-alpine
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 6379:6379

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Bun
        uses: oven-sh/setup-bun@v1
        with:
          bun-version: latest

      - name: Install Doppler CLI
        uses: dopplerhq/cli-action@v3

      - name: Install dependencies
        run: bun install

      - name: Run linter
        run: bun run lint

      - name: Run type check
        run: bun run typecheck

      - name: Run unit tests
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: doppler run --config test -- bun run test:unit

      - name: Run integration tests
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: doppler run --config test -- bun run test:integration

      - name: Run tests with coverage
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: doppler run --config test -- bun run test:coverage

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
        with:
          files: ./coverage/coverage-final.json
          flags: typescript
          name: typescript-coverage

      - name: Install Playwright browsers
        run: npx playwright install --with-deps

      - name: Run E2E tests
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: doppler run --config test -- bun run test:e2e

      - name: Upload Playwright report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: playwright-report
          path: playwright-report/
          retention-days: 30
```

### Python CI Workflow

```yaml
# .github/workflows/test-python.yml
name: Python Tests

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

jobs:
  test:
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:16
        env:
          POSTGRES_DB: test_db
          POSTGRES_USER: test_user
          POSTGRES_PASSWORD: test_password
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432

      redis:
        image: redis:7-alpine
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 6379:6379

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"
          cache: "pip"

      - name: Install Doppler CLI
        uses: dopplerhq/cli-action@v3

      - name: Create virtual environment
        run: python -m venv .venv

      - name: Install dependencies
        run: |
          source .venv/bin/activate
          pip install --upgrade pip
          pip install -r requirements.txt -r requirements-dev.txt

      - name: Run linter
        run: |
          source .venv/bin/activate
          ruff check app tests

      - name: Run type checker
        run: |
          source .venv/bin/activate
          mypy app

      - name: Run unit tests
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: |
          source .venv/bin/activate
          doppler run --config test -- pytest -m unit

      - name: Run integration tests
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: |
          source .venv/bin/activate
          doppler run --config test -- pytest -m integration

      - name: Run all tests with coverage
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: |
          source .venv/bin/activate
          doppler run --config test -- pytest --cov=app --cov-report=xml --cov-report=html

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
        with:
          files: ./coverage.xml
          flags: python
          name: python-coverage

      - name: Upload coverage HTML
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report
          path: htmlcov/
          retention-days: 30
```

## Coverage Configuration

### Coverage Thresholds

**Minimum requirements (enforced in CI):**

- **Lines:** 80%
- **Functions:** 80%
- **Branches:** 80%
- **Statements:** 80%

**Target goals:**

- **Critical paths:** 90%+
- **Security code:** 100% (auth, payments, tenant isolation)
- **Utility functions:** 95%+

### Excluding from Coverage

**TypeScript (vitest.config.ts):**

```typescript
coverage: {
  exclude: [
    "node_modules/",
    "tests/",
    "**/*.config.ts",
    "**/*.d.ts",
    "**/types/",
    "**/__mocks__/",
    "**/migrations/",
  ],
}
```

**Python (pyproject.toml):**

```toml
[tool.coverage.run]
omit = [
    "*/tests/*",
    "*/conftest.py",
    "*/__init__.py",
    "*/migrations/*",
    "*/config/*",
]
```

### Coverage Reports

**Viewing coverage locally:**

```bash
# TypeScript
bun run test:coverage
open coverage/index.html

# Python
source .venv/bin/activate
doppler run -- pytest --cov=app --cov-report=html
open htmlcov/index.html
```

**Coverage in CI:**

- Upload to Codecov for tracking over time
- Fail the build if coverage < 80%
- Comment the coverage diff on PRs (configured in `codecov.yml`, sketched below)
- Track coverage trends
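
A hedged `codecov.yml` sketch — tune targets to match the repo's CI policy:

```yaml
# codecov.yml — illustrative; thresholds should mirror the 80% CI gate
coverage:
  status:
    project:
      default:
        target: 80%     # overall coverage must stay at or above 80%
        threshold: 1%   # allow small fluctuation before failing
    patch:
      default:
        target: 80%     # new/changed lines in a PR must hit 80%
comment:
  layout: "diff, files" # PR comment shows the coverage diff per file
```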

### Pre-commit Hook for Coverage

```yaml
# .pre-commit-config.yaml
repos:
  - repo: local
    hooks:
      - id: test-coverage
        name: Check test coverage
        entry: bash -c 'source .venv/bin/activate && pytest --cov=app --cov-fail-under=80'
        language: system
        pass_filenames: false
        always_run: true
```
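
Install and exercise the hook locally:

```bash
pre-commit install          # register the git hook
pre-commit run --all-files  # run every hook once, including the coverage check
```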

324 skills/testing-strategy/SKILL.md (new file)
@@ -0,0 +1,324 @@

---
name: grey-haven-testing-strategy
description: "Grey Haven's comprehensive testing strategy - Vitest unit/integration/e2e for TypeScript, pytest markers for Python, >80% coverage requirement, fixture patterns, and Doppler for test environments. Use when writing tests, setting up test infrastructure, running tests, debugging test failures, improving coverage, configuring CI/CD, or when user mentions 'test', 'testing', 'pytest', 'vitest', 'coverage', 'TDD', 'test-driven development', 'unit test', 'integration test', 'e2e', 'end-to-end', 'test fixtures', 'mocking', 'test setup', 'CI testing'."
---

# Grey Haven Testing Strategy

**Comprehensive testing approach for TypeScript (Vitest) and Python (pytest) projects.**

Follow these standards when writing tests, setting up test infrastructure, or improving test coverage in Grey Haven codebases.

## Supporting Documentation

- **[EXAMPLES.md](EXAMPLES.md)** - Copy-paste test examples for Vitest and pytest
- **[REFERENCE.md](REFERENCE.md)** - Complete configurations, project structures, and CI setup
- **[templates/](templates/)** - Ready-to-use test templates
- **[checklists/](checklists/)** - Testing quality checklists
- **[scripts/](scripts/)** - Helper scripts for coverage and test execution

## Testing Philosophy

### Coverage Requirements

- **Minimum: 80% code coverage** for all projects (enforced in CI)
- **Target: 90%+ coverage** for critical paths
- **100% coverage** for security-critical code (auth, payments, multi-tenant isolation)

### Test Types (Markers)

Grey Haven uses consistent test markers across languages, applied as in the sketch below:

1. **unit**: Fast, isolated tests of single functions/classes
2. **integration**: Tests involving multiple components or external dependencies
3. **e2e**: End-to-end tests through full user flows
4. **benchmark**: Performance tests measuring speed/memory
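
Markers are plain pytest decorators; a minimal, self-contained sketch:

```python
import pytest


@pytest.mark.unit
def test_slug_is_lowercased_and_hyphenated():
    # Trivial pure transformation stands in for a real helper under test
    title = "Hello World"
    slug = title.lower().replace(" ", "-")
    assert slug == "hello-world"
```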

## TypeScript Testing (Vitest)

### Quick Setup

**Project Structure:**

```
tests/
├── unit/           # Fast, isolated tests
├── integration/    # Multi-component tests
└── e2e/            # Playwright tests
```

**Key Configuration:**

```typescript
// vitest.config.ts
export default defineConfig({
  test: {
    globals: true,
    environment: "jsdom",
    setupFiles: ["./tests/setup.ts"],
    coverage: {
      thresholds: { lines: 80, functions: 80, branches: 80, statements: 80 },
    },
  },
});
```

**Running Tests:**

```bash
bun run test              # Run all tests
bun run test:coverage     # With coverage report
bun run test:watch        # Watch mode
bun run test:ui           # UI mode
bun run test tests/unit/  # Unit tests only
```

**See [EXAMPLES.md](EXAMPLES.md#vitest-examples) for complete test examples.**

## Python Testing (pytest)

### Quick Setup

**Project Structure:**

```
tests/
├── conftest.py     # Shared fixtures
├── unit/           # @pytest.mark.unit
├── integration/    # @pytest.mark.integration
├── e2e/            # @pytest.mark.e2e
└── benchmark/      # @pytest.mark.benchmark
```

**Key Configuration:**

```toml
# pyproject.toml
[tool.pytest.ini_options]
addopts = ["--cov=app", "--cov-fail-under=80"]
markers = [
    "unit: Fast, isolated unit tests",
    "integration: Tests involving multiple components",
    "e2e: End-to-end tests through full flows",
    "benchmark: Performance tests",
]
```

**Running Tests:**

```bash
# ⚠️ ALWAYS activate the virtual environment first!
source .venv/bin/activate

# Run with Doppler for environment variables
doppler run -- pytest                 # All tests
doppler run -- pytest --cov=app      # With coverage
doppler run -- pytest -m unit        # Unit tests only
doppler run -- pytest -m integration # Integration tests only
doppler run -- pytest -m e2e         # E2E tests only
doppler run -- pytest -v             # Verbose output
```

**See [EXAMPLES.md](EXAMPLES.md#pytest-examples) for complete test examples.**

## Test Markers Explained

### Unit Tests

**Characteristics:**

- Fast execution (< 100ms per test)
- No external dependencies (database, API, file system)
- Mock all external services
- Test a single function/class in isolation

**Use for:**

- Utility functions
- Business logic
- Data transformations
- Component rendering (React Testing Library)

### Integration Tests

**Characteristics:**

- Test multiple components together
- May use a real database/Redis (with cleanup)
- Test API endpoints with the FastAPI TestClient
- Test React Query + server functions

**Use for:**

- API endpoint flows
- Database operations with repositories
- Authentication flows
- Multi-component interactions

### E2E Tests

**Characteristics:**

- Test complete user flows
- Use Playwright (TypeScript) or httpx (Python)
- Test from the user's perspective
- Slower execution (seconds per test)

**Use for:**

- Registration/login flows
- Critical user journeys
- Form submissions
- Multi-page workflows

### Benchmark Tests

**Characteristics:**

- Measure performance metrics
- Track execution time
- Monitor memory usage
- Detect performance regressions

**Use for** (see the sketch below):

- Database query performance
- Algorithm optimization
- API response times
- Batch operations
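
A minimal pytest-benchmark sketch — the sorting workload is a stand-in for a real query or batch operation:

```python
import pytest


@pytest.mark.benchmark
def test_sort_performance(benchmark):
    # `benchmark` is the pytest-benchmark fixture: it calls the function
    # repeatedly and records timing statistics for the report.
    result = benchmark(sorted, range(10_000, 0, -1))
    assert result[0] == 1
```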

## Environment Variables with Doppler

**⚠️ CRITICAL: Grey Haven uses Doppler for ALL environment variables.**

```bash
# Install Doppler
brew install dopplerhq/cli/doppler

# Authenticate and setup
doppler login
doppler setup

# Run tests with Doppler
doppler run -- bun run test  # TypeScript
doppler run -- pytest        # Python

# Use a specific config
doppler run --config test -- pytest
```

**Doppler provides:**

- `DATABASE_URL_TEST` - Test database connection
- `REDIS_URL` - Redis for tests (separate DB)
- `BETTER_AUTH_SECRET` - Auth secrets
- `STRIPE_SECRET_KEY` - External service keys (test mode)
- `PLAYWRIGHT_BASE_URL` - E2E test URL

**See [REFERENCE.md](REFERENCE.md#doppler-configuration) for complete setup.**

## Test Fixtures and Factories

### TypeScript Factories

```typescript
// tests/factories/user.factory.ts
import { faker } from "@faker-js/faker";

export function createMockUser(overrides = {}) {
  return {
    id: faker.string.uuid(),
    tenant_id: faker.string.uuid(),
    email_address: faker.internet.email(),
    name: faker.person.fullName(),
    ...overrides,
  };
}
```

### Python Fixtures

```python
# tests/conftest.py
@pytest.fixture
async def test_user(session, tenant_id):
    """Create a test user with tenant isolation."""
    user = User(
        tenant_id=tenant_id,
        email_address="test@example.com",
        name="Test User",
    )
    session.add(user)
    await session.commit()
    return user
```

**See [EXAMPLES.md](EXAMPLES.md#test-factories-and-fixtures) for more patterns.**

## Multi-Tenant Testing

**⚠️ ALWAYS test tenant isolation in multi-tenant projects:**

```python
@pytest.mark.unit
async def test_tenant_isolation(session, test_user, tenant_id):
    """Verify queries filter by tenant_id."""
    repo = UserRepository(session)

    # Should find the user with the correct tenant
    user = await repo.get_by_id(test_user.id, tenant_id)
    assert user is not None

    # Should NOT find the user with a different tenant
    different_tenant = uuid4()
    user = await repo.get_by_id(test_user.id, different_tenant)
    assert user is None
```

## Continuous Integration

**GitHub Actions with Doppler:**

```yaml
# .github/workflows/test.yml
- name: Run tests with Doppler
  env:
    DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
  run: doppler run --config test -- bun run test:coverage
```

**See [REFERENCE.md](REFERENCE.md#github-actions-configuration) for the complete workflow.**

## When to Apply This Skill

Use this skill when:

- ✅ Writing new tests for features
- ✅ Setting up test infrastructure (Vitest/pytest)
- ✅ Configuring CI/CD test pipelines
- ✅ Debugging failing tests
- ✅ Improving test coverage (<80%)
- ✅ Reviewing test code quality
- ✅ Setting up Doppler for test environments
- ✅ Creating test fixtures and factories
- ✅ Implementing a TDD workflow
- ✅ User mentions: "test", "testing", "pytest", "vitest", "coverage", "TDD", "unit test", "integration test", "e2e", "test setup", "CI testing"

## Template References

These testing patterns come from Grey Haven production templates:

- **Frontend**: `cvi-template` (Vitest + Playwright + React Testing Library)
- **Backend**: `cvi-backend-template` (pytest + FastAPI TestClient + async fixtures)

## Critical Reminders

1. **Coverage: 80% minimum** (enforced in CI, blocks merge)
2. **Test markers**: unit, integration, e2e, benchmark (use consistently)
3. **Doppler**: ALWAYS use it for test environment variables (never commit .env!)
4. **Virtual env**: MUST be activated for Python tests (`source .venv/bin/activate`)
5. **Tenant isolation**: ALWAYS test multi-tenant scenarios
6. **Fixtures**: Use factories for test data generation (faker library)
7. **Mocking**: Mock external services in unit tests (use vi.mock or pytest mocks)
8. **CI**: Run tests with `doppler run --config test`
9. **Database**: Use a separate test database (Doppler provides `DATABASE_URL_TEST`)
10. **Cleanup**: Clean up test data after each test (use fixtures with cleanup)

## Next Steps

- **Need test examples?** See [EXAMPLES.md](EXAMPLES.md) for copy-paste code
- **Need configurations?** See [REFERENCE.md](REFERENCE.md) for complete configs
- **Need templates?** See [templates/](templates/) for starter files
- **Need checklists?** Use [checklists/](checklists/) for systematic test reviews
- **Need to run tests?** Use [scripts/](scripts/) for helper utilities

214 skills/testing-strategy/checklists/test-quality-review.md (new file)
@@ -0,0 +1,214 @@

# Test Code Review Checklist

Use this checklist when reviewing test code in pull requests.

## General Test Quality

### Test Structure

- [ ] **Clear test names**: Descriptive, follows the `test_should_do_something_when_condition` pattern
- [ ] **One assertion focus**: Each test verifies one specific behavior
- [ ] **Arrange-Act-Assert**: Tests follow the AAA pattern clearly (see the sketch below)
- [ ] **No magic numbers**: Test values are self-explanatory or use named constants
- [ ] **Readable setup**: Test setup is clear and concise
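
A minimal illustration of the naming and AAA conventions — `User` here is a stand-in model; real tests import the app's own:

```python
from dataclasses import dataclass

import pytest


@dataclass
class User:
    """Stand-in model for the example."""
    first_name: str
    last_name: str

    def full_name(self) -> str:
        return f"{self.first_name} {self.last_name}"


@pytest.mark.unit
def test_should_return_full_name_when_both_names_given():
    # Arrange
    user = User(first_name="Ada", last_name="Lovelace")

    # Act
    full_name = user.full_name()

    # Assert
    assert full_name == "Ada Lovelace"
```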

### Test Independence

- [ ] **No shared state**: Tests don't depend on each other
- [ ] **Can run in any order**: Tests pass when run individually or in any sequence
- [ ] **Proper cleanup**: Tests clean up resources (database, files, mocks)
- [ ] **Isolated changes**: Tests don't pollute global state
- [ ] **Fresh fixtures**: Each test gets fresh test data

### Test Coverage

- [ ] **New code is tested**: All new functions/components have tests
- [ ] **Edge cases covered**: Null, empty, invalid inputs tested
- [ ] **Error paths tested**: Error handling and failure scenarios verified
- [ ] **Happy path tested**: Normal, expected behavior verified
- [ ] **Branch coverage**: All if/else and switch branches tested

## TypeScript/Vitest Review

### Component Tests

- [ ] **Correct rendering**: Components render without errors
- [ ] **User interactions**: Click, input, form submissions tested
- [ ] **Loading states**: Loading indicators tested
- [ ] **Error states**: Error messages and boundaries tested
- [ ] **Async handling**: Uses `waitFor()` for async state changes
- [ ] **Query wrapper**: TanStack Query components wrapped correctly
- [ ] **Accessibility**: Uses semantic queries (`getByRole`, `getByLabelText`)

### Mocking

- [ ] **Appropriate mocking**: Mocks external dependencies (APIs, modules)
- [ ] **Not over-mocked**: Integration tests use real implementations where appropriate
- [ ] **Clear mock setup**: Mock configuration is easy to understand
- [ ] **Mock verification**: Tests verify mocks were called correctly
- [ ] **Mock cleanup**: Mocks cleared after each test (`vi.clearAllMocks()`)

### Best Practices

- [ ] **Path aliases**: Uses `~/` for imports (not relative paths)
- [ ] **TypeScript types**: Test code is properly typed
- [ ] **Testing Library**: Uses `@testing-library/react` best practices
- [ ] **Vitest globals**: Uses globals (`describe`, `it`, `expect`) correctly
- [ ] **No console warnings**: Tests don't produce React warnings

## Python/pytest Review

### Unit Tests

- [ ] **Isolated tests**: No external dependencies (database, network)
- [ ] **Fast execution**: Unit tests complete in < 100ms
- [ ] **Proper fixtures**: Uses pytest fixtures appropriately
- [ ] **Mocking external services**: Uses `unittest.mock` or `pytest-mock`
- [ ] **Type hints**: Test functions have type hints

### Integration Tests

- [ ] **Real dependencies**: Uses real database/services where appropriate
- [ ] **Transaction handling**: Tests verify rollback on errors
- [ ] **Tenant isolation**: Tests verify multi-tenant data separation
- [ ] **Async/await**: Async tests use `async def` and `await`
- [ ] **Database cleanup**: Fixtures clean up test data

### Markers

- [ ] **Correct markers**: Tests marked with `@pytest.mark.unit`, `@pytest.mark.integration`, etc.
- [ ] **Consistent markers**: Markers match the test type (unit, integration, e2e, benchmark)
- [ ] **Slow marker**: Tests >5 seconds marked with `@pytest.mark.slow`

### Best Practices

- [ ] **Descriptive docstrings**: Test functions have clear docstrings
- [ ] **Factory usage**: Uses the factory pattern for test data
- [ ] **No hardcoded IDs**: Uses `uuid4()` for test IDs
- [ ] **Proper imports**: Imports organized and clear
- [ ] **No test pollution**: Tests don't leave data in the database

## Multi-Tenant Testing

### Tenant Isolation

- [ ] **Tenant ID filtering**: All queries filter by `tenant_id`
- [ ] **Cross-tenant access denied**: Tests verify users can't access another tenant's data
- [ ] **Tenant header required**: API tests include the `X-Tenant-ID` header
- [ ] **Repository methods**: All repository methods accept a `tenant_id` parameter
- [ ] **Query verification**: Tests verify the correct `tenant_id` in database queries

### Security

- [ ] **Authentication tested**: Protected endpoints require auth
- [ ] **Authorization tested**: Users can only access authorized resources
- [ ] **Input validation**: Invalid input properly rejected
- [ ] **SQL injection protected**: No raw SQL in tests (uses the ORM)
- [ ] **XSS protection**: Input sanitization tested where applicable

## Environment & Configuration

### Doppler

- [ ] **Doppler used**: Tests run with `doppler run --`
- [ ] **No hardcoded secrets**: No API keys or secrets in test code
- [ ] **Correct config**: Tests use the `test` Doppler config
- [ ] **Environment isolation**: Test database separate from dev

### Test Data

- [ ] **Faker/factory-boy**: Random test data uses faker
- [ ] **Realistic data**: Test data resembles production data
- [ ] **No PII**: Test data doesn't contain real personal information
- [ ] **Deterministic when needed**: Uses a seed for reproducible random data when necessary

## Performance

### Test Speed

- [ ] **Fast unit tests**: Unit tests < 100ms each
- [ ] **Reasonable integration tests**: Integration tests < 1 second each
- [ ] **Parallel execution**: Tests can run in parallel
- [ ] **No unnecessary waits**: No `sleep()` or arbitrary delays
- [ ] **Optimized queries**: Database queries are efficient

### Resource Usage

- [ ] **Minimal test data**: Creates only necessary test data
- [ ] **Connection cleanup**: Database connections closed properly
- [ ] **Memory efficient**: No memory leaks in test setup
- [ ] **File cleanup**: Temporary files deleted after tests

## CI/CD Compatibility

### GitHub Actions

- [ ] **Passes in CI**: Tests pass in GitHub Actions
- [ ] **No flaky tests**: Tests pass consistently (no intermittent failures)
- [ ] **Correct services**: Required services (postgres, redis) configured
- [ ] **Coverage upload**: Coverage reports uploaded correctly
- [ ] **Timeout appropriate**: Tests complete within CI timeout limits

### Coverage

- [ ] **Meets threshold**: Coverage meets the 80% minimum
- [ ] **No false positives**: Coverage accurately reflects tested code
- [ ] **Coverage trends**: Coverage doesn't decrease from the baseline
- [ ] **Critical paths covered**: Important features have high coverage

## Documentation

### Test Documentation

- [ ] **Clear test names**: Test intent obvious from the name
- [ ] **Helpful comments**: Complex test logic explained
- [ ] **Fixture documentation**: Custom fixtures documented
- [ ] **Test file organization**: Tests organized logically
- [ ] **README updated**: Testing docs updated if patterns changed

### Code Comments

- [ ] **Why, not what**: Comments explain why, not what the code does
- [ ] **No commented-out code**: Old test code removed
- [ ] **TODO comments tracked**: Any TODOs have tracking tickets
- [ ] **No misleading comments**: Comments accurate and up-to-date

## Red Flags to Watch For

### Anti-Patterns

- [ ] ❌ Tests that only test mocks
- [ ] ❌ Tests with no assertions
- [ ] ❌ Tests that test private implementation
- [ ] ❌ Brittle tests that break on refactoring
- [ ] ❌ Tests that depend on execution order
- [ ] ❌ Excessive setup code (>50% of the test)
- [ ] ❌ Tests with sleep/wait instead of proper async handling
- [ ] ❌ Tests that write to the production database
- [ ] ❌ Tests that make real API calls
- [ ] ❌ Tests with hardcoded production credentials

### Smells

- [ ] ⚠️ Very long test functions (>50 lines)
- [ ] ⚠️ Duplicate test code (could use fixtures)
- [ ] ⚠️ Tests with multiple assertions on different behaviors
- [ ] ⚠️ Tests that take >5 seconds
- [ ] ⚠️ Tests that fail intermittently
- [ ] ⚠️ Tests with complex logic (loops, conditionals)
- [ ] ⚠️ Tests that require manual setup to run
- [ ] ⚠️ Missing error assertions
- [ ] ⚠️ Testing framework workarounds/hacks

## Approval Criteria

Before approving a PR with tests:

- [ ] All tests pass locally and in CI
- [ ] Coverage meets the minimum threshold (80%)
- [ ] Tests follow Grey Haven conventions
- [ ] No anti-patterns or red flags
- [ ] Test code is readable and maintainable
- [ ] Tests verify correct behavior (not just implementation)
- [ ] Security and tenant isolation tested
- [ ] Documentation updated if needed

192 skills/testing-strategy/checklists/testing-checklist.md (new file)
@@ -0,0 +1,192 @@

# Testing Checklist

Use this checklist before submitting PRs to ensure comprehensive test coverage and quality.

## Pre-PR Testing Checklist

### Test Coverage

- [ ] All new functions/methods have unit tests
- [ ] All new components have component tests
- [ ] All new API endpoints have integration tests
- [ ] Critical user flows have E2E tests
- [ ] Code coverage is at least 80% (run `bun run test:coverage` or `pytest --cov`)
- [ ] No coverage regression from the previous version
- [ ] Security-critical code has 100% coverage (auth, payments, tenant isolation)

### Test Quality

- [ ] Tests follow the naming convention: `test_should_do_something_when_condition`
- [ ] Each test has a single, clear assertion focus
- [ ] Tests are independent (can run in any order)
- [ ] Tests clean up after themselves (no database pollution)
- [ ] No hardcoded values (use constants or fixtures)
- [ ] Test data uses factories (faker/factory-boy)
- [ ] External services are mocked (APIs, email, payments)
- [ ] Tests run in < 10 seconds (unit tests < 100ms each)

### Test Markers

- [ ] Unit tests marked with `@pytest.mark.unit` or in `tests/unit/`
- [ ] Integration tests marked with `@pytest.mark.integration` or in `tests/integration/`
- [ ] E2E tests marked with `@pytest.mark.e2e` or in `tests/e2e/`
- [ ] Slow tests marked with `@pytest.mark.slow` (> 5 seconds)

### Multi-Tenant Testing

- [ ] All database queries test tenant isolation
- [ ] Repository methods verify correct `tenant_id` filtering
- [ ] API endpoints test tenant header validation
- [ ] Cross-tenant access attempts are tested and fail correctly

### Environment Variables

- [ ] All tests use Doppler for environment variables
- [ ] No hardcoded secrets or API keys
- [ ] Test database is separate from the development database
- [ ] `.env` files are NOT committed to the repository
- [ ] CI uses the `DOPPLER_TOKEN_TEST` secret

### Error Handling

- [ ] Tests verify error messages and status codes
- [ ] Edge cases are tested (null, empty, invalid input)
- [ ] Validation errors return the correct HTTP status (422)
- [ ] Database errors are handled gracefully
- [ ] Tests verify rollback on transaction errors

### TypeScript Specific

- [ ] React Testing Library used for component tests
- [ ] TanStack Query components tested with a QueryClientProvider wrapper
- [ ] Server function mocks use `vi.mock()`
- [ ] Async components use `waitFor()` for assertions
- [ ] Vitest globals enabled in config (`globals: true`)

### Python Specific

- [ ] Virtual environment activated before running tests
- [ ] Async fixtures used for async code (`async def`)
- [ ] FastAPI TestClient used for API tests
- [ ] Database fixtures use a session-scoped engine
- [ ] SQLAlchemy sessions auto-rollback in fixtures

### CI/CD

- [ ] Tests pass locally with `bun run test` or `doppler run -- pytest`
- [ ] Tests pass in CI (GitHub Actions)
- [ ] Coverage report uploaded to Codecov
- [ ] No test warnings or deprecation messages
- [ ] Pre-commit hooks pass (if configured)

## Test Types Checklist

### Unit Tests

- [ ] Test a single function/class in isolation
- [ ] Mock all external dependencies
- [ ] No database or network calls
- [ ] Fast execution (< 100ms per test)
- [ ] Cover all code branches (if/else, try/catch)

### Integration Tests

- [ ] Test multiple components together
- [ ] Use a real database (with cleanup)
- [ ] Test complete API request/response cycles
- [ ] Verify database state changes
- [ ] Test transaction handling

### E2E Tests

- [ ] Test complete user workflows
- [ ] Use Playwright for TypeScript
- [ ] Test from the user's perspective (UI interactions)
- [ ] Verify multi-step processes
- [ ] Test critical business flows

### Benchmark Tests

- [ ] Measure performance metrics
- [ ] Set performance thresholds
- [ ] Test with realistic data volumes
- [ ] Monitor for regressions

## Coverage Goals by Component

### Utility Functions

- [ ] 95%+ coverage
- [ ] All branches tested
- [ ] Edge cases handled

### Business Logic (Services)

- [ ] 90%+ coverage
- [ ] All business rules tested
- [ ] Error scenarios covered

### API Endpoints

- [ ] 85%+ coverage
- [ ] All HTTP methods tested
- [ ] All response codes verified

### Database Repositories

- [ ] 90%+ coverage
- [ ] CRUD operations tested
- [ ] Tenant isolation verified

### React Components

- [ ] 80%+ coverage
- [ ] Rendering tested
- [ ] User interactions tested
- [ ] Loading/error states tested

### Security Features

- [ ] 100% coverage
- [ ] Authentication tested
- [ ] Authorization tested
- [ ] Tenant isolation verified

## Common Testing Mistakes to Avoid

### Don't

- [ ] ❌ Test implementation details
- [ ] ❌ Test private methods directly
- [ ] ❌ Write tests that depend on execution order
- [ ] ❌ Use real external services in tests
- [ ] ❌ Hardcode test data
- [ ] ❌ Commit `.env` files
- [ ] ❌ Skip test cleanup
- [ ] ❌ Test multiple things in one test
- [ ] ❌ Forget to await async operations
- [ ] ❌ Mock too much (integration tests)

### Do

- [ ] ✅ Test public APIs and behaviors
- [ ] ✅ Write independent, isolated tests
- [ ] ✅ Mock external services
- [ ] ✅ Use test factories for data
- [ ] ✅ Use Doppler for environment variables
- [ ] ✅ Clean up test data
- [ ] ✅ Focus each test on one assertion
- [ ] ✅ Use `waitFor()` for async rendering
- [ ] ✅ Test error scenarios
- [ ] ✅ Verify tenant isolation

## Post-Testing Checklist

- [ ] All tests pass locally
- [ ] Coverage meets the minimum threshold (80%)
- [ ] No failing tests in CI
- [ ] Coverage report reviewed
- [ ] Test output reviewed for warnings
- [ ] Performance acceptable (no slow tests)
- [ ] Documentation updated (if test patterns changed)
- [ ] Reviewers can understand test intent

260 skills/testing-strategy/scripts/coverage_check.py (new executable file)
@@ -0,0 +1,260 @@
|
||||
#!/usr/bin/env python3
"""
Check test coverage and enforce 80% minimum threshold for Grey Haven projects.

Analyzes coverage reports from Vitest or pytest and provides detailed
breakdown of coverage by file, function, and line.

Usage:
    # Check Vitest coverage
    python scripts/coverage_check.py

    # Check pytest coverage
    python scripts/coverage_check.py --backend pytest

    # Show detailed file-by-file breakdown
    python scripts/coverage_check.py --detailed

    # Check coverage and fail if below threshold
    python scripts/coverage_check.py --strict

    # Generate coverage report if missing
    python scripts/coverage_check.py --generate

Always run with --help first to see all options.
"""

import argparse
import subprocess
import sys
import json
import os
from pathlib import Path


def run_command(cmd: str, capture: bool = True) -> tuple[bool, str]:
    """Run a shell command and return success status and output."""
    result = subprocess.run(
        cmd,
        shell=True,
        capture_output=capture,
        text=True
    )
    return result.returncode == 0, result.stdout if capture else ""


def check_vitest_coverage(detailed: bool = False) -> dict:
    """Check Vitest coverage from coverage/coverage-summary.json."""
    coverage_file = Path("coverage/coverage-summary.json")

    if not coverage_file.exists():
        print("ERROR: Coverage report not found: coverage/coverage-summary.json")
        print("   Run tests with coverage first:")
        print("   doppler run --config test -- vitest run --coverage")
        sys.exit(1)

    with open(coverage_file) as f:
        coverage_data = json.load(f)

    # Total coverage
    total = coverage_data["total"]

    results = {
        "lines": total["lines"]["pct"],
        "statements": total["statements"]["pct"],
        "functions": total["functions"]["pct"],
        "branches": total["branches"]["pct"],
    }

    # Detailed breakdown by file
    if detailed:
        print("\n Coverage by File:")
        print(f"{'File':<50} {'Lines':<10} {'Funcs':<10} {'Branches':<10}")
        print("=" * 80)

        for file_path, file_data in coverage_data.items():
            if file_path == "total":
                continue

            # Shorten file path for display
            short_path = file_path.replace(os.getcwd(), ".")
            if len(short_path) > 47:
                short_path = "..." + short_path[-44:]

            lines_pct = file_data["lines"]["pct"]
            funcs_pct = file_data["functions"]["pct"]
            branches_pct = file_data["branches"]["pct"]

            # Flag each file by coverage band
            if lines_pct < 80:
                status = "[BELOW 80%]"
            elif lines_pct < 90:
                status = "[80-90%]"
            else:
                status = "[ABOVE 90%]"

            print(f"{status} {short_path:<47} {lines_pct:<9.1f}% {funcs_pct:<9.1f}% {branches_pct:<9.1f}%")

    return results


def check_pytest_coverage(detailed: bool = False) -> dict:
    """Check pytest coverage from .coverage file."""
    coverage_file = Path(".coverage")

    if not coverage_file.exists():
        print("ERROR: Coverage report not found: .coverage")
        print("   Run tests with coverage first:")
        print("   doppler run --config test -- pytest --cov=app")
        sys.exit(1)

    # Use coverage.py to get report
    success, output = run_command("coverage report --format=total")

    if not success:
        print("ERROR: Failed to generate coverage report")
        sys.exit(1)

    # Parse total coverage percentage
    total_coverage = float(output.strip().rstrip("%"))

    # Get detailed report if requested
    if detailed:
        print("\n Coverage by File:")
        _, detailed_output = run_command("coverage report")
        print(detailed_output)

    # pytest coverage doesn't separate by type, so we use total for all
    results = {
        "lines": total_coverage,
        "statements": total_coverage,
        "functions": total_coverage,
        "branches": total_coverage,
    }

    return results


def main():
    parser = argparse.ArgumentParser(
        description="Check test coverage and enforce thresholds",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Check Vitest coverage
  python scripts/coverage_check.py

  # Check pytest coverage with detailed breakdown
  python scripts/coverage_check.py --backend pytest --detailed

  # Generate coverage and check (strict mode)
  python scripts/coverage_check.py --generate --strict

Coverage Thresholds:
  Minimum 80% coverage required for:
  - Lines
  - Functions
  - Branches
  - Statements

Backends:
  vitest - Vitest (TypeScript/React) - default
  pytest - pytest (Python/FastAPI)
"""
    )

    parser.add_argument(
        "--backend",
        default="vitest",
        choices=["vitest", "pytest"],
        help="Test backend to check coverage for (default: vitest)"
    )
    parser.add_argument(
        "--detailed",
        action="store_true",
        help="Show detailed file-by-file breakdown"
    )
    parser.add_argument(
        "--strict",
        action="store_true",
        help="Exit with error if coverage below 80 percent"
    )
    parser.add_argument(
        "--generate",
        action="store_true",
        help="Generate coverage report before checking"
    )

    args = parser.parse_args()

    print(f"\n{'=' * 70}")
    print(f" Coverage Check - {args.backend.upper()}")
    print(f"{'=' * 70}")

    # Generate coverage if requested
    if args.generate:
        print("\n→ Generating coverage report...")

        if args.backend == "vitest":
            cmd = "doppler run --config test -- vitest run --coverage"
        else:
            cmd = "doppler run --config test -- pytest --cov=app --cov-report=term --cov-report=html"

        success, _ = run_command(cmd, capture=False)

        if not success:
            print("\nERROR: Failed to generate coverage")
            sys.exit(1)

    # Check coverage
    if args.backend == "vitest":
        coverage = check_vitest_coverage(args.detailed)
    else:
        coverage = check_pytest_coverage(args.detailed)

    # Display summary
    print(f"\n{'=' * 70}")
    print(" Coverage Summary")
    print(f"{'=' * 70}")

    threshold = 80.0
    all_pass = True

    for metric, value in coverage.items():
        if value >= threshold:
            status = "SUCCESS:"
        else:
            status = "ERROR:"
            all_pass = False

        print(f"{status} {metric.capitalize():<15} {value:>6.2f}% (threshold: {threshold}%)")

    # Overall result
    print(f"\n{'=' * 70}")
    if all_pass:
        print(" SUCCESS: All coverage thresholds met!")
    else:
        print(" ERROR: Coverage below 80% threshold")

    print(f"{'=' * 70}")

    # Additional info
    if not all_pass:
        print("\nTIP: To improve coverage:")
        print("   • Add unit tests for uncovered functions")
        print("   • Add integration tests for API endpoints")
        print("   • Add edge case tests for conditionals")
        print("   • Test error handling paths")

    if args.backend == "vitest":
        print("\n View detailed report: coverage/index.html")
    else:
        print("\n View detailed report: htmlcov/index.html")

    # Exit with error in strict mode if coverage below threshold
    if args.strict and not all_pass:
        sys.exit(1)


if __name__ == "__main__":
    main()
236
skills/testing-strategy/scripts/run_tests.py
Executable file
@@ -0,0 +1,236 @@
#!/usr/bin/env python3
"""
Run tests for Grey Haven applications with Doppler environment variables.

Supports Vitest (TypeScript) and pytest (Python) with markers for different
test types (unit, integration, e2e, benchmark).

Usage:
    # Run all tests with test environment
    python scripts/run_tests.py

    # Run unit tests only
    python scripts/run_tests.py --type unit

    # Run integration and e2e tests
    python scripts/run_tests.py --type integration --type e2e

    # Run tests with coverage
    python scripts/run_tests.py --coverage

    # Run tests in watch mode (for development)
    python scripts/run_tests.py --watch

    # Run pytest instead of Vitest
    python scripts/run_tests.py --backend pytest

    # Run with specific Doppler environment
    python scripts/run_tests.py --env ci

Always run with --help first to see all options.
"""

import argparse
import shlex
import subprocess
import sys


def run_command(cmd: str, description: str) -> bool:
    """Run a shell command and return success status."""
    print(f"\n→ {description}")
    print(f"  Command: {cmd}\n")

    result = subprocess.run(cmd, shell=True)
    return result.returncode == 0


def main():
    parser = argparse.ArgumentParser(
        description="Run tests with Doppler environment variables",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Run all Vitest tests
  python scripts/run_tests.py

  # Run unit tests only
  python scripts/run_tests.py --type unit

  # Run integration and e2e tests with coverage
  python scripts/run_tests.py --type integration --type e2e --coverage

  # Run pytest unit tests
  python scripts/run_tests.py --backend pytest --type unit

  # Run tests in watch mode
  python scripts/run_tests.py --watch

Test Types (Markers):
  unit        - Unit tests (fast, isolated)
  integration - Integration tests (database, external services)
  e2e         - End-to-end tests (full application flow)
  benchmark   - Performance benchmark tests

Backends:
  vitest - Vitest (TypeScript/React) - default
  pytest - pytest (Python/FastAPI)

Doppler Configuration:
  Uses 'test' config by default.
  Override with --env flag for CI environments.
"""
    )

    parser.add_argument(
        "--backend",
        default="vitest",
        choices=["vitest", "pytest"],
        help="Test backend to use (default: vitest)"
    )
    parser.add_argument(
        "--type",
        action="append",
        choices=["unit", "integration", "e2e", "benchmark"],
        help="Test type(s) to run (can be repeated). If not specified, runs all tests."
    )
    parser.add_argument(
        "--coverage",
        action="store_true",
        help="Run with coverage reporting"
    )
    parser.add_argument(
        "--watch",
        action="store_true",
        help="Run in watch mode (for development)"
    )
    parser.add_argument(
        "--env",
        default="test",
        help="Doppler environment config to use (default: test)"
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Verbose output"
    )

    args = parser.parse_args()

    backend = args.backend
    env = args.env
    test_types = args.type or []

    print(f"\n{'=' * 70}")
    print(f" Running {backend.upper()} Tests")
    print(f" Environment: {env}")
    if test_types:
        print(f" Types: {', '.join(test_types)}")
    print(f"{'=' * 70}")

    # Construct test command based on backend
    if backend == "vitest":
        # Base Vitest command; the "run" subcommand must come right after
        # "vitest" (single run), and is omitted in watch mode
        cmd_parts = ["doppler", "run", "--config", env, "--", "vitest"]
        if not args.watch:
            cmd_parts.append("run")

        # Add test types as test-name patterns matching our markers
        # (quoted, since "(" and "|" are special to the shell)
        if test_types:
            patterns = "|".join(test_types)
            cmd_parts.extend(["-t", shlex.quote(f"({patterns})")])

        # Add coverage flag
        if args.coverage:
            cmd_parts.append("--coverage")

        # Add watch mode
        if args.watch:
            cmd_parts.append("--watch")

        # Add verbose flag
        if args.verbose:
            cmd_parts.append("--reporter=verbose")

    elif backend == "pytest":
        # Base pytest command
        cmd_parts = ["doppler", "run", "--config", env, "--", "pytest"]

        # Add test types as markers (quoted: the expression contains spaces)
        if test_types:
            markers = " or ".join(test_types)
            cmd_parts.extend(["-m", shlex.quote(markers)])

        # Add coverage flag
        if args.coverage:
            cmd_parts.extend([
                "--cov=app",
                "--cov-report=term-missing",
                "--cov-report=html"
            ])

        # Add verbose flag
        if args.verbose:
            cmd_parts.append("-vv")

        # pytest doesn't have built-in watch mode
        if args.watch:
            print("\nWARNING: pytest doesn't support watch mode natively")
            print("   Consider using pytest-watch: pip install pytest-watch")

    cmd = " ".join(cmd_parts)
    success = run_command(cmd, f"Running {backend} tests")

    if not success:
        print("\nERROR: Tests failed")
        sys.exit(1)

    # Coverage threshold check (if coverage was run)
    if args.coverage and backend == "vitest":
        print("\n→ Checking coverage thresholds...")
        print("   Required: 80% (lines, functions, branches, statements)")

        # Vitest coverage is configured in vitest.config.ts
        # Thresholds are enforced automatically
        print("   ✓ Coverage thresholds enforced by Vitest config")

    elif args.coverage and backend == "pytest":
        print("\n→ Checking coverage thresholds...")
        print("   Required: 80% coverage")

        # Check coverage with pytest-cov
        coverage_cmd = f"doppler run --config {env} -- pytest --cov=app --cov-fail-under=80 -q"
        coverage_success = run_command(coverage_cmd, "Validating coverage threshold")

        if not coverage_success:
            print("\nERROR: Coverage below 80% threshold")
            print("   Add more tests to increase coverage")
            sys.exit(1)

    # Success!
    print(f"\n{'=' * 70}")
    print(" SUCCESS: All tests passed!")
    print(f"{'=' * 70}")

    if args.coverage:
        if backend == "vitest":
            print("\n Coverage report: coverage/index.html")
        else:
            print("\n Coverage report: htmlcov/index.html")

    print("\nNext steps:")
    if not test_types:
        print("  • All tests passed - ready to commit")
    else:
        print(f"  • {', '.join(test_types)} tests passed")
    if not args.coverage:
        print("  • Run with --coverage to check code coverage")
    print("  • Deploy with: python scripts/deploy.py --env staging")


if __name__ == "__main__":
    main()
110
skills/testing-strategy/templates/.github-workflows-test.yml
Normal file
@@ -0,0 +1,110 @@
# .github/workflows/test.yml
name: Tests

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]

jobs:
  test-typescript:
    name: TypeScript Tests
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:16
        env:
          POSTGRES_DB: test_db
          POSTGRES_USER: test_user
          POSTGRES_PASSWORD: test_password
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432

      redis:
        image: redis:7-alpine
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 6379:6379

    steps:
      - uses: actions/checkout@v4

      # setup-node's cache option does not support bun; install Bun directly
      - uses: oven-sh/setup-bun@v2

      - name: Install Doppler CLI
        uses: dopplerhq/cli-action@v3

      - name: Install dependencies
        run: bun install

      - name: Run tests with coverage
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: doppler run --config test -- bun run test:coverage

      - name: Upload coverage
        uses: codecov/codecov-action@v4
        with:
          files: ./coverage/coverage-final.json

  test-python:
    name: Python Tests
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:16
        env:
          POSTGRES_DB: test_db
          POSTGRES_USER: test_user
          POSTGRES_PASSWORD: test_password
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432

    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
          cache: "pip"

      - name: Install Doppler CLI
        uses: dopplerhq/cli-action@v3

      - name: Install dependencies
        run: |
          python -m venv .venv
          source .venv/bin/activate
          pip install -r requirements.txt -r requirements-dev.txt

      - name: Run tests with coverage
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: |
          source .venv/bin/activate
          doppler run --config test -- pytest --cov=app --cov-report=xml

      - name: Upload coverage
        uses: codecov/codecov-action@v4
        with:
          files: ./coverage.xml
102
skills/testing-strategy/templates/conftest.py
Normal file
@@ -0,0 +1,102 @@
# tests/conftest.py
"""Shared test fixtures for all tests."""

import pytest
import os
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from httpx import ASGITransport, AsyncClient
from uuid import uuid4
from app.main import app
from app.db.models import Base

# Doppler provides DATABASE_URL_TEST at runtime
DATABASE_URL_TEST = os.getenv(
    "DATABASE_URL_TEST",
    "postgresql+asyncpg://localhost/test_db"
)


@pytest.fixture(scope="session")
async def engine():
    """Create test database engine."""
    engine = create_async_engine(DATABASE_URL_TEST, echo=False)

    # Create all tables
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)

    yield engine

    # Drop all tables
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)

    await engine.dispose()


@pytest.fixture
async def session(engine):
    """Create test database session with automatic rollback."""
    async_session = sessionmaker(
        engine,
        class_=AsyncSession,
        expire_on_commit=False
    )

    async with async_session() as session:
        yield session
        await session.rollback()


@pytest.fixture
async def client():
    """Create test HTTP client (httpx >= 0.27 requires an explicit ASGITransport)."""
    transport = ASGITransport(app=app)
    async with AsyncClient(transport=transport, base_url="http://test") as client:
        yield client


@pytest.fixture
def tenant_id():
    """Provide test tenant ID."""
    return uuid4()


@pytest.fixture
async def test_user(session, tenant_id):
    """Create test user."""
    from app.db.models.user import User

    user = User(
        tenant_id=tenant_id,
        email_address="test@example.com",
        name="Test User",
        is_active=True,
    )
    session.add(user)
    await session.commit()
    await session.refresh(user)

    return user


@pytest.fixture
async def authenticated_client(client, test_user, tenant_id):
    """Create authenticated HTTP client."""
    # Log in and get token
    response = await client.post(
        "/api/auth/login",
        json={
            "email_address": test_user.email_address,
            "password": "testpassword",
        },
    )

    assert response.status_code == 200
    token = response.json()["access_token"]

    # Add auth headers to client
    client.headers["Authorization"] = f"Bearer {token}"
    client.headers["X-Tenant-ID"] = str(tenant_id)

    return client
113
skills/testing-strategy/templates/pytest-integration.py
Normal file
@@ -0,0 +1,113 @@
# tests/integration/test_FEATURE_api.py
import pytest
from httpx import AsyncClient
from uuid import uuid4


@pytest.mark.integration
class TestYourAPI:
    """Integration tests for Your API endpoints."""

    async def test_create_endpoint(self, client: AsyncClient, tenant_id):
        """Test POST /api/YOUR_RESOURCE creates resource."""
        response = await client.post(
            "/api/YOUR_RESOURCE",
            json={
                "name": "Test Resource",
                "description": "Test description",
            },
            headers={"X-Tenant-ID": str(tenant_id)},
        )

        assert response.status_code == 201
        data = response.json()
        assert data["name"] == "Test Resource"
        assert data["tenant_id"] == str(tenant_id)

    async def test_get_endpoint(self, client: AsyncClient, tenant_id, test_resource):
        """Test GET /api/YOUR_RESOURCE/{id} retrieves resource."""
        response = await client.get(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": str(tenant_id)},
        )

        assert response.status_code == 200
        data = response.json()
        assert data["id"] == str(test_resource.id)
        assert data["name"] == test_resource.name

    async def test_get_enforces_tenant_isolation(
        self, client: AsyncClient, tenant_id, test_resource
    ):
        """Test GET enforces tenant isolation."""
        # Should succeed with correct tenant
        response = await client.get(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        assert response.status_code == 200

        # Should fail with different tenant
        different_tenant = str(uuid4())
        response = await client.get(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": different_tenant},
        )
        assert response.status_code == 404

    async def test_list_endpoint(self, client: AsyncClient, tenant_id):
        """Test GET /api/YOUR_RESOURCE lists resources."""
        response = await client.get(
            "/api/YOUR_RESOURCE",
            headers={"X-Tenant-ID": str(tenant_id)},
        )

        assert response.status_code == 200
        data = response.json()
        assert isinstance(data, list)

    async def test_update_endpoint(
        self, client: AsyncClient, tenant_id, test_resource
    ):
        """Test PATCH /api/YOUR_RESOURCE/{id} updates resource."""
        response = await client.patch(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            json={"name": "Updated Name"},
            headers={"X-Tenant-ID": str(tenant_id)},
        )

        assert response.status_code == 200
        data = response.json()
        assert data["name"] == "Updated Name"

    async def test_delete_endpoint(
        self, client: AsyncClient, tenant_id, test_resource
    ):
        """Test DELETE /api/YOUR_RESOURCE/{id} deletes resource."""
        response = await client.delete(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": str(tenant_id)},
        )

        assert response.status_code == 204

        # Verify deletion
        response = await client.get(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        assert response.status_code == 404

    async def test_validation_errors(self, client: AsyncClient, tenant_id):
        """Test endpoint validates input correctly."""
        response = await client.post(
            "/api/YOUR_RESOURCE",
            json={
                "name": "",  # Invalid: empty name
            },
            headers={"X-Tenant-ID": str(tenant_id)},
        )

        assert response.status_code == 422
        data = response.json()
        assert "detail" in data
119
skills/testing-strategy/templates/pytest-unit.py
Normal file
@@ -0,0 +1,119 @@
# tests/unit/repositories/test_FEATURE_repository.py
import pytest
from uuid import uuid4
from app.db.repositories.YOUR_repository import YourRepository
from app.db.models.YOUR_model import YourModel


@pytest.mark.unit
class TestYourRepository:
    """Unit tests for YourRepository."""

    async def test_get_by_id_success(self, session, tenant_id):
        """Test retrieving entity by ID."""
        repo = YourRepository(session)

        # Create test entity
        entity = YourModel(
            tenant_id=tenant_id,
            name="Test Entity",
        )
        session.add(entity)
        await session.commit()
        await session.refresh(entity)

        # Retrieve entity
        result = await repo.get_by_id(entity.id, tenant_id)

        assert result is not None
        assert result.id == entity.id
        assert result.name == "Test Entity"

    async def test_get_by_id_enforces_tenant_isolation(
        self, session, tenant_id
    ):
        """Test that get_by_id enforces tenant isolation."""
        repo = YourRepository(session)

        # Create entity
        entity = YourModel(tenant_id=tenant_id, name="Test")
        session.add(entity)
        await session.commit()

        # Try to access with different tenant_id
        different_tenant = uuid4()
        result = await repo.get_by_id(entity.id, different_tenant)

        assert result is None

    async def test_list_with_pagination(self, session, tenant_id):
        """Test list with pagination."""
        repo = YourRepository(session)

        # Create multiple entities
        entities = [
            YourModel(tenant_id=tenant_id, name=f"Entity {i}")
            for i in range(10)
        ]
        session.add_all(entities)
        await session.commit()

        # Get first page
        page1 = await repo.list(tenant_id, limit=5, offset=0)
        assert len(page1) == 5

        # Get second page
        page2 = await repo.list(tenant_id, limit=5, offset=5)
        assert len(page2) == 5

        # Verify no overlap
        page1_ids = {e.id for e in page1}
        page2_ids = {e.id for e in page2}
        assert page1_ids.isdisjoint(page2_ids)

    async def test_create_success(self, session, tenant_id):
        """Test creating new entity."""
        repo = YourRepository(session)

        entity = await repo.create(
            tenant_id=tenant_id,
            name="New Entity",
        )

        assert entity.id is not None
        assert entity.tenant_id == tenant_id
        assert entity.name == "New Entity"

    async def test_update_success(self, session, tenant_id):
        """Test updating existing entity."""
        repo = YourRepository(session)

        # Create entity
        entity = YourModel(tenant_id=tenant_id, name="Original")
        session.add(entity)
        await session.commit()

        # Update entity
        updated = await repo.update(
            entity.id,
            tenant_id,
            name="Updated",
        )

        assert updated.name == "Updated"

    async def test_delete_success(self, session, tenant_id):
        """Test deleting entity."""
        repo = YourRepository(session)

        # Create entity
        entity = YourModel(tenant_id=tenant_id, name="To Delete")
        session.add(entity)
        await session.commit()

        # Delete entity
        await repo.delete(entity.id, tenant_id)

        # Verify deletion
        result = await repo.get_by_id(entity.id, tenant_id)
        assert result is None
54
skills/testing-strategy/templates/vitest-component.test.tsx
Normal file
@@ -0,0 +1,54 @@
// tests/unit/lib/components/COMPONENT.test.tsx
import type { ReactNode } from "react";
import { describe, it, expect, vi } from "vitest";
import { render, screen, fireEvent, waitFor } from "@testing-library/react";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import YourComponent from "~/lib/components/YourComponent";
// Import the function you intend to mock so vi.mocked() can reference it
import { someFunction } from "~/lib/server/functions/YOUR_MODULE";

// Mock dependencies
vi.mock("~/lib/server/functions/YOUR_MODULE");

describe("YourComponent", () => {
  const queryClient = new QueryClient({
    defaultOptions: {
      queries: { retry: false },
    },
  });

  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>
      {children}
    </QueryClientProvider>
  );

  it("renders correctly with initial state", () => {
    render(<YourComponent />, { wrapper });
    expect(screen.getByText("Expected Text")).toBeInTheDocument();
  });

  it("handles user interaction", async () => {
    render(<YourComponent />, { wrapper });

    const button = screen.getByRole("button", { name: /click me/i });
    fireEvent.click(button);

    await waitFor(() => {
      expect(screen.getByText("Updated Text")).toBeInTheDocument();
    });
  });

  it("displays loading state", () => {
    render(<YourComponent isLoading={true} />, { wrapper });
    expect(screen.getByText(/loading/i)).toBeInTheDocument();
  });

  it("displays error state", async () => {
    // Mock error
    vi.mocked(someFunction).mockRejectedValue(new Error("Test error"));

    render(<YourComponent />, { wrapper });

    await waitFor(() => {
      expect(screen.getByText(/error/i)).toBeInTheDocument();
    });
  });
});
87
skills/testing-strategy/templates/vitest-integration.test.ts
Normal file
@@ -0,0 +1,87 @@
// tests/integration/FEATURE-flow.test.ts
import { describe, it, expect, beforeEach, afterEach } from "vitest";
import { db } from "~/lib/server/db";
import { users } from "~/lib/server/db/schema";
import { and, eq } from "drizzle-orm";

describe("Feature Integration Tests", () => {
  const testTenantId = "550e8400-e29b-41d4-a716-446655440000";

  beforeEach(async () => {
    // Setup test data
    await db.delete(users).where(eq(users.tenant_id, testTenantId));
  });

  afterEach(async () => {
    // Cleanup test data
    await db.delete(users).where(eq(users.tenant_id, testTenantId));
  });

  it("completes full workflow successfully", async () => {
    // 1. Create resource
    const [created] = await db
      .insert(users)
      .values({
        tenant_id: testTenantId,
        email_address: "test@example.com",
        name: "Test User",
      })
      .returning();

    expect(created).toBeDefined();
    expect(created.email_address).toBe("test@example.com");

    // 2. Retrieve resource (combine conditions with and(); where() cannot be chained)
    const [retrieved] = await db
      .select()
      .from(users)
      .where(and(eq(users.id, created.id), eq(users.tenant_id, testTenantId)));

    expect(retrieved).toBeDefined();
    expect(retrieved.id).toBe(created.id);

    // 3. Update resource
    const [updated] = await db
      .update(users)
      .set({ name: "Updated Name" })
      .where(eq(users.id, created.id))
      .returning();

    expect(updated.name).toBe("Updated Name");

    // 4. Delete resource
    await db.delete(users).where(eq(users.id, created.id));

    // 5. Verify deletion
    const [deleted] = await db
      .select()
      .from(users)
      .where(eq(users.id, created.id));

    expect(deleted).toBeUndefined();
  });

  it("enforces tenant isolation", async () => {
    const differentTenantId = "00000000-0000-0000-0000-000000000000";

    // Create user in tenant 1
    const [user] = await db
      .insert(users)
      .values({
        tenant_id: testTenantId,
        email_address: "tenant1@example.com",
        name: "Tenant 1 User",
      })
      .returning();

    // Attempt to access with different tenant_id
    const [result] = await db
      .select()
      .from(users)
      .where(and(eq(users.id, user.id), eq(users.tenant_id, differentTenantId)));

    expect(result).toBeUndefined();
  });
});
19
skills/testing-strategy/templates/vitest-unit.test.ts
Normal file
@@ -0,0 +1,19 @@
// tests/unit/lib/utils/FEATURE.test.ts
import { describe, it, expect } from "vitest";
import { functionToTest } from "~/lib/utils/FEATURE";

describe("functionToTest", () => {
  it("handles valid input correctly", () => {
    const result = functionToTest("valid input");
    expect(result).toBe("expected output");
  });

  it("handles edge cases", () => {
    expect(functionToTest("")).toBe("");
    expect(functionToTest(null)).toBeNull();
  });

  it("throws error for invalid input", () => {
    expect(() => functionToTest("invalid")).toThrow("Error message");
  });
});
49
skills/testing-strategy/templates/vitest.config.ts
Normal file
@@ -0,0 +1,49 @@
// vitest.config.ts
import { defineConfig } from "vitest/config";
import react from "@vitejs/plugin-react";
import path from "path";

export default defineConfig({
  plugins: [react()],
  test: {
    // Enable global test APIs
    globals: true,

    // Use jsdom for browser-like environment
    environment: "jsdom",

    // Run setup file before tests
    setupFiles: ["./tests/setup.ts"],

    // Coverage configuration
    coverage: {
      provider: "v8",
      reporter: ["text", "json", "html"],
      exclude: [
        "node_modules/",
        "tests/",
        "**/*.config.ts",
        "**/*.d.ts",
      ],
      thresholds: {
        lines: 80,
        functions: 80,
        branches: 80,
        statements: 80,
      },
    },

    // Environment variables
    env: {
      DATABASE_URL_ADMIN: process.env.DATABASE_URL_ADMIN || "postgresql://localhost/test",
      REDIS_URL: process.env.REDIS_URL || "redis://localhost:6379",
    },
  },

  // Path aliases
  resolve: {
    alias: {
      "~": path.resolve(__dirname, "./src"),
    },
  },
});