Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 18:29:30 +08:00
commit 40d73f6839
33 changed files with 8109 additions and 0 deletions

View File

@@ -0,0 +1,13 @@
{
"name": "testing",
"description": "Comprehensive testing automation with Playwright visual regression, Chrome E2E testing, and frontend debugging tools",
"version": "1.0.0",
"author": {
"name": "Grey Haven Studio"
},
"skills": [
"./skills/react-tanstack-testing",
"./skills/test-generation",
"./skills/testing-strategy"
]
}

3
README.md Normal file
View File

@@ -0,0 +1,3 @@
# testing
Comprehensive testing automation with Playwright visual regression, Chrome E2E testing, and frontend debugging tools

161
plugin.lock.json Normal file
View File

@@ -0,0 +1,161 @@
{
"$schema": "internal://schemas/plugin.lock.v1.json",
"pluginId": "gh:greyhaven-ai/claude-code-config:grey-haven-plugins/testing",
"normalized": {
"repo": null,
"ref": "refs/tags/v20251128.0",
"commit": "fdbb623bcaf78d27219dae649a906d8c96ddf58c",
"treeHash": "6aabe4bda64b245976de7102a43d1fbd56b9a9e4898b4a99883dbd26a6d38111",
"generatedAt": "2025-11-28T10:17:04.094583Z",
"toolVersion": "publish_plugins.py@0.2.0"
},
"origin": {
"remote": "git@github.com:zhongweili/42plugin-data.git",
"branch": "master",
"commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
"repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
},
"manifest": {
"name": "testing",
"description": "Comprehensive testing automation with Playwright visual regression, Chrome E2E testing, and frontend debugging tools",
"version": "1.0.0"
},
"content": {
"files": [
{
"path": "README.md",
"sha256": "ecc17df5d975b6fbeba8b84351eb7b43cecf0caeba9ae22c683fea159975cc01"
},
{
"path": ".claude-plugin/plugin.json",
"sha256": "9ce5b18d70783a66ef5f8e5cadd22b4e9c7bd5ff05f4bad9739975f9948e3cf8"
},
{
"path": "skills/react-tanstack-testing/SKILL.md",
"sha256": "425b3fafd709004aa37972cbdc586f595f1e13f4643b1f8a83fd3dc51c78572d"
},
{
"path": "skills/react-tanstack-testing/examples/tanstack-router-testing.md",
"sha256": "277657d4ed802b401facf04df93348d4dcc186d397faab375feaf01e1f9e173f"
},
{
"path": "skills/react-tanstack-testing/examples/tanstack-form-testing.md",
"sha256": "fe4c7137933ccceccea743df4e43e34127adb92d35df73d94e47c619da86a406"
},
{
"path": "skills/react-tanstack-testing/examples/INDEX.md",
"sha256": "62f0dd3938110acab923c0ac64a4012d911962c800d92962c5bd554c00638827"
},
{
"path": "skills/react-tanstack-testing/examples/tanstack-table-testing.md",
"sha256": "c990e5107a683a6cc2bd277c3650eb59b96c68be9b8e1340e69b3a74194af5ee"
},
{
"path": "skills/react-tanstack-testing/examples/tanstack-query-testing.md",
"sha256": "b993956c6435ea60ead3925546d94dd024cf482b1d1196969ab450b65171cb8e"
},
{
"path": "skills/react-tanstack-testing/reference/server-components-testing.md",
"sha256": "3b48b636adf6c60284356534abf8aec920f1886df013cd757d46f89a9505e6d0"
},
{
"path": "skills/react-tanstack-testing/reference/testing-setup.md",
"sha256": "0eded45a5dfe869355aec7501ccd2ccdfaa0d046b0edebcef564767fcc297f68"
},
{
"path": "skills/react-tanstack-testing/reference/INDEX.md",
"sha256": "5485ee4c679efd032b02f785df28313d0bdfd19faab3c7d017d205119d867b82"
},
{
"path": "skills/react-tanstack-testing/reference/common-patterns.md",
"sha256": "9604d73c1e791c2e35a1e63a7e2c628117a10bb817749ff876f6cb5e505a227f"
},
{
"path": "skills/react-tanstack-testing/reference/testing-best-practices.md",
"sha256": "3609ef0cab553d058b0adcb8de476232da6f8ef5bd4549f4417816a1abcb527b"
},
{
"path": "skills/testing-strategy/EXAMPLES.md",
"sha256": "2ba2965b4a6dba6ba7052b792feefb6c84f1fa97c4d10d48e31fd3a76f03f8a4"
},
{
"path": "skills/testing-strategy/REFERENCE.md",
"sha256": "49f6be7553c80214ea62c4bb80c94eb702135eeb251d8115985aef42c1f33e75"
},
{
"path": "skills/testing-strategy/SKILL.md",
"sha256": "7eb4ecd695a275792c35fa60c30fefff9cf1925498e856fbdef2222a82bf0797"
},
{
"path": "skills/testing-strategy/checklists/test-quality-review.md",
"sha256": "9078fa780fd93fac72443bf20f88062434b7b0f24ed38b8cd7be4cf81e917fa5"
},
{
"path": "skills/testing-strategy/checklists/testing-checklist.md",
"sha256": "49c11e9e3e65ade56d7717be67de7a4700d7ececd605cb99daa8530be6a01c42"
},
{
"path": "skills/testing-strategy/scripts/run_tests.py",
"sha256": "e621322e82f099b859c51d5f9e00f7d9cdac35974db2f5e8110337f54093e6bd"
},
{
"path": "skills/testing-strategy/scripts/coverage_check.py",
"sha256": "16471b562cc4ca63678939365f8d74d335c7c39368b6a905fdee151457063a0f"
},
{
"path": "skills/testing-strategy/templates/vitest-unit.test.ts",
"sha256": "99e66fd22922781a4af687c8a36550ff16c5e9215114c43bab78c5f515ad5dbf"
},
{
"path": "skills/testing-strategy/templates/pytest-unit.py",
"sha256": "158fa2f4ce3d2cc16a760160036ffd9e3115b8429ef751c0629617185b1841a5"
},
{
"path": "skills/testing-strategy/templates/conftest.py",
"sha256": "46d5682a0952a296c4425db784b10d54945361f5a9346ac81ba6d2fa83953981"
},
{
"path": "skills/testing-strategy/templates/pytest-integration.py",
"sha256": "fa19a3cc32a26927daefebde3743ea66c7a68929dd41fec37c773add506c33ce"
},
{
"path": "skills/testing-strategy/templates/vitest-component.test.tsx",
"sha256": "3f9d213d128e7d1b43f7c38a678931425f83dd2019ef6c0becdbc52fcebcf1ca"
},
{
"path": "skills/testing-strategy/templates/.github-workflows-test.yml",
"sha256": "839a249b30874420320967e606996aec719e193a45f6b89ec04229a44a810199"
},
{
"path": "skills/testing-strategy/templates/vitest.config.ts",
"sha256": "02a73cc7efeef0d4e20e0deac970d7c9f2fcaa85e7e12e178069fff33e4b856d"
},
{
"path": "skills/testing-strategy/templates/vitest-integration.test.ts",
"sha256": "6410f1653eb9d01ab9fb1a23c52f42f3f0b1805fa3e62fe630e1aee225d6c80f"
},
{
"path": "skills/test-generation/SKILL.md",
"sha256": "d4e612b9103dfd00bbb6e8ad9109c767e06559fd6dfde447b92f94fa99099fac"
},
{
"path": "skills/test-generation/examples/INDEX.md",
"sha256": "f83e6b6682b28eeccd5f1cafa085776ae9f68abba71c9bf077809bdf4b923a22"
},
{
"path": "skills/test-generation/templates/INDEX.md",
"sha256": "fa5f8670ef77679d2298b7a7dc192c59c7cd972ab0c95a16173eb5384525e601"
},
{
"path": "skills/test-generation/reference/INDEX.md",
"sha256": "275b8827e48c8f5ddd808a93054e13e1a74fa6fc0fe70355a0015945642d0ed2"
}
],
"dirSha256": "6aabe4bda64b245976de7102a43d1fbd56b9a9e4898b4a99883dbd26a6d38111"
},
"security": {
"scannedAt": null,
"scannerVersion": null,
"flags": []
}
}

View File

@@ -0,0 +1,27 @@
# React TanStack Testing Skill
Specialized testing for React applications using TanStack ecosystem (Query, Router, Table, Form) with Vite and Vitest.
## Description
Comprehensive testing patterns for modern React with TanStack libraries including server state management, routing, and data handling.
## What's Included
- **Examples**: TanStack Query tests, Router tests, Form validation
- **Reference**: Testing patterns, mocking strategies
- **Templates**: Test suites for TanStack components
- **Checklists**: React testing best practices
## Use When
- Testing React + TanStack Query applications
- Testing complex routing with TanStack Router
- Validating server state management
## Related Agents
- `react-tanstack-tester`
- `test-generator`
**Skill Version**: 1.0

View File

@@ -0,0 +1,90 @@
# React TanStack Testing Examples
Complete testing examples for React applications using TanStack ecosystem libraries.
## Examples Overview
### TanStack Query Testing
**File**: [tanstack-query-testing.md](tanstack-query-testing.md)
Testing server state management with TanStack Query:
- Query hooks (loading, success, error states)
- Mutation hooks with optimistic updates
- Query invalidation and cache management
- Infinite queries and pagination
- Prefetching and cache warming
**Use when**: Testing components that fetch data, manage server state, or use caching.
---
### TanStack Router Testing
**File**: [tanstack-router-testing.md](tanstack-router-testing.md)
Testing routing and navigation with TanStack Router:
- Route navigation and programmatic routing
- Route parameters and search params
- Protected routes and authentication
- Route loaders and actions
- Nested routes and layouts
**Use when**: Testing navigation, route-based data fetching, or protected pages.
---
### TanStack Table Testing
**File**: [tanstack-table-testing.md](tanstack-table-testing.md)
Testing data tables with TanStack Table:
- Column rendering and data display
- Sorting (ascending, descending, none)
- Filtering (global and column-specific)
- Pagination and page size changes
- Row selection (single and multiple)
**Use when**: Testing data grids, tables with sorting/filtering, or complex data displays.
---
### TanStack Form Testing
**File**: [tanstack-form-testing.md](tanstack-form-testing.md)
Testing forms with TanStack Form:
- Field validation (required, email, min/max)
- Form submission with valid/invalid data
- Validation error display
- Field states (touched, dirty, pristine)
- Schema validation with Zod
**Use when**: Testing forms with validation, complex field interactions, or submission handling.
---
## Quick Reference
| Library | Primary Use Case | Key Test Patterns |
|---------|-----------------|-------------------|
| **Query** | Data fetching | Loading states, mutations, cache |
| **Router** | Navigation | Routes, params, loaders, protected |
| **Table** | Data display | Sorting, filtering, pagination |
| **Form** | User input | Validation, submission, errors |
## Testing Tools
All examples use:
- **Vitest** - Fast unit test runner
- **React Testing Library** - User-centric testing
- **MSW** - API mocking
- **@testing-library/user-event** - User interactions
## Best Practices
1. **Test user behavior** - Focus on what users see and do
2. **Mock API calls** - Use MSW for realistic network mocking
3. **Test all states** - Loading, success, error, empty
4. **Use proper queries** - Prefer `getByRole` over `getByTestId`
5. **Async utilities** - Use `waitFor`, `findBy` for async operations
---
Return to [main agent](../react-tanstack-tester.md)

View File

@@ -0,0 +1,488 @@
# TanStack Form Testing Examples
Complete examples for testing TanStack Form validation, submission, and field states.
## Test Setup
### Form Dependencies
```typescript
// src/test/form-utils.tsx
import { ReactElement } from 'react';
import { render, RenderOptions } from '@testing-library/react';

/**
 * Thin wrapper around RTL's render. Kept as a helper so a form-wide
 * provider can later be added in one place without touching the tests.
 */
export const renderForm = (ui: ReactElement, options?: RenderOptions) =>
  render(ui, options);
```
## Example 1: Basic Form with Validation
### Form Component
```typescript
// src/components/UserForm.tsx
import { useForm } from '@tanstack/react-form';
import { zodValidator } from '@tanstack/zod-form-adapter';
import { z } from 'zod';
// Zod schema: each field's message is exactly what the tests assert against.
const userSchema = z.object({
name: z.string().min(2, 'Name must be at least 2 characters'),
email: z.string().email('Invalid email address'),
age: z.number().min(18, 'Must be at least 18 years old'),
});
// Demo form. Only name and email are rendered; `age` keeps its default (0)
// because no age field exists below, so submitted payloads carry age: 0.
export function UserForm({ onSubmit }: { onSubmit: (data: any) => void }) {
const form = useForm({
defaultValues: {
name: '',
email: '',
age: 0,
},
// Invoked only after the per-field onChange validators pass.
onSubmit: async ({ value }) => {
onSubmit(value);
},
validatorAdapter: zodValidator,
});
return (
<form
onSubmit={(e) => {
e.preventDefault();
form.handleSubmit();
}}
>
<div>
<form.Field
name="name"
validators={{
onChange: userSchema.shape.name,
}}
children={(field) => (
<div>
<label htmlFor="name">Name</label>
<input
id="name"
value={field.state.value}
onChange={(e) => field.handleChange(e.target.value)}
/>
{field.state.meta.errors.length > 0 && (
<span role="alert">{field.state.meta.errors[0]}</span>
)}
</div>
)}
/>
</div>
<div>
<form.Field
name="email"
validators={{
onChange: userSchema.shape.email,
}}
children={(field) => (
<div>
<label htmlFor="email">Email</label>
<input
id="email"
type="email"
value={field.state.value}
onChange={(e) => field.handleChange(e.target.value)}
/>
{field.state.meta.errors.length > 0 && (
<span role="alert">{field.state.meta.errors[0]}</span>
)}
</div>
)}
/>
</div>
<button type="submit">Submit</button>
</form>
);
}
```
### Test Suite
```typescript
// src/components/UserForm.test.tsx
import { describe, it, expect, vi } from 'vitest';
import { screen, render, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { UserForm } from './UserForm';

describe('UserForm', () => {
  it('renders all form fields', () => {
    const onSubmit = vi.fn();
    render(<UserForm onSubmit={onSubmit} />);
    expect(screen.getByLabelText('Name')).toBeInTheDocument();
    expect(screen.getByLabelText('Email')).toBeInTheDocument();
    expect(screen.getByRole('button', { name: 'Submit' })).toBeInTheDocument();
  });

  it('shows validation error for short name', async () => {
    const user = userEvent.setup();
    const onSubmit = vi.fn();
    render(<UserForm onSubmit={onSubmit} />);
    await user.type(screen.getByLabelText('Name'), 'A');
    await user.tab(); // Trigger blur/validation
    expect(await screen.findByRole('alert')).toHaveTextContent('Name must be at least 2 characters');
  });

  it('shows validation error for invalid email', async () => {
    const user = userEvent.setup();
    const onSubmit = vi.fn();
    render(<UserForm onSubmit={onSubmit} />);
    await user.type(screen.getByLabelText('Email'), 'invalid-email');
    await user.tab();
    expect(await screen.findByRole('alert')).toHaveTextContent('Invalid email address');
  });

  it('submits form with valid data', async () => {
    const user = userEvent.setup();
    const onSubmit = vi.fn();
    render(<UserForm onSubmit={onSubmit} />);
    await user.type(screen.getByLabelText('Name'), 'Alice Johnson');
    await user.type(screen.getByLabelText('Email'), 'alice@example.com');
    await user.click(screen.getByRole('button', { name: 'Submit' }));
    // form.handleSubmit() resolves asynchronously, so wait for the callback
    // instead of asserting synchronously (the sync assertion is flaky).
    await waitFor(() => {
      expect(onSubmit).toHaveBeenCalledWith({
        name: 'Alice Johnson',
        email: 'alice@example.com',
        age: 0,
      });
    });
  });

  it('does not submit form with invalid data', async () => {
    const user = userEvent.setup();
    const onSubmit = vi.fn();
    render(<UserForm onSubmit={onSubmit} />);
    await user.type(screen.getByLabelText('Name'), 'A');
    await user.type(screen.getByLabelText('Email'), 'invalid');
    await user.click(screen.getByRole('button', { name: 'Submit' }));
    // Negative assertion: field validation failed, so the handler never fires.
    expect(onSubmit).not.toHaveBeenCalled();
  });
});
```
## Example 2: Testing Field States
### Form with Field State Display
```typescript
// src/components/FieldStateForm.tsx
import { useForm } from '@tanstack/react-form';
// Exposes TanStack Form field meta-state (touched/dirty/valid) as text so
// tests can assert on it via data-testid hooks.
export function FieldStateForm() {
const form = useForm({
defaultValues: {
username: '',
},
});
// handleBlur below is what flips isTouched; handleChange flips isDirty.
return (
<form>
<form.Field
name="username"
children={(field) => (
<div>
<label htmlFor="username">Username</label>
<input
id="username"
value={field.state.value}
onChange={(e) => field.handleChange(e.target.value)}
onBlur={field.handleBlur}
/>
<div data-testid="field-states">
<span data-testid="is-touched">{field.state.meta.isTouched ? 'Touched' : 'Untouched'}</span>
<span data-testid="is-dirty">{field.state.meta.isDirty ? 'Dirty' : 'Pristine'}</span>
<span data-testid="is-valid">{field.state.meta.errors.length === 0 ? 'Valid' : 'Invalid'}</span>
</div>
</div>
)}
/>
</form>
);
}
```
### Test Suite
```typescript
// src/components/FieldStateForm.test.tsx
import { describe, it, expect } from 'vitest';
import { screen, render } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { FieldStateForm } from './FieldStateForm';

describe('FieldStateForm', () => {
  it('field starts untouched and pristine', () => {
    render(<FieldStateForm />);
    const touched = screen.getByTestId('is-touched');
    const dirty = screen.getByTestId('is-dirty');
    expect(touched).toHaveTextContent('Untouched');
    expect(dirty).toHaveTextContent('Pristine');
  });

  it('field becomes touched after blur', async () => {
    const user = userEvent.setup();
    render(<FieldStateForm />);
    // Focus the input, then tab away so the field registers a blur.
    await user.click(screen.getByLabelText('Username'));
    await user.tab();
    expect(screen.getByTestId('is-touched')).toHaveTextContent('Touched');
  });

  it('field becomes dirty after value change', async () => {
    const user = userEvent.setup();
    render(<FieldStateForm />);
    const usernameField = screen.getByLabelText('Username');
    await user.type(usernameField, 'alice');
    expect(screen.getByTestId('is-dirty')).toHaveTextContent('Dirty');
  });
});
```
## Example 3: Testing Async Validation
### Form with Async Validation
```typescript
// src/components/UsernameForm.tsx
import { useForm } from '@tanstack/react-form';

/**
 * Ask the backend whether a username is free.
 * encodeURIComponent prevents characters such as '&', '=' or '#' in the
 * username from corrupting the query string; the ok-check matches the
 * error handling used by the other fetch helpers in these examples.
 */
async function checkUsernameAvailable(username: string): Promise<boolean> {
  const response = await fetch(
    `/api/check-username?username=${encodeURIComponent(username)}`
  );
  if (!response.ok) throw new Error('Failed to check username');
  return response.json();
}

/** Username form demonstrating async (server-backed) field validation. */
export function UsernameForm({ onSubmit }: { onSubmit: (data: any) => void }) {
  const form = useForm({
    defaultValues: {
      username: '',
    },
    onSubmit: async ({ value }) => {
      onSubmit(value);
    },
  });
  return (
    <form
      onSubmit={(e) => {
        e.preventDefault();
        form.handleSubmit();
      }}
    >
      <form.Field
        name="username"
        validators={{
          // Cheap length check first; only hit the network once it passes.
          onChangeAsync: async ({ value }) => {
            if (value.length < 3) {
              return 'Username must be at least 3 characters';
            }
            const isAvailable = await checkUsernameAvailable(value);
            if (!isAvailable) {
              return 'Username already taken';
            }
            return undefined;
          },
        }}
        children={(field) => (
          <div>
            <label htmlFor="username">Username</label>
            <input
              id="username"
              value={field.state.value}
              onChange={(e) => field.handleChange(e.target.value)}
            />
            {field.state.meta.isValidating && (
              <span data-testid="validating">Checking availability...</span>
            )}
            {field.state.meta.errors.length > 0 && (
              <span role="alert">{field.state.meta.errors[0]}</span>
            )}
          </div>
        )}
      />
      <button type="submit">Submit</button>
    </form>
  );
}
```
### Test Suite
```typescript
// src/components/UsernameForm.test.tsx
import { describe, it, expect, vi, beforeAll, afterEach, afterAll } from 'vitest';
import { screen, render, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';
import { UsernameForm } from './UsernameForm';
// Mock endpoint: every username is available except the literal 'taken'.
const server = setupServer(
http.get('/api/check-username', ({ request }) => {
const url = new URL(request.url);
const username = url.searchParams.get('username');
return HttpResponse.json(username !== 'taken');
})
);
beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());
describe('UsernameForm', () => {
it('shows validating indicator during async validation', async () => {
const user = userEvent.setup();
const onSubmit = vi.fn();
render(<UsernameForm onSubmit={onSubmit} />);
await user.type(screen.getByLabelText('Username'), 'alice');
// NOTE(review): asserts synchronously right after typing, assuming the
// async validator is still in flight — could be flaky on a fast mock.
expect(screen.getByTestId('validating')).toHaveTextContent('Checking availability...');
});
it('accepts available username', async () => {
const user = userEvent.setup();
const onSubmit = vi.fn();
render(<UsernameForm onSubmit={onSubmit} />);
await user.type(screen.getByLabelText('Username'), 'available');
// Wait until the async validator has settled before asserting no error.
await waitFor(() => {
expect(screen.queryByTestId('validating')).not.toBeInTheDocument();
});
expect(screen.queryByRole('alert')).not.toBeInTheDocument();
});
it('rejects taken username', async () => {
const user = userEvent.setup();
const onSubmit = vi.fn();
render(<UsernameForm onSubmit={onSubmit} />);
await user.type(screen.getByLabelText('Username'), 'taken');
// findByRole waits for the async validator to surface the error.
expect(await screen.findByRole('alert')).toHaveTextContent('Username already taken');
});
});
```
## Example 4: Testing Form Reset
### Form with Reset Button
```typescript
// src/components/ResettableForm.tsx
import { useForm } from '@tanstack/react-form';
// Two-field form whose Reset button restores the defaultValues below
// via form.reset(). Note the button is type="button" so clicking it
// does not also submit the form.
export function ResettableForm() {
const form = useForm({
defaultValues: {
name: '',
email: '',
},
});
return (
<form>
<div>
<label htmlFor="name">Name</label>
<form.Field
name="name"
children={(field) => (
<input
id="name"
value={field.state.value}
onChange={(e) => field.handleChange(e.target.value)}
/>
)}
/>
</div>
<div>
<label htmlFor="email">Email</label>
<form.Field
name="email"
children={(field) => (
<input
id="email"
value={field.state.value}
onChange={(e) => field.handleChange(e.target.value)}
/>
)}
/>
</div>
<button type="button" onClick={() => form.reset()}>
Reset
</button>
</form>
);
}
```
### Test Suite
```typescript
// src/components/ResettableForm.test.tsx
import { describe, it, expect } from 'vitest';
import { screen, render } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { ResettableForm } from './ResettableForm';

describe('ResettableForm', () => {
  it('resets form fields to default values', async () => {
    const user = userEvent.setup();
    render(<ResettableForm />);
    const nameInput = screen.getByLabelText('Name');
    const emailInput = screen.getByLabelText('Email');

    // Fill in form
    await user.type(nameInput, 'Alice');
    await user.type(emailInput, 'alice@example.com');
    // jest-dom's toHaveValue removes the need for 'as HTMLInputElement' casts.
    expect(nameInput).toHaveValue('Alice');
    expect(emailInput).toHaveValue('alice@example.com');

    // Reset form back to the empty defaultValues
    await user.click(screen.getByRole('button', { name: 'Reset' }));
    expect(nameInput).toHaveValue('');
    expect(emailInput).toHaveValue('');
  });
});
```
## Key Takeaways
1. **Zod Validation**: Use `zodValidator` for type-safe schema validation
2. **Field States**: Test touched, dirty, pristine, and valid states
3. **Async Validation**: Use MSW to mock async validation endpoints
4. **Error Display**: Test that validation errors appear correctly
5. **Submission**: Test form submission with both valid and invalid data
6. **Reset**: Test form reset functionality clears all fields
---
**Previous**: [Table Testing](tanstack-table-testing.md) | **Index**: [Examples Index](INDEX.md)

View File

@@ -0,0 +1,475 @@
# TanStack Query Testing Examples
Complete examples for testing TanStack Query (React Query) hooks and components.
## Test Setup
### QueryClient Configuration
```typescript
// src/test/query-client.ts
import { QueryClient } from '@tanstack/react-query';

/**
 * Build a QueryClient tuned for tests: no retries (fail fast), no cache
 * retention (gcTime 0), and data that is always considered stale.
 */
export function createTestQueryClient(): QueryClient {
  const defaultOptions = {
    queries: { retry: false, gcTime: 0, staleTime: 0 },
    mutations: { retry: false },
  };
  return new QueryClient({ defaultOptions });
}
```
### Custom Render with QueryClientProvider
```typescript
// src/test/test-utils.tsx
import { ReactElement } from 'react';
import { render, RenderOptions } from '@testing-library/react';
import { QueryClientProvider } from '@tanstack/react-query';
import { createTestQueryClient } from './query-client';
// Render `ui` inside a fresh, test-configured QueryClientProvider.
// A new client per call keeps query caches isolated between tests.
export function renderWithQuery(
ui: ReactElement,
options?: RenderOptions
) {
const queryClient = createTestQueryClient();
return render(
<QueryClientProvider client={queryClient}>
{ui}
</QueryClientProvider>,
options
);
}
```
## Example 1: Testing Query Hooks
### Hook Under Test
```typescript
// src/hooks/useUsers.ts
import { useQuery } from '@tanstack/react-query';
// Shape returned by GET /api/users.
interface User {
id: string;
name: string;
email: string;
}
// Fetch the user list, cached under the ['users'] key so mutation hooks
// (useCreateUser / useUpdateUser) can invalidate it.
export function useUsers() {
return useQuery({
queryKey: ['users'],
queryFn: async () => {
const response = await fetch('/api/users');
// Throwing puts the query into its error state.
if (!response.ok) throw new Error('Failed to fetch users');
return response.json() as Promise<User[]>;
},
});
}
```
### Test Suite
```typescript
// src/hooks/useUsers.test.tsx
// NOTE(review): renamed from .test.ts — the wrapper below uses JSX.
import { describe, it, expect, beforeAll, afterEach, afterAll } from 'vitest';
import { renderHook, waitFor } from '@testing-library/react';
import { QueryClientProvider } from '@tanstack/react-query';
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';
import { createTestQueryClient } from '../test/query-client';
import { useUsers } from './useUsers';
// Mock API server
const server = setupServer(
http.get('/api/users', () => {
return HttpResponse.json([
{ id: '1', name: 'Alice', email: 'alice@example.com' },
{ id: '2', name: 'Bob', email: 'bob@example.com' },
]);
})
);
beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());
describe('useUsers', () => {
it('fetches users successfully', async () => {
const queryClient = createTestQueryClient();
// Wrapper supplies the QueryClient context renderHook requires.
const wrapper = ({ children }: { children: React.ReactNode }) => (
<QueryClientProvider client={queryClient}>
{children}
</QueryClientProvider>
);
const { result } = renderHook(() => useUsers(), { wrapper });
// Initially loading
expect(result.current.isLoading).toBe(true);
expect(result.current.data).toBeUndefined();
// Wait for success
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(result.current.data).toEqual([
{ id: '1', name: 'Alice', email: 'alice@example.com' },
{ id: '2', name: 'Bob', email: 'bob@example.com' },
]);
});
});
```
## Example 2: Testing Mutation Hooks
### Hook Under Test
```typescript
// src/hooks/useCreateUser.ts
import { useMutation, useQueryClient } from '@tanstack/react-query';
// Payload the caller supplies.
interface CreateUserInput {
name: string;
email: string;
}
// Server response: the input plus a server-assigned id.
interface User extends CreateUserInput {
id: string;
}
// POST a new user; on success, invalidate ['users'] so useUsers refetches.
export function useCreateUser() {
const queryClient = useQueryClient();
return useMutation({
mutationFn: async (input: CreateUserInput) => {
const response = await fetch('/api/users', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(input),
});
if (!response.ok) throw new Error('Failed to create user');
return response.json() as Promise<User>;
},
onSuccess: () => {
// Invalidate users query to refetch
queryClient.invalidateQueries({ queryKey: ['users'] });
},
});
}
```
### Test Suite
```typescript
// src/hooks/useCreateUser.test.tsx
// Lifecycle hooks must be imported explicitly unless vitest globals are
// enabled — beforeAll/afterEach/afterAll were used below without imports.
import { describe, it, expect, vi, beforeAll, afterEach, afterAll } from 'vitest';
import { renderHook, waitFor } from '@testing-library/react';
import { QueryClientProvider } from '@tanstack/react-query';
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';
import { createTestQueryClient } from '../test/query-client';
import { useCreateUser } from './useCreateUser';

// Echo the posted payload back with a server-assigned id.
const server = setupServer(
  http.post('/api/users', async ({ request }) => {
    // request.json() is typed unknown; cast so the spread type-checks.
    const body = (await request.json()) as { name: string; email: string };
    return HttpResponse.json({
      id: '3',
      ...body,
    });
  })
);

beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());

describe('useCreateUser', () => {
  it('creates user successfully', async () => {
    const queryClient = createTestQueryClient();
    const invalidateQueriesSpy = vi.spyOn(queryClient, 'invalidateQueries');
    const wrapper = ({ children }: { children: React.ReactNode }) => (
      <QueryClientProvider client={queryClient}>
        {children}
      </QueryClientProvider>
    );
    const { result } = renderHook(() => useCreateUser(), { wrapper });

    // Initially idle
    expect(result.current.isPending).toBe(false);

    // Trigger mutation
    result.current.mutate({
      name: 'Charlie',
      email: 'charlie@example.com',
    });

    // Wait for success
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual({
      id: '3',
      name: 'Charlie',
      email: 'charlie@example.com',
    });

    // Verify cache invalidation
    expect(invalidateQueriesSpy).toHaveBeenCalledWith({ queryKey: ['users'] });
  });
});
```
## Example 3: Testing Components with Queries
### Component Under Test
```typescript
// src/components/UserList.tsx
import { useUsers } from '../hooks/useUsers';
// Renders the four query states in priority order: loading, error,
// empty, then the populated list. data-testid hooks are used by tests.
export function UserList() {
const { data: users, isLoading, error } = useUsers();
if (isLoading) {
return <div data-testid="loading">Loading users...</div>;
}
if (error) {
return <div role="alert">Error: {error.message}</div>;
}
if (!users || users.length === 0) {
return <div data-testid="empty">No users found</div>;
}
return (
<ul>
{users.map((user) => (
<li key={user.id}>
{user.name} - {user.email}
</li>
))}
</ul>
);
}
```
### Test Suite
```typescript
// src/components/UserList.test.tsx
// beforeAll/afterEach/afterAll were used below without being imported;
// they must come from 'vitest' unless globals are enabled.
import { describe, it, expect, beforeAll, afterEach, afterAll } from 'vitest';
import { screen, waitFor } from '@testing-library/react';
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';
import { renderWithQuery } from '../test/test-utils';
import { UserList } from './UserList';

const server = setupServer(
  http.get('/api/users', () => {
    return HttpResponse.json([
      { id: '1', name: 'Alice', email: 'alice@example.com' },
      { id: '2', name: 'Bob', email: 'bob@example.com' },
    ]);
  })
);

beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());

describe('UserList', () => {
  it('displays users after loading', async () => {
    renderWithQuery(<UserList />);
    // waitFor covers the async fetch for the first row …
    await waitFor(() => {
      expect(screen.getByText('Alice - alice@example.com')).toBeInTheDocument();
    });
    // … after which the second row is synchronously present.
    expect(screen.getByText('Bob - bob@example.com')).toBeInTheDocument();
  });
});
```
## Example 4: Testing Optimistic Updates
### Hook with Optimistic Update
```typescript
// src/hooks/useUpdateUser.ts
import { useMutation, useQueryClient } from '@tanstack/react-query';
interface User {
id: string;
name: string;
email: string;
}
// PUT an updated user with an optimistic cache write: the ['users'] cache
// is patched in onMutate, rolled back from the snapshot in onError, and
// refetched in onSettled regardless of outcome.
export function useUpdateUser() {
const queryClient = useQueryClient();
return useMutation({
mutationFn: async (user: User) => {
const response = await fetch(`/api/users/${user.id}`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(user),
});
if (!response.ok) throw new Error('Failed to update user');
return response.json() as Promise<User>;
},
onMutate: async (updatedUser) => {
// Cancel outgoing queries so a racing refetch cannot clobber the
// optimistic write below.
await queryClient.cancelQueries({ queryKey: ['users'] });
// Snapshot previous value
const previousUsers = queryClient.getQueryData<User[]>(['users']);
// Optimistically update
if (previousUsers) {
queryClient.setQueryData<User[]>(
['users'],
previousUsers.map((user) =>
user.id === updatedUser.id ? updatedUser : user
)
);
}
// Return context for rollback
return { previousUsers };
},
onError: (_error, _variables, context) => {
// Rollback on error
if (context?.previousUsers) {
queryClient.setQueryData(['users'], context.previousUsers);
}
},
onSettled: () => {
// Refetch after error or success
queryClient.invalidateQueries({ queryKey: ['users'] });
},
});
}
```
### Test Suite
```typescript
// src/hooks/useUpdateUser.test.tsx
// beforeAll/afterEach/afterAll were used below without being imported.
import { describe, it, expect, beforeAll, afterEach, afterAll } from 'vitest';
import { renderHook, waitFor } from '@testing-library/react';
import { QueryClientProvider } from '@tanstack/react-query';
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';
import { createTestQueryClient } from '../test/query-client';
import { useUpdateUser } from './useUpdateUser';

const server = setupServer(
  http.put('/api/users/:id', async ({ request, params }) => {
    // request.json() is typed unknown; cast so the spread type-checks.
    const body = (await request.json()) as Record<string, unknown>;
    return HttpResponse.json({ ...body, id: params.id });
  })
);

beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());

describe('useUpdateUser', () => {
  it('applies optimistic update immediately', async () => {
    const queryClient = createTestQueryClient();
    // Pre-populate cache
    queryClient.setQueryData(['users'], [
      { id: '1', name: 'Alice', email: 'alice@example.com' },
      { id: '2', name: 'Bob', email: 'bob@example.com' },
    ]);
    const wrapper = ({ children }: { children: React.ReactNode }) => (
      <QueryClientProvider client={queryClient}>
        {children}
      </QueryClientProvider>
    );
    const { result } = renderHook(() => useUpdateUser(), { wrapper });

    // Trigger mutation
    result.current.mutate({
      id: '1',
      name: 'Alice Updated',
      email: 'alice.updated@example.com',
    });

    // onMutate awaits cancelQueries before writing the cache, so the
    // optimistic write lands a microtask later — poll with waitFor rather
    // than reading the cache synchronously (which races).
    await waitFor(() => {
      expect(queryClient.getQueryData(['users'])).toEqual([
        { id: '1', name: 'Alice Updated', email: 'alice.updated@example.com' },
        { id: '2', name: 'Bob', email: 'bob@example.com' },
      ]);
    });

    // Wait for mutation to complete
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
  });

  it('rolls back on mutation failure', async () => {
    // Override the handler for this test only; resetHandlers restores it.
    server.use(
      http.put('/api/users/:id', () => {
        return new HttpResponse(null, { status: 500 });
      })
    );
    const queryClient = createTestQueryClient();
    // Pre-populate cache
    const originalUsers = [
      { id: '1', name: 'Alice', email: 'alice@example.com' },
      { id: '2', name: 'Bob', email: 'bob@example.com' },
    ];
    queryClient.setQueryData(['users'], originalUsers);
    const wrapper = ({ children }: { children: React.ReactNode }) => (
      <QueryClientProvider client={queryClient}>
        {children}
      </QueryClientProvider>
    );
    const { result } = renderHook(() => useUpdateUser(), { wrapper });

    // Trigger mutation
    result.current.mutate({
      id: '1',
      name: 'Alice Updated',
      email: 'alice.updated@example.com',
    });

    // Wait for error
    await waitFor(() => expect(result.current.isError).toBe(true));

    // Verify rollback to the snapshot taken in onMutate
    const cachedUsers = queryClient.getQueryData(['users']);
    expect(cachedUsers).toEqual(originalUsers);
  });
});
```
## Key Takeaways
1. **Test QueryClient Setup**: Always create test-specific QueryClient with retries disabled
2. **MSW for Mocking**: Use MSW to mock API endpoints realistically
3. **Test All States**: Loading, success, error, and empty states
4. **Optimistic Updates**: Test immediate UI changes and rollback on failure
5. **Cache Invalidation**: Verify queries are invalidated after mutations
---
**Next**: [TanStack Router Testing](tanstack-router-testing.md) | **Index**: [Examples Index](INDEX.md)

View File

@@ -0,0 +1,420 @@
# TanStack Router Testing Examples
Complete examples for testing TanStack Router navigation, routes, and data loading.
## Test Setup
### Router Test Configuration
```typescript
// src/test/router-utils.tsx
import { ReactElement } from 'react';
import { render } from '@testing-library/react';
import { createMemoryHistory, RouterProvider, createRootRoute, createRoute, createRouter } from '@tanstack/react-router';
export function renderWithRouter(
ui: ReactElement,
{ initialEntries = ['/'] } = {}
) {
const rootRoute = createRootRoute({
component: () => ui,
});
const router = createRouter({
routeTree: rootRoute,
history: createMemoryHistory({ initialEntries }),
});
return render(<RouterProvider router={router} />);
}
```
## Example 1: Testing Route Navigation
### Component with Navigation
```typescript
// src/components/Navigation.tsx
import { Link } from '@tanstack/react-router';
export function Navigation() {
return (
<nav>
<Link to="/">Home</Link>
<Link to="/about">About</Link>
<Link to="/users">Users</Link>
</nav>
);
}
```
### Test Suite
```typescript
// src/components/Navigation.test.tsx
import { describe, it, expect } from 'vitest';
import { screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { createRootRoute, createRoute, createRouter, RouterProvider } from '@tanstack/react-router';
import { render } from '@testing-library/react';
import { Navigation } from './Navigation';
function Home() {
return <div>Home Page</div>;
}
function About() {
return <div>About Page</div>;
}
function Users() {
return <div>Users Page</div>;
}
describe('Navigation', () => {
it('navigates between routes', async () => {
const user = userEvent.setup();
// Create routes
const rootRoute = createRootRoute({
component: () => (
<>
<Navigation />
<div id="content" />
</>
),
});
const indexRoute = createRoute({
getParentRoute: () => rootRoute,
path: '/',
component: Home,
});
const aboutRoute = createRoute({
getParentRoute: () => rootRoute,
path: '/about',
component: About,
});
const usersRoute = createRoute({
getParentRoute: () => rootRoute,
path: '/users',
component: Users,
});
const routeTree = rootRoute.addChildren([indexRoute, aboutRoute, usersRoute]);
const router = createRouter({ routeTree });
render(<RouterProvider router={router} />);
// Initially on home
expect(screen.getByText('Home Page')).toBeInTheDocument();
// Navigate to About
await user.click(screen.getByRole('link', { name: /about/i }));
expect(await screen.findByText('About Page')).toBeInTheDocument();
// Navigate to Users
await user.click(screen.getByRole('link', { name: /users/i }));
expect(await screen.findByText('Users Page')).toBeInTheDocument();
});
});
```
## Example 2: Testing Route Parameters
### Component Using Route Params
```typescript
// src/pages/UserProfile.tsx
import { useParams } from '@tanstack/react-router';
export function UserProfile() {
const { userId } = useParams({ from: '/users/$userId' });
return (
<div>
<h1>User Profile</h1>
<p data-testid="user-id">User ID: {userId}</p>
</div>
);
}
```
### Test Suite
```typescript
// src/pages/UserProfile.test.tsx
import { describe, it, expect } from 'vitest';
import { screen, render } from '@testing-library/react';
import { createRootRoute, createRoute, createRouter, RouterProvider } from '@tanstack/react-router';
import { createMemoryHistory } from '@tanstack/react-router';
import { UserProfile } from './UserProfile';
describe('UserProfile', () => {
it('displays correct user ID from route params', () => {
const rootRoute = createRootRoute();
const userRoute = createRoute({
getParentRoute: () => rootRoute,
path: '/users/$userId',
component: UserProfile,
});
const routeTree = rootRoute.addChildren([userRoute]);
const history = createMemoryHistory({ initialEntries: ['/users/123'] });
const router = createRouter({ routeTree, history });
render(<RouterProvider router={router} />);
expect(screen.getByTestId('user-id')).toHaveTextContent('User ID: 123');
});
it('updates when route params change', () => {
const rootRoute = createRootRoute();
const userRoute = createRoute({
getParentRoute: () => rootRoute,
path: '/users/$userId',
component: UserProfile,
});
const routeTree = rootRoute.addChildren([userRoute]);
const history = createMemoryHistory({ initialEntries: ['/users/123'] });
const router = createRouter({ routeTree, history });
const { rerender } = render(<RouterProvider router={router} />);
expect(screen.getByTestId('user-id')).toHaveTextContent('User ID: 123');
// Navigate to different user
history.push('/users/456');
rerender(<RouterProvider router={router} />);
expect(screen.getByTestId('user-id')).toHaveTextContent('User ID: 456');
});
});
```
## Example 3: Testing Protected Routes
### Protected Route Component
```typescript
// src/components/ProtectedRoute.tsx
import { useAuth } from '../hooks/useAuth';
import { Navigate } from '@tanstack/react-router';
interface ProtectedRouteProps {
children: React.ReactNode;
}
export function ProtectedRoute({ children }: ProtectedRouteProps) {
const { isAuthenticated } = useAuth();
if (!isAuthenticated) {
return <Navigate to="/login" />;
}
return <>{children}</>;
}
```
### Test Suite
```typescript
// src/components/ProtectedRoute.test.tsx
import { describe, it, expect, vi } from 'vitest';
import { screen, render } from '@testing-library/react';
import {
  createRootRoute,
  createRoute,
  createRouter,
  createMemoryHistory,
  RouterProvider,
} from '@tanstack/react-router';
import { ProtectedRoute } from './ProtectedRoute';
import * as useAuthModule from '../hooks/useAuth';
vi.mock('../hooks/useAuth');
function Dashboard() {
  return <div>Dashboard</div>;
}
function Login() {
  return <div>Login Page</div>;
}
// Builds a router whose initial location is /dashboard so the protected
// route actually mounts. Without an explicit memory history the router
// starts at '/', which matches no route here — the guard would never run
// and both assertions below would fail for the wrong reason.
function createTestRouter() {
  const rootRoute = createRootRoute();
  const dashboardRoute = createRoute({
    getParentRoute: () => rootRoute,
    path: '/dashboard',
    component: () => (
      <ProtectedRoute>
        <Dashboard />
      </ProtectedRoute>
    ),
  });
  const loginRoute = createRoute({
    getParentRoute: () => rootRoute,
    path: '/login',
    component: Login,
  });
  const routeTree = rootRoute.addChildren([dashboardRoute, loginRoute]);
  return createRouter({
    routeTree,
    history: createMemoryHistory({ initialEntries: ['/dashboard'] }),
  });
}
describe('ProtectedRoute', () => {
  it('renders children when authenticated', async () => {
    vi.mocked(useAuthModule.useAuth).mockReturnValue({
      isAuthenticated: true,
      user: { id: '1', name: 'Alice' },
      login: vi.fn(),
      logout: vi.fn(),
    });
    render(<RouterProvider router={createTestRouter()} />);
    // findBy*: route matching/rendering may settle asynchronously
    expect(await screen.findByText('Dashboard')).toBeInTheDocument();
  });
  it('redirects to login when not authenticated', async () => {
    vi.mocked(useAuthModule.useAuth).mockReturnValue({
      isAuthenticated: false,
      user: null,
      login: vi.fn(),
      logout: vi.fn(),
    });
    render(<RouterProvider router={createTestRouter()} />);
    // Should redirect and show login
    expect(await screen.findByText('Login Page')).toBeInTheDocument();
    expect(screen.queryByText('Dashboard')).not.toBeInTheDocument();
  });
});
```
## Example 4: Testing Route Loaders
### Route with Loader
```typescript
// src/routes/user.tsx
import { createRoute } from '@tanstack/react-router';
interface User {
id: string;
name: string;
email: string;
}
export const userRoute = createRoute({
path: '/users/$userId',
loader: async ({ params }) => {
const response = await fetch(`/api/users/${params.userId}`);
if (!response.ok) throw new Error('User not found');
return response.json() as Promise<User>;
},
component: function UserPage({ useLoaderData }) {
const user = useLoaderData();
return (
<div>
<h1>{user.name}</h1>
<p>{user.email}</p>
</div>
);
},
});
```
### Test Suite
```typescript
// src/routes/user.test.tsx
import { describe, it, expect, beforeAll, afterEach, afterAll } from 'vitest';
import { screen, render } from '@testing-library/react';
import { createRootRoute, createRouter, RouterProvider } from '@tanstack/react-router';
import { createMemoryHistory } from '@tanstack/react-router';
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';
import { userRoute } from './user';
const server = setupServer(
http.get('/api/users/:userId', ({ params }) => {
return HttpResponse.json({
id: params.userId,
name: 'Alice Johnson',
email: 'alice@example.com',
});
})
);
beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());
describe('userRoute', () => {
it('loads and displays user data', async () => {
const rootRoute = createRootRoute();
const routeTree = rootRoute.addChildren([userRoute]);
const history = createMemoryHistory({ initialEntries: ['/users/123'] });
const router = createRouter({ routeTree, history });
render(<RouterProvider router={router} />);
// Wait for loader to complete
expect(await screen.findByText('Alice Johnson')).toBeInTheDocument();
expect(screen.getByText('alice@example.com')).toBeInTheDocument();
});
it('handles loader error', async () => {
server.use(
http.get('/api/users/:userId', () => {
return new HttpResponse(null, { status: 404 });
})
);
const rootRoute = createRootRoute();
const routeTree = rootRoute.addChildren([userRoute]);
const history = createMemoryHistory({ initialEntries: ['/users/999'] });
const router = createRouter({ routeTree, history });
render(<RouterProvider router={router} />);
// Should show error (TanStack Router handles this automatically)
expect(await screen.findByText(/error/i)).toBeInTheDocument();
});
});
```
## Key Takeaways
1. **Memory History**: Use `createMemoryHistory` for controlled navigation in tests
2. **Route Setup**: Build complete route trees with `createRouter` for realistic tests
3. **Params Testing**: Pass `initialEntries` to test routes with params
4. **Protected Routes**: Mock authentication context to test access control
5. **Loaders**: Use MSW to mock loader data fetching
---
**Next**: [TanStack Table Testing](tanstack-table-testing.md) | **Previous**: [Query Testing](tanstack-query-testing.md)

View File

@@ -0,0 +1,496 @@
# TanStack Table Testing Examples
Complete examples for testing TanStack Table sorting, filtering, pagination, and selection.
## Test Setup
### Sample Data
```typescript
// src/test/table-data.ts
export interface User {
id: string;
name: string;
email: string;
role: string;
age: number;
}
export const mockUsers: User[] = [
{ id: '1', name: 'Alice', email: 'alice@example.com', role: 'Admin', age: 30 },
{ id: '2', name: 'Bob', email: 'bob@example.com', role: 'User', age: 25 },
{ id: '3', name: 'Charlie', email: 'charlie@example.com', role: 'User', age: 35 },
{ id: '4', name: 'Diana', email: 'diana@example.com', role: 'Admin', age: 28 },
];
```
## Example 1: Testing Table Rendering
### Basic Table Component
```typescript
// src/components/UserTable.tsx
import { useReactTable, getCoreRowModel, flexRender, ColumnDef } from '@tanstack/react-table';
import { User } from '../test/table-data';
interface UserTableProps {
data: User[];
}
export function UserTable({ data }: UserTableProps) {
const columns: ColumnDef<User>[] = [
{
accessorKey: 'name',
header: 'Name',
},
{
accessorKey: 'email',
header: 'Email',
},
{
accessorKey: 'role',
header: 'Role',
},
{
accessorKey: 'age',
header: 'Age',
},
];
const table = useReactTable({
data,
columns,
getCoreRowModel: getCoreRowModel(),
});
return (
<table>
<thead>
{table.getHeaderGroups().map((headerGroup) => (
<tr key={headerGroup.id}>
{headerGroup.headers.map((header) => (
<th key={header.id}>
{flexRender(header.column.columnDef.header, header.getContext())}
</th>
))}
</tr>
))}
</thead>
<tbody>
{table.getRowModel().rows.map((row) => (
<tr key={row.id}>
{row.getVisibleCells().map((cell) => (
<td key={cell.id}>
{flexRender(cell.column.columnDef.cell, cell.getContext())}
</td>
))}
</tr>
))}
</tbody>
</table>
);
}
```
### Test Suite
```typescript
// src/components/UserTable.test.tsx
import { describe, it, expect } from 'vitest';
import { screen, render } from '@testing-library/react';
import { UserTable } from './UserTable';
import { mockUsers } from '../test/table-data';
describe('UserTable', () => {
it('renders table headers', () => {
render(<UserTable data={mockUsers} />);
expect(screen.getByRole('columnheader', { name: 'Name' })).toBeInTheDocument();
expect(screen.getByRole('columnheader', { name: 'Email' })).toBeInTheDocument();
expect(screen.getByRole('columnheader', { name: 'Role' })).toBeInTheDocument();
expect(screen.getByRole('columnheader', { name: 'Age' })).toBeInTheDocument();
});
it('renders all user data', () => {
render(<UserTable data={mockUsers} />);
expect(screen.getByText('Alice')).toBeInTheDocument();
expect(screen.getByText('alice@example.com')).toBeInTheDocument();
expect(screen.getByText('Bob')).toBeInTheDocument();
expect(screen.getByText('bob@example.com')).toBeInTheDocument();
});
it('renders correct number of rows', () => {
render(<UserTable data={mockUsers} />);
const rows = screen.getAllByRole('row');
expect(rows).toHaveLength(5); // 1 header + 4 data rows
});
});
```
## Example 2: Testing Sorting
### Table with Sorting
```typescript
// src/components/SortableTable.tsx
import { useReactTable, getCoreRowModel, getSortedRowModel, flexRender, SortingState } from '@tanstack/react-table';
import { useState } from 'react';
export function SortableTable({ data, columns }) {
const [sorting, setSorting] = useState<SortingState>([]);
const table = useReactTable({
data,
columns,
state: { sorting },
onSortingChange: setSorting,
getCoreRowModel: getCoreRowModel(),
getSortedRowModel: getSortedRowModel(),
});
return (
<table>
<thead>
{table.getHeaderGroups().map((headerGroup) => (
<tr key={headerGroup.id}>
{headerGroup.headers.map((header) => (
<th key={header.id}>
{header.isPlaceholder ? null : (
<button
onClick={header.column.getToggleSortingHandler()}
aria-label={`Sort by ${header.column.id}`}
>
{flexRender(header.column.columnDef.header, header.getContext())}
{{
asc: ' 🔼',
desc: ' 🔽',
}[header.column.getIsSorted() as string] ?? null}
</button>
)}
</th>
))}
</tr>
))}
</thead>
<tbody>
{table.getRowModel().rows.map((row) => (
<tr key={row.id}>
{row.getVisibleCells().map((cell) => (
<td key={cell.id}>
{flexRender(cell.column.columnDef.cell, cell.getContext())}
</td>
))}
</tr>
))}
</tbody>
</table>
);
}
```
### Test Suite
```typescript
// src/components/SortableTable.test.tsx
import { describe, it, expect } from 'vitest';
import { screen, render, within } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { SortableTable } from './SortableTable';
import { mockUsers } from '../test/table-data';
const columns = [
  { accessorKey: 'name', header: 'Name' },
  { accessorKey: 'age', header: 'Age' },
];
describe('SortableTable', () => {
  it('sorts by name ascending', async () => {
    const user = userEvent.setup();
    render(<SortableTable data={mockUsers} columns={columns} />);
    // Click name header to sort
    await user.click(screen.getByRole('button', { name: 'Sort by name' }));
    const rows = screen.getAllByRole('row');
    const firstDataRow = rows[1]; // Skip header row
    expect(within(firstDataRow).getByText('Alice')).toBeInTheDocument();
  });
  it('sorts by name descending on second click', async () => {
    const user = userEvent.setup();
    render(<SortableTable data={mockUsers} columns={columns} />);
    const sortButton = screen.getByRole('button', { name: 'Sort by name' });
    // First click: ascending
    await user.click(sortButton);
    // Second click: descending
    await user.click(sortButton);
    const rows = screen.getAllByRole('row');
    const firstDataRow = rows[1];
    expect(within(firstDataRow).getByText('Diana')).toBeInTheDocument();
  });
  it('sorts by age correctly', async () => {
    const user = userEvent.setup();
    render(<SortableTable data={mockUsers} columns={columns} />);
    await user.click(screen.getByRole('button', { name: 'Sort by age' }));
    // Read the Age cell (index 1) from every data row. Always pass an
    // explicit radix to parseInt so the string is parsed as base 10.
    const rows = screen.getAllByRole('row');
    const ages = rows
      .slice(1)
      .map((row) =>
        parseInt(within(row).getAllByRole('cell')[1].textContent || '0', 10)
      );
    expect(ages).toEqual([25, 28, 30, 35]); // Sorted ascending
  });
});
```
## Example 3: Testing Pagination
### Table with Pagination
```typescript
// src/components/PaginatedTable.tsx
import { useReactTable, getCoreRowModel, getPaginationRowModel, flexRender } from '@tanstack/react-table';
export function PaginatedTable({ data, columns }) {
const table = useReactTable({
data,
columns,
getCoreRowModel: getCoreRowModel(),
getPaginationRowModel: getPaginationRowModel(),
initialState: {
pagination: {
pageSize: 2,
},
},
});
return (
<div>
<table>
<thead>
{table.getHeaderGroups().map((headerGroup) => (
<tr key={headerGroup.id}>
{headerGroup.headers.map((header) => (
<th key={header.id}>
{flexRender(header.column.columnDef.header, header.getContext())}
</th>
))}
</tr>
))}
</thead>
<tbody>
{table.getRowModel().rows.map((row) => (
<tr key={row.id}>
{row.getVisibleCells().map((cell) => (
<td key={cell.id}>
{flexRender(cell.column.columnDef.cell, cell.getContext())}
</td>
))}
</tr>
))}
</tbody>
</table>
<div>
<button
onClick={() => table.previousPage()}
disabled={!table.getCanPreviousPage()}
>
Previous
</button>
<span>
Page {table.getState().pagination.pageIndex + 1} of {table.getPageCount()}
</span>
<button
onClick={() => table.nextPage()}
disabled={!table.getCanNextPage()}
>
Next
</button>
</div>
</div>
);
}
```
### Test Suite
```typescript
// src/components/PaginatedTable.test.tsx
import { describe, it, expect } from 'vitest';
import { screen, render } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { PaginatedTable } from './PaginatedTable';
import { mockUsers } from '../test/table-data';
const columns = [
{ accessorKey: 'name', header: 'Name' },
];
describe('PaginatedTable', () => {
it('displays first page of results', () => {
render(<PaginatedTable data={mockUsers} columns={columns} />);
expect(screen.getByText('Alice')).toBeInTheDocument();
expect(screen.getByText('Bob')).toBeInTheDocument();
expect(screen.queryByText('Charlie')).not.toBeInTheDocument();
});
it('navigates to next page', async () => {
const user = userEvent.setup();
render(<PaginatedTable data={mockUsers} columns={columns} />);
await user.click(screen.getByRole('button', { name: 'Next' }));
expect(screen.queryByText('Alice')).not.toBeInTheDocument();
expect(screen.getByText('Charlie')).toBeInTheDocument();
expect(screen.getByText('Diana')).toBeInTheDocument();
});
it('displays correct page number', async () => {
const user = userEvent.setup();
render(<PaginatedTable data={mockUsers} columns={columns} />);
expect(screen.getByText('Page 1 of 2')).toBeInTheDocument();
await user.click(screen.getByRole('button', { name: 'Next' }));
expect(screen.getByText('Page 2 of 2')).toBeInTheDocument();
});
it('disables previous button on first page', () => {
render(<PaginatedTable data={mockUsers} columns={columns} />);
expect(screen.getByRole('button', { name: 'Previous' })).toBeDisabled();
});
it('disables next button on last page', async () => {
const user = userEvent.setup();
render(<PaginatedTable data={mockUsers} columns={columns} />);
await user.click(screen.getByRole('button', { name: 'Next' }));
expect(screen.getByRole('button', { name: 'Next' })).toBeDisabled();
});
});
```
## Example 4: Testing Row Selection
### Table with Row Selection
```typescript
// src/components/SelectableTable.tsx
import { useReactTable, getCoreRowModel, flexRender, RowSelectionState } from '@tanstack/react-table';
import { useState } from 'react';
export function SelectableTable({ data, columns }) {
const [rowSelection, setRowSelection] = useState<RowSelectionState>({});
const table = useReactTable({
data,
columns,
state: { rowSelection },
onRowSelectionChange: setRowSelection,
getCoreRowModel: getCoreRowModel(),
enableRowSelection: true,
});
return (
<div>
<p data-testid="selected-count">
{Object.keys(rowSelection).length} selected
</p>
<table>
<thead>
{table.getHeaderGroups().map((headerGroup) => (
<tr key={headerGroup.id}>
<th>
<input type="checkbox" checked={table.getIsAllRowsSelected()} onChange={table.getToggleAllRowsSelectedHandler()} aria-label="Select all rows" />
</th>
{headerGroup.headers.map((header) => (
<th key={header.id}>{flexRender(header.column.columnDef.header, header.getContext())}</th>
))}
</tr>
))}
</thead>
<tbody>
{table.getRowModel().rows.map((row) => (
<tr key={row.id}>
<td>
<input
type="checkbox"
checked={row.getIsSelected()}
onChange={row.getToggleSelectedHandler()}
aria-label={`Select row ${row.id}`}
/>
</td>
{row.getVisibleCells().map((cell) => (
<td key={cell.id}>
{flexRender(cell.column.columnDef.cell, cell.getContext())}
</td>
))}
</tr>
))}
</tbody>
</table>
</div>
);
}
```
### Test Suite
```typescript
// src/components/SelectableTable.test.tsx
import { describe, it, expect } from 'vitest';
import { screen, render } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { SelectableTable } from './SelectableTable';
import { mockUsers } from '../test/table-data';
const columns = [
{ accessorKey: 'name', header: 'Name' },
];
describe('SelectableTable', () => {
it('selects individual row', async () => {
const user = userEvent.setup();
render(<SelectableTable data={mockUsers} columns={columns} />);
await user.click(screen.getByRole('checkbox', { name: 'Select row 0' }));
expect(screen.getByTestId('selected-count')).toHaveTextContent('1 selected');
});
it('selects all rows', async () => {
const user = userEvent.setup();
render(<SelectableTable data={mockUsers} columns={columns} />);
await user.click(screen.getByRole('checkbox', { name: 'Select all rows' }));
expect(screen.getByTestId('selected-count')).toHaveTextContent('4 selected');
});
});
```
## Key Takeaways
1. **Core Setup**: Use `useReactTable` with appropriate row models (core, sorted, pagination)
2. **Sorting**: Test ascending, descending, and unsorted states
3. **Pagination**: Test navigation, disabled states, and page indicators
4. **Selection**: Test individual and bulk selection
5. **Accessibility**: Use proper ARIA labels for buttons and checkboxes
---
**Next**: [TanStack Form Testing](tanstack-form-testing.md) | **Previous**: [Router Testing](tanstack-router-testing.md)

View File

@@ -0,0 +1,91 @@
# React TanStack Testing Reference
Comprehensive reference materials for testing React applications with TanStack ecosystem.
## Reference Overview
### Testing Setup
**File**: [testing-setup.md](testing-setup.md)
Complete configuration for Vitest and test environment:
- Vitest configuration for React testing
- Test environment setup (jsdom, globals)
- React Testing Library configuration
- MSW (Mock Service Worker) setup
- Test file patterns and structure
**Use when**: Setting up a new project or configuring testing infrastructure.
---
### Testing Best Practices
**File**: [testing-best-practices.md](testing-best-practices.md)
Patterns and best practices for effective testing:
- Custom test utilities and render functions
- Provider wrappers (Query, Router, Auth)
- Mock data factories
- Test organization strategies
- Coverage requirements
**Use when**: Creating reusable test utilities or establishing testing standards.
---
### Server Components Testing
**File**: [server-components-testing.md](server-components-testing.md)
Testing React Server Components and async patterns:
- Async component testing
- Suspense boundaries
- Server actions
- Streaming rendering
- Error boundaries
**Use when**: Testing React 19 Server Components or async data fetching.
---
### Common Patterns
**File**: [common-patterns.md](common-patterns.md)
Frequently used testing patterns:
- Loading states and skeletons
- Error boundaries and fallbacks
- Infinite queries and scrolling
- Prefetching strategies
- Optimistic updates
**Use when**: Testing common UI patterns like loading states or error handling.
---
## Quick Reference
| Topic | Key Concepts |
|-------|-------------|
| **Setup** | Vitest config, jsdom, MSW, cleanup |
| **Best Practices** | Custom utils, providers, factories |
| **Server Components** | Async testing, Suspense, streaming |
| **Common Patterns** | Loading, errors, infinite, prefetch |
## Testing Tools Reference
### Vitest
- **Fast** - Native ESM, parallel tests
- **Compatible** - Jest-compatible API
- **Built-in** - Coverage, mocking, snapshots
### React Testing Library
- **User-centric** - Test what users see/do
- **Queries** - getBy, findBy, queryBy
- **Events** - userEvent for realistic interactions
### MSW (Mock Service Worker)
- **Realistic** - Intercepts actual network requests
- **Flexible** - Override handlers per test
- **Portable** - Works in Node and browser
---
Return to [main agent](../react-tanstack-tester.md)

View File

@@ -0,0 +1,494 @@
# Common Testing Patterns
Frequently used patterns for testing loading states, errors, infinite queries, and prefetching.
## Testing Loading States
### Skeleton Loader
```typescript
// src/components/UserListSkeleton.tsx
export function UserListSkeleton() {
return (
<div data-testid="skeleton">
{Array.from({ length: 3 }).map((_, i) => (
<div key={i} className="skeleton-item">
<div className="skeleton-avatar" />
<div className="skeleton-text" />
</div>
))}
</div>
);
}
// src/components/UserList.tsx
export function UserList() {
const { data: users, isLoading } = useUsers();
if (isLoading) {
return <UserListSkeleton />;
}
return (
<ul>
{users?.map((user) => (
<li key={user.id}>{user.name}</li>
))}
</ul>
);
}
```
### Test Suite
```typescript
// src/components/UserList.test.tsx
import { describe, it, expect } from 'vitest';
import { screen, render } from '@testing-library/react';
import { renderWithQuery } from '../test/test-utils';
import { UserList } from './UserList';
describe('UserList', () => {
  it('shows skeleton loader while loading', () => {
    renderWithQuery(<UserList />);
    expect(screen.getByTestId('skeleton')).toBeInTheDocument();
    // Testing Library exposes no getAllByClassName query; for presentation-only
    // nodes like skeleton placeholders, fall back to a DOM query scoped to the
    // skeleton container.
    expect(
      screen.getByTestId('skeleton').querySelectorAll('.skeleton-item')
    ).toHaveLength(3);
  });
  it('hides skeleton after data loads', async () => {
    renderWithQuery(<UserList />);
    // Wait for data to load
    await screen.findByText('Alice');
    expect(screen.queryByTestId('skeleton')).not.toBeInTheDocument();
  });
});
```
## Testing Error States
### Error Component
```typescript
// src/components/ErrorMessage.tsx
interface ErrorMessageProps {
error: Error;
onRetry?: () => void;
}
export function ErrorMessage({ error, onRetry }: ErrorMessageProps) {
return (
<div role="alert">
<h2>Something went wrong</h2>
<p>{error.message}</p>
{onRetry && <button onClick={onRetry}>Try Again</button>}
</div>
);
}
// src/components/UserList.tsx
export function UserList() {
const { data: users, isLoading, error, refetch } = useUsers();
if (error) {
return <ErrorMessage error={error} onRetry={() => refetch()} />;
}
// ...
}
```
### Test Suite
```typescript
// src/components/UserList.test.tsx
import { server } from '../test/msw/server';
import { http, HttpResponse } from 'msw';
describe('UserList', () => {
it('shows error message on fetch failure', async () => {
server.use(
http.get('/api/users', () => {
return new HttpResponse(null, { status: 500 });
})
);
renderWithQuery(<UserList />);
expect(await screen.findByRole('alert')).toHaveTextContent('Something went wrong');
});
it('retries on error retry button click', async () => {
const user = userEvent.setup();
server.use(
http.get('/api/users', () => {
return new HttpResponse(null, { status: 500 });
})
);
renderWithQuery(<UserList />);
await screen.findByRole('alert');
// Fix the error
server.use(
http.get('/api/users', () => {
return HttpResponse.json([{ id: '1', name: 'Alice' }]);
})
);
await user.click(screen.getByRole('button', { name: 'Try Again' }));
expect(await screen.findByText('Alice')).toBeInTheDocument();
expect(screen.queryByRole('alert')).not.toBeInTheDocument();
});
});
```
## Testing Infinite Queries
### Infinite Query Component
```typescript
// src/hooks/useInfiniteUsers.ts
import { useInfiniteQuery } from '@tanstack/react-query';
export function useInfiniteUsers() {
return useInfiniteQuery({
queryKey: ['users', 'infinite'],
queryFn: async ({ pageParam = 0 }) => {
const response = await fetch(`/api/users?page=${pageParam}&limit=10`);
return response.json();
},
getNextPageParam: (lastPage, pages) => {
return lastPage.hasMore ? pages.length : undefined;
},
initialPageParam: 0,
});
}
// src/components/InfiniteUserList.tsx
export function InfiniteUserList() {
const {
data,
fetchNextPage,
hasNextPage,
isFetchingNextPage,
} = useInfiniteUsers();
return (
<div>
{data?.pages.map((page, i) => (
<div key={i}>
{page.users.map((user) => (
<div key={user.id}>{user.name}</div>
))}
</div>
))}
{hasNextPage && (
<button onClick={() => fetchNextPage()} disabled={isFetchingNextPage}>
{isFetchingNextPage ? 'Loading...' : 'Load More'}
</button>
)}
</div>
);
}
```
### Test Suite
```typescript
// src/components/InfiniteUserList.test.tsx
import { server } from '../test/msw/server';
import { http, HttpResponse } from 'msw';
beforeAll(() => {
const mockPages = [
{ users: [{ id: '1', name: 'Alice' }, { id: '2', name: 'Bob' }], hasMore: true },
{ users: [{ id: '3', name: 'Charlie' }, { id: '4', name: 'Diana' }], hasMore: true },
{ users: [{ id: '5', name: 'Eve' }], hasMore: false },
];
server.use(
http.get('/api/users', ({ request }) => {
const url = new URL(request.url);
const page = parseInt(url.searchParams.get('page') || '0');
return HttpResponse.json(mockPages[page] || { users: [], hasMore: false });
})
);
});
describe('InfiniteUserList', () => {
it('loads first page initially', async () => {
renderWithQuery(<InfiniteUserList />);
expect(await screen.findByText('Alice')).toBeInTheDocument();
expect(screen.getByText('Bob')).toBeInTheDocument();
});
it('loads next page on load more click', async () => {
const user = userEvent.setup();
renderWithQuery(<InfiniteUserList />);
await screen.findByText('Alice');
await user.click(screen.getByRole('button', { name: 'Load More' }));
expect(await screen.findByText('Charlie')).toBeInTheDocument();
expect(screen.getByText('Diana')).toBeInTheDocument();
});
it('hides load more button when no more pages', async () => {
const user = userEvent.setup();
renderWithQuery(<InfiniteUserList />);
await screen.findByText('Alice');
// Load page 2
await user.click(screen.getByRole('button', { name: 'Load More' }));
await screen.findByText('Charlie');
// Load page 3 (last page)
await user.click(screen.getByRole('button', { name: 'Load More' }));
await screen.findByText('Eve');
expect(screen.queryByRole('button', { name: 'Load More' })).not.toBeInTheDocument();
});
});
```
## Testing Intersection Observer (Infinite Scroll)
### Auto-loading Infinite List
```typescript
// src/components/AutoLoadingList.tsx
import { useRef, useEffect } from 'react';
export function AutoLoadingList() {
const { data, fetchNextPage, hasNextPage } = useInfiniteUsers();
const observerRef = useRef<HTMLDivElement>(null);
useEffect(() => {
const observer = new IntersectionObserver((entries) => {
if (entries[0].isIntersecting && hasNextPage) {
fetchNextPage();
}
});
if (observerRef.current) {
observer.observe(observerRef.current);
}
return () => observer.disconnect();
}, [fetchNextPage, hasNextPage]);
return (
<div>
{data?.pages.map((page) =>
page.users.map((user) => <div key={user.id}>{user.name}</div>)
)}
<div ref={observerRef} data-testid="load-more-trigger" />
</div>
);
}
```
### Test Suite
```typescript
// src/components/AutoLoadingList.test.tsx
describe('AutoLoadingList', () => {
it('loads next page when scrolling to bottom', async () => {
renderWithQuery(<AutoLoadingList />);
await screen.findByText('Alice');
// Simulate intersection observer trigger
const trigger = screen.getByTestId('load-more-trigger');
const observer = (window as any).IntersectionObserver.mock.calls[0][0];
// Trigger intersection
observer([{ isIntersecting: true, target: trigger }]);
// Wait for next page
expect(await screen.findByText('Charlie')).toBeInTheDocument();
});
});
```
## Testing Prefetching
### Hover Prefetch
```typescript
// src/components/UserCard.tsx
import { useQueryClient } from '@tanstack/react-query';
import { Link } from '@tanstack/react-router';
/**
 * Card linking to a user's detail page.
 * Prefetches the user query on hover so navigation feels instant.
 */
export function UserCard({ userId, name }: { userId: string; name: string }) {
  const queryClient = useQueryClient();
  const handleMouseEnter = () => {
    // Prefetch user details on hover; prefetchQuery is a no-op if
    // fresh data for this key is already cached
    queryClient.prefetchQuery({
      queryKey: ['user', userId],
      queryFn: () => fetch(`/api/users/${userId}`).then((r) => r.json()),
    });
  };
  return (
    <Link to={`/users/${userId}`} onMouseEnter={handleMouseEnter}>
      {name}
    </Link>
  );
}
```
### Test Suite
```typescript
// src/components/UserCard.test.tsx
describe('UserCard', () => {
it('prefetches user data on hover', async () => {
const user = userEvent.setup();
const queryClient = createTestQueryClient();
const prefetchSpy = vi.spyOn(queryClient, 'prefetchQuery');
renderWithQuery(<UserCard userId="123" name="Alice" />, { queryClient });
const link = screen.getByRole('link', { name: 'Alice' });
await user.hover(link);
expect(prefetchSpy).toHaveBeenCalledWith({
queryKey: ['user', '123'],
queryFn: expect.any(Function),
});
});
it('caches prefetched data', async () => {
const user = userEvent.setup();
const queryClient = createTestQueryClient();
renderWithQuery(<UserCard userId="123" name="Alice" />, { queryClient });
const link = screen.getByRole('link', { name: 'Alice' });
await user.hover(link);
// Wait for prefetch to complete
await waitFor(() => {
const cachedData = queryClient.getQueryData(['user', '123']);
expect(cachedData).toBeDefined();
});
});
});
```
## Testing Optimistic Updates
### Optimistic Delete
```typescript
// src/hooks/useDeleteUser.ts
import { useMutation, useQueryClient } from '@tanstack/react-query';
/**
 * Mutation hook that deletes a user with an optimistic cache update.
 * Removes the user from the ['users'] cache immediately, rolls back on
 * error, and reconciles with the server once the mutation settles.
 */
export function useDeleteUser() {
  const queryClient = useQueryClient();
  return useMutation({
    mutationFn: async (userId: string) => {
      const response = await fetch(`/api/users/${userId}`, {
        method: 'DELETE',
      });
      if (!response.ok) throw new Error('Delete failed');
    },
    onMutate: async (userId) => {
      // Cancel outgoing queries so a refetch can't overwrite our optimistic update
      await queryClient.cancelQueries({ queryKey: ['users'] });
      // Snapshot previous value for rollback in onError
      const previousUsers = queryClient.getQueryData(['users']);
      // Optimistically remove the user. The updater receives `undefined`
      // when nothing is cached yet — guard instead of crashing on .filter
      queryClient.setQueryData(['users'], (old: any[] | undefined) =>
        old?.filter((user) => user.id !== userId)
      );
      return { previousUsers };
    },
    onError: (_error, _userId, context) => {
      // Rollback to the snapshot taken in onMutate
      if (context?.previousUsers) {
        queryClient.setQueryData(['users'], context.previousUsers);
      }
    },
    onSettled: () => {
      // Refetch to reconcile with the server regardless of outcome
      queryClient.invalidateQueries({ queryKey: ['users'] });
    },
  });
}
```
### Test Suite
```typescript
// src/hooks/useDeleteUser.test.tsx
describe('useDeleteUser', () => {
it('removes user optimistically', async () => {
const queryClient = createTestQueryClient();
queryClient.setQueryData(['users'], [
{ id: '1', name: 'Alice' },
{ id: '2', name: 'Bob' },
]);
const { result } = renderHookWithQuery(() => useDeleteUser(), { queryClient });
result.current.mutate('1');
// Immediately check optimistic update
const cachedUsers = queryClient.getQueryData(['users']);
expect(cachedUsers).toEqual([{ id: '2', name: 'Bob' }]);
});
it('rolls back on error', async () => {
server.use(
http.delete('/api/users/:id', () => {
return new HttpResponse(null, { status: 500 });
})
);
const queryClient = createTestQueryClient();
const originalUsers = [
{ id: '1', name: 'Alice' },
{ id: '2', name: 'Bob' },
];
queryClient.setQueryData(['users'], originalUsers);
const { result } = renderHookWithQuery(() => useDeleteUser(), { queryClient });
result.current.mutate('1');
await waitFor(() => expect(result.current.isError).toBe(true));
// Verify rollback
const cachedUsers = queryClient.getQueryData(['users']);
expect(cachedUsers).toEqual(originalUsers);
});
});
```
## Key Takeaways
1. **Loading States**: Always test skeleton loaders and spinners
2. **Error Handling**: Test error display and retry functionality
3. **Infinite Queries**: Test pagination, load more, and end of list
4. **Intersection Observer**: Mock IntersectionObserver for auto-loading
5. **Prefetching**: Test hover prefetch and cache population
6. **Optimistic Updates**: Test immediate UI updates and rollback on error
---
**Previous**: [Server Components](server-components-testing.md) | **Index**: [Reference Index](INDEX.md)

<!-- ── File boundary: server-components-testing.md (443 lines) ── -->
# Server Components Testing
Testing patterns for React 19 Server Components, Suspense, and async rendering.
## Testing Async Server Components
### Basic Async Component
```typescript
// src/components/AsyncUserProfile.tsx
export async function AsyncUserProfile({ userId }: { userId: string }) {
// Fetch data in Server Component
const response = await fetch(`/api/users/${userId}`);
const user = await response.json();
return (
<div>
<h1>{user.name}</h1>
<p>{user.email}</p>
</div>
);
}
```
### Test Suite
```typescript
// src/components/AsyncUserProfile.test.tsx
import { describe, it, expect, beforeAll, afterEach, afterAll } from 'vitest';
import { render, screen } from '@testing-library/react';
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';
import { AsyncUserProfile } from './AsyncUserProfile';
const server = setupServer(
http.get('/api/users/:userId', ({ params }) => {
return HttpResponse.json({
id: params.userId,
name: 'Alice Johnson',
email: 'alice@example.com',
});
})
);
beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());
describe('AsyncUserProfile', () => {
it('renders user data after loading', async () => {
// Render async component
const component = await AsyncUserProfile({ userId: '123' });
render(component);
expect(screen.getByText('Alice Johnson')).toBeInTheDocument();
expect(screen.getByText('alice@example.com')).toBeInTheDocument();
});
});
```
## Testing with Suspense Boundaries
### Component with Suspense
```typescript
// src/app/users/[userId]/page.tsx
import { Suspense } from 'react';
import { AsyncUserProfile } from '@/components/AsyncUserProfile';
export default function UserProfilePage({ params }: { params: { userId: string } }) {
return (
<Suspense fallback={<div>Loading user profile...</div>}>
<AsyncUserProfile userId={params.userId} />
</Suspense>
);
}
```
### Test Suite
```typescript
// src/app/users/[userId]/page.test.tsx
import { describe, it, expect } from 'vitest';
import { render, screen } from '@testing-library/react';
import UserProfilePage from './page';
describe('UserProfilePage', () => {
it('shows loading state initially', () => {
render(<UserProfilePage params={{ userId: '123' }} />);
expect(screen.getByText('Loading user profile...')).toBeInTheDocument();
});
it('renders user profile after loading', async () => {
render(<UserProfilePage params={{ userId: '123' }} />);
// Wait for async component to resolve
expect(await screen.findByText('Alice Johnson')).toBeInTheDocument();
});
});
```
## Testing Streaming Rendering
### Streaming Component
```typescript
// src/components/StreamingUserList.tsx
import { Suspense } from 'react';
// NOTE(review): path assumed from the doc's file layout — confirm
import { UserCard } from './UserCard';
/**
 * Server Component that fetches the user list, then streams each
 * UserCard inside its own Suspense boundary.
 */
export async function StreamingUserList() {
  const response = await fetch('/api/users', {
    // Enable ISR-style caching; revalidate at most once per minute
    next: { revalidate: 60 },
  });
  const users = await response.json();
  return (
    <ul>
      {users.map((user: { id: string; name: string }) => (
        <li key={user.id}>
          <Suspense fallback={<div>Loading {user.name}...</div>}>
            <UserCard userId={user.id} />
          </Suspense>
        </li>
      ))}
    </ul>
  );
}
```
### Test Suite
```typescript
// src/components/StreamingUserList.test.tsx
import { describe, it, expect } from 'vitest';
import { render, screen } from '@testing-library/react';
import { StreamingUserList } from './StreamingUserList';
describe('StreamingUserList', () => {
it('progressively renders user cards', async () => {
const component = await StreamingUserList();
render(component);
// All users should eventually appear
expect(await screen.findByText('Alice Johnson')).toBeInTheDocument();
expect(await screen.findByText('Bob Smith')).toBeInTheDocument();
});
it('shows loading placeholders while streaming', () => {
render(<Suspense fallback={<div>Loading list...</div>}><StreamingUserList /></Suspense>);
expect(screen.getByText('Loading list...')).toBeInTheDocument();
});
});
```
## Testing Server Actions
### Server Action
```typescript
// src/actions/createUser.ts
'use server';
import { revalidatePath } from 'next/cache';
export async function createUser(formData: FormData) {
const name = formData.get('name') as string;
const email = formData.get('email') as string;
const response = await fetch('/api/users', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ name, email }),
});
if (!response.ok) {
throw new Error('Failed to create user');
}
const user = await response.json();
// Revalidate users list
revalidatePath('/users');
return user;
}
```
### Test Suite
```typescript
// src/actions/createUser.test.ts
import { describe, it, expect, vi, beforeAll, afterEach, afterAll } from 'vitest';
import { setupServer } from 'msw/node';
import { http, HttpResponse } from 'msw';
import { createUser } from './createUser';
// Mock revalidatePath — next/cache is unavailable outside the Next.js runtime
vi.mock('next/cache', () => ({
  revalidatePath: vi.fn(),
}));
const server = setupServer(
  http.post('/api/users', async ({ request }) => {
    const body = await request.json();
    // Echo the payload back with a server-assigned id
    return HttpResponse.json(
      { id: '123', ...body },
      { status: 201 }
    );
  })
);
beforeAll(() => server.listen());
afterEach(() => server.resetHandlers());
afterAll(() => server.close());
describe('createUser', () => {
it('creates user via server action', async () => {
const formData = new FormData();
formData.append('name', 'Charlie');
formData.append('email', 'charlie@example.com');
const user = await createUser(formData);
expect(user).toEqual({
id: '123',
name: 'Charlie',
email: 'charlie@example.com',
});
});
it('revalidates path after creation', async () => {
const { revalidatePath } = await import('next/cache');
const formData = new FormData();
formData.append('name', 'Diana');
formData.append('email', 'diana@example.com');
await createUser(formData);
expect(revalidatePath).toHaveBeenCalledWith('/users');
});
});
```
## Testing Form Actions
### Form with Server Action
```typescript
// src/components/UserForm.tsx
import { createUser } from '@/actions/createUser';
export function UserForm() {
return (
<form action={createUser}>
<label htmlFor="name">Name</label>
<input id="name" name="name" required />
<label htmlFor="email">Email</label>
<input id="email" name="email" type="email" required />
<button type="submit">Create User</button>
</form>
);
}
```
### Test Suite
```typescript
// src/components/UserForm.test.tsx
import { describe, it, expect, vi } from 'vitest';
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { UserForm } from './UserForm';
// Mock the server action
vi.mock('@/actions/createUser', () => ({
createUser: vi.fn(),
}));
describe('UserForm', () => {
it('submits form with server action', async () => {
const { createUser } = await import('@/actions/createUser');
const user = userEvent.setup();
render(<UserForm />);
await user.type(screen.getByLabelText('Name'), 'Alice');
await user.type(screen.getByLabelText('Email'), 'alice@example.com');
await user.click(screen.getByRole('button', { name: 'Create User' }));
expect(createUser).toHaveBeenCalledWith(expect.any(FormData));
const formData = (createUser as any).mock.calls[0][0] as FormData;
expect(formData.get('name')).toBe('Alice');
expect(formData.get('email')).toBe('alice@example.com');
});
});
```
## Testing Error Boundaries
### Error Boundary Component
```typescript
// src/components/AsyncUserError.tsx
export async function AsyncUserError({ userId }: { userId: string }) {
const response = await fetch(`/api/users/${userId}`);
if (!response.ok) {
throw new Error(`User ${userId} not found`);
}
const user = await response.json();
return <div>{user.name}</div>;
}
// src/app/users/[userId]/error.tsx
'use client';
export default function Error({
error,
reset,
}: {
error: Error;
reset: () => void;
}) {
return (
<div role="alert">
<h2>Something went wrong!</h2>
<p>{error.message}</p>
<button onClick={reset}>Try again</button>
</div>
);
}
```
### Test Suite
```typescript
// src/app/users/[userId]/error.test.tsx
import { describe, it, expect, vi } from 'vitest';
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import Error from './error';
describe('Error Boundary', () => {
it('displays error message', () => {
const error = new Error('User not found');
const reset = vi.fn();
render(<Error error={error} reset={reset} />);
expect(screen.getByRole('alert')).toHaveTextContent('User not found');
});
it('calls reset on try again', async () => {
const user = userEvent.setup();
const error = new Error('User not found');
const reset = vi.fn();
render(<Error error={error} reset={reset} />);
await user.click(screen.getByRole('button', { name: 'Try again' }));
expect(reset).toHaveBeenCalled();
});
});
```
## Testing Parallel Data Fetching
### Parallel Async Components
```typescript
// src/app/dashboard/page.tsx
import { Suspense } from 'react';
async function UserStats() {
const response = await fetch('/api/stats/users');
const stats = await response.json();
return <div>{stats.count} users</div>;
}
async function PostStats() {
const response = await fetch('/api/stats/posts');
const stats = await response.json();
return <div>{stats.count} posts</div>;
}
export default function Dashboard() {
return (
<div>
<Suspense fallback={<div>Loading user stats...</div>}>
<UserStats />
</Suspense>
<Suspense fallback={<div>Loading post stats...</div>}>
<PostStats />
</Suspense>
</div>
);
}
```
### Test Suite
```typescript
// src/app/dashboard/page.test.tsx
import { describe, it, expect } from 'vitest';
import { render, screen } from '@testing-library/react';
import Dashboard from './page';
describe('Dashboard', () => {
it('loads components in parallel', async () => {
render(<Dashboard />);
// Both should load independently
expect(await screen.findByText('150 users')).toBeInTheDocument();
expect(await screen.findByText('300 posts')).toBeInTheDocument();
});
it('shows individual loading states', () => {
render(<Dashboard />);
expect(screen.getByText('Loading user stats...')).toBeInTheDocument();
expect(screen.getByText('Loading post stats...')).toBeInTheDocument();
});
});
```
## Key Takeaways
1. **Async Components**: Await component render before passing to `render()`
2. **Suspense**: Test both fallback and resolved states
3. **Server Actions**: Mock server actions and verify FormData
4. **Error Boundaries**: Test error display and reset functionality
5. **Parallel Fetching**: Each Suspense boundary loads independently
---
**Next**: [Common Patterns](common-patterns.md) | **Previous**: [Best Practices](testing-best-practices.md)

<!-- ── File boundary: testing-best-practices.md (499 lines) ── -->
# Testing Best Practices
Patterns and utilities for effective React and TanStack testing.
## Custom Render Functions
### Basic Custom Render
```typescript
// src/test/test-utils.tsx
import { ReactElement } from 'react';
import { render, RenderOptions } from '@testing-library/react';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
interface CustomRenderOptions extends Omit<RenderOptions, 'wrapper'> {
queryClient?: QueryClient;
}
export function renderWithQuery(
ui: ReactElement,
options?: CustomRenderOptions
) {
const queryClient = options?.queryClient ?? new QueryClient({
defaultOptions: {
queries: { retry: false, gcTime: 0, staleTime: 0 },
mutations: { retry: false },
},
});
return render(
<QueryClientProvider client={queryClient}>
{ui}
</QueryClientProvider>,
options
);
}
```
### Complete Provider Wrapper
```typescript
// src/test/test-utils.tsx
import { RouterProvider, createRouter, createMemoryHistory } from '@tanstack/react-router';
import { AuthProvider } from '../contexts/AuthContext';
interface AllProvidersOptions {
queryClient?: QueryClient;
initialRoute?: string;
authContext?: {
isAuthenticated: boolean;
user?: any;
};
}
export function createWrapper(options: AllProvidersOptions = {}) {
const queryClient = options.queryClient ?? createTestQueryClient();
const history = createMemoryHistory({
initialEntries: [options.initialRoute || '/'],
});
const router = createRouter({ routeTree, history });
return function Wrapper({ children }: { children: React.ReactNode }) {
return (
<QueryClientProvider client={queryClient}>
<AuthProvider value={options.authContext}>
<RouterProvider router={router}>
{children}
</RouterProvider>
</AuthProvider>
</QueryClientProvider>
);
};
}
export function renderWithAllProviders(
ui: ReactElement,
options?: AllProvidersOptions & RenderOptions
) {
const Wrapper = createWrapper(options);
return render(ui, { wrapper: Wrapper, ...options });
}
```
## Mock Data Factories
### User Factory
```typescript
// src/test/factories/userFactory.ts
import { faker } from '@faker-js/faker';
export interface User {
id: string;
name: string;
email: string;
role: 'admin' | 'user';
createdAt: string;
}
export function createMockUser(overrides?: Partial<User>): User {
return {
id: faker.string.uuid(),
name: faker.person.fullName(),
email: faker.internet.email(),
role: 'user',
createdAt: faker.date.past().toISOString(),
...overrides,
};
}
export function createMockUsers(count: number, overrides?: Partial<User>): User[] {
return Array.from({ length: count }, () => createMockUser(overrides));
}
// Usage in tests
it('displays user list', () => {
const users = createMockUsers(5);
render(<UserList users={users} />);
// ...
});
```
### Query Data Factory
```typescript
// src/test/factories/queryDataFactory.ts
import { QueryClient } from '@tanstack/react-query';
export function createQueryClientWithData(queryKey: any[], data: any) {
const queryClient = createTestQueryClient();
queryClient.setQueryData(queryKey, data);
return queryClient;
}
// Usage in tests
it('shows cached users', () => {
const users = createMockUsers(3);
const queryClient = createQueryClientWithData(['users'], users);
renderWithQuery(<UserList />, { queryClient });
// Users are already in cache, no loading state
});
```
## Test Organization
### File Structure
```
src/
├── components/
│ ├── UserList/
│ │ ├── UserList.tsx
│ │ ├── UserList.test.tsx
│ │ └── index.ts
├── hooks/
│ ├── useUsers.ts
│ ├── useUsers.test.ts
└── test/
├── setup.ts
├── test-utils.tsx
├── factories/
│ ├── userFactory.ts
│ └── postFactory.ts
└── msw/
├── server.ts
└── handlers.ts
```
### Test File Patterns
```typescript
// Component test pattern
describe('UserList', () => {
// Group by functionality
describe('rendering', () => {
it('displays all users', () => {});
it('shows empty state when no users', () => {});
});
describe('interactions', () => {
it('navigates to user detail on click', async () => {});
it('deletes user on delete button click', async () => {});
});
describe('loading states', () => {
it('shows skeleton while loading', () => {});
it('shows error message on failure', async () => {});
});
});
```
## MSW Best Practices
### Handler Organization
```typescript
// src/test/msw/handlers/users.ts
import { http, HttpResponse } from 'msw';
export const userHandlers = [
http.get('/api/users', () => {
return HttpResponse.json([/* default users */]);
}),
];
// src/test/msw/handlers/auth.ts
export const authHandlers = [
http.post('/api/auth/login', async ({ request }) => {
// Auth logic
}),
];
// src/test/msw/handlers/index.ts
import { userHandlers } from './users';
import { authHandlers } from './auth';
export const handlers = [...userHandlers, ...authHandlers];
```
### Dynamic Handlers
```typescript
// src/test/msw/handlers/users.ts
let mockUsers = [/* default users */];
export const userHandlers = [
http.get('/api/users', () => {
return HttpResponse.json(mockUsers);
}),
http.post('/api/users', async ({ request }) => {
const newUser = await request.json();
mockUsers = [...mockUsers, newUser];
return HttpResponse.json(newUser, { status: 201 });
}),
];
// Reset between tests
export function resetMockUsers() {
mockUsers = [/* default users */];
}
// In setup.ts
afterEach(() => {
resetMockUsers();
});
```
## Testing Hooks
### renderHook with Providers
```typescript
// src/test/test-utils.tsx
import { renderHook, RenderHookOptions } from '@testing-library/react';
export function renderHookWithQuery<TProps, TResult>(
hook: (props: TProps) => TResult,
options?: RenderHookOptions<TProps> & { queryClient?: QueryClient }
) {
const queryClient = options?.queryClient ?? createTestQueryClient();
const wrapper = ({ children }: { children: React.ReactNode }) => (
<QueryClientProvider client={queryClient}>
{children}
</QueryClientProvider>
);
return renderHook(hook, { wrapper, ...options });
}
// Usage
it('fetches users', async () => {
const { result } = renderHookWithQuery(() => useUsers());
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(result.current.data).toHaveLength(3);
});
```
## Async Testing Patterns
### Using waitFor
```typescript
import { waitFor } from '@testing-library/react';
it('loads data asynchronously', async () => {
render(<AsyncComponent />);
// Wait for loading to complete
await waitFor(() => {
expect(screen.getByText('Data loaded')).toBeInTheDocument();
});
});
```
### Using findBy Queries
```typescript
it('displays user after loading', async () => {
render(<UserProfile userId="123" />);
// findBy automatically waits (up to 1000ms by default)
const userName = await screen.findByText('Alice Johnson');
expect(userName).toBeInTheDocument();
});
```
## Testing User Interactions
### userEvent Setup
```typescript
import userEvent from '@testing-library/user-event';
it('handles user input', async () => {
const user = userEvent.setup();
render(<LoginForm />);
await user.type(screen.getByLabelText('Email'), 'alice@example.com');
await user.type(screen.getByLabelText('Password'), 'password123');
await user.click(screen.getByRole('button', { name: 'Login' }));
expect(await screen.findByText('Welcome')).toBeInTheDocument();
});
```
### Keyboard Navigation
```typescript
it('navigates form with keyboard', async () => {
const user = userEvent.setup();
render(<Form />);
await user.tab(); // Focus first field
await user.keyboard('Alice');
await user.tab(); // Move to next field
await user.keyboard('alice@example.com');
await user.keyboard('{Enter}'); // Submit form
});
```
## Testing Accessibility
### Query by Role
```typescript
it('has accessible structure', () => {
render(<UserList users={mockUsers} />);
expect(screen.getByRole('list')).toBeInTheDocument();
expect(screen.getAllByRole('listitem')).toHaveLength(3);
expect(screen.getByRole('button', { name: 'Add User' })).toBeInTheDocument();
});
```
### Aria Labels
```typescript
it('has proper aria labels', () => {
render(<DeleteButton onDelete={mockDelete} />);
const button = screen.getByRole('button', { name: 'Delete user' });
expect(button).toHaveAttribute('aria-label', 'Delete user');
});
```
## Performance Testing
### Test Rendering Performance
```typescript
it('renders large list efficiently', () => {
const users = createMockUsers(1000);
const start = performance.now();
render(<VirtualizedUserList users={users} />);
const duration = performance.now() - start;
expect(duration).toBeLessThan(100); // Should render in <100ms
});
```
### Test Query Performance
```typescript
it('avoids N+1 queries', async () => {
  const spy = vi.spyOn(window, 'fetch');
  render(<UsersWithPosts />);
  // waitFor returns a promise — it must be awaited or the assertion
  // inside may never run and the test passes vacuously
  await waitFor(() => {
    expect(spy).toHaveBeenCalledTimes(1); // Single query with join
  });
});
```
## Snapshot Testing
### Component Snapshot
```typescript
it('matches snapshot', () => {
const { container } = render(<UserCard user={mockUser} />);
expect(container).toMatchSnapshot();
});
```
### Inline Snapshot
```typescript
it('renders correct HTML', () => {
render(<Button>Click me</Button>);
expect(screen.getByRole('button')).toMatchInlineSnapshot(`
<button>
Click me
</button>
`);
});
```
## Coverage Exclusions
### Exclude from Coverage
```typescript
/* v8 ignore start */
if (process.env.NODE_ENV === 'development') {
// Dev-only code excluded from coverage
}
/* v8 ignore stop */
```
### Exclude Test Files
```typescript
// vitest.config.ts
export default defineConfig({
test: {
coverage: {
exclude: [
'src/test/**',
'**/*.test.{ts,tsx}',
'**/*.spec.{ts,tsx}',
'**/mockData.ts',
],
},
},
});
```
## CI/CD Best Practices
### Parallel Testing
```bash
# Run tests in parallel (Vitest 1.x: --threads/--maxThreads were replaced by pool options)
vitest --pool=threads --poolOptions.threads.maxThreads=4
# Run specific tests
vitest src/components/UserList
```
### Coverage Enforcement
```typescript
// vitest.config.ts
export default defineConfig({
test: {
coverage: {
thresholds: {
lines: 80,
functions: 80,
branches: 80,
statements: 80,
},
// Fail CI if below thresholds
reporter: ['text', 'json-summary'],
},
},
});
```
## Key Takeaways
1. **Custom Utilities**: Create reusable render functions with all providers
2. **Mock Factories**: Use faker for consistent test data
3. **MSW Organization**: Group handlers by domain (users, auth, posts)
4. **Async Testing**: Prefer `findBy` and `waitFor` for async operations
5. **Accessibility**: Always query by role first
6. **Coverage**: Aim for 80%+ with meaningful tests
---
**Next**: [Server Components Testing](server-components-testing.md) | **Previous**: [Testing Setup](testing-setup.md)

<!-- ── File boundary: testing-setup.md (421 lines) ── -->
# Testing Setup Reference
Complete configuration guide for Vitest, React Testing Library, and MSW in TanStack projects.
## Vitest Configuration
### vitest.config.ts
```typescript
import { defineConfig } from 'vitest/config';
import react from '@vitejs/plugin-react';
import path from 'path';
export default defineConfig({
plugins: [react()],
test: {
globals: true,
environment: 'jsdom',
setupFiles: ['./src/test/setup.ts'],
coverage: {
provider: 'v8',
reporter: ['text', 'json', 'html'],
exclude: [
'node_modules/',
'src/test/',
'**/*.d.ts',
'**/*.config.*',
'**/mockData',
],
thresholds: {
lines: 80,
functions: 80,
branches: 80,
statements: 80,
},
},
},
resolve: {
alias: {
'@': path.resolve(__dirname, './src'),
},
},
});
```
### Key Options
| Option | Purpose | Value |
|--------|---------|-------|
| `globals` | Enable global test APIs (describe, it, expect) | `true` |
| `environment` | Test environment (jsdom for DOM testing) | `'jsdom'` |
| `setupFiles` | Files to run before each test file | `['./src/test/setup.ts']` |
| `coverage.provider` | Coverage provider | `'v8'` (faster) |
| `coverage.thresholds` | Minimum coverage percentages | 80% recommended |
## Test Setup File
### src/test/setup.ts
```typescript
import '@testing-library/jest-dom/vitest';
import { cleanup } from '@testing-library/react';
import { afterEach, beforeAll, afterAll, vi } from 'vitest';
import { server } from './msw/server';
// Cleanup after each test
afterEach(() => {
cleanup();
});
// MSW setup
beforeAll(() => server.listen({ onUnhandledRequest: 'error' }));
afterEach(() => server.resetHandlers());
afterAll(() => server.close());
// Mock window.matchMedia (for responsive components)
Object.defineProperty(window, 'matchMedia', {
writable: true,
value: vi.fn().mockImplementation((query) => ({
matches: false,
media: query,
onchange: null,
addListener: vi.fn(),
removeListener: vi.fn(),
addEventListener: vi.fn(),
removeEventListener: vi.fn(),
dispatchEvent: vi.fn(),
})),
});
// Mock IntersectionObserver (for infinite scroll)
global.IntersectionObserver = vi.fn().mockImplementation(() => ({
observe: vi.fn(),
unobserve: vi.fn(),
disconnect: vi.fn(),
}));
// Mock ResizeObserver (for table columns)
global.ResizeObserver = vi.fn().mockImplementation(() => ({
observe: vi.fn(),
unobserve: vi.fn(),
disconnect: vi.fn(),
}));
// Suppress console errors in tests (optional)
const originalError = console.error;
beforeAll(() => {
console.error = (...args: any[]) => {
if (
typeof args[0] === 'string' &&
args[0].includes('Warning: ReactDOM.render')
) {
return;
}
originalError.call(console, ...args);
};
});
afterAll(() => {
console.error = originalError;
});
```
## MSW Setup
### src/test/msw/server.ts
```typescript
import { setupServer } from 'msw/node';
import { handlers } from './handlers';
export const server = setupServer(...handlers);
```
### src/test/msw/handlers.ts
```typescript
import { http, HttpResponse } from 'msw';
export const handlers = [
// Users API
http.get('/api/users', () => {
return HttpResponse.json([
{ id: '1', name: 'Alice', email: 'alice@example.com' },
{ id: '2', name: 'Bob', email: 'bob@example.com' },
]);
}),
http.get('/api/users/:id', ({ params }) => {
return HttpResponse.json({
id: params.id,
name: 'Alice',
email: 'alice@example.com',
});
}),
http.post('/api/users', async ({ request }) => {
const body = await request.json();
return HttpResponse.json(
{ id: '3', ...body },
{ status: 201 }
);
}),
// Auth API
http.post('/api/auth/login', async ({ request }) => {
const { email, password } = await request.json();
if (email === 'test@example.com' && password === 'password') {
return HttpResponse.json({
token: 'mock-jwt-token',
user: { id: '1', email, name: 'Test User' },
});
}
return HttpResponse.json(
{ error: 'Invalid credentials' },
{ status: 401 }
);
}),
];
```
### Overriding Handlers in Tests
```typescript
import { server } from '../test/msw/server';
import { http, HttpResponse } from 'msw';
it('handles API error', async () => {
// Override handler for this test
server.use(
http.get('/api/users', () => {
return new HttpResponse(null, { status: 500 });
})
);
// Test error handling...
});
```
## TanStack Query Setup
### src/test/query-client.ts
```typescript
import { QueryClient } from '@tanstack/react-query';
/**
 * QueryClient tuned for tests: no retries, no caching, data always stale,
 * so every render fetches deterministically.
 */
export function createTestQueryClient() {
  return new QueryClient({
    defaultOptions: {
      queries: {
        retry: false, // Don't retry failed queries in tests
        gcTime: 0, // No garbage collection delay (v5 name for cacheTime)
        staleTime: 0, // Always consider data stale
      },
      mutations: {
        retry: false,
      },
    },
    // NOTE: the `logger` option was removed in TanStack Query v5 (which this
    // guide targets — see `gcTime` above). To silence expected query errors,
    // stub console.error in the shared test setup file instead.
  });
}
```
## TanStack Router Setup
### src/test/router-utils.tsx
```typescript
import { createMemoryHistory, createRouter } from '@tanstack/react-router';
import { routeTree } from '../routeTree.gen';
export function createTestRouter(initialEntries = ['/']) {
const history = createMemoryHistory({ initialEntries });
return createRouter({
routeTree,
history,
context: {
// Mock auth context
auth: {
isAuthenticated: true,
user: { id: '1', name: 'Test User' },
},
},
});
}
```
## Custom Test Utilities
### src/test/test-utils.tsx
```typescript
import { ReactElement } from 'react';
import { render, RenderOptions } from '@testing-library/react';
import { QueryClientProvider } from '@tanstack/react-query';
import { RouterProvider } from '@tanstack/react-router';
import { createTestQueryClient } from './query-client';
import { createTestRouter } from './router-utils';
interface WrapperProps {
children: React.ReactNode;
}
export function AllTheProviders({ children }: WrapperProps) {
const queryClient = createTestQueryClient();
const router = createTestRouter();
return (
<QueryClientProvider client={queryClient}>
<RouterProvider router={router}>
{children}
</RouterProvider>
</QueryClientProvider>
);
}
export function renderWithProviders(
ui: ReactElement,
options?: Omit<RenderOptions, 'wrapper'>
) {
return render(ui, { wrapper: AllTheProviders, ...options });
}
// Re-export everything from React Testing Library
export * from '@testing-library/react';
export { renderWithProviders as render };
```
## Package.json Scripts
```json
{
"scripts": {
"test": "vitest",
"test:ui": "vitest --ui",
"test:coverage": "vitest --coverage",
"test:run": "vitest run"
},
"devDependencies": {
"@testing-library/jest-dom": "^6.1.5",
"@testing-library/react": "^14.1.2",
"@testing-library/user-event": "^14.5.1",
"@vitest/coverage-v8": "^1.0.4",
"@vitest/ui": "^1.0.4",
"jsdom": "^23.0.1",
"msw": "^2.0.11",
"vitest": "^1.0.4"
}
}
```
## TypeScript Configuration
### tsconfig.json
```json
{
"compilerOptions": {
"types": ["vitest/globals", "@testing-library/jest-dom"]
}
}
```
## Coverage Configuration
### .gitignore
```
coverage/
.vitest/
```
### Coverage Thresholds
```typescript
// vitest.config.ts
export default defineConfig({
test: {
coverage: {
thresholds: {
lines: 80,
functions: 80,
branches: 80,
statements: 80,
// Per-file thresholds
perFile: true,
},
},
},
});
```
## Environment Variables
### .env.test
```bash
VITE_API_URL=http://localhost:3000/api
VITE_ENV=test
```
### Loading in Tests
```typescript
// src/test/setup.ts
import { loadEnv } from 'vite';
const env = loadEnv('test', process.cwd(), '');
process.env = { ...process.env, ...env };
```
## CI/CD Integration
### GitHub Actions
```yaml
name: Test
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
      - run: npm ci
      - run: npm run test:coverage
      - uses: codecov/codecov-action@v4
        with:
          files: ./coverage/coverage-final.json
```
## Troubleshooting
### Common Issues
**Issue**: `ReferenceError: describe is not defined`
**Solution**: Add `globals: true` to vitest config
**Issue**: `Cannot find module '@testing-library/jest-dom/vitest'`
**Solution**: Install `@testing-library/jest-dom` v6 or later (the `/vitest` entry point was added in v6)
**Issue**: MSW not intercepting requests
**Solution**: Ensure `server.listen()` is called in `beforeAll`
**Issue**: Tests fail with "Act" warnings
**Solution**: Wrap async operations with `waitFor` or `findBy`
---
**Next**: [Testing Best Practices](testing-best-practices.md) | **Index**: [Reference Index](INDEX.md)

View File

@@ -0,0 +1,25 @@
# Test Generation Skill
Comprehensive test suite generation with unit tests, integration tests, edge cases, and error handling.
## Description
Automated test generation analyzing code structure and generating thorough test coverage for existing implementations.
## What's Included
- **Examples**: Unit test generation, integration tests, edge cases
- **Reference**: Test patterns, coverage strategies
- **Templates**: Test suite templates for different frameworks
## Use When
- Need test coverage for existing code
- Improving low coverage areas
- Systematic test creation
## Related Agents
- `test-generator`
**Skill Version**: 1.0

View File

@@ -0,0 +1,36 @@
# Test Generator Examples
Real-world examples of comprehensive test suite generation for frontend components, backend APIs, and test coverage improvement.
## Files in This Directory
### [react-component-testing.md](react-component-testing.md)
Complete example of generating test suite for React component with TanStack Query - from no tests to 100% coverage. Shows unit tests, integration tests, user interaction tests, and accessibility tests.
**Scenario**: Payment form component with validation, API calls, and error handling - initially untested
**Result**: 42 tests generated, 100% coverage, caught 3 bugs during testing
**Technologies**: React 19, Vitest, Testing Library, TanStack Query
### [api-endpoint-testing.md](api-endpoint-testing.md)
Backend API testing example showing test generation for FastAPI endpoints - authentication, validation, database operations, and error handling.
**Scenario**: User management API endpoints with CRUD operations - 35% test coverage
**Result**: Coverage improved from 35% → 94%, 67 tests generated, found 5 edge case bugs
**Technologies**: FastAPI, pytest, SQLModel, PostgreSQL
### [test-coverage-workflow.md](test-coverage-workflow.md)
Step-by-step workflow for analyzing low-coverage codebase and systematically generating tests to reach 80%+ coverage.
**Scenario**: Legacy codebase with 42% coverage needs improvement for production deployment
**Result**: Coverage 42% → 87% over 3 days, 156 tests generated, zero production bugs first month
**Technologies**: Multi-language (TypeScript + Python), Vitest + pytest
## Navigation
**Parent**: [Test Generator Agent](../test-generator.md)
**Reference**: [Reference Index](../reference/INDEX.md)
**Templates**: [Templates Index](../templates/INDEX.md)
---
Return to [agent documentation](../test-generator.md)

View File

@@ -0,0 +1,39 @@
# Test Generator Reference Guide
Quick-lookup patterns and best practices for test generation across technologies.
## Files in This Directory
### [testing-patterns.md](testing-patterns.md)
Comprehensive patterns for Vitest (TypeScript) and pytest (Python) - from basic unit tests to advanced mocking strategies.
**When to use**: Writing tests, reviewing test code, establishing patterns
**Key topics**: AAA pattern, fixtures, mocking, async testing, TanStack Query patterns
### [test-structure-guide.md](test-structure-guide.md)
How to organize test files, naming conventions, test suite structure, and file organization for maximum maintainability.
**When to use**: Organizing test suites, structuring new projects
**Key topics**: File naming, directory structure, test grouping with describe blocks
### [mocking-strategies.md](mocking-strategies.md)
Complete guide to mocking - when to mock, how to mock, mock verification, and avoiding over-mocking.
**When to use**: Testing code with external dependencies
**Key topics**: API mocking, database mocking, module mocking, spy vs stub vs mock
### [coverage-standards.md](coverage-standards.md)
What different coverage percentages mean, when to aim for different levels, and how to interpret coverage reports.
**When to use**: Setting coverage goals, interpreting coverage gaps
**Key topics**: 80% rule, critical path coverage, diminishing returns
## Navigation
**Parent**: [Test Generator Agent](../test-generator.md)
**Examples**: [Examples Index](../examples/INDEX.md)
**Templates**: [Templates Index](../templates/INDEX.md)
---
Return to [agent documentation](../test-generator.md)

View File

@@ -0,0 +1,52 @@
# Test Generator Templates
Copy-paste templates for common testing scenarios - customize for your needs.
## Files in This Directory
### [unit-test-template.md](unit-test-template.md)
Ready-to-use unit test templates for TypeScript (Vitest) and Python (pytest) with AAA pattern, fixtures, and common test cases.
**When to use**: Creating new unit test files
**Languages**: TypeScript, Python
### [integration-test-template.md](integration-test-template.md)
Templates for integration tests covering API endpoints, database interactions, and multi-component workflows.
**When to use**: Testing interactions between components or services
**Languages**: TypeScript, Python
### [test-fixtures-template.md](test-fixtures-template.md)
Templates for test fixtures, factories, and test data builders with realistic examples.
**When to use**: Setting up test data and shared fixtures
**Languages**: TypeScript, Python
### [test-plan-template.md](test-plan-template.md)
Comprehensive test plan template for feature development with coverage goals, risk assessment, and test strategy.
**When to use**: Planning test coverage for new features or improvements
**Format**: Markdown checklist
## Usage
1. **Copy template** to your test directory
2. **Replace placeholders** (e.g., `ComponentName`, `YourService`)
3. **Customize** test cases for your specific needs
4. **Add tests** as you discover edge cases
## Template Conventions
**Placeholders**:
- `ComponentName` - Replace with your component name
- `functionName` - Replace with your function name
- `YourService` - Replace with your service class name
- `...` - Add more test cases
**Comments**:
- `// TODO:` - Action items to complete
- `// CUSTOMIZE:` - Areas to customize for your use case
---
Return to [agent documentation](../test-generator.md)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,946 @@
# Testing Reference
Complete configurations, project structures, and setup guides for Grey Haven testing infrastructure.
## Table of Contents
- [TypeScript Configuration](#typescript-configuration)
- [Python Configuration](#python-configuration)
- [Project Structures](#project-structures)
- [Doppler Configuration](#doppler-configuration)
- [GitHub Actions Configuration](#github-actions-configuration)
- [Coverage Configuration](#coverage-configuration)
## TypeScript Configuration
### Complete vitest.config.ts
```typescript
// vitest.config.ts
import { defineConfig } from "vitest/config";
import react from "@vitejs/plugin-react";
import path from "path";
export default defineConfig({
plugins: [react()],
test: {
// Enable global test APIs (describe, it, expect)
globals: true,
// Use jsdom for browser-like environment
environment: "jsdom",
// Run setup file before tests
setupFiles: ["./tests/setup.ts"],
// Coverage configuration
coverage: {
// Use V8 coverage provider (faster than Istanbul)
provider: "v8",
// Coverage reporters
reporter: ["text", "json", "html"],
// Exclude from coverage
exclude: [
"node_modules/",
"tests/",
"**/*.config.ts",
"**/*.d.ts",
"**/types/",
"**/__mocks__/",
],
// Minimum coverage thresholds (enforced in CI)
thresholds: {
lines: 80,
functions: 80,
branches: 80,
statements: 80,
},
},
// Environment variables for tests
env: {
// Doppler provides these at runtime
DATABASE_URL_ADMIN: process.env.DATABASE_URL_ADMIN || "postgresql://localhost/test",
REDIS_URL: process.env.REDIS_URL || "redis://localhost:6379",
VITE_API_URL: process.env.VITE_API_URL || "http://localhost:3000",
},
// Test timeout (ms)
testTimeout: 10000,
// Hook timeouts
hookTimeout: 10000,
    // Retry failed tests
    retry: 0,
    // Run test files in parallel using the worker-threads pool.
    // (Vitest 1.x replaced the old `threads`/`maxThreads`/`minThreads`
    // options with `pool` and `poolOptions.threads`.)
    pool: "threads",
    poolOptions: {
      threads: {
        // Maximum concurrent threads
        maxThreads: 4,
        // Minimum concurrent threads
        minThreads: 1,
      },
    },
},
// Path aliases
resolve: {
alias: {
"~": path.resolve(__dirname, "./src"),
},
},
});
```
**Field Explanations:**
- `globals: true` - Makes test APIs available without imports
- `environment: "jsdom"` - Simulates browser environment for React components
- `setupFiles` - Runs before each test file
- `coverage.provider: "v8"` - Fast coverage using V8 engine
- `coverage.thresholds` - Enforces minimum coverage percentages
- `testTimeout: 10000` - Each test must complete within 10 seconds
- Thread-pool parallelism - Vitest runs test files in parallel for speed
- `retry: 0` - Don't retry failed tests (fail fast)
### Test Setup File (tests/setup.ts)
```typescript
// tests/setup.ts
import { afterEach, beforeAll, afterAll, vi } from "vitest";
import { cleanup } from "@testing-library/react";
import "@testing-library/jest-dom/vitest";
// Cleanup after each test case
afterEach(() => {
cleanup();
vi.clearAllMocks();
});
// Setup before all tests
beforeAll(() => {
// Mock environment variables
process.env.VITE_API_URL = "http://localhost:3000";
process.env.DATABASE_URL_ADMIN = "postgresql://localhost/test";
// Mock window.matchMedia (for responsive components)
Object.defineProperty(window, "matchMedia", {
writable: true,
value: vi.fn().mockImplementation((query) => ({
matches: false,
media: query,
onchange: null,
addListener: vi.fn(),
removeListener: vi.fn(),
addEventListener: vi.fn(),
removeEventListener: vi.fn(),
dispatchEvent: vi.fn(),
})),
});
// Mock IntersectionObserver
global.IntersectionObserver = vi.fn().mockImplementation(() => ({
observe: vi.fn(),
unobserve: vi.fn(),
disconnect: vi.fn(),
}));
});
// Cleanup after all tests
afterAll(async () => {
// Close database connections
// Clean up any resources
});
```
### Package.json Scripts
```json
{
"scripts": {
"test": "vitest run",
"test:watch": "vitest",
"test:ui": "vitest --ui",
"test:coverage": "vitest run --coverage",
"test:unit": "vitest run tests/unit",
"test:integration": "vitest run tests/integration",
"test:e2e": "playwright test",
"test:e2e:ui": "playwright test --ui"
},
"devDependencies": {
"@playwright/test": "^1.40.0",
"@testing-library/jest-dom": "^6.1.5",
"@testing-library/react": "^14.1.2",
"@testing-library/user-event": "^14.5.1",
"@vitest/ui": "^1.0.4",
"@faker-js/faker": "^8.3.1",
"vitest": "^1.0.4",
"@vitest/coverage-v8": "^1.0.4"
}
}
```
### Playwright Configuration
```typescript
// playwright.config.ts
import { defineConfig, devices } from "@playwright/test";
export default defineConfig({
testDir: "./tests/e2e",
fullyParallel: true,
forbidOnly: !!process.env.CI,
retries: process.env.CI ? 2 : 0,
workers: process.env.CI ? 1 : undefined,
reporter: "html",
use: {
baseURL: process.env.PLAYWRIGHT_BASE_URL || "http://localhost:3000",
trace: "on-first-retry",
},
projects: [
{
name: "chromium",
use: { ...devices["Desktop Chrome"] },
},
{
name: "firefox",
use: { ...devices["Desktop Firefox"] },
},
{
name: "webkit",
use: { ...devices["Desktop Safari"] },
},
],
webServer: {
command: "bun run dev",
url: "http://localhost:3000",
reuseExistingServer: !process.env.CI,
},
});
```
## Python Configuration
### Complete pyproject.toml
```toml
# pyproject.toml
[tool.pytest.ini_options]
# Test discovery
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
# Command line options
addopts = [
"--strict-markers", # Error on unknown markers
"--strict-config", # Error on config errors
"-ra", # Show extra test summary
"--cov=app", # Measure coverage of app/ directory
"--cov-report=term-missing", # Show missing lines in terminal
"--cov-report=html", # Generate HTML coverage report
"--cov-report=xml", # Generate XML for CI tools
"--cov-fail-under=80", # Fail if coverage < 80%
"-v", # Verbose output
]
# Test markers (use with @pytest.mark.unit, etc.)
markers = [
"unit: Fast, isolated unit tests",
"integration: Tests involving multiple components",
"e2e: End-to-end tests through full flows",
"benchmark: Performance tests",
"slow: Tests that take >5 seconds",
]
# Async support
asyncio_mode = "auto"
# Test output
console_output_style = "progress"
# Warnings
filterwarnings = [
"error", # Treat warnings as errors
"ignore::DeprecationWarning", # Ignore deprecation warnings
"ignore::PendingDeprecationWarning", # Ignore pending deprecations
]
# Coverage configuration
[tool.coverage.run]
source = ["app"]
omit = [
"*/tests/*",
"*/conftest.py",
"*/__init__.py",
"*/migrations/*",
"*/config/*",
]
branch = true
parallel = true
[tool.coverage.report]
precision = 2
show_missing = true
skip_covered = false
exclude_lines = [
"pragma: no cover",
"def __repr__",
"def __str__",
"raise AssertionError",
"raise NotImplementedError",
"if __name__ == .__main__.:",
"if TYPE_CHECKING:",
"class .*\\bProtocol\\):",
"@(abc\\.)?abstractmethod",
]
[tool.coverage.html]
directory = "htmlcov"
[tool.coverage.xml]
output = "coverage.xml"
```
**Configuration Explanations:**
- `testpaths = ["tests"]` - Only look for tests in tests/ directory
- `--strict-markers` - Fail if test uses undefined marker
- `--cov=app` - Measure coverage of app/ directory
- `--cov-fail-under=80` - CI fails if coverage < 80%
- `asyncio_mode = "auto"` - Auto-detect async tests
- `branch = true` - Measure branch coverage (more thorough)
- `parallel = true` - Support parallel test execution
### Development Dependencies
```txt
# requirements-dev.txt
# Testing
pytest==8.0.0
pytest-asyncio==0.23.3
pytest-cov==4.1.0
pytest-mock==3.12.0
pytest-benchmark==4.0.0
# Test utilities
faker==22.0.0
factory-boy==3.3.0
httpx==0.26.0
# Type checking
mypy==1.8.0
# Linting
ruff==0.1.9
# Task runner
taskipy==1.12.2
```
### Taskfile Configuration
```toml
# pyproject.toml (continued)
[tool.taskipy.tasks]
# Testing tasks
test = "doppler run -- pytest"
test-unit = "doppler run -- pytest -m unit"
test-integration = "doppler run -- pytest -m integration"
test-e2e = "doppler run -- pytest -m e2e"
test-benchmark = "doppler run -- pytest -m benchmark"
test-coverage = "doppler run -- pytest --cov=app --cov-report=html"
test-watch = "doppler run -- pytest-watch"
# Linting and formatting
lint = "ruff check app tests"
format = "ruff format app tests"
typecheck = "mypy app"
# Combined checks
check = "task lint && task typecheck && task test"
```
## Project Structures
### TypeScript Project Structure
```plaintext
project-root/
├── src/
│ ├── routes/ # TanStack Router pages
│ │ ├── index.tsx
│ │ ├── settings/
│ │ │ ├── profile.tsx
│ │ │ └── account.tsx
│ │ └── __root.tsx
│ ├── lib/
│ │ ├── components/ # React components
│ │ │ ├── auth/
│ │ │ │ ├── provider.tsx
│ │ │ │ └── login-form.tsx
│ │ │ ├── ui/ # UI primitives (shadcn)
│ │ │ │ ├── button.tsx
│ │ │ │ └── input.tsx
│ │ │ └── UserProfile.tsx
│ │ ├── server/ # Server-side code
│ │ │ ├── db/
│ │ │ │ ├── schema.ts # Drizzle schema
│ │ │ │ └── index.ts # DB connection
│ │ │ └── functions/ # Server functions
│ │ │ ├── users.ts
│ │ │ └── auth.ts
│ │ ├── hooks/ # Custom React hooks
│ │ │ ├── use-auth.ts
│ │ │ └── use-users.ts
│ │ ├── utils/ # Utility functions
│ │ │ ├── format.ts
│ │ │ └── validation.ts
│ │ └── types/ # TypeScript types
│ │ ├── user.ts
│ │ └── api.ts
│ └── public/ # Static assets
│ └── favicon.ico
├── tests/
│ ├── setup.ts # Test setup
│ ├── unit/ # Unit tests
│ │ ├── lib/
│ │ │ ├── components/
│ │ │ │ └── UserProfile.test.tsx
│ │ │ └── utils/
│ │ │ └── format.test.ts
│ │ └── server/
│ │ └── functions/
│ │ └── users.test.ts
│ ├── integration/ # Integration tests
│ │ ├── auth-flow.test.ts
│ │ └── user-repository.test.ts
│ ├── e2e/ # Playwright E2E tests
│ │ ├── user-registration.spec.ts
│ │ └── user-workflow.spec.ts
│ └── factories/ # Test data factories
│ ├── user.factory.ts
│ └── tenant.factory.ts
├── vitest.config.ts # Vitest configuration
├── playwright.config.ts # Playwright configuration
├── package.json
└── tsconfig.json
```
### Python Project Structure
```plaintext
project-root/
├── app/
│ ├── __init__.py
│ ├── main.py # FastAPI application
│ ├── config/
│ │ ├── __init__.py
│ │ └── settings.py # Application settings
│ ├── db/
│ │ ├── __init__.py
│ │ ├── base.py # Database connection
│ │ ├── models/ # SQLModel entities
│ │ │ ├── __init__.py
│ │ │ ├── base.py # Base model
│ │ │ ├── user.py
│ │ │ └── tenant.py
│ │ └── repositories/ # Repository pattern
│ │ ├── __init__.py
│ │ ├── base.py # Base repository
│ │ └── user_repository.py
│ ├── routers/ # FastAPI endpoints
│ │ ├── __init__.py
│ │ ├── users.py
│ │ └── auth.py
│ ├── services/ # Business logic
│ │ ├── __init__.py
│ │ ├── user_service.py
│ │ └── auth_service.py
│ ├── schemas/ # Pydantic schemas (API contracts)
│ │ ├── __init__.py
│ │ ├── user.py
│ │ └── auth.py
│ └── utils/ # Utilities
│ ├── __init__.py
│ ├── security.py
│ └── validation.py
├── tests/
│ ├── __init__.py
│ ├── conftest.py # Shared fixtures
│ ├── unit/ # Unit tests (@pytest.mark.unit)
│ │ ├── __init__.py
│ │ ├── repositories/
│ │ │ └── test_user_repository.py
│ │ └── services/
│ │ └── test_user_service.py
│ ├── integration/ # Integration tests
│ │ ├── __init__.py
│ │ └── test_user_api.py
│ ├── e2e/ # E2E tests
│ │ ├── __init__.py
│ │ └── test_full_user_flow.py
│ ├── benchmark/ # Benchmark tests
│ │ ├── __init__.py
│ │ └── test_repository_performance.py
│ └── factories/ # Test data factories
│ ├── __init__.py
│ └── user_factory.py
├── pyproject.toml # Python project config
├── requirements.txt # Production dependencies
├── requirements-dev.txt # Development dependencies
└── .python-version # Python version (3.12)
```
## Doppler Configuration
### Doppler Setup
```bash
# Install Doppler CLI
brew install dopplerhq/cli/doppler # macOS
# or
curl -Ls https://cli.doppler.com/install.sh | sh # Linux
# Authenticate with Doppler
doppler login
# Setup Doppler in project
doppler setup
# Select project and config
# Project: your-project-name
# Config: test (or dev, staging, production)
```
### Doppler Environment Configs
Grey Haven projects use these Doppler configs:
1. **dev** - Local development environment
2. **test** - Running tests (CI and local)
3. **staging** - Staging environment
4. **production** - Production environment
### Test Environment Variables
**Database URLs:**
```bash
# PostgreSQL connection URLs (Doppler managed)
DATABASE_URL_ADMIN=postgresql+asyncpg://admin_user:password@localhost:5432/app_db
DATABASE_URL_AUTHENTICATED=postgresql+asyncpg://authenticated_user:password@localhost:5432/app_db
DATABASE_URL_ANON=postgresql+asyncpg://anon_user:password@localhost:5432/app_db
# Test database (separate from dev)
DATABASE_URL_TEST=postgresql+asyncpg://test_user:password@localhost:5432/test_db
```
**Redis:**
```bash
# Use separate Redis DB for tests (0-15 available)
REDIS_URL=redis://localhost:6379/1 # DB 1 for tests (dev uses 0)
```
**Authentication:**
```bash
# Better Auth secrets
BETTER_AUTH_SECRET=test-secret-key-min-32-chars-long
BETTER_AUTH_URL=http://localhost:3000
# JWT secrets
JWT_SECRET_KEY=test-jwt-secret-key
```
**External Services (use test/sandbox keys):**
```bash
# Stripe (test mode)
STRIPE_SECRET_KEY=sk_test_51AbCdEfGhIjKlMnOpQrStUv
STRIPE_PUBLISHABLE_KEY=pk_test_51AbCdEfGhIjKlMnOpQrStUv
# Resend (test mode)
RESEND_API_KEY=re_test_1234567890abcdef
# OpenAI (separate test key)
OPENAI_API_KEY=sk-test-1234567890abcdef
```
**E2E Testing:**
```bash
# Playwright base URL
PLAYWRIGHT_BASE_URL=http://localhost:3000
# Email testing service (for E2E tests)
MAILTRAP_API_TOKEN=your_mailtrap_token
```
### Running Tests with Doppler
**TypeScript:**
```bash
# Run all tests with Doppler
doppler run -- bun run test
# Run with specific config
doppler run --config test -- bun run test
# Run coverage
doppler run -- bun run test:coverage
# Run E2E
doppler run -- bun run test:e2e
```
**Python:**
```bash
# Activate virtual environment first!
source .venv/bin/activate
# Run all tests with Doppler
doppler run -- pytest
# Run with specific config
doppler run --config test -- pytest
# Run specific markers
doppler run -- pytest -m unit
doppler run -- pytest -m integration
```
### Doppler in CI/CD
**GitHub Actions:**
```yaml
- name: Install Doppler CLI
uses: dopplerhq/cli-action@v3
- name: Run tests with Doppler
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: doppler run --config test -- bun run test:coverage
```
**Get Doppler Service Token:**
1. Go to Doppler dashboard
2. Select your project
3. Go to Access → Service Tokens
4. Create token for `test` config
5. Add as `DOPPLER_TOKEN_TEST` secret in GitHub
## GitHub Actions Configuration
### TypeScript CI Workflow
```yaml
# .github/workflows/test-typescript.yml
name: TypeScript Tests
on:
push:
branches: [main, develop]
pull_request:
branches: [main, develop]
jobs:
test:
runs-on: ubuntu-latest
services:
postgres:
image: postgres:16
env:
POSTGRES_DB: test_db
POSTGRES_USER: test_user
POSTGRES_PASSWORD: test_password
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 5432:5432
redis:
image: redis:7-alpine
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 6379:6379
steps:
- name: Checkout code
uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: "20"
      - name: Setup Bun
        # actions/setup-node does not support `cache: "bun"`; install Bun
        # explicitly since the workflow runs `bun install` below.
        uses: oven-sh/setup-bun@v1
- name: Install Doppler CLI
uses: dopplerhq/cli-action@v3
- name: Install dependencies
run: bun install
- name: Run linter
run: bun run lint
- name: Run type check
run: bun run typecheck
- name: Run unit tests
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: doppler run --config test -- bun run test:unit
- name: Run integration tests
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: doppler run --config test -- bun run test:integration
- name: Run tests with coverage
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: doppler run --config test -- bun run test:coverage
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
files: ./coverage/coverage-final.json
flags: typescript
name: typescript-coverage
- name: Install Playwright browsers
run: npx playwright install --with-deps
- name: Run E2E tests
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: doppler run --config test -- bun run test:e2e
- name: Upload Playwright report
if: always()
uses: actions/upload-artifact@v4
with:
name: playwright-report
path: playwright-report/
retention-days: 30
```
### Python CI Workflow
```yaml
# .github/workflows/test-python.yml
name: Python Tests
on:
push:
branches: [main, develop]
pull_request:
branches: [main, develop]
jobs:
test:
runs-on: ubuntu-latest
services:
postgres:
image: postgres:16
env:
POSTGRES_DB: test_db
POSTGRES_USER: test_user
POSTGRES_PASSWORD: test_password
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 5432:5432
redis:
image: redis:7-alpine
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 6379:6379
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
cache: "pip"
- name: Install Doppler CLI
uses: dopplerhq/cli-action@v3
- name: Create virtual environment
run: python -m venv .venv
- name: Install dependencies
run: |
source .venv/bin/activate
pip install --upgrade pip
pip install -r requirements.txt -r requirements-dev.txt
- name: Run linter
run: |
source .venv/bin/activate
ruff check app tests
- name: Run type checker
run: |
source .venv/bin/activate
mypy app
- name: Run unit tests
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: |
source .venv/bin/activate
doppler run --config test -- pytest -m unit
- name: Run integration tests
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: |
source .venv/bin/activate
doppler run --config test -- pytest -m integration
- name: Run all tests with coverage
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: |
source .venv/bin/activate
doppler run --config test -- pytest --cov=app --cov-report=xml --cov-report=html
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
files: ./coverage.xml
flags: python
name: python-coverage
- name: Upload coverage HTML
if: always()
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: htmlcov/
retention-days: 30
```
## Coverage Configuration
### Coverage Thresholds
**Minimum requirements (enforced in CI):**
- **Lines:** 80%
- **Functions:** 80%
- **Branches:** 80%
- **Statements:** 80%
**Target goals:**
- **Critical paths:** 90%+
- **Security code:** 100% (auth, payments, tenant isolation)
- **Utility functions:** 95%+
### Excluding from Coverage
**TypeScript (vitest.config.ts):**
```typescript
coverage: {
exclude: [
"node_modules/",
"tests/",
"**/*.config.ts",
"**/*.d.ts",
"**/types/",
"**/__mocks__/",
"**/migrations/",
],
}
```
**Python (pyproject.toml):**
```toml
[tool.coverage.run]
omit = [
"*/tests/*",
"*/conftest.py",
"*/__init__.py",
"*/migrations/*",
"*/config/*",
]
```
### Coverage Reports
**Viewing coverage locally:**
```bash
# TypeScript
bun run test:coverage
open coverage/index.html
# Python
source .venv/bin/activate
doppler run -- pytest --cov=app --cov-report=html
open htmlcov/index.html
```
**Coverage in CI:**
- Upload to Codecov for tracking over time
- Fail build if coverage < 80%
- Comment coverage diff on PRs
- Track coverage trends
### Pre-commit Hook for Coverage
```yaml
# .pre-commit-config.yaml
repos:
- repo: local
hooks:
- id: test-coverage
name: Check test coverage
entry: sh -c 'source .venv/bin/activate && pytest --cov=app --cov-fail-under=80'
language: system
pass_filenames: false
always_run: true
```

View File

@@ -0,0 +1,324 @@
---
name: grey-haven-testing-strategy
description: "Grey Haven's comprehensive testing strategy - Vitest unit/integration/e2e for TypeScript, pytest markers for Python, >80% coverage requirement, fixture patterns, and Doppler for test environments. Use when writing tests, setting up test infrastructure, running tests, debugging test failures, improving coverage, configuring CI/CD, or when user mentions 'test', 'testing', 'pytest', 'vitest', 'coverage', 'TDD', 'test-driven development', 'unit test', 'integration test', 'e2e', 'end-to-end', 'test fixtures', 'mocking', 'test setup', 'CI testing'."
---
# Grey Haven Testing Strategy
**Comprehensive testing approach for TypeScript (Vitest) and Python (pytest) projects.**
Follow these standards when writing tests, setting up test infrastructure, or improving test coverage in Grey Haven codebases.
## Supporting Documentation
- **[EXAMPLES.md](EXAMPLES.md)** - Copy-paste test examples for Vitest and pytest
- **[REFERENCE.md](REFERENCE.md)** - Complete configurations, project structures, and CI setup
- **[templates/](templates/)** - Ready-to-use test templates
- **[checklists/](checklists/)** - Testing quality checklists
- **[scripts/](scripts/)** - Helper scripts for coverage and test execution
## Testing Philosophy
### Coverage Requirements
- **Minimum: 80% code coverage** for all projects (enforced in CI)
- **Target: 90%+ coverage** for critical paths
- **100% coverage** for security-critical code (auth, payments, multi-tenant isolation)
### Test Types (Markers)
Grey Haven uses consistent test markers across languages:
1. **unit**: Fast, isolated tests of single functions/classes
2. **integration**: Tests involving multiple components or external dependencies
3. **e2e**: End-to-end tests through full user flows
4. **benchmark**: Performance tests measuring speed/memory
## TypeScript Testing (Vitest)
### Quick Setup
**Project Structure:**
```
tests/
├── unit/ # Fast, isolated tests
├── integration/ # Multi-component tests
└── e2e/ # Playwright tests
```
**Key Configuration:**
```typescript
// vitest.config.ts
export default defineConfig({
test: {
globals: true,
environment: "jsdom",
setupFiles: ["./tests/setup.ts"],
coverage: {
thresholds: { lines: 80, functions: 80, branches: 80, statements: 80 },
},
},
});
```
**Running Tests:**
```bash
bun run test # Run all tests
bun run test:coverage # With coverage report
bun run test:watch # Watch mode
bun run test:ui # UI mode
bun run test tests/unit/ # Unit tests only
```
**See [EXAMPLES.md](EXAMPLES.md#vitest-examples) for complete test examples.**
## Python Testing (pytest)
### Quick Setup
**Project Structure:**
```
tests/
├── conftest.py # Shared fixtures
├── unit/ # @pytest.mark.unit
├── integration/ # @pytest.mark.integration
├── e2e/ # @pytest.mark.e2e
└── benchmark/ # @pytest.mark.benchmark
```
**Key Configuration:**
```toml
# pyproject.toml
[tool.pytest.ini_options]
addopts = ["--cov=app", "--cov-fail-under=80"]
markers = [
"unit: Fast, isolated unit tests",
"integration: Tests involving multiple components",
"e2e: End-to-end tests through full flows",
"benchmark: Performance tests",
]
```
**Running Tests:**
```bash
# ⚠️ ALWAYS activate virtual environment first!
source .venv/bin/activate
# Run with Doppler for environment variables
doppler run -- pytest # All tests
doppler run -- pytest --cov=app # With coverage
doppler run -- pytest -m unit # Unit tests only
doppler run -- pytest -m integration # Integration tests only
doppler run -- pytest -m e2e # E2E tests only
doppler run -- pytest -v # Verbose output
```
**See [EXAMPLES.md](EXAMPLES.md#pytest-examples) for complete test examples.**
## Test Markers Explained
### Unit Tests
**Characteristics:**
- Fast execution (< 100ms per test)
- No external dependencies (database, API, file system)
- Mock all external services
- Test single function/class in isolation
**Use for:**
- Utility functions
- Business logic
- Data transformations
- Component rendering (React Testing Library)
### Integration Tests
**Characteristics:**
- Test multiple components together
- May use real database/Redis (with cleanup)
- Test API endpoints with FastAPI TestClient
- Test React Query + server functions
**Use for:**
- API endpoint flows
- Database operations with repositories
- Authentication flows
- Multi-component interactions
### E2E Tests
**Characteristics:**
- Test complete user flows
- Use Playwright (TypeScript) or httpx (Python)
- Test from user perspective
- Slower execution (seconds per test)
**Use for:**
- Registration/login flows
- Critical user journeys
- Form submissions
- Multi-page workflows
### Benchmark Tests
**Characteristics:**
- Measure performance metrics
- Track execution time
- Monitor memory usage
- Detect performance regressions
**Use for:**
- Database query performance
- Algorithm optimization
- API response times
- Batch operations
## Environment Variables with Doppler
**⚠️ CRITICAL: Grey Haven uses Doppler for ALL environment variables.**
```bash
# Install Doppler
brew install dopplerhq/cli/doppler
# Authenticate and setup
doppler login
doppler setup
# Run tests with Doppler
doppler run -- bun run test # TypeScript
doppler run -- pytest # Python
# Use specific config
doppler run --config test -- pytest
```
**Doppler provides:**
- `DATABASE_URL_TEST` - Test database connection
- `REDIS_URL` - Redis for tests (separate DB)
- `BETTER_AUTH_SECRET` - Auth secrets
- `STRIPE_SECRET_KEY` - External service keys (test mode)
- `PLAYWRIGHT_BASE_URL` - E2E test URL
**See [REFERENCE.md](REFERENCE.md#doppler-configuration) for complete setup.**
## Test Fixtures and Factories
### TypeScript Factories
```typescript
// tests/factories/user.factory.ts
import { faker } from "@faker-js/faker";
export function createMockUser(overrides = {}) {
return {
id: faker.string.uuid(),
tenant_id: faker.string.uuid(),
email_address: faker.internet.email(),
name: faker.person.fullName(),
...overrides,
};
}
```
### Python Fixtures
```python
# tests/conftest.py
@pytest.fixture
async def test_user(session, tenant_id):
"""Create test user with tenant isolation."""
user = User(
tenant_id=tenant_id,
email_address="test@example.com",
name="Test User",
)
session.add(user)
await session.commit()
return user
```
**See [EXAMPLES.md](EXAMPLES.md#test-factories-and-fixtures) for more patterns.**
## Multi-Tenant Testing
**⚠️ ALWAYS test tenant isolation in multi-tenant projects:**
```python
@pytest.mark.unit
async def test_tenant_isolation(session, test_user, tenant_id):
"""Verify queries filter by tenant_id."""
repo = UserRepository(session)
# Should find with correct tenant
user = await repo.get_by_id(test_user.id, tenant_id)
assert user is not None
# Should NOT find with different tenant
different_tenant = uuid4()
user = await repo.get_by_id(test_user.id, different_tenant)
assert user is None
```
## Continuous Integration
**GitHub Actions with Doppler:**
```yaml
# .github/workflows/test.yml
- name: Run tests with Doppler
env:
DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
run: doppler run --config test -- bun run test:coverage
```
**See [REFERENCE.md](REFERENCE.md#github-actions-configuration) for complete workflow.**
## When to Apply This Skill
Use this skill when:
- ✅ Writing new tests for features
- ✅ Setting up test infrastructure (Vitest/pytest)
- ✅ Configuring CI/CD test pipelines
- ✅ Debugging failing tests
- ✅ Improving test coverage (<80%)
- ✅ Reviewing test code quality
- ✅ Setting up Doppler for test environments
- ✅ Creating test fixtures and factories
- ✅ Implementing TDD workflow
- ✅ User mentions: "test", "testing", "pytest", "vitest", "coverage", "TDD", "unit test", "integration test", "e2e", "test setup", "CI testing"
## Template References
These testing patterns come from Grey Haven production templates:
- **Frontend**: `cvi-template` (Vitest + Playwright + React Testing Library)
- **Backend**: `cvi-backend-template` (pytest + FastAPI TestClient + async fixtures)
## Critical Reminders
1. **Coverage: 80% minimum** (enforced in CI, blocks merge)
2. **Test markers**: unit, integration, e2e, benchmark (use consistently)
3. **Doppler**: ALWAYS use for test environment variables (never commit .env!)
4. **Virtual env**: MUST activate for Python tests (`source .venv/bin/activate`)
5. **Tenant isolation**: ALWAYS test multi-tenant scenarios
6. **Fixtures**: Use factories for test data generation (faker library)
7. **Mocking**: Mock external services in unit tests (use vi.mock or pytest mocks)
8. **CI**: Run tests with `doppler run --config test`
9. **Database**: Use separate test database (Doppler provides `DATABASE_URL_TEST`)
10. **Cleanup**: Clean up test data after each test (use fixtures with cleanup)
## Next Steps
- **Need test examples?** See [EXAMPLES.md](EXAMPLES.md) for copy-paste code
- **Need configurations?** See [REFERENCE.md](REFERENCE.md) for complete configs
- **Need templates?** See [templates/](templates/) for starter files
- **Need checklists?** Use [checklists/](checklists/) for systematic test reviews
- **Need to run tests?** Use [scripts/](scripts/) for helper utilities

View File

@@ -0,0 +1,214 @@
# Test Code Review Checklist
Use this checklist when reviewing test code in pull requests.
## General Test Quality
### Test Structure
- [ ] **Clear test names**: Descriptive, follows `test_should_do_something_when_condition` pattern
- [ ] **One assertion focus**: Each test verifies one specific behavior
- [ ] **Arrange-Act-Assert**: Tests follow AAA pattern clearly
- [ ] **No magic numbers**: Test values are self-explanatory or use named constants
- [ ] **Readable setup**: Test setup is clear and concise
### Test Independence
- [ ] **No shared state**: Tests don't depend on each other
- [ ] **Can run in any order**: Tests pass when run individually or in any sequence
- [ ] **Proper cleanup**: Tests clean up resources (database, files, mocks)
- [ ] **Isolated changes**: Tests don't pollute global state
- [ ] **Fresh fixtures**: Each test gets fresh test data
### Test Coverage
- [ ] **New code is tested**: All new functions/components have tests
- [ ] **Edge cases covered**: Null, empty, invalid inputs tested
- [ ] **Error paths tested**: Error handling and failure scenarios verified
- [ ] **Happy path tested**: Normal, expected behavior verified
- [ ] **Branch coverage**: All if/else and switch branches tested
## TypeScript/Vitest Review
### Component Tests
- [ ] **Correct rendering**: Components render without errors
- [ ] **User interactions**: Click, input, form submissions tested
- [ ] **Loading states**: Loading indicators tested
- [ ] **Error states**: Error messages and boundaries tested
- [ ] **Async handling**: Uses `waitFor()` for async state changes
- [ ] **Query wrapper**: TanStack Query components wrapped correctly
- [ ] **Accessibility**: Uses semantic queries (`getByRole`, `getByLabelText`)
### Mocking
- [ ] **Appropriate mocking**: Mocks external dependencies (APIs, modules)
- [ ] **Not over-mocked**: Integration tests use real implementations where appropriate
- [ ] **Clear mock setup**: Mock configuration is easy to understand
- [ ] **Mock verification**: Tests verify mocks were called correctly
- [ ] **Mock cleanup**: Mocks cleared after each test (`vi.clearAllMocks()`)
### Best Practices
- [ ] **Path aliases**: Uses `~/` for imports (not relative paths)
- [ ] **TypeScript types**: Test code is properly typed
- [ ] **Testing Library**: Uses `@testing-library/react` best practices
- [ ] **Vitest globals**: Uses globals (`describe`, `it`, `expect`) correctly
- [ ] **No console warnings**: Tests don't produce React warnings
## Python/pytest Review
### Unit Tests
- [ ] **Isolated tests**: No external dependencies (database, network)
- [ ] **Fast execution**: Unit tests complete in < 100ms
- [ ] **Proper fixtures**: Uses pytest fixtures appropriately
- [ ] **Mocking external services**: Uses `unittest.mock` or `pytest-mock`
- [ ] **Type hints**: Test functions have type hints
### Integration Tests
- [ ] **Real dependencies**: Uses real database/services where appropriate
- [ ] **Transaction handling**: Tests verify rollback on errors
- [ ] **Tenant isolation**: Tests verify multi-tenant data separation
- [ ] **Async/await**: Async tests use `async def` and `await`
- [ ] **Database cleanup**: Fixtures clean up test data
### Markers
- [ ] **Correct markers**: Tests marked with `@pytest.mark.unit`, `@pytest.mark.integration`, etc.
- [ ] **Consistent markers**: Markers match test type (unit, integration, e2e, benchmark)
- [ ] **Slow marker**: Tests >5 seconds marked with `@pytest.mark.slow`
### Best Practices
- [ ] **Descriptive docstrings**: Test functions have clear docstrings
- [ ] **Factory usage**: Uses factory pattern for test data
- [ ] **No hardcoded IDs**: Uses `uuid4()` for test IDs
- [ ] **Proper imports**: Imports organized and clear
- [ ] **No test pollution**: Tests don't leave data in database
## Multi-Tenant Testing
### Tenant Isolation
- [ ] **Tenant ID filtering**: All queries filter by `tenant_id`
- [ ] **Cross-tenant access denied**: Tests verify users can't access other tenant's data
- [ ] **Tenant header required**: API tests include `X-Tenant-ID` header
- [ ] **Repository methods**: All repository methods accept `tenant_id` parameter
- [ ] **Query verification**: Tests verify correct `tenant_id` in database queries
### Security
- [ ] **Authentication tested**: Protected endpoints require auth
- [ ] **Authorization tested**: Users can only access authorized resources
- [ ] **Input validation**: Invalid input properly rejected
- [ ] **SQL injection protected**: No raw SQL in tests (uses ORM)
- [ ] **XSS protection**: Input sanitization tested where applicable
## Environment & Configuration
### Doppler
- [ ] **Doppler used**: Tests run with `doppler run --`
- [ ] **No hardcoded secrets**: No API keys or secrets in test code
- [ ] **Correct config**: Tests use `test` Doppler config
- [ ] **Environment isolation**: Test database separate from dev
### Test Data
- [ ] **Faker/factory-boy**: Random test data uses faker
- [ ] **Realistic data**: Test data resembles production data
- [ ] **No PII**: Test data doesn't contain real personal information
- [ ] **Deterministic when needed**: Uses seed for reproducible random data when necessary
## Performance
### Test Speed
- [ ] **Fast unit tests**: Unit tests < 100ms each
- [ ] **Reasonable integration tests**: Integration tests < 1 second each
- [ ] **Parallel execution**: Tests can run in parallel
- [ ] **No unnecessary waits**: No `sleep()` or arbitrary delays
- [ ] **Optimized queries**: Database queries efficient
### Resource Usage
- [ ] **Minimal test data**: Creates only necessary test data
- [ ] **Connection cleanup**: Database connections closed properly
- [ ] **Memory efficient**: No memory leaks in test setup
- [ ] **File cleanup**: Temporary files deleted after tests
## CI/CD Compatibility
### GitHub Actions
- [ ] **Passes in CI**: Tests pass in GitHub Actions
- [ ] **No flaky tests**: Tests pass consistently (not intermittent failures)
- [ ] **Correct services**: Required services (postgres, redis) configured
- [ ] **Coverage upload**: Coverage reports uploaded correctly
- [ ] **Timeout appropriate**: Tests complete within CI timeout limits
### Coverage
- [ ] **Meets threshold**: Coverage meets 80% minimum
- [ ] **No false positives**: Coverage accurately reflects tested code
- [ ] **Coverage trends**: Coverage doesn't decrease from baseline
- [ ] **Critical paths covered**: Important features have high coverage
## Documentation
### Test Documentation
- [ ] **Clear test names**: Test intent obvious from name
- [ ] **Helpful comments**: Complex test logic explained
- [ ] **Fixture documentation**: Custom fixtures documented
- [ ] **Test file organization**: Tests organized logically
- [ ] **README updated**: Testing docs updated if patterns changed
### Code Comments
- [ ] **Why, not what**: Comments explain why, not what code does
- [ ] **No commented-out code**: Old test code removed
- [ ] **TODO comments tracked**: Any TODOs have tracking tickets
- [ ] **No misleading comments**: Comments accurate and up-to-date
## Red Flags to Watch For
### Anti-Patterns
- [ ] ❌ Tests that only test mocks
- [ ] ❌ Tests with no assertions
- [ ] ❌ Tests that test private implementation
- [ ] ❌ Brittle tests that break on refactoring
- [ ] ❌ Tests that depend on execution order
- [ ] ❌ Excessive setup code (>50% of test)
- [ ] ❌ Tests with sleep/wait instead of proper async handling
- [ ] ❌ Tests that write to production database
- [ ] ❌ Tests that make real API calls
- [ ] ❌ Tests with hardcoded production credentials
### Smells
- [ ] ⚠️ Very long test functions (>50 lines)
- [ ] ⚠️ Duplicate test code (could use fixtures)
- [ ] ⚠️ Tests with multiple assertions on different behaviors
- [ ] ⚠️ Tests that take >5 seconds
- [ ] ⚠️ Tests that fail intermittently
- [ ] ⚠️ Tests with complex logic (loops, conditionals)
- [ ] ⚠️ Tests that require manual setup to run
- [ ] ⚠️ Missing error assertions
- [ ] ⚠️ Testing framework workarounds/hacks
## Approval Criteria
Before approving PR with tests:
- [ ] All tests pass locally and in CI
- [ ] Coverage meets minimum threshold (80%)
- [ ] Tests follow Grey Haven conventions
- [ ] No anti-patterns or red flags
- [ ] Test code is readable and maintainable
- [ ] Tests verify correct behavior (not just implementation)
- [ ] Security and tenant isolation tested
- [ ] Documentation updated if needed

View File

@@ -0,0 +1,192 @@
# Testing Checklist
Use this checklist before submitting PRs to ensure comprehensive test coverage and quality.
## Pre-PR Testing Checklist
### Test Coverage
- [ ] All new functions/methods have unit tests
- [ ] All new components have component tests
- [ ] All new API endpoints have integration tests
- [ ] Critical user flows have E2E tests
- [ ] Code coverage is at least 80% (run `bun run test:coverage` or `pytest --cov`)
- [ ] No coverage regression from previous version
- [ ] Security-critical code has 100% coverage (auth, payments, tenant isolation)
### Test Quality
- [ ] Tests follow naming convention: `test_should_do_something_when_condition`
- [ ] Each test has a single, clear assertion focus
- [ ] Tests are independent (can run in any order)
- [ ] Tests clean up after themselves (no database pollution)
- [ ] No hardcoded values (use constants or fixtures)
- [ ] Test data uses factories (faker/factory-boy)
- [ ] Mock external services (APIs, email, payments)
- [ ] Tests run in < 10 seconds (unit tests < 100ms each)
### Test Markers
- [ ] Unit tests marked with `@pytest.mark.unit` or in `tests/unit/`
- [ ] Integration tests marked with `@pytest.mark.integration` or in `tests/integration/`
- [ ] E2E tests marked with `@pytest.mark.e2e` or in `tests/e2e/`
- [ ] Slow tests marked with `@pytest.mark.slow` (> 5 seconds)
### Multi-Tenant Testing
- [ ] All database queries test tenant isolation
- [ ] Repository methods verify correct `tenant_id` filtering
- [ ] API endpoints test tenant header validation
- [ ] Cross-tenant access attempts are tested and fail correctly
### Environment Variables
- [ ] All tests use Doppler for environment variables
- [ ] No hardcoded secrets or API keys
- [ ] Test database is separate from development database
- [ ] `.env` files are NOT committed to repository
- [ ] CI uses `DOPPLER_TOKEN_TEST` secret
### Error Handling
- [ ] Tests verify error messages and status codes
- [ ] Edge cases are tested (null, empty, invalid input)
- [ ] Validation errors return correct HTTP status (422)
- [ ] Database errors are handled gracefully
- [ ] Tests verify rollback on transaction errors
### TypeScript Specific
- [ ] React Testing Library used for component tests
- [ ] TanStack Query components tested with QueryClientProvider wrapper
- [ ] Server function mocks use `vi.mock()`
- [ ] Async components use `waitFor()` for assertions
- [ ] Vitest globals enabled in config (`globals: true`)
### Python Specific
- [ ] Virtual environment activated before running tests
- [ ] Async fixtures used for async code (`async def`)
- [ ] FastAPI TestClient used for API tests
- [ ] Database fixtures use session-scoped engine
- [ ] SQLAlchemy sessions auto-rollback in fixtures
### CI/CD
- [ ] Tests pass locally with `bun test` or `doppler run -- pytest`
- [ ] Tests pass in CI (GitHub Actions)
- [ ] Coverage report uploaded to Codecov
- [ ] No test warnings or deprecation messages
- [ ] Pre-commit hooks pass (if configured)
## Test Types Checklist
### Unit Tests
- [ ] Test single function/class in isolation
- [ ] Mock all external dependencies
- [ ] No database or network calls
- [ ] Fast execution (< 100ms per test)
- [ ] Cover all code branches (if/else, try/catch)
### Integration Tests
- [ ] Test multiple components together
- [ ] Use real database (with cleanup)
- [ ] Test complete API request/response cycles
- [ ] Verify database state changes
- [ ] Test transaction handling
### E2E Tests
- [ ] Test complete user workflows
- [ ] Use Playwright for TypeScript
- [ ] Test from user perspective (UI interactions)
- [ ] Verify multi-step processes
- [ ] Test critical business flows
### Benchmark Tests
- [ ] Measure performance metrics
- [ ] Set performance thresholds
- [ ] Test with realistic data volumes
- [ ] Monitor for regressions
## Coverage Goals by Component
### Utility Functions
- [ ] 95%+ coverage
- [ ] All branches tested
- [ ] Edge cases handled
### Business Logic (Services)
- [ ] 90%+ coverage
- [ ] All business rules tested
- [ ] Error scenarios covered
### API Endpoints
- [ ] 85%+ coverage
- [ ] All HTTP methods tested
- [ ] All response codes verified
### Database Repositories
- [ ] 90%+ coverage
- [ ] CRUD operations tested
- [ ] Tenant isolation verified
### React Components
- [ ] 80%+ coverage
- [ ] Rendering tested
- [ ] User interactions tested
- [ ] Loading/error states tested
### Security Features
- [ ] 100% coverage
- [ ] Authentication tested
- [ ] Authorization tested
- [ ] Tenant isolation verified
## Common Testing Mistakes to Avoid
### Don't
- [ ] ❌ Test implementation details
- [ ] ❌ Test private methods directly
- [ ] ❌ Write tests that depend on execution order
- [ ] ❌ Use real external services in tests
- [ ] ❌ Hardcode test data
- [ ] ❌ Commit `.env` files
- [ ] ❌ Skip test cleanup
- [ ] ❌ Test multiple things in one test
- [ ] ❌ Forget to await async operations
- [ ] ❌ Mock too much (integration tests)
### Do
- [ ] ✅ Test public APIs and behaviors
- [ ] ✅ Write independent, isolated tests
- [ ] ✅ Mock external services
- [ ] ✅ Use test factories for data
- [ ] ✅ Use Doppler for environment variables
- [ ] ✅ Clean up test data
- [ ] ✅ Focus each test on one assertion
- [ ] ✅ Use `waitFor()` for async rendering
- [ ] ✅ Test error scenarios
- [ ] ✅ Verify tenant isolation
## Post-Testing Checklist
- [ ] All tests pass locally
- [ ] Coverage meets minimum threshold (80%)
- [ ] No failing tests in CI
- [ ] Coverage report reviewed
- [ ] Test output reviewed for warnings
- [ ] Performance acceptable (no slow tests)
- [ ] Documentation updated (if test patterns changed)
- [ ] Reviewers can understand test intent

View File

@@ -0,0 +1,260 @@
#!/usr/bin/env python3
"""
Check test coverage and enforce 80% minimum threshold for Grey Haven projects.
Analyzes coverage reports from Vitest or pytest and provides detailed
breakdown of coverage by file, function, and line.
Usage:
# Check Vitest coverage
python scripts/coverage_check.py
# Check pytest coverage
python scripts/coverage_check.py --backend pytest
# Show detailed file-by-file breakdown
python scripts/coverage_check.py --detailed
# Check coverage and fail if below threshold
python scripts/coverage_check.py --strict
# Generate coverage report if missing
python scripts/coverage_check.py --generate
Always run with --help first to see all options.
"""
import argparse
import subprocess
import sys
import json
import os
from pathlib import Path
def run_command(cmd: str, capture: bool = True) -> tuple[bool, str]:
    """Execute *cmd* through the shell.

    Args:
        cmd: Shell command line to execute.
        capture: When True, capture stdout/stderr as text instead of
            letting the child inherit the parent's streams.

    Returns:
        A ``(succeeded, stdout)`` pair. ``stdout`` is the empty string
        when output was not captured.
    """
    completed = subprocess.run(cmd, shell=True, capture_output=capture, text=True)
    succeeded = completed.returncode == 0
    return succeeded, completed.stdout if capture else ""
def check_vitest_coverage(detailed: bool = False) -> dict:
    """Read Vitest coverage metrics from coverage/coverage-summary.json.

    Exits the process with status 1 when no coverage report exists.
    When *detailed* is set, also prints a per-file table that flags files
    below the 80% line-coverage threshold.

    Returns:
        Mapping of metric name (lines/statements/functions/branches) to
        its coverage percentage.
    """
    summary_path = Path("coverage/coverage-summary.json")
    if not summary_path.exists():
        print("ERROR: Coverage report not found: coverage/coverage-summary.json")
        print(" Run tests with coverage first:")
        print(" doppler run --config test -- vitest run --coverage")
        sys.exit(1)

    coverage_data = json.loads(summary_path.read_text())

    # Overall metrics come from the synthetic "total" entry.
    totals = coverage_data["total"]
    metrics = {
        key: totals[key]["pct"]
        for key in ("lines", "statements", "functions", "branches")
    }

    if detailed:
        print("\n Coverage by File:")
        print(f"{'File':<50} {'Lines':<10} {'Funcs':<10} {'Branches':<10}")
        print("=" * 80)
        cwd = os.getcwd()
        for path, stats in coverage_data.items():
            if path == "total":
                continue
            # Shorten absolute paths so the table stays aligned.
            display = path.replace(cwd, ".")
            if len(display) > 47:
                display = "..." + display[-44:]
            line_pct = stats["lines"]["pct"]
            func_pct = stats["functions"]["pct"]
            branch_pct = stats["branches"]["pct"]
            # Bucket each file by its line coverage for a quick visual scan.
            if line_pct < 80:
                status = "[BELOW 80%]"
            elif line_pct < 90:
                status = "[80-90%]"
            else:
                status = "[ABOVE 90%]"
            print(f"{status} {display:<47} {line_pct:<9.1f}% {func_pct:<9.1f}% {branch_pct:<9.1f}%")

    return metrics
def check_pytest_coverage(detailed: bool = False) -> dict:
    """Read pytest coverage from the .coverage data file via coverage.py.

    Exits the process with status 1 when the data file is missing or when
    the ``coverage report`` command fails. When *detailed* is set, prints
    coverage.py's per-file report as-is.

    Returns:
        Mapping of metric name to percentage. pytest coverage reports only
        a single total, so the same number is used for every metric.
    """
    if not Path(".coverage").exists():
        print("ERROR: Coverage report not found: .coverage")
        print(" Run tests with coverage first:")
        print(" doppler run --config test -- pytest --cov=app")
        sys.exit(1)

    # Ask coverage.py for just the total percentage.
    ok, output = run_command("coverage report --format=total")
    if not ok:
        print("ERROR: Failed to generate coverage report")
        sys.exit(1)
    total_coverage = float(output.strip().rstrip("%"))

    if detailed:
        print("\n Coverage by File:")
        ok, table = run_command("coverage report")
        print(table)

    # One total applied to every metric bucket (order matters for display).
    return {
        metric: total_coverage
        for metric in ("lines", "statements", "functions", "branches")
    }
def main():
    """CLI entry point: parse arguments, optionally generate a coverage
    report, then check all metrics against the 80% threshold and print a
    summary. Exits 1 on generation failure, missing reports, or (with
    --strict) when any metric is below threshold."""
    parser = argparse.ArgumentParser(
        description="Check test coverage and enforce thresholds",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    # Check Vitest coverage
    python scripts/coverage_check.py
    # Check pytest coverage with detailed breakdown
    python scripts/coverage_check.py --backend pytest --detailed
    # Generate coverage and check (strict mode)
    python scripts/coverage_check.py --generate --strict
Coverage Thresholds:
    Minimum 80% coverage required for:
    - Lines
    - Functions
    - Branches
    - Statements
Backends:
    vitest - Vitest (TypeScript/React) - default
    pytest - pytest (Python/FastAPI)
"""
    )
    # Which coverage format to read: Vitest's JSON summary or coverage.py data.
    parser.add_argument(
        "--backend",
        default="vitest",
        choices=["vitest", "pytest"],
        help="Test backend to check coverage for (default: vitest)"
    )
    parser.add_argument(
        "--detailed",
        action="store_true",
        help="Show detailed file-by-file breakdown"
    )
    # Without --strict this script only reports; it never fails the build.
    parser.add_argument(
        "--strict",
        action="store_true",
        help="Exit with error if coverage below 80 percent"
    )
    parser.add_argument(
        "--generate",
        action="store_true",
        help="Generate coverage report before checking"
    )
    args = parser.parse_args()
    # Banner
    print(f"\n{'=' * 70}")
    print(f" Coverage Check - {args.backend.upper()}")
    print(f"{'=' * 70}")
    # Generate coverage if requested (runs the full test suite under Doppler).
    if args.generate:
        print("\n→ Generating coverage report...")
        if args.backend == "vitest":
            cmd = "doppler run --config test -- vitest run --coverage"
        else:
            cmd = "doppler run --config test -- pytest --cov=app --cov-report=term --cov-report=html"
        # capture=False streams test output live to the terminal.
        success, _ = run_command(cmd, capture=False)
        if not success:
            print("\nERROR: Failed to generate coverage")
            sys.exit(1)
    # Check coverage (each helper exits if its report file is missing).
    if args.backend == "vitest":
        coverage = check_vitest_coverage(args.detailed)
    else:
        coverage = check_pytest_coverage(args.detailed)
    # Display summary
    print(f"\n{'=' * 70}")
    print(" Coverage Summary")
    print(f"{'=' * 70}")
    threshold = 80.0
    all_pass = True
    # Every metric (lines/statements/functions/branches) must clear the bar.
    for metric, value in coverage.items():
        if value >= threshold:
            status = "SUCCESS:"
        else:
            status = "ERROR:"
            all_pass = False
        print(f"{status} {metric.capitalize():<15} {value:>6.2f}% (threshold: {threshold}%)")
    # Overall result
    print(f"\n{'=' * 70}")
    if all_pass:
        print(" SUCCESS: All coverage thresholds met!")
    else:
        print(" ERROR: Coverage below 80% threshold")
    print(f"{'=' * 70}")
    # Additional info: actionable tips plus a pointer to the HTML report.
    if not all_pass:
        print("\nTIP: Tips to improve coverage:")
        print(" • Add unit tests for uncovered functions")
        print(" • Add integration tests for API endpoints")
        print(" • Add edge case tests for conditionals")
        print(" • Test error handling paths")
        if args.backend == "vitest":
            print("\n View detailed report: coverage/index.html")
        else:
            print("\n View detailed report: htmlcov/index.html")
    # Exit with error in strict mode if coverage below threshold
    if args.strict and not all_pass:
        sys.exit(1)
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,236 @@
#!/usr/bin/env python3
"""
Run tests for Grey Haven applications with Doppler environment variables.
Supports Vitest (TypeScript) and pytest (Python) with markers for different
test types (unit, integration, e2e, benchmark).
Usage:
# Run all tests with test environment
python scripts/run_tests.py
# Run unit tests only
python scripts/run_tests.py --type unit
# Run integration and e2e tests
python scripts/run_tests.py --type integration --type e2e
# Run tests with coverage
python scripts/run_tests.py --coverage
# Run tests in watch mode (for development)
python scripts/run_tests.py --watch
# Run pytest instead of Vitest
python scripts/run_tests.py --backend pytest
# Run with specific Doppler environment
python scripts/run_tests.py --env ci
Always run with --help first to see all options.
"""
import argparse
import subprocess
import sys
from typing import List
def run_command(cmd: str, description: str) -> bool:
    """Print *description* and the command line, then run it via the shell.

    The child process inherits stdout/stderr, so test output streams live.

    Args:
        cmd: Shell command line to execute.
        description: Human-readable label echoed before execution.

    Returns:
        True when the command exits with status 0.
    """
    print(f"\n{description}")
    print(f" Command: {cmd}\n")
    exit_code = subprocess.run(cmd, shell=True).returncode
    return exit_code == 0
def main():
    """CLI entry point: assemble the Doppler-wrapped test command for the
    selected backend (Vitest or pytest), run it, optionally validate the
    coverage threshold, and print a summary with next steps.

    Exits 1 when tests fail or (pytest) coverage is below 80%.
    """
    import shlex  # stdlib; used to quote shell arguments safely below

    parser = argparse.ArgumentParser(
        description="Run tests with Doppler environment variables",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    # Run all Vitest tests
    python scripts/run_tests.py
    # Run unit tests only
    python scripts/run_tests.py --type unit
    # Run integration and e2e tests with coverage
    python scripts/run_tests.py --type integration --type e2e --coverage
    # Run pytest unit tests
    python scripts/run_tests.py --backend pytest --type unit
    # Run tests in watch mode
    python scripts/run_tests.py --watch
Test Types (Markers):
    unit - Unit tests (fast, isolated)
    integration - Integration tests (database, external services)
    e2e - End-to-end tests (full application flow)
    benchmark - Performance benchmark tests
Backends:
    vitest - Vitest (TypeScript/React) - default
    pytest - pytest (Python/FastAPI)
Doppler Configuration:
    Uses 'test' config by default.
    Override with --env flag for CI environments.
"""
    )
    parser.add_argument(
        "--backend",
        default="vitest",
        choices=["vitest", "pytest"],
        help="Test backend to use (default: vitest)"
    )
    # --type may be repeated; collected into a list of markers.
    parser.add_argument(
        "--type",
        action="append",
        choices=["unit", "integration", "e2e", "benchmark"],
        help="Test type(s) to run (can be repeated). If not specified, runs all tests."
    )
    parser.add_argument(
        "--coverage",
        action="store_true",
        help="Run with coverage reporting"
    )
    parser.add_argument(
        "--watch",
        action="store_true",
        help="Run in watch mode (for development)"
    )
    parser.add_argument(
        "--env",
        default="test",
        help="Doppler environment config to use (default: test)"
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Verbose output"
    )
    args = parser.parse_args()
    backend = args.backend
    env = args.env
    test_types = args.type or []
    # Banner
    print(f"\n{'=' * 70}")
    print(f" Running {backend.upper()} Tests")
    print(f" Environment: {env}")
    if test_types:
        print(f" Types: {', '.join(test_types)}")
    print(f"{'=' * 70}")
    # Construct test command based on backend
    if backend == "vitest":
        # Base Vitest command
        cmd_parts = ["doppler", "run", "--config", env, "--", "vitest"]
        # BUG FIX: "run" is a Vitest subcommand and must directly follow
        # "vitest". Previously it was appended after the flags, where Vitest
        # treats it as a test-file filter named "run" instead of run mode.
        if not args.watch:
            cmd_parts.append("run")
        # Add test types as test-name grep patterns matching our markers
        if test_types:
            patterns = "|".join(test_types)
            cmd_parts.extend(["-t", f"({patterns})"])
        # Add coverage flag
        if args.coverage:
            cmd_parts.append("--coverage")
        # Add watch mode
        if args.watch:
            cmd_parts.append("--watch")
        # Add verbose flag
        if args.verbose:
            cmd_parts.append("--reporter=verbose")
    elif backend == "pytest":
        # Base pytest command
        cmd_parts = ["doppler", "run", "--config", env, "--", "pytest"]
        # Add test types as a marker expression, e.g. -m "unit or integration"
        if test_types:
            markers = " or ".join(test_types)
            cmd_parts.extend(["-m", markers])
        # Add coverage flags
        if args.coverage:
            cmd_parts.extend([
                "--cov=app",
                "--cov-report=term-missing",
                "--cov-report=html"
            ])
        # Add verbose flag
        if args.verbose:
            cmd_parts.append("-vv")
        # pytest doesn't have built-in watch mode
        if args.watch:
            print("\nWARNING: Warning: pytest doesn't support watch mode natively")
            print(" Consider using pytest-watch: pip install pytest-watch")
    # BUG FIX: quote every argument for the shell. A plain " ".join() broke
    # on "-t (unit|e2e)" (the parentheses are shell syntax, causing a shell
    # error) and on '-m unit or integration' (pytest received "-m unit" plus
    # stray positional arguments "or" and "integration").
    cmd = shlex.join(cmd_parts)
    success = run_command(cmd, f"Running {backend} tests")
    if not success:
        print(f"\nERROR: Tests failed")
        sys.exit(1)
    # Coverage threshold check (if coverage was run)
    if args.coverage and backend == "vitest":
        print("\n→ Checking coverage thresholds...")
        print(" Required: 80% (lines, functions, branches, statements)")
        # Vitest coverage is configured in vitest.config.ts
        # Thresholds are enforced automatically
        print(" ✓ Coverage thresholds enforced by Vitest config")
    elif args.coverage and backend == "pytest":
        print("\n→ Checking coverage thresholds...")
        print(" Required: 80% coverage")
        # Re-run with --cov-fail-under so the threshold itself gates exit code.
        # env is quoted in case a CI config name ever contains spaces.
        coverage_cmd = f"doppler run --config {shlex.quote(env)} -- pytest --cov=app --cov-fail-under=80 -q"
        coverage_success = run_command(coverage_cmd, "Validating coverage threshold")
        if not coverage_success:
            print("\nERROR: Coverage below 80% threshold")
            print(" Add more tests to increase coverage")
            sys.exit(1)
    # Success!
    print(f"\n{'=' * 70}")
    print(f" SUCCESS: All tests passed!")
    print(f"{'=' * 70}")
    if args.coverage:
        if backend == "vitest":
            print("\n Coverage report: coverage/index.html")
        else:
            print("\n Coverage report: htmlcov/index.html")
    print("\nNext steps:")
    if not test_types:
        print(" • All tests passed - ready to commit")
    else:
        print(f"{', '.join(test_types)} tests passed")
    if not args.coverage:
        print(" • Run with --coverage to check code coverage")
    print(" • Deploy with: python scripts/deploy.py --env staging")
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,110 @@
# .github/workflows/test.yml
# CI test workflow: runs the TypeScript (Vitest) and Python (pytest) suites
# in parallel jobs, each against throwaway Postgres/Redis service containers,
# with secrets injected via Doppler and coverage uploaded to Codecov.
name: Tests
on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]
jobs:
  test-typescript:
    name: TypeScript Tests
    runs-on: ubuntu-latest
    # Ephemeral databases for integration tests; health checks gate job start.
    services:
      postgres:
        image: postgres:16
        env:
          POSTGRES_DB: test_db
          POSTGRES_USER: test_user
          POSTGRES_PASSWORD: test_password
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432
      redis:
        image: redis:7-alpine
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 6379:6379
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: "20"
          # NOTE(review): actions/setup-node documents cache values npm/yarn/pnpm;
          # "bun" is not among them — confirm, or switch to oven-sh/setup-bun.
          cache: "bun"
      - name: Install Doppler CLI
        uses: dopplerhq/cli-action@v3
      - name: Install dependencies
        run: bun install
      - name: Run tests with coverage
        env:
          # Doppler service token scoped to the test config (GitHub secret).
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: doppler run --config test -- bun run test:coverage
      - name: Upload coverage
        uses: codecov/codecov-action@v4
        with:
          files: ./coverage/coverage-final.json
  test-python:
    name: Python Tests
    runs-on: ubuntu-latest
    services:
      postgres:
        image: postgres:16
        env:
          POSTGRES_DB: test_db
          POSTGRES_USER: test_user
          POSTGRES_PASSWORD: test_password
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
          cache: "pip"
      - name: Install Doppler CLI
        uses: dopplerhq/cli-action@v3
      - name: Install dependencies
        run: |
          python -m venv .venv
          source .venv/bin/activate
          pip install -r requirements.txt -r requirements-dev.txt
      - name: Run tests with coverage
        env:
          DOPPLER_TOKEN: ${{ secrets.DOPPLER_TOKEN_TEST }}
        run: |
          source .venv/bin/activate
          doppler run --config test -- pytest --cov=app --cov-report=xml
      - name: Upload coverage
        uses: codecov/codecov-action@v4
        with:
          files: ./coverage.xml

View File

@@ -0,0 +1,102 @@
# tests/conftest.py
"""Shared test fixtures for all tests."""
import pytest
import os
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from httpx import AsyncClient
from uuid import uuid4
from app.main import app
from app.db.models import Base
# Doppler provides DATABASE_URL_TEST at runtime
DATABASE_URL_TEST = os.getenv(
"DATABASE_URL_TEST",
"postgresql+asyncpg://localhost/test_db"
)
@pytest.fixture(scope="session")
async def engine():
    """Create test database engine.

    Session-scoped: the schema is created once before the first test,
    dropped after the last one, and the connection pool is disposed.

    NOTE(review): a session-scoped async fixture requires a matching
    session-scoped event loop in pytest-asyncio — confirm the project's
    asyncio_mode / loop-scope configuration supports this.
    """
    engine = create_async_engine(DATABASE_URL_TEST, echo=False)
    # Create all tables
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
    yield engine
    # Drop all tables
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.drop_all)
    await engine.dispose()
@pytest.fixture
async def session(engine):
    """Create test database session with automatic rollback.

    `expire_on_commit=False` keeps ORM objects usable after a commit;
    any work left uncommitted by the test is rolled back on teardown.
    (Note: rows a test explicitly commits are NOT rolled back here.)
    """
    async_session = sessionmaker(
        engine,
        class_=AsyncSession,
        expire_on_commit=False
    )
    async with async_session() as session:
        yield session
        await session.rollback()
@pytest.fixture
async def client():
    """Create test HTTP client that calls the ASGI app in-process.

    BUG FIX: the `AsyncClient(app=...)` shortcut was deprecated and then
    removed in httpx 0.27+. Routing the app through an explicit
    ASGITransport works on both older and current httpx releases.
    """
    # Local import so the module-level import block stays unchanged.
    from httpx import ASGITransport

    async with AsyncClient(
        transport=ASGITransport(app=app), base_url="http://test"
    ) as client:
        yield client
@pytest.fixture
def tenant_id():
    """Provide a unique tenant identifier for the current test."""
    generated = uuid4()
    return generated
@pytest.fixture
async def test_user(session, tenant_id):
    """Create and persist a user row belonging to the test tenant.

    NOTE(review): no password/credential is set here, yet
    `authenticated_client` logs in with "testpassword" — confirm the
    auth backend seeds a default credential, or that login will fail.
    """
    # Local import avoids circulars at conftest collection time.
    from app.db.models.user import User
    user = User(
        tenant_id=tenant_id,
        email_address="test@example.com",
        name="Test User",
        is_active=True,
    )
    session.add(user)
    await session.commit()
    await session.refresh(user)
    return user
@pytest.fixture
async def authenticated_client(client, test_user, tenant_id):
    """Create authenticated HTTP client.

    Logs in as `test_user` through the real auth endpoint, then installs
    the bearer token and tenant header on the shared client instance.
    """
    # Login and get token
    response = await client.post(
        "/api/auth/login",
        json={
            "email_address": test_user.email_address,
            # NOTE(review): hard-coded password; the test_user fixture does
            # not set one — verify the credential the auth backend expects.
            "password": "testpassword",
        },
    )
    assert response.status_code == 200
    token = response.json()["access_token"]
    # Add auth header to client
    client.headers["Authorization"] = f"Bearer {token}"
    client.headers["X-Tenant-ID"] = str(tenant_id)
    return client

View File

@@ -0,0 +1,113 @@
# tests/integration/test_FEATURE_api.py
import pytest
from httpx import AsyncClient
from uuid import uuid4
@pytest.mark.integration
class TestYourAPI:
    """Integration tests for Your API endpoints.

    Template: replace YOUR_RESOURCE with the real route. Depends on the
    `client` and `tenant_id` fixtures, plus a `test_resource` fixture
    that is not defined in the conftest shown here — TODO confirm it
    exists where this template is instantiated.

    NOTE(review): test methods are `async def` without explicit asyncio
    marks — assumes pytest-asyncio (or anyio) auto mode is enabled.
    """

    async def test_create_endpoint(self, client: AsyncClient, tenant_id):
        """Test POST /api/YOUR_RESOURCE creates resource."""
        response = await client.post(
            "/api/YOUR_RESOURCE",
            json={
                "name": "Test Resource",
                "description": "Test description",
            },
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        # Expect 201 Created echoing the payload, scoped to the caller's tenant.
        assert response.status_code == 201
        data = response.json()
        assert data["name"] == "Test Resource"
        assert data["tenant_id"] == str(tenant_id)

    async def test_get_endpoint(self, client: AsyncClient, tenant_id, test_resource):
        """Test GET /api/YOUR_RESOURCE/{id} retrieves resource."""
        response = await client.get(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["id"] == str(test_resource.id)
        assert data["name"] == test_resource.name

    async def test_get_enforces_tenant_isolation(
        self, client: AsyncClient, tenant_id, test_resource
    ):
        """Test GET enforces tenant isolation."""
        # Should succeed with correct tenant
        response = await client.get(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        assert response.status_code == 200
        # Should fail with different tenant
        # 404 (not 403) so a cross-tenant probe cannot confirm existence.
        different_tenant = str(uuid4())
        response = await client.get(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": different_tenant},
        )
        assert response.status_code == 404

    async def test_list_endpoint(self, client: AsyncClient, tenant_id):
        """Test GET /api/YOUR_RESOURCE lists resources."""
        response = await client.get(
            "/api/YOUR_RESOURCE",
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        assert response.status_code == 200
        data = response.json()
        assert isinstance(data, list)

    async def test_update_endpoint(
        self, client: AsyncClient, tenant_id, test_resource
    ):
        """Test PATCH /api/YOUR_RESOURCE/{id} updates resource."""
        response = await client.patch(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            json={"name": "Updated Name"},
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["name"] == "Updated Name"

    async def test_delete_endpoint(
        self, client: AsyncClient, tenant_id, test_resource
    ):
        """Test DELETE /api/YOUR_RESOURCE/{id} deletes resource."""
        response = await client.delete(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        assert response.status_code == 204
        # Verify deletion
        response = await client.get(
            f"/api/YOUR_RESOURCE/{test_resource.id}",
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        assert response.status_code == 404

    async def test_validation_errors(self, client: AsyncClient, tenant_id):
        """Test endpoint validates input correctly."""
        response = await client.post(
            "/api/YOUR_RESOURCE",
            json={
                "name": "",  # Invalid: empty name
            },
            headers={"X-Tenant-ID": str(tenant_id)},
        )
        # FastAPI/Pydantic-style validation failure: 422 with "detail".
        assert response.status_code == 422
        data = response.json()
        assert "detail" in data

View File

@@ -0,0 +1,119 @@
# tests/unit/repositories/test_FEATURE_repository.py
import pytest
from uuid import uuid4
from app.db.repositories.YOUR_repository import YourRepository
from app.db.models.YOUR_model import YourModel
@pytest.mark.unit
class TestYourRepository:
    """Unit tests for YourRepository.

    Template: replace YourRepository/YourModel with the real classes.
    Each test builds its own rows through the `session` fixture; the
    conftest rollback keeps tests independent of one another.

    NOTE(review): `async def` tests without explicit asyncio marks —
    assumes pytest-asyncio auto mode is enabled.
    """

    async def test_get_by_id_success(self, session, tenant_id):
        """Test retrieving entity by ID."""
        repo = YourRepository(session)
        # Create test entity
        entity = YourModel(
            tenant_id=tenant_id,
            name="Test Entity",
        )
        session.add(entity)
        await session.commit()
        await session.refresh(entity)
        # Retrieve entity
        result = await repo.get_by_id(entity.id, tenant_id)
        assert result is not None
        assert result.id == entity.id
        assert result.name == "Test Entity"

    async def test_get_by_id_enforces_tenant_isolation(
        self, session, tenant_id
    ):
        """Test that get_by_id enforces tenant isolation."""
        repo = YourRepository(session)
        # Create entity
        entity = YourModel(tenant_id=tenant_id, name="Test")
        session.add(entity)
        await session.commit()
        # Try to access with different tenant_id
        # The repository must return None rather than leak another tenant's row.
        different_tenant = uuid4()
        result = await repo.get_by_id(entity.id, different_tenant)
        assert result is None

    async def test_list_with_pagination(self, session, tenant_id):
        """Test list with pagination."""
        repo = YourRepository(session)
        # Create multiple entities
        entities = [
            YourModel(tenant_id=tenant_id, name=f"Entity {i}")
            for i in range(10)
        ]
        session.add_all(entities)
        await session.commit()
        # Get first page
        page1 = await repo.list(tenant_id, limit=5, offset=0)
        assert len(page1) == 5
        # Get second page
        page2 = await repo.list(tenant_id, limit=5, offset=5)
        assert len(page2) == 5
        # Verify no overlap
        page1_ids = {e.id for e in page1}
        page2_ids = {e.id for e in page2}
        assert page1_ids.isdisjoint(page2_ids)

    async def test_create_success(self, session, tenant_id):
        """Test creating new entity."""
        repo = YourRepository(session)
        entity = await repo.create(
            tenant_id=tenant_id,
            name="New Entity",
        )
        assert entity.id is not None
        assert entity.tenant_id == tenant_id
        assert entity.name == "New Entity"

    async def test_update_success(self, session, tenant_id):
        """Test updating existing entity."""
        repo = YourRepository(session)
        # Create entity
        entity = YourModel(tenant_id=tenant_id, name="Original")
        session.add(entity)
        await session.commit()
        # Update entity
        updated = await repo.update(
            entity.id,
            tenant_id,
            name="Updated",
        )
        assert updated.name == "Updated"

    async def test_delete_success(self, session, tenant_id):
        """Test deleting entity."""
        repo = YourRepository(session)
        # Create entity
        entity = YourModel(tenant_id=tenant_id, name="To Delete")
        session.add(entity)
        await session.commit()
        # Delete entity
        await repo.delete(entity.id, tenant_id)
        # Verify deletion
        result = await repo.get_by_id(entity.id, tenant_id)
        assert result is None

View File

@@ -0,0 +1,54 @@
// tests/unit/lib/components/COMPONENT.test.tsx
import { describe, it, expect, vi, beforeEach } from "vitest";
import { render, screen, fireEvent, waitFor } from "@testing-library/react";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import YourComponent from "~/lib/components/YourComponent";
// BUG FIX: `someFunction` was referenced in the error-state test without
// being imported (ReferenceError at runtime). Import it from the mocked
// module so vi.mocked() can configure it.
import { someFunction } from "~/lib/server/functions/YOUR_MODULE";

// Mock dependencies
vi.mock("~/lib/server/functions/YOUR_MODULE");

describe("YourComponent", () => {
  // A fresh QueryClient per test prevents cached query results/errors
  // from one test leaking into the next.
  let queryClient: QueryClient;
  beforeEach(() => {
    queryClient = new QueryClient({
      defaultOptions: {
        queries: { retry: false }, // fail fast in tests instead of retrying
      },
    });
  });

  const wrapper = ({ children }: { children: React.ReactNode }) => (
    <QueryClientProvider client={queryClient}>
      {children}
    </QueryClientProvider>
  );

  it("renders correctly with initial state", () => {
    render(<YourComponent />, { wrapper });
    expect(screen.getByText("Expected Text")).toBeInTheDocument();
  });

  it("handles user interaction", async () => {
    render(<YourComponent />, { wrapper });
    const button = screen.getByRole("button", { name: /click me/i });
    fireEvent.click(button);
    await waitFor(() => {
      expect(screen.getByText("Updated Text")).toBeInTheDocument();
    });
  });

  it("displays loading state", () => {
    render(<YourComponent isLoading={true} />, { wrapper });
    expect(screen.getByText(/loading/i)).toBeInTheDocument();
  });

  it("displays error state", async () => {
    // Mock error
    vi.mocked(someFunction).mockRejectedValue(new Error("Test error"));
    render(<YourComponent />, { wrapper });
    await waitFor(() => {
      expect(screen.getByText(/error/i)).toBeInTheDocument();
    });
  });
});

View File

@@ -0,0 +1,87 @@
// tests/integration/FEATURE-flow.test.ts
import { describe, it, expect, beforeEach, afterEach } from "vitest";
import { db } from "~/lib/server/db";
import { users } from "~/lib/server/db/schema";
// BUG FIX: `and` is needed to combine filters — in drizzle-orm, calling
// .where() twice does NOT AND the conditions; the second call replaces
// the first, silently dropping the id/tenant filter.
import { and, eq } from "drizzle-orm";

describe("Feature Integration Tests", () => {
  const testTenantId = "550e8400-e29b-41d4-a716-446655440000";

  beforeEach(async () => {
    // Setup test data: start from a clean slate for this tenant.
    await db.delete(users).where(eq(users.tenant_id, testTenantId));
  });

  afterEach(async () => {
    // Cleanup test data
    await db.delete(users).where(eq(users.tenant_id, testTenantId));
  });

  it("completes full workflow successfully", async () => {
    // 1. Create resource
    const [created] = await db
      .insert(users)
      .values({
        tenant_id: testTenantId,
        email_address: "test@example.com",
        name: "Test User",
      })
      .returning();
    expect(created).toBeDefined();
    expect(created.email_address).toBe("test@example.com");

    // 2. Retrieve resource (id AND tenant combined in one where clause)
    const [retrieved] = await db
      .select()
      .from(users)
      .where(and(eq(users.id, created.id), eq(users.tenant_id, testTenantId)));
    expect(retrieved).toBeDefined();
    expect(retrieved.id).toBe(created.id);

    // 3. Update resource
    const [updated] = await db
      .update(users)
      .set({ name: "Updated Name" })
      .where(eq(users.id, created.id))
      .returning();
    expect(updated.name).toBe("Updated Name");

    // 4. Delete resource
    await db.delete(users).where(eq(users.id, created.id));

    // 5. Verify deletion
    const [deleted] = await db
      .select()
      .from(users)
      .where(eq(users.id, created.id));
    expect(deleted).toBeUndefined();
  });

  it("enforces tenant isolation", async () => {
    const differentTenantId = "00000000-0000-0000-0000-000000000000";

    // Create user in tenant 1
    const [user] = await db
      .insert(users)
      .values({
        tenant_id: testTenantId,
        email_address: "tenant1@example.com",
        name: "Tenant 1 User",
      })
      .returning();

    // Attempt to access with different tenant_id — both predicates must
    // apply for the test to actually prove isolation.
    const [result] = await db
      .select()
      .from(users)
      .where(and(eq(users.id, user.id), eq(users.tenant_id, differentTenantId)));
    expect(result).toBeUndefined();
  });
});

View File

@@ -0,0 +1,19 @@
// tests/unit/lib/utils/FEATURE.test.ts
import { describe, it, expect } from "vitest";
import { functionToTest } from "~/lib/utils/FEATURE";

describe("functionToTest", () => {
  it("handles valid input correctly", () => {
    expect(functionToTest("valid input")).toBe("expected output");
  });

  it("handles edge cases", () => {
    const emptyResult = functionToTest("");
    expect(emptyResult).toBe("");
    const nullResult = functionToTest(null);
    expect(nullResult).toBeNull();
  });

  it("throws error for invalid input", () => {
    const invoke = () => functionToTest("invalid");
    expect(invoke).toThrow("Error message");
  });
});

View File

@@ -0,0 +1,49 @@
// vitest.config.ts
import { defineConfig } from "vitest/config";
import react from "@vitejs/plugin-react";
import path from "path";

// Vitest configuration: jsdom environment for React component tests,
// v8 coverage with an 80% floor on every metric.
export default defineConfig({
  plugins: [react()],
  test: {
    // Enable global test APIs (describe/it/expect without imports)
    globals: true,
    // Use jsdom for browser-like environment
    environment: "jsdom",
    // Run setup file before tests
    setupFiles: ["./tests/setup.ts"],
    // Coverage configuration
    coverage: {
      provider: "v8",
      // "json" emits coverage/coverage-final.json, which CI uploads.
      reporter: ["text", "json", "html"],
      exclude: [
        "node_modules/",
        "tests/",
        "**/*.config.ts",
        "**/*.d.ts",
      ],
      // Builds fail below 80% on any metric.
      thresholds: {
        lines: 80,
        functions: 80,
        branches: 80,
        statements: 80,
      },
    },
    // Environment variables (localhost fallbacks for running without CI/Doppler)
    env: {
      DATABASE_URL_ADMIN: process.env.DATABASE_URL_ADMIN || "postgresql://localhost/test",
      REDIS_URL: process.env.REDIS_URL || "redis://localhost:6379",
    },
  },
  // Path aliases: "~" maps to src/, matching the app's tsconfig paths.
  resolve: {
    alias: {
      "~": path.resolve(__dirname, "./src"),
    },
  },
});