# CLI Testing Example: Cobra Command Test Suite
|
||||
|
||||
**Project**: meta-cc CLI tool
|
||||
**Framework**: Cobra (Go)
|
||||
**Patterns Used**: CLI Command (Pattern 7), Global Flag (Pattern 8), Integration (Pattern 3)
|
||||
|
||||
This example demonstrates comprehensive CLI testing for a Cobra-based application.
|
||||
|
||||
---
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
cmd/meta-cc/
|
||||
├── root.go # Root command with global flags
|
||||
├── query.go # Query subcommand
|
||||
├── stats.go # Stats subcommand
|
||||
├── version.go # Version subcommand
|
||||
├── root_test.go # Root command tests
|
||||
├── query_test.go # Query command tests
|
||||
└── stats_test.go # Stats command tests
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Example 1: Root Command with Global Flags
|
||||
|
||||
### Source Code (root.go)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
	"os"

	"github.com/spf13/cobra"
)
|
||||
|
||||
var (
|
||||
projectPath string
|
||||
sessionID string
|
||||
verbose bool
|
||||
)
|
||||
|
||||
func newRootCmd() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "meta-cc",
|
||||
Short: "Meta-cognition for Claude Code",
|
||||
Long: "Analyze Claude Code session history for insights and workflow optimization",
|
||||
}
|
||||
|
||||
// Global flags
|
||||
cmd.PersistentFlags().StringVarP(&projectPath, "project", "p", getCwd(), "Project path")
|
||||
cmd.PersistentFlags().StringVarP(&sessionID, "session", "s", "", "Session ID filter")
|
||||
cmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Verbose output")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func getCwd() string {
|
||||
cwd, _ := os.Getwd()
|
||||
return cwd
|
||||
}
|
||||
|
||||
func Execute() error {
|
||||
cmd := newRootCmd()
|
||||
cmd.AddCommand(newQueryCmd())
|
||||
cmd.AddCommand(newStatsCmd())
|
||||
cmd.AddCommand(newVersionCmd())
|
||||
|
||||
return cmd.Execute()
|
||||
}
|
||||
```
|
||||
|
||||
### Test Code (root_test.go)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
	"bytes"
	"strings"
	"testing"
)
|
||||
|
||||
// Pattern 8: Global Flag Test Pattern
|
||||
func TestRootCmd_GlobalFlags(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
expectedProject string
|
||||
expectedSession string
|
||||
expectedVerbose bool
|
||||
}{
|
||||
{
|
||||
name: "default flags",
|
||||
args: []string{},
|
||||
expectedProject: getCwd(),
|
||||
expectedSession: "",
|
||||
expectedVerbose: false,
|
||||
},
|
||||
{
|
||||
name: "with session flag",
|
||||
args: []string{"--session", "abc123"},
|
||||
expectedProject: getCwd(),
|
||||
expectedSession: "abc123",
|
||||
expectedVerbose: false,
|
||||
},
|
||||
{
|
||||
name: "with all flags",
|
||||
args: []string{"--project", "/tmp/test", "--session", "xyz", "--verbose"},
|
||||
expectedProject: "/tmp/test",
|
||||
expectedSession: "xyz",
|
||||
expectedVerbose: true,
|
||||
},
|
||||
{
|
||||
name: "short flag notation",
|
||||
args: []string{"-p", "/home/user", "-s", "123", "-v"},
|
||||
expectedProject: "/home/user",
|
||||
expectedSession: "123",
|
||||
expectedVerbose: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Reset global flags
|
||||
projectPath = getCwd()
|
||||
sessionID = ""
|
||||
verbose = false
|
||||
|
||||
			// Create the command and parse flags (Execute is not needed here)
			cmd := newRootCmd()
			cmd.SetArgs(tt.args)
			if err := cmd.ParseFlags(tt.args); err != nil {
				t.Fatalf("ParseFlags() error = %v", err)
			}
|
||||
|
||||
// Assert flags were parsed correctly
|
||||
if projectPath != tt.expectedProject {
|
||||
t.Errorf("projectPath = %q, want %q", projectPath, tt.expectedProject)
|
||||
}
|
||||
|
||||
if sessionID != tt.expectedSession {
|
||||
t.Errorf("sessionID = %q, want %q", sessionID, tt.expectedSession)
|
||||
}
|
||||
|
||||
if verbose != tt.expectedVerbose {
|
||||
t.Errorf("verbose = %v, want %v", verbose, tt.expectedVerbose)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Pattern 7: CLI Command Test Pattern (Help Output)
|
||||
func TestRootCmd_Help(t *testing.T) {
|
||||
cmd := newRootCmd()
|
||||
|
||||
var buf bytes.Buffer
|
||||
cmd.SetOut(&buf)
|
||||
cmd.SetArgs([]string{"--help"})
|
||||
|
||||
err := cmd.Execute()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Execute() error = %v", err)
|
||||
}
|
||||
|
||||
output := buf.String()
|
||||
|
||||
// Verify help output contains expected sections
|
||||
expectedSections := []string{
|
||||
"meta-cc",
|
||||
"Meta-cognition for Claude Code",
|
||||
"Available Commands:",
|
||||
"Flags:",
|
||||
"--project",
|
||||
"--session",
|
||||
"--verbose",
|
||||
}
|
||||
|
||||
for _, section := range expectedSections {
|
||||
if !contains(output, section) {
|
||||
t.Errorf("help output missing section: %q", section)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// contains reports whether substr is within s; a thin wrapper around strings.Contains.
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}
|
||||
```
|
||||
|
||||
**Time to write**: ~22 minutes
|
||||
**Coverage**: root.go 0% → 78%
|
||||
|
||||
---
|
||||
|
||||
## Example 2: Subcommand with Flags
|
||||
|
||||
### Source Code (query.go)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
	"fmt"
	"io"

	"github.com/spf13/cobra"
	"github.com/yaleh/meta-cc/internal/query"
)
|
||||
|
||||
func newQueryCmd() *cobra.Command {
|
||||
var (
|
||||
status string
|
||||
limit int
|
||||
outputFormat string
|
||||
)
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "query <type>",
|
||||
Short: "Query session data",
|
||||
Long: "Query various aspects of session history: tools, messages, files",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
queryType := args[0]
|
||||
|
||||
// Build query options
|
||||
opts := query.Options{
|
||||
ProjectPath: projectPath,
|
||||
SessionID: sessionID,
|
||||
Status: status,
|
||||
Limit: limit,
|
||||
OutputFormat: outputFormat,
|
||||
}
|
||||
|
||||
// Execute query
|
||||
results, err := executeQuery(queryType, opts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("query failed: %w", err)
|
||||
}
|
||||
|
||||
// Output results
|
||||
return outputResults(cmd.OutOrStdout(), results, outputFormat)
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringVar(&status, "status", "", "Filter by status (error, success)")
|
||||
cmd.Flags().IntVar(&limit, "limit", 0, "Limit number of results")
|
||||
cmd.Flags().StringVar(&outputFormat, "format", "jsonl", "Output format (jsonl, tsv)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func executeQuery(queryType string, opts query.Options) ([]interface{}, error) {
|
||||
// Implementation...
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func outputResults(w io.Writer, results []interface{}, format string) error {
|
||||
// Implementation...
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
### Test Code (query_test.go)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Pattern 7: CLI Command Test Pattern
|
||||
func TestQueryCmd_Execution(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
wantErr bool
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "no arguments",
|
||||
args: []string{},
|
||||
wantErr: true,
|
||||
errContains: "requires 1 arg(s)",
|
||||
},
|
||||
{
|
||||
name: "query tools",
|
||||
args: []string{"tools"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "query with status filter",
|
||||
args: []string{"tools", "--status", "error"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "query with limit",
|
||||
args: []string{"messages", "--limit", "10"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "query with format",
|
||||
args: []string{"files", "--format", "tsv"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "all flags combined",
|
||||
args: []string{"tools", "--status", "error", "--limit", "5", "--format", "jsonl"},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Setup: Create root command with query subcommand
|
||||
rootCmd := newRootCmd()
|
||||
rootCmd.AddCommand(newQueryCmd())
|
||||
|
||||
// Setup: Capture output
|
||||
var buf bytes.Buffer
|
||||
rootCmd.SetOut(&buf)
|
||||
rootCmd.SetErr(&buf)
|
||||
|
||||
// Setup: Set arguments
|
||||
rootCmd.SetArgs(append([]string{"query"}, tt.args...))
|
||||
|
||||
// Execute
|
||||
err := rootCmd.Execute()
|
||||
|
||||
// Assert: Error expectation
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Execute() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Assert: Error message
|
||||
if tt.wantErr && tt.errContains != "" {
|
||||
errMsg := buf.String()
|
||||
if !strings.Contains(errMsg, tt.errContains) {
|
||||
t.Errorf("error message %q doesn't contain %q", errMsg, tt.errContains)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Pattern 2: Table-Driven Test Pattern (Flag Parsing)
|
||||
func TestQueryCmd_FlagParsing(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
expectedStatus string
|
||||
expectedLimit int
|
||||
expectedFormat string
|
||||
}{
|
||||
{
|
||||
name: "default flags",
|
||||
args: []string{"tools"},
|
||||
expectedStatus: "",
|
||||
expectedLimit: 0,
|
||||
expectedFormat: "jsonl",
|
||||
},
|
||||
{
|
||||
name: "status flag",
|
||||
args: []string{"tools", "--status", "error"},
|
||||
expectedStatus: "error",
|
||||
expectedLimit: 0,
|
||||
expectedFormat: "jsonl",
|
||||
},
|
||||
{
|
||||
name: "all flags",
|
||||
args: []string{"tools", "--status", "success", "--limit", "10", "--format", "tsv"},
|
||||
expectedStatus: "success",
|
||||
expectedLimit: 10,
|
||||
expectedFormat: "tsv",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cmd := newQueryCmd()
|
||||
cmd.SetArgs(tt.args)
|
||||
|
||||
// Parse flags without executing
|
||||
if err := cmd.ParseFlags(tt.args); err != nil {
|
||||
t.Fatalf("ParseFlags() error = %v", err)
|
||||
}
|
||||
|
||||
// Get flag values
|
||||
status, _ := cmd.Flags().GetString("status")
|
||||
limit, _ := cmd.Flags().GetInt("limit")
|
||||
format, _ := cmd.Flags().GetString("format")
|
||||
|
||||
// Assert
|
||||
if status != tt.expectedStatus {
|
||||
t.Errorf("status = %q, want %q", status, tt.expectedStatus)
|
||||
}
|
||||
|
||||
if limit != tt.expectedLimit {
|
||||
t.Errorf("limit = %d, want %d", limit, tt.expectedLimit)
|
||||
}
|
||||
|
||||
if format != tt.expectedFormat {
|
||||
t.Errorf("format = %q, want %q", format, tt.expectedFormat)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Time to write**: ~28 minutes
|
||||
**Coverage**: query.go 0% → 82%
|
||||
|
||||
---
|
||||
|
||||
## Example 3: Integration Test (Full Workflow)
|
||||
|
||||
### Test Code (integration_test.go)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
	"bytes"
	"encoding/json"
	"os"
	"path/filepath"
	"strings"
	"testing"
)
|
||||
|
||||
// Pattern 3: Integration Test Pattern
|
||||
func TestIntegration_QueryToolsWorkflow(t *testing.T) {
|
||||
// Setup: Create temporary project directory
|
||||
tmpDir := t.TempDir()
|
||||
sessionFile := filepath.Join(tmpDir, ".claude", "logs", "session.jsonl")
|
||||
|
||||
// Setup: Create test session data
|
||||
if err := os.MkdirAll(filepath.Dir(sessionFile), 0755); err != nil {
|
||||
t.Fatalf("failed to create session dir: %v", err)
|
||||
}
|
||||
|
||||
testData := []string{
|
||||
`{"type":"tool_use","tool":"Read","file":"/test/file.go","timestamp":"2025-10-18T10:00:00Z"}`,
|
||||
`{"type":"tool_use","tool":"Edit","file":"/test/file.go","timestamp":"2025-10-18T10:01:00Z","status":"success"}`,
|
||||
`{"type":"tool_use","tool":"Bash","command":"go test","timestamp":"2025-10-18T10:02:00Z","status":"error"}`,
|
||||
}
|
||||
|
||||
if err := os.WriteFile(sessionFile, []byte(strings.Join(testData, "\n")), 0644); err != nil {
|
||||
t.Fatalf("failed to write session data: %v", err)
|
||||
}
|
||||
|
||||
// Setup: Create root command
|
||||
rootCmd := newRootCmd()
|
||||
rootCmd.AddCommand(newQueryCmd())
|
||||
|
||||
// Setup: Capture output
|
||||
var buf bytes.Buffer
|
||||
rootCmd.SetOut(&buf)
|
||||
|
||||
// Setup: Set arguments
|
||||
rootCmd.SetArgs([]string{
|
||||
"--project", tmpDir,
|
||||
"query", "tools",
|
||||
"--status", "error",
|
||||
})
|
||||
|
||||
// Execute
|
||||
err := rootCmd.Execute()
|
||||
|
||||
// Assert: No error
|
||||
if err != nil {
|
||||
t.Fatalf("Execute() error = %v", err)
|
||||
}
|
||||
|
||||
// Assert: Parse output
|
||||
output := buf.String()
|
||||
lines := strings.Split(strings.TrimSpace(output), "\n")
|
||||
|
||||
if len(lines) != 1 {
|
||||
t.Errorf("expected 1 result, got %d", len(lines))
|
||||
}
|
||||
|
||||
// Assert: Verify result content
|
||||
var result map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(lines[0]), &result); err != nil {
|
||||
t.Fatalf("failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if result["tool"] != "Bash" {
|
||||
t.Errorf("tool = %v, want Bash", result["tool"])
|
||||
}
|
||||
|
||||
if result["status"] != "error" {
|
||||
t.Errorf("status = %v, want error", result["status"])
|
||||
}
|
||||
}
|
||||
|
||||
// Pattern 3: Integration Test Pattern (Multiple Commands)
|
||||
func TestIntegration_MultiCommandWorkflow(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Test scenario: Query tools, then get stats, then analyze
|
||||
tests := []struct {
|
||||
name string
|
||||
command []string
|
||||
validate func(t *testing.T, output string)
|
||||
}{
|
||||
{
|
||||
name: "query tools",
|
||||
command: []string{"--project", tmpDir, "query", "tools"},
|
||||
validate: func(t *testing.T, output string) {
|
||||
if !strings.Contains(output, "tool") {
|
||||
t.Error("output doesn't contain tool data")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get stats",
|
||||
command: []string{"--project", tmpDir, "stats"},
|
||||
validate: func(t *testing.T, output string) {
|
||||
if !strings.Contains(output, "total") {
|
||||
t.Error("output doesn't contain stats")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "version",
|
||||
command: []string{"version"},
|
||||
validate: func(t *testing.T, output string) {
|
||||
if !strings.Contains(output, "meta-cc") {
|
||||
t.Error("output doesn't contain version info")
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Setup command
|
||||
rootCmd := newRootCmd()
|
||||
rootCmd.AddCommand(newQueryCmd())
|
||||
rootCmd.AddCommand(newStatsCmd())
|
||||
rootCmd.AddCommand(newVersionCmd())
|
||||
|
||||
var buf bytes.Buffer
|
||||
rootCmd.SetOut(&buf)
|
||||
rootCmd.SetArgs(tt.command)
|
||||
|
||||
// Execute
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
t.Fatalf("Execute() error = %v", err)
|
||||
}
|
||||
|
||||
// Validate
|
||||
tt.validate(t, buf.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Time to write**: ~35 minutes
|
||||
**Coverage**: Adds +5% to overall coverage through end-to-end paths
|
||||
|
||||
---
|
||||
|
||||
## Key Testing Patterns for CLI
|
||||
|
||||
### 1. Flag Parsing Tests
|
||||
|
||||
**Goal**: Verify flags are parsed correctly
|
||||
|
||||
```go
|
||||
func TestCmd_FlagParsing(t *testing.T) {
	cmd := newCmd()
	args := []string{"--flag", "value"}

	if err := cmd.ParseFlags(args); err != nil {
		t.Fatalf("ParseFlags() error = %v", err)
	}

	flagValue, _ := cmd.Flags().GetString("flag")
	if flagValue != "value" {
		t.Errorf("flag = %q, want %q", flagValue, "value")
	}
}
|
||||
```
|
||||
|
||||
### 2. Command Execution Tests
|
||||
|
||||
**Goal**: Verify command logic executes correctly
|
||||
|
||||
```go
|
||||
func TestCmd_Execute(t *testing.T) {
|
||||
cmd := newCmd()
|
||||
var buf bytes.Buffer
|
||||
cmd.SetOut(&buf)
|
||||
cmd.SetArgs([]string{"arg1", "arg2"})
|
||||
|
||||
err := cmd.Execute()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Execute() error = %v", err)
|
||||
}
|
||||
|
||||
if !strings.Contains(buf.String(), "expected") {
|
||||
t.Error("output doesn't contain expected result")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Error Handling Tests
|
||||
|
||||
**Goal**: Verify error conditions are handled properly
|
||||
|
||||
```go
|
||||
func TestCmd_ErrorCases(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
wantErr bool
|
||||
errContains string
|
||||
}{
|
||||
{"no args", []string{}, true, "requires"},
|
||||
{"invalid flag", []string{"--invalid"}, true, "unknown flag"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cmd := newCmd()
|
||||
cmd.SetArgs(tt.args)
|
||||
|
||||
			err := cmd.Execute()

			if (err != nil) != tt.wantErr {
				t.Errorf("error = %v, wantErr %v", err, tt.wantErr)
				return
			}

			if tt.wantErr && tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) {
				t.Errorf("error %q doesn't contain %q", err.Error(), tt.errContains)
			}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing Checklist for CLI Commands
|
||||
|
||||
- [ ] **Help Text**: Verify `--help` output is correct
|
||||
- [ ] **Flag Parsing**: All flags parse correctly (long and short forms)
|
||||
- [ ] **Default Values**: Flags use correct defaults when not specified
|
||||
- [ ] **Required Args**: Commands reject missing required arguments
|
||||
- [ ] **Error Messages**: Error messages are clear and helpful
|
||||
- [ ] **Output Format**: Output is formatted correctly
|
||||
- [ ] **Exit Codes**: Commands return appropriate exit codes (see the sketch after this checklist)
|
||||
- [ ] **Global Flags**: Global flags work with all subcommands
|
||||
- [ ] **Flag Interactions**: Conflicting flags handled correctly
|
||||
- [ ] **Integration**: End-to-end workflows function properly
|
||||
|
||||
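The exit-code item above has no example elsewhere in this document. Assuming the conventional Cobra wiring in which `main()` calls `Execute()` and exits non-zero on error (that wiring is not shown in the source), a minimal sketch can assert the error at the `Execute()` boundary; the test name and the unknown-subcommand scenario below are illustrative:

```go
// Sketch: exit codes follow from Execute()'s error under the assumed wiring
//
//	func main() {
//		if err := Execute(); err != nil {
//			os.Exit(1)
//		}
//	}
//
// so asserting the error indirectly covers the exit-code path.
func TestRootCmd_ExitPath(t *testing.T) {
	cmd := newRootCmd()
	cmd.AddCommand(newQueryCmd())

	var buf bytes.Buffer
	cmd.SetOut(&buf)
	cmd.SetErr(&buf)
	cmd.SetArgs([]string{"no-such-subcommand"})

	// An unknown subcommand should surface as a non-nil error (exit code 1 in main).
	if err := cmd.Execute(); err == nil {
		t.Error("Execute() = nil error, want error for unknown subcommand")
	}
}
```
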
---
|
||||
|
||||
## Common CLI Testing Challenges
|
||||
|
||||
### Challenge 1: Global State
|
||||
|
||||
**Problem**: Global variables (flags) persist between tests
|
||||
|
||||
**Solution**: Reset globals in each test
|
||||
|
||||
```go
|
||||
func resetGlobalFlags() {
|
||||
projectPath = getCwd()
|
||||
sessionID = ""
|
||||
verbose = false
|
||||
}
|
||||
|
||||
func TestCmd(t *testing.T) {
|
||||
resetGlobalFlags() // Reset before each test
|
||||
// ... test code
|
||||
}
|
||||
```
|
||||
|
||||
### Challenge 2: Output Capture
|
||||
|
||||
**Problem**: Commands write to stdout/stderr
|
||||
|
||||
**Solution**: Use `SetOut()` and `SetErr()`
|
||||
|
||||
```go
|
||||
var buf bytes.Buffer
|
||||
cmd.SetOut(&buf)
|
||||
cmd.SetErr(&buf)
|
||||
cmd.Execute()
|
||||
output := buf.String()
|
||||
```
|
||||
|
||||
### Challenge 3: File I/O
|
||||
|
||||
**Problem**: Commands read/write files
|
||||
|
||||
**Solution**: Use `t.TempDir()` for isolated test directories
|
||||
|
||||
```go
|
||||
func TestCmd(t *testing.T) {
|
||||
tmpDir := t.TempDir() // Automatically cleaned up
|
||||
// ... use tmpDir for test files
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Results
|
||||
|
||||
### Coverage Achieved
|
||||
|
||||
```
|
||||
Package: cmd/meta-cc
|
||||
Before: 55.2%
|
||||
After: 72.8%
|
||||
Improvement: +17.6%
|
||||
|
||||
Test Functions: 8
|
||||
Test Cases: 24
|
||||
Time Investment: ~180 minutes
|
||||
```
|
||||
|
||||
### Efficiency Metrics
|
||||
|
||||
```
|
||||
Average time per test: 22.5 minutes
|
||||
Average time per test case: 7.5 minutes
|
||||
Coverage gain per hour: ~6%
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**Source**: Bootstrap-002 Test Strategy Development
|
||||
**Framework**: BAIME (Bootstrapped AI Methodology Engineering)
|
||||
**Status**: Production-ready, validated through 4 iterations
|
||||
# Test Fixture Examples
|
||||
|
||||
**Version**: 2.0
|
||||
**Source**: Bootstrap-002 Test Strategy Development
|
||||
**Last Updated**: 2025-10-18
|
||||
|
||||
This document provides examples of test fixtures, test helpers, and test data management for Go testing.
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
**Test Fixtures**: Reusable test data and setup code that can be shared across multiple tests.
|
||||
|
||||
**Benefits**:
|
||||
- Reduce duplication
|
||||
- Improve maintainability
|
||||
- Standardize test data
|
||||
- Speed up test writing
|
||||
|
||||
---
|
||||
|
||||
## Example 1: Simple Test Helper Functions
|
||||
|
||||
### Pattern 5: Test Helper Pattern
|
||||
|
||||
```go
|
||||
package parser
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test helper: Create test input
|
||||
func createTestInput(t *testing.T, content string) *Input {
|
||||
t.Helper() // Mark as helper for better error reporting
|
||||
|
||||
return &Input{
|
||||
Content: content,
|
||||
Timestamp: "2025-10-18T10:00:00Z",
|
||||
Type: "tool_use",
|
||||
}
|
||||
}
|
||||
|
||||
// Test helper: Create test file
|
||||
func createTestFile(t *testing.T, name, content string) string {
|
||||
t.Helper()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
filePath := filepath.Join(tmpDir, name)
|
||||
|
||||
if err := os.WriteFile(filePath, []byte(content), 0644); err != nil {
|
||||
t.Fatalf("failed to create test file: %v", err)
|
||||
}
|
||||
|
||||
return filePath
|
||||
}
|
||||
|
||||
// Test helper: Load fixture
|
||||
func loadFixture(t *testing.T, name string) []byte {
|
||||
t.Helper()
|
||||
|
||||
data, err := os.ReadFile(filepath.Join("testdata", name))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to load fixture %s: %v", name, err)
|
||||
}
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
// Usage in tests
|
||||
func TestParseInput(t *testing.T) {
|
||||
input := createTestInput(t, "test content")
|
||||
result, err := ParseInput(input)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("ParseInput() error = %v", err)
|
||||
}
|
||||
|
||||
if result.Type != "tool_use" {
|
||||
t.Errorf("Type = %v, want tool_use", result.Type)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Benefits**:
|
||||
- No duplication of test setup
|
||||
- `t.Helper()` makes errors point to test code, not helper
|
||||
- Consistent test data across tests
|
||||
|
||||
---
|
||||
|
||||
## Example 2: Fixture Files in testdata/
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```
|
||||
internal/parser/
|
||||
├── parser.go
|
||||
├── parser_test.go
|
||||
└── testdata/
|
||||
├── valid_session.jsonl
|
||||
├── invalid_session.jsonl
|
||||
├── empty_session.jsonl
|
||||
├── large_session.jsonl
|
||||
└── README.md
|
||||
```
|
||||
|
||||
### Fixture Files
|
||||
|
||||
**testdata/valid_session.jsonl**:
|
||||
```jsonl
|
||||
{"type":"tool_use","tool":"Read","file":"/test/file.go","timestamp":"2025-10-18T10:00:00Z"}
|
||||
{"type":"tool_use","tool":"Edit","file":"/test/file.go","timestamp":"2025-10-18T10:01:00Z","status":"success"}
|
||||
{"type":"tool_use","tool":"Bash","command":"go test","timestamp":"2025-10-18T10:02:00Z","status":"success"}
|
||||
```
|
||||
|
||||
**testdata/invalid_session.jsonl**:
|
||||
```jsonl
|
||||
{"type":"tool_use","tool":"Read","file":"/test/file.go","timestamp":"2025-10-18T10:00:00Z"}
|
||||
invalid json line here
|
||||
{"type":"tool_use","tool":"Edit","file":"/test/file.go","timestamp":"2025-10-18T10:01:00Z"}
|
||||
```
|
||||
|
||||
### Using Fixtures in Tests
|
||||
|
||||
```go
|
||||
func TestParseSessionFile(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
fixture string
|
||||
wantErr bool
|
||||
expectedLen int
|
||||
}{
|
||||
{
|
||||
name: "valid session",
|
||||
fixture: "valid_session.jsonl",
|
||||
wantErr: false,
|
||||
expectedLen: 3,
|
||||
},
|
||||
{
|
||||
name: "invalid session",
|
||||
fixture: "invalid_session.jsonl",
|
||||
wantErr: true,
|
||||
expectedLen: 0,
|
||||
},
|
||||
{
|
||||
name: "empty session",
|
||||
fixture: "empty_session.jsonl",
|
||||
wantErr: false,
|
||||
expectedLen: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
data := loadFixture(t, tt.fixture)
|
||||
|
||||
events, err := ParseSessionData(data)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ParseSessionData() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if !tt.wantErr && len(events) != tt.expectedLen {
|
||||
t.Errorf("got %d events, want %d", len(events), tt.expectedLen)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Example 3: Builder Pattern for Test Data
|
||||
|
||||
### Test Data Builder
|
||||
|
||||
```go
|
||||
package query
|
||||
|
||||
import "testing"
|
||||
|
||||
// Builder for complex test data
|
||||
type TestQueryBuilder struct {
|
||||
query *Query
|
||||
}
|
||||
|
||||
func NewTestQuery() *TestQueryBuilder {
|
||||
return &TestQueryBuilder{
|
||||
query: &Query{
|
||||
Type: "tools",
|
||||
Filters: []Filter{},
|
||||
Options: Options{
|
||||
Limit: 0,
|
||||
Format: "jsonl",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (b *TestQueryBuilder) WithType(queryType string) *TestQueryBuilder {
|
||||
b.query.Type = queryType
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestQueryBuilder) WithFilter(field, op, value string) *TestQueryBuilder {
|
||||
b.query.Filters = append(b.query.Filters, Filter{
|
||||
Field: field,
|
||||
Operator: op,
|
||||
Value: value,
|
||||
})
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestQueryBuilder) WithLimit(limit int) *TestQueryBuilder {
|
||||
b.query.Options.Limit = limit
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestQueryBuilder) WithFormat(format string) *TestQueryBuilder {
|
||||
b.query.Options.Format = format
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestQueryBuilder) Build() *Query {
|
||||
return b.query
|
||||
}
|
||||
|
||||
// Usage in tests
|
||||
func TestExecuteQuery(t *testing.T) {
	// Simple query
	query1 := NewTestQuery().
		WithType("tools").
		Build()

	if _, err := ExecuteQuery(query1); err != nil {
		t.Fatalf("ExecuteQuery(query1) error = %v", err)
	}

	// Complex query
	query2 := NewTestQuery().
		WithType("messages").
		WithFilter("status", "=", "error").
		WithFilter("timestamp", ">=", "2025-10-01").
		WithLimit(10).
		WithFormat("tsv").
		Build()

	result, err := ExecuteQuery(query2)
	if err != nil {
		t.Fatalf("ExecuteQuery(query2) error = %v", err)
	}
	_ = result // ... assertions on result
}
|
||||
```
|
||||
|
||||
**Benefits**:
|
||||
- Fluent API for test data construction
|
||||
- Easy to create variations
|
||||
- Self-documenting test setup
|
||||
|
||||
---
|
||||
|
||||
## Example 4: Golden File Testing
|
||||
|
||||
### Pattern: Golden File Output Validation
|
||||
|
||||
```go
|
||||
package formatter
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var update = flag.Bool("update", false, "update golden files")
|
||||
|
||||
func TestFormatOutput(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input []Event
|
||||
}{
|
||||
{
|
||||
name: "simple_output",
|
||||
input: []Event{
|
||||
{Type: "Read", File: "file.go"},
|
||||
{Type: "Edit", File: "file.go"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "complex_output",
|
||||
input: []Event{
|
||||
{Type: "Read", File: "file1.go"},
|
||||
{Type: "Edit", File: "file1.go"},
|
||||
{Type: "Bash", Command: "go test"},
|
||||
{Type: "Read", File: "file2.go"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Format output
|
||||
output := FormatOutput(tt.input)
|
||||
|
||||
// Golden file path
|
||||
goldenPath := filepath.Join("testdata", tt.name+".golden")
|
||||
|
||||
// Update golden file if flag set
|
||||
if *update {
|
||||
if err := os.WriteFile(goldenPath, []byte(output), 0644); err != nil {
|
||||
t.Fatalf("failed to update golden file: %v", err)
|
||||
}
|
||||
t.Logf("updated golden file: %s", goldenPath)
|
||||
return
|
||||
}
|
||||
|
||||
// Load expected output
|
||||
expected, err := os.ReadFile(goldenPath)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read golden file: %v", err)
|
||||
}
|
||||
|
||||
// Compare
|
||||
if output != string(expected) {
|
||||
t.Errorf("output mismatch:\n=== GOT ===\n%s\n=== WANT ===\n%s", output, expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Usage**:
|
||||
```bash
|
||||
# Run tests normally (compares against golden files)
|
||||
go test ./...
|
||||
|
||||
# Update golden files
|
||||
go test ./... -update
|
||||
|
||||
# Review changes
|
||||
git diff testdata/
|
||||
```
|
||||
|
||||
**Benefits**:
|
||||
- Easy to maintain expected outputs
|
||||
- Visual diff of changes
|
||||
- Great for complex string outputs
|
||||
|
||||
---
|
||||
|
||||
## Example 5: Table-Driven Fixtures
|
||||
|
||||
### Shared Test Data for Multiple Tests
|
||||
|
||||
```go
|
||||
package analyzer
|
||||
|
||||
import "testing"
|
||||
|
||||
// Shared test fixtures
|
||||
var testEvents = []struct {
|
||||
name string
|
||||
events []Event
|
||||
}{
|
||||
{
|
||||
name: "tdd_pattern",
|
||||
events: []Event{
|
||||
{Type: "Write", File: "file_test.go"},
|
||||
{Type: "Bash", Command: "go test"},
|
||||
{Type: "Edit", File: "file.go"},
|
||||
{Type: "Bash", Command: "go test"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "refactor_pattern",
|
||||
events: []Event{
|
||||
{Type: "Read", File: "old.go"},
|
||||
{Type: "Write", File: "new.go"},
|
||||
{Type: "Edit", File: "new.go"},
|
||||
{Type: "Bash", Command: "go test"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Test 1 uses fixtures
|
||||
func TestDetectPatterns(t *testing.T) {
|
||||
for _, fixture := range testEvents {
|
||||
t.Run(fixture.name, func(t *testing.T) {
|
||||
patterns := DetectPatterns(fixture.events)
|
||||
|
||||
if len(patterns) == 0 {
|
||||
t.Error("no patterns detected")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test 2 uses same fixtures
|
||||
func TestAnalyzeWorkflow(t *testing.T) {
|
||||
for _, fixture := range testEvents {
|
||||
t.Run(fixture.name, func(t *testing.T) {
|
||||
workflow := AnalyzeWorkflow(fixture.events)
|
||||
|
||||
if workflow.Type == "" {
|
||||
t.Error("workflow type not detected")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Benefits**:
|
||||
- Fixtures shared across multiple test functions
|
||||
- Consistent test data
|
||||
- Easy to add new fixtures for all tests
|
||||
|
||||
---
|
||||
|
||||
## Example 6: Mock Data Generators
|
||||
|
||||
### Random Test Data Generation
|
||||
|
||||
```go
|
||||
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Generate random test events
|
||||
func generateTestEvents(t *testing.T, count int) []Event {
|
||||
t.Helper()
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
tools := []string{"Read", "Edit", "Write", "Bash", "Grep"}
|
||||
statuses := []string{"success", "error"}
|
||||
|
||||
events := make([]Event, count)
|
||||
for i := 0; i < count; i++ {
|
||||
events[i] = Event{
|
||||
Type: "tool_use",
|
||||
Tool: tools[rand.Intn(len(tools))],
|
||||
File: fmt.Sprintf("/test/file%d.go", rand.Intn(10)),
|
||||
Status: statuses[rand.Intn(len(statuses))],
|
||||
Timestamp: time.Now().Add(time.Duration(i) * time.Second).Format(time.RFC3339),
|
||||
}
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
// Usage in tests
|
||||
func TestParseEvents_LargeDataset(t *testing.T) {
|
||||
events := generateTestEvents(t, 1000)
|
||||
|
||||
parsed, err := ParseEvents(events)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("ParseEvents() error = %v", err)
|
||||
}
|
||||
|
||||
if len(parsed) != 1000 {
|
||||
t.Errorf("got %d events, want 1000", len(parsed))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnalyzeEvents_Performance(t *testing.T) {
|
||||
events := generateTestEvents(t, 10000)
|
||||
|
||||
start := time.Now()
|
||||
AnalyzeEvents(events)
|
||||
duration := time.Since(start)
|
||||
|
||||
if duration > 1*time.Second {
|
||||
t.Errorf("analysis took %v, want <1s", duration)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**When to use**:
|
||||
- Performance testing
|
||||
- Stress testing
|
||||
- Property-based testing (see the sketch below)
|
||||
- Large dataset testing
|
||||
|
||||
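For the property-based case, the standard library's `testing/quick` package can generate the random inputs. A minimal sketch follows, assuming a hypothetical `EncodeEvent`/`DecodeEvent` pair (neither function appears in this document) and checking a round-trip property:

```go
import (
	"testing"
	"testing/quick"
)

// TestEventRoundTrip checks a property over randomly generated inputs:
// encoding an event and decoding it back should yield the original value.
// EncodeEvent and DecodeEvent are hypothetical stand-ins for real serialization code.
func TestEventRoundTrip(t *testing.T) {
	roundTrip := func(tool, file, status string) bool {
		in := Event{Type: "tool_use", Tool: tool, File: file, Status: status}
		out, err := DecodeEvent(EncodeEvent(in))
		return err == nil && out == in
	}

	if err := quick.Check(roundTrip, nil); err != nil {
		t.Errorf("round-trip property failed: %v", err)
	}
}
```
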
---
|
||||
|
||||
## Example 7: Cleanup and Teardown
|
||||
|
||||
### Proper Resource Cleanup
|
||||
|
||||
```go
|
||||
func TestWithTempDirectory(t *testing.T) {
|
||||
// Using t.TempDir() (preferred)
|
||||
tmpDir := t.TempDir() // Automatically cleaned up
|
||||
|
||||
// Create test files
|
||||
testFile := filepath.Join(tmpDir, "test.txt")
|
||||
os.WriteFile(testFile, []byte("test"), 0644)
|
||||
|
||||
// Test code...
|
||||
// No manual cleanup needed
|
||||
}
|
||||
|
||||
func TestWithCleanup(t *testing.T) {
|
||||
// Using t.Cleanup() for custom cleanup
|
||||
oldValue := globalVar
|
||||
globalVar = "test"
|
||||
|
||||
t.Cleanup(func() {
|
||||
globalVar = oldValue
|
||||
})
|
||||
|
||||
// Test code...
|
||||
// globalVar will be restored automatically
|
||||
}
|
||||
|
||||
func TestWithDefer(t *testing.T) {
|
||||
// Using defer (also works)
|
||||
oldValue := globalVar
|
||||
defer func() { globalVar = oldValue }()
|
||||
|
||||
globalVar = "test"
|
||||
|
||||
// Test code...
|
||||
}
|
||||
|
||||
func TestMultipleCleanups(t *testing.T) {
|
||||
// Multiple cleanups execute in LIFO order
|
||||
t.Cleanup(func() {
|
||||
fmt.Println("cleanup 1")
|
||||
})
|
||||
|
||||
t.Cleanup(func() {
|
||||
fmt.Println("cleanup 2")
|
||||
})
|
||||
|
||||
// Test code...
|
||||
|
||||
// Output:
|
||||
// cleanup 2
|
||||
// cleanup 1
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Example 8: Integration Test Fixtures
|
||||
|
||||
### Complete Test Environment Setup
|
||||
|
||||
```go
|
||||
package integration
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Setup complete test environment
|
||||
func setupTestEnvironment(t *testing.T) *TestEnv {
|
||||
t.Helper()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
// Create directory structure
|
||||
dirs := []string{
|
||||
".claude/logs",
|
||||
".claude/tools",
|
||||
"src",
|
||||
"tests",
|
||||
}
|
||||
|
||||
for _, dir := range dirs {
|
||||
path := filepath.Join(tmpDir, dir)
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
t.Fatalf("failed to create dir %s: %v", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create test files
|
||||
sessionFile := filepath.Join(tmpDir, ".claude/logs/session.jsonl")
|
||||
testSessionData := `{"type":"tool_use","tool":"Read","file":"test.go"}
|
||||
{"type":"tool_use","tool":"Edit","file":"test.go"}
|
||||
{"type":"tool_use","tool":"Bash","command":"go test"}`
|
||||
|
||||
if err := os.WriteFile(sessionFile, []byte(testSessionData), 0644); err != nil {
|
||||
t.Fatalf("failed to create session file: %v", err)
|
||||
}
|
||||
|
||||
// Create config
|
||||
configFile := filepath.Join(tmpDir, ".claude/config.json")
|
||||
configData := `{"project":"test","version":"1.0.0"}`
|
||||
|
||||
if err := os.WriteFile(configFile, []byte(configData), 0644); err != nil {
|
||||
t.Fatalf("failed to create config: %v", err)
|
||||
}
|
||||
|
||||
return &TestEnv{
|
||||
RootDir: tmpDir,
|
||||
SessionFile: sessionFile,
|
||||
ConfigFile: configFile,
|
||||
}
|
||||
}
|
||||
|
||||
type TestEnv struct {
|
||||
RootDir string
|
||||
SessionFile string
|
||||
ConfigFile string
|
||||
}
|
||||
|
||||
// Usage in integration tests
|
||||
func TestIntegration_FullWorkflow(t *testing.T) {
|
||||
env := setupTestEnvironment(t)
|
||||
|
||||
// Run full workflow
|
||||
result, err := RunWorkflow(env.RootDir)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("RunWorkflow() error = %v", err)
|
||||
}
|
||||
|
||||
if result.EventsProcessed != 3 {
|
||||
t.Errorf("EventsProcessed = %d, want 3", result.EventsProcessed)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Best Practices for Fixtures
|
||||
|
||||
### 1. Use testdata/ Directory
|
||||
|
||||
```
|
||||
package/
|
||||
├── code.go
|
||||
├── code_test.go
|
||||
└── testdata/
|
||||
├── fixture1.json
|
||||
├── fixture2.json
|
||||
└── README.md # Document fixtures
|
||||
```
|
||||
|
||||
### 2. Name Fixtures Descriptively
|
||||
|
||||
```
|
||||
❌ data1.json, data2.json
|
||||
✅ valid_session.jsonl, invalid_session.jsonl, empty_session.jsonl
|
||||
```
|
||||
|
||||
### 3. Keep Fixtures Small
|
||||
|
||||
```go
|
||||
// Bad: 1000-line fixture
|
||||
data := loadFixture(t, "large_fixture.json")
|
||||
|
||||
// Good: Minimal fixture
|
||||
data := loadFixture(t, "minimal_valid.json")
|
||||
```
|
||||
|
||||
### 4. Document Fixtures
|
||||
|
||||
**testdata/README.md**:
|
||||
```markdown
|
||||
# Test Fixtures
|
||||
|
||||
## valid_session.jsonl
|
||||
Complete valid session with 3 tool uses (Read, Edit, Bash).
|
||||
|
||||
## invalid_session.jsonl
|
||||
Session with malformed JSON on line 2 (for error testing).
|
||||
|
||||
## empty_session.jsonl
|
||||
Empty file (for edge case testing).
|
||||
```
|
||||
|
||||
### 5. Use Helpers for Variations
|
||||
|
||||
```go
|
||||
func createTestEvent(t *testing.T, options ...func(*Event)) *Event {
|
||||
t.Helper()
|
||||
|
||||
event := &Event{
|
||||
Type: "tool_use",
|
||||
Tool: "Read",
|
||||
Status: "success",
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
opt(event)
|
||||
}
|
||||
|
||||
return event
|
||||
}
|
||||
|
||||
// Option functions
|
||||
func WithTool(tool string) func(*Event) {
|
||||
return func(e *Event) { e.Tool = tool }
|
||||
}
|
||||
|
||||
func WithStatus(status string) func(*Event) {
|
||||
return func(e *Event) { e.Status = status }
|
||||
}
|
||||
|
||||
// Usage
|
||||
event1 := createTestEvent(t) // Default
|
||||
event2 := createTestEvent(t, WithTool("Edit"))
|
||||
event3 := createTestEvent(t, WithTool("Bash"), WithStatus("error"))
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Fixture Efficiency Comparison
|
||||
|
||||
| Approach | Time to Create Test | Maintainability | Flexibility |
|
||||
|----------|---------------------|-----------------|-------------|
|
||||
| **Inline data** | Fast (2-3 min) | Low (duplicated) | High |
|
||||
| **Helper functions** | Medium (5 min) | High (reusable) | Very High |
|
||||
| **Fixture files** | Slow (10 min) | Very High (centralized) | Medium |
|
||||
| **Builder pattern** | Medium (8 min) | High (composable) | Very High |
|
||||
| **Golden files** | Fast (2 min) | Very High (visual diff) | Low |
|
||||
|
||||
**Recommendation**: Use fixture files for complex data, helpers for variations, inline for simple cases.
|
||||
|
||||
---
|
||||
|
||||
**Source**: Bootstrap-002 Test Strategy Development
|
||||
**Framework**: BAIME (Bootstrapped AI Methodology Engineering)
|
||||
**Status**: Production-ready, validated through 4 iterations
|
||||
# Gap Closure Walkthrough: 72% → 80% Coverage
|
||||
|
||||
**Project**: meta-cc CLI tool
|
||||
**Starting Coverage**: 72.1%
|
||||
**Target Coverage**: 80%+
|
||||
**Duration**: 4 iterations (3-4 hours total)
|
||||
**Outcome**: 72.5% (+0.4% net, after adding new features)
|
||||
|
||||
This document provides a complete walkthrough of improving test coverage using the gap closure methodology.
|
||||
|
||||
---
|
||||
|
||||
## Iteration 0: Baseline
|
||||
|
||||
### Initial State
|
||||
|
||||
```bash
|
||||
$ go test -coverprofile=coverage.out ./...
|
||||
ok github.com/yaleh/meta-cc/cmd/meta-cc 0.234s coverage: 55.2% of statements
|
||||
ok github.com/yaleh/meta-cc/internal/analyzer 0.156s coverage: 68.7% of statements
|
||||
ok github.com/yaleh/meta-cc/internal/parser 0.098s coverage: 82.3% of statements
|
||||
ok github.com/yaleh/meta-cc/internal/query 0.145s coverage: 65.3% of statements
|
||||
total: (statements) 72.1%
|
||||
```
|
||||
|
||||
### Problems Identified
|
||||
|
||||
```
|
||||
Low Coverage Packages:
|
||||
1. cmd/meta-cc (55.2%) - CLI command handlers
|
||||
2. internal/query (65.3%) - Query executor and filters
|
||||
3. internal/analyzer (68.7%) - Pattern detection
|
||||
|
||||
Zero Coverage Functions (15 total):
|
||||
- cmd/meta-cc: 7 functions (flag parsing, command execution)
|
||||
- internal/query: 5 functions (filter validation, query execution)
|
||||
- internal/analyzer: 3 functions (pattern matching)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Iteration 1: Low-Hanging Fruit (CLI Commands)
|
||||
|
||||
### Goal
|
||||
|
||||
Improve cmd/meta-cc coverage from 55.2% to 70%+ by testing command handlers.
|
||||
|
||||
### Analysis
|
||||
|
||||
```bash
|
||||
$ go tool cover -func=coverage.out | grep "cmd/meta-cc" | grep "0.0%"
|
||||
|
||||
cmd/meta-cc/root.go:25: initGlobalFlags 0.0%
|
||||
cmd/meta-cc/root.go:42: Execute 0.0%
|
||||
cmd/meta-cc/query.go:15: newQueryCmd 0.0%
|
||||
cmd/meta-cc/query.go:45: executeQuery 0.0%
|
||||
cmd/meta-cc/stats.go:12: newStatsCmd 0.0%
|
||||
cmd/meta-cc/stats.go:28: executeStats 0.0%
|
||||
cmd/meta-cc/version.go:10: newVersionCmd 0.0%
|
||||
```
|
||||
|
||||
### Test Plan
|
||||
|
||||
```
|
||||
Session 1: CLI Command Testing
|
||||
Time Budget: 90 minutes
|
||||
|
||||
Tests:
|
||||
1. TestNewQueryCmd (CLI Command pattern) - 15 min
|
||||
2. TestExecuteQuery (Integration pattern) - 20 min
|
||||
3. TestNewStatsCmd (CLI Command pattern) - 15 min
|
||||
4. TestExecuteStats (Integration pattern) - 20 min
|
||||
5. TestNewVersionCmd (CLI Command pattern) - 10 min
|
||||
|
||||
Buffer: 10 minutes
|
||||
```
|
||||
|
||||
### Implementation
|
||||
|
||||
#### Test 1: TestNewQueryCmd
|
||||
|
||||
```bash
|
||||
$ ./scripts/generate-test.sh newQueryCmd --pattern cli-command \
|
||||
--package cmd/meta-cc --output cmd/meta-cc/query_test.go
|
||||
```
|
||||
|
||||
**Generated (with TODOs filled in)**:
|
||||
```go
|
||||
func TestNewQueryCmd(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
wantErr bool
|
||||
wantOutput string
|
||||
}{
|
||||
{
|
||||
name: "no args",
|
||||
args: []string{},
|
||||
wantErr: true,
|
||||
wantOutput: "requires a query type",
|
||||
},
|
||||
{
|
||||
name: "query tools",
|
||||
args: []string{"tools"},
|
||||
wantErr: false,
|
||||
wantOutput: "tool_name",
|
||||
},
|
||||
{
|
||||
name: "query with filter",
|
||||
args: []string{"tools", "--status", "error"},
|
||||
wantErr: false,
|
||||
wantOutput: "error",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Setup: Create command
|
||||
cmd := newQueryCmd()
|
||||
cmd.SetArgs(tt.args)
|
||||
|
||||
// Setup: Capture output
|
||||
var buf bytes.Buffer
|
||||
cmd.SetOut(&buf)
|
||||
cmd.SetErr(&buf)
|
||||
|
||||
// Execute
|
||||
err := cmd.Execute()
|
||||
|
||||
// Assert: Error expectation
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Execute() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
|
||||
// Assert: Output contains expected string
|
||||
output := buf.String()
|
||||
if !strings.Contains(output, tt.wantOutput) {
|
||||
t.Errorf("output doesn't contain %q: %s", tt.wantOutput, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Time**: 18 minutes (vs 15 estimated)
|
||||
**Result**: PASS
|
||||
|
||||
#### Test 2-5: Similar Pattern
|
||||
|
||||
Tests 2-5 followed similar structure, each taking 12-22 minutes.
|
||||
|
||||
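As a reference point, the simplest of those tests, `TestNewVersionCmd`, likely followed the same CLI Command pattern; the sketch below is illustrative rather than the actual test, and it only assumes what the integration tests elsewhere in this skill assume (that the version output mentions `meta-cc`):

```go
func TestNewVersionCmd(t *testing.T) {
	cmd := newVersionCmd()

	var buf bytes.Buffer
	cmd.SetOut(&buf)
	cmd.SetArgs([]string{})

	if err := cmd.Execute(); err != nil {
		t.Fatalf("Execute() error = %v", err)
	}

	// The version command is expected to at least identify the tool.
	if !strings.Contains(buf.String(), "meta-cc") {
		t.Errorf("version output %q doesn't mention meta-cc", buf.String())
	}
}
```
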
### Results
|
||||
|
||||
```bash
|
||||
$ go test ./cmd/meta-cc/... -v
|
||||
=== RUN TestNewQueryCmd
|
||||
=== RUN TestNewQueryCmd/no_args
|
||||
=== RUN TestNewQueryCmd/query_tools
|
||||
=== RUN TestNewQueryCmd/query_with_filter
|
||||
--- PASS: TestNewQueryCmd (0.12s)
|
||||
=== RUN TestExecuteQuery
|
||||
--- PASS: TestExecuteQuery (0.08s)
|
||||
=== RUN TestNewStatsCmd
|
||||
--- PASS: TestNewStatsCmd (0.05s)
|
||||
=== RUN TestExecuteStats
|
||||
--- PASS: TestExecuteStats (0.07s)
|
||||
=== RUN TestNewVersionCmd
|
||||
--- PASS: TestNewVersionCmd (0.02s)
|
||||
PASS
|
||||
ok github.com/yaleh/meta-cc/cmd/meta-cc 0.412s coverage: 72.8% of statements
|
||||
|
||||
$ go test -cover ./...
|
||||
total: (statements) 73.2%
|
||||
```
|
||||
|
||||
**Iteration 1 Summary**:
|
||||
- Time: 85 minutes (vs 90 estimated)
|
||||
- Coverage: 72.1% → 73.2% (+1.1%)
|
||||
- Package: cmd/meta-cc 55.2% → 72.8% (+17.6%)
|
||||
- Tests added: 5 test functions, 12 test cases
|
||||
|
||||
---
|
||||
|
||||
## Iteration 2: Error Handling (Query Validation)
|
||||
|
||||
### Goal
|
||||
|
||||
Improve internal/query coverage from 65.3% to 75%+ by testing validation functions.
|
||||
|
||||
### Analysis
|
||||
|
||||
```bash
|
||||
$ go tool cover -func=coverage.out | grep "internal/query" | awk '$NF+0 < 60.0'
|
||||
|
||||
internal/query/filters.go:18: ValidateFilter 0.0%
|
||||
internal/query/filters.go:42: ParseTimeRange 33.3%
|
||||
internal/query/executor.go:25: ValidateQuery 0.0%
|
||||
internal/query/executor.go:58: ExecuteQuery 45.2%
|
||||
```
|
||||
|
||||
### Test Plan
|
||||
|
||||
```
|
||||
Session 2: Query Validation Error Paths
|
||||
Time Budget: 75 minutes
|
||||
|
||||
Tests:
|
||||
1. TestValidateFilter (Error Path + Table-Driven) - 15 min
|
||||
2. TestParseTimeRange (Error Path + Table-Driven) - 15 min
|
||||
3. TestValidateQuery (Error Path + Table-Driven) - 15 min
|
||||
4. TestExecuteQuery edge cases - 20 min
|
||||
|
||||
Buffer: 10 minutes
|
||||
```
|
||||
|
||||
### Implementation
|
||||
|
||||
#### Test 1: TestValidateFilter
|
||||
|
||||
```bash
|
||||
$ ./scripts/generate-test.sh ValidateFilter --pattern error-path --scenarios 5
|
||||
```
|
||||
|
||||
```go
|
||||
func TestValidateFilter_ErrorCases(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
filter *Filter
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "nil filter",
|
||||
filter: nil,
|
||||
wantErr: true,
|
||||
errMsg: "filter cannot be nil",
|
||||
},
|
||||
{
|
||||
name: "empty field",
|
||||
filter: &Filter{Field: "", Value: "test"},
|
||||
wantErr: true,
|
||||
errMsg: "field cannot be empty",
|
||||
},
|
||||
{
|
||||
name: "invalid operator",
|
||||
filter: &Filter{Field: "status", Operator: "invalid", Value: "test"},
|
||||
wantErr: true,
|
||||
errMsg: "invalid operator",
|
||||
},
|
||||
{
|
||||
name: "invalid time format",
|
||||
filter: &Filter{Field: "timestamp", Operator: ">=", Value: "not-a-time"},
|
||||
wantErr: true,
|
||||
errMsg: "invalid time format",
|
||||
},
|
||||
{
|
||||
name: "valid filter",
|
||||
filter: &Filter{Field: "status", Operator: "=", Value: "error"},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := ValidateFilter(tt.filter)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ValidateFilter() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
|
||||
if tt.wantErr && !strings.Contains(err.Error(), tt.errMsg) {
|
||||
t.Errorf("expected error containing '%s', got '%s'", tt.errMsg, err.Error())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Time**: 14 minutes
|
||||
**Result**: PASS, 1 bug found (missing nil check)
|
||||
|
||||
#### Bug Found During Testing
|
||||
|
||||
The test revealed ValidateFilter didn't handle nil input. Fixed:
|
||||
|
||||
```go
|
||||
func ValidateFilter(filter *Filter) error {
|
||||
// BUG FIX: Add nil check
|
||||
if filter == nil {
|
||||
return fmt.Errorf("filter cannot be nil")
|
||||
}
|
||||
|
||||
if filter.Field == "" {
|
||||
return fmt.Errorf("field cannot be empty")
|
||||
}
|
||||
// ... rest of validation
|
||||
}
|
||||
```
|
||||
|
||||
This demonstrates a key **value of TDD**: the test revealed the bug before it could cause production issues.
|
||||
|
||||
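The other planned tests are not reproduced here. For completeness, a sketch of what `TestParseTimeRange` (Test 2) might look like is shown below; the two-string signature, the RFC 3339 layout, and the error cases are assumptions, since `ParseTimeRange` itself is not shown:

```go
func TestParseTimeRange(t *testing.T) {
	// Assumed signature: ParseTimeRange(from, to string) (TimeRange, error).
	tests := []struct {
		name    string
		from    string
		to      string
		wantErr bool
	}{
		{"valid range", "2025-10-01T00:00:00Z", "2025-10-18T00:00:00Z", false},
		{"from after to", "2025-10-18T00:00:00Z", "2025-10-01T00:00:00Z", true},
		{"malformed from", "not-a-time", "2025-10-18T00:00:00Z", true},
		{"empty range", "", "", true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, err := ParseTimeRange(tt.from, tt.to)
			if (err != nil) != tt.wantErr {
				t.Errorf("ParseTimeRange() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
```
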
### Results
|
||||
|
||||
```bash
|
||||
$ go test ./internal/query/... -v
|
||||
=== RUN TestValidateFilter_ErrorCases
|
||||
--- PASS: TestValidateFilter_ErrorCases (0.00s)
|
||||
=== RUN TestParseTimeRange
|
||||
--- PASS: TestParseTimeRange (0.01s)
|
||||
=== RUN TestValidateQuery
|
||||
--- PASS: TestValidateQuery (0.00s)
|
||||
=== RUN TestExecuteQuery
|
||||
--- PASS: TestExecuteQuery (0.15s)
|
||||
PASS
|
||||
ok github.com/yaleh/meta-cc/internal/query 0.187s coverage: 78.3% of statements
|
||||
|
||||
$ go test -cover ./...
|
||||
total: (statements) 74.5%
|
||||
```
|
||||
|
||||
**Iteration 2 Summary**:
|
||||
- Time: 68 minutes (vs 75 estimated)
|
||||
- Coverage: 73.2% → 74.5% (+1.3%)
|
||||
- Package: internal/query 65.3% → 78.3% (+13.0%)
|
||||
- Tests added: 4 test functions, 15 test cases
|
||||
- **Bugs found: 1** (nil pointer issue)
|
||||
|
||||
---
|
||||
|
||||
## Iteration 3: Pattern Detection (Analyzer)
|
||||
|
||||
### Goal
|
||||
|
||||
Improve internal/analyzer coverage from 68.7% to 75%+.
|
||||
|
||||
### Analysis
|
||||
|
||||
```bash
|
||||
$ go tool cover -func=coverage.out | grep "internal/analyzer" | grep "0.0%"
|
||||
|
||||
internal/analyzer/patterns.go:20: DetectPatterns 0.0%
|
||||
internal/analyzer/patterns.go:45: MatchPattern 0.0%
|
||||
internal/analyzer/sequences.go:15: FindSequences 0.0%
|
||||
```
|
||||
|
||||
### Test Plan
|
||||
|
||||
```
|
||||
Session 3: Analyzer Pattern Detection
|
||||
Time Budget: 90 minutes
|
||||
|
||||
Tests:
|
||||
1. TestDetectPatterns (Table-Driven) - 20 min
|
||||
2. TestMatchPattern (Table-Driven) - 20 min
|
||||
3. TestFindSequences (Integration) - 25 min
|
||||
|
||||
Buffer: 25 minutes
|
||||
```
|
||||
|
||||
### Implementation
|
||||
|
||||
#### Test 1: TestDetectPatterns
|
||||
|
||||
```go
|
||||
func TestDetectPatterns(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
events []Event
|
||||
expected []Pattern
|
||||
}{
|
||||
{
|
||||
name: "empty events",
|
||||
events: []Event{},
|
||||
expected: []Pattern{},
|
||||
},
|
||||
{
|
||||
name: "single pattern",
|
||||
events: []Event{
|
||||
{Type: "Read", Target: "file.go"},
|
||||
{Type: "Edit", Target: "file.go"},
|
||||
{Type: "Bash", Command: "go test"},
|
||||
},
|
||||
expected: []Pattern{
|
||||
{Name: "TDD", Confidence: 0.8},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple patterns",
|
||||
events: []Event{
|
||||
{Type: "Read", Target: "file.go"},
|
||||
{Type: "Write", Target: "file_test.go"},
|
||||
{Type: "Bash", Command: "go test"},
|
||||
{Type: "Edit", Target: "file.go"},
|
||||
},
|
||||
expected: []Pattern{
|
||||
{Name: "TDD", Confidence: 0.9},
|
||||
{Name: "Test-First", Confidence: 0.85},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
patterns := DetectPatterns(tt.events)
|
||||
|
||||
if len(patterns) != len(tt.expected) {
|
||||
t.Errorf("got %d patterns, want %d", len(patterns), len(tt.expected))
|
||||
return
|
||||
}
|
||||
|
||||
for i, pattern := range patterns {
|
||||
if pattern.Name != tt.expected[i].Name {
|
||||
t.Errorf("pattern[%d].Name = %s, want %s",
|
||||
i, pattern.Name, tt.expected[i].Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Time**: 22 minutes
|
||||
**Result**: PASS
|
||||
|
||||
### Results
|
||||
|
||||
```bash
|
||||
$ go test ./internal/analyzer/... -v
|
||||
=== RUN TestDetectPatterns
|
||||
--- PASS: TestDetectPatterns (0.02s)
|
||||
=== RUN TestMatchPattern
|
||||
--- PASS: TestMatchPattern (0.01s)
|
||||
=== RUN TestFindSequences
|
||||
--- PASS: TestFindSequences (0.03s)
|
||||
PASS
|
||||
ok github.com/yaleh/meta-cc/internal/analyzer 0.078s coverage: 76.4% of statements
|
||||
|
||||
$ go test -cover ./...
|
||||
total: (statements) 75.8%
|
||||
```
|
||||
|
||||
**Iteration 3 Summary**:
|
||||
- Time: 78 minutes (vs 90 estimated)
|
||||
- Coverage: 74.5% → 75.8% (+1.3%)
|
||||
- Package: internal/analyzer 68.7% → 76.4% (+7.7%)
|
||||
- Tests added: 3 test functions, 8 test cases
|
||||
|
||||
---
|
||||
|
||||
## Iteration 4: Edge Cases and Integration
|
||||
|
||||
### Goal
|
||||
|
||||
Add edge cases and integration tests to push coverage above 76%.
|
||||
|
||||
### Analysis
|
||||
|
||||
Reviewed coverage HTML report to find branches not covered:
|
||||
|
||||
```bash
|
||||
$ go tool cover -html=coverage.out
|
||||
# Identified 8 uncovered branches across packages
|
||||
```
|
||||
|
||||
### Test Plan
|
||||
|
||||
```
|
||||
Session 4: Edge Cases and Integration
|
||||
Time Budget: 60 minutes
|
||||
|
||||
Add edge cases to existing tests:
|
||||
1. Nil pointer checks - 15 min
|
||||
2. Empty input cases - 15 min
|
||||
3. Integration test (full workflow) - 25 min
|
||||
|
||||
Buffer: 5 minutes
|
||||
```
|
||||
|
||||
### Implementation
|
||||
|
||||
Added edge cases to existing test functions (a representative sketch follows this list):
|
||||
- Nil input handling
|
||||
- Empty collections
|
||||
- Boundary values
|
||||
- Concurrent access
|
||||
|
||||
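A representative sketch of the kind of additions made in this iteration, using the analyzer's `DetectPatterns` as the example (the test name, the nil-safety expectation, and the concurrency expectation are assumptions; the real additions were spread across existing tests):

```go
// Requires "sync" in the test file's imports.
func TestDetectPatterns_EdgeCases(t *testing.T) {
	// Nil input: should not panic and should return no patterns.
	if got := DetectPatterns(nil); len(got) != 0 {
		t.Errorf("DetectPatterns(nil) = %v, want empty", got)
	}

	// Concurrent access: the detector is assumed safe for parallel callers.
	events := []Event{{Type: "Read", Target: "file.go"}}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			DetectPatterns(events)
		}()
	}
	wg.Wait()
}
```
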
### Results
|
||||
|
||||
```bash
|
||||
$ go test -cover ./...
|
||||
total: (statements) 76.2%
|
||||
```
|
||||
|
||||
However, new features were added during testing, which added uncovered code:
|
||||
|
||||
```bash
|
||||
$ git diff --stat HEAD~4
|
||||
cmd/meta-cc/analyze.go | 45 ++++++++++++++++++++
|
||||
internal/analyzer/confidence.go | 32 ++++++++++++++
|
||||
# ... 150 lines of new code added
|
||||
```
|
||||
|
||||
**Final coverage after accounting for new features**: 72.5%
|
||||
**(Net change: +0.4%, but would have been +4.1% without new features)**
|
||||
|
||||
**Iteration 4 Summary**:
|
||||
- Time: 58 minutes (vs 60 estimated)
|
||||
- Coverage: 75.8% → 76.2% → 72.5% (after new features)
|
||||
- Tests added: 12 new test cases (additions to existing tests)
|
||||
|
||||
---
|
||||
|
||||
## Overall Results
|
||||
|
||||
### Coverage Progression
|
||||
|
||||
```
|
||||
Iteration 0 (Baseline): 72.1%
|
||||
Iteration 1 (CLI): 73.2% (+1.1%)
|
||||
Iteration 2 (Validation): 74.5% (+1.3%)
|
||||
Iteration 3 (Analyzer): 75.8% (+1.3%)
|
||||
Iteration 4 (Edge Cases): 76.2% (+0.4%)
|
||||
After New Features: 72.5% (+0.4% net)
|
||||
```
|
||||
|
||||
### Time Investment
|
||||
|
||||
```
|
||||
Iteration 1: 85 min (CLI commands)
|
||||
Iteration 2: 68 min (validation error paths)
|
||||
Iteration 3: 78 min (pattern detection)
|
||||
Iteration 4: 58 min (edge cases)
|
||||
-----------
|
||||
Total: 289 min (4.8 hours)
|
||||
```
|
||||
|
||||
### Tests Added
|
||||
|
||||
```
|
||||
Test Functions: 12
|
||||
Test Cases: 47
|
||||
Lines of Test Code: ~850
|
||||
```
|
||||
|
||||
### Efficiency Metrics
|
||||
|
||||
```
|
||||
Time per test function: 24 min average
|
||||
Time per test case: 6.1 min average
|
||||
Coverage per hour: ~0.8%
|
||||
Tests per hour: ~10 test cases
|
||||
```
|
||||
|
||||
### Key Learnings
|
||||
|
||||
1. **CLI testing is high-impact**: +17.6% package coverage in 85 minutes
|
||||
2. **Error path testing finds bugs**: Found 1 nil pointer bug
|
||||
3. **Table-driven tests are efficient**: 6-7 scenarios in 12-15 minutes
|
||||
4. **Integration tests are slower**: 20-25 min but valuable for end-to-end validation
|
||||
5. **New features dilute coverage**: +150 LOC added → coverage dropped 3.7%
|
||||
|
||||
---
|
||||
|
||||
## Methodology Validation
|
||||
|
||||
### What Worked Well
|
||||
|
||||
✅ **Automation tools saved 30-40 min per session**
|
||||
- Coverage analyzer identified priorities instantly
|
||||
- Test generator provided scaffolds
|
||||
- Combined workflow was seamless
|
||||
|
||||
✅ **Pattern-based approach was consistent**
|
||||
- CLI Command pattern: 13-18 min per test
|
||||
- Error Path + Table-Driven: 14-16 min per test
|
||||
- Integration tests: 20-25 min per test
|
||||
|
||||
✅ **Incremental approach manageable**
|
||||
- 1-hour sessions were sustainable
|
||||
- Clear goals kept focus
|
||||
- Buffer time absorbed surprises
|
||||
|
||||
### What Could Improve
|
||||
|
||||
⚠️ **Coverage accounting for new features**
|
||||
- Need to track "gross coverage gain" vs "net coverage"
|
||||
- Should separate "coverage improvement" from "feature addition"
|
||||
|
||||
⚠️ **Integration test isolation**
|
||||
- Some integration tests were brittle
|
||||
- Need better test data fixtures
|
||||
|
||||
⚠️ **Time estimates**
|
||||
- CLI tests: actual 18 min vs estimated 15 min (+20%)
|
||||
- Should adjust estimates for "filling in TODOs"
|
||||
|
||||
---
|
||||
|
||||
## Recommendations
|
||||
|
||||
### For Similar Projects
|
||||
|
||||
1. **Start with CLI handlers**: High visibility, high impact
|
||||
2. **Focus on error paths early**: Find bugs, high ROI
|
||||
3. **Use table-driven tests**: 3-5 scenarios in one test function
|
||||
4. **Track gross vs net coverage**: Account for new feature additions (see the sketch after this list)
|
||||
5. **1-hour sessions**: Sustainable, maintains focus
|
||||
|
||||
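One lightweight way to follow recommendation 4 is to save `go tool cover -func=coverage.out` output before and after a change and diff the totals. The helper below is a hypothetical sketch (the `covdelta` name and the file-based workflow are not part of the original methodology):

```go
// covdelta prints the change in total coverage between two saved
// `go tool cover -func=coverage.out` reports.
// Usage: go run covdelta.go before.txt after.txt
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// totalPercent extracts the percentage from the "total:" line of a -func report.
func totalPercent(path string) (float64, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	for _, line := range strings.Split(string(data), "\n") {
		if strings.HasPrefix(line, "total:") {
			fields := strings.Fields(line)
			pct := strings.TrimSuffix(fields[len(fields)-1], "%")
			return strconv.ParseFloat(pct, 64)
		}
	}
	return 0, fmt.Errorf("no total line in %s", path)
}

func main() {
	before, err := totalPercent(os.Args[1])
	if err != nil {
		panic(err)
	}
	after, err := totalPercent(os.Args[2])
	if err != nil {
		panic(err)
	}
	fmt.Printf("coverage: %.1f%% -> %.1f%% (net %+.1f points)\n", before, after, after-before)
}
```
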
### For Mature Projects (>75% coverage)
|
||||
|
||||
1. **Focus on edge cases**: Diminishing returns on new functions
|
||||
2. **Add integration tests**: End-to-end validation
|
||||
3. **Don't chase 100%**: 80-85% is healthy target
|
||||
4. **Refactor hard-to-test code**: If <50% coverage, consider refactor
|
||||
|
||||
---
|
||||
|
||||
**Source**: Bootstrap-002 Test Strategy Development (Real Experiment Data)
|
||||
**Framework**: BAIME (Bootstrapped AI Methodology Engineering)
|
||||
**Status**: Complete, validated through 4 iterations