skills/go-concurrency/SKILL.md

---
name: go-concurrency
description: Advanced concurrency patterns with goroutines, channels, context, and synchronization primitives. Use when working with concurrent Go code, implementing parallel processing, or debugging race conditions.
---

# Go Concurrency Skill

This skill provides expert guidance on Go's concurrency primitives and patterns, covering goroutines, channels, synchronization, and best practices for building concurrent systems.

## When to Use

Activate this skill when:
- Implementing concurrent/parallel processing
- Working with goroutines and channels
- Using synchronization primitives (mutexes, wait groups, etc.)
- Debugging race conditions
- Optimizing concurrent performance
- Implementing worker pools or pipelines
- Handling context cancellation

## Goroutine Fundamentals

### Basic Goroutines

```go
// Simple goroutine
go func() {
    fmt.Println("Hello from goroutine")
}()

// Goroutine with parameters
go func(msg string) {
    fmt.Println(msg)
}("Hello")

// Goroutine with closure
message := "Hello"
go func() {
    fmt.Println(message) // Captures message
}()
```

### Common Pitfalls

```go
// ❌ BAD: Loop variable capture (Go 1.21 and earlier)
for i := 0; i < 5; i++ {
    go func() {
        fmt.Println(i) // All goroutines may print 5
    }()
}

// ✅ GOOD: Pass as parameter
for i := 0; i < 5; i++ {
    go func(n int) {
        fmt.Println(n) // Each prints correct value
    }(i)
}

// ✅ GOOD: Create local copy
for i := 0; i < 5; i++ {
    i := i // Create new variable
    go func() {
        fmt.Println(i)
    }()
}
```

Note: since Go 1.22 the loop variable is a fresh variable on every iteration, so the capture pitfall above only affects code built against earlier language versions.

## Channel Patterns

### Channel Types

```go
// Unbuffered channel (synchronous)
ch := make(chan int)

// Buffered channel (asynchronous up to buffer size)
ch := make(chan int, 10)

// Send-only channel
func send(ch chan<- int) {
    ch <- 42
}

// Receive-only channel
func receive(ch <-chan int) {
    value := <-ch
}

// Bidirectional channel
ch := make(chan int)
```

### Channel Operations

```go
// Send
ch <- value

// Receive
value := <-ch

// Receive with ok check
value, ok := <-ch
if !ok {
    // Channel closed
}

// Close channel
close(ch)

// Range over channel
for value := range ch {
    fmt.Println(value)
}
```

### Select Statement

```go
// Wait for first available operation
select {
case msg1 := <-ch1:
    fmt.Println("Received from ch1:", msg1)
case msg2 := <-ch2:
    fmt.Println("Received from ch2:", msg2)
case ch3 <- value:
    fmt.Println("Sent to ch3")
default:
    fmt.Println("No channels ready")
}

// Timeout pattern
select {
case result := <-ch:
    return result, nil
case <-time.After(5 * time.Second):
    return nil, errors.New("timeout")
}

// Context cancellation
select {
case result := <-ch:
    return result, nil
case <-ctx.Done():
    return nil, ctx.Err()
}
```

## Synchronization Primitives

### Mutex

```go
type SafeCounter struct {
    mu    sync.Mutex
    count int
}

func (c *SafeCounter) Increment() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.count++
}

func (c *SafeCounter) Value() int {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.count
}
```

### RWMutex

```go
type Cache struct {
    mu    sync.RWMutex
    items map[string]interface{}
}

func (c *Cache) Get(key string) (interface{}, bool) {
    c.mu.RLock() // Multiple readers allowed
    defer c.mu.RUnlock()
    value, ok := c.items[key]
    return value, ok
}

func (c *Cache) Set(key string, value interface{}) {
    c.mu.Lock() // Exclusive write access
    defer c.mu.Unlock()
    c.items[key] = value
}
```

### WaitGroup

```go
func processItems(items []Item) {
    var wg sync.WaitGroup

    for _, item := range items {
        wg.Add(1)
        go func(item Item) {
            defer wg.Done()
            process(item)
        }(item)
    }

    wg.Wait() // Wait for all goroutines
}
```

### Once

```go
type Database struct {
    instance *sql.DB
    once     sync.Once
}

func (d *Database) GetConnection() *sql.DB {
    d.once.Do(func() {
        var err error
        d.instance, err = sql.Open("postgres", "connection-string")
        if err != nil {
            log.Fatalf("open database: %v", err) // handle the error instead of discarding it
        }
    })
    return d.instance
}
```
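
Since Go 1.21, `sync.OnceValues` can express the same lazy-initialization idea more compactly. A minimal sketch (the connection string is a placeholder):

```go
var getDB = sync.OnceValues(func() (*sql.DB, error) {
    return sql.Open("postgres", "connection-string")
})

func handler() error {
    db, err := getDB() // sql.Open runs exactly once; result and error are cached
    if err != nil {
        return err
    }
    _ = db // use the connection
    return nil
}
```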

## Concurrency Patterns

### Worker Pool

```go
type WorkerPool struct {
    workerCount int
    jobs        chan Job
    results     chan Result
    wg          sync.WaitGroup
}

type Job struct {
    ID   int
    Data interface{}
}

type Result struct {
    JobID int
    Value interface{}
    Error error
}

func NewWorkerPool(workerCount int) *WorkerPool {
    return &WorkerPool{
        workerCount: workerCount,
        jobs:        make(chan Job, 100),
        results:     make(chan Result, 100),
    }
}

func (p *WorkerPool) Start(ctx context.Context) {
    for i := 0; i < p.workerCount; i++ {
        p.wg.Add(1)
        go p.worker(ctx)
    }
}

func (p *WorkerPool) worker(ctx context.Context) {
    defer p.wg.Done()

    for {
        select {
        case job, ok := <-p.jobs:
            if !ok {
                return
            }
            result := processJob(job)
            select {
            case p.results <- result:
            case <-ctx.Done():
                return
            }
        case <-ctx.Done():
            return
        }
    }
}

func (p *WorkerPool) Submit(job Job) {
    p.jobs <- job
}

func (p *WorkerPool) Results() <-chan Result {
    return p.results
}

func (p *WorkerPool) Close() {
    close(p.jobs)
    p.wg.Wait()
    close(p.results)
}

// Usage
ctx := context.Background()
pool := NewWorkerPool(10)
pool.Start(ctx)

for i := 0; i < 100; i++ {
    pool.Submit(Job{ID: i, Data: fmt.Sprintf("job-%d", i)})
}

go func() {
    for result := range pool.Results() {
        if result.Error != nil {
            log.Printf("Job %d failed: %v", result.JobID, result.Error)
        } else {
            log.Printf("Job %d completed: %v", result.JobID, result.Value)
        }
    }
}()

pool.Close()
```

### Pipeline Pattern

```go
// Generator stage
func generator(ctx context.Context, nums ...int) <-chan int {
    out := make(chan int)
    go func() {
        defer close(out)
        for _, n := range nums {
            select {
            case out <- n:
            case <-ctx.Done():
                return
            }
        }
    }()
    return out
}

// Processing stage
func square(ctx context.Context, in <-chan int) <-chan int {
    out := make(chan int)
    go func() {
        defer close(out)
        for n := range in {
            select {
            case out <- n * n:
            case <-ctx.Done():
                return
            }
        }
    }()
    return out
}

// Another processing stage
func double(ctx context.Context, in <-chan int) <-chan int {
    out := make(chan int)
    go func() {
        defer close(out)
        for n := range in {
            select {
            case out <- n * 2:
            case <-ctx.Done():
                return
            }
        }
    }()
    return out
}

// Usage - compose pipeline
ctx := context.Background()
numbers := generator(ctx, 1, 2, 3, 4, 5)
squared := square(ctx, numbers)
doubled := double(ctx, squared)

for result := range doubled {
    fmt.Println(result)
}
```

### Fan-Out/Fan-In

```go
// Fan-out: distribute work to multiple goroutines
func fanOut(ctx context.Context, input <-chan int, workers int) []<-chan int {
    channels := make([]<-chan int, workers)

    for i := 0; i < workers; i++ {
        channels[i] = worker(ctx, input)
    }

    return channels
}

func worker(ctx context.Context, input <-chan int) <-chan int {
    output := make(chan int)
    go func() {
        defer close(output)
        for n := range input {
            select {
            case output <- expensiveOperation(n):
            case <-ctx.Done():
                return
            }
        }
    }()
    return output
}

// Fan-in: merge multiple channels into one
func fanIn(ctx context.Context, channels ...<-chan int) <-chan int {
    var wg sync.WaitGroup
    output := make(chan int)

    multiplex := func(ch <-chan int) {
        defer wg.Done()
        for n := range ch {
            select {
            case output <- n:
            case <-ctx.Done():
                return
            }
        }
    }

    wg.Add(len(channels))
    for _, ch := range channels {
        go multiplex(ch)
    }

    go func() {
        wg.Wait()
        close(output)
    }()

    return output
}

// Usage
ctx := context.Background()
input := generator(ctx, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)

// Fan-out to 3 workers
workers := fanOut(ctx, input, 3)

// Fan-in results
results := fanIn(ctx, workers...)

for result := range results {
    fmt.Println(result)
}
```

### Semaphore Pattern

```go
type Semaphore struct {
    sem chan struct{}
}

func NewSemaphore(maxConcurrency int) *Semaphore {
    return &Semaphore{
        sem: make(chan struct{}, maxConcurrency),
    }
}

func (s *Semaphore) Acquire() {
    s.sem <- struct{}{}
}

func (s *Semaphore) Release() {
    <-s.sem
}

// Usage
sem := NewSemaphore(5) // Max 5 concurrent operations

for _, item := range items {
    sem.Acquire()
    go func(item Item) {
        defer sem.Release()
        process(item)
    }(item)
}
```
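
The channel-based semaphore above cannot be interrupted while waiting. `golang.org/x/sync/semaphore` offers a context-aware weighted semaphore; a minimal sketch, assuming the same `items` and `process` as above:

```go
import "golang.org/x/sync/semaphore"

sem := semaphore.NewWeighted(5) // at most 5 concurrent operations

for _, item := range items {
    if err := sem.Acquire(ctx, 1); err != nil {
        break // ctx was cancelled while waiting for a slot
    }
    go func(item Item) {
        defer sem.Release(1)
        process(item)
    }(item)
}
```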

### Rate Limiting

```go
// Token bucket rate limiter
type RateLimiter struct {
    ticker *time.Ticker
    tokens chan struct{}
}

func NewRateLimiter(rate time.Duration, burst int) *RateLimiter {
    rl := &RateLimiter{
        ticker: time.NewTicker(rate),
        tokens: make(chan struct{}, burst),
    }

    // Fill bucket initially
    for i := 0; i < burst; i++ {
        rl.tokens <- struct{}{}
    }

    // Refill tokens
    go func() {
        for range rl.ticker.C {
            select {
            case rl.tokens <- struct{}{}:
            default:
            }
        }
    }()

    return rl
}

func (rl *RateLimiter) Wait(ctx context.Context) error {
    select {
    case <-rl.tokens:
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func (rl *RateLimiter) Stop() {
    rl.ticker.Stop()
}

// Usage
limiter := NewRateLimiter(time.Second/10, 5) // 10 requests per second, burst of 5
defer limiter.Stop()

for _, request := range requests {
    if err := limiter.Wait(ctx); err != nil {
        log.Printf("Rate limit error: %v", err)
        continue
    }
    processRequest(request)
}
```
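
In production code the `golang.org/x/time/rate` package is usually preferable to a hand-rolled token bucket. A minimal sketch with the same parameters (10 requests per second, burst of 5):

```go
import "golang.org/x/time/rate"

limiter := rate.NewLimiter(rate.Limit(10), 5)

for _, request := range requests {
    if err := limiter.Wait(ctx); err != nil { // blocks until a token is available or ctx is done
        log.Printf("Rate limit error: %v", err)
        continue
    }
    processRequest(request)
}
```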

## Error Handling in Concurrent Code

### errgroup Package

```go
import "golang.org/x/sync/errgroup"

func fetchURLs(ctx context.Context, urls []string) error {
    g, ctx := errgroup.WithContext(ctx)

    for _, url := range urls {
        url := url // Capture for goroutine (not needed on Go 1.22+)
        g.Go(func() error {
            return fetchURL(ctx, url)
        })
    }

    // Wait for all goroutines, return first error
    return g.Wait()
}

// With limited concurrency
func fetchURLsLimited(ctx context.Context, urls []string) error {
    g, ctx := errgroup.WithContext(ctx)
    g.SetLimit(10) // Max 10 concurrent

    for _, url := range urls {
        url := url
        g.Go(func() error {
            return fetchURL(ctx, url)
        })
    }

    return g.Wait()
}
```

## Best Practices

1. **Always close channels from the sender side**
2. **Use context for cancellation and timeouts**
3. **Avoid goroutine leaks - ensure they can exit**
4. **Use buffered channels judiciously - buffering smooths bursts but hides backpressure**
5. **Prefer sync.RWMutex for read-heavy workloads**
6. **Don't use defer in hot loops**
7. **Test with race detector: `go test -race`**
8. **Use errgroup for error propagation**
9. **Limit concurrent operations with worker pools**
10. **Profile before optimizing**

## Race Condition Detection

```bash
# Run tests with race detector
go test -race ./...

# Run program with race detector
go run -race main.go

# Build with race detector
go build -race
```
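
As a concrete illustration, the detector flags unsynchronized access like the following (a deliberately racy counter; run it with `go run -race`):

```go
package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    counter := 0
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            counter++ // DATA RACE: unsynchronized read-modify-write
        }()
    }
    wg.Wait()
    fmt.Println(counter) // unpredictable without a mutex or atomic
}
```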

## Common Patterns to Avoid

```go
// ❌ BAD: Unbounded goroutine creation
for _, item := range millionItems {
    go process(item) // May create millions of goroutines
}

// ✅ GOOD: Use worker pool
pool := NewWorkerPool(100)
for _, item := range millionItems {
    pool.Submit(Job{Data: item})
}

// ❌ BAD: Goroutine leak
func leak() <-chan int {
    ch := make(chan int)
    go func() {
        ch <- expensiveComputation() // If receiver never reads, goroutine leaks
    }()
    return ch
}

// ✅ GOOD: Use context for cancellation
func noLeak(ctx context.Context) <-chan int {
    ch := make(chan int)
    go func() {
        defer close(ch)
        result := expensiveComputation()
        select {
        case ch <- result:
        case <-ctx.Done():
        }
    }()
    return ch
}
```

## Resources

Additional examples and patterns are available in:
- `assets/examples/` - Complete concurrency examples
- `assets/patterns/` - Common concurrency patterns
- `references/` - Links to Go concurrency resources and papers

skills/go-optimization/SKILL.md

---
name: go-optimization
description: Performance optimization techniques including profiling, memory management, benchmarking, and runtime tuning. Use when optimizing Go code performance, reducing memory usage, or analyzing bottlenecks.
---

# Go Optimization Skill

This skill provides expert guidance on Go performance optimization, covering profiling, benchmarking, memory management, and runtime tuning for building high-performance applications.

## When to Use

Activate this skill when:
- Profiling application performance
- Optimizing CPU-intensive operations
- Reducing memory allocations
- Tuning garbage collection
- Writing benchmarks
- Analyzing performance bottlenecks
- Optimizing hot paths
- Reducing lock contention

## Profiling

### CPU Profiling

```go
import (
    "log"
    "os"
    "runtime/pprof"
)

func main() {
    // Start CPU profiling
    f, err := os.Create("cpu.prof")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    if err := pprof.StartCPUProfile(f); err != nil {
        log.Fatal(err)
    }
    defer pprof.StopCPUProfile()

    // Your code here
    runApplication()
}

// Analyze:
// go tool pprof cpu.prof
// (pprof) top10
// (pprof) list functionName
// (pprof) web
```

### Memory Profiling

```go
import (
    "log"
    "os"
    "runtime"
    "runtime/pprof"
)

func writeMemProfile(filename string) {
    f, err := os.Create(filename)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    runtime.GC() // Force GC before snapshot
    if err := pprof.WriteHeapProfile(f); err != nil {
        log.Fatal(err)
    }
}

// Analyze:
// go tool pprof -alloc_space mem.prof
// go tool pprof -inuse_space mem.prof
```

### HTTP Profiling

```go
import (
    "log"
    "net/http"
    _ "net/http/pprof"
)

func main() {
    // Enable pprof endpoints
    go func() {
        log.Println(http.ListenAndServe("localhost:6060", nil))
    }()

    // Your application
    runServer()
}

// Access profiles:
// http://localhost:6060/debug/pprof/
// go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30
// go tool pprof http://localhost:6060/debug/pprof/heap
```

### Execution Tracing

```go
import (
    "log"
    "os"
    "runtime/trace"
)

func main() {
    f, err := os.Create("trace.out")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    if err := trace.Start(f); err != nil {
        log.Fatal(err)
    }
    defer trace.Stop()

    // Your code
    runApplication()
}

// View trace:
// go tool trace trace.out
```

## Benchmarking

### Basic Benchmarks

```go
func BenchmarkStringConcat(b *testing.B) {
    for i := 0; i < b.N; i++ {
        _ = "hello" + " " + "world" // Note: constant operands may be folded at compile time; benchmark real inputs
    }
}

func BenchmarkStringBuilder(b *testing.B) {
    for i := 0; i < b.N; i++ {
        var sb strings.Builder
        sb.WriteString("hello")
        sb.WriteString(" ")
        sb.WriteString("world")
        _ = sb.String()
    }
}

// Run: go test -bench=. -benchmem
```

### Sub-benchmarks

```go
func BenchmarkEncode(b *testing.B) {
    data := generateTestData()

    b.Run("JSON", func(b *testing.B) {
        b.ReportAllocs()
        for i := 0; i < b.N; i++ {
            json.Marshal(data)
        }
    })

    b.Run("MessagePack", func(b *testing.B) {
        b.ReportAllocs()
        for i := 0; i < b.N; i++ {
            msgpack.Marshal(data)
        }
    })
}
```

### Parallel Benchmarks

```go
func BenchmarkConcurrentAccess(b *testing.B) {
    cache := NewCache()

    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            cache.Get("key")
        }
    })
}
```

### Benchmark Comparison

```bash
# Run benchmarks and save results
go test -bench=. -benchmem > old.txt

# Make optimizations

# Run again and compare
go test -bench=. -benchmem > new.txt

# benchstat: go install golang.org/x/perf/cmd/benchstat@latest
benchstat old.txt new.txt
```

## Memory Optimization

### Escape Analysis

```go
// Check what escapes to heap
// go build -gcflags="-m" main.go

// ✅ GOOD: Stack allocation
func stackAlloc() int {
    x := 42
    return x
}

// ❌ BAD: Heap escape
func heapEscape() *int {
    x := 42
    return &x // x escapes to heap
}

// ✅ GOOD: Interface without allocation
func noAlloc(w io.Writer, data []byte) {
    w.Write(data)
}

// ❌ BAD: Interface causes allocation
func withAlloc() io.Writer {
    var b bytes.Buffer
    return &b // &b escapes
}
```

### Pre-allocation

```go
// ❌ BAD: Growing slice
func badAppend(n int) []int {
    var result []int
    for i := 0; i < n; i++ {
        result = append(result, i) // Multiple allocations
    }
    return result
}

// ✅ GOOD: Pre-allocate
func goodAppend(n int) []int {
    result := make([]int, 0, n) // Single allocation
    for i := 0; i < n; i++ {
        result = append(result, i)
    }
    return result
}

// ✅ GOOD: Known length
func knownLength(n int) []int {
    result := make([]int, n)
    for i := 0; i < n; i++ {
        result[i] = i
    }
    return result
}

// ❌ BAD: String concatenation
func badConcat(strs []string) string {
    result := ""
    for _, s := range strs {
        result += s // New allocation each time
    }
    return result
}

// ✅ GOOD: strings.Builder
func goodConcat(strs []string) string {
    var sb strings.Builder
    sb.Grow(estimateSize(strs))
    for _, s := range strs {
        sb.WriteString(s)
    }
    return sb.String()
}
```

### sync.Pool

```go
var bufferPool = sync.Pool{
    New: func() interface{} {
        return new(bytes.Buffer)
    },
}

func processData(data []byte) []byte {
    // Get buffer from pool
    buf := bufferPool.Get().(*bytes.Buffer)
    buf.Reset()
    defer bufferPool.Put(buf)

    // Use buffer
    buf.Write(data)
    // Process...

    // Copy the result out: buf's memory is reused once it returns to the pool
    out := make([]byte, buf.Len())
    copy(out, buf.Bytes())
    return out
}

// String builder pool
var sbPool = sync.Pool{
    New: func() interface{} {
        return &strings.Builder{}
    },
}

func buildString(parts []string) string {
    sb := sbPool.Get().(*strings.Builder)
    sb.Reset()
    defer sbPool.Put(sb)

    for _, part := range parts {
        sb.WriteString(part)
    }
    return sb.String()
}
```

### Zero-Copy Techniques

```go
// Use byte slices instead of strings
func parseHeader(header []byte) (key, value []byte) {
    i := bytes.IndexByte(header, ':')
    if i < 0 {
        return nil, nil
    }
    return header[:i], header[i+1:]
}

// Reuse buffers
type Parser struct {
    buf []byte
}

func (p *Parser) Parse(data []byte) error {
    p.buf = p.buf[:0] // Reset length, keep capacity
    p.buf = append(p.buf, data...)
    // Process p.buf...
    return nil
}

// Direct writing
func writeResponse(w io.Writer, data interface{}) error {
    enc := json.NewEncoder(w) // Write directly to w
    return enc.Encode(data)
}
```

## Garbage Collection Tuning

### GC Control

```go
import "runtime/debug"

// Adjust GC target percentage
debug.SetGCPercent(100) // Default
// Higher = less frequent GC, more memory
// Lower = more frequent GC, less memory

// Force GC (use sparingly!)
runtime.GC()

// Monitor GC stats
var stats runtime.MemStats
runtime.ReadMemStats(&stats)
fmt.Printf("Alloc = %v MB\n", stats.Alloc/1024/1024)
fmt.Printf("TotalAlloc = %v MB\n", stats.TotalAlloc/1024/1024)
fmt.Printf("Sys = %v MB\n", stats.Sys/1024/1024)
fmt.Printf("NumGC = %v\n", stats.NumGC)
```

### GOGC Environment Variable

```bash
# Default (100%)
GOGC=100 ./myapp

# More aggressive GC (uses less memory)
GOGC=50 ./myapp

# Less frequent GC (uses more memory)
GOGC=200 ./myapp

# Disable GC (for debugging)
GOGC=off ./myapp
```
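
Go 1.19 added a soft memory limit that complements GOGC; it can be set with the GOMEMLIMIT environment variable (listed later under runtime tuning) or from code. A minimal sketch:

```go
import "runtime/debug"

// Cap the heap at roughly 4 GiB: the GC runs more aggressively as the
// limit is approached instead of letting the process grow unbounded.
debug.SetMemoryLimit(4 << 30)

// GOGC and the memory limit work together: GOGC sets the steady-state
// growth target, the limit caps the worst case.
```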

## Concurrency Optimization

### Reduce Lock Contention

```go
// ❌ BAD: Single lock
type BadCache struct {
    mu    sync.Mutex
    items map[string]interface{}
}

// ✅ GOOD: RWMutex
type GoodCache struct {
    mu    sync.RWMutex
    items map[string]interface{}
}

func (c *GoodCache) Get(key string) interface{} {
    c.mu.RLock()
    defer c.mu.RUnlock()
    return c.items[key]
}

// ✅ BETTER: Sharded locks
type ShardedCache struct {
    shards [256]*shard
}

type shard struct {
    mu    sync.RWMutex
    items map[string]interface{}
}

func (c *ShardedCache) Get(key string) interface{} {
    shard := c.getShard(key)
    shard.mu.RLock()
    defer shard.mu.RUnlock()
    return shard.items[key]
}
```
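
The sketch above leaves `getShard` undefined; one possible implementation (assuming FNV-1a hashing of the key to pick a shard) looks like this:

```go
import "hash/fnv"

func (c *ShardedCache) getShard(key string) *shard {
    h := fnv.New32a()
    h.Write([]byte(key))           // hash the key
    return c.shards[h.Sum32()%256] // map the hash onto one of the 256 shards
}
```

Each shard (and its map) still needs to be initialized before use.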

### Channel Buffering

```go
// Unbuffered channel: every send blocks until a receiver is ready
ch := make(chan int)

// Buffered channel: sends only block when the buffer is full
ch := make(chan int, 100)

// Optimal buffer size depends on:
// - Producer/consumer rates
// - Memory constraints
// - Latency requirements
```

### Atomic Operations

```go
import "sync/atomic"

type Counter struct {
    value int64
}

func (c *Counter) Increment() {
    atomic.AddInt64(&c.value, 1)
}

func (c *Counter) Value() int64 {
    return atomic.LoadInt64(&c.value)
}

// ✅ Faster than mutex for simple operations
// ❌ Limited to basic types and operations
```
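
Since Go 1.19 the typed atomics are usually preferable: they cannot be accessed non-atomically by accident and need no alignment care. A minimal sketch of the same counter:

```go
import "sync/atomic"

type Counter struct {
    value atomic.Int64 // zero value is ready to use
}

func (c *Counter) Increment() {
    c.value.Add(1)
}

func (c *Counter) Value() int64 {
    return c.value.Load()
}
```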

## Algorithmic Optimization

### Map Pre-sizing

```go
// ❌ BAD: Growing map
func badMap(items []Item) map[string]Item {
    m := make(map[string]Item)
    for _, item := range items {
        m[item.ID] = item
    }
    return m
}

// ✅ GOOD: Pre-sized map
func goodMap(items []Item) map[string]Item {
    m := make(map[string]Item, len(items))
    for _, item := range items {
        m[item.ID] = item
    }
    return m
}
```

### Avoid Unnecessary Work

```go
// ❌ BAD: Deeply nested checks hide the skip conditions
func process(items []Item) {
    for _, item := range items {
        if isValid(item) {
            result := expensiveComputation(item)
            if result > threshold {
                handleResult(result)
            }
        }
    }
}

// ✅ GOOD: Early returns
func process(items []Item) {
    for _, item := range items {
        if !isValid(item) {
            continue // Skip early
        }
        result := expensiveComputation(item)
        if result <= threshold {
            continue // Skip early
        }
        handleResult(result)
    }
}

// ✅ BETTER: Fast path
func process(items []Item) {
    for _, item := range items {
        // Fast path for common case
        if item.IsSimple() {
            handleSimple(item)
            continue
        }
        // Slow path for complex case
        handleComplex(item)
    }
}
```

## Runtime Tuning

### GOMAXPROCS

```go
import "runtime"

// GOMAXPROCS caps how many OS threads execute Go code simultaneously.
// Since Go 1.5 it defaults to runtime.NumCPU(), so setting it explicitly is rarely needed.
runtime.GOMAXPROCS(runtime.NumCPU())

// Goroutines blocked on I/O or syscalls do not count against GOMAXPROCS,
// so raising it above NumCPU rarely helps I/O-bound programs.
```

### Environment Variables

```bash
# Limit OS threads running Go code
GOMAXPROCS=8 ./myapp

# GC aggressiveness
GOGC=100 ./myapp

# Memory limit (Go 1.19+)
GOMEMLIMIT=4GiB ./myapp

# Print a GC trace line after every collection
GODEBUG=gctrace=1 ./myapp
```

## Performance Patterns

### Inline Functions

```go
// The compiler inlines small functions automatically; there is no
// //go:inline directive (only //go:noinline to prevent inlining).
// Check inlining decisions with: go build -gcflags="-m"
func add(a, b int) int {
    return a + b
}

// Keep hot-path functions small for inlining
```

### Avoid Interface Allocations

```go
// ❌ BAD: interface{} parameters force arguments to escape
func badPrint(value interface{}) {
    fmt.Println(value) // value escapes
}

// ✅ GOOD: Type-specific functions (or generics) in hot paths
func printInt(value int) {
    fmt.Println(value)
}

func printString(value string) {
    fmt.Println(value)
}
```

### Batch Operations

```go
// ❌ BAD: Individual operations
for _, item := range items {
    db.Insert(item) // N database calls
}

// ✅ GOOD: Batch operations
db.BatchInsert(items) // 1 database call
```

## Best Practices

1. **Profile before optimizing** - Measure, don't guess
2. **Focus on hot paths** - Optimize the 20% that matters
3. **Reduce allocations** - Reuse objects, pre-allocate
4. **Use appropriate data structures** - Map vs slice vs array
5. **Minimize lock contention** - Use RWMutex, sharding
6. **Benchmark changes** - Use benchstat for comparisons
7. **Test with race detector** - `go test -race`
8. **Monitor in production** - Use profiling endpoints
9. **Balance readability and performance** - Don't over-optimize
10. **Use PGO** - Profile-guided optimization (Go 1.20+)

## Profile-Guided Optimization (PGO)

```bash
# 1. Build and run the application as usual
go build -o myapp

# 2. Collect a CPU profile from a representative run
#    (for example via net/http/pprof) and save it as default.pgo
#    in the main package directory

# 3. Rebuild; go build picks up default.pgo automatically (Go 1.21+),
#    or pass the profile explicitly:
go build -pgo=default.pgo -o myapp-optimized

# Typical improvement is a few percent on CPU-bound workloads
```

## Resources

Additional resources in:
- `assets/examples/` - Performance optimization examples
- `assets/benchmarks/` - Benchmark templates
- `references/` - Links to profiling guides and performance papers

skills/go-patterns/SKILL.md

---
name: go-patterns
description: Modern Go patterns, idioms, and best practices from Go 1.18+. Use when user needs guidance on idiomatic Go code, design patterns, or modern Go features like generics and workspaces.
---

# Go Patterns Skill

This skill provides comprehensive guidance on modern Go patterns, idioms, and best practices, with special focus on features introduced in Go 1.18 and later.

## When to Use

Activate this skill when:
- Writing idiomatic Go code
- Implementing design patterns in Go
- Using modern Go features (generics, fuzzing, workspaces)
- Refactoring code to be more idiomatic
- Teaching Go best practices
- Code review for idiom compliance

## Modern Go Features

### Generics (Go 1.18+)

**Type Parameters:**
```go
// Generic function
func Map[T, U any](slice []T, f func(T) U) []U {
    result := make([]U, len(slice))
    for i, v := range slice {
        result[i] = f(v)
    }
    return result
}

// Usage
numbers := []int{1, 2, 3, 4, 5}
doubled := Map(numbers, func(n int) int { return n * 2 })
```

**Type Constraints:**
```go
// Ordered constraint (Go 1.21+ ships an equivalent as cmp.Ordered)
type Ordered interface {
    ~int | ~int8 | ~int16 | ~int32 | ~int64 |
        ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 |
        ~float32 | ~float64 | ~string
}

func Min[T Ordered](a, b T) T {
    if a < b {
        return a
    }
    return b
}

// Custom constraints
type Numeric interface {
    ~int | ~int64 | ~float64
}

func Sum[T Numeric](values []T) T {
    var sum T
    for _, v := range values {
        sum += v
    }
    return sum
}
```

**Generic Data Structures:**
```go
// Generic stack
type Stack[T any] struct {
    items []T
}

func NewStack[T any]() *Stack[T] {
    return &Stack[T]{items: make([]T, 0)}
}

func (s *Stack[T]) Push(item T) {
    s.items = append(s.items, item)
}

func (s *Stack[T]) Pop() (T, bool) {
    if len(s.items) == 0 {
        var zero T
        return zero, false
    }
    item := s.items[len(s.items)-1]
    s.items = s.items[:len(s.items)-1]
    return item, true
}

// Generic map utilities
func Keys[K comparable, V any](m map[K]V) []K {
    keys := make([]K, 0, len(m))
    for k := range m {
        keys = append(keys, k)
    }
    return keys
}

func Values[K comparable, V any](m map[K]V) []V {
    values := make([]V, 0, len(m))
    for _, v := range m {
        values = append(values, v)
    }
    return values
}
```
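
Many one-off generic helpers like these are no longer necessary: Go 1.21 promoted the `slices` package into the standard library. A small sketch of what it covers:

```go
import "slices"

nums := []int{3, 1, 2}
slices.Sort(nums)                 // [1 2 3]
found := slices.Contains(nums, 2) // true
idx := slices.Index(nums, 3)      // 2

// slices.SortFunc, slices.BinarySearch, slices.Reverse, etc. cover most
// common slice manipulation without hand-written generics.
```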

### Workspaces (Go 1.18+)

**go.work file:**
```
go 1.21

use (
    ./service
    ./shared
    ./tools
)

replace example.com/legacy => ./vendor/legacy
```

**Benefits:**
- Multi-module development
- Local dependency overrides
- Simplified testing across modules
- Better monorepo support
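
A workspace like the one above is normally created with the `go work` tooling rather than by hand; a typical flow (module paths are illustrative):

```bash
# Create go.work listing the local modules
go work init ./service ./shared

# Add another module later
go work use ./tools

# Sync the workspace build list into each module's go.mod
go work sync
```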

## Essential Go Patterns

### Functional Options Pattern

```go
type Server struct {
    host    string
    port    int
    timeout time.Duration
    logger  *log.Logger
}

type Option func(*Server)

func WithHost(host string) Option {
    return func(s *Server) {
        s.host = host
    }
}

func WithPort(port int) Option {
    return func(s *Server) {
        s.port = port
    }
}

func WithTimeout(timeout time.Duration) Option {
    return func(s *Server) {
        s.timeout = timeout
    }
}

func WithLogger(logger *log.Logger) Option {
    return func(s *Server) {
        s.logger = logger
    }
}

func NewServer(opts ...Option) *Server {
    s := &Server{
        host:    "localhost",
        port:    8080,
        timeout: 30 * time.Second,
        logger:  log.Default(),
    }

    for _, opt := range opts {
        opt(s)
    }

    return s
}

// Usage
server := NewServer(
    WithHost("0.0.0.0"),
    WithPort(3000),
    WithTimeout(60 * time.Second),
)
```

### Builder Pattern

```go
type Query struct {
    table   string
    where   []string
    orderBy string
    limit   int
    offset  int
}

type QueryBuilder struct {
    query Query
}

func NewQueryBuilder(table string) *QueryBuilder {
    return &QueryBuilder{
        query: Query{table: table},
    }
}

func (b *QueryBuilder) Where(condition string) *QueryBuilder {
    b.query.where = append(b.query.where, condition)
    return b
}

func (b *QueryBuilder) OrderBy(field string) *QueryBuilder {
    b.query.orderBy = field
    return b
}

func (b *QueryBuilder) Limit(limit int) *QueryBuilder {
    b.query.limit = limit
    return b
}

func (b *QueryBuilder) Offset(offset int) *QueryBuilder {
    b.query.offset = offset
    return b
}

func (b *QueryBuilder) Build() Query {
    return b.query
}

// Usage
query := NewQueryBuilder("users").
    Where("age > 18").
    Where("active = true").
    OrderBy("created_at DESC").
    Limit(10).
    Offset(20).
    Build()
```

### Strategy Pattern

```go
// Strategy interface
type PaymentStrategy interface {
    Pay(amount float64) error
}

// Concrete strategies
type CreditCardPayment struct {
    cardNumber string
}

func (c *CreditCardPayment) Pay(amount float64) error {
    fmt.Printf("Paying $%.2f with credit card %s\n", amount, c.cardNumber)
    return nil
}

type PayPalPayment struct {
    email string
}

func (p *PayPalPayment) Pay(amount float64) error {
    fmt.Printf("Paying $%.2f with PayPal account %s\n", amount, p.email)
    return nil
}

type CryptoPayment struct {
    walletAddress string
}

func (c *CryptoPayment) Pay(amount float64) error {
    fmt.Printf("Paying $%.2f to wallet %s\n", amount, c.walletAddress)
    return nil
}

// Context
type PaymentProcessor struct {
    strategy PaymentStrategy
}

func NewPaymentProcessor(strategy PaymentStrategy) *PaymentProcessor {
    return &PaymentProcessor{strategy: strategy}
}

func (p *PaymentProcessor) ProcessPayment(amount float64) error {
    return p.strategy.Pay(amount)
}

// Usage
processor := NewPaymentProcessor(&CreditCardPayment{cardNumber: "1234-5678"})
processor.ProcessPayment(100.00)

processor = NewPaymentProcessor(&PayPalPayment{email: "user@example.com"})
processor.ProcessPayment(50.00)
```

### Observer Pattern

```go
type Observer interface {
    Update(event Event)
}

type Event struct {
    Type string
    Data interface{}
}

type Subject struct {
    observers []Observer
}

func (s *Subject) Attach(observer Observer) {
    s.observers = append(s.observers, observer)
}

func (s *Subject) Detach(observer Observer) {
    for i, obs := range s.observers {
        if obs == observer {
            s.observers = append(s.observers[:i], s.observers[i+1:]...)
            break
        }
    }
}

func (s *Subject) Notify(event Event) {
    for _, observer := range s.observers {
        observer.Update(event)
    }
}

// Concrete observer
type Logger struct {
    name string
}

func (l *Logger) Update(event Event) {
    fmt.Printf("[%s] Received event: %s\n", l.name, event.Type)
}

// Usage
subject := &Subject{}
logger1 := &Logger{name: "Logger1"}
logger2 := &Logger{name: "Logger2"}

subject.Attach(logger1)
subject.Attach(logger2)

subject.Notify(Event{Type: "UserCreated", Data: "user123"})
```

## Idiomatic Go Patterns

### Error Handling

**Sentinel Errors:**
```go
var (
    ErrNotFound     = errors.New("resource not found")
    ErrUnauthorized = errors.New("unauthorized access")
    ErrInvalidInput = errors.New("invalid input")
)

func GetUser(id string) (*User, error) {
    if id == "" {
        return nil, ErrInvalidInput
    }

    user := findUser(id)
    if user == nil {
        return nil, ErrNotFound
    }

    return user, nil
}

// Check with errors.Is
if errors.Is(err, ErrNotFound) {
    // Handle not found
}
```

**Custom Error Types:**
```go
type ValidationError struct {
    Field   string
    Message string
}

func (e *ValidationError) Error() string {
    return fmt.Sprintf("validation error on %s: %s", e.Field, e.Message)
}

// Check with errors.As
var valErr *ValidationError
if errors.As(err, &valErr) {
    fmt.Printf("Validation failed: %s\n", valErr.Field)
}
```

**Error Wrapping:**
```go
func ProcessUser(id string) error {
    user, err := GetUser(id)
    if err != nil {
        return fmt.Errorf("process user: %w", err)
    }

    if err := ValidateUser(user); err != nil {
        return fmt.Errorf("validate user %s: %w", id, err)
    }

    return nil
}
```
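
Go 1.20 added `errors.Join` for combining several errors into one value that still works with `errors.Is`/`errors.As`; a minimal sketch:

```go
func CloseAll(closers []io.Closer) error {
    var errs []error
    for _, c := range closers {
        if err := c.Close(); err != nil {
            errs = append(errs, err)
        }
    }
    return errors.Join(errs...) // nil if no errors were collected
}
```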

### Interface Patterns

**Small Interfaces:**
```go
// Good: Small, focused interfaces
type Reader interface {
    Read(p []byte) (n int, err error)
}

type Writer interface {
    Write(p []byte) (n int, err error)
}

type Closer interface {
    Close() error
}

// Compose interfaces
type ReadWriteCloser interface {
    Reader
    Writer
    Closer
}
```

**Interface Segregation:**
```go
// Instead of one large interface
type Repository interface {
    Create(ctx context.Context, user *User) error
    Read(ctx context.Context, id string) (*User, error)
    Update(ctx context.Context, user *User) error
    Delete(ctx context.Context, id string) error
    List(ctx context.Context) ([]*User, error)
    Search(ctx context.Context, query string) ([]*User, error)
}

// Better: Separate interfaces
type UserCreator interface {
    Create(ctx context.Context, user *User) error
}

type UserReader interface {
    Read(ctx context.Context, id string) (*User, error)
    List(ctx context.Context) ([]*User, error)
}

type UserUpdater interface {
    Update(ctx context.Context, user *User) error
}

type UserDeleter interface {
    Delete(ctx context.Context, id string) error
}

type UserSearcher interface {
    Search(ctx context.Context, query string) ([]*User, error)
}
```

### Context Patterns

**Proper Context Usage:**
```go
func FetchData(ctx context.Context, url string) ([]byte, error) {
    // Create request with context
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
    if err != nil {
        return nil, fmt.Errorf("create request: %w", err)
    }

    // Check for cancellation before expensive operation
    select {
    case <-ctx.Done():
        return nil, ctx.Err()
    default:
    }

    // Execute request
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("execute request: %w", err)
    }
    defer resp.Body.Close()

    return io.ReadAll(resp.Body)
}

// Context with timeout
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

data, err := FetchData(ctx, "https://api.example.com/data")
```

**Context Values:**
```go
type contextKey string

const (
    requestIDKey contextKey = "requestID"
    userIDKey    contextKey = "userID"
)

func WithRequestID(ctx context.Context, requestID string) context.Context {
    return context.WithValue(ctx, requestIDKey, requestID)
}

func GetRequestID(ctx context.Context) (string, bool) {
    requestID, ok := ctx.Value(requestIDKey).(string)
    return requestID, ok
}

func WithUserID(ctx context.Context, userID string) context.Context {
    return context.WithValue(ctx, userIDKey, userID)
}

func GetUserID(ctx context.Context) (string, bool) {
    userID, ok := ctx.Value(userIDKey).(string)
    return userID, ok
}
```
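
A typical place these helpers are used is HTTP middleware; a minimal sketch (the header name and ID format are arbitrary choices):

```go
func RequestIDMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        id := r.Header.Get("X-Request-ID")
        if id == "" {
            id = fmt.Sprintf("%d", time.Now().UnixNano()) // placeholder ID scheme
        }
        // Attach the ID to the request context for downstream handlers
        next.ServeHTTP(w, r.WithContext(WithRequestID(r.Context(), id)))
    })
}

func handler(w http.ResponseWriter, r *http.Request) {
    if id, ok := GetRequestID(r.Context()); ok {
        log.Printf("handling request %s", id)
    }
}
```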

## Best Practices

1. **Accept interfaces, return structs**
2. **Make the zero value useful**
3. **Use composition over inheritance**
4. **Handle errors explicitly**
5. **Use defer for cleanup**
6. **Prefer sync.RWMutex for read-heavy workloads**
7. **Use context for cancellation and timeouts**
8. **Keep interfaces small**
9. **Document exported identifiers**
10. **Use go fmt and go vet**

## Resources

Additional patterns and examples are available in the `assets/` directory:
- `examples/` - Complete code examples
- `patterns/` - Design pattern implementations
- `antipatterns/` - Common mistakes to avoid

See `references/` directory for:
- Links to official Go documentation
- Effective Go guidelines
- Go proverbs
- Community best practices