Initial commit
commands/optimize/.scripts/analyze-bundle.sh (Executable file, 172 lines)
@@ -0,0 +1,172 @@
#!/bin/bash
# Purpose: Analyze webpack/vite bundle size and composition
# Version: 1.0.0
# Usage: ./analyze-bundle.sh [build-dir] [output-dir]
# Returns: 0=success, 1=analysis failed, 2=invalid arguments
# Dependencies: Node.js, npm, webpack-bundle-analyzer or vite-bundle-visualizer

set -euo pipefail

# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Arguments
BUILD_DIR="${1:-./dist}"
OUTPUT_DIR="${2:-./bundle-analysis}"

# Validate build directory exists
if [ ! -d "$BUILD_DIR" ]; then
    echo -e "${RED}Error: Build directory not found: $BUILD_DIR${NC}"
    echo "Please run 'npm run build' first"
    exit 2
fi

# Create output directory
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)

echo -e "${GREEN}Analyzing bundle in: $BUILD_DIR${NC}"
echo "Output directory: $OUTPUT_DIR"

# Detect build tool
if [ -f "stats.json" ] || [ -f "$BUILD_DIR/stats.json" ]; then
    BUILD_TOOL="webpack"
elif [ -f "vite.config.js" ] || [ -f "vite.config.ts" ]; then
    BUILD_TOOL="vite"
elif [ -f "next.config.js" ]; then
    BUILD_TOOL="nextjs"
else
    BUILD_TOOL="unknown"
fi

echo "Detected build tool: $BUILD_TOOL"

# Analyze bundle based on build tool
case $BUILD_TOOL in
    webpack)
        echo -e "\n${YELLOW}Running webpack-bundle-analyzer...${NC}"

        # Check if webpack-bundle-analyzer is installed
        if ! npm list webpack-bundle-analyzer &> /dev/null; then
            echo "Installing webpack-bundle-analyzer..."
            npm install --save-dev webpack-bundle-analyzer
        fi

        # Find stats.json
        STATS_FILE="stats.json"
        if [ -f "$BUILD_DIR/stats.json" ]; then
            STATS_FILE="$BUILD_DIR/stats.json"
        fi

        # Generate report
        npx webpack-bundle-analyzer "$STATS_FILE" \
            --mode static \
            --report "${OUTPUT_DIR}/bundle-report-${TIMESTAMP}.html" \
            --no-open

        echo -e "${GREEN}✓ Bundle analysis complete${NC}"
        echo "Report: ${OUTPUT_DIR}/bundle-report-${TIMESTAMP}.html"
        ;;

    vite)
        echo -e "\n${YELLOW}Running vite bundle analysis...${NC}"

        # Check if rollup-plugin-visualizer is installed
        # (vite-bundle-visualizer is a CLI wrapper around it)
        if ! npm list rollup-plugin-visualizer &> /dev/null; then
            echo "Installing rollup-plugin-visualizer..."
            npm install --save-dev rollup-plugin-visualizer
        fi

        # Use vite-bundle-visualizer
        npx vite-bundle-visualizer \
            --output "${OUTPUT_DIR}/bundle-report-${TIMESTAMP}.html"

        echo -e "${GREEN}✓ Bundle analysis complete${NC}"
        ;;

    nextjs)
        echo -e "\n${YELLOW}Running Next.js bundle analysis...${NC}"

        # Check if @next/bundle-analyzer is installed
        if ! npm list @next/bundle-analyzer &> /dev/null; then
            echo "Installing @next/bundle-analyzer..."
            npm install --save-dev @next/bundle-analyzer
        fi

        # Rebuild with analyzer
        ANALYZE=true npm run build

        echo -e "${GREEN}✓ Bundle analysis complete${NC}"
        ;;

    *)
        echo -e "${YELLOW}Unknown build tool. Performing generic analysis...${NC}"
        ;;
esac

# Calculate bundle sizes
echo -e "\n${YELLOW}Calculating bundle sizes...${NC}"

# Find all JS/CSS files
find "$BUILD_DIR" -type f \( -name "*.js" -o -name "*.css" \) -exec ls -lh {} \; | \
    awk '{print $9, $5}' > "${OUTPUT_DIR}/file-sizes-${TIMESTAMP}.txt"

# Calculate totals (fall back to 0 so an empty build doesn't trip pipefail)
TOTAL_JS=$(find "$BUILD_DIR" -type f -name "*.js" -exec du -ch {} + | grep total | awk '{print $1}' || echo "0")
TOTAL_CSS=$(find "$BUILD_DIR" -type f -name "*.css" -exec du -ch {} + | grep total | awk '{print $1}' || echo "0")
TOTAL_ALL=$(du -sh "$BUILD_DIR" | awk '{print $1}')

echo -e "\n=== Bundle Size Summary ==="
echo "Total JavaScript: $TOTAL_JS"
echo "Total CSS: $TOTAL_CSS"
echo "Total Build Size: $TOTAL_ALL"

# Identify large files (>500KB)
echo -e "\n=== Large Files (>500KB) ==="
find "$BUILD_DIR" -type f -size +500k -exec ls -lh {} \; | \
    awk '{print $5, $9}' | sort -hr

# Check for common issues
echo -e "\n${YELLOW}Checking for common issues...${NC}"

# Check for source maps in production
SOURCEMAPS=$(find "$BUILD_DIR" -type f -name "*.map" | wc -l)
if [ "$SOURCEMAPS" -gt 0 ]; then
    echo -e "${YELLOW}⚠ Found $SOURCEMAPS source map files in build${NC}"
    echo "  Consider disabling source maps for production"
fi

# Check for unminified files
UNMINIFIED=$(find "$BUILD_DIR" -type f -name "*.js" ! -name "*.min.js" -exec grep -l "function " {} \; 2>/dev/null | wc -l)
if [ "$UNMINIFIED" -gt 0 ]; then
    echo -e "${YELLOW}⚠ Found potential unminified files${NC}"
    echo "  Verify minification is enabled"
fi

# Generate JSON summary
cat > "${OUTPUT_DIR}/summary-${TIMESTAMP}.json" <<EOF
{
  "timestamp": "${TIMESTAMP}",
  "buildTool": "${BUILD_TOOL}",
  "buildDir": "${BUILD_DIR}",
  "totalJS": "${TOTAL_JS}",
  "totalCSS": "${TOTAL_CSS}",
  "totalSize": "${TOTAL_ALL}",
  "sourceMaps": ${SOURCEMAPS},
  "issues": {
    "sourceMapsInProduction": $([ "$SOURCEMAPS" -gt 0 ] && echo "true" || echo "false"),
    "potentiallyUnminified": $([ "$UNMINIFIED" -gt 0 ] && echo "true" || echo "false")
  }
}
EOF

echo -e "\n${GREEN}✓ Bundle analysis complete${NC}"
echo "Results saved to:"
echo "  - ${OUTPUT_DIR}/bundle-report-${TIMESTAMP}.html"
echo "  - ${OUTPUT_DIR}/file-sizes-${TIMESTAMP}.txt"
echo "  - ${OUTPUT_DIR}/summary-${TIMESTAMP}.json"

exit 0
commands/optimize/.scripts/load-test.sh (Executable file, 314 lines)
@@ -0,0 +1,314 @@
#!/bin/bash
# Purpose: Run k6 load testing with various scenarios
# Version: 1.0.0
# Usage: ./load-test.sh <url> [scenario] [duration] [vus]
# Returns: 0=success, 1=test failed, 2=invalid arguments
# Dependencies: k6

set -euo pipefail

# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Arguments
URL="${1:-}"
SCENARIO="${2:-smoke}"
DURATION="${3:-60s}"
VUS="${4:-50}"

# Validate arguments
if [ -z "$URL" ]; then
    echo -e "${RED}Error: URL is required${NC}"
    echo "Usage: $0 <url> [scenario] [duration] [vus]"
    echo ""
    echo "Scenarios:"
    echo "  smoke  - Quick test with few users (default)"
    echo "  load   - Normal load test"
    echo "  stress - Gradually increasing load"
    echo "  spike  - Sudden traffic spike"
    echo "  soak   - Long-duration test"
    echo ""
    echo "Example: $0 https://api.example.com/health load 300s 100"
    exit 2
fi

# Check if k6 is installed
if ! command -v k6 &> /dev/null; then
    echo -e "${YELLOW}k6 not found.${NC}"
    echo "Please install k6 and re-run this script:"
    echo "  macOS:   brew install k6"
    echo "  Linux:   sudo apt-get install k6 (after adding the k6 repository) or sudo snap install k6"
    echo "  Windows: choco install k6"
    exit 2
fi

# Create output directory
OUTPUT_DIR="./load-test-results"
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)

echo -e "${GREEN}Running k6 load test${NC}"
echo "URL: $URL"
echo "Scenario: $SCENARIO"
echo "Duration: $DURATION"
echo "VUs: $VUS"

# Generate k6 test script based on scenario
TEST_SCRIPT="${OUTPUT_DIR}/test-${SCENARIO}-${TIMESTAMP}.js"

case $SCENARIO in
    smoke)
        cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';

const errorRate = new Rate('errors');

export const options = {
  vus: 1,
  duration: '30s',
  thresholds: {
    http_req_duration: ['p(95)<1000'],
    http_req_failed: ['rate<0.01'],
  },
};

export default function () {
  const res = http.get(__ENV.TARGET_URL);

  const success = check(res, {
    'status is 200': (r) => r.status === 200,
    'response time OK': (r) => r.timings.duration < 1000,
  });

  errorRate.add(!success);
  sleep(1);
}
EOF
        ;;

    load)
        cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';

const errorRate = new Rate('errors');

// __ENV values are strings; coerce and round so stage targets are integers.
const VUS = Number(__ENV.VUS);

export const options = {
  stages: [
    { duration: '30s', target: Math.round(VUS / 2) },
    { duration: __ENV.DURATION, target: VUS },
    { duration: '30s', target: 0 },
  ],
  thresholds: {
    http_req_duration: ['p(95)<500', 'p(99)<1000'],
    http_req_failed: ['rate<0.01'],
    errors: ['rate<0.1'],
  },
};

export default function () {
  const res = http.get(__ENV.TARGET_URL);

  const success = check(res, {
    'status is 200': (r) => r.status === 200,
    'response time < 500ms': (r) => r.timings.duration < 500,
  });

  errorRate.add(!success);
  sleep(1);
}
EOF
        ;;

    stress)
        cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';

const errorRate = new Rate('errors');

// __ENV values are strings; coerce and round so stage targets are integers.
const VUS = Number(__ENV.VUS);

export const options = {
  stages: [
    { duration: '1m', target: Math.round(VUS / 4) },
    { duration: '2m', target: Math.round(VUS / 2) },
    { duration: '2m', target: VUS },
    { duration: '2m', target: Math.round(VUS * 1.5) },
    { duration: '2m', target: VUS * 2 },
    { duration: '1m', target: 0 },
  ],
  thresholds: {
    http_req_duration: ['p(95)<1000'],
    http_req_failed: ['rate<0.05'],
  },
};

export default function () {
  const res = http.get(__ENV.TARGET_URL);

  const success = check(res, {
    'status is 200': (r) => r.status === 200,
  });

  errorRate.add(!success);
  sleep(1);
}
EOF
        ;;

    spike)
        cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';

const errorRate = new Rate('errors');

// __ENV values are strings; coerce and round so stage targets are integers.
const VUS = Number(__ENV.VUS);

export const options = {
  stages: [
    { duration: '1m', target: Math.round(VUS / 2) },
    { duration: '30s', target: VUS * 5 },
    { duration: '1m', target: Math.round(VUS / 2) },
    { duration: '30s', target: 0 },
  ],
  thresholds: {
    http_req_duration: ['p(95)<2000'],
    http_req_failed: ['rate<0.1'],
  },
};

export default function () {
  const res = http.get(__ENV.TARGET_URL);

  const success = check(res, {
    'status is 200': (r) => r.status === 200,
  });

  errorRate.add(!success);
  sleep(1);
}
EOF
        ;;

    soak)
        cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';

const errorRate = new Rate('errors');

// __ENV values are strings; convert explicitly so stage targets are numbers.
const VUS = Number(__ENV.VUS);

export const options = {
  stages: [
    { duration: '2m', target: VUS },
    { duration: '3h', target: VUS },
    { duration: '2m', target: 0 },
  ],
  thresholds: {
    http_req_duration: ['p(95)<500'],
    http_req_failed: ['rate<0.01'],
  },
};

export default function () {
  const res = http.get(__ENV.TARGET_URL);

  const success = check(res, {
    'status is 200': (r) => r.status === 200,
  });

  errorRate.add(!success);
  sleep(1);
}
EOF
        ;;

    *)
        echo -e "${RED}Error: Unknown scenario: $SCENARIO${NC}"
        exit 2
        ;;
esac

# Run k6 test (inside `if` so a non-zero exit doesn't trip `set -e`)
echo -e "\n${YELLOW}Starting load test...${NC}"
if k6 run \
    --out json="${OUTPUT_DIR}/results-${SCENARIO}-${TIMESTAMP}.json" \
    --summary-export="${OUTPUT_DIR}/summary-${SCENARIO}-${TIMESTAMP}.json" \
    --env TARGET_URL="$URL" \
    --env DURATION="$DURATION" \
    --env VUS="$VUS" \
    "$TEST_SCRIPT"; then
    echo -e "\n${GREEN}✓ Load test passed${NC}"
    TEST_STATUS="passed"
else
    echo -e "\n${RED}✗ Load test failed (thresholds not met)${NC}"
    TEST_STATUS="failed"
fi

# Parse results
echo -e "\n${YELLOW}Parsing results...${NC}"
node -e "
const fs = require('fs');
const summary = JSON.parse(fs.readFileSync('${OUTPUT_DIR}/summary-${SCENARIO}-${TIMESTAMP}.json'));

console.log('\n=== Load Test Results ===');
console.log('Scenario:', '${SCENARIO}');
console.log('Status:', '${TEST_STATUS}'.toUpperCase());

const metrics = summary.metrics;

if (metrics.http_reqs) {
  console.log('\n=== Request Statistics ===');
  console.log('Total Requests:', metrics.http_reqs.count);
  console.log('Request Rate:', metrics.http_reqs.rate.toFixed(2), 'req/s');
}

if (metrics.http_req_duration) {
  // The summary export uses 'med' and 'p(95)'-style keys, not p50/p95.
  const d = metrics.http_req_duration;
  console.log('\n=== Response Time ===');
  console.log('Average:', d.avg.toFixed(2), 'ms');
  console.log('Min:', d.min.toFixed(2), 'ms');
  console.log('Max:', d.max.toFixed(2), 'ms');
  console.log('p50 (median):', d.med.toFixed(2), 'ms');
  if (d['p(95)'] !== undefined) console.log('p95:', d['p(95)'].toFixed(2), 'ms');
  if (d['p(99)'] !== undefined) console.log('p99:', d['p(99)'].toFixed(2), 'ms');
}

if (metrics.http_req_failed) {
  console.log('\n=== Error Rate ===');
  console.log('Failed Requests:', (metrics.http_req_failed.rate * 100).toFixed(2), '%');
}

if (metrics.vus) {
  console.log('\n=== Virtual Users ===');
  console.log('Max VUs:', metrics.vus.max);
}

// Check results (a check passes overall only if it never failed)
console.log('\n=== Check Results ===');
Object.entries(summary.root_group.checks || {}).forEach(([name, check]) => {
  const status = check.fails === 0 ? '✓' : '✗';
  console.log(status, name);
});
"

echo -e "\n${GREEN}✓ Load test complete${NC}"
echo "Results saved to:"
echo "  - ${OUTPUT_DIR}/results-${SCENARIO}-${TIMESTAMP}.json"
echo "  - ${OUTPUT_DIR}/summary-${SCENARIO}-${TIMESTAMP}.json"
echo "  - ${OUTPUT_DIR}/test-${SCENARIO}-${TIMESTAMP}.js"

if [ "$TEST_STATUS" = "failed" ]; then
    exit 1
fi

exit 0
commands/optimize/.scripts/profile-frontend.sh (Executable file, 119 lines)
@@ -0,0 +1,119 @@
#!/bin/bash
# Purpose: Automated Lighthouse performance profiling for frontend pages
# Version: 1.0.0
# Usage: ./profile-frontend.sh <url> [output-dir]
# Returns: 0=success, 1=lighthouse failed, 2=invalid arguments
# Dependencies: Node.js, npm, lighthouse

set -euo pipefail

# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Arguments
URL="${1:-}"
OUTPUT_DIR="${2:-./lighthouse-reports}"

# Validate arguments
if [ -z "$URL" ]; then
    echo -e "${RED}Error: URL is required${NC}"
    echo "Usage: $0 <url> [output-dir]"
    echo "Example: $0 https://example.com ./reports"
    exit 2
fi

# Check if lighthouse is installed
if ! command -v lighthouse &> /dev/null; then
    echo -e "${YELLOW}Lighthouse not found. Installing...${NC}"
    npm install -g lighthouse
fi

# Create output directory
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)

echo -e "${GREEN}Running Lighthouse audit for: $URL${NC}"
echo "Output directory: $OUTPUT_DIR"

# Run Lighthouse with various strategies
echo -e "\n${YELLOW}1. Desktop audit (fast connection)${NC}"
lighthouse "$URL" \
    --output=json \
    --output=html \
    --output-path="${OUTPUT_DIR}/desktop-${TIMESTAMP}" \
    --preset=desktop \
    --throttling.rttMs=40 \
    --throttling.throughputKbps=10240 \
    --throttling.cpuSlowdownMultiplier=1 \
    --chrome-flags="--headless --no-sandbox"

echo -e "\n${YELLOW}2. Mobile audit (3G connection)${NC}"
# Mobile emulation is Lighthouse's default, so no preset flag is needed here.
lighthouse "$URL" \
    --output=json \
    --output=html \
    --output-path="${OUTPUT_DIR}/mobile-${TIMESTAMP}" \
    --throttling.rttMs=150 \
    --throttling.throughputKbps=1600 \
    --throttling.cpuSlowdownMultiplier=4 \
    --chrome-flags="--headless --no-sandbox"

# Extract key metrics
echo -e "\n${GREEN}Extracting key metrics...${NC}"
node -e "
const fs = require('fs');
const desktop = JSON.parse(fs.readFileSync('${OUTPUT_DIR}/desktop-${TIMESTAMP}.report.json'));
const mobile = JSON.parse(fs.readFileSync('${OUTPUT_DIR}/mobile-${TIMESTAMP}.report.json'));

console.log('\n=== Performance Scores ===');
console.log('Desktop Performance:', Math.round(desktop.categories.performance.score * 100));
console.log('Mobile Performance:', Math.round(mobile.categories.performance.score * 100));

console.log('\n=== Web Vitals (Desktop) ===');
const dMetrics = desktop.audits;
console.log('LCP:', Math.round(dMetrics['largest-contentful-paint'].numericValue), 'ms');
console.log('FID (max potential):', Math.round(dMetrics['max-potential-fid'].numericValue), 'ms');
console.log('CLS:', dMetrics['cumulative-layout-shift'].numericValue.toFixed(3));
console.log('TTFB:', Math.round(dMetrics['server-response-time'].numericValue), 'ms');
console.log('TBT:', Math.round(dMetrics['total-blocking-time'].numericValue), 'ms');

console.log('\n=== Web Vitals (Mobile) ===');
const mMetrics = mobile.audits;
console.log('LCP:', Math.round(mMetrics['largest-contentful-paint'].numericValue), 'ms');
console.log('FID (max potential):', Math.round(mMetrics['max-potential-fid'].numericValue), 'ms');
console.log('CLS:', mMetrics['cumulative-layout-shift'].numericValue.toFixed(3));
console.log('TTFB:', Math.round(mMetrics['server-response-time'].numericValue), 'ms');
console.log('TBT:', Math.round(mMetrics['total-blocking-time'].numericValue), 'ms');

// Save summary
const summary = {
  timestamp: '${TIMESTAMP}',
  url: '${URL}',
  desktop: {
    performance: Math.round(desktop.categories.performance.score * 100),
    lcp: Math.round(dMetrics['largest-contentful-paint'].numericValue),
    fid: Math.round(dMetrics['max-potential-fid'].numericValue),
    cls: dMetrics['cumulative-layout-shift'].numericValue,
  },
  mobile: {
    performance: Math.round(mobile.categories.performance.score * 100),
    lcp: Math.round(mMetrics['largest-contentful-paint'].numericValue),
    fid: Math.round(mMetrics['max-potential-fid'].numericValue),
    cls: mMetrics['cumulative-layout-shift'].numericValue,
  }
};

fs.writeFileSync('${OUTPUT_DIR}/summary-${TIMESTAMP}.json', JSON.stringify(summary, null, 2));
console.log('\nSummary saved to: ${OUTPUT_DIR}/summary-${TIMESTAMP}.json');
"

echo -e "\n${GREEN}✓ Lighthouse audit complete${NC}"
echo "Reports saved to: $OUTPUT_DIR"
echo "  - desktop-${TIMESTAMP}.report.html"
echo "  - mobile-${TIMESTAMP}.report.html"
echo "  - summary-${TIMESTAMP}.json"

exit 0
commands/optimize/.scripts/query-profiler.sh (Executable file, 226 lines)
@@ -0,0 +1,226 @@
#!/bin/bash
# Purpose: Profile database queries and identify slow operations
# Version: 1.0.0
# Usage: ./query-profiler.sh <database-url> [threshold-ms] [output-dir]
# Returns: 0=success, 1=profiling failed, 2=invalid arguments
# Dependencies: psql (PostgreSQL) or mysql (MySQL)

set -euo pipefail

# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Arguments
DATABASE_URL="${1:-}"
THRESHOLD_MS="${2:-500}"
OUTPUT_DIR="${3:-./query-profiles}"

# Validate arguments
if [ -z "$DATABASE_URL" ]; then
    echo -e "${RED}Error: Database URL is required${NC}"
    echo "Usage: $0 <database-url> [threshold-ms] [output-dir]"
    echo "Example: $0 postgresql://user:pass@localhost:5432/dbname 500 ./reports"
    exit 2
fi

# Create output directory
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)

echo -e "${GREEN}Starting database query profiling${NC}"
echo "Threshold: ${THRESHOLD_MS}ms"
echo "Output directory: $OUTPUT_DIR"

# Detect database type
if [[ "$DATABASE_URL" == postgresql://* ]] || [[ "$DATABASE_URL" == postgres://* ]]; then
    DB_TYPE="postgresql"
elif [[ "$DATABASE_URL" == mysql://* ]]; then
    DB_TYPE="mysql"
else
    echo -e "${YELLOW}Warning: Could not detect database type from URL${NC}"
    DB_TYPE="unknown"
fi

echo "Database type: $DB_TYPE"

# PostgreSQL profiling
if [ "$DB_TYPE" = "postgresql" ]; then
    echo -e "\n${YELLOW}Running PostgreSQL query analysis...${NC}"

    # Enable pg_stat_statements if not already enabled
    psql "$DATABASE_URL" -c "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;" 2>/dev/null || true

    # Get slow queries
    echo "Finding slow queries (>${THRESHOLD_MS}ms)..."
    psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv" <<EOF
SELECT
    substring(query, 1, 100) AS short_query,
    calls,
    round(mean_exec_time::numeric, 2) AS avg_time_ms,
    round(max_exec_time::numeric, 2) AS max_time_ms,
    round(total_exec_time::numeric, 2) AS total_time_ms,
    round((100 * total_exec_time / sum(total_exec_time) OVER ())::numeric, 2) AS pct_total_time
FROM pg_stat_statements
WHERE mean_exec_time > ${THRESHOLD_MS}
    AND query NOT LIKE '%pg_stat_statements%'
ORDER BY mean_exec_time DESC
LIMIT 50;
EOF

    # Get most called queries
    echo "Finding most frequently called queries..."
    psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/frequent-queries-${TIMESTAMP}.csv" <<EOF
SELECT
    substring(query, 1, 100) AS short_query,
    calls,
    round(mean_exec_time::numeric, 2) AS avg_time_ms,
    round(total_exec_time::numeric, 2) AS total_time_ms
FROM pg_stat_statements
WHERE query NOT LIKE '%pg_stat_statements%'
ORDER BY calls DESC
LIMIT 50;
EOF

    # Get index usage statistics
    echo "Analyzing index usage..."
    psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/index-usage-${TIMESTAMP}.csv" <<EOF
SELECT
    schemaname,
    tablename,
    indexname,
    idx_scan,
    idx_tup_read,
    idx_tup_fetch,
    pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
ORDER BY idx_scan ASC
LIMIT 50;
EOF

    # Find missing indexes (tables with sequential scans)
    echo "Finding potential missing indexes..."
    psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/missing-indexes-${TIMESTAMP}.csv" <<EOF
SELECT
    schemaname,
    tablename,
    seq_scan,
    seq_tup_read,
    idx_scan,
    CASE WHEN seq_scan > 0 THEN seq_tup_read / seq_scan ELSE 0 END AS avg_seq_read,
    pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS table_size
FROM pg_stat_user_tables
WHERE seq_scan > 1000
    AND (idx_scan = 0 OR seq_scan > idx_scan)
ORDER BY seq_tup_read DESC
LIMIT 30;
EOF

    # Table statistics
    echo "Gathering table statistics..."
    psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/table-stats-${TIMESTAMP}.csv" <<EOF
SELECT
    schemaname,
    tablename,
    pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS total_size,
    n_live_tup,
    n_dead_tup,
    CASE WHEN n_live_tup > 0 THEN round((n_dead_tup::numeric / n_live_tup) * 100, 2) ELSE 0 END AS dead_pct,
    last_vacuum,
    last_autovacuum
FROM pg_stat_user_tables
ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC
LIMIT 30;
EOF

    # Generate text report
    echo -e "\n${GREEN}=== Slow Queries Summary ===${NC}"
    echo "Queries slower than ${THRESHOLD_MS}ms:"
    head -10 "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv" | column -t -s','

    echo -e "\n${GREEN}=== Most Frequent Queries ===${NC}"
    head -10 "${OUTPUT_DIR}/frequent-queries-${TIMESTAMP}.csv" | column -t -s','

    echo -e "\n${GREEN}=== Potential Missing Indexes ===${NC}"
    head -10 "${OUTPUT_DIR}/missing-indexes-${TIMESTAMP}.csv" | column -t -s','

    echo -e "\n${YELLOW}=== Recommendations ===${NC}"

    # Check for unused indexes
    UNUSED_INDEXES=$(awk -F',' '$4 == 0' "${OUTPUT_DIR}/index-usage-${TIMESTAMP}.csv" | wc -l)
    if [ "$UNUSED_INDEXES" -gt 0 ]; then
        echo -e "${YELLOW}⚠ Found $UNUSED_INDEXES unused indexes (0 scans)${NC}"
        echo "  Consider removing to save space and improve write performance"
    fi

    # Check for missing indexes (the CSV has no header row, so count lines directly)
    MISSING_INDEXES=$(wc -l < "${OUTPUT_DIR}/missing-indexes-${TIMESTAMP}.csv" | tr -d ' ')
    if [ "$MISSING_INDEXES" -gt 0 ]; then
        echo -e "${YELLOW}⚠ Found $MISSING_INDEXES tables with high sequential scans${NC}"
        echo "  Consider adding indexes on frequently queried columns"
    fi

    # Check for bloated tables
    BLOATED_TABLES=$(awk -F',' '$6 > 20' "${OUTPUT_DIR}/table-stats-${TIMESTAMP}.csv" | wc -l)
    if [ "$BLOATED_TABLES" -gt 0 ]; then
        echo -e "${YELLOW}⚠ Found $BLOATED_TABLES tables with >20% dead tuples${NC}"
        echo "  Run VACUUM ANALYZE on these tables"
    fi

# MySQL profiling
elif [ "$DB_TYPE" = "mysql" ]; then
    echo -e "\n${YELLOW}Running MySQL query analysis...${NC}"
    # Note: the mysql client does not parse mysql:// URLs; pass connection
    # details via --host/--user/--password or a defaults file if needed.

    # Enable slow query log temporarily
    mysql "$DATABASE_URL" -e "SET GLOBAL slow_query_log = 'ON';" 2>/dev/null || true
    mysql "$DATABASE_URL" -e "SET GLOBAL long_query_time = $(echo "scale=3; $THRESHOLD_MS/1000" | bc);" 2>/dev/null || true

    echo "Analyzing query performance..."
    mysql "$DATABASE_URL" -e "
    SELECT
        DIGEST_TEXT AS query,
        COUNT_STAR AS exec_count,
        ROUND(AVG_TIMER_WAIT/1000000000, 2) AS avg_time_ms,
        ROUND(MAX_TIMER_WAIT/1000000000, 2) AS max_time_ms,
        ROUND(SUM_TIMER_WAIT/1000000000, 2) AS total_time_ms
    FROM performance_schema.events_statements_summary_by_digest
    WHERE AVG_TIMER_WAIT/1000000000 > ${THRESHOLD_MS}
    ORDER BY AVG_TIMER_WAIT DESC
    LIMIT 50;
    " > "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.txt"

    echo -e "${GREEN}Query analysis complete${NC}"
    cat "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.txt"

else
    echo -e "${RED}Error: Unsupported database type${NC}"
    exit 1
fi

# Generate JSON summary
SLOW_QUERIES_CSV="${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv"
SLOW_QUERY_COUNT=$([ -f "$SLOW_QUERIES_CSV" ] && wc -l < "$SLOW_QUERIES_CSV" | tr -d ' ' || echo "0")

cat > "${OUTPUT_DIR}/summary-${TIMESTAMP}.json" <<EOF
{
  "timestamp": "${TIMESTAMP}",
  "databaseType": "${DB_TYPE}",
  "thresholdMs": ${THRESHOLD_MS},
  "slowQueryCount": ${SLOW_QUERY_COUNT},
  "unusedIndexes": ${UNUSED_INDEXES:-0},
  "potentialMissingIndexes": ${MISSING_INDEXES:-0},
  "bloatedTables": ${BLOATED_TABLES:-0}
}
EOF

echo -e "\n${GREEN}✓ Query profiling complete${NC}"
echo "Results saved to:"
echo "  - ${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv"
echo "  - ${OUTPUT_DIR}/frequent-queries-${TIMESTAMP}.csv"
echo "  - ${OUTPUT_DIR}/index-usage-${TIMESTAMP}.csv"
echo "  - ${OUTPUT_DIR}/missing-indexes-${TIMESTAMP}.csv"
echo "  - ${OUTPUT_DIR}/table-stats-${TIMESTAMP}.csv"
echo "  - ${OUTPUT_DIR}/summary-${TIMESTAMP}.json"

exit 0
commands/optimize/README.md (Normal file, 544 lines)
@@ -0,0 +1,544 @@
# Optimize Skill

Comprehensive performance optimization across database, backend, frontend, and infrastructure layers for full-stack applications.

## Overview

The `/10x-fullstack-engineer:optimize` skill provides systematic performance optimization capabilities covering all layers of a modern web application. It identifies bottlenecks, implements optimizations, and measures improvements across:

- **Database**: Query optimization, indexing, connection pooling, caching
- **Backend**: API performance, algorithm efficiency, concurrency, caching
- **Frontend**: Bundle size, rendering, Web Vitals, asset optimization
- **Infrastructure**: Auto-scaling, CDN, resource allocation, cost efficiency
- **Benchmarking**: Load testing, rendering benchmarks, regression detection

## Available Operations

### 1. analyze
**Purpose**: Comprehensive performance analysis with bottleneck identification

Performs deep analysis across all application layers, establishes baseline metrics, and creates a prioritized optimization opportunity matrix.

**Usage**:
```bash
/10x-fullstack-engineer:optimize analyze target:"production app" scope:all metrics:"baseline"
```

**Parameters**:
- `target` (required): Application or component to analyze (e.g., "user dashboard", "checkout flow")
- `scope` (optional): Layer focus - `frontend`, `backend`, `database`, `infrastructure`, or `all` (default: `all`)
- `metrics` (optional): Metrics mode - `baseline` or `compare` (default: `baseline`)
- `baseline` (optional): Baseline version for comparison (e.g., "v1.2.0")

**What it does**:
- Runs Lighthouse audits for frontend performance
- Profiles backend API response times
- Analyzes database slow queries and index usage
- Checks infrastructure resource utilization
- Identifies bottlenecks using a detection matrix
- Creates a prioritized optimization opportunity matrix
- Generates a comprehensive performance profile

**Example Output**:
- Performance snapshot with metrics across all layers
- Bottleneck identification with severity ratings
- Optimization opportunity matrix with ROI estimates
- Recommended action plan with phases

---

### 2. database
**Purpose**: Database query and schema optimization

Optimizes slow queries, adds missing indexes, fixes N+1 problems, and improves connection pool configuration.

**Usage**:
```bash
/10x-fullstack-engineer:optimize database target:queries context:"slow SELECT statements" threshold:500ms
```

**Parameters**:
- `target` (required): What to optimize - `queries`, `schema`, `indexes`, `connections`, or `all`
- `context` (optional): Specific details (table names, query patterns)
- `threshold` (optional): Time threshold for slow queries in milliseconds (default: 500ms)
- `environment` (optional): Target environment (default: development)

**Key Optimizations**:
- **Query Analysis**: Identifies slow queries using pg_stat_statements, MySQL performance schema, or the MongoDB profiler
- **Index Creation**: Adds missing indexes based on query patterns and table scans
- **N+1 Query Fixes**: Converts sequential queries to eager loading or joins
- **Connection Pooling**: Optimizes pool size and configuration
- **Query Caching**: Implements Redis or materialized views for frequently accessed data
- **Schema Optimization**: Denormalization, partitioning, column type optimization

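To make the N+1 fix concrete, here is a minimal TypeScript sketch of the pattern, assuming a generic `query(sql, params)` helper against PostgreSQL; the table and column names are illustrative, not part of the skill:

```ts
// Hypothetical query helper; any SQL client with parameterized queries works here.
declare function query<T>(sql: string, params: unknown[]): Promise<T[]>;

interface Post { id: number; authorId: number; title: string }
interface User { id: number; name: string }

// N+1: one query for the posts, then one query per author (slow).
async function postsWithAuthorsSlow(): Promise<Array<Post & { author?: User }>> {
  const posts = await query<Post>('SELECT * FROM posts LIMIT 100', []);
  return Promise.all(
    posts.map(async (p) => ({
      ...p,
      author: (await query<User>('SELECT * FROM users WHERE id = $1', [p.authorId]))[0],
    })),
  );
}

// Fix: batch all author IDs into a single query and join in memory.
async function postsWithAuthorsFast(): Promise<Array<Post & { author?: User }>> {
  const posts = await query<Post>('SELECT * FROM posts LIMIT 100', []);
  const authorIds = [...new Set(posts.map((p) => p.authorId))];
  const users = await query<User>('SELECT * FROM users WHERE id = ANY($1)', [authorIds]);
  const byId = new Map(users.map((u) => [u.id, u]));
  return posts.map((p) => ({ ...p, author: byId.get(p.authorId) }));
}
```

The batched version issues two queries regardless of result size, which is where improvements like those below typically come from.
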
**Example Results**:
- User email lookup: 450ms → 8ms (98% faster) by adding an index
- Posts with join: 820ms → 45ms (95% faster) by fixing an N+1 problem
- Pagination: 1,200ms → 15ms (99% faster) with a cursor-based approach

---

### 3. backend
**Purpose**: Backend API and algorithm optimization

Optimizes API response times, algorithm complexity, caching strategies, and concurrency handling.

**Usage**:
```bash
/10x-fullstack-engineer:optimize backend target:api endpoints:"/api/users,/api/products" load_profile:high
```

**Parameters**:
- `target` (required): What to optimize - `api`, `algorithms`, `caching`, `concurrency`, or `all`
- `endpoints` (optional): Specific API endpoints (comma-separated)
- `load_profile` (optional): Expected load - `low`, `medium`, `high` (default: medium)
- `priority` (optional): Optimization priority - `low`, `medium`, `high`, `critical` (default: high)

**Key Optimizations**:
- **N+1 Query Elimination**: Converts sequential database calls to eager loading or DataLoader batching
- **Response Caching**: Implements Redis caching with TTL and invalidation strategies
- **Compression**: Adds gzip/brotli compression (70-80% size reduction)
- **Algorithm Complexity**: Replaces O(n²) operations with O(n) using Map/Set
- **Parallelization**: Uses Promise.all for independent async operations
- **Request Batching**: Batches multiple requests into a single database query
- **JSON Serialization**: Uses fast-json-stringify for known schemas (2-3x faster)
- **Middleware Optimization**: Applies middleware selectively to reduce overhead

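As an illustration of the caching and parallelization items above, here is a hedged TypeScript sketch assuming an ioredis-style client; the loader functions are hypothetical placeholders:

```ts
import Redis from 'ioredis'; // assumes the ioredis package is installed

// Hypothetical loaders standing in for real data access.
declare function fetchProfile(userId: string): Promise<unknown>;
declare function fetchFeed(userId: string): Promise<unknown>;
declare function fetchNotifications(userId: string): Promise<unknown>;

const redis = new Redis();

// Cache-aside with TTL: serve from Redis when possible, otherwise load and store.
async function cached<T>(key: string, ttlSeconds: number, load: () => Promise<T>): Promise<T> {
  const hit = await redis.get(key);
  if (hit !== null) return JSON.parse(hit) as T;
  const value = await load();
  await redis.set(key, JSON.stringify(value), 'EX', ttlSeconds);
  return value;
}

// Parallelize independent lookups instead of awaiting them one by one.
async function dashboard(userId: string) {
  const [profile, feed, notifications] = await Promise.all([
    cached(`profile:${userId}`, 300, () => fetchProfile(userId)),
    cached(`feed:${userId}`, 60, () => fetchFeed(userId)),
    fetchNotifications(userId), // per-user and volatile, so left uncached
  ]);
  return { profile, feed, notifications };
}
```
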
**Example Results**:
- API feed endpoint: 850ms → 95ms (89% faster) by fixing N+1 queries
- Response caching: 82% cache hit rate, 82% database load reduction
- Algorithm optimization: 2,400ms → 12ms (99.5% faster) for O(n²) → O(n) conversion
- Parallelization: 190ms → 80ms (58% faster) for independent queries

---

### 4. frontend
**Purpose**: Frontend bundle and rendering optimization

Reduces bundle size, optimizes rendering performance, improves Web Vitals, and optimizes asset loading.

**Usage**:
```bash
/10x-fullstack-engineer:optimize frontend target:all pages:"checkout,dashboard" framework:react
```

**Parameters**:
- `target` (required): What to optimize - `bundles`, `rendering`, `assets`, `images`, `fonts`, or `all`
- `pages` (optional): Specific pages (comma-separated)
- `metrics_target` (optional): Target Lighthouse score (e.g., "lighthouse>90")
- `framework` (optional): Framework - `react`, `vue`, `angular`, `svelte` (auto-detected)

**Key Optimizations**:
- **Code Splitting**: Lazy load routes and components (70-80% smaller initial bundle)
- **Tree Shaking**: Remove unused code with proper imports (90%+ reduction for lodash/moment)
- **Dependency Optimization**: Replace heavy libraries (moment → date-fns: 95% smaller)
- **React Memoization**: Use React.memo, useMemo, useCallback to prevent re-renders
- **Virtual Scrolling**: Render only visible items (98% faster for large lists)
- **Image Optimization**: Modern formats (WebP/AVIF: 80-85% smaller), lazy loading, responsive srcset
- **Font Optimization**: Variable fonts, font-display: swap, preload critical fonts
- **Critical CSS**: Inline above-the-fold CSS, defer non-critical
- **Web Vitals**: Optimize LCP, FID/INP, CLS

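Route-level code splitting, the first item above, can be sketched for React with `React.lazy`; the page modules here are hypothetical:

```tsx
import { lazy, Suspense } from 'react';
import { BrowserRouter, Routes, Route } from 'react-router-dom';

// Each lazy route becomes its own chunk, so the initial bundle contains
// only the app shell plus the route the user actually lands on.
const Dashboard = lazy(() => import('./pages/Dashboard')); // hypothetical modules
const Checkout = lazy(() => import('./pages/Checkout'));

export function App() {
  return (
    <BrowserRouter>
      <Suspense fallback={<p>Loading…</p>}>
        <Routes>
          <Route path="/dashboard" element={<Dashboard />} />
          <Route path="/checkout" element={<Checkout />} />
        </Routes>
      </Suspense>
    </BrowserRouter>
  );
}
```
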
**Example Results**:
- Bundle size: 2.5MB → 650KB (74% smaller)
- Initial load: 3.8s → 1.2s (68% faster)
- LCP: 4.2s → 1.8s (57% faster)
- Virtual scrolling: 2,500ms → 45ms (98% faster) for 10,000 items
- Hero image: 1.2MB → 180KB (85% smaller) with AVIF

---

### 5. infrastructure
**Purpose**: Infrastructure and deployment optimization

Optimizes auto-scaling, CDN configuration, resource allocation, deployment strategies, and cost efficiency.

**Usage**:
```bash
/10x-fullstack-engineer:optimize infrastructure target:scaling environment:production provider:aws
```

**Parameters**:
- `target` (required): What to optimize - `scaling`, `cdn`, `resources`, `deployment`, `costs`, or `all`
- `environment` (optional): Target environment (default: production)
- `provider` (optional): Cloud provider - `aws`, `azure`, `gcp`, `vercel` (auto-detected)
- `budget_constraint` (optional): Prioritize cost reduction (default: false)

**Key Optimizations**:
- **Auto-Scaling**: Horizontal/vertical pod autoscaling (HPA/VPA), AWS Auto Scaling Groups
- **CDN Configuration**: CloudFront, cache headers, compression, immutable assets
- **Resource Right-Sizing**: Optimize CPU/memory requests based on actual usage (50-60% savings)
- **Container Optimization**: Multi-stage builds, Alpine base images (85% smaller)
- **Blue-Green Deployment**: Zero-downtime deployments with instant rollback
- **Spot Instances**: Use for batch jobs (70-90% cost savings)
- **Storage Lifecycle**: Auto-archive to Glacier (80%+ cost reduction)
- **Reserved Instances**: Convert stable workloads (37% savings)

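The cache-header side of the CDN item can be sketched in a few lines of Express/TypeScript; the `dist/` layout and one-year lifetime are assumptions to adapt to your own build:

```ts
import express from 'express';

const app = express();

// Fingerprinted assets (e.g. app.3f9c1a.js) are safe to cache "forever",
// while the HTML shell must stay revalidatable so deploys take effect.
app.use(
  '/assets',
  express.static('dist/assets', {
    immutable: true,
    maxAge: '1y', // Cache-Control: public, max-age=31536000, immutable
  }),
);

app.get('*', (_req, res) => {
  res.set('Cache-Control', 'no-cache'); // always revalidate the HTML shell
  res.sendFile('index.html', { root: 'dist' });
});

app.listen(3000);
```
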
**Example Results**:
- Auto-scaling: Off-peak 8 pods (47% reduction), peak 25 pods (67% increase)
- Resource right-sizing: 62% CPU reduction, 61% memory reduction per pod
- CDN: 85% origin request reduction, 84% faster TTFB (750ms → 120ms)
- Container images: 1.2GB → 180MB (85% smaller)
- Total cost: $7,100/month → $4,113/month (42% reduction, $35,844/year savings)

---

### 6. benchmark
**Purpose**: Performance benchmarking and regression testing

Performs load testing, rendering benchmarks, and database query benchmarks, and detects performance regressions.

**Usage**:
```bash
/10x-fullstack-engineer:optimize benchmark type:load baseline:"v1.2.0" duration:300s concurrency:100
```

**Parameters**:
- `type` (required): Benchmark type - `load`, `rendering`, `query`, `integration`, or `all`
- `baseline` (optional): Baseline version for comparison (e.g., "v1.2.0")
- `duration` (optional): Test duration in seconds (default: 60s)
- `concurrency` (optional): Concurrent users/connections (default: 50)
- `target` (optional): Specific URL or endpoint

**Key Capabilities**:
- **Load Testing**: k6-based load tests with configurable scenarios (constant, spike, stress)
- **Rendering Benchmarks**: Lighthouse CI for Web Vitals and performance scores
- **Query Benchmarks**: pgbench or custom scripts for database performance
- **E2E Benchmarks**: Playwright/Puppeteer for user flow performance
- **Baseline Management**: Save and compare performance across versions
- **Regression Detection**: Automated detection with configurable thresholds
- **CI/CD Integration**: GitHub Actions workflow for continuous monitoring

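Regression detection can be as simple as diffing two benchmark summaries. This TypeScript sketch assumes flat metric-name → value JSON files and a 10% tolerance, both illustrative choices, and treats higher values as worse (throughput-style metrics would need inverting):

```ts
import { readFileSync } from 'node:fs';

type Summary = Record<string, number>; // metric name -> value (ms, %, req/s, ...)

const TOLERANCE = 0.10; // flag anything more than 10% worse than baseline

function detectRegressions(baselineFile: string, currentFile: string): string[] {
  const baseline: Summary = JSON.parse(readFileSync(baselineFile, 'utf8'));
  const current: Summary = JSON.parse(readFileSync(currentFile, 'utf8'));
  const regressions: string[] = [];
  for (const [metric, base] of Object.entries(baseline)) {
    const now = current[metric];
    if (now !== undefined && now > base * (1 + TOLERANCE)) {
      const pct = (((now - base) / base) * 100).toFixed(1);
      regressions.push(`${metric}: ${base} -> ${now} (+${pct}%)`);
    }
  }
  return regressions;
}

const found = detectRegressions('baseline.json', 'current.json');
if (found.length > 0) {
  console.error('Performance regressions detected:\n' + found.join('\n'));
  process.exit(1); // fail the CI job
}
```
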
**Example Results**:
- Load test: 150.77 req/s, p95: 245ms, 0.02% errors
- Lighthouse: Performance score 94 (+32 from baseline)
- Query benchmark: User lookup 8ms avg (98% faster than baseline)
- Regression detection: 12 metrics improved, 0 regressions

---

## Common Workflows

### 1. Full Application Optimization

```bash
# Step 1: Analyze overall performance
/10x-fullstack-engineer:optimize analyze target:"production app" scope:all metrics:"baseline"

# Step 2: Optimize based on analysis priorities
/10x-fullstack-engineer:optimize database target:all context:"queries from analysis" threshold:200ms
/10x-fullstack-engineer:optimize backend target:api endpoints:"/api/search,/api/feed" priority:high
/10x-fullstack-engineer:optimize frontend target:all pages:"checkout,dashboard" framework:react

# Step 3: Benchmark improvements
/10x-fullstack-engineer:optimize benchmark type:all baseline:"pre-optimization" duration:600s

# Step 4: Optimize infrastructure for efficiency
/10x-fullstack-engineer:optimize infrastructure target:costs environment:production budget_constraint:true
```

### 2. Frontend Performance Sprint

```bash
# Analyze frontend baseline
/10x-fullstack-engineer:optimize analyze target:"web app" scope:frontend metrics:"baseline"

# Optimize bundles and rendering
/10x-fullstack-engineer:optimize frontend target:bundles pages:"home,dashboard,profile" framework:react
/10x-fullstack-engineer:optimize frontend target:rendering pages:"dashboard" framework:react

# Optimize assets
/10x-fullstack-engineer:optimize frontend target:images pages:"home,product"
/10x-fullstack-engineer:optimize frontend target:fonts pages:"all"

# Benchmark results
/10x-fullstack-engineer:optimize benchmark type:rendering baseline:"pre-sprint" duration:60s
```

### 3. Backend API Performance

```bash
# Analyze backend performance
/10x-fullstack-engineer:optimize analyze target:"REST API" scope:backend metrics:"baseline"

# Fix slow queries first
/10x-fullstack-engineer:optimize database target:queries threshold:200ms context:"from analysis"

# Optimize API layer
/10x-fullstack-engineer:optimize backend target:api endpoints:"/api/users,/api/posts" load_profile:high

# Add caching
/10x-fullstack-engineer:optimize backend target:caching endpoints:"/api/users,/api/posts"

# Benchmark under load
/10x-fullstack-engineer:optimize benchmark type:load baseline:"pre-optimization" duration:300s concurrency:200
```

### 4. Cost Optimization

```bash
# Analyze infrastructure costs
/10x-fullstack-engineer:optimize analyze target:"production" scope:infrastructure metrics:"baseline"

# Right-size resources
/10x-fullstack-engineer:optimize infrastructure target:resources environment:production budget_constraint:true

# Optimize scaling
/10x-fullstack-engineer:optimize infrastructure target:scaling environment:production

# Configure CDN to reduce bandwidth
/10x-fullstack-engineer:optimize infrastructure target:cdn environment:production

# Optimize storage costs
/10x-fullstack-engineer:optimize infrastructure target:costs environment:production budget_constraint:true
```

### 5. Regression Testing

```bash
# Save baseline before changes
/10x-fullstack-engineer:optimize benchmark type:all baseline:"v1.5.0" duration:300s

# After implementing changes, compare
/10x-fullstack-engineer:optimize benchmark type:all baseline:"v1.5.0" duration:300s

# Analyze specific regressions
/10x-fullstack-engineer:optimize analyze target:"changed endpoints" scope:backend metrics:"compare" baseline:"v1.5.0"
```

---

## Performance Metrics and Thresholds

### Frontend (Web Vitals)
- **LCP** (Largest Contentful Paint): Good < 2.5s, Needs Improvement 2.5-4s, Poor > 4s
- **FID** (First Input Delay): Good < 100ms, Needs Improvement 100-300ms, Poor > 300ms
- **INP** (Interaction to Next Paint): Good < 200ms, Needs Improvement 200-500ms, Poor > 500ms
- **CLS** (Cumulative Layout Shift): Good < 0.1, Needs Improvement 0.1-0.25, Poor > 0.25
- **TTFB** (Time to First Byte): Good < 600ms, Needs Improvement 600-1000ms, Poor > 1000ms
- **Bundle Size**: Target < 500KB initial (gzipped)

### Backend (API Performance)
- **p50 Response Time**: Target < 200ms
- **p95 Response Time**: Target < 500ms
- **p99 Response Time**: Target < 1000ms
- **Throughput**: Varies by application, track baseline
- **Error Rate**: Target < 1%

### Database (Query Performance)
- **Average Query Time**: Target < 100ms
- **Slow Query Count**: Target 0 queries > 500ms
- **Cache Hit Rate**: Target > 85%
- **Connection Pool Utilization**: Target < 75%

### Infrastructure (Resource Utilization)
- **CPU Utilization**: Target 50-75% (allows headroom)
- **Memory Utilization**: Target < 70%
- **Disk I/O Wait**: Target < 5%
- **Network Utilization**: Track baseline

---

## Layer-Specific Guidance

### Database Optimization Priorities
1. **Add missing indexes** - Highest ROI for slow queries
2. **Fix N+1 query problems** - Often 90%+ improvement
3. **Implement caching** - Reduce database load by 70-90%
4. **Optimize connection pool** - Eliminate connection timeouts
5. **Schema optimization** - Denormalization, partitioning for scale

### Backend Optimization Priorities
1. **Cache frequently accessed data** - 80%+ reduction in database calls
2. **Fix N+1 problems** - Replace sequential queries with batch operations
3. **Parallelize independent operations** - 50%+ improvement for I/O-bound work
4. **Add response compression** - 70-80% bandwidth reduction
5. **Optimize algorithms** - Replace O(n²) with O(n) for large datasets

### Frontend Optimization Priorities
1. **Code splitting by route** - 70-80% smaller initial bundle
2. **Replace heavy dependencies** - Often 90%+ savings (moment → date-fns)
3. **Optimize images** - 80-85% smaller with modern formats
4. **Implement lazy loading** - Images, components, routes
5. **Optimize rendering** - Memoization, virtual scrolling for lists

### Infrastructure Optimization Priorities
1. **Enable auto-scaling** - 30-50% cost savings with same performance
2. **Right-size resources** - 50-60% savings on over-provisioned workloads
3. **Configure CDN** - 80%+ origin request reduction
4. **Use reserved instances** - 30-40% savings for stable workloads
5. **Optimize storage lifecycle** - 70-80% savings for old data

---

## Typical Performance Improvements

Based on real-world optimizations, expect:

### Database
- **Index addition**: 95-98% query speedup (450ms → 8ms)
- **N+1 fix**: 90-95% improvement (2,100ms → 180ms)
- **Caching**: 70-90% database load reduction
- **Connection pooling**: Eliminates timeout errors

### Backend
- **N+1 elimination**: 85-95% faster (850ms → 95ms)
- **Caching**: 80%+ cache hit rates, 80% load reduction
- **Compression**: 70-80% bandwidth savings
- **Algorithm optimization**: 99%+ for O(n²) → O(n) (2,400ms → 12ms)
- **Parallelization**: 50-60% faster (190ms → 80ms)

### Frontend
- **Code splitting**: 70-80% smaller initial bundle (2.5MB → 650KB)
- **Dependency optimization**: 90-95% savings (moment → date-fns)
- **Image optimization**: 80-85% smaller (1.2MB → 180KB)
- **Virtual scrolling**: 98% faster (2,500ms → 45ms)
- **Load time**: 60-70% faster (3.8s → 1.2s)

### Infrastructure
- **Auto-scaling**: 30-50% cost reduction
- **Right-sizing**: 50-60% savings per resource
- **CDN**: 80-85% origin request reduction
- **Reserved instances**: 30-40% savings
- **Overall**: 40-45% total infrastructure cost reduction

---

## Tools and Technologies

### Profiling and Analysis
- **Lighthouse**: Frontend performance audits
- **Chrome DevTools**: Performance profiling, network waterfall
- **pg_stat_statements**: PostgreSQL query analysis
- **clinic.js**: Node.js profiling (doctor, flame, bubbleprof)
- **k6**: Load testing and benchmarking
- **CloudWatch/Prometheus**: Infrastructure metrics

### Optimization Tools
- **webpack-bundle-analyzer**: Bundle size analysis
- **depcheck**: Find unused dependencies
- **React DevTools Profiler**: React rendering analysis
- **redis**: Caching layer
- **ImageOptim/Sharp**: Image optimization
- **Lighthouse CI**: Continuous performance monitoring

### Benchmarking Tools
- **k6**: Load testing with scenarios
- **Lighthouse CI**: Rendering benchmarks
- **pgbench**: Database benchmarking
- **autocannon**: HTTP load testing
- **Playwright**: E2E performance testing

---

## Integration with 10x-Fullstack-Engineer Agent

All optimization operations leverage the **10x-fullstack-engineer** agent for:

- **Expert performance analysis** across all layers
- **Industry best practices** for optimization
- **Trade-off analysis** between performance and maintainability
- **Scalability considerations** for future growth
- **Production-ready implementation** guidance
- **Security considerations** for optimizations
- **Cost-benefit analysis** for infrastructure changes

The agent ensures optimizations are:
- Safe for production deployment
- Maintainable and well-documented
- Aligned with architectural patterns
- Balanced between performance and complexity

---

## Best Practices

### Before Optimizing
1. **Measure first**: Always establish baseline metrics
2. **Identify bottlenecks**: Use profiling to find actual problems
3. **Prioritize**: Focus on high-impact, low-effort optimizations first
4. **Set targets**: Define clear performance goals

### During Optimization
1. **One change at a time**: Measure the impact of each optimization
2. **Preserve functionality**: Ensure tests pass after changes
3. **Document trade-offs**: Record decisions and rationale
4. **Monitor closely**: Watch for unexpected side effects

### After Optimization
1. **Benchmark improvements**: Quantify performance gains
2. **Monitor in production**: Track real-world impact
3. **Set up alerts**: Detect future regressions
4. **Update baselines**: Use new metrics as the baseline for future work

### Continuous Monitoring
1. **Automated benchmarks**: Run in the CI/CD pipeline
2. **Performance budgets**: Fail builds that exceed thresholds
3. **Real user monitoring**: Track actual user experience (see the sketch below)
4. **Regular reviews**: Quarterly performance audits

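For the real-user-monitoring point, a minimal client-side sketch using the `web-vitals` package might look like this; the `/rum` endpoint is a hypothetical collector:

```ts
import { onCLS, onINP, onLCP, type Metric } from 'web-vitals';

// Report each Web Vital to a hypothetical /rum endpoint as users browse.
function report(metric: Metric) {
  const body = JSON.stringify({ name: metric.name, value: metric.value, id: metric.id });
  // sendBeacon survives page unloads; fall back to fetch with keepalive.
  if (!navigator.sendBeacon?.('/rum', body)) {
    fetch('/rum', { method: 'POST', body, keepalive: true }).catch(() => {});
  }
}

onCLS(report);
onINP(report);
onLCP(report);
```
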
---

## Troubleshooting

### Optimization Not Showing Expected Results

**Issue**: Applied an optimization but metrics didn't improve

**Possible causes**:
- Caching not clearing properly (invalidate the cache)
- Different bottleneck than expected (re-profile)
- Configuration not applied (verify deployment)
- Measurement methodology issue (check the profiling setup)

**Solution**: Re-run the analysis to verify the bottleneck, ensure the optimization is deployed, and measure with multiple tools.

### Performance Regression After Deployment

**Issue**: Performance is worse after an optimization

**Possible causes**:
- Optimization introduced a bug or inefficiency
- Cache warming needed
- Auto-scaling not configured properly
- Unexpected load pattern

**Solution**: Compare metrics before/after, roll back if critical, and investigate with profiling tools.

### Benchmarks Not Matching Production

**Issue**: Benchmarks show improvements but production doesn't

**Possible causes**:
- Different load patterns
- Network latency in production
- Database size differences
- Cold cache in production

**Solution**: Use production-like data, run benchmarks under realistic conditions, and allow cache warming time.

---

## Related Skills

- `/test` - Ensure optimizations don't break functionality
- `/deploy` - Deploy optimizations safely to production
- `/monitor` - Track performance metrics over time
- `/architect` - Design scalable architectures from the start

---

## Contributing

When adding new optimizations to this skill:

1. Document the optimization technique
2. Provide before/after examples
3. Include expected performance improvements
4. Add profiling/measurement instructions
5. Document trade-offs and considerations

---

## License

Part of the 10x-fullstack-engineer plugin. See plugin.json for licensing details.
commands/optimize/analyze.md (Normal file, 494 lines)
@@ -0,0 +1,494 @@
|
||||
# Performance Analysis Operation
|
||||
|
||||
You are executing the **analyze** operation to perform comprehensive performance analysis and identify bottlenecks across all application layers.
|
||||
|
||||
## Parameters
|
||||
|
||||
**Received**: `$ARGUMENTS` (after removing 'analyze' operation name)
|
||||
|
||||
Expected format: `target:"area" [scope:"frontend|backend|database|infrastructure|all"] [metrics:"baseline|compare"] [baseline:"version-or-timestamp"]`
|
||||
|
||||
**Parameter definitions**:
|
||||
- `target` (required): Application or component to analyze (e.g., "user dashboard", "checkout flow", "production app")
|
||||
- `scope` (optional): Layer to focus on - `frontend`, `backend`, `database`, `infrastructure`, or `all` (default: `all`)
|
||||
- `metrics` (optional): Metrics mode - `baseline` (establish baseline), `compare` (compare against baseline) (default: `baseline`)
|
||||
- `baseline` (optional): Baseline version or timestamp for comparison (e.g., "v1.2.0", "2025-10-01")
|
||||

## Workflow

### 1. Define Analysis Scope

Based on the `target` and `scope` parameters, determine what to analyze:

**Scope: all** (comprehensive analysis):
- Frontend: Page load, rendering, bundle size
- Backend: API response times, throughput, error rates
- Database: Query performance, connection pools, cache hit rates
- Infrastructure: Resource utilization, scaling efficiency

**Scope: frontend**:
- Web Vitals (LCP, FID, CLS, INP, TTFB, FCP)
- Bundle sizes and composition
- Network waterfall analysis
- Runtime performance (memory, CPU)

**Scope: backend**:
- API endpoint response times (p50, p95, p99)
- Throughput and concurrency handling
- Error rates and types
- Dependency latency (database, external APIs)

**Scope: database**:
- Query execution times
- Index effectiveness
- Connection pool utilization
- Cache hit rates

**Scope: infrastructure**:
- CPU, memory, disk, network utilization
- Container/instance metrics
- Auto-scaling behavior
- CDN effectiveness

### 2. Establish Baseline Metrics

Run comprehensive performance profiling:

**Frontend Profiling**:
```bash
# Lighthouse audit
npx lighthouse [url] --output=json --output-path=./perf-baseline-lighthouse.json

# Bundle analysis
npm run build -- --stats
npx webpack-bundle-analyzer dist/stats.json --mode static --report ./perf-baseline-bundle.html

# Check for unused dependencies
npx depcheck > ./perf-baseline-deps.txt

# Runtime profiling (if applicable)
# Use browser DevTools Performance tab
```

**Backend Profiling**:
```bash
# API response times (if monitoring exists)
# Check APM dashboard or logs

# Profile Node.js application
node --prof app.js
# Then process the profile
node --prof-process isolate-*.log > perf-baseline-profile.txt

# Memory snapshot
node --inspect app.js
# Take heap snapshot via Chrome DevTools

# Load test to get baseline throughput
# (k6 is a standalone binary, not an npm package, so invoke it directly)
k6 run --duration 60s --vus 50 load-test.js
```

**Database Profiling**:
```sql
-- PostgreSQL: Enable pg_stat_statements
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;

-- Capture slow queries
SELECT
  query,
  calls,
  total_exec_time,
  mean_exec_time,
  max_exec_time,
  stddev_exec_time
FROM pg_stat_statements
ORDER BY mean_exec_time DESC
LIMIT 50;

-- Check index usage
SELECT
  schemaname,
  tablename,
  indexname,
  idx_scan,
  idx_tup_read,
  idx_tup_fetch
FROM pg_stat_user_indexes
ORDER BY idx_scan ASC;

-- Table statistics
SELECT
  schemaname,
  tablename,
  n_live_tup,
  n_dead_tup,
  last_vacuum,
  last_autovacuum
FROM pg_stat_user_tables;
```

**Infrastructure Profiling**:
```bash
# Container metrics (if using Docker/Kubernetes)
docker stats --no-stream

# Or for Kubernetes
kubectl top nodes
kubectl top pods

# Server resource utilization
top -b -n 1 | head -20
free -h
df -h
iostat -x 1 5
```
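
When `metrics:"compare"` is used, two Lighthouse JSON files can be diffed directly. A minimal sketch follows; the audit IDs are standard Lighthouse report keys, but the script itself and the second file name are illustrative assumptions:

```javascript
// compare-lighthouse.js: diff two Lighthouse JSON reports on a few key audits
const fs = require('fs');

const AUDITS = ['largest-contentful-paint', 'cumulative-layout-shift', 'total-blocking-time'];

// Usage: node compare-lighthouse.js <baseline.json> <current.json>
const [baseline, current] = process.argv.slice(2)
  .map(p => JSON.parse(fs.readFileSync(p, 'utf8')));

for (const id of AUDITS) {
  const before = baseline.audits[id]?.numericValue;
  const after = current.audits[id]?.numericValue;
  if (before === undefined || after === undefined) continue;
  const delta = ((after - before) / before) * 100;
  console.log(`${id}: ${before.toFixed(1)} -> ${after.toFixed(1)} (${delta.toFixed(1)}%)`);
}
```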

### 3. Identify Bottlenecks

Analyze collected metrics to identify performance bottlenecks:

**Bottleneck Detection Matrix**:

| Layer | Indicator | Severity | Common Causes |
|-------|-----------|----------|---------------|
| **Frontend** | LCP > 2.5s | High | Large images, render-blocking resources, slow TTFB |
| **Frontend** | Bundle > 1MB | Medium | Unused dependencies, no code splitting, large libraries |
| **Frontend** | CLS > 0.1 | Medium | Missing dimensions, dynamic content injection |
| **Frontend** | INP > 200ms | High | Long tasks, unoptimized event handlers |
| **Backend** | p95 > 1000ms | High | Slow queries, N+1 problems, synchronous I/O |
| **Backend** | p99 > 5000ms | Critical | Database locks, resource exhaustion, cascading failures |
| **Backend** | Error rate > 1% | High | Unhandled errors, timeout issues, dependency failures |
| **Database** | Query > 500ms | High | Missing indexes, full table scans, complex joins |
| **Database** | Cache hit < 80% | Medium | Insufficient cache size, poor cache strategy |
| **Database** | Connection pool exhaustion | Critical | Connection leaks, insufficient pool size |
| **Infrastructure** | CPU > 80% | High | Insufficient resources, inefficient algorithms |
| **Infrastructure** | Memory > 90% | Critical | Memory leaks, oversized caches, insufficient resources |

**Prioritization Framework**:

1. **Critical** - Immediate impact on user experience or system stability
2. **High** - Significant performance degradation
3. **Medium** - Noticeable but not blocking
4. **Low** - Minor optimization opportunity
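
A minimal sketch of how these thresholds could be applied programmatically; the metric names and limits mirror the matrix above and the snapshot shape from step 5, but the helper itself is illustrative:

```javascript
// classify-bottlenecks.js: illustrative helper, not part of the skill's tooling
const THRESHOLDS = [
  { layer: 'frontend', metric: 'lcp', max: 2500, severity: 'high' },
  { layer: 'frontend', metric: 'bundle_size', max: 1_000_000, severity: 'medium' },
  { layer: 'backend', metric: 'p95_response_time', max: 1000, severity: 'high' },
  { layer: 'backend', metric: 'p99_response_time', max: 5000, severity: 'critical' },
  { layer: 'database', metric: 'cache_hit_rate', min: 0.8, severity: 'medium' },
  { layer: 'infrastructure', metric: 'cpu_utilization', max: 0.8, severity: 'high' },
];

// snapshot has the shape produced in step 5, e.g. { frontend: { lcp: 3200, ... }, ... }
function classifyBottlenecks(snapshot) {
  return THRESHOLDS.flatMap(({ layer, metric, max, min, severity }) => {
    const value = snapshot[layer]?.[metric];
    if (value === undefined) return [];
    const breached = (max !== undefined && value > max) ||
                     (min !== undefined && value < min);
    return breached ? [{ layer, metric, value, severity }] : [];
  });
}
```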

### 4. Create Optimization Opportunity Matrix

For each identified bottleneck, assess:

**Impact Assessment**:
- Performance improvement potential (low/medium/high)
- Implementation effort (hours/days)
- Risk level (low/medium/high)
- Dependencies on other optimizations

**Optimization Opportunities**:

```markdown
## Opportunity Matrix

| ID | Layer | Issue | Impact | Effort | Priority | Recommendation |
|----|-------|-------|--------|--------|----------|----------------|
| 1 | Database | Missing index on users.email | High | 1h | Critical | Add index immediately |
| 2 | Frontend | Bundle size 2.5MB | High | 4h | High | Implement code splitting |
| 3 | Backend | N+1 query in /api/users | High | 2h | High | Add eager loading |
| 4 | Infrastructure | No CDN for static assets | Medium | 3h | Medium | Configure CloudFront |
| 5 | Frontend | Unoptimized images | Medium | 2h | Medium | Add next/image or similar |
```
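
Where it helps to make the ranking repeatable, the matrix can be scored mechanically. A rough sketch (the impact weights and field names are assumptions, not part of the operation):

```javascript
// rank-opportunities.js: a rough impact-per-hour heuristic (weights are assumptions)
const IMPACT_SCORE = { low: 1, medium: 2, high: 3 };

function rankOpportunities(opportunities) {
  // Higher impact and lower effort float to the top
  return [...opportunities]
    .map(o => ({ ...o, score: IMPACT_SCORE[o.impact] / o.effortHours }))
    .sort((a, b) => b.score - a.score);
}

const ranked = rankOpportunities([
  { id: 1, issue: 'Missing index on users.email', impact: 'high', effortHours: 1 },
  { id: 2, issue: 'Bundle size 2.5MB', impact: 'high', effortHours: 4 },
  { id: 4, issue: 'No CDN for static assets', impact: 'medium', effortHours: 3 },
]);
console.log(ranked.map(o => `${o.issue}: ${o.score.toFixed(2)}`));
```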

### 5. Generate Performance Profile

Create a comprehensive performance profile:

**Performance Snapshot**:
```json
{
  "timestamp": "2025-10-14T12:00:00Z",
  "version": "v1.2.3",
  "environment": "production",
  "metrics": {
    "frontend": {
      "lcp": 3200,
      "fid": 150,
      "cls": 0.15,
      "ttfb": 800,
      "bundle_size": 2500000
    },
    "backend": {
      "p50_response_time": 120,
      "p95_response_time": 850,
      "p99_response_time": 2100,
      "throughput_rps": 450,
      "error_rate": 0.02
    },
    "database": {
      "avg_query_time": 45,
      "slow_query_count": 23,
      "cache_hit_rate": 0.72,
      "connection_pool_utilization": 0.85
    },
    "infrastructure": {
      "cpu_utilization": 0.68,
      "memory_utilization": 0.75,
      "disk_io_wait": 0.03
    }
  },
  "bottlenecks": [
    {
      "id": "BTL001",
      "layer": "frontend",
      "severity": "high",
      "issue": "Large LCP time",
      "metric": "lcp",
      "value": 3200,
      "threshold": 2500,
      "impact": "Poor user experience on initial page load"
    }
  ]
}
```

### 6. Recommend Next Steps

Based on analysis results, recommend:

**Immediate Actions** (Critical bottlenecks):
- List specific optimizations with highest ROI
- Estimated improvement for each
- Implementation order

**Short-term Actions** (High priority):
- Optimizations to tackle in current sprint
- Potential dependencies

**Long-term Actions** (Medium/Low priority):
- Architectural improvements
- Infrastructure upgrades
- Technical debt reduction

## Output Format

```markdown
# Performance Analysis Report: [Target]

**Analysis Date**: [Date and time]
**Analyzed Version**: [Version or commit]
**Environment**: [production/staging/development]
**Scope**: [all/frontend/backend/database/infrastructure]

## Executive Summary

[2-3 paragraph summary of overall findings, critical issues, and recommended priorities]

## Baseline Metrics

### Frontend Performance
| Metric | Value | Status | Threshold |
|--------|-------|--------|-----------|
| LCP (Largest Contentful Paint) | 3.2s | ⚠️ Needs Improvement | < 2.5s |
| FID (First Input Delay) | 150ms | ⚠️ Needs Improvement | < 100ms |
| CLS (Cumulative Layout Shift) | 0.15 | ⚠️ Needs Improvement | < 0.1 |
| TTFB (Time to First Byte) | 800ms | ⚠️ Needs Improvement | < 600ms |
| Bundle Size (gzipped) | 2.5MB | ❌ Poor | < 500KB |

### Backend Performance
| Metric | Value | Status | Threshold |
|--------|-------|--------|-----------|
| P50 Response Time | 120ms | ✅ Good | < 200ms |
| P95 Response Time | 850ms | ⚠️ Needs Improvement | < 500ms |
| P99 Response Time | 2100ms | ❌ Poor | < 1000ms |
| Throughput | 450 req/s | ✅ Good | > 400 req/s |
| Error Rate | 2% | ⚠️ Needs Improvement | < 1% |

### Database Performance
| Metric | Value | Status | Threshold |
|--------|-------|--------|-----------|
| Avg Query Time | 45ms | ✅ Good | < 100ms |
| Slow Query Count (>500ms) | 23 queries | ❌ Poor | 0 queries |
| Cache Hit Rate | 72% | ⚠️ Needs Improvement | > 85% |
| Connection Pool Utilization | 85% | ⚠️ Needs Improvement | < 75% |

### Infrastructure Performance
| Metric | Value | Status | Threshold |
|--------|-------|--------|-----------|
| CPU Utilization | 68% | ✅ Good | < 75% |
| Memory Utilization | 75% | ⚠️ Needs Improvement | < 70% |
| Disk I/O Wait | 3% | ✅ Good | < 5% |

## Bottlenecks Identified

### Critical Priority

#### BTL001: Frontend - Large LCP Time (3.2s)
**Impact**: High - Users experience slow initial page load
**Cause**:
- Large hero image (1.2MB) loaded synchronously
- Render-blocking CSS and JavaScript
- No image optimization

**Recommendation**:
1. Optimize and lazy-load hero image (reduce to <200KB)
2. Defer non-critical CSS/JS
3. Implement resource hints (preload critical assets)
**Expected Improvement**: LCP reduction to ~1.8s (44% improvement)

#### BTL002: Database - Missing Index on users.email
**Impact**: High - Slow user lookup queries affecting multiple endpoints
**Queries Affected**:
```sql
SELECT * FROM users WHERE email = $1; -- 450ms avg
```
**Recommendation**:
```sql
CREATE INDEX CONCURRENTLY idx_users_email ON users(email);
```
**Expected Improvement**: Query time reduction to <10ms (95% improvement)

### High Priority

#### BTL003: Backend - N+1 Query Problem in /api/users Endpoint
**Impact**: High - p95 response time of 850ms
**Cause**:
```javascript
// Current (N+1 problem)
const users = await User.findAll();
for (const user of users) {
  user.posts = await Post.findAll({ where: { userId: user.id } });
}
```
**Recommendation**:
```javascript
// Optimized (eager loading)
const users = await User.findAll({
  include: [{ model: Post, as: 'posts' }]
});
```
**Expected Improvement**: Response time reduction to ~200ms (75% improvement)

#### BTL004: Frontend - Bundle Size 2.5MB
**Impact**: High - Slow initial load, especially on mobile
**Cause**:
- No code splitting
- Unused dependencies (moment.js, lodash full import)
- No tree shaking

**Recommendation**:
1. Implement code splitting by route
2. Replace moment.js with date-fns (92% smaller)
3. Use tree-shakeable imports
```javascript
// Before
import _ from 'lodash';
import moment from 'moment';

// After
import { debounce, throttle } from 'lodash-es';
import { format, parseISO } from 'date-fns';
```
**Expected Improvement**: Bundle reduction to ~800KB (68% improvement)

### Medium Priority

[Additional bottlenecks with similar format]

## Optimization Opportunity Matrix

| ID | Layer | Issue | Impact | Effort | Priority | Est. Improvement |
|----|-------|-------|--------|--------|----------|------------------|
| BTL001 | Frontend | Large LCP | High | 4h | Critical | 44% LCP reduction |
| BTL002 | Database | Missing index | High | 1h | Critical | 95% query speedup |
| BTL003 | Backend | N+1 queries | High | 2h | High | 75% response time reduction |
| BTL004 | Frontend | Bundle size | High | 6h | High | 68% bundle reduction |
| BTL005 | Infrastructure | No CDN | Medium | 3h | Medium | 30% TTFB reduction |
| BTL006 | Database | Low cache hit | Medium | 4h | Medium | 15% query improvement |

## Profiling Data

### Frontend Profiling Results
[Include relevant Lighthouse report summary, bundle analysis, etc.]

### Backend Profiling Results
[Include relevant API response time distribution, slow endpoint list, etc.]

### Database Profiling Results
[Include slow query details, table scan frequency, etc.]

### Infrastructure Profiling Results
[Include resource utilization charts, scaling behavior, etc.]

## Recommended Action Plan

### Phase 1: Critical Fixes (Immediate - 1-2 days)
1. **Add missing database indexes** (BTL002) - 1 hour
   - Estimated improvement: 95% reduction in user lookup query time
2. **Optimize hero image and implement lazy loading** (BTL001) - 4 hours
   - Estimated improvement: 44% LCP reduction

### Phase 2: High-Priority Optimizations (This week - 3-5 days)
1. **Fix N+1 query problems** (BTL003) - 2 hours
   - Estimated improvement: 75% response time reduction on affected endpoints
2. **Implement bundle optimization** (BTL004) - 6 hours
   - Estimated improvement: 68% bundle size reduction

### Phase 3: Infrastructure Improvements (Next sprint - 1-2 weeks)
1. **Configure CDN for static assets** (BTL005) - 3 hours
   - Estimated improvement: 30% TTFB reduction
2. **Optimize database caching strategy** (BTL006) - 4 hours
   - Estimated improvement: 15% overall query performance

## Expected Overall Impact

If all critical and high-priority optimizations are implemented:

| Metric | Current | Expected | Improvement |
|--------|---------|----------|-------------|
| LCP | 3.2s | 1.5s | 53% faster |
| Bundle Size | 2.5MB | 650KB | 74% smaller |
| P95 Response Time | 850ms | 250ms | 71% faster |
| User Lookup Query | 450ms | 8ms | 98% faster |
| Overall Performance Score | 62/100 | 88/100 | +26 points |

## Monitoring Recommendations

After implementing optimizations, monitor these key metrics:

**Frontend**:
- Real User Monitoring (RUM) for Web Vitals
- Bundle size in CI/CD pipeline
- Lighthouse CI for regression detection

**Backend**:
- APM for endpoint response times
- Error rate monitoring
- Database query performance

**Database**:
- Slow query log monitoring
- Index hit rate
- Connection pool metrics

**Infrastructure**:
- Resource utilization alerts
- Auto-scaling triggers
- CDN cache hit rates

## Testing Instructions

### Before Optimization
1. Run Lighthouse audit: `npx lighthouse [url] --output=json --output-path=baseline.json`
2. Capture API metrics: [specify how]
3. Profile database: [SQL queries above]
4. Save baseline for comparison

### After Optimization
1. Repeat all baseline measurements
2. Compare metrics using provided scripts
3. Verify no functionality regressions
4. Monitor for 24-48 hours in production

## Next Steps

1. Review and prioritize optimizations with team
2. Create tasks for Phase 1 critical fixes
3. Implement optimizations using `/optimize [layer]` operations
4. Benchmark improvements using `/optimize benchmark`
5. Document lessons learned and update performance budget

948
commands/optimize/backend.md
Normal file
@@ -0,0 +1,948 @@
# Backend Optimization Operation

You are executing the **backend** operation to optimize backend API performance, algorithms, caching, and concurrency handling.

## Parameters

**Received**: `$ARGUMENTS` (after removing 'backend' operation name)

Expected format: `target:"api|algorithms|caching|concurrency|all" [endpoints:"endpoint-list"] [load_profile:"low|medium|high"] [priority:"low|medium|high|critical"]`

**Parameter definitions**:
- `target` (required): What to optimize - `api`, `algorithms`, `caching`, `concurrency`, or `all`
- `endpoints` (optional): Specific API endpoints to optimize (comma-separated, e.g., "/api/users,/api/posts")
- `load_profile` (optional): Expected load level - `low`, `medium`, `high` (default: medium)
- `priority` (optional): Optimization priority - `low`, `medium`, `high`, `critical` (default: high)
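
For example, an invocation (values illustrative) might look like:

`/optimize backend target:"api" endpoints:"/api/users,/api/feed" load_profile:"high"`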

## Workflow

### 1. Identify Backend Framework and Runtime

Detect backend technology:
```bash
# Check package.json for framework
grep -E "express|fastify|koa|nestjs|hapi" package.json 2>/dev/null

# Check for runtime
node --version 2>/dev/null || echo "No Node.js"
python --version 2>/dev/null || echo "No Python"
go version 2>/dev/null || echo "No Go"
ruby --version 2>/dev/null || echo "No Ruby"

# Check for web framework files
ls -la server.js app.js main.py app.py main.go 2>/dev/null
```

### 2. Profile API Performance

**Node.js Profiling**:
```bash
# Start application with profiling
node --prof app.js

# Or use clinic.js for comprehensive profiling
npx clinic doctor -- node app.js
# Then make requests to your API

# Process the profile
node --prof-process isolate-*.log > profile.txt

# Use clinic.js flame graph
npx clinic flame -- node app.js
```

**API Response Time Analysis**:
```bash
# Create the timing template that curl reads via @curl-format.txt
cat > curl-format.txt <<'EOF'
   time_namelookup:  %{time_namelookup}\n
      time_connect:  %{time_connect}\n
   time_appconnect:  %{time_appconnect}\n
  time_pretransfer:  %{time_pretransfer}\n
     time_redirect:  %{time_redirect}\n
time_starttransfer:  %{time_starttransfer}\n
        time_total:  %{time_total}\n
EOF

# Test endpoint response times
curl -w "@curl-format.txt" -o /dev/null -s "http://localhost:3000/api/users"

# Load test with k6 (standalone binary)
k6 run --vus 50 --duration 30s loadtest.js
```

**APM Tools** (if available):
- New Relic: Check transaction traces
- DataDog: Review APM dashboard
- Application Insights: Analyze dependencies

### 3. API Optimization

#### 3.1. Fix N+1 Query Problems

**Problem Detection**:
```javascript
// BEFORE (N+1 problem)
app.get('/api/users', async (req, res) => {
  const users = await User.findAll(); // 1 query

  for (const user of users) {
    // N additional queries (1 per user)
    user.posts = await Post.findAll({ where: { userId: user.id } });
  }

  res.json(users);
});
// Total: 1 + N queries for N users
```

**Solution - Eager Loading**:
```javascript
// AFTER (eager loading)
app.get('/api/users', async (req, res) => {
  const users = await User.findAll({
    include: [{ model: Post, as: 'posts' }] // Single query with JOIN
  });

  res.json(users);
});
// Total: 1 query
// Performance improvement: ~95% faster for 100 users
```

**Solution - DataLoader (for GraphQL or complex cases)**:
```javascript
const DataLoader = require('dataloader');
const { Op } = require('sequelize'); // modern Sequelize removed the legacy $in alias

// Batch load posts by user IDs
const postLoader = new DataLoader(async (userIds) => {
  const posts = await Post.findAll({
    where: { userId: { [Op.in]: userIds } }
  });

  // Group posts by userId
  const postsByUserId = {};
  posts.forEach(post => {
    if (!postsByUserId[post.userId]) {
      postsByUserId[post.userId] = [];
    }
    postsByUserId[post.userId].push(post);
  });

  // Return posts in same order as userIds
  return userIds.map(id => postsByUserId[id] || []);
});

// Usage
app.get('/api/users', async (req, res) => {
  const users = await User.findAll();

  // Load posts in batch
  await Promise.all(
    users.map(async (user) => {
      user.posts = await postLoader.load(user.id);
    })
  );

  res.json(users);
});
// Total: 2 queries (users + batched posts)
```

#### 3.2. Implement Response Caching

**In-Memory Caching (Simple)**:
```javascript
const cache = new Map();
const CACHE_TTL = 5 * 60 * 1000; // 5 minutes

function cacheMiddleware(key, ttl = CACHE_TTL) {
  return (req, res, next) => {
    const cacheKey = typeof key === 'function' ? key(req) : key;
    const cached = cache.get(cacheKey);

    if (cached && Date.now() - cached.timestamp < ttl) {
      return res.json(cached.data);
    }

    // Override res.json to cache the response
    const originalJson = res.json.bind(res);
    res.json = (data) => {
      cache.set(cacheKey, { data, timestamp: Date.now() });
      return originalJson(data);
    };

    next();
  };
}

// Usage
app.get('/api/users',
  cacheMiddleware(req => `users:${req.query.page || 1}`),
  async (req, res) => {
    const users = await User.findAll();
    res.json(users);
  }
);
```

**Redis Caching (Production)**:
```javascript
const Redis = require('ioredis');
const redis = new Redis(process.env.REDIS_URL);

// Note: the factory itself must be synchronous so it returns middleware
// (not a Promise) when passed straight into app.get()
function cacheMiddleware(keyFn, ttl = 300) {
  return async (req, res, next) => {
    const cacheKey = keyFn(req);

    try {
      const cached = await redis.get(cacheKey);
      if (cached) {
        return res.json(JSON.parse(cached));
      }

      const originalJson = res.json.bind(res);
      res.json = async (data) => {
        await redis.setex(cacheKey, ttl, JSON.stringify(data));
        return originalJson(data);
      };

      next();
    } catch (error) {
      console.error('Cache error:', error);
      next(); // Continue without cache on error
    }
  };
}

// Usage with cache invalidation
app.get('/api/posts/:id', cacheMiddleware(req => `post:${req.params.id}`, 600), async (req, res) => {
  const post = await Post.findByPk(req.params.id);
  res.json(post);
});

app.put('/api/posts/:id', async (req, res) => {
  const post = await Post.update(req.body, { where: { id: req.params.id } });

  // Invalidate cache
  await redis.del(`post:${req.params.id}`);

  res.json(post);
});
```

#### 3.3. Add Request Compression

```javascript
const compression = require('compression');

app.use(compression({
  // Compress responses > 1KB
  threshold: 1024,
  // Compression level (0-9, higher = better compression but slower)
  level: 6,
  // Only compress certain content types
  filter: (req, res) => {
    if (req.headers['x-no-compression']) {
      return false;
    }
    return compression.filter(req, res);
  }
}));

// Typical compression results:
// - JSON responses: 70-80% size reduction
// - Text responses: 60-70% size reduction
// - Already compressed (images, video): minimal effect
```

#### 3.4. Implement Rate Limiting

```javascript
const rateLimit = require('express-rate-limit');

// General API rate limit
const apiLimiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 100, // Limit each IP to 100 requests per window
  message: 'Too many requests from this IP, please try again later',
  standardHeaders: true,
  legacyHeaders: false,
});

// Stricter limit for expensive endpoints
const strictLimiter = rateLimit({
  windowMs: 15 * 60 * 1000,
  max: 10,
  message: 'Too many requests for this resource'
});

app.use('/api/', apiLimiter);
app.use('/api/search', strictLimiter);
app.use('/api/export', strictLimiter);
```
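
One caveat worth noting (general express-rate-limit behavior, not something configured above): the default counter store is in-memory and per-process, so limits reset on restart and are not shared between replicas. When running multiple instances behind a load balancer, pass a shared `store` (for example a Redis-backed one) so the limit applies across the whole fleet.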

#### 3.5. Optimize JSON Serialization

```javascript
// BEFORE (default JSON.stringify)
app.get('/api/users', async (req, res) => {
  const users = await User.findAll();
  res.json(users); // Uses JSON.stringify
});

// AFTER (fast-json-stringify for known schemas)
const fastJson = require('fast-json-stringify');

const userSchema = fastJson({
  type: 'array',
  items: {
    type: 'object',
    properties: {
      id: { type: 'integer' },
      name: { type: 'string' },
      email: { type: 'string' },
      createdAt: { type: 'string', format: 'date-time' }
    }
  }
});

app.get('/api/users', async (req, res) => {
  const users = await User.findAll();
  res.set('Content-Type', 'application/json');
  res.send(userSchema(users)); // 2-3x faster serialization
});
```

### 4. Algorithm Optimization

#### 4.1. Replace Inefficient Algorithms

**Example: Array Search Optimization**

```javascript
// BEFORE (O(n) lookup for each iteration = O(n²))
function enrichUsers(users, userData) {
  return users.map(user => ({
    ...user,
    data: userData.find(d => d.userId === user.id) // O(n) search
  }));
}
// Time complexity: O(n²) for n users

// AFTER (O(n) with Map)
function enrichUsers(users, userData) {
  const dataMap = new Map(
    userData.map(d => [d.userId, d])
  ); // O(n) to build map

  return users.map(user => ({
    ...user,
    data: dataMap.get(user.id) // O(1) lookup
  }));
}
// Time complexity: O(n)
// Performance improvement: 100x for 1000 users
```

**Example: Sorting Optimization**

```javascript
// BEFORE (multiple array iterations)
function getTopUsers(users) {
  return users
    .filter(u => u.isActive) // O(n)
    .map(u => ({ ...u, score: calculateScore(u) })) // O(n)
    .sort((a, b) => b.score - a.score) // O(n log n)
    .slice(0, 10); // O(1)
}
// Total: O(n log n)

// AFTER (single pass + partial sort)
function getTopUsers(users) {
  const scored = [];

  for (const user of users) {
    if (!user.isActive) continue;

    const score = calculateScore(user);
    scored.push({ ...user, score });

    // Keep only top 10 (partial sort)
    if (scored.length > 10) {
      scored.sort((a, b) => b.score - a.score);
      scored.length = 10;
    }
  }

  return scored.sort((a, b) => b.score - a.score);
}
// Total: O(n · k log k) with k = 10, effectively linear in n
// Performance improvement: 10x for 10,000 users
```

#### 4.2. Memoization for Expensive Computations

```javascript
// Memoization decorator
function memoize(fn, keyFn = (...args) => JSON.stringify(args)) {
  const cache = new Map();

  return function(...args) {
    const key = keyFn(...args);

    if (cache.has(key)) {
      return cache.get(key);
    }

    const result = fn.apply(this, args);
    cache.set(key, result);
    return result;
  };
}

// BEFORE (recalculates every time)
function calculateUserScore(user) {
  // Expensive calculation
  let score = 0;
  score += user.posts * 10;
  score += user.comments * 5;
  score += user.likes * 2;
  score += complexAlgorithm(user.activity);
  return score;
}

// AFTER (memoized)
const calculateUserScore = memoize(
  (user) => {
    let score = 0;
    score += user.posts * 10;
    score += user.comments * 5;
    score += user.likes * 2;
    score += complexAlgorithm(user.activity);
    return score;
  },
  (user) => user.id // Cache key
);

// Subsequent calls with same user.id return cached result
```
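
A design note on this pattern: the backing `Map` grows without bound, so in a long-lived process with a large key space (here, one entry per user ID) the cache should be capped with LRU eviction or a TTL, and entries should be invalidated when the underlying user data changes, or stale scores will be served indefinitely.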

### 5. Concurrency Optimization

#### 5.1. Async/Await Parallelization

```javascript
// BEFORE (sequential - slow)
async function getUserData(userId) {
  const user = await User.findByPk(userId); // 50ms
  const posts = await Post.findAll({ where: { userId } }); // 80ms
  const comments = await Comment.findAll({ where: { userId } }); // 60ms

  return { user, posts, comments };
}
// Total time: 50 + 80 + 60 = 190ms

// AFTER (parallel - fast)
async function getUserData(userId) {
  const [user, posts, comments] = await Promise.all([
    User.findByPk(userId), // 50ms
    Post.findAll({ where: { userId } }), // 80ms
    Comment.findAll({ where: { userId } }) // 60ms
  ]);

  return { user, posts, comments };
}
// Total time: max(50, 80, 60) = 80ms
// Performance improvement: 2.4x faster
```

#### 5.2. Worker Threads for CPU-Intensive Tasks

```javascript
// cpu-intensive-worker.js
const { parentPort, workerData } = require('worker_threads');

function cpuIntensiveTask(data) {
  // Complex computation
  let result = 0;
  for (let i = 0; i < data.iterations; i++) {
    result += Math.sqrt(i) * Math.sin(i);
  }
  return result;
}

parentPort.postMessage(cpuIntensiveTask(workerData));

// Main application (separate file)
const { Worker } = require('worker_threads');

function runWorker(workerData) {
  return new Promise((resolve, reject) => {
    const worker = new Worker('./cpu-intensive-worker.js', { workerData });

    worker.on('message', resolve);
    worker.on('error', reject);
    worker.on('exit', (code) => {
      if (code !== 0) {
        reject(new Error(`Worker stopped with exit code ${code}`));
      }
    });
  });
}

// BEFORE (blocks event loop)
app.post('/api/process', async (req, res) => {
  const result = cpuIntensiveTask(req.body); // Blocks for 500ms
  res.json({ result });
});

// AFTER (offloaded to worker)
app.post('/api/process', async (req, res) => {
  const result = await runWorker(req.body); // Non-blocking
  res.json({ result });
});
// Main thread remains responsive
```
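
Spawning a fresh worker per request adds startup cost and can exhaust memory under load; for production traffic, a fixed-size worker pool (for example the `piscina` package, or a hand-rolled pool over `worker_threads`) amortizes startup and bounds concurrency.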

#### 5.3. Request Batching and Debouncing

```javascript
// Batch multiple requests into single database query
class BatchLoader {
  constructor(loadFn, delay = 10) {
    this.loadFn = loadFn;
    this.delay = delay;
    this.queue = [];
    this.timer = null;
  }

  load(key) {
    return new Promise((resolve, reject) => {
      this.queue.push({ key, resolve, reject });

      if (!this.timer) {
        this.timer = setTimeout(() => this.flush(), this.delay);
      }
    });
  }

  async flush() {
    const queue = this.queue;
    this.queue = [];
    this.timer = null;

    try {
      const keys = queue.map(item => item.key);
      const results = await this.loadFn(keys);

      queue.forEach((item, index) => {
        item.resolve(results[index]);
      });
    } catch (error) {
      queue.forEach(item => item.reject(error));
    }
  }
}

// Usage
const { Op } = require('sequelize');

const userLoader = new BatchLoader(async (userIds) => {
  // Single query for all user IDs
  const users = await User.findAll({
    where: { id: { [Op.in]: userIds } }
  });

  // Return in same order as requested
  return userIds.map(id => users.find(u => u.id === id));
});

// BEFORE (N separate queries)
app.get('/api/feed', async (req, res) => {
  const posts = await Post.findAll({ limit: 50 });

  for (const post of posts) {
    post.author = await User.findByPk(post.userId); // N queries
  }

  res.json(posts);
});

// AFTER (batched into 1 query)
app.get('/api/feed', async (req, res) => {
  const posts = await Post.findAll({ limit: 50 });

  await Promise.all(
    posts.map(async (post) => {
      post.author = await userLoader.load(post.userId); // Batched
    })
  );

  res.json(posts);
});
// Improvement: 50 queries → 2 queries (posts + batched users)
```

### 6. Response Streaming for Large Datasets

```javascript
// BEFORE (loads entire dataset into memory)
app.get('/api/export/users', async (req, res) => {
  const users = await User.findAll(); // Loads all users into memory
  res.json(users); // May cause OOM for large datasets
});

// AFTER (streams data)
app.get('/api/export/users', async (req, res) => {
  res.setHeader('Content-Type', 'application/json');
  res.write('[');

  let first = true;
  // Assumes a driver/ORM that exposes rows as an async iterable;
  // Sequelize does not stream natively, so with raw pg use pg-query-stream
  const stream = User.findAll({ stream: true });

  for await (const user of stream) {
    if (!first) res.write(',');
    res.write(JSON.stringify(user));
    first = false;
  }

  res.write(']');
  res.end();
});
// Memory usage: O(1) instead of O(n)
// Can handle millions of records
```
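
One further streaming caveat: `res.write` returns `false` once the socket buffer is full, so a well-behaved exporter should pause the source until the `'drain'` event fires (or pipe through `stream.pipeline` with a transform) rather than writing unconditionally as the sketch above does.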

### 7. Optimize Middleware Stack

```javascript
// BEFORE (all middleware runs for all routes)
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({ extended: true }));
app.use(cookieParser());
app.use(session({ /* config */ }));
app.use(passport.initialize());
app.use(passport.session());
app.use(cors());

app.get('/api/public/health', (req, res) => {
  res.json({ status: 'ok' });
  // Still parsed body, cookies, session unnecessarily
});

// AFTER (selective middleware)
const publicRouter = express.Router();
publicRouter.get('/health', (req, res) => {
  res.json({ status: 'ok' });
});

const apiRouter = express.Router();
apiRouter.use(bodyParser.json());
apiRouter.use(authenticate);
apiRouter.get('/users', async (req, res) => { /* ... */ });

app.use('/api/public', publicRouter);
app.use('/api', apiRouter);
// Health check endpoint has minimal overhead
```

### 8. Database Connection Management

```javascript
// BEFORE (manually checks out a client per request;
// also leaks the client if the query throws, since there is no try/finally)
app.get('/api/users', async (req, res) => {
  const client = await pool.connect(); // Waits for a free client
  const result = await client.query('SELECT * FROM users');
  client.release();
  res.json(result.rows);
});

// AFTER (lets a tuned pool handle checkout and release)
const { Pool } = require('pg');
const pool = new Pool({
  max: 20,
  min: 5,
  idleTimeoutMillis: 30000
});

app.get('/api/users', async (req, res) => {
  const result = await pool.query('SELECT * FROM users'); // Reuses connection
  res.json(result.rows);
});
// Connection acquisition: 50ms → 0.5ms
```

## Output Format

```markdown
# Backend Optimization Report: [Context]

**Optimization Date**: [Date]
**Backend**: [Framework and version]
**Runtime**: [Node.js/Python/Go version]
**Load Profile**: [low/medium/high]

## Executive Summary

[2-3 paragraphs summarizing findings and optimizations]

## Baseline Metrics

### API Performance

| Endpoint | p50 | p95 | p99 | RPS | Error Rate |
|----------|-----|-----|-----|-----|------------|
| GET /api/users | 120ms | 450ms | 980ms | 45 | 0.5% |
| POST /api/posts | 230ms | 780ms | 1800ms | 20 | 1.2% |
| GET /api/feed | 850ms | 2100ms | 4500ms | 12 | 2.3% |

### Resource Utilization
- **CPU**: 68% average
- **Memory**: 1.2GB / 2GB (60%)
- **Event Loop Lag**: 45ms average

## Optimizations Implemented

### 1. Fixed N+1 Query Problem in /api/feed

**Before**:
```javascript
const posts = await Post.findAll();
for (const post of posts) {
  post.author = await User.findByPk(post.userId); // N queries
}
// Result: 1 + 50 = 51 queries for 50 posts
```

**After**:
```javascript
const posts = await Post.findAll({
  include: [{ model: User, as: 'author' }]
});
// Result: 1 query with JOIN
```

**Impact**:
- **Before**: 850ms p50 response time
- **After**: 95ms p50 response time
- **Improvement**: 88.8% faster

### 2. Implemented Redis Caching

**Implementation**:
```javascript
const cacheMiddleware = (key, ttl) => async (req, res, next) => {
  const cached = await redis.get(key(req));
  if (cached) return res.json(JSON.parse(cached));

  const originalJson = res.json.bind(res);
  res.json = async (data) => {
    await redis.setex(key(req), ttl, JSON.stringify(data));
    return originalJson(data);
  };
  next();
};

app.get('/api/users',
  cacheMiddleware(req => `users:${req.query.page}`, 300),
  handler
);
```

**Impact**:
- **Cache Hit Rate**: 82% (after 24 hours)
- **Cached Response Time**: 5ms
- **Database Load Reduction**: 82%

### 3. Parallelized Independent Queries

**Before**:
```javascript
const user = await User.findByPk(userId); // 50ms
const posts = await Post.findAll({ where: { userId } }); // 80ms
const comments = await Comment.findAll({ where: { userId } }); // 60ms
// Total: 190ms
```

**After**:
```javascript
const [user, posts, comments] = await Promise.all([
  User.findByPk(userId),
  Post.findAll({ where: { userId } }),
  Comment.findAll({ where: { userId } })
]);
// Total: 80ms (max of parallel operations)
```

**Impact**: 57.9% faster (190ms → 80ms)

### 4. Added Response Compression

**Implementation**:
```javascript
app.use(compression({ level: 6, threshold: 1024 }));
```

**Impact**:
- **JSON Response Size**: 450KB → 95KB (78.9% reduction)
- **Network Transfer Time**: 180ms → 38ms (on 20Mbps connection)
- **Bandwidth Savings**: 79%

### 5. Optimized Algorithm Complexity

**Before (O(n²) lookup)**:
```javascript
users.map(user => ({
  ...user,
  data: userData.find(d => d.userId === user.id) // O(n) per iteration
}));
// Time: 2,400ms for 1,000 users
```

**After (O(n) with Map)**:
```javascript
const dataMap = new Map(userData.map(d => [d.userId, d]));
users.map(user => ({
  ...user,
  data: dataMap.get(user.id) // O(1) lookup
}));
// Time: 12ms for 1,000 users
```

**Impact**: 99.5% faster (2,400ms → 12ms)

## Results Summary

### Overall API Performance

| Metric | Before | After | Improvement |
|--------|--------|-------|-------------|
| Avg Response Time (p50) | 285ms | 65ms | 77.2% faster |
| p95 Response Time | 1,100ms | 180ms | 83.6% faster |
| p99 Response Time | 3,200ms | 450ms | 85.9% faster |
| Throughput | 85 RPS | 320 RPS | 276% increase |
| Error Rate | 1.5% | 0.1% | 93.3% reduction |

### Endpoint-Specific Improvements

| Endpoint | Before (p50) | After (p50) | Improvement |
|----------|--------------|-------------|-------------|
| GET /api/users | 120ms | 8ms | 93.3% |
| GET /api/feed | 850ms | 95ms | 88.8% |
| POST /api/posts | 230ms | 65ms | 71.7% |

### Resource Utilization

| Metric | Before | After | Change |
|--------|--------|-------|--------|
| CPU Usage | 68% | 32% | -53% |
| Memory Usage | 60% | 45% | -25% |
| Event Loop Lag | 45ms | 8ms | -82.2% |

## Load Testing Results

**Before Optimization**:
```
Requests: 5,000
Duration: 58.8s
RPS: 85
p95: 1,100ms
p99: 3,200ms
Errors: 75 (1.5%)
```

**After Optimization**:
```
Requests: 5,000
Duration: 15.6s
RPS: 320
p95: 180ms
p99: 450ms
Errors: 5 (0.1%)
```

**Improvement**: 276% more throughput, 83.6% faster p95

## Trade-offs and Considerations

**Caching Strategy**:
- **Benefit**: 82% reduction in database load
- **Trade-off**: Cache invalidation complexity, eventual consistency
- **Mitigation**: TTL-based expiration (5 minutes) acceptable for this use case

**Response Compression**:
- **Benefit**: 79% bandwidth savings
- **Trade-off**: ~5ms CPU overhead per request
- **Conclusion**: Worth it for responses > 1KB

**Algorithm Optimization**:
- **Benefit**: 99.5% faster for large datasets
- **Trade-off**: Increased memory usage (Map storage)
- **Conclusion**: Negligible memory increase, massive performance gain

## Monitoring Recommendations

**Key Metrics to Track**:

1. **Response Times**:
   ```javascript
   // Use middleware to track
   app.use((req, res, next) => {
     const start = Date.now();
     res.on('finish', () => {
       const duration = Date.now() - start;
       metrics.histogram('response_time', duration, {
         endpoint: req.path,
         method: req.method,
         status: res.statusCode
       });
     });
     next();
   });
   ```

2. **Cache Hit Rates**:
   ```javascript
   // Track Redis cache effectiveness
   // (increment hits/misses inside the cache middleware on each lookup)
   const cacheStats = {
     hits: 0,
     misses: 0,
     hitRate: () => cacheStats.hits / (cacheStats.hits + cacheStats.misses)
   };
   ```

3. **Event Loop Lag**:
   ```javascript
   const { monitorEventLoopDelay } = require('perf_hooks');
   const h = monitorEventLoopDelay({ resolution: 20 });
   h.enable();

   setInterval(() => {
     console.log('Event loop delay:', h.mean / 1000000, 'ms');
   }, 60000);
   ```

4. **Memory Leaks**:
   ```javascript
   // Track memory usage trends
   setInterval(() => {
     const usage = process.memoryUsage();
     metrics.gauge('memory.heap_used', usage.heapUsed);
     metrics.gauge('memory.heap_total', usage.heapTotal);
   }, 60000);
   ```

### Alerts to Configure

- Response time p95 > 500ms
- Error rate > 1%
- Cache hit rate < 70%
- Event loop lag > 50ms
- Memory usage > 80%

## Next Steps

1. **Implement** worker threads for CPU-intensive report generation
2. **Consider** horizontal scaling with load balancer
3. **Evaluate** GraphQL migration for flexible data fetching
4. **Monitor** cache invalidation patterns for optimization
5. **Review** remaining slow endpoints for optimization opportunities

683
commands/optimize/benchmark.md
Normal file
@@ -0,0 +1,683 @@
# Performance Benchmarking Operation

You are executing the **benchmark** operation to perform load testing, rendering benchmarks, query benchmarks, and regression detection.

## Parameters

**Received**: `$ARGUMENTS` (after removing 'benchmark' operation name)

Expected format: `type:"load|rendering|query|integration|all" [baseline:"version-or-tag"] [duration:"seconds"] [concurrency:"number"] [target:"url-or-endpoint"]`

**Parameter definitions**:
- `type` (required): Benchmark type - `load`, `rendering`, `query`, `integration`, or `all`
- `baseline` (optional): Baseline version for comparison (e.g., "v1.2.0", "main", "baseline-2025-10-14")
- `duration` (optional): Test duration in seconds (default: 60s)
- `concurrency` (optional): Number of concurrent users/connections (default: 50)
- `target` (optional): Specific URL or endpoint to benchmark
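
For example, an invocation (values illustrative) might look like:

`/optimize benchmark type:"load" baseline:"v1.2.0" duration:"120" concurrency:"100" target:"https://api.example.com"`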

## Workflow

### 1. Setup Benchmarking Environment

```bash
# Install benchmarking tools
# (k6 ships as a standalone binary, not an npm package; install it via
# your OS package manager, e.g. `brew install k6` or `apt install k6`)
npm install -g @lhci/cli autocannon

# For database benchmarking, pgbench ships with PostgreSQL
# (the postgresql-contrib package on Debian/Ubuntu)

# Create benchmark results directory
mkdir -p benchmark-results/$(date +%Y-%m-%d)
```

### 2. Load Testing with k6

**Basic Load Test Script**:
```javascript
// loadtest.js
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';

const errorRate = new Rate('errors');

export const options = {
  stages: [
    { duration: '30s', target: 20 }, // Ramp up to 20 users
    { duration: '1m', target: 50 }, // Stay at 50 users
    { duration: '30s', target: 100 }, // Spike to 100 users
    { duration: '1m', target: 50 }, // Back to 50 users
    { duration: '30s', target: 0 }, // Ramp down
  ],
  thresholds: {
    http_req_duration: ['p(95)<500', 'p(99)<1000'], // 95% < 500ms, 99% < 1s
    http_req_failed: ['rate<0.01'], // Error rate < 1%
    errors: ['rate<0.1'],
  },
};

export default function () {
  const responses = http.batch([
    ['GET', 'https://api.example.com/users'],
    ['GET', 'https://api.example.com/posts'],
    ['GET', 'https://api.example.com/comments'],
  ]);

  responses.forEach((res) => {
    const success = check(res, {
      'status is 200': (r) => r.status === 200,
      'response time < 500ms': (r) => r.timings.duration < 500,
    });

    errorRate.add(!success);
  });

  sleep(1);
}
```

**Run Load Test**:
```bash
# Basic load test
k6 run loadtest.js

# Custom configuration
k6 run --vus 100 --duration 300s loadtest.js

# Output to JSON for analysis
k6 run --out json=results.json loadtest.js

# Cloud run (for distributed testing)
k6 cloud run loadtest.js
```

**Advanced Load Test with Scenarios**:
```javascript
// advanced-loadtest.js
import http from 'k6/http';
import { check } from 'k6';

export const options = {
  scenarios: {
    // Scenario 1: Constant load
    constant_load: {
      executor: 'constant-vus',
      vus: 50,
      duration: '5m',
      tags: { scenario: 'constant' },
    },
    // Scenario 2: Spike test
    spike_test: {
      executor: 'ramping-vus',
      startVUs: 0,
      stages: [
        { duration: '10s', target: 200 },
        { duration: '30s', target: 200 },
        { duration: '10s', target: 0 },
      ],
      startTime: '5m',
      tags: { scenario: 'spike' },
    },
    // Scenario 3: Stress test
    stress_test: {
      executor: 'ramping-arrival-rate',
      startRate: 50,
      timeUnit: '1s',
      preAllocatedVUs: 200, // required by arrival-rate executors
      stages: [
        { duration: '2m', target: 100 },
        { duration: '3m', target: 200 },
        { duration: '2m', target: 400 },
      ],
      startTime: '10m',
      tags: { scenario: 'stress' },
    },
  },
  thresholds: {
    'http_req_duration{scenario:constant}': ['p(95)<500'],
    'http_req_duration{scenario:spike}': ['p(95)<1000'],
    'http_req_failed': ['rate<0.05'],
  },
};

export default function () {
  const res = http.get('https://api.example.com/users');
  check(res, {
    'status is 200': (r) => r.status === 200,
  });
}
```

### 3. Frontend Rendering Benchmarks

**Lighthouse CI Configuration** (`lighthouserc.json`; note JSON does not allow comments, so the filename lives here rather than in the file):
```json
{
  "ci": {
    "collect": {
      "url": [
        "http://localhost:3000",
        "http://localhost:3000/dashboard",
        "http://localhost:3000/profile"
      ],
      "numberOfRuns": 3,
      "settings": {
        "preset": "desktop",
        "throttling": {
          "rttMs": 40,
          "throughputKbps": 10240,
          "cpuSlowdownMultiplier": 1
        }
      }
    },
    "assert": {
      "assertions": {
        "categories:performance": ["error", {"minScore": 0.9}],
        "categories:accessibility": ["error", {"minScore": 0.9}],
        "first-contentful-paint": ["error", {"maxNumericValue": 2000}],
        "largest-contentful-paint": ["error", {"maxNumericValue": 2500}],
        "cumulative-layout-shift": ["error", {"maxNumericValue": 0.1}],
        "total-blocking-time": ["error", {"maxNumericValue": 300}]
      }
    },
    "upload": {
      "target": "filesystem",
      "outputDir": "./benchmark-results"
    }
  }
}
```

**Run Lighthouse CI**:
```bash
# Single run
lhci autorun

# Compare with baseline
lhci autorun --config=lighthouserc.json

# Upload results for comparison
lhci upload --target=temporary-public-storage
```

**Custom Rendering Benchmark**:
```javascript
// rendering-benchmark.js
const puppeteer = require('puppeteer');

async function benchmarkRendering(url, iterations = 10) {
  const browser = await puppeteer.launch();
  const results = [];

  for (let i = 0; i < iterations; i++) {
    const page = await browser.newPage();

    // Start performance measurement
    await page.goto(url, { waitUntil: 'networkidle2' });

    const metrics = await page.evaluate(() => {
      const navigation = performance.getEntriesByType('navigation')[0];
      const paint = performance.getEntriesByType('paint');

      return {
        domContentLoaded: navigation.domContentLoadedEventEnd - navigation.domContentLoadedEventStart,
        loadComplete: navigation.loadEventEnd - navigation.loadEventStart,
        firstPaint: paint.find(p => p.name === 'first-paint')?.startTime,
        firstContentfulPaint: paint.find(p => p.name === 'first-contentful-paint')?.startTime,
        domInteractive: navigation.domInteractive,
      };
    });

    results.push(metrics);
    await page.close();
  }

  await browser.close();

  // Calculate averages
  const avg = (key) => results.reduce((sum, r) => sum + r[key], 0) / results.length;

  return {
    avgDOMContentLoaded: avg('domContentLoaded'),
    avgLoadComplete: avg('loadComplete'),
    avgFirstPaint: avg('firstPaint'),
    avgFirstContentfulPaint: avg('firstContentfulPaint'),
    avgDOMInteractive: avg('domInteractive'),
  };
}

// Run benchmark
benchmarkRendering('http://localhost:3000').then(console.log);
```

### 4. Database Query Benchmarks

**PostgreSQL - pgbench**:
```bash
# Initialize benchmark database
pgbench -i -s 50 benchmark_db

# Run benchmark (50 clients, 1000 transactions each)
pgbench -c 50 -t 1000 benchmark_db

# Custom SQL script benchmark
cat > custom-queries.sql <<'EOF'
SELECT * FROM users WHERE email = 'test@example.com';
SELECT p.*, u.name FROM posts p JOIN users u ON p.user_id = u.id LIMIT 100;
EOF

pgbench -c 10 -t 100 -f custom-queries.sql benchmark_db

# Write per-transaction latency logs
pgbench -c 50 -t 1000 --log --log-prefix=benchmark benchmark_db
```

**Custom Query Benchmark Script**:
```javascript
// query-benchmark.js
const { Pool } = require('pg');
const pool = new Pool({ connectionString: process.env.DATABASE_URL });

async function benchmarkQuery(query, params = [], iterations = 1000) {
  const times = [];

  for (let i = 0; i < iterations; i++) {
    const start = process.hrtime.bigint();
    await pool.query(query, params);
    const end = process.hrtime.bigint();

    times.push(Number(end - start) / 1_000_000); // Convert to ms
  }

  times.sort((a, b) => a - b);

  return {
    iterations,
    min: times[0].toFixed(2),
    max: times[times.length - 1].toFixed(2),
    avg: (times.reduce((a, b) => a + b, 0) / times.length).toFixed(2),
    p50: times[Math.floor(times.length * 0.50)].toFixed(2),
    p95: times[Math.floor(times.length * 0.95)].toFixed(2),
    p99: times[Math.floor(times.length * 0.99)].toFixed(2),
  };
}

// Run benchmarks
async function runBenchmarks() {
  console.log('Benchmarking user lookup by email...');
  const userLookup = await benchmarkQuery(
    'SELECT * FROM users WHERE email = $1',
    ['test@example.com']
  );
  console.log(userLookup);

  console.log('\nBenchmarking posts with user join...');
  const postsJoin = await benchmarkQuery(
    'SELECT p.*, u.name FROM posts p JOIN users u ON p.user_id = u.id LIMIT 100'
  );
  console.log(postsJoin);

  await pool.end();
}

runBenchmarks();
```

### 5. Integration/E2E Benchmarks

**Playwright Performance Testing**:
```javascript
// e2e-benchmark.js
const { chromium } = require('playwright');

async function benchmarkUserFlow(iterations = 10) {
  const results = [];

  for (let i = 0; i < iterations; i++) {
    const browser = await chromium.launch();
    const context = await browser.newContext();
    const page = await context.newPage();

    const startTime = Date.now();

    // User flow
    await page.goto('http://localhost:3000');
    await page.fill('input[name="email"]', 'user@example.com');
    await page.fill('input[name="password"]', 'password123');
    await page.click('button[type="submit"]');
    await page.waitForSelector('.dashboard');
    await page.click('a[href="/profile"]');
    await page.waitForSelector('.profile-page');

    const endTime = Date.now();
    results.push(endTime - startTime);

    await browser.close();
  }

  const avg = results.reduce((a, b) => a + b, 0) / results.length;
  const min = Math.min(...results);
  const max = Math.max(...results);

  return { avg, min, max, results };
}

benchmarkUserFlow().then(console.log);
```
### 6. Baseline Management and Comparison

**Save Baseline**:
```bash
# Save current performance as baseline
mkdir -p baselines/

# k6 results
k6 run --out json=baselines/baseline-$(date +%Y-%m-%d)-load.json loadtest.js

# Lighthouse results
lhci autorun --config=lighthouserc.json
cp -r .lighthouseci/ baselines/baseline-$(date +%Y-%m-%d)-lighthouse/

# Query benchmarks (query-benchmark.js prints JSON on stdout)
node query-benchmark.js > baselines/baseline-$(date +%Y-%m-%d)-queries.json
```

**Compare with Baseline**:
```javascript
// compare-benchmarks.js
const fs = require('fs');

function compareBenchmarks(currentFile, baselineFile) {
  const current = JSON.parse(fs.readFileSync(currentFile));
  const baseline = JSON.parse(fs.readFileSync(baselineFile));

  const metrics = ['p50', 'p95', 'p99', 'avg'];
  const comparison = {};

  metrics.forEach(metric => {
    const currentValue = parseFloat(current[metric]);
    const baselineValue = parseFloat(baseline[metric]);
    const diff = currentValue - baselineValue;
    const percentChange = (diff / baselineValue) * 100;

    comparison[metric] = {
      current: currentValue,
      baseline: baselineValue,
      diff: diff.toFixed(2),
      percentChange: percentChange.toFixed(2),
      regression: diff > 0,
    };
  });

  return comparison;
}

// Usage
const comparison = compareBenchmarks(
  'results/current-queries.json',
  'baselines/baseline-2025-10-01-queries.json'
);

console.log('Performance Comparison:');
Object.entries(comparison).forEach(([metric, data]) => {
  const emoji = data.regression ? '⚠️' : '✅';
  console.log(`${emoji} ${metric}: ${data.percentChange}% change`);
});
```

### 7. Regression Detection

**Automated Regression Detection**:
```javascript
// detect-regression.js
function detectRegression(comparison, thresholds = {
  p50: 10, // a 10% increase counts as a regression
  p95: 15,
  p99: 20,
}) {
  const regressions = [];

  Object.entries(comparison).forEach(([metric, data]) => {
    const threshold = thresholds[metric] || 10;
    // percentChange is stored as a string by compare-benchmarks.js
    const change = parseFloat(data.percentChange);

    if (change > threshold) {
      regressions.push({
        metric,
        change,
        threshold,
        current: data.current,
        baseline: data.baseline,
      });
    }
  });

  return {
    hasRegression: regressions.length > 0,
    regressions,
  };
}

// Usage in CI/CD
const comparison = compareBenchmarks('current.json', 'baseline.json');
const regression = detectRegression(comparison);

if (regression.hasRegression) {
  console.error('Performance regression detected!');
  regression.regressions.forEach(r => {
    console.error(`${r.metric}: ${r.change}% increase (threshold: ${r.threshold}%)`);
  });
  process.exit(1); // Fail CI build
}
```

### 8. Continuous Performance Monitoring

**GitHub Actions Workflow**:
```yaml
# .github/workflows/performance.yml
name: Performance Benchmarks

on:
  pull_request:
    branches: [main]
  schedule:
    - cron: '0 0 * * *' # Daily at midnight

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '18'

      - name: Install dependencies
        run: |
          npm ci
          npm install -g @lhci/cli

      - name: Install k6
        # k6 is a standalone binary, not an npm package; install it from
        # Grafana's apt repository per the official k6 docs
        run: |
          sudo gpg --no-default-keyring --keyring /usr/share/keyrings/k6-archive-keyring.gpg \
            --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69
          echo "deb [signed-by=/usr/share/keyrings/k6-archive-keyring.gpg] https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list
          sudo apt-get update && sudo apt-get install -y k6

      - name: Build application
        run: npm run build

      - name: Start server
        run: npm start &
        env:
          NODE_ENV: production

      - name: Wait for server
        run: npx wait-on http://localhost:3000

      - name: Run Lighthouse CI
        run: lhci autorun --config=lighthouserc.json

      - name: Run load tests
        run: k6 run --out json=results-load.json loadtest.js

      - name: Compare with baseline
        run: node scripts/compare-benchmarks.js

      - name: Upload results
        uses: actions/upload-artifact@v3
        with:
          name: benchmark-results
          path: benchmark-results/

      - name: Comment PR with results
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v6
        with:
          script: |
            const fs = require('fs');
            const results = JSON.parse(fs.readFileSync('benchmark-results/summary.json'));
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: `## Performance Benchmark Results\n\n${results.summary}`
            });
```

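The workflow above assumes `scripts/compare-benchmarks.js` leaves a `benchmark-results/summary.json` behind for the PR-comment step, but that file is never produced anywhere in this document. One hedged sketch of what writing it could look like, assuming the `compareBenchmarks` and `detectRegression` helpers from the previous sections are exported via `module.exports` (the module paths and input file names are illustrative):

```javascript
// scripts/write-summary.js - hypothetical glue producing benchmark-results/summary.json
const fs = require('fs');
const { compareBenchmarks } = require('./compare-benchmarks');
const { detectRegression } = require('./detect-regression');

const comparison = compareBenchmarks(
  'results/current-queries.json',
  'baselines/baseline-2025-10-01-queries.json'
);
const { hasRegression, regressions } = detectRegression(comparison);

// Render a small markdown table the PR comment can embed directly
const rows = Object.entries(comparison).map(
  ([metric, d]) => `| ${metric} | ${d.baseline}ms | ${d.current}ms | ${d.percentChange}% |`
);

const summary = [
  hasRegression ? `⚠️ ${regressions.length} regression(s) detected` : '✅ No regressions',
  '',
  '| Metric | Baseline | Current | Change |',
  '|--------|----------|---------|--------|',
  ...rows,
].join('\n');

fs.mkdirSync('benchmark-results', { recursive: true });
fs.writeFileSync('benchmark-results/summary.json', JSON.stringify({ summary }, null, 2));
```
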
## Output Format

```markdown
# Performance Benchmark Report

**Benchmark Date**: [Date]
**Benchmark Type**: [load/rendering/query/integration/all]
**Baseline**: [version or "none"]
**Duration**: [test duration]
**Concurrency**: [concurrent users/connections]

## Executive Summary

[Summary of benchmark results and any regressions detected]

## Load Testing Results (k6)

### Test Configuration
- **Virtual Users**: 50 (ramped from 0 to 100)
- **Duration**: 5 minutes
- **Scenarios**: Constant load, spike test, stress test

### Results

| Metric | Value | Threshold | Status |
|--------|-------|-----------|--------|
| Total Requests | 45,230 | - | - |
| Request Rate | 150.77/s | - | - |
| Request Duration (p50) | 85ms | <200ms | ✅ Pass |
| Request Duration (p95) | 245ms | <500ms | ✅ Pass |
| Request Duration (p99) | 680ms | <1000ms | ✅ Pass |
| Failed Requests | 0.02% | <1% | ✅ Pass |

### Comparison with Baseline

| Metric | Current | Baseline (v1.2.0) | Change |
|--------|---------|-------------------|--------|
| p50 | 85ms | 120ms | -29% ✅ |
| p95 | 245ms | 450ms | -46% ✅ |
| p99 | 680ms | 980ms | -31% ✅ |
| Request Rate | 150.77/s | 85/s | +77% ✅ |

**Overall**: 46% improvement in p95 response time

## Frontend Rendering Benchmarks (Lighthouse)

### Home Page

| Metric | Score | Value | Baseline | Change |
|--------|-------|-------|----------|--------|
| Performance | 94 | - | 62 | +32 ✅ |
| FCP | - | 0.8s | 2.1s | -62% ✅ |
| LCP | - | 1.8s | 4.2s | -57% ✅ |
| TBT | - | 45ms | 280ms | -84% ✅ |
| CLS | - | 0.02 | 0.18 | -89% ✅ |

### Dashboard Page

| Metric | Score | Value | Baseline | Change |
|--------|-------|-------|----------|--------|
| Performance | 89 | - | 48 | +41 ✅ |
| LCP | - | 2.1s | 5.8s | -64% ✅ |
| TBT | - | 65ms | 420ms | -85% ✅ |

## Database Query Benchmarks

### User Lookup by Email (1000 iterations)

| Metric | Current | Baseline | Change |
|--------|---------|----------|--------|
| Min | 6ms | 380ms | -98% ✅ |
| Avg | 8ms | 450ms | -98% ✅ |
| p50 | 7ms | 445ms | -98% ✅ |
| p95 | 12ms | 520ms | -98% ✅ |
| p99 | 18ms | 680ms | -97% ✅ |

**Optimization**: Added index on users.email

### Posts with User Join (1000 iterations)

| Metric | Current | Baseline | Change |
|--------|---------|----------|--------|
| Avg | 45ms | 820ms | -95% ✅ |
| p95 | 68ms | 1200ms | -94% ✅ |
| p99 | 95ms | 2100ms | -95% ✅ |

**Optimization**: Fixed N+1 query with eager loading

## Integration/E2E Benchmarks

### User Login Flow (10 iterations)

| Metric | Value | Baseline | Change |
|--------|-------|----------|--------|
| Average | 1,245ms | 3,850ms | -68% ✅ |
| Min | 1,120ms | 3,200ms | -65% ✅ |
| Max | 1,420ms | 4,500ms | -68% ✅ |

**Flow**: Home → Login → Dashboard → Profile

## Regression Analysis

**Regressions Detected**: None

**Performance Improvements**: 12 metrics improved
- Load testing: 46% faster p95 response time
- Frontend rendering: 57% faster LCP
- Database queries: 98% faster average query time
- E2E flows: 68% faster completion time

## Recommendations

1. **Continue Monitoring**: Set up daily benchmarks to catch regressions early
2. **Performance Budget**: Establish budgets based on current metrics
   - p95 response time < 300ms
   - LCP < 2.5s
   - Database queries < 100ms average
3. **Optimize Further**: Investigate remaining slow queries in analytics module

## Testing Instructions

### Run Load Tests
```bash
k6 run --vus 50 --duration 60s loadtest.js
```

### Run Rendering Benchmarks
```bash
lhci autorun --config=lighthouserc.json
```

### Run Query Benchmarks
```bash
node query-benchmark.js
```

### Compare with Baseline
```bash
node scripts/compare-benchmarks.js results/current.json baselines/baseline-2025-10-01.json
```

927
commands/optimize/database.md
Normal file
@@ -0,0 +1,927 @@

# Database Optimization Operation

You are executing the **database** operation to optimize database queries, schema, indexes, and connection management.

## Parameters

**Received**: `$ARGUMENTS` (after removing 'database' operation name)

Expected format: `target:"queries|schema|indexes|connections|all" [context:"specific-details"] [threshold:"time-in-ms"] [environment:"prod|staging|dev"]`

**Parameter definitions**:
- `target` (required): What to optimize - `queries`, `schema`, `indexes`, `connections`, or `all`
- `context` (optional): Specific context like table names, query patterns, or problem description
- `threshold` (optional): Time threshold for slow queries in milliseconds (default: 500ms)
- `environment` (optional): Target environment (default: development)

## Workflow

### 1. Identify Database Technology

Detect the database type from the codebase:
```bash
# Check for database configuration
grep -r "DATABASE_URL\|DB_CONNECTION\|database" .env* config/ 2>/dev/null | head -5

# Check package dependencies
grep -E "pg|mysql|mongodb|sqlite" package.json 2>/dev/null
```

Common patterns:
- **PostgreSQL**: `pg`, `pg_stat_statements`, `.pgpass`
- **MySQL**: `mysql2`, `mysql`, `.my.cnf`
- **MongoDB**: `mongoose`, `mongodb`
- **SQLite**: `sqlite3`, `.db` files

### 2. Enable Performance Monitoring

**PostgreSQL**:
```sql
-- Enable pg_stat_statements extension (if not already enabled)
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;

-- Reset statistics for a fresh baseline
SELECT pg_stat_statements_reset();

-- Enable slow query logging
ALTER SYSTEM SET log_min_duration_statement = 500; -- 500ms threshold
SELECT pg_reload_conf();
```

**MySQL**:
```sql
-- Enable slow query log
SET GLOBAL slow_query_log = 'ON';
SET GLOBAL long_query_time = 0.5; -- 500ms threshold
SET GLOBAL log_queries_not_using_indexes = 'ON';
```

**MongoDB**:
```javascript
// Enable profiling for operations slower than 500ms
db.setProfilingLevel(1, { slowms: 500 });

// View profiler status
db.getProfilingStatus();
```

### 3. Analyze Slow Queries

**PostgreSQL - Find Slow Queries**:
```sql
-- Top 20 slow queries by average time
SELECT
  substring(query, 1, 100) AS short_query,
  round(mean_exec_time::numeric, 2) AS avg_time_ms,
  calls,
  round(total_exec_time::numeric, 2) AS total_time_ms,
  round((100 * total_exec_time / sum(total_exec_time) OVER ())::numeric, 2) AS percentage_cpu
FROM pg_stat_statements
WHERE query NOT LIKE '%pg_stat_statements%'
ORDER BY mean_exec_time DESC
LIMIT 20;

-- Queries with most calls (potential optimization targets)
SELECT
  substring(query, 1, 100) AS short_query,
  calls,
  round(mean_exec_time::numeric, 2) AS avg_time_ms,
  round(total_exec_time::numeric, 2) AS total_time_ms
FROM pg_stat_statements
WHERE query NOT LIKE '%pg_stat_statements%'
ORDER BY calls DESC
LIMIT 20;

-- Most time-consuming queries
SELECT
  substring(query, 1, 100) AS short_query,
  round(total_exec_time::numeric, 2) AS total_time_ms,
  calls,
  round(mean_exec_time::numeric, 2) AS avg_time_ms
FROM pg_stat_statements
WHERE query NOT LIKE '%pg_stat_statements%'
ORDER BY total_exec_time DESC
LIMIT 20;
```

**MySQL - Find Slow Queries**:
```sql
-- Analyze statement digests (timer values are in picoseconds; /1e9 gives ms)
SELECT
  DIGEST_TEXT AS query,
  COUNT_STAR AS exec_count,
  AVG_TIMER_WAIT/1000000000 AS avg_time_ms,
  SUM_TIMER_WAIT/1000000000 AS total_time_ms
FROM performance_schema.events_statements_summary_by_digest
ORDER BY AVG_TIMER_WAIT DESC
LIMIT 20;
```

**MongoDB - Find Slow Queries**:
```javascript
// View slow operations
db.system.profile.find({
  millis: { $gt: 500 }
}).sort({ ts: -1 }).limit(20).pretty();

// Aggregate slow operations by type
db.system.profile.aggregate([
  { $match: { millis: { $gt: 500 } } },
  { $group: {
      _id: "$command",
      count: { $sum: 1 },
      avgTime: { $avg: "$millis" }
  }},
  { $sort: { avgTime: -1 } }
]);
```

### 4. Analyze Query Execution Plans

For each slow query, analyze the execution plan:

**PostgreSQL - EXPLAIN ANALYZE**:
```sql
-- Replace [SLOW_QUERY] with the actual query
EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON)
[SLOW_QUERY];

-- Human-readable format
EXPLAIN (ANALYZE, BUFFERS)
SELECT u.id, u.email, COUNT(p.id) AS post_count
FROM users u
LEFT JOIN posts p ON p.user_id = u.id
WHERE u.created_at > NOW() - INTERVAL '30 days'
GROUP BY u.id, u.email;
```

Look for these indicators (a small scan-checking helper follows below):
- **Seq Scan** - Full table scan (bad for large tables; consider an index)
- **Index Scan** - Using an index (good)
- **Nested Loop** - Join method (may be slow for large datasets)
- **Hash Join** / **Merge Join** - Usually better for large datasets
- **High execution time** - Optimization opportunity

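To make that checklist mechanical, here is a hedged Node.js sketch that runs `EXPLAIN (FORMAT JSON)` through the `pg` driver and flags sequential scans. The sample query is a placeholder, and the recursive walk only looks at the plan fields shown here:

```javascript
// explain-check.js - flag sequential scans in a query's plan (sketch)
const { Pool } = require('pg');
const pool = new Pool({ connectionString: process.env.DATABASE_URL });

function collectNodes(plan, found = []) {
  // Each plan node carries a "Node Type"; child nodes live under "Plans"
  found.push({ type: plan['Node Type'], relation: plan['Relation Name'] });
  (plan.Plans || []).forEach(child => collectNodes(child, found));
  return found;
}

async function checkQuery(sql) {
  // NB: sketch only - never interpolate untrusted SQL like this
  const { rows } = await pool.query(`EXPLAIN (FORMAT JSON) ${sql}`);
  const nodes = collectNodes(rows[0]['QUERY PLAN'][0].Plan);

  nodes
    .filter(n => n.type === 'Seq Scan')
    .forEach(n => console.warn(`Seq Scan on ${n.relation} - consider an index`));

  return nodes;
}

checkQuery(`SELECT * FROM users WHERE email = 'test@example.com'`)
  .then(() => pool.end());
```
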
**MySQL - EXPLAIN**:
```sql
EXPLAIN FORMAT=JSON
SELECT u.id, u.email, COUNT(p.id) AS post_count
FROM users u
LEFT JOIN posts p ON p.user_id = u.id
WHERE u.created_at > DATE_SUB(NOW(), INTERVAL 30 DAY)
GROUP BY u.id, u.email;
```

Look for:
- `type: ALL` - Full table scan (bad)
- `type: index` or `type: range` - Using an index (good)
- A high `rows` estimate - Large row counts suggest optimization is needed

**MongoDB - Explain**:
```javascript
db.users.find({
  createdAt: { $gte: new Date(Date.now() - 30*24*60*60*1000) }
}).explain("executionStats");
```

Look for:
- `COLLSCAN` - Collection scan (bad; add an index)
- `IXSCAN` - Index scan (good)
- `executionTimeMillis` - Total execution time

### 5. Index Analysis and Optimization

**PostgreSQL - Missing Indexes**:
```sql
-- Find tables with frequent sequential scans (missing-index candidates)
SELECT
  schemaname,
  relname,
  seq_scan,
  seq_tup_read,
  idx_scan,
  seq_tup_read / seq_scan AS avg_seq_read
FROM pg_stat_user_tables
WHERE seq_scan > 0
ORDER BY seq_tup_read DESC
LIMIT 20;

-- Find unused indexes (candidates for removal)
SELECT
  schemaname,
  relname,
  indexrelname,
  idx_scan,
  pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
WHERE idx_scan = 0
  AND indexrelname NOT LIKE '%_pkey'
ORDER BY pg_relation_size(indexrelid) DESC;

-- Check for duplicate indexes
SELECT
  pg_size_pretty(SUM(pg_relation_size(idx))::BIGINT) AS total_size,
  (array_agg(idx))[1] AS idx1,
  (array_agg(idx))[2] AS idx2,
  (array_agg(idx))[3] AS idx3,
  (array_agg(idx))[4] AS idx4
FROM (
  SELECT
    indexrelid::regclass AS idx,
    (indrelid::text ||E'\n'|| indclass::text ||E'\n'|| indkey::text ||E'\n'|| COALESCE(indexprs::text,'')||E'\n' || COALESCE(indpred::text,'')) AS key
  FROM pg_index
) sub
GROUP BY key
HAVING COUNT(*) > 1
ORDER BY SUM(pg_relation_size(idx)) DESC;
```

**Index Creation Examples**:

```sql
-- Simple index (single column)
CREATE INDEX CONCURRENTLY idx_users_email ON users(email);

-- Composite index (multiple columns) - order matters!
CREATE INDEX CONCURRENTLY idx_posts_user_created
ON posts(user_id, created_at DESC);

-- Partial index (filtered)
CREATE INDEX CONCURRENTLY idx_users_active_email
ON users(email)
WHERE status = 'active';

-- Expression index
CREATE INDEX CONCURRENTLY idx_users_lower_email
ON users(LOWER(email));

-- GIN index for full-text search (GIN is generally preferred over GiST for tsvector)
CREATE INDEX CONCURRENTLY idx_posts_search
ON posts USING GIN(to_tsvector('english', title || ' ' || content));
```

**MySQL - Index Analysis**:
```sql
-- Check indexes on a table
SHOW INDEXES FROM users;

-- Find indexes with zero cardinality (candidates to investigate)
SELECT
  TABLE_NAME,
  INDEX_NAME,
  SUM(CARDINALITY) AS cardinality
FROM information_schema.STATISTICS
WHERE TABLE_SCHEMA = DATABASE()
GROUP BY TABLE_NAME, INDEX_NAME
HAVING SUM(CARDINALITY) = 0;

-- Create index
CREATE INDEX idx_users_email ON users(email);

-- Create composite index
CREATE INDEX idx_posts_user_created ON posts(user_id, created_at);
```

**MongoDB - Index Analysis**:
```javascript
// List all indexes on a collection
db.users.getIndexes();

// Check index usage
db.users.aggregate([
  { $indexStats: {} }
]);

// Create single field index
db.users.createIndex({ email: 1 });

// Create compound index
db.posts.createIndex({ userId: 1, createdAt: -1 });

// Create text index for search
db.posts.createIndex({ title: "text", content: "text" });

// Create partial index
db.users.createIndex(
  { email: 1 },
  { partialFilterExpression: { status: "active" } }
);
```

### 6. Query Optimization Examples

**Example 1: N+1 Query Problem**

```javascript
// BEFORE (N+1 problem)
async function getUsersWithPosts() {
  const users = await User.findAll(); // 1 query
  for (const user of users) {
    user.posts = await Post.findAll({ // N queries (one per user)
      where: { userId: user.id }
    });
  }
  return users;
}

// AFTER (eager loading)
async function getUsersWithPosts() {
  const users = await User.findAll({ // 1 query with a join
    include: [{ model: Post, as: 'posts' }]
  });
  return users;
}

// SQL generated:
// SELECT u.*, p.* FROM users u LEFT JOIN posts p ON p.user_id = u.id;
```

**Example 2: SELECT * Optimization**

```sql
-- BEFORE (fetches all columns)
SELECT * FROM users WHERE email = 'user@example.com';

-- AFTER (fetch only the needed columns)
SELECT id, email, name, created_at FROM users WHERE email = 'user@example.com';
```

**Example 3: Inefficient JOIN**

```sql
-- BEFORE (correlated subquery runs once per row)
SELECT
  u.id,
  u.name,
  (SELECT COUNT(*) FROM posts WHERE user_id = u.id) AS post_count
FROM users u;

-- AFTER (single join with aggregation)
SELECT
  u.id,
  u.name,
  COUNT(p.id) AS post_count
FROM users u
LEFT JOIN posts p ON p.user_id = u.id
GROUP BY u.id, u.name;
```

**Example 4: Pagination with OFFSET**

```sql
-- BEFORE (inefficient for large offsets)
SELECT * FROM posts ORDER BY created_at DESC LIMIT 20 OFFSET 10000;

-- AFTER (cursor-based pagination)
SELECT * FROM posts
WHERE created_at < '2025-10-01T00:00:00Z' -- cursor from the last result
ORDER BY created_at DESC
LIMIT 20;
```

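Wired into an API, the cursor is just the `created_at` of the last row the client saw. A minimal Express-style sketch of the pattern (the route, table, and page size are illustrative assumptions):

```javascript
// cursor-pagination.js - sketch of a cursor-paginated endpoint
const express = require('express');
const { Pool } = require('pg');

const app = express();
const pool = new Pool({ connectionString: process.env.DATABASE_URL });

app.get('/posts', async (req, res) => {
  // First page: no cursor. Later pages: the client echoes back nextCursor.
  const cursor = req.query.cursor || new Date().toISOString();

  const { rows } = await pool.query(
    `SELECT id, content, created_at FROM posts
     WHERE created_at < $1
     ORDER BY created_at DESC
     LIMIT 20`,
    [cursor]
  );

  res.json({
    posts: rows,
    // The cursor for the next page is the oldest created_at on this page
    nextCursor: rows.length ? rows[rows.length - 1].created_at : null,
  });
});

app.listen(3000);
```
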
**Example 5: OR to UNION Optimization**

```sql
-- BEFORE (an OR across different columns can prevent index usage)
SELECT * FROM users WHERE email = 'test@example.com' OR username = 'testuser';

-- AFTER (allows index usage on both columns)
SELECT * FROM users WHERE email = 'test@example.com'
UNION
SELECT * FROM users WHERE username = 'testuser';
```

### 7. Schema Optimization

**Normalization vs. Denormalization**:

```sql
-- Normalized (3NF) - reduces redundancy but requires joins
CREATE TABLE users (
  id SERIAL PRIMARY KEY,
  name VARCHAR(255),
  email VARCHAR(255)
);

CREATE TABLE user_profiles (
  user_id INTEGER PRIMARY KEY REFERENCES users(id),
  bio TEXT,
  avatar_url VARCHAR(500)
);

-- Denormalized - faster reads, some redundancy
CREATE TABLE users (
  id SERIAL PRIMARY KEY,
  name VARCHAR(255),
  email VARCHAR(255),
  bio TEXT,
  avatar_url VARCHAR(500)
);
```

**Partitioning Large Tables**:

```sql
-- PostgreSQL table partitioning by date
CREATE TABLE posts (
  id BIGSERIAL,
  user_id INTEGER,
  content TEXT,
  created_at TIMESTAMP NOT NULL,
  PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);

-- Create partitions
CREATE TABLE posts_2025_q1 PARTITION OF posts
  FOR VALUES FROM ('2025-01-01') TO ('2025-04-01');

CREATE TABLE posts_2025_q2 PARTITION OF posts
  FOR VALUES FROM ('2025-04-01') TO ('2025-07-01');
```

**Column Type Optimization**:

```sql
-- BEFORE (inefficient types)
CREATE TABLE users (
  id BIGSERIAL,
  email VARCHAR(500),
  status VARCHAR(50),
  age NUMERIC,
  is_verified CHAR(1)
);

-- AFTER (optimized types)
CREATE TABLE users (
  id SERIAL,          -- Use SERIAL if < 2 billion records
  email VARCHAR(255), -- Right-sized
  status VARCHAR(20) CHECK (status IN ('active', 'inactive', 'suspended')), -- Constrained
  age SMALLINT CHECK (age >= 0 AND age <= 150), -- Appropriately sized integer
  is_verified BOOLEAN -- Native boolean
);
```

### 8. Connection Pool Optimization

**Node.js (pg) Example**:

```javascript
// BEFORE (default settings)
const pool = new Pool({
  connectionString: process.env.DATABASE_URL
});

// AFTER (tuned for the application)
const pool = new Pool({
  connectionString: process.env.DATABASE_URL,
  max: 20,                       // Maximum pool size (based on workload)
  min: 5,                        // Minimum idle connections
  idleTimeoutMillis: 30000,      // Remove idle connections after 30s
  connectionTimeoutMillis: 2000, // Fail fast if no connection is available
  statement_timeout: 5000,       // Query timeout (5s)
  query_timeout: 5000
});

// Monitor pool health
pool.on('connect', () => {
  console.log('Database connection established');
});

pool.on('error', (err) => {
  console.error('Unexpected database error', err);
});

// Check pool status
setInterval(() => {
  console.log({
    total: pool.totalCount,
    idle: pool.idleCount,
    waiting: pool.waitingCount
  });
}, 60000);
```

**Connection Pool Sizing Formula**:
```
Optimal Pool Size = (Core Count × 2) + Effective Spindle Count

Example for a 4-core server with an SSD:
Pool Size = (4 × 2) + 1 = 9 connections
```

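As a trivial helper, the formula translates directly into code: `os.cpus()` gives the core count, and the effective spindle count is an assumption you supply (1 is a common stand-in for an SSD):

```javascript
// pool-size.js - apply the (cores × 2) + spindles heuristic
const os = require('os');

function optimalPoolSize(effectiveSpindleCount = 1) {
  return os.cpus().length * 2 + effectiveSpindleCount;
}

console.log(`Suggested pool size: ${optimalPoolSize()}`);
```
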
### 9. Query Caching

**Application-Level Caching (Redis)**:

```javascript
// BEFORE (no caching)
async function getUser(userId) {
  return await User.findByPk(userId);
}

// AFTER (with a Redis cache)
async function getUser(userId) {
  const cacheKey = `user:${userId}`;

  // Try the cache first
  const cached = await redis.get(cacheKey);
  if (cached) {
    return JSON.parse(cached);
  }

  // Cache miss - query the database
  const user = await User.findByPk(userId);

  // Store in cache (TTL: 5 minutes)
  await redis.setex(cacheKey, 300, JSON.stringify(user));

  return user;
}

// Invalidate the cache on update
async function updateUser(userId, data) {
  const user = await User.update(data, { where: { id: userId } });

  // Invalidate cache
  await redis.del(`user:${userId}`);

  return user;
}
```

**Database-Level Caching**:

```sql
-- PostgreSQL materialized view (cached aggregate)
CREATE MATERIALIZED VIEW user_stats AS
SELECT
  user_id,
  COUNT(*) AS post_count,
  MAX(created_at) AS last_post_at
FROM posts
GROUP BY user_id;

-- Create a unique index (required for REFRESH ... CONCURRENTLY)
CREATE UNIQUE INDEX idx_user_stats_user_id ON user_stats(user_id);

-- Refresh periodically (in a cron job)
REFRESH MATERIALIZED VIEW CONCURRENTLY user_stats;
```

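If the refresh lives in the application rather than the system crontab, a scheduler such as `node-cron` keeps it next to the pool. A hedged sketch (the 10-minute cadence is an arbitrary assumption):

```javascript
// refresh-stats.js - periodically refresh the user_stats materialized view
const cron = require('node-cron');
const { Pool } = require('pg');

const pool = new Pool({ connectionString: process.env.DATABASE_URL });

// Every 10 minutes; CONCURRENTLY keeps the view readable during the refresh
cron.schedule('*/10 * * * *', async () => {
  try {
    await pool.query('REFRESH MATERIALIZED VIEW CONCURRENTLY user_stats');
    console.log('user_stats refreshed');
  } catch (err) {
    console.error('Refresh failed', err);
  }
});
```
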
### 10. Measure Impact

After implementing optimizations:

```sql
-- PostgreSQL: Compare before/after query times
SELECT
  query,
  calls,
  mean_exec_time,
  total_exec_time
FROM pg_stat_statements
WHERE query LIKE '%[your_query_pattern]%'
ORDER BY mean_exec_time DESC;

-- Check index usage after creating indexes
SELECT
  schemaname,
  relname,
  indexrelname,
  idx_scan,
  idx_tup_read,
  idx_tup_fetch
FROM pg_stat_user_indexes
WHERE indexrelname IN ('idx_users_email', 'idx_posts_user_created')
ORDER BY idx_scan DESC;
```

## Output Format

```markdown
# Database Optimization Report: [Context]

**Optimization Date**: [Date]
**Database**: [PostgreSQL/MySQL/MongoDB version]
**Environment**: [production/staging/development]
**Threshold**: [X]ms for slow queries

## Executive Summary

[2-3 paragraph summary of findings and optimizations applied]

## Baseline Metrics

### Slow Queries Identified

| Query Pattern | Avg Time | Calls | Total Time | % CPU |
|---------------|----------|-------|------------|-------|
| SELECT users WHERE email = ... | 450ms | 1,250 | 562s | 12.3% |
| SELECT posts with user JOIN | 820ms | 450 | 369s | 8.1% |
| SELECT COUNT(*) FROM activity_logs | 2,100ms | 120 | 252s | 5.5% |

### Index Analysis

**Missing Indexes**: 3 tables with frequent sequential scans
**Unused Indexes**: 2 indexes with 0 scans (candidates for removal)
**Duplicate Indexes**: 1 set of duplicate indexes found

### Connection Pool Metrics

- **Total Connections**: 15
- **Idle Connections**: 3
- **Active Connections**: 12
- **Waiting Requests**: 5 (indicates pool exhaustion)

## Optimizations Implemented

### 1. Added Missing Indexes

#### Index: idx_users_email
```sql
CREATE INDEX CONCURRENTLY idx_users_email ON users(email);
```

**Impact**:
- **Before**: 450ms avg, 1,250 calls, Seq Scan over 500K rows
- **After**: 8ms avg, 1,250 calls, Index Scan
- **Improvement**: 98.2% faster (442ms saved per query)
- **Total Time Saved**: 552s per analysis period

**Execution Plan Comparison**:
```
BEFORE:
Seq Scan on users (cost=0.00..15234.50 rows=1 width=124) (actual time=442.231..448.891 rows=1)
  Filter: (email = 'user@example.com')
  Rows Removed by Filter: 499999

AFTER:
Index Scan using idx_users_email on users (cost=0.42..8.44 rows=1 width=124) (actual time=0.031..0.033 rows=1)
  Index Cond: (email = 'user@example.com')
```

#### Index: idx_posts_user_created
```sql
CREATE INDEX CONCURRENTLY idx_posts_user_created ON posts(user_id, created_at DESC);
```

**Impact**:
- **Before**: 820ms avg, Nested Loop + Seq Scan
- **After**: 45ms avg, Index Scan with sorted results
- **Improvement**: 94.5% faster (775ms saved per query)

### 2. Query Optimizations

#### Optimization: Fixed N+1 Query in User Posts Endpoint

**Before**:
```javascript
const users = await User.findAll();
for (const user of users) {
  user.posts = await Post.findAll({ where: { userId: user.id } });
}
// Result: 1 + N queries (251 queries for 250 users)
```

**After**:
```javascript
const users = await User.findAll({
  include: [{ model: Post, as: 'posts' }]
});
// Result: 1 query with a JOIN
```

**Impact**:
- **Before**: 2,100ms for 250 users (1 + 250 queries)
- **After**: 180ms for 250 users (1 query)
- **Improvement**: 91.4% faster

#### Optimization: Cursor-Based Pagination

**Before**:
```sql
SELECT * FROM posts ORDER BY created_at DESC LIMIT 20 OFFSET 10000;
-- Execution time: 1,200ms (must scan and skip 10,000 rows)
```

**After**:
```sql
SELECT * FROM posts
WHERE created_at < '2025-09-01T12:00:00Z'
ORDER BY created_at DESC
LIMIT 20;
-- Execution time: 15ms (index seek directly to position)
```

**Impact**: 98.8% faster pagination for deep pages

### 3. Schema Optimizations

#### Denormalized User Activity Counts

**Before**:
```sql
-- Expensive aggregation on every query
SELECT u.*, COUNT(p.id) AS post_count
FROM users u
LEFT JOIN posts p ON p.user_id = u.id
GROUP BY u.id;
```

**After**:
```sql
-- Added cached column with trigger updates
ALTER TABLE users ADD COLUMN post_count INTEGER DEFAULT 0;

-- Trigger to maintain the count
CREATE OR REPLACE FUNCTION update_user_post_count()
RETURNS TRIGGER AS $$
BEGIN
  IF TG_OP = 'INSERT' THEN
    UPDATE users SET post_count = post_count + 1 WHERE id = NEW.user_id;
  ELSIF TG_OP = 'DELETE' THEN
    UPDATE users SET post_count = post_count - 1 WHERE id = OLD.user_id;
  END IF;
  RETURN NULL;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER update_post_count
AFTER INSERT OR DELETE ON posts
FOR EACH ROW EXECUTE FUNCTION update_user_post_count();

-- Simple query now
SELECT * FROM users;
```

**Impact**:
- **Before**: 340ms (aggregation query)
- **After**: 12ms (simple select)
- **Improvement**: 96.5% faster

### 4. Connection Pool Optimization

**Before**:
```javascript
const pool = new Pool(); // Default settings
// Max: 10, Min: 0
// Frequent connection exhaustion under load
```

**After**:
```javascript
const pool = new Pool({
  max: 20, // Increased for higher concurrency
  min: 5,  // Keep warm connections
  idleTimeoutMillis: 30000,
  connectionTimeoutMillis: 2000,
  statement_timeout: 5000
});
```

**Impact**:
- **Before**: 45 connection timeout errors per hour under load
- **After**: 0 connection timeout errors
- **Improvement**: Eliminated connection pool exhaustion

### 5. Query Result Caching

**Implementation**:
```javascript
async function getUserProfile(userId) {
  const cacheKey = `user:${userId}:profile`;
  const cached = await redis.get(cacheKey);

  if (cached) return JSON.parse(cached);

  const profile = await User.findByPk(userId, {
    include: ['profile', 'settings']
  });

  await redis.setex(cacheKey, 300, JSON.stringify(profile));
  return profile;
}
```

**Impact**:
- **Cache Hit Rate**: 87% (after 24 hours)
- **Avg Response Time (cached)**: 3ms
- **Avg Response Time (uncached)**: 45ms
- **Database Load Reduction**: 87%

## Results Summary

### Overall Performance Improvements

| Metric | Before | After | Improvement |
|--------|--------|-------|-------------|
| Avg Query Time | 285ms | 34ms | 88% faster |
| Slow Query Count (>500ms) | 23 queries | 2 queries | 91% reduction |
| Database CPU Usage | 68% | 32% | 53% reduction |
| Connection Pool Timeouts | 45/hour | 0/hour | 100% elimination |
| Cache Hit Rate | N/A | 87% | New capability |

### Query-Specific Improvements

| Query | Before | After | Improvement |
|-------|--------|-------|-------------|
| User lookup by email | 450ms | 8ms | 98.2% |
| User posts listing | 820ms | 45ms | 94.5% |
| User activity with posts | 2,100ms | 180ms | 91.4% |
| Deep pagination | 1,200ms | 15ms | 98.8% |

### Index Impact

| Index | Scans | Rows Read | Impact |
|-------|-------|-----------|--------|
| idx_users_email | 1,250 | 1,250 | Direct lookups |
| idx_posts_user_created | 450 | 9,000 | User posts queries |

## Monitoring Recommendations

### Key Metrics to Track

1. **Query Performance**:
   ```sql
   -- Weekly query performance review
   SELECT
     substring(query, 1, 100) AS query,
     calls,
     mean_exec_time,
     total_exec_time
   FROM pg_stat_statements
   WHERE mean_exec_time > 100
   ORDER BY mean_exec_time DESC
   LIMIT 20;
   ```

2. **Index Usage**:
   ```sql
   -- Monitor new index effectiveness
   SELECT * FROM pg_stat_user_indexes
   WHERE indexrelname LIKE 'idx_%'
   ORDER BY idx_scan DESC;
   ```

3. **Connection Pool Health**:
   ```javascript
   // Log pool metrics every minute
   setInterval(() => {
     console.log('Pool:', pool.totalCount, 'Idle:', pool.idleCount);
   }, 60000);
   ```

4. **Cache Hit Rates**:
   ```javascript
   // Track Redis cache effectiveness via keyspace_hits vs keyspace_misses
   const stats = await redis.info('stats');
   const hits = Number(stats.match(/keyspace_hits:(\d+)/)[1]);
   const misses = Number(stats.match(/keyspace_misses:(\d+)/)[1]);
   console.log('Cache hit rate:', (hits / (hits + misses) * 100).toFixed(1) + '%');
   ```

### Alerts to Configure

- Slow query count > 10 per hour
- Connection pool utilization > 85%
- Cache hit rate < 70%
- Database CPU > 80%

## Trade-offs and Considerations

**Denormalization Trade-offs**:
- **Benefit**: Faster reads (96.5% improvement)
- **Cost**: Increased storage (minimal), trigger overhead on writes
- **Conclusion**: Worth it for read-heavy workloads

**Connection Pool Size**:
- **Benefit**: Eliminated timeouts
- **Cost**: Increased memory usage (~20MB)
- **Consideration**: Monitor database connection limits

**Caching Strategy**:
- **Benefit**: 87% reduction in database load
- **Cost**: Cache invalidation complexity, Redis dependency
- **Consideration**: Implement cache warming for critical data

## Next Steps

1. **Monitor** new indexes and query performance for 1 week
2. **Implement** additional caching for frequently accessed data
3. **Consider** table partitioning for `activity_logs` (2M+ rows)
4. **Schedule** VACUUM ANALYZE for optimized tables
5. **Review** remaining 2 slow queries for further optimization

## Maintenance Recommendations

**Weekly**:
- Review pg_stat_statements for new slow queries
- Check index usage statistics

**Monthly**:
- Analyze table statistics: `VACUUM ANALYZE`
- Review and remove unused indexes
- Check for table bloat

**Quarterly**:
- Review schema design for optimization opportunities
- Evaluate partitioning strategy for large tables
- Update connection pool settings based on usage patterns

793
commands/optimize/frontend.md
Normal file
@@ -0,0 +1,793 @@

# Frontend Optimization Operation

You are executing the **frontend** operation to optimize frontend bundle size, rendering performance, asset loading, and Web Vitals.

## Parameters

**Received**: `$ARGUMENTS` (after removing 'frontend' operation name)

Expected format: `target:"bundles|rendering|assets|images|fonts|all" [pages:"page-list"] [metrics_target:"lighthouse-score"] [framework:"react|vue|angular|svelte"]`

**Parameter definitions**:
- `target` (required): What to optimize - `bundles`, `rendering`, `assets`, `images`, `fonts`, or `all`
- `pages` (optional): Specific pages to optimize (comma-separated, e.g., "dashboard,profile,checkout")
- `metrics_target` (optional): Target Lighthouse score (e.g., "lighthouse>90", "lcp<2.5s")
- `framework` (optional): Framework being used - `react`, `vue`, `angular`, `svelte` (auto-detected if not specified)

## Workflow

### 1. Detect Frontend Framework and Build Tool

```bash
# Check framework
grep -E "\"react\"|\"vue\"|\"@angular\"|\"svelte\"" package.json | head -5

# Check build tool
grep -E "\"webpack\"|\"vite\"|\"parcel\"|\"rollup\"|\"esbuild\"" package.json | head -5

# Check for Next.js, Nuxt, etc.
ls next.config.js nuxt.config.js vite.config.js webpack.config.js 2>/dev/null
```

### 2. Run Performance Audit

**Lighthouse Audit**:
```bash
# Single page audit
npx lighthouse https://your-app.com --output=json --output-path=./audit-baseline.json --view

# Multiple pages
for page in dashboard profile checkout; do
  npx lighthouse "https://your-app.com/$page" \
    --output=json \
    --output-path="./audit-$page.json"
done

# Use Lighthouse CI for automated audits
npm install -g @lhci/cli
lhci autorun --config=lighthouserc.json
```

**Bundle Analysis**:
```bash
# Webpack Bundle Analyzer
npm run build -- --stats
npx webpack-bundle-analyzer dist/stats.json

# Vite bundle analysis
npx vite-bundle-visualizer

# Next.js bundle analysis
npm install @next/bundle-analyzer
# Then configure it in next.config.js
```

### 3. Bundle Optimization

#### 3.1. Code Splitting by Route

**React (with React Router)**:
```javascript
// BEFORE (everything in one bundle)
import Dashboard from './pages/Dashboard';
import Profile from './pages/Profile';
import Settings from './pages/Settings';

function App() {
  return (
    <Routes>
      <Route path="/dashboard" element={<Dashboard />} />
      <Route path="/profile" element={<Profile />} />
      <Route path="/settings" element={<Settings />} />
    </Routes>
  );
}
// Result: 2.5MB initial bundle

// AFTER (lazy loading by route)
import { lazy, Suspense } from 'react';

const Dashboard = lazy(() => import('./pages/Dashboard'));
const Profile = lazy(() => import('./pages/Profile'));
const Settings = lazy(() => import('./pages/Settings'));

function App() {
  return (
    <Suspense fallback={<LoadingSpinner />}>
      <Routes>
        <Route path="/dashboard" element={<Dashboard />} />
        <Route path="/profile" element={<Profile />} />
        <Route path="/settings" element={<Settings />} />
      </Routes>
    </Suspense>
  );
}
// Result: 450KB initial + 3 smaller chunks
// Improvement: 82% smaller initial bundle
```

**Next.js (automatic code splitting)**:
```javascript
// Next.js automatically splits by page, but you can add dynamic imports:
import dynamic from 'next/dynamic';

const HeavyComponent = dynamic(() => import('../components/HeavyChart'), {
  loading: () => <p>Loading chart...</p>,
  ssr: false // Don't render on the server if not needed
});

export default function Dashboard() {
  return (
    <div>
      <h1>Dashboard</h1>
      <HeavyComponent data={data} />
    </div>
  );
}
```

**Vue (with Vue Router)**:
```javascript
// BEFORE
import Dashboard from './views/Dashboard.vue';
import Profile from './views/Profile.vue';

const routes = [
  { path: '/dashboard', component: Dashboard },
  { path: '/profile', component: Profile }
];

// AFTER (lazy loading)
const routes = [
  { path: '/dashboard', component: () => import('./views/Dashboard.vue') },
  { path: '/profile', component: () => import('./views/Profile.vue') }
];
```

#### 3.2. Tree Shaking and Dead Code Elimination

**Proper Import Strategy**:
```javascript
// BEFORE (imports entire libraries)
import _ from 'lodash'; // 70KB
import moment from 'moment'; // 232KB
import { Button, Modal, Table, Form, Input } from 'antd'; // Imports everything

const formatted = moment().format('YYYY-MM-DD');
const debounced = _.debounce(fn, 300);

// AFTER (tree-shakeable imports)
import { debounce } from 'lodash-es'; // 2KB (tree-shakeable)
import { format } from 'date-fns'; // 12KB (tree-shakeable)
import Button from 'antd/es/button'; // Import only what's needed
import Modal from 'antd/es/modal';

const formatted = format(new Date(), 'yyyy-MM-dd');
const debounced = debounce(fn, 300);

// Bundle size reduction: ~290KB → ~20KB (93% smaller)
```

**Webpack Configuration**:
```javascript
// webpack.config.js
module.exports = {
  mode: 'production',
  optimization: {
    usedExports: true, // Tree shaking
    sideEffects: false, // Assume no side effects (check package.json first)
    minimize: true,
    splitChunks: {
      chunks: 'all',
      cacheGroups: {
        vendor: {
          test: /[\\/]node_modules[\\/]/,
          name: 'vendors',
          priority: 10
        },
        common: {
          minChunks: 2,
          priority: 5,
          reuseExistingChunk: true
        }
      }
    }
  }
};
```

#### 3.3. Remove Unused Dependencies

```bash
# Analyze unused dependencies
npx depcheck

# Example output:
# Unused dependencies:
# * moment (use date-fns instead)
# * jquery (not used in React app)
# * bootstrap (using Tailwind instead)

# Remove them
npm uninstall moment jquery bootstrap

# Check the bundle impact
npm run build
```

#### 3.4. Optimize Bundle Chunks

```javascript
// Vite config for optimal chunking
export default defineConfig({
  build: {
    rollupOptions: {
      output: {
        manualChunks: {
          'vendor-react': ['react', 'react-dom', 'react-router-dom'],
          'vendor-ui': ['antd', '@ant-design/icons'],
          'vendor-utils': ['axios', 'lodash-es', 'date-fns']
        }
      }
    },
    chunkSizeWarningLimit: 500 // Warn if a chunk exceeds 500KB
  }
});

// Next.js config for optimal chunking
module.exports = {
  webpack: (config, { isServer }) => {
    if (!isServer) {
      config.optimization.splitChunks = {
        chunks: 'all',
        cacheGroups: {
          default: false,
          vendors: false,
          framework: {
            name: 'framework',
            chunks: 'all',
            test: /(?<!node_modules.*)[\\/]node_modules[\\/](react|react-dom|scheduler|prop-types)[\\/]/,
            priority: 40,
            enforce: true
          },
          lib: {
            test: /[\\/]node_modules[\\/]/,
            name: 'lib',
            priority: 30,
            minChunks: 1,
            reuseExistingChunk: true
          }
        }
      };
    }
    return config;
  }
};
```

### 4. Rendering Optimization

#### 4.1. React - Prevent Unnecessary Re-renders

**Memoization**:
```javascript
// BEFORE (re-renders on every parent update)
function UserList({ users, onSelect }) {
  return users.map(user => (
    <UserCard key={user.id} user={user} onSelect={onSelect} />
  ));
}

function UserCard({ user, onSelect }) {
  console.log('Rendering UserCard:', user.id);
  return (
    <div onClick={() => onSelect(user)}>
      {user.name} - {user.email}
    </div>
  );
}
// Result: All cards re-render even if only one user changes

// AFTER (memoized components)
import { memo, useCallback, useMemo } from 'react';

const UserCard = memo(({ user, onSelect }) => {
  console.log('Rendering UserCard:', user.id);
  return (
    <div onClick={() => onSelect(user)}>
      {user.name} - {user.email}
    </div>
  );
});

function UserList({ users, onSelect }) {
  const memoizedOnSelect = useCallback(onSelect, []); // Stable reference across renders

  return users.map(user => (
    <UserCard key={user.id} user={user} onSelect={memoizedOnSelect} />
  ));
}
// Result: Only changed cards re-render
// Performance: 90% fewer renders for 100 cards
```

**useMemo for Expensive Computations**:
```javascript
// BEFORE (recalculates on every render)
function Dashboard({ data }) {
  const stats = calculateComplexStats(data); // Expensive: 50ms

  return <StatsDisplay stats={stats} />;
}
// Result: 50ms wasted on every render, even if data is unchanged

// AFTER (memoized calculation)
function Dashboard({ data }) {
  const stats = useMemo(
    () => calculateComplexStats(data),
    [data] // Only recalculate when data changes
  );

  return <StatsDisplay stats={stats} />;
}
// Result: 0ms for unchanged data, 50ms only when data changes
```

#### 4.2. Virtual Scrolling for Long Lists

```javascript
// BEFORE (renders all 10,000 items)
function LargeList({ items }) {
  return (
    <div className="list">
      {items.map(item => (
        <ListItem key={item.id} data={item} />
      ))}
    </div>
  );
}
// Result: Initial render: 2,500ms, 10,000 DOM nodes

// AFTER (virtual scrolling with react-window)
import { FixedSizeList } from 'react-window';

function LargeList({ items }) {
  const Row = ({ index, style }) => (
    <div style={style}>
      <ListItem data={items[index]} />
    </div>
  );

  return (
    <FixedSizeList
      height={600}
      itemCount={items.length}
      itemSize={50}
      width="100%"
    >
      {Row}
    </FixedSizeList>
  );
}
// Result: Initial render: 45ms, only ~20 visible DOM nodes
// Performance: 98% faster, 99.8% fewer DOM nodes
```

#### 4.3. Debounce Expensive Operations

```javascript
// BEFORE (triggers on every keystroke)
function SearchBox() {
  const [query, setQuery] = useState('');

  const handleSearch = (value) => {
    setQuery(value);
    fetchResults(value); // API call on every keystroke
  };

  return <input onChange={(e) => handleSearch(e.target.value)} />;
}
// Result: 50 API calls for typing "performance optimization"

// AFTER (debounced search)
import { useMemo } from 'react';
import { debounce } from 'lodash-es';

function SearchBox() {
  const [query, setQuery] = useState('');

  const debouncedSearch = useMemo(
    () => debounce((value) => fetchResults(value), 300),
    []
  );

  const handleSearch = (value) => {
    setQuery(value);
    debouncedSearch(value);
  };

  return <input onChange={(e) => handleSearch(e.target.value)} />;
}
// Result: 1-2 API calls for typing "performance optimization"
// Performance: 96% fewer API calls
```

### 5. Image Optimization

#### 5.1. Modern Image Formats

```javascript
// BEFORE (traditional formats)
<img src="/images/hero.jpg" alt="Hero" />
// hero.jpg: 1.2MB

// AFTER (modern formats with fallback)
<picture>
  <source srcset="/images/hero.avif" type="image/avif" />
  <source srcset="/images/hero.webp" type="image/webp" />
  <img src="/images/hero.jpg" alt="Hero" loading="lazy" />
</picture>
// hero.avif: 180KB (85% smaller)
// hero.webp: 240KB (80% smaller)
```

**Next.js Image Optimization**:
```javascript
// BEFORE
<img src="/hero.jpg" alt="Hero" />

// AFTER (automatic optimization)
import Image from 'next/image';

<Image
  src="/hero.jpg"
  alt="Hero"
  width={1200}
  height={600}
  priority // Load immediately for above-the-fold images
  placeholder="blur" // Show a blur while loading
  blurDataURL="data:image/..." // Inline blur placeholder
/>
// Automatically serves WebP/AVIF based on browser support
```

#### 5.2. Lazy Loading

```javascript
// BEFORE (all images load immediately)
<div className="gallery">
  {images.map(img => (
    <img key={img.id} src={img.url} alt={img.title} />
  ))}
</div>
// Result: 50 images load on page load (slow)

// AFTER (native lazy loading)
<div className="gallery">
  {images.map(img => (
    <img
      key={img.id}
      src={img.url}
      alt={img.title}
      loading="lazy" // Native browser lazy loading
    />
  ))}
</div>
// Result: Only visible images load initially
// Performance: 85% fewer initial network requests
```

#### 5.3. Responsive Images

```javascript
// BEFORE (serves the same large image to all devices)
<img src="/hero-2400w.jpg" alt="Hero" />
// Mobile: Downloads a 2.4MB image for a 375px screen

// AFTER (responsive srcset)
<img
  src="/hero-800w.jpg"
  srcset="
    /hero-400w.jpg 400w,
    /hero-800w.jpg 800w,
    /hero-1200w.jpg 1200w,
    /hero-2400w.jpg 2400w
  "
  sizes="
    (max-width: 600px) 400px,
    (max-width: 900px) 800px,
    (max-width: 1200px) 1200px,
    2400px
  "
  alt="Hero"
/>
// Mobile: Downloads a 120KB image for a 375px screen
// Performance: 95% smaller download on mobile
```

### 6. Asset Optimization

#### 6.1. Font Loading Strategy

```css
/* BEFORE (blocks rendering) */
@import url('https://fonts.googleapis.com/css2?family=Roboto:wght@400;700&display=swap');

/* AFTER (optimized loading) */
/* Use font-display: swap to show fallback text immediately */
@font-face {
  font-family: 'Roboto';
  src: url('/fonts/roboto.woff2') format('woff2');
  font-weight: 400;
  font-style: normal;
  font-display: swap; /* Show text immediately with a fallback font */
}

/* Preload critical fonts in the HTML:
   <link rel="preload" href="/fonts/roboto.woff2" as="font" type="font/woff2" crossorigin> */
```

**Variable Fonts** (a single file for multiple weights):
```css
/* BEFORE (multiple files) */
/* roboto-regular.woff2: 50KB */
/* roboto-bold.woff2: 52KB */
/* roboto-light.woff2: 48KB */
/* Total: 150KB */

/* AFTER (variable font) */
@font-face {
  font-family: 'Roboto';
  src: url('/fonts/roboto-variable.woff2') format('woff2-variations');
  font-weight: 300 700; /* Supports all weights from 300-700 */
}
/* roboto-variable.woff2: 75KB */
/* Savings: 50% smaller */
```

#### 6.2. Critical CSS

```html
<!-- BEFORE (blocks rendering until the full CSS loads) -->
<link rel="stylesheet" href="/styles/main.css"> <!-- 250KB -->

<!-- AFTER (inline critical CSS, defer non-critical) -->
<style>
  /* Inline critical above-the-fold CSS (< 14KB) */
  .header { ... }
  .hero { ... }
  .nav { ... }
</style>

<!-- Defer non-critical CSS -->
<link rel="preload" href="/styles/main.css" as="style" onload="this.onload=null;this.rel='stylesheet'">
<noscript><link rel="stylesheet" href="/styles/main.css"></noscript>

<!-- Or use the media-query trick -->
<link rel="stylesheet" href="/styles/main.css" media="print" onload="this.media='all'">
```

#### 6.3. JavaScript Defer/Async
|
||||
|
||||
```html
|
||||
<!-- BEFORE (blocks HTML parsing) -->
|
||||
<script src="/js/analytics.js"></script>
|
||||
<script src="/js/chat-widget.js"></script>
|
||||
<script src="/js/app.js"></script>
|
||||
|
||||
<!-- AFTER (non-blocking) -->
|
||||
<!-- async: Download in parallel, execute as soon as ready (order not guaranteed) -->
|
||||
<script src="/js/analytics.js" async></script>
|
||||
<script src="/js/chat-widget.js" async></script>
|
||||
|
||||
<!-- defer: Download in parallel, execute after HTML parsed (order guaranteed) -->
|
||||
<script src="/js/app.js" defer></script>
|
||||
|
||||
<!-- Performance: Eliminates script blocking time -->
|
||||
```

### 7. Caching and Service Workers

**Service Worker for Offline Support**:
```javascript
// sw.js
const CACHE_NAME = 'app-v1';
const urlsToCache = [
  '/',
  '/styles/main.css',
  '/js/app.js',
  '/images/logo.png'
];

self.addEventListener('install', (event) => {
  event.waitUntil(
    caches.open(CACHE_NAME).then((cache) => cache.addAll(urlsToCache))
  );
});

self.addEventListener('fetch', (event) => {
  event.respondWith(
    caches.match(event.request).then((response) => {
      // Return the cached version or fall back to the network
      return response || fetch(event.request);
    })
  );
});

// Register in app
if ('serviceWorker' in navigator) {
  navigator.serviceWorker.register('/sw.js');
}
```

### 8. Web Vitals Optimization

**Optimize LCP (Largest Contentful Paint < 2.5s)**:
- Preload critical resources: `<link rel="preload" href="hero.jpg" as="image">`
- Use CDN for static assets
- Optimize server response time (TTFB < 600ms)
- Optimize images (modern formats, compression)
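
To confirm LCP improvements with real-user data, a `PerformanceObserver` can record the final LCP value. A minimal sketch, assuming a placeholder reporting endpoint `/analytics/vitals` (the `web-vitals` npm package offers a production-ready equivalent):

```javascript
// Observe LCP candidates; the last entry before user input is the final LCP.
let lcp = 0;
let sent = false;

new PerformanceObserver((list) => {
  for (const entry of list.getEntries()) {
    lcp = entry.startTime; // ms since navigation start
  }
}).observe({ type: 'largest-contentful-paint', buffered: true });

// Report once the page is hidden -- the LCP value is final by then.
addEventListener('visibilitychange', () => {
  if (!sent && lcp > 0 && document.visibilityState === 'hidden') {
    sent = true;
    navigator.sendBeacon('/analytics/vitals', JSON.stringify({ lcp }));
  }
});
```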

**Optimize FID/INP (First Input Delay / Interaction to Next Paint < 200ms)**:
- Reduce JavaScript execution time
- Break up long tasks (yield to the main thread; see the sketch after this list)
- Use web workers for heavy computation
- Debounce/throttle event handlers
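
One way to break up a long task, as noted above, is to process data in chunks and yield between them so the browser can handle pending input. A sketch under assumed names (`rows` and `process` are illustrative); `scheduler.yield()` is a newer Chromium API, so a `setTimeout` fallback is included:

```javascript
// Yield control back to the main thread between chunks of work.
const yieldToMain = () =>
  typeof scheduler !== 'undefined' && scheduler.yield
    ? scheduler.yield()                                 // Chromium scheduler API
    : new Promise((resolve) => setTimeout(resolve, 0)); // portable fallback

async function processInChunks(rows, process, chunkSize = 500) {
  for (let i = 0; i < rows.length; i += chunkSize) {
    rows.slice(i, i + chunkSize).forEach(process);
    await yieldToMain(); // pending input and paints run here
  }
}
```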

**Optimize CLS (Cumulative Layout Shift < 0.1)**:
- Set explicit width/height for images and videos
- Reserve space for dynamic content
- Avoid inserting content above existing content
- Use CSS `aspect-ratio` for responsive media

```css
/* Prevent CLS for images */
img {
  width: 100%;
  height: auto;
  aspect-ratio: 16 / 9; /* Reserve space before the image loads */
}
```

## Output Format

```markdown
# Frontend Optimization Report: [Context]

**Optimization Date**: [Date]
**Framework**: [React/Vue/Angular version]
**Build Tool**: [Webpack/Vite/Next.js version]
**Target Pages**: [List of pages]

## Executive Summary

[Summary of findings and optimizations]

## Baseline Metrics

### Lighthouse Scores (Before)

| Page | Performance | Accessibility | Best Practices | SEO |
|------|-------------|---------------|----------------|-----|
| Home | 62 | 88 | 79 | 92 |
| Dashboard | 48 | 91 | 75 | 89 |
| Profile | 55 | 90 | 82 | 91 |

### Web Vitals (Before)

| Page | LCP | FID | CLS | TTFB |
|------|-----|-----|-----|------|
| Home | 4.2s | 180ms | 0.18 | 950ms |
| Dashboard | 5.8s | 320ms | 0.25 | 1200ms |

### Bundle Sizes (Before)

| Bundle | Size (gzipped) | Percentage |
|--------|----------------|------------|
| main.js | 850KB | 68% |
| vendor.js | 320KB | 25% |
| styles.css | 85KB | 7% |
| **Total** | **1.25MB** | **100%** |

## Optimizations Implemented

### 1. Implemented Code Splitting

**Before**: Single 850KB main bundle
**After**: Initial 180KB + route chunks (120KB, 95KB, 85KB)

**Impact**: 79% smaller initial bundle

### 2. Replaced Heavy Dependencies

- Moment.js (232KB) → date-fns (12KB) = 94.8% smaller
- Lodash (70KB) → lodash-es tree-shakeable (2KB used) = 97.1% smaller
- Total savings: 288KB

### 3. Implemented Virtual Scrolling

**User List (10,000 items)**:
- Before: 2,500ms initial render, 10,000 DOM nodes
- After: 45ms initial render, ~20 visible DOM nodes
- **Improvement**: 98% faster

### 4. Optimized Images

**Hero Image**:
- Before: hero.jpg (1.2MB)
- After: hero.avif (180KB)
- **Savings**: 85%

**Implemented**:
- Modern formats (WebP, AVIF)
- Lazy loading for below-fold images
- Responsive srcset for different screen sizes

### 5. Optimized Rendering with React.memo

**Product Grid (500 items)**:
- Before: All 500 components re-render on filter change
- After: Only filtered subset re-renders (~50 items)
- **Improvement**: 90% fewer re-renders

## Results Summary

### Lighthouse Scores (After)

| Page | Performance | Accessibility | Best Practices | SEO | Improvement |
|------|-------------|---------------|----------------|-----|-------------|
| Home | 94 (+32) | 95 (+7) | 92 (+13) | 100 (+8) | +32 points |
| Dashboard | 89 (+41) | 95 (+4) | 92 (+17) | 96 (+7) | +41 points |
| Profile | 91 (+36) | 95 (+5) | 92 (+10) | 100 (+9) | +36 points |

### Web Vitals (After)

| Page | LCP | FID | CLS | TTFB | Improvement |
|------|-----|-----|-----|------|-------------|
| Home | 1.8s | 45ms | 0.02 | 320ms | 57% faster LCP |
| Dashboard | 2.1s | 65ms | 0.04 | 450ms | 64% faster LCP |

### Bundle Sizes (After)

| Bundle | Size (gzipped) | Change |
|--------|----------------|--------|
| main.js | 180KB | -79% |
| vendor-react.js | 95KB | New |
| vendor-ui.js | 85KB | New |
| styles.css | 45KB | -47% |
| **Total Initial** | **405KB** | **-68%** |

### Load Time Improvements

| Metric | Before | After | Improvement |
|--------|--------|-------|-------------|
| Initial Bundle Load | 3.8s | 1.2s | 68% faster |
| Time to Interactive | 6.5s | 2.3s | 65% faster |
| First Contentful Paint | 2.1s | 0.8s | 62% faster |
| Largest Contentful Paint | 4.2s | 1.8s | 57% faster |

## Trade-offs and Considerations

**Code Splitting**:
- **Benefit**: 68% smaller initial bundle
- **Trade-off**: Additional network requests for route chunks
- **Mitigation**: Chunks are cached; prefetch likely routes

**Image Format Optimization**:
- **Benefit**: 85% smaller images
- **Trade-off**: Build-step complexity (converting to AVIF/WebP)
- **Fallback**: JPEG fallback for older browsers

## Monitoring Recommendations

1. **Real User Monitoring** for Web Vitals
2. **Lighthouse CI** in pull request checks
3. **Bundle size tracking** in CI/CD
4. **Performance budgets** (e.g., initial bundle < 500KB)

## Next Steps

1. Implement service worker for offline support
2. Add resource hints (prefetch, preconnect)
3. Consider migrating to Next.js for automatic optimizations
4. Implement CDN for static assets
```

677
commands/optimize/infrastructure.md
Normal file

@@ -0,0 +1,677 @@

# Infrastructure Optimization Operation

You are executing the **infrastructure** operation to optimize infrastructure scaling, CDN configuration, resource allocation, deployment, and cost efficiency.

## Parameters

**Received**: `$ARGUMENTS` (after removing 'infrastructure' operation name)

Expected format: `target:"scaling|cdn|resources|deployment|costs|all" [environment:"prod|staging|dev"] [provider:"aws|azure|gcp|vercel"] [budget_constraint:"true|false"]`

**Parameter definitions**:
- `target` (required): What to optimize - `scaling`, `cdn`, `resources`, `deployment`, `costs`, or `all`
- `environment` (optional): Target environment (default: production)
- `provider` (optional): Cloud provider (auto-detected if not specified)
- `budget_constraint` (optional): Prioritize cost reduction (default: false)

## Workflow

### 1. Detect Infrastructure Provider

```bash
# Check for cloud provider configuration
ls -la .aws/ .azure/ .gcp/ vercel.json netlify.toml 2>/dev/null

# Check for container orchestration
kubectl config current-context 2>/dev/null
docker-compose version 2>/dev/null

# Check for IaC tools
ls -la terraform/ *.tf serverless.yml cloudformation/ 2>/dev/null
```

### 2. Analyze Current Infrastructure

**Resource Utilization (Kubernetes)**:
```bash
# Node resource usage
kubectl top nodes

# Pod resource usage
kubectl top pods --all-namespaces

# Check resource requests vs limits
kubectl get pods -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].resources}{"\n"}{end}'
```

**Resource Utilization (AWS EC2)**:
```bash
# CloudWatch metrics
aws cloudwatch get-metric-statistics \
  --namespace AWS/EC2 \
  --metric-name CPUUtilization \
  --dimensions Name=InstanceId,Value=i-1234567890abcdef0 \
  --start-time 2025-10-07T00:00:00Z \
  --end-time 2025-10-14T00:00:00Z \
  --period 3600 \
  --statistics Average
```

### 3. Scaling Optimization

#### 3.1. Horizontal Pod Autoscaling (Kubernetes)

```yaml
# BEFORE (fixed 3 replicas)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api-server
spec:
  replicas: 3  # Fixed count, wastes resources at low traffic
  template:
    spec:
      containers:
        - name: api
          image: api:v1.0.0
          resources:
            requests:
              memory: "512Mi"
              cpu: "500m"

# AFTER (horizontal pod autoscaler)
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: api-server-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: api-server
  minReplicas: 2   # Minimum for high availability
  maxReplicas: 10  # Scale up under load
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70  # Target 70% CPU
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300  # Wait 5 min before scaling down
    scaleUp:
      stabilizationWindowSeconds: 0    # Scale up immediately
      policies:
        - type: Percent
          value: 100      # At most double the pod count at a time
          periodSeconds: 15

# Result:
# - Off-peak: 2 pods (save 33% resources)
# - Peak: up to 10 pods (handle 5x traffic)
# - Cost savings: ~40% while maintaining performance
```

#### 3.2. Vertical Pod Autoscaling

```yaml
# Automatically adjust resource requests/limits
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: api-server-vpa
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: api-server
  updatePolicy:
    updateMode: "Auto"  # Automatically apply recommendations
  resourcePolicy:
    containerPolicies:
      - containerName: api
        minAllowed:
          memory: "256Mi"
          cpu: "100m"
        maxAllowed:
          memory: "2Gi"
          cpu: "2000m"
        controlledResources: ["cpu", "memory"]
```

#### 3.3. AWS Auto Scaling Groups

```json
{
  "AutoScalingGroupName": "api-server-asg",
  "MinSize": 2,
  "MaxSize": 10,
  "DesiredCapacity": 2,
  "DefaultCooldown": 300,
  "HealthCheckType": "ELB",
  "HealthCheckGracePeriod": 180,
  "TargetGroupARNs": ["arn:aws:elasticloadbalancing:..."],
  "TargetTrackingScalingPolicies": [
    {
      "PolicyName": "target-tracking-cpu",
      "TargetValue": 70.0,
      "PredefinedMetricSpecification": {
        "PredefinedMetricType": "ASGAverageCPUUtilization"
      }
    }
  ]
}
```

### 4. CDN Optimization

#### 4.1. CloudFront Configuration (AWS)

```json
{
  "DistributionConfig": {
    "CallerReference": "api-cdn-2025",
    "Comment": "Optimized CDN for static assets",
    "Enabled": true,
    "PriceClass": "PriceClass_100",
    "Origins": [
      {
        "Id": "S3-static-assets",
        "DomainName": "static-assets.s3.amazonaws.com",
        "S3OriginConfig": {
          "OriginAccessIdentity": "origin-access-identity/cloudfront/..."
        }
      }
    ],
    "DefaultCacheBehavior": {
      "TargetOriginId": "S3-static-assets",
      "ViewerProtocolPolicy": "redirect-to-https",
      "Compress": true,
      "MinTTL": 0,
      "DefaultTTL": 86400,
      "MaxTTL": 31536000,
      "ForwardedValues": {
        "QueryString": false,
        "Cookies": { "Forward": "none" }
      }
    },
    "CacheBehaviors": [
      {
        "PathPattern": "*.js",
        "TargetOriginId": "S3-static-assets",
        "Compress": true,
        "MinTTL": 31536000,
        "CachePolicyId": "immutable-assets"
      },
      {
        "PathPattern": "*.css",
        "TargetOriginId": "S3-static-assets",
        "Compress": true,
        "MinTTL": 31536000
      }
    ]
  }
}
```

**Cache Headers**:
```javascript
// Express server - set appropriate cache headers
app.use('/static', express.static('public', {
  maxAge: '1y',     // Immutable assets with a hash in the filename
  immutable: true
}));

app.use('/api', (req, res, next) => {
  res.set('Cache-Control', 'no-cache');  // API responses
  next();
});

// HTML pages - short cache with revalidation
app.get('/', (req, res) => {
  res.set('Cache-Control', 'public, max-age=300, must-revalidate');
  res.sendFile('index.html', { root: __dirname });  // sendFile needs an absolute path or a root option
});
```

#### 4.2. Image Optimization with CDN

```nginx
# Nginx configuration for image optimization
location ~* \.(jpg|jpeg|png|gif|webp)$ {
    expires 1y;
    add_header Cache-Control "public, immutable";

    # Enable compression
    gzip on;
    gzip_comp_level 6;

    # Serve WebP if the browser supports it
    set $webp_suffix "";
    if ($http_accept ~* "webp") {
        set $webp_suffix ".webp";
    }
    try_files $uri$webp_suffix $uri =404;
}
```

### 5. Resource Right-Sizing

#### 5.1. Analyze Resource Usage Patterns

```bash
# Kubernetes - resource usage snapshot per container
kubectl top pods --containers --namespace production | awk '{
  if (NR>1) {
    split($3, cpu, "m"); split($4, mem, "Mi");
    print $1, $2, cpu[1], mem[1]
  }
}' > resource-usage.txt

# Analyze patterns:
# - If CPU is consistently <30% of the request, reduce the CPU request
# - If memory is consistently <50% of the request, reduce the memory request
```

**Optimization Example**:
```yaml
# BEFORE (over-provisioned)
resources:
  requests:
    memory: "2Gi"    # Usage: 600Mi (30%)
    cpu: "1000m"     # Usage: 200m (20%)
  limits:
    memory: "4Gi"
    cpu: "2000m"

# AFTER (right-sized)
resources:
  requests:
    memory: "768Mi"  # 600Mi + 28% headroom
    cpu: "300m"      # 200m + 50% headroom
  limits:
    memory: "1.5Gi"  # 2x request
    cpu: "600m"      # 2x request

# Savings: 70% smaller CPU request, 62% smaller memory request
# Cost impact: ~60% reduction per pod
```

#### 5.2. Reserved Instances / Savings Plans

**AWS Reserved Instances**:
```bash
# Analyze instance usage patterns
aws ce get-reservation-utilization \
  --time-period Start=2024-10-01,End=2025-10-01 \
  --granularity MONTHLY

# Recommendation: convert steadily-used instances to Reserved Instances
# Example savings (~730 hours/month):
# - On-Demand t3.large: $0.0832/hour ≈ $61/month
# - Reserved t3.large (1 year): $0.0520/hour ≈ $38/month
# - Savings: 37% (~$23/month per instance)
```

### 6. Deployment Optimization

#### 6.1. Container Image Optimization

```dockerfile
# BEFORE (large image: 1.2GB)
FROM node:18
WORKDIR /app
COPY . .
RUN npm install
CMD ["npm", "start"]

# AFTER (optimized image: ~180MB)
# Multi-stage build
FROM node:18-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci                 # full install: the build step needs devDependencies
COPY . .
RUN npm run build

FROM node:18-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --omit=dev      # production dependencies only
COPY --from=builder /app/dist ./dist

# Create non-root user
RUN addgroup -g 1001 -S nodejs && adduser -S nodejs -u 1001
USER nodejs

EXPOSE 3000
CMD ["node", "dist/main.js"]

# Image size: 1.2GB → ~180MB (85% smaller)
# Security: non-root user, minimal attack surface
```

#### 6.2. Blue-Green Deployment

```yaml
# Kubernetes blue-green deployment
# Green (new version)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api-green
spec:
  replicas: 3
  selector:
    matchLabels:
      app: api
      version: green
  template:
    metadata:
      labels:
        app: api
        version: green
    spec:
      containers:
        - name: api
          image: api:v2.0.0

---
# Service - switch traffic by changing the selector
apiVersion: v1
kind: Service
metadata:
  name: api-service
spec:
  selector:
    app: api
    version: green  # Change from 'blue' to 'green' to switch traffic
  ports:
    - port: 80
      targetPort: 3000

# Zero-downtime deployment
# Instant rollback by changing the selector back to 'blue'
```

### 7. Cost Optimization

#### 7.1. Spot Instances for Non-Critical Workloads

```yaml
# Kubernetes - run batch jobs on spot capacity
apiVersion: batch/v1
kind: Job
metadata:
  name: data-processing
spec:
  template:
    spec:
      restartPolicy: Never
      nodeSelector:
        eks.amazonaws.com/capacityType: SPOT  # spot-node label (varies by provider)
      tolerations:
        - key: "spot"
          operator: "Equal"
          value: "true"
          effect: "NoSchedule"
      containers:
        - name: processor
          image: data-processor:v1.0.0

# Savings: 70-90% cost reduction for spot vs on-demand
# Trade-off: may be interrupted (acceptable for batch jobs)
```

#### 7.2. Storage Optimization

```bash
# S3 Lifecycle Policy
aws s3api put-bucket-lifecycle-configuration \
  --bucket static-assets \
  --lifecycle-configuration '{
    "Rules": [
      {
        "Id": "archive-old-logs",
        "Status": "Enabled",
        "Filter": { "Prefix": "logs/" },
        "Transitions": [
          {
            "Days": 30,
            "StorageClass": "STANDARD_IA"
          },
          {
            "Days": 90,
            "StorageClass": "GLACIER"
          }
        ],
        "Expiration": { "Days": 365 }
      }
    ]
  }'

# Cost impact:
# - Standard: $0.023/GB/month
# - Standard-IA: $0.0125/GB/month (46% cheaper)
# - Glacier: $0.004/GB/month (83% cheaper)
```

#### 7.3. Database Instance Right-Sizing

```sql
-- Analyze actual database usage
SELECT
  datname,
  pg_size_pretty(pg_database_size(datname)) AS size
FROM pg_database
ORDER BY pg_database_size(datname) DESC;

-- Check connection usage
SELECT count(*) AS connections,
       max_conn,
       max_conn - count(*) AS available
FROM pg_stat_activity,
     (SELECT setting::int AS max_conn FROM pg_settings WHERE name = 'max_connections') mc
GROUP BY max_conn;

-- Recommendation: if the instance consistently uses <30% of connections and <50% of storage,
-- consider downsizing from db.r5.xlarge to db.r5.large
-- Savings: ~50% cost reduction
```

### 8. Monitoring and Alerting

**CloudWatch Alarms (AWS)**:
```json
{
  "AlarmName": "high-cpu-utilization",
  "ComparisonOperator": "GreaterThanThreshold",
  "EvaluationPeriods": 2,
  "MetricName": "CPUUtilization",
  "Namespace": "AWS/EC2",
  "Period": 300,
  "Statistic": "Average",
  "Threshold": 80.0,
  "ActionsEnabled": true,
  "AlarmActions": ["arn:aws:sns:us-east-1:123456789012:ops-team"]
}
```

**Prometheus Alerts (Kubernetes)**:
```yaml
groups:
  - name: infrastructure
    rules:
      - alert: HighMemoryUsage
        expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes > 0.85
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "High memory usage on {{ $labels.instance }}"

      - alert: HighCPUUsage
        expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
        for: 5m
        labels:
          severity: warning
```

## Output Format

```markdown
# Infrastructure Optimization Report: [Environment]

**Optimization Date**: [Date]
**Provider**: [AWS/Azure/GCP/Hybrid]
**Environment**: [production/staging]
**Target**: [scaling/cdn/resources/costs/all]

## Executive Summary

[Summary of infrastructure state and optimizations]

## Baseline Metrics

### Resource Utilization
- **CPU**: 68% average across nodes
- **Memory**: 72% average
- **Network**: 45% utilization
- **Storage**: 60% utilization

### Cost Breakdown (Monthly)
- **Compute**: $4,500 (EC2 instances)
- **Database**: $1,200 (RDS)
- **Storage**: $800 (S3, EBS)
- **Network**: $600 (Data transfer, CloudFront)
- **Total**: $7,100/month

### Scaling Configuration
- **Auto Scaling**: Fixed 5 instances (no scaling)
- **Pod Count**: Fixed 15 pods
- **Resource Allocation**: Static (no HPA/VPA)

## Optimizations Implemented

### 1. Horizontal Pod Autoscaling

**Before**: Fixed 15 pods
**After**: 8-25 pods based on load

**Impact**:
- Off-peak: 8 pods (47% reduction)
- Peak: 25 pods (67% more capacity)
- Cost savings: $1,350/month (30%)

### 2. Resource Right-Sizing

**Optimized 12 deployments**:
- Average CPU reduction: 55%
- Average memory reduction: 48%
- Cost impact: $945/month savings

### 3. CDN Configuration

**Implemented**:
- CloudFront for static assets
- Cache-Control headers optimized
- Compression enabled

**Impact**:
- Origin requests: 85% reduction
- TTFB: 750ms → 120ms (84% faster)
- Bandwidth costs: $240/month savings

### 4. Reserved Instances

**Converted**:
- 3 x t3.large on-demand → Reserved
- Commitment: 1 year, no upfront

**Savings**: $69/month (37% per instance)

### 5. Storage Lifecycle Policies

**Implemented**:
- Logs: Standard → Standard-IA (30d) → Glacier (90d)
- Backups: Glacier after 30 days
- Old assets: Glacier after 180 days

**Savings**: $285/month

## Results Summary

### Cost Optimization

| Category | Before | After | Savings |
|----------|--------|-------|---------|
| Compute | $4,500 | $2,518 | $1,982 (44%) |
| Database | $1,200 | $720 | $480 (40%) |
| Storage | $800 | $515 | $285 (36%) |
| Network | $600 | $360 | $240 (40%) |
| **Total** | **$7,100** | **$4,113** | **$2,987 (42%)** |

**Annual Savings**: $35,844

### Performance Improvements

| Metric | Before | After | Improvement |
|--------|--------|-------|-------------|
| Average Response Time | 285ms | 125ms | 56% faster |
| TTFB (with CDN) | 750ms | 120ms | 84% faster |
| Resource Utilization | 68% | 75% | Better efficiency |
| Auto-scaling Response | N/A | 30s | Handles traffic spikes |

### Scalability Improvements

- **Traffic Capacity**: 2x increase (25 pods vs 15 fixed)
- **Scaling Response Time**: 30 seconds to scale up
- **Cost Efficiency**: Pay for what you use

## Trade-offs and Considerations

**Auto-scaling**:
- **Benefit**: 42% cost reduction, 2x capacity
- **Trade-off**: 30s delay for cold starts
- **Mitigation**: Min 8 pods for baseline capacity

**Reserved Instances**:
- **Benefit**: 37% savings per instance
- **Trade-off**: 1-year commitment
- **Risk**: Low (steady baseline load confirmed)

**CDN Caching**:
- **Benefit**: 84% faster TTFB, 85% fewer origin requests
- **Trade-off**: Cache invalidation complexity
- **Mitigation**: Short TTL for dynamic content

## Monitoring Recommendations

1. **Cost Tracking**:
   - Daily cost reports
   - Budget alerts at 80%, 100%
   - Tag-based cost allocation

2. **Performance Monitoring**:
   - CloudWatch dashboards
   - Prometheus + Grafana
   - APM for application metrics

3. **Auto-scaling Health**:
   - HPA metrics (scale events)
   - Resource utilization trends
   - Alert on frequent scaling

## Next Steps

1. Evaluate spot instances for batch workloads (potential 70% savings)
2. Implement multi-region deployment for better global performance
3. Consider serverless for low-traffic endpoints
4. Review database read replicas for read-heavy workloads
```

96
commands/optimize/skill.md
Normal file

@@ -0,0 +1,96 @@

---
description: Comprehensive performance optimization across database, backend, frontend, and infrastructure layers
argument-hint: <operation> [parameters...]
model: inherit
---

# Performance Optimization Skill

You are routing performance optimization requests to specialized operations. Parse `$ARGUMENTS` to determine which optimization operation to execute.

## Available Operations

- **analyze** - Comprehensive performance analysis with bottleneck identification
- **database** - Database query and schema optimization
- **backend** - Backend API and algorithm optimization
- **frontend** - Frontend bundle and rendering optimization
- **infrastructure** - Infrastructure and deployment optimization
- **benchmark** - Performance benchmarking and regression testing

## Routing Logic

Extract the first word from `$ARGUMENTS` as the operation name, and pass the remainder as operation parameters.

**Arguments received**: `$ARGUMENTS`

**Base directory**: `/home/danie/projects/plugins/architect/open-plugins/plugins/10x-fullstack-engineer/commands/optimize/`

**Routing Instructions**:

1. **Parse the operation**: Extract the first word from `$ARGUMENTS`
2. **Load operation instructions**: Read the corresponding operation file
3. **Execute with context**: Follow the operation's instructions with the remaining parameters
4. **Invoke the agent**: Leverage the 10x-fullstack-engineer agent for optimization expertise
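
Illustratively, the parse step amounts to splitting on the first whitespace; a minimal sketch (the function name is hypothetical, not part of the skill API):

```javascript
// Split the raw argument string into the operation name and its parameters.
function parseOperation(args) {
  const trimmed = args.trim();
  const firstSpace = trimmed.search(/\s/);
  if (firstSpace === -1) return { operation: trimmed, params: '' };
  return {
    operation: trimmed.slice(0, firstSpace),
    params: trimmed.slice(firstSpace + 1).trim(),
  };
}

// parseOperation('database target:queries threshold:500ms')
//   → { operation: 'database', params: 'target:queries threshold:500ms' }
```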

## Operation Routing

```
analyze        → Read and follow: /home/danie/projects/plugins/architect/open-plugins/plugins/10x-fullstack-engineer/commands/optimize/analyze.md
database       → Read and follow: /home/danie/projects/plugins/architect/open-plugins/plugins/10x-fullstack-engineer/commands/optimize/database.md
backend        → Read and follow: /home/danie/projects/plugins/architect/open-plugins/plugins/10x-fullstack-engineer/commands/optimize/backend.md
frontend       → Read and follow: /home/danie/projects/plugins/architect/open-plugins/plugins/10x-fullstack-engineer/commands/optimize/frontend.md
infrastructure → Read and follow: /home/danie/projects/plugins/architect/open-plugins/plugins/10x-fullstack-engineer/commands/optimize/infrastructure.md
benchmark      → Read and follow: /home/danie/projects/plugins/architect/open-plugins/plugins/10x-fullstack-engineer/commands/optimize/benchmark.md
```

## Error Handling

If no operation is specified or the operation is not recognized, display:

**Available optimization operations**:
- `/optimize analyze` - Comprehensive performance analysis
- `/optimize database` - Database optimization
- `/optimize backend` - Backend API optimization
- `/optimize frontend` - Frontend bundle and rendering optimization
- `/optimize infrastructure` - Infrastructure and deployment optimization
- `/optimize benchmark` - Performance benchmarking

**Example usage**:
```
/optimize analyze target:"user dashboard" scope:all metrics:"baseline"
/optimize database target:queries context:"slow SELECT statements" threshold:500ms
/optimize backend target:api endpoints:"/api/users,/api/products" load_profile:high
/optimize frontend target:bundles pages:"dashboard,profile" metrics_target:"lighthouse>90"
/optimize infrastructure target:scaling environment:production provider:aws
/optimize benchmark type:load baseline:"v1.2.0" duration:300s concurrency:100
```

**Comprehensive workflow example**:
```bash
# 1. Analyze overall performance
/optimize analyze target:"production app" scope:all metrics:"baseline"

# 2. Optimize specific layers based on the analysis
/optimize database target:all context:"queries from analysis" threshold:200ms
/optimize backend target:api endpoints:"/api/search" priority:high
/optimize frontend target:all pages:"checkout,dashboard" framework:react

# 3. Benchmark improvements
/optimize benchmark type:all baseline:"pre-optimization" duration:600s

# 4. Optimize infrastructure for efficiency
/optimize infrastructure target:costs environment:production budget_constraint:true
```

## Integration with 10x-Fullstack-Engineer

All optimization operations should leverage the **10x-fullstack-engineer** agent for:
- Expert performance analysis across all layers
- Industry best practices for optimization
- Trade-off analysis between performance and maintainability
- Scalability considerations
- Production-ready implementation guidance

## Execution

Based on the parsed operation from `$ARGUMENTS`, read the appropriate operation file and follow its instructions with the remaining parameters.