Initial commit

Zhongwei Li
2025-11-29 18:20:21 +08:00
commit bbbaf7acad
63 changed files with 38552 additions and 0 deletions

analyze-bundle.sh

@@ -0,0 +1,172 @@
#!/bin/bash
# Purpose: Analyze webpack/vite bundle size and composition
# Version: 1.0.0
# Usage: ./analyze-bundle.sh [build-dir] [output-dir]
# Returns: 0=success, 1=analysis failed, 2=invalid arguments
# Dependencies: Node.js, npm, webpack-bundle-analyzer or vite-bundle-visualizer
set -euo pipefail
# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Arguments
BUILD_DIR="${1:-./dist}"
OUTPUT_DIR="${2:-./bundle-analysis}"
# Validate build directory exists
if [ ! -d "$BUILD_DIR" ]; then
echo -e "${RED}Error: Build directory not found: $BUILD_DIR${NC}"
echo "Please run 'npm run build' first"
exit 2
fi
# Create output directory
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
echo -e "${GREEN}Analyzing bundle in: $BUILD_DIR${NC}"
echo "Output directory: $OUTPUT_DIR"
# Detect build tool
if [ -f "stats.json" ] || [ -f "$BUILD_DIR/stats.json" ]; then
BUILD_TOOL="webpack"
elif [ -f "vite.config.js" ] || [ -f "vite.config.ts" ]; then
BUILD_TOOL="vite"
elif [ -f "next.config.js" ]; then
BUILD_TOOL="nextjs"
else
BUILD_TOOL="unknown"
fi
echo "Detected build tool: $BUILD_TOOL"
# Analyze bundle based on build tool
case $BUILD_TOOL in
webpack)
echo -e "\n${YELLOW}Running webpack-bundle-analyzer...${NC}"
# Check if webpack-bundle-analyzer is installed
if ! npm list webpack-bundle-analyzer &> /dev/null; then
echo "Installing webpack-bundle-analyzer..."
npm install --save-dev webpack-bundle-analyzer
fi
# Find stats.json
STATS_FILE="stats.json"
if [ -f "$BUILD_DIR/stats.json" ]; then
STATS_FILE="$BUILD_DIR/stats.json"
fi
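# A fresh stats file can be regenerated if needed, e.g.:
#   npx webpack --profile --json=stats.json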
# Generate report
npx webpack-bundle-analyzer "$STATS_FILE" \
--mode static \
--report "${OUTPUT_DIR}/bundle-report-${TIMESTAMP}.html" \
--no-open
echo -e "${GREEN}✓ Bundle analysis complete${NC}"
echo "Report: ${OUTPUT_DIR}/bundle-report-${TIMESTAMP}.html"
;;
vite)
echo -e "\n${YELLOW}Running vite bundle analysis...${NC}"
# Check if vite-bundle-visualizer (a CLI wrapper around rollup-plugin-visualizer) is installed
if ! npm list vite-bundle-visualizer &> /dev/null; then
echo "Installing vite-bundle-visualizer..."
npm install --save-dev vite-bundle-visualizer
fi
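# The plugin can also be wired into vite.config.ts permanently (a sketch
# using rollup-plugin-visualizer's documented export):
#   import { visualizer } from 'rollup-plugin-visualizer';
#   export default { plugins: [visualizer({ filename: 'stats.html' })] };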
# Generate the treemap report with the vite-bundle-visualizer CLI
npx vite-bundle-visualizer \
--output "${OUTPUT_DIR}/bundle-report-${TIMESTAMP}.html"
echo -e "${GREEN}✓ Bundle analysis complete${NC}"
;;
nextjs)
echo -e "\n${YELLOW}Running Next.js bundle analysis...${NC}"
# Check if @next/bundle-analyzer is installed
if ! npm list @next/bundle-analyzer &> /dev/null; then
echo "Installing @next/bundle-analyzer..."
npm install --save-dev @next/bundle-analyzer
fi
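# ANALYZE=true only takes effect if next.config.js wraps the config with the
# analyzer, per @next/bundle-analyzer's documented usage:
#   const withBundleAnalyzer = require('@next/bundle-analyzer')({
#     enabled: process.env.ANALYZE === 'true',
#   });
#   module.exports = withBundleAnalyzer({});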
# Rebuild with analyzer
ANALYZE=true npm run build
echo -e "${GREEN}✓ Bundle analysis complete${NC}"
;;
*)
echo -e "${YELLOW}Unknown build tool. Performing generic analysis...${NC}"
;;
esac
# Calculate bundle sizes
echo -e "\n${YELLOW}Calculating bundle sizes...${NC}"
# Find all JS/CSS files
find "$BUILD_DIR" -type f \( -name "*.js" -o -name "*.css" \) -exec ls -lh {} \; | \
awk '{print $9, $5}' > "${OUTPUT_DIR}/file-sizes-${TIMESTAMP}.txt"
# Calculate totals (du -ch prints a grand total on its final line;
# default to 0 when no matching files exist, so set -e/pipefail isn't tripped)
TOTAL_JS=$(find "$BUILD_DIR" -type f -name "*.js" -exec du -ch {} + | awk 'END {print $1}')
TOTAL_CSS=$(find "$BUILD_DIR" -type f -name "*.css" -exec du -ch {} + | awk 'END {print $1}')
TOTAL_JS=${TOTAL_JS:-0}
TOTAL_CSS=${TOTAL_CSS:-0}
TOTAL_ALL=$(du -sh "$BUILD_DIR" | awk '{print $1}')
echo -e "\n=== Bundle Size Summary ==="
echo "Total JavaScript: $TOTAL_JS"
echo "Total CSS: $TOTAL_CSS"
echo "Total Build Size: $TOTAL_ALL"
# Identify large files (>500KB)
echo -e "\n=== Large Files (>500KB) ==="
find "$BUILD_DIR" -type f -size +500k -exec ls -lh {} \; | \
awk '{print $5, $9}' | sort -hr
# Check for common issues
echo -e "\n${YELLOW}Checking for common issues...${NC}"
# Check for source maps in production
SOURCEMAPS=$(find "$BUILD_DIR" -type f -name "*.map" | wc -l)
if [ "$SOURCEMAPS" -gt 0 ]; then
echo -e "${YELLOW}⚠ Found $SOURCEMAPS source map files in build${NC}"
echo " Consider disabling source maps for production"
fi
# Check for unminified files (heuristic: minified bundles pack code into very long lines)
UNMINIFIED=0
while IFS= read -r f; do
MAX_LINE=$(awk '{ if (length > max) max = length } END { print max + 0 }' "$f")
if [ "$MAX_LINE" -gt 0 ] && [ "$MAX_LINE" -lt 500 ]; then
UNMINIFIED=$((UNMINIFIED + 1))
fi
done < <(find "$BUILD_DIR" -type f -name "*.js" ! -name "*.min.js")
if [ "$UNMINIFIED" -gt 0 ]; then
echo -e "${YELLOW}⚠ Found $UNMINIFIED potentially unminified JS files${NC}"
echo " Verify minification is enabled"
fi
# Generate JSON summary
cat > "${OUTPUT_DIR}/summary-${TIMESTAMP}.json" <<EOF
{
"timestamp": "${TIMESTAMP}",
"buildTool": "${BUILD_TOOL}",
"buildDir": "${BUILD_DIR}",
"totalJS": "${TOTAL_JS}",
"totalCSS": "${TOTAL_CSS}",
"totalSize": "${TOTAL_ALL}",
"sourceMaps": ${SOURCEMAPS},
"issues": {
"sourceMapsInProduction": $([ "$SOURCEMAPS" -gt 0 ] && echo "true" || echo "false"),
"potentiallyUnminified": $([ "$UNMINIFIED" -gt 0 ] && echo "true" || echo "false")
}
}
EOF
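# The JSON summary is CI-friendly; for example, to fail a build that ships
# source maps (assumes jq is available):
#   jq -e '.issues.sourceMapsInProduction == false' "${OUTPUT_DIR}/summary-${TIMESTAMP}.json"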
echo -e "\n${GREEN}✓ Bundle analysis complete${NC}"
echo "Results saved to:"
echo " - ${OUTPUT_DIR}/bundle-report-${TIMESTAMP}.html"
echo " - ${OUTPUT_DIR}/file-sizes-${TIMESTAMP}.txt"
echo " - ${OUTPUT_DIR}/summary-${TIMESTAMP}.json"
exit 0

load-test.sh

@@ -0,0 +1,314 @@
#!/bin/bash
# Purpose: Run k6 load testing with various scenarios
# Version: 1.0.0
# Usage: ./load-test.sh <url> [scenario] [duration] [vus]
# Returns: 0=success, 1=test failed, 2=invalid arguments
# Dependencies: k6
set -euo pipefail
# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Arguments
URL="${1:-}"
SCENARIO="${2:-smoke}"
DURATION="${3:-60s}"
VUS="${4:-50}"
# Validate arguments
if [ -z "$URL" ]; then
echo -e "${RED}Error: URL is required${NC}"
echo "Usage: $0 <url> [scenario] [duration] [vus]"
echo ""
echo "Scenarios:"
echo " smoke - Quick test with few users (default)"
echo " load - Normal load test"
echo " stress - Gradually increasing load"
echo " spike - Sudden traffic spike"
echo " soak - Long-duration test"
echo ""
echo "Example: $0 https://api.example.com/health load 300s 100"
exit 2
fi
# Check that k6 is installed
if ! command -v k6 &> /dev/null; then
echo -e "${RED}k6 not found.${NC}"
echo "Please install k6:"
echo " macOS: brew install k6"
echo " Linux: snap install k6, or add Grafana's official k6 apt repository"
echo " Windows: choco install k6"
exit 2
fi
# Create output directory
OUTPUT_DIR="./load-test-results"
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
echo -e "${GREEN}Running k6 load test${NC}"
echo "URL: $URL"
echo "Scenario: $SCENARIO"
echo "Duration: $DURATION"
echo "VUs: $VUS"
# Generate k6 test script based on scenario
TEST_SCRIPT="${OUTPUT_DIR}/test-${SCENARIO}-${TIMESTAMP}.js"
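# k6 passes --env values to the script as strings, so each generated script
# below normalizes VUS with parseInt before using it in numeric stage targets.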
case $SCENARIO in
smoke)
cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';
const errorRate = new Rate('errors');
export const options = {
vus: 1,
duration: '30s',
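// Thresholds fail the whole run when violated: p95 latency must stay
// under 1s, and fewer than 1% of requests may fail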
thresholds: {
http_req_duration: ['p(95)<1000'],
http_req_failed: ['rate<0.01'],
},
};
export default function () {
const res = http.get(__ENV.TARGET_URL);
const success = check(res, {
'status is 200': (r) => r.status === 200,
'response time OK': (r) => r.timings.duration < 1000,
});
errorRate.add(!success);
sleep(1);
}
EOF
;;
load)
cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';
const errorRate = new Rate('errors');
const VUS = parseInt(__ENV.VUS, 10) || 50;
export const options = {
stages: [
{ duration: '30s', target: Math.ceil(VUS / 2) },
{ duration: __ENV.DURATION || '60s', target: VUS },
{ duration: '30s', target: 0 },
],
thresholds: {
http_req_duration: ['p(95)<500', 'p(99)<1000'],
http_req_failed: ['rate<0.01'],
errors: ['rate<0.1'],
},
};
export default function () {
const res = http.get(__ENV.TARGET_URL);
const success = check(res, {
'status is 200': (r) => r.status === 200,
'response time < 500ms': (r) => r.timings.duration < 500,
});
errorRate.add(!success);
sleep(1);
}
EOF
;;
stress)
cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';
const errorRate = new Rate('errors');
const VUS = parseInt(__ENV.VUS, 10) || 50;
export const options = {
stages: [
{ duration: '1m', target: Math.ceil(VUS / 4) },
{ duration: '2m', target: Math.ceil(VUS / 2) },
{ duration: '2m', target: VUS },
{ duration: '2m', target: Math.ceil(VUS * 1.5) },
{ duration: '2m', target: VUS * 2 },
{ duration: '1m', target: 0 },
],
thresholds: {
http_req_duration: ['p(95)<1000'],
http_req_failed: ['rate<0.05'],
},
};
export default function () {
const res = http.get(__ENV.TARGET_URL);
const success = check(res, {
'status is 200': (r) => r.status === 200,
});
errorRate.add(!success);
sleep(1);
}
EOF
;;
spike)
cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';
const errorRate = new Rate('errors');
const VUS = parseInt(__ENV.VUS, 10) || 50;
export const options = {
stages: [
{ duration: '1m', target: Math.ceil(VUS / 2) },
{ duration: '30s', target: VUS * 5 },
{ duration: '1m', target: Math.ceil(VUS / 2) },
{ duration: '30s', target: 0 },
],
thresholds: {
http_req_duration: ['p(95)<2000'],
http_req_failed: ['rate<0.1'],
},
};
export default function () {
const res = http.get(__ENV.TARGET_URL);
const success = check(res, {
'status is 200': (r) => r.status === 200,
});
errorRate.add(!success);
sleep(1);
}
EOF
;;
soak)
cat > "$TEST_SCRIPT" <<'EOF'
import http from 'k6/http';
import { check, sleep } from 'k6';
import { Rate } from 'k6/metrics';
const errorRate = new Rate('errors');
const VUS = parseInt(__ENV.VUS, 10) || 50;
export const options = {
// Soak deliberately ignores DURATION: hold steady load for 3 hours
stages: [
{ duration: '2m', target: VUS },
{ duration: '3h', target: VUS },
{ duration: '2m', target: 0 },
],
thresholds: {
http_req_duration: ['p(95)<500'],
http_req_failed: ['rate<0.01'],
},
};
export default function () {
const res = http.get(__ENV.TARGET_URL);
const success = check(res, {
'status is 200': (r) => r.status === 200,
});
errorRate.add(!success);
sleep(1);
}
EOF
;;
*)
echo -e "${RED}Error: Unknown scenario: $SCENARIO${NC}"
exit 2
;;
esac
# Run the k6 test; capture pass/fail without tripping set -e on a non-zero exit
echo -e "\n${YELLOW}Starting load test...${NC}"
if k6 run \
--out json="${OUTPUT_DIR}/results-${SCENARIO}-${TIMESTAMP}.json" \
--summary-export="${OUTPUT_DIR}/summary-${SCENARIO}-${TIMESTAMP}.json" \
--env TARGET_URL="$URL" \
--env DURATION="$DURATION" \
--env VUS="$VUS" \
"$TEST_SCRIPT"; then
echo -e "\n${GREEN}✓ Load test passed${NC}"
TEST_STATUS="passed"
else
echo -e "\n${RED}✗ Load test failed (thresholds not met)${NC}"
TEST_STATUS="failed"
fi
# Parse results
echo -e "\n${YELLOW}Parsing results...${NC}"
node -e "
const fs = require('fs');
const summary = JSON.parse(fs.readFileSync('${OUTPUT_DIR}/summary-${SCENARIO}-${TIMESTAMP}.json'));
console.log('\n=== Load Test Results ===');
console.log('Scenario:', '${SCENARIO}');
console.log('Status:', '${TEST_STATUS}'.toUpperCase());
const metrics = summary.metrics;
if (metrics.http_reqs) {
console.log('\n=== Request Statistics ===');
console.log('Total Requests:', metrics.http_reqs.count);
console.log('Request Rate:', metrics.http_reqs.rate.toFixed(2), 'req/s');
}
if (metrics.http_req_duration) {
const d = metrics.http_req_duration;
console.log('\n=== Response Time ===');
console.log('Average:', d.avg.toFixed(2), 'ms');
console.log('Min:', d.min.toFixed(2), 'ms');
console.log('Max:', d.max.toFixed(2), 'ms');
console.log('Median:', d.med.toFixed(2), 'ms');
// --summary-export writes percentile keys as 'p(90)'/'p(95)', not 'p95'
if (d['p(90)'] !== undefined) console.log('p90:', d['p(90)'].toFixed(2), 'ms');
if (d['p(95)'] !== undefined) console.log('p95:', d['p(95)'].toFixed(2), 'ms');
}
if (metrics.http_req_failed) {
console.log('\n=== Error Rate ===');
console.log('Failed Requests:', (metrics.http_req_failed.rate * 100).toFixed(2), '%');
}
if (metrics.vus) {
console.log('\n=== Virtual Users ===');
console.log('Max VUs:', metrics.vus.max);
}
// Report check results (the summary export exposes checks, not threshold verdicts)
console.log('\n=== Check Results ===');
Object.entries(summary.root_group.checks || {}).forEach(([name, check]) => {
const status = check.fails === 0 ? '✓' : '✗';
console.log(status, name, '(' + check.passes + ' passed, ' + check.fails + ' failed)');
});
"
echo -e "\n${GREEN}✓ Load test complete${NC}"
echo "Results saved to:"
echo " - ${OUTPUT_DIR}/results-${SCENARIO}-${TIMESTAMP}.json"
echo " - ${OUTPUT_DIR}/summary-${SCENARIO}-${TIMESTAMP}.json"
echo " - ${OUTPUT_DIR}/test-${SCENARIO}-${TIMESTAMP}.js"
if [ "$TEST_STATUS" = "failed" ]; then
exit 1
fi
exit 0

profile-frontend.sh

@@ -0,0 +1,119 @@
#!/bin/bash
# Purpose: Automated Lighthouse performance profiling for frontend pages
# Version: 1.0.0
# Usage: ./profile-frontend.sh <url> [output-dir]
# Returns: 0=success, 1=lighthouse failed, 2=invalid arguments
# Dependencies: Node.js, npm, lighthouse
set -euo pipefail
# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Arguments
URL="${1:-}"
OUTPUT_DIR="${2:-./lighthouse-reports}"
# Validate arguments
if [ -z "$URL" ]; then
echo -e "${RED}Error: URL is required${NC}"
echo "Usage: $0 <url> [output-dir]"
echo "Example: $0 https://example.com ./reports"
exit 2
fi
# Check if lighthouse is installed
if ! command -v lighthouse &> /dev/null; then
echo -e "${YELLOW}Lighthouse not found. Installing...${NC}"
npm install -g lighthouse
fi
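# (a global npm install may need elevated permissions; running the audits
# via 'npx lighthouse ...' is an alternative)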
# Create output directory
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
echo -e "${GREEN}Running Lighthouse audit for: $URL${NC}"
echo "Output directory: $OUTPUT_DIR"
# Run Lighthouse with various strategies
echo -e "\n${YELLOW}1. Desktop audit (fast connection)${NC}"
lighthouse "$URL" \
--output=json \
--output=html \
--output-path="${OUTPUT_DIR}/desktop-${TIMESTAMP}" \
--preset=desktop \
--throttling.rttMs=40 \
--throttling.throughputKbps=10240 \
--throttling.cpuSlowdownMultiplier=1 \
--chrome-flags="--headless --no-sandbox"
echo -e "\n${YELLOW}2. Mobile audit (3G connection)${NC}"
lighthouse "$URL" \
--output=json \
--output=html \
--output-path="${OUTPUT_DIR}/mobile-${TIMESTAMP}" \
--preset=mobile \
--throttling.rttMs=150 \
--throttling.throughputKbps=1600 \
--throttling.cpuSlowdownMultiplier=4 \
--chrome-flags="--headless --no-sandbox"
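# Optional: budgets can be enforced by adding --budget-path=budget.json, where
# budget.json is an array of entries like (sizes in KB; a sketch):
#   [{ "path": "/*", "resourceSizes": [{ "resourceType": "script", "budget": 300 }] }]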
# Extract key metrics
echo -e "\n${GREEN}Extracting key metrics...${NC}"
node -e "
const fs = require('fs');
const desktop = JSON.parse(fs.readFileSync('${OUTPUT_DIR}/desktop-${TIMESTAMP}.report.json'));
const mobile = JSON.parse(fs.readFileSync('${OUTPUT_DIR}/mobile-${TIMESTAMP}.report.json'));
console.log('\n=== Performance Scores ===');
console.log('Desktop Performance:', Math.round(desktop.categories.performance.score * 100));
console.log('Mobile Performance:', Math.round(mobile.categories.performance.score * 100));
console.log('\n=== Web Vitals (Desktop) ===');
const dMetrics = desktop.audits;
console.log('LCP:', Math.round(dMetrics['largest-contentful-paint'].numericValue), 'ms');
console.log('Max Potential FID:', Math.round(dMetrics['max-potential-fid'].numericValue), 'ms');
console.log('CLS:', dMetrics['cumulative-layout-shift'].numericValue.toFixed(3));
console.log('TTFB:', Math.round(dMetrics['server-response-time'].numericValue), 'ms');
console.log('TBT:', Math.round(dMetrics['total-blocking-time'].numericValue), 'ms');
console.log('\n=== Web Vitals (Mobile) ===');
const mMetrics = mobile.audits;
console.log('LCP:', Math.round(mMetrics['largest-contentful-paint'].numericValue), 'ms');
console.log('Max Potential FID:', Math.round(mMetrics['max-potential-fid'].numericValue), 'ms');
console.log('CLS:', mMetrics['cumulative-layout-shift'].numericValue.toFixed(3));
console.log('TTFB:', Math.round(mMetrics['server-response-time'].numericValue), 'ms');
console.log('TBT:', Math.round(mMetrics['total-blocking-time'].numericValue), 'ms');
// Save summary
const summary = {
timestamp: '${TIMESTAMP}',
url: '${URL}',
desktop: {
performance: Math.round(desktop.categories.performance.score * 100),
lcp: Math.round(dMetrics['largest-contentful-paint'].numericValue),
maxPotentialFid: Math.round(dMetrics['max-potential-fid'].numericValue),
cls: dMetrics['cumulative-layout-shift'].numericValue,
},
mobile: {
performance: Math.round(mobile.categories.performance.score * 100),
lcp: Math.round(mMetrics['largest-contentful-paint'].numericValue),
maxPotentialFid: Math.round(mMetrics['max-potential-fid'].numericValue),
cls: mMetrics['cumulative-layout-shift'].numericValue,
}
};
fs.writeFileSync('${OUTPUT_DIR}/summary-${TIMESTAMP}.json', JSON.stringify(summary, null, 2));
console.log('\nSummary saved to: ${OUTPUT_DIR}/summary-${TIMESTAMP}.json');
"
echo -e "\n${GREEN}✓ Lighthouse audit complete${NC}"
echo "Reports saved to: $OUTPUT_DIR"
echo " - desktop-${TIMESTAMP}.report.html"
echo " - mobile-${TIMESTAMP}.report.html"
echo " - summary-${TIMESTAMP}.json"
exit 0

query-profiler.sh

@@ -0,0 +1,226 @@
#!/bin/bash
# Purpose: Profile database queries and identify slow operations
# Version: 1.0.0
# Usage: ./query-profiler.sh <database-url> [threshold-ms] [output-dir]
# Returns: 0=success, 1=profiling failed, 2=invalid arguments
# Dependencies: psql (PostgreSQL) or mysql (MySQL)
set -euo pipefail
# Color output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Arguments
DATABASE_URL="${1:-}"
THRESHOLD_MS="${2:-500}"
OUTPUT_DIR="${3:-./query-profiles}"
# Validate arguments
if [ -z "$DATABASE_URL" ]; then
echo -e "${RED}Error: Database URL is required${NC}"
echo "Usage: $0 <database-url> [threshold-ms] [output-dir]"
echo "Example: $0 postgresql://user:pass@localhost:5432/dbname 500 ./reports"
exit 2
fi
# Create output directory
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
echo -e "${GREEN}Starting database query profiling${NC}"
echo "Threshold: ${THRESHOLD_MS}ms"
echo "Output directory: $OUTPUT_DIR"
# Detect database type
if [[ "$DATABASE_URL" == postgresql://* ]] || [[ "$DATABASE_URL" == postgres://* ]]; then
DB_TYPE="postgresql"
elif [[ "$DATABASE_URL" == mysql://* ]]; then
DB_TYPE="mysql"
else
echo -e "${YELLOW}Warning: Could not detect database type from URL${NC}"
DB_TYPE="unknown"
fi
echo "Database type: $DB_TYPE"
# PostgreSQL profiling
if [ "$DB_TYPE" = "postgresql" ]; then
echo -e "\n${YELLOW}Running PostgreSQL query analysis...${NC}"
# Enable pg_stat_statements if not already enabled
psql "$DATABASE_URL" -c "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;" 2>/dev/null || true
# Get slow queries
echo "Finding slow queries (>${THRESHOLD_MS}ms)..."
psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv" <<EOF
SELECT
substring(query, 1, 100) AS short_query,
calls,
round(mean_exec_time::numeric, 2) AS avg_time_ms,
round(max_exec_time::numeric, 2) AS max_time_ms,
round(total_exec_time::numeric, 2) AS total_time_ms,
round((100 * total_exec_time / sum(total_exec_time) OVER ())::numeric, 2) AS pct_total_time
FROM pg_stat_statements
WHERE mean_exec_time > ${THRESHOLD_MS}
AND query NOT LIKE '%pg_stat_statements%'
ORDER BY mean_exec_time DESC
LIMIT 50;
EOF
# Get most called queries
echo "Finding most frequently called queries..."
psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/frequent-queries-${TIMESTAMP}.csv" <<EOF
SELECT
substring(query, 1, 100) AS short_query,
calls,
round(mean_exec_time::numeric, 2) AS avg_time_ms,
round(total_exec_time::numeric, 2) AS total_time_ms
FROM pg_stat_statements
WHERE query NOT LIKE '%pg_stat_statements%'
ORDER BY calls DESC
LIMIT 50;
EOF
# Get index usage statistics
echo "Analyzing index usage..."
psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/index-usage-${TIMESTAMP}.csv" <<EOF
SELECT
schemaname,
tablename,
indexname,
idx_scan,
idx_tup_read,
idx_tup_fetch,
pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
ORDER BY idx_scan ASC
LIMIT 50;
EOF
# Find missing indexes (tables with sequential scans)
echo "Finding potential missing indexes..."
psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/missing-indexes-${TIMESTAMP}.csv" <<EOF
SELECT
schemaname,
tablename,
seq_scan,
seq_tup_read,
idx_scan,
CASE WHEN seq_scan > 0 THEN seq_tup_read / seq_scan ELSE 0 END AS avg_seq_read,
pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS table_size
FROM pg_stat_user_tables
WHERE seq_scan > 1000
AND (idx_scan = 0 OR seq_scan > idx_scan)
ORDER BY seq_tup_read DESC
LIMIT 30;
EOF
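# A table surfacing here usually wants an index on its hot filter column, e.g.
# (illustrative names; CONCURRENTLY avoids blocking writes):
#   CREATE INDEX CONCURRENTLY idx_orders_customer_id ON orders (customer_id);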
# Table statistics
echo "Gathering table statistics..."
psql "$DATABASE_URL" -t -A -F"," > "${OUTPUT_DIR}/table-stats-${TIMESTAMP}.csv" <<EOF
SELECT
schemaname,
tablename,
pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS total_size,
n_live_tup,
n_dead_tup,
CASE WHEN n_live_tup > 0 THEN round((n_dead_tup::numeric / n_live_tup::numeric) * 100, 2) ELSE 0 END AS dead_pct,
last_vacuum,
last_autovacuum
FROM pg_stat_user_tables
ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC
LIMIT 30;
EOF
# Generate text report
echo -e "\n${GREEN}=== Slow Queries Summary ===${NC}"
echo "Queries slower than ${THRESHOLD_MS}ms:"
head -10 "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv" | column -t -s','
echo -e "\n${GREEN}=== Most Frequent Queries ===${NC}"
head -10 "${OUTPUT_DIR}/frequent-queries-${TIMESTAMP}.csv" | column -t -s','
echo -e "\n${GREEN}=== Potential Missing Indexes ===${NC}"
head -10 "${OUTPUT_DIR}/missing-indexes-${TIMESTAMP}.csv" | column -t -s','
echo -e "\n${YELLOW}=== Recommendations ===${NC}"
# Check for unused indexes
UNUSED_INDEXES=$(awk -F',' '$4 == 0' "${OUTPUT_DIR}/index-usage-${TIMESTAMP}.csv" | wc -l)
if [ "$UNUSED_INDEXES" -gt 0 ]; then
echo -e "${YELLOW}⚠ Found $UNUSED_INDEXES unused indexes (0 scans)${NC}"
echo " Consider removing to save space and improve write performance"
fi
# Check for missing indexes (psql -t emits data rows only, no header)
MISSING_INDEXES=$(wc -l < "${OUTPUT_DIR}/missing-indexes-${TIMESTAMP}.csv" | tr -d ' ')
if [ "$MISSING_INDEXES" -gt 0 ]; then
echo -e "${YELLOW}⚠ Found $MISSING_INDEXES tables with high sequential scans${NC}"
echo " Consider adding indexes on frequently queried columns"
fi
# Check for bloated tables
BLOATED_TABLES=$(awk -F',' '$6 > 20' "${OUTPUT_DIR}/table-stats-${TIMESTAMP}.csv" | wc -l)
if [ "$BLOATED_TABLES" -gt 0 ]; then
echo -e "${YELLOW}⚠ Found $BLOATED_TABLES tables with >20% dead tuples${NC}"
echo " Run VACUUM ANALYZE on these tables"
fi
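# A quick manual cleanup for a bloated table (illustrative table name):
#   psql "$DATABASE_URL" -c 'VACUUM (ANALYZE) public.orders;'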
# MySQL profiling
elif [ "$DB_TYPE" = "mysql" ]; then
echo -e "\n${YELLOW}Running MySQL query analysis...${NC}"
# The classic mysql client does not accept mysql:// URLs, so split the URL
# into its components (assumes the mysql://user:pass@host[:port]/dbname form)
MYSQL_URL="${DATABASE_URL#mysql://}"
DB_USER="${MYSQL_URL%%:*}"
DB_PASS="${MYSQL_URL#*:}"; DB_PASS="${DB_PASS%%@*}"
HOST_PORT_DB="${MYSQL_URL#*@}"
DB_NAME="${HOST_PORT_DB#*/}"
DB_HOST="${HOST_PORT_DB%%/*}"
DB_PORT=3306
if [[ "$DB_HOST" == *:* ]]; then
DB_PORT="${DB_HOST##*:}"
DB_HOST="${DB_HOST%%:*}"
fi
MYSQL=(mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASS" "$DB_NAME")
# Enable the slow query log temporarily (long_query_time is in seconds)
"${MYSQL[@]}" -e "SET GLOBAL slow_query_log = 'ON';" 2>/dev/null || true
"${MYSQL[@]}" -e "SET GLOBAL long_query_time = $(awk "BEGIN {print ${THRESHOLD_MS}/1000}");" 2>/dev/null || true
echo "Analyzing query performance..."
"${MYSQL[@]}" -e "
SELECT
DIGEST_TEXT AS query,
COUNT_STAR AS exec_count,
ROUND(AVG_TIMER_WAIT/1000000000, 2) AS avg_time_ms,
ROUND(MAX_TIMER_WAIT/1000000000, 2) AS max_time_ms,
ROUND(SUM_TIMER_WAIT/1000000000, 2) AS total_time_ms
FROM performance_schema.events_statements_summary_by_digest
WHERE AVG_TIMER_WAIT/1000000000 > ${THRESHOLD_MS}
ORDER BY AVG_TIMER_WAIT DESC
LIMIT 50;
" > "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.txt"
echo -e "${GREEN}Query analysis complete${NC}"
cat "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.txt"
else
echo -e "${RED}Error: Unsupported database type${NC}"
exit 1
fi
# Generate JSON summary (CSV files carry no header row under psql -t)
SLOW_QUERY_COUNT=0
if [ -f "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv" ]; then
SLOW_QUERY_COUNT=$(wc -l < "${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv" | tr -d ' ')
fi
cat > "${OUTPUT_DIR}/summary-${TIMESTAMP}.json" <<EOF
{
"timestamp": "${TIMESTAMP}",
"databaseType": "${DB_TYPE}",
"thresholdMs": ${THRESHOLD_MS},
"slowQueryCount": ${SLOW_QUERY_COUNT},
"unusedIndexes": ${UNUSED_INDEXES:-0},
"potentialMissingIndexes": $((${MISSING_INDEXES:-1} - 1)),
"bloatedTables": ${BLOATED_TABLES:-0}
}
EOF
echo -e "\n${GREEN}✓ Query profiling complete${NC}"
echo "Results saved to:"
echo " - ${OUTPUT_DIR}/slow-queries-${TIMESTAMP}.csv"
echo " - ${OUTPUT_DIR}/frequent-queries-${TIMESTAMP}.csv"
echo " - ${OUTPUT_DIR}/index-usage-${TIMESTAMP}.csv"
echo " - ${OUTPUT_DIR}/missing-indexes-${TIMESTAMP}.csv"
echo " - ${OUTPUT_DIR}/table-stats-${TIMESTAMP}.csv"
echo " - ${OUTPUT_DIR}/summary-${TIMESTAMP}.json"
exit 0