Initial commit

Zhongwei Li
2025-11-29 18:20:21 +08:00
commit bbbaf7acad
63 changed files with 38552 additions and 0 deletions


@@ -0,0 +1,230 @@
#!/bin/bash
# Purpose: Analyze log files for patterns, errors, and anomalies
# Version: 1.0.0
# Usage: ./analyze-logs.sh --file <log-file> [options]
# Returns: 0=success, 1=error, 2=invalid params
# Dependencies: awk, grep, sed, jq (optional for JSON logs)
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values
LOG_FILE=""
PATTERN=""
LEVEL=""
CONTEXT_LINES=5
START_TIME=""
END_TIME=""
OUTPUT_FORMAT="text"
SINCE=""
# Help message
show_help() {
cat << EOF
Log Analysis Utility
Usage: $0 --file <log-file> [options]
Options:
--file FILE Log file to analyze (required)
--pattern REGEX Filter by regex pattern
--level LEVEL Filter by log level (ERROR|WARN|INFO|DEBUG)
--context N Show N lines before and after matches (default: 5)
--start TIME Start time (format: "YYYY-MM-DD HH:MM:SS")
--end TIME End time (format: "YYYY-MM-DD HH:MM:SS")
--since DURATION Time ago (e.g., "1 hour ago", "30 minutes ago")
--format FORMAT Output format: text|json (default: text)
-h, --help Show this help message
Examples:
# Find all errors in last hour
$0 --file app.log --level ERROR --since "1 hour ago"
# Find timeout errors with context
$0 --file app.log --pattern "timeout" --context 10
# Analyze specific timeframe
$0 --file app.log --start "2024-10-14 14:00:00" --end "2024-10-14 15:00:00"
EOF
exit 0
}
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
--file)
LOG_FILE="$2"
shift 2
;;
--pattern)
PATTERN="$2"
shift 2
;;
--level)
LEVEL="$2"
shift 2
;;
--context)
CONTEXT_LINES="$2"
shift 2
;;
--start)
START_TIME="$2"
shift 2
;;
--end)
END_TIME="$2"
shift 2
;;
--since)
SINCE="$2"
shift 2
;;
--format)
OUTPUT_FORMAT="$2"
shift 2
;;
-h|--help)
show_help
;;
*)
echo -e "${RED}Error: Unknown option $1${NC}" >&2
exit 2
;;
esac
done
# Validate required parameters
if [ -z "$LOG_FILE" ]; then
echo -e "${RED}Error: --file is required${NC}" >&2
echo "Use --help for usage information"
exit 2
fi
if [ ! -f "$LOG_FILE" ]; then
echo -e "${RED}Error: Log file not found: $LOG_FILE${NC}" >&2
exit 1
fi
# Functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Convert "since" to start time
if [ -n "$SINCE" ]; then
if command -v date &> /dev/null; then
START_TIME=$(date -d "$SINCE" '+%Y-%m-%d %H:%M:%S' 2>/dev/null || date -v -1H '+%Y-%m-%d %H:%M:%S')
fi
fi
log_info "Analyzing log file: $LOG_FILE"
# Build grep command
GREP_CMD="cat '$LOG_FILE'"
# Time filtering
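# Note: the filter pipeline is assembled as a string and executed with eval below, so shell
# metacharacters in --pattern/--level values are interpreted by the shell. Time filtering
# compares whole lines lexicographically and therefore assumes each line starts with an
# ISO-style "YYYY-MM-DD HH:MM:SS" timestamp.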
if [ -n "$START_TIME" ]; then
log_info "Filtering from: $START_TIME"
GREP_CMD="$GREP_CMD | awk '\$0 >= \"$START_TIME\"'"
fi
if [ -n "$END_TIME" ]; then
log_info "Filtering to: $END_TIME"
GREP_CMD="$GREP_CMD | awk '\$0 <= \"$END_TIME\"'"
fi
# Level filtering
if [ -n "$LEVEL" ]; then
log_info "Filtering by level: $LEVEL"
GREP_CMD="$GREP_CMD | grep -i '$LEVEL'"
fi
# Pattern filtering
if [ -n "$PATTERN" ]; then
log_info "Filtering by pattern: $PATTERN"
GREP_CMD="$GREP_CMD | grep -E '$PATTERN' -A $CONTEXT_LINES -B $CONTEXT_LINES"
fi
# Execute filtering (grep exits non-zero when nothing matches, so guard against set -e)
FILTERED_OUTPUT=$(eval "$GREP_CMD" || true)
if [ -z "$FILTERED_OUTPUT" ]; then
log_warn "No matching log entries found"
exit 0
fi
# Count results
MATCH_COUNT=$(echo "$FILTERED_OUTPUT" | wc -l)
log_info "Found $MATCH_COUNT matching lines"
# Analysis
echo ""
echo "═══════════════════════════════════════════════════════════"
echo " LOG ANALYSIS RESULTS"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Error statistics
echo "Error Statistics:"
echo "─────────────────────────────────────────────────────────"
# grep -c prints 0 when nothing matches but exits non-zero; '|| true' keeps set -e/pipefail from aborting
ERROR_COUNT=$(echo "$FILTERED_OUTPUT" | grep -ci "ERROR" || true)
WARN_COUNT=$(echo "$FILTERED_OUTPUT" | grep -ci "WARN" || true)
INFO_COUNT=$(echo "$FILTERED_OUTPUT" | grep -ci "INFO" || true)
echo " ERROR: $ERROR_COUNT"
echo " WARN: $WARN_COUNT"
echo " INFO: $INFO_COUNT"
echo ""
# Top errors
echo "Top Error Messages (Top 10):"
echo "─────────────────────────────────────────────────────────"
echo "$FILTERED_OUTPUT" | grep -i "ERROR" | awk -F'ERROR' '{print $2}' | sort | uniq -c | sort -rn | head -10 || echo " No errors found"
echo ""
# Time distribution (if timestamps present)
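# Buckets lines by their first 13 characters ("YYYY-MM-DD HH" for ISO-style timestamps), i.e. per-hour counts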
echo "Time Distribution:"
echo "─────────────────────────────────────────────────────────"
echo "$FILTERED_OUTPUT" | awk '{print substr($0, 1, 13)}' | sort | uniq -c | tail -20 || echo " No timestamp pattern detected"
echo ""
# Output filtered results
if [ "$OUTPUT_FORMAT" = "json" ]; then
log_info "Generating JSON output..."
# Simple JSON array of log lines
echo "{"
echo " \"file\": \"$LOG_FILE\","
echo " \"matches\": $MATCH_COUNT,"
echo " \"entries\": ["
echo "$FILTERED_OUTPUT" | awk '{printf " \"%s\",\n", $0}' | sed '$ s/,$//'
echo " ]"
echo "}"
else
echo "Matching Log Entries:"
echo "─────────────────────────────────────────────────────────"
echo "$FILTERED_OUTPUT"
fi
echo ""
log_success "Analysis complete"
exit 0


@@ -0,0 +1,418 @@
#!/bin/bash
# Purpose: Monitor memory usage and detect leaks
# Version: 1.0.0
# Usage: ./memory-check.sh --app <app-name> [options]
# Returns: 0=success, 1=error, 2=invalid params
# Dependencies: ps, awk, bc
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values
APP_NAME=""
DURATION=300
INTERVAL=10
THRESHOLD=1024
OUTPUT_DIR="./memory-check-output"
ALERT_ON_GROWTH=true
# Help message
show_help() {
cat << EOF
Memory Monitoring Utility
Usage: $0 --app <app-name> [options]
Options:
--app NAME Application/process name to monitor (required)
--duration N Monitoring duration in seconds (default: 300)
--interval N Sampling interval in seconds (default: 10)
--threshold MB Alert if memory exceeds threshold in MB (default: 1024)
--output DIR Output directory (default: ./memory-check-output)
--no-alert Disable growth alerts
-h, --help Show this help message
Examples:
# Monitor Node.js app for 5 minutes
$0 --app node --duration 300
# Monitor with custom threshold
$0 --app node --duration 600 --threshold 2048
# Quick check (1 minute)
$0 --app node --duration 60 --interval 5
EOF
exit 0
}
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
--app)
APP_NAME="$2"
shift 2
;;
--duration)
DURATION="$2"
shift 2
;;
--interval)
INTERVAL="$2"
shift 2
;;
--threshold)
THRESHOLD="$2"
shift 2
;;
--output)
OUTPUT_DIR="$2"
shift 2
;;
--no-alert)
ALERT_ON_GROWTH=false
shift
;;
-h|--help)
show_help
;;
*)
echo -e "${RED}Error: Unknown option $1${NC}" >&2
exit 2
;;
esac
done
# Validate required parameters
if [ -z "$APP_NAME" ]; then
echo -e "${RED}Error: --app is required${NC}" >&2
echo "Use --help for usage information"
exit 2
fi
# Functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
alert() {
echo -e "${RED}[ALERT]${NC} $1"
}
# Create output directory
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
log_info "Starting memory monitoring for: $APP_NAME"
log_info "Duration: ${DURATION}s, Interval: ${INTERVAL}s, Threshold: ${THRESHOLD}MB"
log_info "Output directory: $OUTPUT_DIR"
# Find process ID
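# Note: pgrep -f matches full command lines, so a broad name (e.g. "node") can match unrelated
# processes, including this script's own command line; only the first matching PID is monitored.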
PIDS=$(pgrep -f "$APP_NAME" || echo "")
if [ -z "$PIDS" ]; then
log_error "No process found matching: $APP_NAME"
exit 1
fi
PID=$(echo "$PIDS" | head -1)
log_info "Found process: PID $PID"
# Output files
MEMORY_LOG="$OUTPUT_DIR/memory-log-$TIMESTAMP.txt"
CHART_FILE="$OUTPUT_DIR/memory-chart-$TIMESTAMP.txt"
REPORT_FILE="$OUTPUT_DIR/memory-report-$TIMESTAMP.txt"
# Write header
echo "Timestamp,RSS_KB,VSZ_KB,Percent_MEM" > "$MEMORY_LOG"
log_info "Monitoring memory usage..."
# Track min/max
MIN_RSS=0
MAX_RSS=0
READINGS=()
# Collect memory samples
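# ps reports RSS and VSZ in KiB; values are converted to MB further down for display and threshold checks.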
SAMPLES=$((DURATION / INTERVAL))
for i in $(seq 1 $SAMPLES); do
# Get memory stats
MEM_STATS=$(ps -p "$PID" -o rss=,vsz=,%mem= 2>/dev/null || echo "")
if [ -z "$MEM_STATS" ]; then
log_error "Process $PID not found. It may have terminated."
break
fi
# Parse values
RSS=$(echo "$MEM_STATS" | awk '{print $1}')
VSZ=$(echo "$MEM_STATS" | awk '{print $2}')
PMEM=$(echo "$MEM_STATS" | awk '{print $3}')
TIMESTAMP_NOW=$(date '+%Y-%m-%d %H:%M:%S')
# Update min/max
if [ "$MIN_RSS" -eq 0 ] || [ "$RSS" -lt "$MIN_RSS" ]; then
MIN_RSS=$RSS
fi
if [ "$RSS" -gt "$MAX_RSS" ]; then
MAX_RSS=$RSS
fi
# Store reading
READINGS+=($RSS)
# Log to file
echo "$TIMESTAMP_NOW,$RSS,$VSZ,$PMEM" >> "$MEMORY_LOG"
# Convert to MB for display
RSS_MB=$(echo "scale=2; $RSS/1024" | bc)
VSZ_MB=$(echo "scale=2; $VSZ/1024" | bc)
# Progress display
echo -ne "\r Sample $i/$SAMPLES: RSS=${RSS_MB}MB, VSZ=${VSZ_MB}MB, %MEM=${PMEM}% "
# Check threshold
if (( $(echo "$RSS_MB > $THRESHOLD" | bc -l) )); then
echo "" # New line before alert
alert "Memory threshold exceeded: ${RSS_MB}MB > ${THRESHOLD}MB"
fi
sleep "$INTERVAL"
done
echo "" # New line after progress
log_success "Memory monitoring complete"
# Calculate statistics
MIN_MB=$(echo "scale=2; $MIN_RSS/1024" | bc)
MAX_MB=$(echo "scale=2; $MAX_RSS/1024" | bc)
GROWTH_MB=$(echo "scale=2; ($MAX_RSS-$MIN_RSS)/1024" | bc)
# Calculate average
TOTAL_RSS=0
for rss in "${READINGS[@]}"; do
TOTAL_RSS=$((TOTAL_RSS + rss))
done
AVG_RSS=$((TOTAL_RSS / ${#READINGS[@]}))
AVG_MB=$(echo "scale=2; $AVG_RSS/1024" | bc)
# Detect leak (memory consistently growing)
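# Heuristic: a leak is reported only when total growth exceeds 50 MB AND the average of the
# second half of the samples is more than 25 MB above the first half, so a single transient
# spike is not flagged as a leak.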
LEAK_DETECTED=false
if (( $(echo "$GROWTH_MB > 50" | bc -l) )); then
# Check if growth is consistent (not just spike)
FIRST_HALF_AVG=0
SECOND_HALF_AVG=0
MID_POINT=$((${#READINGS[@]} / 2))
for i in $(seq 0 $((MID_POINT - 1))); do
FIRST_HALF_AVG=$((FIRST_HALF_AVG + READINGS[$i]))
done
FIRST_HALF_AVG=$((FIRST_HALF_AVG / MID_POINT))
for i in $(seq $MID_POINT $((${#READINGS[@]} - 1))); do
SECOND_HALF_AVG=$((SECOND_HALF_AVG + READINGS[$i]))
done
SECOND_HALF_AVG=$((SECOND_HALF_AVG / (${#READINGS[@]} - MID_POINT)))
CONSISTENT_GROWTH=$((SECOND_HALF_AVG - FIRST_HALF_AVG))
CONSISTENT_GROWTH_MB=$(echo "scale=2; $CONSISTENT_GROWTH/1024" | bc)
if (( $(echo "$CONSISTENT_GROWTH_MB > 25" | bc -l) )); then
LEAK_DETECTED=true
fi
fi
# Generate ASCII chart
log_info "Generating memory chart..."
cat > "$CHART_FILE" << EOF
Memory Usage Over Time
═══════════════════════════════════════════════════════════
RSS (Resident Set Size) in MB
EOF
# Simple ASCII chart (20 rows, scaled to the peak RSS); rows are appended to the chart file
CHART_HEIGHT=20
SCALE_FACTOR=$(echo "scale=2; $MAX_RSS / $CHART_HEIGHT" | bc)
{
for row in $(seq $CHART_HEIGHT -1 0); do
THRESHOLD_LINE=$(echo "scale=0; $row * $SCALE_FACTOR / 1024" | bc)
printf "%4d MB |" "$THRESHOLD_LINE"
for reading in "${READINGS[@]}"; do
READING_ROW=$(echo "scale=0; $reading / $SCALE_FACTOR" | bc)
if [ "$READING_ROW" -ge "$row" ]; then
printf "█"
else
printf " "
fi
done
echo ""
done
printf "        +"
for i in $(seq 1 ${#READINGS[@]}); do printf "─"; done
echo ""
printf "         "
for i in $(seq 1 ${#READINGS[@]}); do
if [ $((i % 10)) -eq 0 ]; then
printf "|"
else
printf " "
fi
done
echo ""
} >> "$CHART_FILE"
cat >> "$CHART_FILE" << EOF
Legend: Each column = ${INTERVAL}s interval
Total duration: ${DURATION}s
EOF
cat "$CHART_FILE"
# Generate report
log_info "Generating memory report..."
cat > "$REPORT_FILE" << EOF
═══════════════════════════════════════════════════════════
MEMORY MONITORING REPORT
═══════════════════════════════════════════════════════════
Application: $APP_NAME
PID: $PID
Duration: ${DURATION}s (${SAMPLES} samples)
Interval: ${INTERVAL}s
Timestamp: $TIMESTAMP
Memory Statistics:
─────────────────────────────────────────────────────────
Minimum RSS: ${MIN_MB} MB
Maximum RSS: ${MAX_MB} MB
Average RSS: ${AVG_MB} MB
Memory Growth: ${GROWTH_MB} MB
Threshold: ${THRESHOLD} MB
EOF
# Leak analysis
if [ "$LEAK_DETECTED" = true ]; then
cat >> "$REPORT_FILE" << EOF
⚠ MEMORY LEAK DETECTED
─────────────────────────────────────────────────────────
Memory grew consistently by ${CONSISTENT_GROWTH_MB} MB
First half average: $(echo "scale=2; $FIRST_HALF_AVG/1024" | bc) MB
Second half average: $(echo "scale=2; $SECOND_HALF_AVG/1024" | bc) MB
Recommendations:
1. Take heap snapshots for detailed analysis
2. Check for:
- Event listeners not removed
- Timers not cleared (setInterval, setTimeout)
- Unbounded caches or arrays
- Circular references
- Closures holding large objects
3. Use memory profiling tools:
- Node.js: node --inspect, heap snapshots
- Python: memory_profiler, tracemalloc
4. Consider using the /debug memory operation for deeper analysis
EOF
if [ "$ALERT_ON_GROWTH" = true ]; then
alert "MEMORY LEAK DETECTED! Growth: ${CONSISTENT_GROWTH_MB} MB"
fi
else
cat >> "$REPORT_FILE" << EOF
✓ NO MEMORY LEAK DETECTED
─────────────────────────────────────────────────────────
Memory usage is stable
Growth of ${GROWTH_MB} MB is within acceptable range
EOF
log_success "No memory leak detected"
fi
# Threshold warnings
if (( $(echo "$MAX_MB > $THRESHOLD" | bc -l) )); then
cat >> "$REPORT_FILE" << EOF
⚠ THRESHOLD EXCEEDED
─────────────────────────────────────────────────────────
Peak memory (${MAX_MB} MB) exceeded threshold (${THRESHOLD} MB)
Recommendations:
1. Increase memory allocation if necessary
2. Optimize memory usage:
- Use streaming for large data
- Implement pagination
- Use efficient data structures
- Clear unused objects
3. Set appropriate container/VM memory limits
EOF
fi
# Output files
cat >> "$REPORT_FILE" << EOF
Output Files:
─────────────────────────────────────────────────────────
Memory Log: $MEMORY_LOG
Memory Chart: $CHART_FILE
This Report: $REPORT_FILE
Next Steps:
─────────────────────────────────────────────────────────
EOF
if [ "$LEAK_DETECTED" = true ]; then
cat >> "$REPORT_FILE" << EOF
1. Use /debug memory for heap profiling
2. Take heap snapshots before and after operations
3. Review code for common leak patterns
4. Monitor production with these findings
EOF
else
cat >> "$REPORT_FILE" << EOF
1. Continue monitoring in production
2. Set up alerts for memory threshold
3. Schedule periodic memory checks
EOF
fi
echo "" >> "$REPORT_FILE"
echo "═══════════════════════════════════════════════════════════" >> "$REPORT_FILE"
log_success "Report saved to: $REPORT_FILE"
# Display report
cat "$REPORT_FILE"
# Exit with appropriate code
if [ "$LEAK_DETECTED" = true ]; then
exit 1
else
exit 0
fi


@@ -0,0 +1,297 @@
#!/bin/bash
# Purpose: Profile application performance (CPU, memory, I/O)
# Version: 1.0.0
# Usage: ./profile.sh --app <app-name> [options]
# Returns: 0=success, 1=error, 2=invalid params
# Dependencies: ps, top, pidstat (optional)
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default values
APP_NAME=""
DURATION=60
INTERVAL=1
OUTPUT_DIR="./profile-output"
PROFILE_TYPE="all"
ENDPOINT=""
# Help message
show_help() {
cat << EOF
Application Profiling Utility
Usage: $0 --app <app-name> [options]
Options:
--app NAME Application/process name to profile (required)
--duration N Profile duration in seconds (default: 60)
--interval N Sampling interval in seconds (default: 1)
--type TYPE Profile type: cpu|memory|io|all (default: all)
--endpoint URL Optional: HTTP endpoint to load test during profiling
--output DIR Output directory (default: ./profile-output)
-h, --help Show this help message
Examples:
# Profile Node.js app for 2 minutes
$0 --app node --duration 120
# Profile with load test
$0 --app node --duration 60 --endpoint http://localhost:3000/api/test
# Profile only CPU
$0 --app node --duration 30 --type cpu
EOF
exit 0
}
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
--app)
APP_NAME="$2"
shift 2
;;
--duration)
DURATION="$2"
shift 2
;;
--interval)
INTERVAL="$2"
shift 2
;;
--type)
PROFILE_TYPE="$2"
shift 2
;;
--endpoint)
ENDPOINT="$2"
shift 2
;;
--output)
OUTPUT_DIR="$2"
shift 2
;;
-h|--help)
show_help
;;
*)
echo -e "${RED}Error: Unknown option $1${NC}" >&2
exit 2
;;
esac
done
# Validate required parameters
if [ -z "$APP_NAME" ]; then
echo -e "${RED}Error: --app is required${NC}" >&2
echo "Use --help for usage information"
exit 2
fi
# Functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Create output directory
mkdir -p "$OUTPUT_DIR"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
log_info "Starting profiling for: $APP_NAME"
log_info "Duration: ${DURATION}s, Interval: ${INTERVAL}s"
log_info "Output directory: $OUTPUT_DIR"
# Find process ID
PIDS=$(pgrep -f "$APP_NAME" || echo "")
if [ -z "$PIDS" ]; then
log_error "No process found matching: $APP_NAME"
exit 1
fi
PID=$(echo "$PIDS" | head -1)
log_info "Found process: PID $PID"
# Start load test if endpoint provided
LOAD_TEST_PID=""
if [ -n "$ENDPOINT" ]; then
log_info "Starting load test on: $ENDPOINT"
if command -v ab &> /dev/null; then
# Use Apache Bench
ab -n 100000 -c 10 "$ENDPOINT" > "$OUTPUT_DIR/load-test-$TIMESTAMP.log" 2>&1 &
LOAD_TEST_PID=$!
log_info "Load test started (PID: $LOAD_TEST_PID)"
else
log_warn "Apache Bench (ab) not found, skipping load test"
fi
fi
# CPU Profiling
if [ "$PROFILE_TYPE" = "cpu" ] || [ "$PROFILE_TYPE" = "all" ]; then
log_info "Profiling CPU usage..."
CPU_OUTPUT="$OUTPUT_DIR/cpu-profile-$TIMESTAMP.txt"
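# Note: ps reports %cpu as CPU time divided by the process's total elapsed time, not an
# instantaneous reading; pidstat (optional dependency) gives per-interval usage if finer
# resolution is needed.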
# Collect CPU samples (one per interval; trailing '=' in -o suppresses ps headers)
for i in $(seq 1 $((DURATION / INTERVAL))); do
ps -p "$PID" -o %cpu=,rss=,vsz=,cmd= >> "$CPU_OUTPUT" 2>/dev/null || true
sleep "$INTERVAL"
done
log_success "CPU profile saved to: $CPU_OUTPUT"
# Calculate statistics
AVG_CPU=$(awk '{sum+=$1; count++} END {if (count>0) print sum/count; else print 0}' "$CPU_OUTPUT")
MAX_CPU=$(awk '{if ($1>max) max=$1} END {print max+0}' "$CPU_OUTPUT")
echo "CPU Statistics:" > "$OUTPUT_DIR/cpu-summary-$TIMESTAMP.txt"
echo " Average CPU: $AVG_CPU%" >> "$OUTPUT_DIR/cpu-summary-$TIMESTAMP.txt"
echo " Peak CPU: $MAX_CPU%" >> "$OUTPUT_DIR/cpu-summary-$TIMESTAMP.txt"
fi
# Memory Profiling
if [ "$PROFILE_TYPE" = "memory" ] || [ "$PROFILE_TYPE" = "all" ]; then
log_info "Profiling memory usage..."
MEM_OUTPUT="$OUTPUT_DIR/memory-profile-$TIMESTAMP.txt"
# Collect memory samples (one per interval; trailing '=' in -o suppresses ps headers)
for i in $(seq 1 $((DURATION / INTERVAL))); do
ps -p "$PID" -o rss=,vsz=,%mem=,cmd= >> "$MEM_OUTPUT" 2>/dev/null || true
sleep "$INTERVAL"
done
log_success "Memory profile saved to: $MEM_OUTPUT"
# Calculate statistics
AVG_RSS=$(awk '{sum+=$1; count++} END {if (count>0) print sum/count; else print 0}' "$MEM_OUTPUT")
MAX_RSS=$(awk '{if ($1>max) max=$1} END {print max+0}' "$MEM_OUTPUT")
MIN_RSS=$(awk '{if (min=="") min=$1; if ($1<min) min=$1} END {print min+0}' "$MEM_OUTPUT")
echo "Memory Statistics:" > "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt"
echo " Average RSS: $(echo "scale=2; $AVG_RSS/1024" | bc) MB" >> "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt"
echo " Peak RSS: $(echo "scale=2; $MAX_RSS/1024" | bc) MB" >> "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt"
echo " Min RSS: $(echo "scale=2; $MIN_RSS/1024" | bc) MB" >> "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt"
echo " Memory Growth: $(echo "scale=2; ($MAX_RSS-$MIN_RSS)/1024" | bc) MB" >> "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt"
fi
# I/O Profiling
if [ "$PROFILE_TYPE" = "io" ] || [ "$PROFILE_TYPE" = "all" ]; then
log_info "Profiling I/O usage..."
IO_OUTPUT="$OUTPUT_DIR/io-profile-$TIMESTAMP.txt"
# Check if process has I/O stats available
if [ -f "/proc/$PID/io" ]; then
# Collect I/O samples (one per interval)
for i in $(seq 1 $((DURATION / INTERVAL))); do
echo "=== Sample $i ===" >> "$IO_OUTPUT"
cat "/proc/$PID/io" >> "$IO_OUTPUT" 2>/dev/null || true
sleep "$INTERVAL"
done
log_success "I/O profile saved to: $IO_OUTPUT"
else
log_warn "I/O profiling not available for this process"
fi
fi
# Stop load test if running
if [ -n "$LOAD_TEST_PID" ]; then
log_info "Stopping load test..."
kill "$LOAD_TEST_PID" 2>/dev/null || true
wait "$LOAD_TEST_PID" 2>/dev/null || true
fi
# Generate summary report
REPORT_FILE="$OUTPUT_DIR/profile-report-$TIMESTAMP.txt"
cat > "$REPORT_FILE" << EOF
═══════════════════════════════════════════════════════════
PERFORMANCE PROFILE REPORT
═══════════════════════════════════════════════════════════
Application: $APP_NAME
PID: $PID
Duration: ${DURATION}s
Interval: ${INTERVAL}s
Timestamp: $TIMESTAMP
EOF
# Add CPU summary if available
if [ -f "$OUTPUT_DIR/cpu-summary-$TIMESTAMP.txt" ]; then
cat "$OUTPUT_DIR/cpu-summary-$TIMESTAMP.txt" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"
fi
# Add memory summary if available
if [ -f "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt" ]; then
cat "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"
fi
# Add recommendations
cat >> "$REPORT_FILE" << EOF
Recommendations:
─────────────────────────────────────────────────────────
EOF
if [ -f "$OUTPUT_DIR/cpu-summary-$TIMESTAMP.txt" ]; then
MAX_CPU=$(awk '/Peak CPU:/ {print $3}' "$OUTPUT_DIR/cpu-summary-$TIMESTAMP.txt" | sed 's/%//')
if [ -n "$MAX_CPU" ] && (( $(echo "$MAX_CPU > 80" | bc -l) )); then
echo " ⚠ High CPU usage detected (${MAX_CPU}%)" >> "$REPORT_FILE"
echo " - Consider optimizing CPU-intensive operations" >> "$REPORT_FILE"
echo " - Profile with flame graphs for detailed analysis" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"
fi
fi
if [ -f "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt" ]; then
GROWTH=$(awk '/Memory Growth:/ {print $3}' "$OUTPUT_DIR/memory-summary-$TIMESTAMP.txt")
if [ -n "$GROWTH" ] && (( $(echo "$GROWTH > 100" | bc -l) )); then
echo " ⚠ Significant memory growth detected (${GROWTH} MB)" >> "$REPORT_FILE"
echo " - Possible memory leak" >> "$REPORT_FILE"
echo " - Use heap profiling to identify leak sources" >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"
fi
fi
cat >> "$REPORT_FILE" << EOF
Output Files:
─────────────────────────────────────────────────────────
EOF
ls -lh "$OUTPUT_DIR"/*-$TIMESTAMP.* >> "$REPORT_FILE"
echo "" >> "$REPORT_FILE"
echo "═══════════════════════════════════════════════════════════" >> "$REPORT_FILE"
log_success "Profile complete!"
log_info "Report saved to: $REPORT_FILE"
# Display summary
cat "$REPORT_FILE"
exit 0