Initial commit
533
commands/security-scan/.scripts/file-scanner.sh
Executable file
@@ -0,0 +1,533 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# ============================================================================
|
||||
# File Scanner - Detect dangerous files and sensitive configurations
|
||||
# ============================================================================
|
||||
# Purpose: Identify files that should not be committed to version control
|
||||
# Version: 1.0.0
|
||||
# Usage: ./file-scanner.sh <path> <patterns> <include_hidden> <check_gitignore>
|
||||
# Returns: 0=no dangerous files, 1=dangerous files found, 2=error
|
||||
# ============================================================================
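# Example (illustrative, using the documented defaults): scan the current
# directory for all dangerous-file patterns, include hidden files, and
# cross-check .gitignore:
#   ./file-scanner.sh . all true true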
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Source shared validation library
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PLUGIN_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
|
||||
if [[ -f "${PLUGIN_ROOT}/scripts/validate-lib.sh" ]]; then
|
||||
source "${PLUGIN_ROOT}/scripts/validate-lib.sh"
|
||||
fi
|
||||
|
||||
# ============================================================================
|
||||
# Configuration
|
||||
# ============================================================================
|
||||
|
||||
PATH_TO_SCAN="${1:-.}"
|
||||
PATTERNS="${2:-all}"
|
||||
INCLUDE_HIDDEN="${3:-true}"
|
||||
CHECK_GITIGNORE="${4:-true}"
|
||||
|
||||
DANGEROUS_FILES_FOUND=0
|
||||
declare -a FINDINGS=()
|
||||
|
||||
# ============================================================================
|
||||
# Dangerous File Pattern Definitions
|
||||
# ============================================================================
|
||||
|
||||
# Environment Files (CRITICAL)
|
||||
declare -a ENV_PATTERNS=(
|
||||
".env"
|
||||
".env.local"
|
||||
".env.production"
|
||||
".env.development"
|
||||
".env.staging"
|
||||
".env.test"
|
||||
"env.sh"
|
||||
"setenv.sh"
|
||||
".envrc"
|
||||
)
|
||||
|
||||
# Credential Files (CRITICAL)
|
||||
declare -a CREDENTIAL_PATTERNS=(
|
||||
"*credentials*"
|
||||
"*secrets*"
|
||||
"*password*"
|
||||
".aws/credentials"
|
||||
".azure/credentials"
|
||||
".gcp/credentials.json"
|
||||
"gcloud/credentials"
|
||||
"service-account*.json"
|
||||
)
|
||||
|
||||
# Private Keys (CRITICAL)
|
||||
declare -a KEY_PATTERNS=(
|
||||
"id_rsa"
|
||||
"id_dsa"
|
||||
"id_ed25519"
|
||||
"id_ecdsa"
|
||||
"*.pem"
|
||||
"*.key"
|
||||
"*.p12"
|
||||
"*.pfx"
|
||||
"*.jks"
|
||||
"*.keystore"
|
||||
".gnupg/*"
|
||||
".ssh/id_*"
|
||||
)
|
||||
|
||||
# Database Files (HIGH)
|
||||
declare -a DATABASE_PATTERNS=(
|
||||
"*.db"
|
||||
"*.sqlite"
|
||||
"*.sqlite3"
|
||||
"dump.sql"
|
||||
"*backup*.sql"
|
||||
"*.mdb"
|
||||
"*.accdb"
|
||||
)
|
||||
|
||||
# Configuration Files (MEDIUM)
|
||||
declare -a CONFIG_PATTERNS=(
|
||||
"config/database.yml"
|
||||
"appsettings.json"
|
||||
"wp-config.php"
|
||||
"settings.py"
|
||||
".htpasswd"
|
||||
)
|
||||
|
||||
# Backup Files (MEDIUM)
|
||||
declare -a BACKUP_PATTERNS=(
|
||||
"*.bak"
|
||||
"*.backup"
|
||||
"*.old"
|
||||
"*.orig"
|
||||
"*.copy"
|
||||
"*~"
|
||||
"*.swp"
|
||||
"*.swo"
|
||||
)
|
||||
|
||||
# Log Files (LOW)
|
||||
declare -a LOG_PATTERNS=(
|
||||
"*.log"
|
||||
"debug.log"
|
||||
"error.log"
|
||||
)
|
||||
|
||||
# ============================================================================
|
||||
# Severity Classification
|
||||
# ============================================================================
|
||||
|
||||
get_file_severity() {
|
||||
local filename="$1"
|
||||
|
||||
# CRITICAL: Environment, credentials, keys
|
||||
for pattern in "${ENV_PATTERNS[@]}" "${CREDENTIAL_PATTERNS[@]}" "${KEY_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "critical"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
# HIGH: Databases
|
||||
for pattern in "${DATABASE_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "high"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
# MEDIUM: Config, backups
|
||||
for pattern in "${CONFIG_PATTERNS[@]}" "${BACKUP_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "medium"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
# LOW: Logs
|
||||
for pattern in "${LOG_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "low"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
echo "unknown"
|
||||
}
|
||||
|
||||
get_file_type() {
|
||||
local filename="$1"
|
||||
|
||||
for pattern in "${ENV_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "Environment File"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern in "${CREDENTIAL_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "Credential File"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern in "${KEY_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "Private Key"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern in "${DATABASE_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "Database File"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern in "${CONFIG_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "Configuration File"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern in "${BACKUP_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "Backup File"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern in "${LOG_PATTERNS[@]}"; do
|
||||
if [[ "${filename}" == ${pattern} ]] || [[ "${filename}" =~ ${pattern//\*/.*} ]]; then
|
||||
echo "Log File"
|
||||
return
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Unknown"
|
||||
}
|
||||
|
||||
get_risk_description() {
|
||||
local file_type="$1"
|
||||
|
||||
case "${file_type}" in
|
||||
"Environment File")
|
||||
echo "Contains secrets, API keys, and configuration"
|
||||
;;
|
||||
"Credential File")
|
||||
echo "Direct access credentials"
|
||||
;;
|
||||
"Private Key")
|
||||
echo "Authentication keys"
|
||||
;;
|
||||
"Database File")
|
||||
echo "May contain sensitive user data"
|
||||
;;
|
||||
"Configuration File")
|
||||
echo "May contain hardcoded secrets"
|
||||
;;
|
||||
"Backup File")
|
||||
echo "May contain previous versions with secrets"
|
||||
;;
|
||||
"Log File")
|
||||
echo "May contain leaked sensitive information"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown risk"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
get_remediation() {
|
||||
local file_type="$1"
|
||||
local in_gitignore="$2"
|
||||
|
||||
if [[ "${in_gitignore}" == "false" ]]; then
|
||||
echo "Add to .gitignore, remove from git history, rotate credentials"
|
||||
else
|
||||
echo "Verify .gitignore is working, review if file should exist"
|
||||
fi
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# .gitignore Checking
|
||||
# ============================================================================
|
||||
|
||||
is_in_gitignore() {
|
||||
local file="$1"
|
||||
local gitignore="${PATH_TO_SCAN}/.gitignore"
|
||||
|
||||
if [[ ! -f "${gitignore}" ]]; then
|
||||
echo "false"
|
||||
return
|
||||
fi
|
||||
|
||||
# Simple check - does not handle all gitignore patterns perfectly
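# (Where git is available, `git check-ignore -q -- <path>` would be a more
# accurate alternative; the matching below only covers common cases.)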
|
||||
local basename
|
||||
basename=$(basename "${file}")
|
||||
local dirname
|
||||
dirname=$(dirname "${file}")
|
||||
|
||||
if grep -qF "${basename}" "${gitignore}" 2>/dev/null; then
|
||||
echo "true"
|
||||
return
|
||||
fi
|
||||
|
||||
if grep -qF "${file}" "${gitignore}" 2>/dev/null; then
|
||||
echo "true"
|
||||
return
|
||||
fi
|
||||
|
||||
# Check pattern matches
|
||||
while IFS= read -r pattern; do
|
||||
# Skip comments and empty lines
|
||||
[[ "${pattern}" =~ ^#.*$ || -z "${pattern}" ]] && continue
|
||||
|
||||
# Simple pattern matching (not complete gitignore spec)
|
||||
if [[ "${basename}" == ${pattern} ]]; then
|
||||
echo "true"
|
||||
return
|
||||
fi
|
||||
done < "${gitignore}"
|
||||
|
||||
echo "false"
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# File Scanning
|
||||
# ============================================================================
|
||||
|
||||
should_check_pattern() {
|
||||
local filename="$1"
|
||||
|
||||
if [[ "${PATTERNS}" == "all" ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
case "${PATTERNS}" in
|
||||
*env*)
|
||||
for pattern in "${ENV_PATTERNS[@]}"; do
|
||||
[[ "${filename}" == ${pattern} ]] && return 0
|
||||
done
|
||||
;;
|
||||
*credentials*)
|
||||
for pattern in "${CREDENTIAL_PATTERNS[@]}"; do
|
||||
[[ "${filename}" == ${pattern} ]] && return 0
|
||||
done
|
||||
;;
|
||||
*keys*)
|
||||
for pattern in "${KEY_PATTERNS[@]}"; do
|
||||
[[ "${filename}" == ${pattern} ]] && return 0
|
||||
done
|
||||
;;
|
||||
esac
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
scan_file() {
|
||||
local filepath="$1"
|
||||
local filename
|
||||
filename=$(basename "${filepath}")
|
||||
|
||||
# Check if hidden file (skip if not including hidden)
|
||||
if [[ "${filename}" =~ ^\. && "${INCLUDE_HIDDEN}" != "true" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Skip certain directories
|
||||
if [[ "${filepath}" =~ (\.git|node_modules|vendor|dist|build)/ ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Check if file matches dangerous patterns
|
||||
local severity
|
||||
severity=$(get_file_severity "${filename}")
|
||||
|
||||
if [[ "${severity}" == "unknown" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
if ! should_check_pattern "${filename}"; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Get file details
|
||||
local file_type
|
||||
file_type=$(get_file_type "${filename}")
|
||||
local size
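# BSD/macOS stat first (-f%z), then GNU stat (-c%s); default to 0 if both fail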
|
||||
size=$(stat -f%z "${filepath}" 2>/dev/null || stat -c%s "${filepath}" 2>/dev/null || echo "0")
|
||||
local in_gitignore="false"
|
||||
|
||||
if [[ "${CHECK_GITIGNORE}" == "true" ]]; then
|
||||
in_gitignore=$(is_in_gitignore "${filepath}")
|
||||
fi
|
||||
|
||||
local risk
|
||||
risk=$(get_risk_description "${file_type}")
|
||||
local remediation
|
||||
remediation=$(get_remediation "${file_type}" "${in_gitignore}")
|
||||
|
||||
FINDINGS+=("${severity}|${filepath}|${file_type}|${size}|${in_gitignore}|${risk}|${remediation}")
|
||||
DANGEROUS_FILES_FOUND=$((DANGEROUS_FILES_FOUND + 1))  # ((var++)) returns non-zero when var is 0 and would abort under set -e
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Main Execution
|
||||
# ============================================================================
|
||||
|
||||
main() {
|
||||
# Validate path
|
||||
if [[ ! -d "${PATH_TO_SCAN}" ]]; then
|
||||
echo "ERROR: Path is not a directory: ${PATH_TO_SCAN}" >&2
|
||||
exit 2
|
||||
fi
|
||||
|
||||
echo "Dangerous Files Scan Results"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "Path: ${PATH_TO_SCAN}"
|
||||
echo "Include Hidden: ${INCLUDE_HIDDEN}"
|
||||
echo "Check .gitignore: ${CHECK_GITIGNORE}"
|
||||
echo ""
|
||||
|
||||
# Scan files
|
||||
local files_scanned=0
|
||||
|
||||
if [[ "${INCLUDE_HIDDEN}" == "true" ]]; then
|
||||
while IFS= read -r -d '' file; do
|
||||
scan_file "${file}"
|
||||
files_scanned=$((files_scanned + 1))
|
||||
done < <(find "${PATH_TO_SCAN}" -type f -print0 2>/dev/null)
|
||||
else
|
||||
while IFS= read -r -d '' file; do
|
||||
scan_file "${file}"
|
||||
files_scanned=$((files_scanned + 1))
|
||||
done < <(find "${PATH_TO_SCAN}" -type f -not -path '*/.*' -print0 2>/dev/null)
|
||||
fi
|
||||
|
||||
echo "Files Scanned: ${files_scanned}"
|
||||
echo ""
|
||||
|
||||
# Report findings
|
||||
if [[ ${DANGEROUS_FILES_FOUND} -eq 0 ]]; then
|
||||
echo "✅ SUCCESS: No dangerous files detected"
|
||||
echo "All files safe"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "⚠️ DANGEROUS FILES DETECTED: ${DANGEROUS_FILES_FOUND}"
|
||||
echo ""
|
||||
|
||||
# Check .gitignore status
|
||||
if [[ "${CHECK_GITIGNORE}" == "true" && ! -f "${PATH_TO_SCAN}/.gitignore" ]]; then
|
||||
echo "⚠️ WARNING: No .gitignore file found"
|
||||
echo " Recommendation: Create .gitignore to prevent committing sensitive files"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Group by severity
|
||||
local critical_count=0
|
||||
local high_count=0
|
||||
local medium_count=0
|
||||
local low_count=0
|
||||
local not_in_gitignore=0
|
||||
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity filepath file_type size in_gitignore risk remediation <<< "${finding}"
|
||||
case "${severity}" in
|
||||
critical) ((critical_count++)) ;;
|
||||
high) ((high_count++)) ;;
|
||||
medium) ((medium_count++)) ;;
|
||||
low) ((low_count++)) ;;
|
||||
esac
|
||||
[[ "${in_gitignore}" == "false" ]] && ((not_in_gitignore++))
|
||||
done
|
||||
|
||||
# Print findings by severity
|
||||
if [[ ${critical_count} -gt 0 ]]; then
|
||||
echo "CRITICAL Files (${critical_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity filepath file_type size in_gitignore risk remediation <<< "${finding}"
|
||||
if [[ "${severity}" == "critical" ]]; then
|
||||
# Convert size to human readable
|
||||
local size_human
|
||||
if [[ ${size} -ge 1048576 ]]; then
|
||||
size_human="$(( size / 1048576 )) MB"
|
||||
elif [[ ${size} -ge 1024 ]]; then
|
||||
size_human="$(( size / 1024 )) KB"
|
||||
else
|
||||
size_human="${size} bytes"
|
||||
fi
|
||||
|
||||
echo " ❌ ${filepath} (${size_human})"
|
||||
echo " Type: ${file_type}"
|
||||
echo " Risk: ${risk}"
|
||||
if [[ "${CHECK_GITIGNORE}" == "true" ]]; then
|
||||
if [[ "${in_gitignore}" == "true" ]]; then
|
||||
echo " Status: In .gitignore ✓"
|
||||
else
|
||||
echo " Status: NOT in .gitignore ⚠️"
|
||||
fi
|
||||
fi
|
||||
echo " Remediation: ${remediation}"
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ ${high_count} -gt 0 ]]; then
|
||||
echo "HIGH Files (${high_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity filepath file_type size in_gitignore risk remediation <<< "${finding}"
|
||||
if [[ "${severity}" == "high" ]]; then
|
||||
local size_human
|
||||
if [[ ${size} -ge 1048576 ]]; then
|
||||
size_human="$(( size / 1048576 )) MB"
|
||||
elif [[ ${size} -ge 1024 ]]; then
|
||||
size_human="$(( size / 1024 )) KB"
|
||||
else
|
||||
size_human="${size} bytes"
|
||||
fi
|
||||
|
||||
echo " ⚠️ ${filepath} (${size_human})"
|
||||
echo " Type: ${file_type}"
|
||||
if [[ "${CHECK_GITIGNORE}" == "true" ]]; then
|
||||
echo " Status: $([ "${in_gitignore}" == "true" ] && echo "In .gitignore ✓" || echo "NOT in .gitignore ⚠️")"
|
||||
fi
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ ${medium_count} -gt 0 ]]; then
|
||||
echo "MEDIUM Files (${medium_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity filepath file_type size in_gitignore risk remediation <<< "${finding}"
|
||||
if [[ "${severity}" == "medium" ]]; then
|
||||
echo " 💡 ${filepath}"
|
||||
echo " Type: ${file_type}"
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
echo "Summary:"
|
||||
echo " Critical: ${critical_count}"
|
||||
echo " High: ${high_count}"
|
||||
echo " Medium: ${medium_count}"
|
||||
echo " Low: ${low_count}"
|
||||
if [[ "${CHECK_GITIGNORE}" == "true" ]]; then
|
||||
echo " Not in .gitignore: ${not_in_gitignore}"
|
||||
fi
|
||||
echo ""
|
||||
echo "Action Required: $([ ${critical_count} -gt 0 ] || [ ${not_in_gitignore} -gt 0 ] && echo "YES" || echo "REVIEW")"
|
||||
|
||||
exit 1
|
||||
}
|
||||
|
||||
main "$@"
|
||||
407
commands/security-scan/.scripts/permission-checker.sh
Executable file
@@ -0,0 +1,407 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# ============================================================================
|
||||
# Permission Checker - Audit file permissions for security issues
|
||||
# ============================================================================
|
||||
# Purpose: Detect world-writable files, overly permissive scripts, and permission issues
|
||||
# Version: 1.0.0
|
||||
# Usage: ./permission-checker.sh <path> <strict> <check_executables> <report_all>
|
||||
# Returns: 0=all permissions correct, 1=issues found, 2=error
|
||||
# ============================================================================
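# Example (illustrative): strict audit of a scripts directory, verifying that
# executables have shebangs and reporting only issues:
#   ./permission-checker.sh ./scripts true true false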
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Source shared validation library
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PLUGIN_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
|
||||
if [[ -f "${PLUGIN_ROOT}/scripts/validate-lib.sh" ]]; then
|
||||
source "${PLUGIN_ROOT}/scripts/validate-lib.sh"
|
||||
fi
|
||||
|
||||
# ============================================================================
|
||||
# Configuration
|
||||
# ============================================================================
|
||||
|
||||
PATH_TO_SCAN="${1:-.}"
|
||||
STRICT="${2:-false}"
|
||||
CHECK_EXECUTABLES="${3:-true}"
|
||||
REPORT_ALL="${4:-false}"
|
||||
|
||||
ISSUES_FOUND=0
|
||||
declare -a FINDINGS=()
|
||||
|
||||
# ============================================================================
|
||||
# Permission Classification
|
||||
# ============================================================================
|
||||
|
||||
get_permission_octal() {
|
||||
local file="$1"
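# Try BSD/macOS stat (-f "%Op") first; with pipefail set, fall back to
# GNU stat (-c "%a"), and finally to 0644.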
|
||||
stat -f "%Op" "${file}" 2>/dev/null | sed 's/.*\([0-7][0-7][0-7][0-7]\)$/\1/' || \
|
||||
stat -c "%a" "${file}" 2>/dev/null || echo "0644"
|
||||
}
|
||||
|
||||
get_permission_symbolic() {
|
||||
local file="$1"
|
||||
ls -ld "${file}" 2>/dev/null | awk '{print $1}' | tail -c 10
|
||||
}
|
||||
|
||||
is_world_writable() {
|
||||
local perms="$1"
|
||||
[[ "${perms: -1}" =~ [2367] ]]
|
||||
}
|
||||
|
||||
is_world_readable() {
|
||||
local perms="$1"
|
||||
[[ "${perms: -1}" =~ [4567] ]]
|
||||
}
|
||||
|
||||
is_executable() {
|
||||
local perms="$1"
|
||||
[[ "${perms}" =~ [1357] ]]
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Severity Classification
|
||||
# ============================================================================
|
||||
|
||||
get_issue_severity() {
|
||||
local issue_type="$1"
|
||||
local perms="$2"
|
||||
|
||||
case "${issue_type}" in
|
||||
world_writable_executable)
|
||||
echo "critical"
|
||||
;;
|
||||
world_writable)
|
||||
echo "critical"
|
||||
;;
|
||||
missing_shebang)
|
||||
echo "high"
|
||||
;;
|
||||
overly_permissive_sensitive)
|
||||
echo "high"
|
||||
;;
|
||||
wrong_directory_perms)
|
||||
echo "medium"
|
||||
;;
|
||||
non_executable_script)
|
||||
echo "medium"
|
||||
;;
|
||||
inconsistent_perms)
|
||||
echo "low"
|
||||
;;
|
||||
*)
|
||||
echo "low"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Shebang Validation
|
||||
# ============================================================================
|
||||
|
||||
has_shebang() {
|
||||
local file="$1"
|
||||
|
||||
if [[ ! -f "${file}" ]]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
local first_line
|
||||
first_line=$(head -n 1 "${file}" 2>/dev/null || echo "")
|
||||
|
||||
[[ "${first_line}" =~ ^#! ]]
|
||||
}
|
||||
|
||||
get_expected_shebang() {
|
||||
local file="$1"
|
||||
local basename
|
||||
basename=$(basename "${file}")
|
||||
|
||||
case "${basename}" in
|
||||
*.sh|*.bash)
|
||||
echo "#!/usr/bin/env bash"
|
||||
;;
|
||||
*.py)
|
||||
echo "#!/usr/bin/env python3"
|
||||
;;
|
||||
*.js)
|
||||
echo "#!/usr/bin/env node"
|
||||
;;
|
||||
*.rb)
|
||||
echo "#!/usr/bin/env ruby"
|
||||
;;
|
||||
*)
|
||||
echo ""
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Expected Permissions
|
||||
# ============================================================================
|
||||
|
||||
get_expected_permissions() {
|
||||
local file="$1"
|
||||
local basename
|
||||
basename=$(basename "${file}")
|
||||
local is_exec
|
||||
|
||||
# Check if currently executable
|
||||
if [[ -x "${file}" ]]; then
|
||||
is_exec="true"
|
||||
else
|
||||
is_exec="false"
|
||||
fi
|
||||
|
||||
# Sensitive files
|
||||
if [[ "${basename}" =~ ^\.env || "${basename}" =~ credentials || "${basename}" =~ secrets ]]; then
|
||||
echo "600"
|
||||
return
|
||||
fi
|
||||
|
||||
# SSH/GPG files
|
||||
if [[ "${file}" =~ \.ssh/id_ || "${file}" =~ \.gnupg/ ]]; then
|
||||
if [[ "${basename}" =~ \.pub$ ]]; then
|
||||
echo "644"
|
||||
else
|
||||
echo "600"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
# Scripts
|
||||
if [[ "${basename}" =~ \.(sh|bash|py|js|rb)$ ]]; then
|
||||
if [[ "${is_exec}" == "true" ]] || has_shebang "${file}"; then
|
||||
echo "755"
|
||||
else
|
||||
echo "644"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
# Directories
|
||||
if [[ -d "${file}" ]]; then
|
||||
if [[ "${basename}" =~ ^\.ssh$ || "${basename}" =~ ^\.gnupg$ ]]; then
|
||||
echo "700"
|
||||
else
|
||||
echo "755"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
# Default
|
||||
echo "644"
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Permission Checking
|
||||
# ============================================================================
|
||||
|
||||
check_file_permissions() {
|
||||
local file="$1"
|
||||
local perms
|
||||
perms=$(get_permission_octal "${file}")
|
||||
local symbolic
|
||||
symbolic=$(get_permission_symbolic "${file}")
|
||||
local expected
|
||||
expected=$(get_expected_permissions "${file}")
|
||||
local basename
|
||||
basename=$(basename "${file}")
|
||||
|
||||
# Skip certain directories
|
||||
if [[ "${file}" =~ (\.git|node_modules|vendor|dist|build)/ ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# CRITICAL: Check for 777 (world-writable and executable)
|
||||
if [[ "${perms}" == "0777" || "${perms}" == "777" ]]; then
|
||||
local issue_type="world_writable_executable"
|
||||
local severity
|
||||
severity=$(get_issue_severity "${issue_type}" "${perms}")
|
||||
FINDINGS+=("${severity}|${file}|${perms}|${symbolic}|${expected}|World-writable and executable|Anyone can modify and execute|chmod ${expected} \"${file}\"")
|
||||
ISSUES_FOUND=$((ISSUES_FOUND + 1))  # avoid ((var++)) exiting under set -e when var is 0
|
||||
return
|
||||
fi
|
||||
|
||||
# CRITICAL: Check for 666 (world-writable)
|
||||
if [[ "${perms}" == "0666" || "${perms}" == "666" ]]; then
|
||||
local issue_type="world_writable"
|
||||
local severity
|
||||
severity=$(get_issue_severity "${issue_type}" "${perms}")
|
||||
FINDINGS+=("${severity}|${file}|${perms}|${symbolic}|${expected}|World-writable file|Anyone can modify content|chmod ${expected} \"${file}\"")
|
||||
ISSUES_FOUND=$((ISSUES_FOUND + 1))
|
||||
return
|
||||
fi
|
||||
|
||||
# Check if executable but missing shebang
|
||||
if [[ -f "${file}" && -x "${file}" && "${CHECK_EXECUTABLES}" == "true" ]]; then
|
||||
if [[ "${basename}" =~ \.(sh|bash|py|js|rb)$ ]]; then
|
||||
if ! has_shebang "${file}"; then
|
||||
local expected_shebang
|
||||
expected_shebang=$(get_expected_shebang "${file}")
|
||||
FINDINGS+=("high|${file}|${perms}|${symbolic}|${perms}|Executable without shebang|May not execute correctly|Add ${expected_shebang} to first line")
|
||||
ISSUES_FOUND=$((ISSUES_FOUND + 1))
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check sensitive files
|
||||
if [[ "${basename}" =~ ^\.env || "${basename}" =~ credentials || "${basename}" =~ secrets ]]; then
|
||||
if is_world_readable "${perms}"; then
|
||||
FINDINGS+=("high|${file}|${perms}|${symbolic}|600|Sensitive file world-readable|Secrets visible to all users|chmod 600 \"${file}\"")
|
||||
ISSUES_FOUND=$((ISSUES_FOUND + 1))
|
||||
return
|
||||
fi
|
||||
if [[ "${perms}" != "0600" && "${perms}" != "600" && "${STRICT}" == "true" ]]; then
|
||||
FINDINGS+=("medium|${file}|${perms}|${symbolic}|600|Sensitive file should be 600|Reduce permissions|chmod 600 \"${file}\"")
|
||||
ISSUES_FOUND=$((ISSUES_FOUND + 1))
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
# Strict mode: Check for any discrepancies
|
||||
if [[ "${STRICT}" == "true" ]]; then
|
||||
if [[ "${perms}" != "0${expected}" && "${perms}" != "${expected}" ]]; then
|
||||
# Check if it's a minor discrepancy
|
||||
if [[ "${perms}" =~ ^0?775$ && "${expected}" == "755" ]]; then
|
||||
FINDINGS+=("medium|${file}|${perms}|${symbolic}|${expected}|Group-writable (strict mode)|Remove group write|chmod ${expected} \"${file}\"")
|
||||
ISSUES_FOUND=$((ISSUES_FOUND + 1))
|
||||
elif [[ "${perms}" =~ ^0?755$ && "${expected}" == "644" ]]; then
|
||||
FINDINGS+=("low|${file}|${perms}|${symbolic}|${expected}|Executable but should not be|Remove executable bit|chmod ${expected} \"${file}\"")
|
||||
ISSUES_FOUND=$((ISSUES_FOUND + 1))
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Report all mode
|
||||
if [[ "${REPORT_ALL}" == "true" ]]; then
|
||||
if [[ "${perms}" == "0${expected}" || "${perms}" == "${expected}" ]]; then
|
||||
FINDINGS+=("info|${file}|${perms}|${symbolic}|${expected}|Permissions correct|N/A|N/A")
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Main Execution
|
||||
# ============================================================================
|
||||
|
||||
main() {
|
||||
# Validate path
|
||||
if [[ ! -e "${PATH_TO_SCAN}" ]]; then
|
||||
echo "ERROR: Path does not exist: ${PATH_TO_SCAN}" >&2
|
||||
exit 2
|
||||
fi
|
||||
|
||||
echo "File Permission Audit Results"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "Path: ${PATH_TO_SCAN}"
|
||||
echo "Strict Mode: ${STRICT}"
|
||||
echo "Check Executables: ${CHECK_EXECUTABLES}"
|
||||
echo ""
|
||||
|
||||
# Scan files
|
||||
local files_checked=0
|
||||
|
||||
if [[ -f "${PATH_TO_SCAN}" ]]; then
|
||||
check_file_permissions "${PATH_TO_SCAN}"
|
||||
files_checked=$((files_checked + 1))
|
||||
elif [[ -d "${PATH_TO_SCAN}" ]]; then
|
||||
while IFS= read -r -d '' file; do
|
||||
check_file_permissions "${file}"
|
||||
files_checked=$((files_checked + 1))
|
||||
done < <(find "${PATH_TO_SCAN}" -print0 2>/dev/null)
|
||||
fi
|
||||
|
||||
echo "Files Checked: ${files_checked}"
|
||||
echo ""
|
||||
|
||||
# Report findings
|
||||
if [[ ${ISSUES_FOUND} -eq 0 ]]; then
|
||||
echo "✅ SUCCESS: All file permissions correct"
|
||||
echo "No permission issues detected"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "⚠️ PERMISSION ISSUES DETECTED: ${ISSUES_FOUND}"
|
||||
echo ""
|
||||
|
||||
# Group by severity
|
||||
local critical_count=0
|
||||
local high_count=0
|
||||
local medium_count=0
|
||||
local low_count=0
|
||||
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity file perms symbolic expected issue risk fix <<< "${finding}"
|
||||
case "${severity}" in
|
||||
critical) ((critical_count++)) ;;
|
||||
high) ((high_count++)) ;;
|
||||
medium) ((medium_count++)) ;;
|
||||
low) ((low_count++)) ;;
|
||||
info) ;; # Don't count info
|
||||
esac
|
||||
done
|
||||
|
||||
# Print findings by severity
|
||||
if [[ ${critical_count} -gt 0 ]]; then
|
||||
echo "CRITICAL Issues (${critical_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity file perms symbolic expected issue risk fix <<< "${finding}"
|
||||
if [[ "${severity}" == "critical" ]]; then
|
||||
echo " ❌ ${file} (${perms})"
|
||||
echo " Current: ${symbolic} (${perms})"
|
||||
echo " Issue: ${issue}"
|
||||
echo " Risk: ${risk}"
|
||||
echo " Fix: ${fix}"
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ ${high_count} -gt 0 ]]; then
|
||||
echo "HIGH Issues (${high_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity file perms symbolic expected issue risk fix <<< "${finding}"
|
||||
if [[ "${severity}" == "high" ]]; then
|
||||
echo " ⚠️ ${file} (${perms})"
|
||||
echo " Issue: ${issue}"
|
||||
echo " Fix: ${fix}"
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ ${medium_count} -gt 0 ]]; then
|
||||
echo "MEDIUM Issues (${medium_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity file perms symbolic expected issue risk fix <<< "${finding}"
|
||||
if [[ "${severity}" == "medium" ]]; then
|
||||
echo " 💡 ${file} (${perms})"
|
||||
echo " Recommendation: ${issue}"
|
||||
echo " Fix: ${fix}"
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
echo "Summary:"
|
||||
echo " Critical: ${critical_count}"
|
||||
echo " High: ${high_count}"
|
||||
echo " Medium: ${medium_count}"
|
||||
echo " Low: ${low_count}"
|
||||
echo ""
|
||||
|
||||
if [[ ${critical_count} -gt 0 ]]; then
|
||||
echo "Action Required: FIX IMMEDIATELY"
|
||||
elif [[ ${high_count} -gt 0 ]]; then
|
||||
echo "Action Required: YES"
|
||||
else
|
||||
echo "Action Required: REVIEW"
|
||||
fi
|
||||
|
||||
exit 1
|
||||
}
|
||||
|
||||
main "$@"
|
||||
416
commands/security-scan/.scripts/secret-scanner.sh
Executable file
@@ -0,0 +1,416 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# ============================================================================
|
||||
# Secret Scanner - Detect exposed secrets with 40+ patterns
|
||||
# ============================================================================
|
||||
# Purpose: Comprehensive secret detection for API keys, tokens, credentials
|
||||
# Version: 1.0.0
|
||||
# Usage: ./secret-scanner.sh <path> <recursive> <patterns> <exclude> <severity>
|
||||
# Returns: 0=no secrets, 1=secrets found, 2=error
|
||||
# ============================================================================
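# Example (illustrative): recursive scan of the current directory with all
# patterns, no exclusions, reporting findings of high severity and above:
#   ./secret-scanner.sh . true all "" high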
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Source shared validation library
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PLUGIN_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
|
||||
if [[ -f "${PLUGIN_ROOT}/scripts/validate-lib.sh" ]]; then
|
||||
source "${PLUGIN_ROOT}/scripts/validate-lib.sh"
|
||||
fi
|
||||
|
||||
# ============================================================================
|
||||
# Configuration
|
||||
# ============================================================================
|
||||
|
||||
# Default values
|
||||
PATH_TO_SCAN="${1:-.}"
|
||||
RECURSIVE="${2:-true}"
|
||||
PATTERNS="${3:-all}"
|
||||
EXCLUDE="${4:-}"
|
||||
MIN_SEVERITY="${5:-medium}"
|
||||
|
||||
SECRETS_FOUND=0
|
||||
declare -a FINDINGS=()
|
||||
|
||||
# ============================================================================
|
||||
# Secret Pattern Definitions (40+ patterns)
|
||||
# ============================================================================
|
||||
|
||||
# API Keys & Service Tokens
|
||||
declare -A API_KEY_PATTERNS=(
|
||||
# Stripe
|
||||
["stripe_live_key"]='sk_live_[a-zA-Z0-9]{24,}'
|
||||
["stripe_test_key"]='sk_test_[a-zA-Z0-9]{24,}'
|
||||
["stripe_publishable_live"]='pk_live_[a-zA-Z0-9]{24,}'
|
||||
["stripe_publishable_test"]='pk_test_[a-zA-Z0-9]{24,}'
|
||||
|
||||
# OpenAI
|
||||
["openai_api_key"]='sk-[a-zA-Z0-9]{32,}'
|
||||
|
||||
# AWS
|
||||
["aws_access_key_id"]='AKIA[0-9A-Z]{16}'
|
||||
["aws_secret_access_key"]='aws_secret_access_key.*[=:].*[A-Za-z0-9/+=]{40}'
|
||||
|
||||
# Google
|
||||
["google_api_key"]='AIza[0-9A-Za-z_-]{35}'
|
||||
["google_oauth_id"]='[0-9]+-[0-9A-Za-z_-]{32}\.apps\.googleusercontent\.com'
|
||||
|
||||
# GitHub
|
||||
["github_personal_token"]='ghp_[a-zA-Z0-9]{36}'
|
||||
["github_oauth_token"]='gho_[a-zA-Z0-9]{36}'
|
||||
["github_app_token"]='ghs_[a-zA-Z0-9]{36}'
|
||||
["github_user_token"]='ghu_[a-zA-Z0-9]{36}'
|
||||
["github_refresh_token"]='ghr_[a-zA-Z0-9]{36}'
|
||||
|
||||
# Slack
|
||||
["slack_token"]='xox[baprs]-[0-9a-zA-Z]{10,}'
|
||||
["slack_webhook"]='https://hooks\.slack\.com/services/T[0-9A-Z]{8}/B[0-9A-Z]{8}/[0-9A-Za-z]{24}'
|
||||
|
||||
# Twitter
|
||||
["twitter_access_token"]='[0-9]{15,}-[0-9a-zA-Z]{35,44}'
|
||||
["twitter_api_key"]='[A-Za-z0-9]{25}'
|
||||
["twitter_api_secret"]='[A-Za-z0-9]{50}'
|
||||
|
||||
# Facebook
|
||||
["facebook_access_token"]='EAA[0-9A-Za-z]{90,}'
|
||||
|
||||
# SendGrid
|
||||
["sendgrid_api_key"]='SG\.[a-zA-Z0-9_-]{22}\.[a-zA-Z0-9_-]{43}'
|
||||
|
||||
# Mailgun
|
||||
["mailgun_api_key"]='key-[0-9a-zA-Z]{32}'
|
||||
|
||||
# Twilio
|
||||
["twilio_account_sid"]='AC[a-f0-9]{32}'
|
||||
["twilio_api_key"]='SK[a-f0-9]{32}'
|
||||
|
||||
# Azure
|
||||
["azure_storage_key"]='[a-zA-Z0-9/+=]{88}'
|
||||
["azure_connection_string"]='AccountKey=[a-zA-Z0-9/+=]{88}'
|
||||
|
||||
# Generic patterns
|
||||
["generic_api_key"]='api[_-]?key.*[=:].*['\''"][a-zA-Z0-9]{20,}['\''"]'
|
||||
["generic_secret"]='secret.*[=:].*['\''"][a-zA-Z0-9]{20,}['\''"]'
|
||||
["generic_token"]='token.*[=:].*['\''"][a-zA-Z0-9]{20,}['\''"]'
|
||||
["generic_password"]='password.*[=:].*['\''"][^'\''\"]{8,}['\''"]'
|
||||
["bearer_token"]='Bearer [a-zA-Z0-9_-]{20,}'
|
||||
["authorization_header"]='Authorization.*Basic [a-zA-Z0-9+/=]{20,}'
|
||||
)
|
||||
|
||||
# Private Keys
|
||||
declare -A PRIVATE_KEY_PATTERNS=(
|
||||
["rsa_private_key"]='-----BEGIN RSA PRIVATE KEY-----'
|
||||
["openssh_private_key"]='-----BEGIN OPENSSH PRIVATE KEY-----'
|
||||
["private_key_generic"]='-----BEGIN PRIVATE KEY-----'
|
||||
["pgp_private_key"]='-----BEGIN PGP PRIVATE KEY BLOCK-----'
|
||||
["dsa_private_key"]='-----BEGIN DSA PRIVATE KEY-----'
|
||||
["ec_private_key"]='-----BEGIN EC PRIVATE KEY-----'
|
||||
["encrypted_private_key"]='-----BEGIN ENCRYPTED PRIVATE KEY-----'
|
||||
)
|
||||
|
||||
# Cloud Provider Credentials
|
||||
declare -A CLOUD_PATTERNS=(
|
||||
["aws_credentials_block"]='aws_access_key_id|aws_secret_access_key'
|
||||
["gcp_service_account"]='type.*service_account'
|
||||
["azure_client_secret"]='client_secret.*[=:].*[a-zA-Z0-9~._-]{34,}'
|
||||
["heroku_api_key"]='[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
|
||||
)
|
||||
|
||||
# Database Connection Strings
|
||||
declare -A DATABASE_PATTERNS=(
|
||||
["mongodb_connection"]='mongodb(\+srv)?://[^:]+:[^@]+@'
|
||||
["postgres_connection"]='postgres(ql)?://[^:]+:[^@]+@'
|
||||
["mysql_connection"]='mysql://[^:]+:[^@]+@'
|
||||
["redis_connection"]='redis://[^:]+:[^@]+@'
|
||||
)
|
||||
|
||||
# ============================================================================
|
||||
# Severity Classification
|
||||
# ============================================================================
|
||||
|
||||
get_pattern_severity() {
|
||||
local pattern_name="$1"
|
||||
|
||||
case "${pattern_name}" in
|
||||
# CRITICAL: Private keys, production credentials
|
||||
*_private_key*|aws_access_key_id|aws_secret_access_key|*_connection)
|
||||
echo "critical"
|
||||
;;
|
||||
# HIGH: Service API keys, OAuth tokens
|
||||
stripe_live_key|openai_api_key|github_*_token|slack_token|*_access_token)
|
||||
echo "high"
|
||||
;;
|
||||
# MEDIUM: Passwords, secrets, test keys
|
||||
*_password|*_secret|stripe_test_key|generic_*)
|
||||
echo "medium"
|
||||
;;
|
||||
# LOW: Everything else
|
||||
*)
|
||||
echo "low"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Pattern Filtering
|
||||
# ============================================================================
|
||||
|
||||
should_check_pattern() {
|
||||
local pattern_name="$1"
|
||||
local severity
|
||||
severity=$(get_pattern_severity "${pattern_name}")
|
||||
|
||||
# Check if pattern category requested
|
||||
if [[ "${PATTERNS}" != "all" ]]; then
|
||||
case "${PATTERNS}" in
|
||||
*api-keys*) [[ "${pattern_name}" =~ _api_key|_token ]] || return 1 ;;
|
||||
*private-keys*) [[ "${pattern_name}" =~ private_key ]] || return 1 ;;
|
||||
*passwords*) [[ "${pattern_name}" =~ password ]] || return 1 ;;
|
||||
*cloud*) [[ "${pattern_name}" =~ aws_|gcp_|azure_ ]] || return 1 ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
# Check severity threshold
|
||||
case "${MIN_SEVERITY}" in
|
||||
critical)
|
||||
[[ "${severity}" == "critical" ]] || return 1
|
||||
;;
|
||||
high)
|
||||
[[ "${severity}" == "critical" || "${severity}" == "high" ]] || return 1
|
||||
;;
|
||||
medium)
|
||||
[[ "${severity}" != "low" ]] || return 1
|
||||
;;
|
||||
low)
|
||||
# Report all
|
||||
;;
|
||||
esac
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# File Exclusion
|
||||
# ============================================================================
|
||||
|
||||
should_exclude_file() {
|
||||
local file="$1"
|
||||
|
||||
# Default exclusions
|
||||
if [[ "${file}" =~ \.(git|node_modules|vendor|dist|build)/ ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# User-specified exclusions
|
||||
if [[ -n "${EXCLUDE}" ]]; then
|
||||
IFS=',' read -ra EXCLUDE_PATTERNS <<< "${EXCLUDE}"
|
||||
for pattern in "${EXCLUDE_PATTERNS[@]}"; do
|
||||
if [[ "${file}" =~ ${pattern} ]]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Secret Scanning
|
||||
# ============================================================================
|
||||
|
||||
scan_file() {
|
||||
local file="$1"
|
||||
local file_findings=0
|
||||
|
||||
# Skip excluded files
|
||||
if should_exclude_file "${file}"; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Skip binary files
|
||||
if ! file "${file}" 2>/dev/null | grep -q "text"; then
return 0
fi
|
||||
|
||||
# Scan with all pattern categories
|
||||
for pattern_name in "${!API_KEY_PATTERNS[@]}"; do
|
||||
if should_check_pattern "${pattern_name}"; then
|
||||
local pattern="${API_KEY_PATTERNS[${pattern_name}]}"
|
||||
if grep -nE "${pattern}" "${file}" &>/dev/null; then
|
||||
local severity
|
||||
severity=$(get_pattern_severity "${pattern_name}")
|
||||
local line_numbers
|
||||
line_numbers=$(grep -nE "${pattern}" "${file}" | cut -d: -f1 | tr '\n' ',' | sed 's/,$//')
|
||||
FINDINGS+=("${severity}|${file}|${line_numbers}|${pattern_name}|API Key")
|
||||
file_findings=$((file_findings + 1))  # avoid ((var++)) exiting under set -e when var is 0
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern_name in "${!PRIVATE_KEY_PATTERNS[@]}"; do
|
||||
if should_check_pattern "${pattern_name}"; then
|
||||
local pattern="${PRIVATE_KEY_PATTERNS[${pattern_name}]}"
|
||||
if grep -nF "${pattern}" "${file}" &>/dev/null; then
|
||||
local severity
|
||||
severity=$(get_pattern_severity "${pattern_name}")
|
||||
local line_numbers
|
||||
line_numbers=$(grep -nF "${pattern}" "${file}" | cut -d: -f1 | tr '\n' ',' | sed 's/,$//')
|
||||
FINDINGS+=("critical|${file}|${line_numbers}|${pattern_name}|Private Key")
|
||||
file_findings=$((file_findings + 1))
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern_name in "${!CLOUD_PATTERNS[@]}"; do
|
||||
if should_check_pattern "${pattern_name}"; then
|
||||
local pattern="${CLOUD_PATTERNS[${pattern_name}]}"
|
||||
if grep -nE "${pattern}" "${file}" &>/dev/null; then
|
||||
local severity
|
||||
severity=$(get_pattern_severity "${pattern_name}")
|
||||
local line_numbers
|
||||
line_numbers=$(grep -nE "${pattern}" "${file}" | cut -d: -f1 | tr '\n' ',' | sed 's/,$//')
|
||||
FINDINGS+=("${severity}|${file}|${line_numbers}|${pattern_name}|Cloud Credential")
|
||||
file_findings=$((file_findings + 1))
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
for pattern_name in "${!DATABASE_PATTERNS[@]}"; do
|
||||
if should_check_pattern "${pattern_name}"; then
|
||||
local pattern="${DATABASE_PATTERNS[${pattern_name}]}"
|
||||
if grep -nE "${pattern}" "${file}" &>/dev/null; then
|
||||
FINDINGS+=("critical|${file}|$(grep -nE "${pattern}" "${file}" | cut -d: -f1 | tr '\n' ',' | sed 's/,$//')|${pattern_name}|Database Connection")
|
||||
file_findings=$((file_findings + 1))
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
SECRETS_FOUND=$((SECRETS_FOUND + file_findings))
|
||||
return 0
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Main Execution
|
||||
# ============================================================================
|
||||
|
||||
main() {
|
||||
# Validate path
|
||||
if [[ ! -e "${PATH_TO_SCAN}" ]]; then
|
||||
echo "ERROR: Path does not exist: ${PATH_TO_SCAN}" >&2
|
||||
exit 2
|
||||
fi
|
||||
|
||||
echo "Secret Scanner"
|
||||
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
|
||||
echo "Path: ${PATH_TO_SCAN}"
|
||||
echo "Recursive: ${RECURSIVE}"
|
||||
echo "Min Severity: ${MIN_SEVERITY}"
|
||||
echo "Patterns: 50+"
|
||||
echo ""
|
||||
|
||||
# Scan files
|
||||
local files_scanned=0
|
||||
|
||||
if [[ -f "${PATH_TO_SCAN}" ]]; then
|
||||
# Single file
|
||||
scan_file "${PATH_TO_SCAN}"
|
||||
files_scanned=$((files_scanned + 1))
|
||||
elif [[ -d "${PATH_TO_SCAN}" ]]; then
|
||||
# Directory
|
||||
if [[ "${RECURSIVE}" == "true" ]]; then
|
||||
while IFS= read -r -d '' file; do
|
||||
scan_file "${file}"
|
||||
files_scanned=$((files_scanned + 1))
|
||||
done < <(find "${PATH_TO_SCAN}" -type f -print0)
|
||||
else
|
||||
while IFS= read -r file; do
|
||||
scan_file "${file}"
|
||||
files_scanned=$((files_scanned + 1))
|
||||
done < <(find "${PATH_TO_SCAN}" -maxdepth 1 -type f)
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Files Scanned: ${files_scanned}"
|
||||
echo ""
|
||||
|
||||
# Report findings
|
||||
if [[ ${SECRETS_FOUND} -eq 0 ]]; then
|
||||
echo "✅ SUCCESS: No secrets detected"
|
||||
echo "All files clean"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "⚠️ SECRETS DETECTED: ${SECRETS_FOUND}"
|
||||
echo ""
|
||||
|
||||
# Group by severity
|
||||
local critical_count=0
|
||||
local high_count=0
|
||||
local medium_count=0
|
||||
local low_count=0
|
||||
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity file lines pattern type <<< "${finding}"
|
||||
case "${severity}" in
|
||||
critical) ((critical_count++)) ;;
|
||||
high) ((high_count++)) ;;
|
||||
medium) ((medium_count++)) ;;
|
||||
low) ((low_count++)) ;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Print findings by severity
|
||||
if [[ ${critical_count} -gt 0 ]]; then
|
||||
echo "CRITICAL Issues (${critical_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity file lines pattern type <<< "${finding}"
|
||||
if [[ "${severity}" == "critical" ]]; then
|
||||
echo " ❌ ${file}:${lines}"
|
||||
echo " Type: ${type}"
|
||||
echo " Pattern: ${pattern}"
|
||||
echo " Remediation: Remove and rotate immediately"
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ ${high_count} -gt 0 ]]; then
|
||||
echo "HIGH Issues (${high_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity file lines pattern type <<< "${finding}"
|
||||
if [[ "${severity}" == "high" ]]; then
|
||||
echo " ⚠️ ${file}:${lines}"
|
||||
echo " Type: ${type}"
|
||||
echo " Pattern: ${pattern}"
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [[ ${medium_count} -gt 0 ]]; then
|
||||
echo "MEDIUM Issues (${medium_count}):"
|
||||
for finding in "${FINDINGS[@]}"; do
|
||||
IFS='|' read -r severity file lines pattern type <<< "${finding}"
|
||||
if [[ "${severity}" == "medium" ]]; then
|
||||
echo " 💡 ${file}:${lines}"
|
||||
echo " Type: ${type}"
|
||||
echo ""
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
echo "Summary:"
|
||||
echo " Critical: ${critical_count}"
|
||||
echo " High: ${high_count}"
|
||||
echo " Medium: ${medium_count}"
|
||||
echo " Low: ${low_count}"
|
||||
echo ""
|
||||
echo "Action Required: YES"
|
||||
|
||||
exit 1
|
||||
}
|
||||
|
||||
main "$@"
|
||||
386
commands/security-scan/.scripts/url-validator.py
Executable file
@@ -0,0 +1,386 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
URL Validator - Check URL safety and detect malicious patterns
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
from pathlib import Path
|
||||
from urllib.parse import urlparse
|
||||
from typing import List, Dict, Tuple, Set
|
||||
|
||||
# ============================================================================
|
||||
# Configuration
|
||||
# ============================================================================
|
||||
|
||||
class Config:
|
||||
"""Configuration for URL validation"""
|
||||
SUSPICIOUS_TLDS = {'.tk', '.ml', '.ga', '.cf', '.gq'}
|
||||
URL_SHORTENERS = {'bit.ly', 'tinyurl.com', 'goo.gl', 't.co', 'ow.ly'}
|
||||
TRUSTED_REGISTRIES = {
|
||||
'registry.npmjs.org',
|
||||
'pypi.org',
|
||||
'registry.hub.docker.com',
|
||||
'github.com',
|
||||
'gitlab.com'
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# URL Pattern Definitions
|
||||
# ============================================================================
|
||||
|
||||
# Comprehensive URL pattern
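# Matches an http/https/ftp/file scheme (or a bare www./ftp. prefix),
# optional user:pass@ credentials, an IPv4 address or a hostname with TLD,
# an optional port, and an optional path/query/fragment.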
|
||||
URL_PATTERN = re.compile(
|
||||
r'(?:(?:https?|ftp|file)://|www\.|ftp\.)'
|
||||
r'(?:\S+(?::\S*)?@)?'
|
||||
r'(?:'
|
||||
r'(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])'
|
||||
r'(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}'
|
||||
r'(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))'
|
||||
r'|'
|
||||
r'(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)'
|
||||
r'(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*'
|
||||
r'(?:\.(?:[a-z\u00a1-\uffff]{2,}))'
|
||||
r')'
|
||||
r'(?::\d{2,5})?'
|
||||
r'(?:[/?#]\S*)?',
|
||||
re.IGNORECASE
|
||||
)
|
||||
|
||||
# Dangerous code execution patterns
|
||||
DANGEROUS_PATTERNS = {
|
||||
'curl_pipe_sh': re.compile(r'curl\s+[^|]+\|\s*(sh|bash)', re.IGNORECASE),
|
||||
'wget_pipe_sh': re.compile(r'wget\s+[^|]+\|\s*(sh|bash)', re.IGNORECASE),
|
||||
'curl_silent_pipe': re.compile(r'curl\s+-[a-zA-Z]*s[a-zA-Z]*\s+[^|]+\|\s*(sh|bash)', re.IGNORECASE),
|
||||
'bash_redirect': re.compile(r'bash\s+<\s*\(\s*curl', re.IGNORECASE),
|
||||
'eval_fetch': re.compile(r'eval.*fetch\s*\(', re.IGNORECASE),
|
||||
'eval_curl': re.compile(r'eval.*curl', re.IGNORECASE),
|
||||
'exec_wget': re.compile(r'exec\s*\(.*wget', re.IGNORECASE),
|
||||
'rm_rf_url': re.compile(r'rm\s+-rf.*https?://', re.IGNORECASE),
|
||||
}
|
||||
|
||||
# Obfuscation patterns
|
||||
OBFUSCATION_PATTERNS = {
|
||||
'base64_url': re.compile(r'(?:atob|base64|Buffer\.from)\s*\([^)]*https?:', re.IGNORECASE),
|
||||
'hex_encoded': re.compile(r'\\x[0-9a-f]{2}.*https?:', re.IGNORECASE),
|
||||
'unicode_escape': re.compile(r'\\u[0-9a-f]{4}.*https?:', re.IGNORECASE),
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Severity Classification
|
||||
# ============================================================================
|
||||
|
||||
class Severity:
|
||||
CRITICAL = 'critical'
|
||||
HIGH = 'high'
|
||||
MEDIUM = 'medium'
|
||||
LOW = 'low'
|
||||
|
||||
# ============================================================================
|
||||
# Finding Class
|
||||
# ============================================================================
|
||||
|
||||
class Finding:
|
||||
"""Represents a URL security finding"""
|
||||
|
||||
def __init__(self, file_path: str, line_num: int, url: str, issue: str,
|
||||
severity: str, risk: str, remediation: str):
|
||||
self.file = file_path
|
||||
self.line = line_num
|
||||
self.url = url
|
||||
self.issue = issue
|
||||
self.severity = severity
|
||||
self.risk = risk
|
||||
self.remediation = remediation
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
return {
|
||||
'file': self.file,
|
||||
'line': self.line,
|
||||
'url': self.url,
|
||||
'issue': self.issue,
|
||||
'severity': self.severity,
|
||||
'risk': self.risk,
|
||||
'remediation': self.remediation
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# URL Validator
|
||||
# ============================================================================
|
||||
|
||||
class URLValidator:
|
||||
"""Main URL validation class"""
|
||||
|
||||
def __init__(self, path: str, https_only: bool = False,
|
||||
allow_localhost: bool = True, check_code_patterns: bool = True):
|
||||
self.path = Path(path)
|
||||
self.https_only = https_only
|
||||
self.allow_localhost = allow_localhost
|
||||
self.check_code_patterns = check_code_patterns
|
||||
self.findings: List[Finding] = []
|
||||
self.urls_checked = 0
|
||||
self.files_scanned = 0
|
||||
|
||||
def is_text_file(self, file_path: Path) -> bool:
|
||||
"""Check if file is text"""
|
||||
try:
|
||||
with open(file_path, 'rb') as f:
|
||||
chunk = f.read(512)
|
||||
if b'\0' in chunk:
|
||||
return False
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def should_exclude(self, file_path: Path) -> bool:
|
||||
"""Check if file should be excluded"""
|
||||
exclude_patterns = {'.git', 'node_modules', 'vendor', 'dist', 'build', '__pycache__'}
|
||||
return any(part in exclude_patterns for part in file_path.parts)
|
||||
|
||||
def get_context(self, file_path: Path, line_num: int) -> str:
|
||||
"""Get context around a line"""
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
||||
lines = f.readlines()
|
||||
if 0 <= line_num - 1 < len(lines):
|
||||
# Check if in comment or documentation
|
||||
line = lines[line_num - 1].strip()
|
||||
if line.startswith('#') or line.startswith('//') or line.startswith('*'):
|
||||
return 'documentation'
|
||||
if 'test' in str(file_path).lower() or 'spec' in str(file_path).lower():
|
||||
return 'test'
|
||||
if 'example' in str(file_path).lower() or 'mock' in str(file_path).lower():
|
||||
return 'example'
|
||||
return 'production'
|
||||
except Exception:
|
||||
pass
|
||||
return 'unknown'
|
||||
|
||||
def check_url_safety(self, url: str, file_path: Path, line_num: int) -> None:
|
||||
"""Check if URL is safe"""
|
||||
try:
|
||||
parsed = urlparse(url)
|
||||
except Exception:
|
||||
return
|
||||
|
||||
context = self.get_context(file_path, line_num)
|
||||
|
||||
# Check protocol
|
||||
if parsed.scheme == 'http':
|
||||
# Allow localhost in development
|
||||
if self.allow_localhost and parsed.hostname in ('localhost', '127.0.0.1', '0.0.0.0'):
|
||||
return
|
||||
|
||||
# Enforce HTTPS
|
||||
if self.https_only or context == 'production':
|
||||
severity = Severity.HIGH if context == 'production' else Severity.MEDIUM
|
||||
self.findings.append(Finding(
|
||||
str(file_path), line_num, url,
|
||||
'Non-HTTPS URL',
|
||||
severity,
|
||||
'Man-in-the-middle attacks, data interception',
|
||||
'Change to HTTPS: ' + url.replace('http://', 'https://')
|
||||
))
|
||||
return
|
||||
|
||||
# Check for FTP/Telnet
|
||||
if parsed.scheme in ('ftp', 'telnet'):
|
||||
self.findings.append(Finding(
|
||||
str(file_path), line_num, url,
|
||||
'Insecure protocol',
|
||||
Severity.HIGH,
|
||||
'Unencrypted data transmission',
|
||||
'Use secure alternatives (HTTPS, SFTP, SSH)'
|
||||
))
|
||||
return
|
||||
|
||||
# Check for file:// protocol
|
||||
if parsed.scheme == 'file':
|
||||
self.findings.append(Finding(
|
||||
str(file_path), line_num, url,
|
||||
'File protocol detected',
|
||||
Severity.MEDIUM,
|
||||
'Potential security risk, path disclosure',
|
||||
'Review necessity of file:// protocol'
|
||||
))
|
||||
|
||||
# Check for IP addresses
|
||||
if parsed.hostname and re.match(r'^\d+\.\d+\.\d+\.\d+$', parsed.hostname):
|
||||
self.findings.append(Finding(
|
||||
str(file_path), line_num, url,
|
||||
'IP address instead of domain',
|
||||
Severity.LOW,
|
||||
'Harder to verify legitimacy, no certificate validation',
|
||||
'Use domain name instead of IP address'
|
||||
))
|
||||
|
||||
# Check for suspicious TLDs
|
||||
if parsed.hostname:
|
||||
for tld in Config.SUSPICIOUS_TLDS:
|
||||
if parsed.hostname.endswith(tld):
|
||||
self.findings.append(Finding(
|
||||
str(file_path), line_num, url,
|
||||
'Suspicious TLD',
|
||||
Severity.MEDIUM,
|
||||
'Often used for malicious purposes',
|
||||
'Verify domain legitimacy before use'
|
||||
))
|
||||
break
|
||||
|
||||
# Check for URL shorteners
|
||||
if parsed.hostname in Config.URL_SHORTENERS:
|
||||
self.findings.append(Finding(
|
||||
str(file_path), line_num, url,
|
||||
'Shortened URL',
|
||||
Severity.LOW,
|
||||
'Cannot verify destination',
|
||||
'Expand URL and use full destination'
|
||||
))
|
||||
|
||||
def check_dangerous_patterns(self, content: str, file_path: Path) -> None:
|
||||
"""Check for dangerous code execution patterns"""
|
||||
if not self.check_code_patterns:
|
||||
return
|
||||
|
||||
lines = content.split('\n')
|
||||
|
||||
for pattern_name, pattern in DANGEROUS_PATTERNS.items():
|
||||
for match in pattern.finditer(content):
|
||||
line_num = content[:match.start()].count('\n') + 1
|
||||
self.findings.append(Finding(
|
||||
str(file_path), line_num, match.group(0),
|
||||
'Remote code execution pattern',
|
||||
Severity.CRITICAL,
|
||||
                    f'Executes arbitrary code from remote source ({pattern_name})',
                    'Download, verify checksum, review code, then execute'
                ))

        for pattern_name, pattern in OBFUSCATION_PATTERNS.items():
            for match in pattern.finditer(content):
                line_num = content[:match.start()].count('\n') + 1
                self.findings.append(Finding(
                    str(file_path), line_num, match.group(0)[:50] + '...',
                    'Obfuscated URL',
                    Severity.HIGH,
                    f'URL obfuscation detected ({pattern_name})',
                    'Review obfuscated content for malicious intent'
                ))

    def scan_file(self, file_path: Path) -> None:
        """Scan a single file"""
        if self.should_exclude(file_path) or not self.is_text_file(file_path):
            return

        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()

            self.files_scanned += 1

            # Check for dangerous patterns first
            self.check_dangerous_patterns(content, file_path)

            # Find all URLs
            lines = content.split('\n')
            for line_num, line in enumerate(lines, 1):
                for match in URL_PATTERN.finditer(line):
                    url = match.group(0)
                    self.urls_checked += 1
                    self.check_url_safety(url, file_path, line_num)

        except Exception as e:
            print(f"Warning: Could not scan {file_path}: {e}", file=sys.stderr)

    def scan(self) -> None:
        """Scan path for URLs"""
        if self.path.is_file():
            self.scan_file(self.path)
        elif self.path.is_dir():
            for file_path in self.path.rglob('*'):
                if file_path.is_file():
                    self.scan_file(file_path)

    def report(self) -> int:
        """Generate report and return exit code"""
        print("URL Safety Scan Results")
        print("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
        print(f"Path: {self.path}")
        print(f"Files Scanned: {self.files_scanned}")
        print(f"URLs Checked: {self.urls_checked}")
        print()

        if not self.findings:
            print("✅ SUCCESS: All URLs safe")
            print("No unsafe URLs or malicious patterns detected")
            return 0

        # Group by severity
        critical = [f for f in self.findings if f.severity == Severity.CRITICAL]
        high = [f for f in self.findings if f.severity == Severity.HIGH]
        medium = [f for f in self.findings if f.severity == Severity.MEDIUM]
        low = [f for f in self.findings if f.severity == Severity.LOW]

        print(f"⚠️ UNSAFE URLS DETECTED: {len(self.findings)}")
        print()

        if critical:
            print(f"CRITICAL Issues ({len(critical)}):")
            for finding in critical:
                print(f"  ❌ {finding.file}:{finding.line}")
                print(f"     Pattern: {finding.url}")
                print(f"     Risk: {finding.risk}")
                print(f"     Remediation: {finding.remediation}")
                print()

        if high:
            print(f"HIGH Issues ({len(high)}):")
            for finding in high:
                print(f"  ⚠️ {finding.file}:{finding.line}")
                print(f"     URL: {finding.url}")
                print(f"     Issue: {finding.issue}")
                print(f"     Remediation: {finding.remediation}")
                print()

        if medium:
            print(f"MEDIUM Issues ({len(medium)}):")
            for finding in medium:
                print(f"  💡 {finding.file}:{finding.line}")
                print(f"     Issue: {finding.issue}")
                print()

        print("Summary:")
        print(f"  Critical: {len(critical)}")
        print(f"  High: {len(high)}")
        print(f"  Medium: {len(medium)}")
        print(f"  Low: {len(low)}")
        print()
        print("Action Required: YES" if (critical or high) else "Review Recommended")

        return 1


# ============================================================================
# Main
# ============================================================================

def main():
    if len(sys.argv) < 2:
        print("Usage: url-validator.py <path> [https_only] [allow_localhost] [check_code_patterns]")
        sys.exit(2)

    path = sys.argv[1]
    https_only = sys.argv[2].lower() == 'true' if len(sys.argv) > 2 else False
    allow_localhost = sys.argv[3].lower() == 'true' if len(sys.argv) > 3 else True
    check_code_patterns = sys.argv[4].lower() == 'true' if len(sys.argv) > 4 else True

    if not os.path.exists(path):
        print(f"ERROR: Path does not exist: {path}", file=sys.stderr)
        sys.exit(2)

    validator = URLValidator(path, https_only, allow_localhost, check_code_patterns)
    validator.scan()
    sys.exit(validator.report())


if __name__ == '__main__':
    main()
|
||||
353
commands/security-scan/check-permissions.md
Normal file
353
commands/security-scan/check-permissions.md
Normal file
@@ -0,0 +1,353 @@
|
||||
## Operation: Check File Permissions
|
||||
|
||||
Audit file permissions to detect world-writable files, overly permissive scripts, and inappropriate executability.
|
||||
|
||||
### Parameters from $ARGUMENTS
|
||||
|
||||
- **path**: Target directory to scan (required)
|
||||
- **strict**: Enforce strict permission rules (true|false, default: false)
|
||||
- **check-executables**: Verify executable files have shebangs (true|false, default: true)
|
||||
- **report-all**: Report all permissions, not just issues (true|false, default: false)
|
||||
|
||||
### Permission Rules
|
||||
|
||||
**Forbidden Permissions** (CRITICAL):
|
||||
- **777** (rwxrwxrwx) - World-writable and executable
|
||||
- Risk: Anyone can modify and execute
|
||||
- Remediation: chmod 755 (directories) or 644 (files)
|
||||
|
||||
- **666** (rw-rw-rw-) - World-writable files
|
||||
- Risk: Anyone can modify content
|
||||
- Remediation: chmod 644 (owner write, others read)
|
||||
|
||||
- **000** (---------) - Inaccessible files
|
||||
- Risk: Unusable file, potential error
|
||||
- Remediation: chmod 644 or remove
|
||||
|
||||
**Scripts & Executables** (HIGH priority):
|
||||
- Shell scripts (*.sh, *.bash) SHOULD be:
|
||||
- 755 (rwxr-xr-x) or 750 (rwxr-x---)
|
||||
- Have shebang (#!/bin/bash, #!/usr/bin/env bash)
|
||||
- Not world-writable
|
||||
|
||||
- Python scripts (*.py) SHOULD be:
|
||||
- 755 if executable, 644 if library
|
||||
- Have shebang if executable (#!/usr/bin/env python3)
|
||||
|
||||
- Node.js scripts (*.js, *.ts) SHOULD be:
|
||||
- 644 (not executable, run via node)
|
||||
- Exception: CLI tools can be 755 with shebang
|
||||
|
||||
**Configuration Files** (MEDIUM priority):
|
||||
- Config files (.env, *.json, *.yaml, *.conf) SHOULD be:
|
||||
- 600 (rw-------) for sensitive configs
|
||||
- 644 (rw-r--r--) for non-sensitive
|
||||
- Never 666 or 777
|
||||
|
||||
- SSH/GPG files MUST be:
|
||||
- Private keys: 600 (rw-------)
|
||||
- Public keys: 644 (rw-r--r--)
|
||||
- ~/.ssh directory: 700 (rwx------)
|
||||
|
||||
**Directories** (MEDIUM priority):
|
||||
- Standard directories: 755 (rwxr-xr-x)
|
||||
- Private directories: 750 or 700
|
||||
- Never 777 (world-writable)
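
The forbidden modes above can be detected mechanically. A minimal sketch, assuming only that `find` supports octal `-perm -mode` tests; the plugin's `permission-checker.sh` performs the real audit:

```bash
#!/usr/bin/env bash
# Minimal sketch: flag anything with the others-write bit set (covers 666 and 777).
target="${1:-.}"

# World-writable regular files
find "$target" -type f -perm -0002 ! -path '*/.git/*' -print | while read -r f; do
    echo "CRITICAL: world-writable file: $f"
done

# World-writable directories
find "$target" -type d -perm -0002 ! -path '*/.git/*' -print | while read -r d; do
    echo "CRITICAL: world-writable directory: $d"
done
```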
|
||||
|
||||
### Workflow
|
||||
|
||||
1. **Parse arguments**
|
||||
```
|
||||
Extract path, strict, check-executables, report-all
|
||||
Validate path exists
|
||||
Determine scan scope
|
||||
```
|
||||
|
||||
2. **Execute permission checker**
|
||||
```bash
|
||||
Execute .scripts/permission-checker.sh "$path" "$strict" "$check_executables" "$report_all"
|
||||
|
||||
Returns:
|
||||
- 0: All permissions correct
|
||||
- 1: Permission issues found
|
||||
- 2: Scan error
|
||||
```
|
||||
|
||||
3. **Analyze results**
|
||||
```
|
||||
Categorize findings:
|
||||
- CRITICAL: 777, 666, world-writable
|
||||
- HIGH: Executables without shebangs, 775 on sensitive files
|
||||
- MEDIUM: Overly permissive configs, wrong directory perms
|
||||
- LOW: Inconsistent permissions, non-executable scripts
|
||||
|
||||
Generate fix commands
|
||||
```
|
||||
|
||||
4. **Format output**
|
||||
```
|
||||
File Permission Audit Results
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
Path: <path>
|
||||
Files Checked: <count>
|
||||
|
||||
CRITICAL Issues (<count>):
|
||||
❌ scripts/deploy.sh (777)
|
||||
Current: rwxrwxrwx (777)
|
||||
Issue: World-writable and executable
|
||||
Risk: Anyone can modify and execute script
|
||||
Fix: chmod 755 scripts/deploy.sh
|
||||
|
||||
❌ config/secrets.json (666)
|
||||
Current: rw-rw-rw- (666)
|
||||
Issue: World-writable configuration
|
||||
Risk: Secrets can be modified by anyone
|
||||
Fix: chmod 600 config/secrets.json
|
||||
|
||||
HIGH Issues (<count>):
|
||||
⚠️ bin/cli.sh (755) - Missing shebang
|
||||
Issue: Executable without shebang
|
||||
Fix: Add #!/usr/bin/env bash to first line
|
||||
|
||||
MEDIUM Issues (<count>):
|
||||
💡 .env (644)
|
||||
Current: rw-r--r-- (644)
|
||||
Recommendation: Restrict to owner only
|
||||
Fix: chmod 600 .env
|
||||
|
||||
Summary:
|
||||
- Total issues: <count>
|
||||
- Critical: <count> (fix immediately)
|
||||
- Fixes available: Yes
|
||||
```
|
||||
|
||||
### Permission Patterns
|
||||
|
||||
**Standard File Permissions**:
|
||||
```
|
||||
644 (rw-r--r--) - Regular files, documentation
|
||||
755 (rwxr-xr-x) - Executable scripts, directories
|
||||
600 (rw-------) - Sensitive configs, private keys
|
||||
700 (rwx------) - Private directories (.ssh, .gnupg)
|
||||
```
|
||||
|
||||
**Forbidden Permissions**:
|
||||
```
|
||||
777 (rwxrwxrwx) - Never use (world-writable + executable)
|
||||
666 (rw-rw-rw-) - Never use (world-writable)
|
||||
000 (---------) - Inaccessible (likely error)
|
||||
```
|
||||
|
||||
**Context-Specific**:
|
||||
```
|
||||
Shell scripts: 755 with #!/bin/bash
|
||||
Python scripts: 755 with #!/usr/bin/env python3 (if CLI)
|
||||
644 without shebang (if library)
|
||||
Config files: 600 (sensitive) or 644 (public)
|
||||
SSH keys: 600 (private), 644 (public)
|
||||
Directories: 755 (public), 700 (private)
|
||||
```
|
||||
|
||||
### Shebang Validation
|
||||
|
||||
**Valid shebangs**:
|
||||
```bash
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env node
|
||||
#!/usr/bin/env ruby
|
||||
```
|
||||
|
||||
**Invalid patterns**:
|
||||
```bash
|
||||
#!/bin/sh              # Too generic; use bash explicitly for bash-specific scripts
#! /bin/bash           # Space after #! (works on Linux, but non-portable style)
# /usr/bin/env bash    # Missing ! (treated as a comment, not a shebang)
|
||||
```
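
A minimal sketch of the shebang check, assuming "executable" means any file with the owner-execute bit set and that binary files are filtered elsewhere:

```bash
#!/usr/bin/env bash
# Minimal sketch: report executable files whose first line is not a shebang.
target="${1:-.}"

find "$target" -type f -perm -0100 ! -path '*/.git/*' -print | while read -r f; do
    first_line="$(head -n 1 "$f" 2>/dev/null)"
    case "$first_line" in
        '#!'*) ;;                                            # has a shebang
        *) echo "HIGH: executable without shebang: $f" ;;
    esac
done
```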
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Check all permissions in current directory
|
||||
/security-scan permissions path:.
|
||||
|
||||
# Strict mode - flag all non-standard permissions
|
||||
/security-scan permissions path:. strict:true
|
||||
|
||||
# Check executables for shebangs
|
||||
/security-scan permissions path:./scripts/ check-executables:true
|
||||
|
||||
# Report all files, not just issues
|
||||
/security-scan permissions path:. report-all:true
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
**Path not found**:
|
||||
```
|
||||
ERROR: Path does not exist: <path>
|
||||
Remediation: Verify path and try again
|
||||
```
|
||||
|
||||
**Permission denied**:
|
||||
```
|
||||
ERROR: Cannot read permissions for: <path>
|
||||
Remediation: Run with sufficient privileges or check ownership
|
||||
```
|
||||
|
||||
**No issues found**:
|
||||
```
|
||||
SUCCESS: All file permissions correct
|
||||
No action required
|
||||
```
|
||||
|
||||
### Automated Fixes
|
||||
|
||||
**Critical Issues**:
|
||||
```bash
|
||||
# Fix world-writable files
|
||||
find . -type f -perm 0666 -exec chmod 644 {} \;
|
||||
find . -type f -perm 0777 -exec chmod 755 {} \;
|
||||
|
||||
# Fix world-writable directories
|
||||
find . -type d -perm 0777 -exec chmod 755 {} \;
|
||||
```
|
||||
|
||||
**Sensitive Files**:
|
||||
```bash
|
||||
# Restrict sensitive configs
|
||||
chmod 600 .env
|
||||
chmod 600 config/credentials.json
|
||||
chmod 600 ~/.ssh/id_rsa
|
||||
|
||||
# Secure directories
|
||||
chmod 700 ~/.ssh
|
||||
chmod 700 ~/.gnupg
|
||||
```
|
||||
|
||||
**Executables**:
|
||||
```bash
|
||||
# Make scripts executable
|
||||
chmod +x scripts/*.sh
|
||||
|
||||
# Remove execute from libraries
|
||||
chmod 644 src/**/*.py   # requires bash globstar (shopt -s globstar)
|
||||
```
|
||||
|
||||
### Platform-Specific Notes
|
||||
|
||||
**Unix/Linux**:
|
||||
- Full permission support (owner/group/other)
|
||||
- Numeric (755) or symbolic (rwxr-xr-x) modes
|
||||
- Respect umask settings
|
||||
|
||||
**macOS**:
|
||||
- Same as Unix/Linux
|
||||
- Additional extended attributes (xattr)
|
||||
- May have quarantine attributes on downloaded files
|
||||
|
||||
**Windows (WSL/Git Bash)**:
|
||||
- Limited permission support
|
||||
- Executable bit preserved in git
|
||||
- May show 755 for all files by default
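
Reading the octal mode itself also differs by platform. A small helper, assuming GNU `stat` on Linux and BSD `stat` on macOS (verify the format flags for your environment):

```bash
# Minimal sketch: print a file's octal permission bits portably.
get_mode() {
    # Try GNU stat first (Linux); fall back to BSD stat (macOS).
    stat -c '%a' "$1" 2>/dev/null || stat -f '%OLp' "$1"
}

get_mode scripts/deploy.sh    # e.g. prints "755"
```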
|
||||
|
||||
### Strict Mode Rules
|
||||
|
||||
When `strict:true`:
|
||||
|
||||
**Additional checks**:
|
||||
- Flag 775 on any file (group-writable)
|
||||
- Flag 755 on non-executable files
|
||||
- Require 600 for all .env files
|
||||
- Require 700 for all .ssh, .gnupg directories
|
||||
- Flag inconsistent permissions in same directory
|
||||
|
||||
**Stricter recommendations**:
|
||||
- Config files: Must be 600
|
||||
- Scripts: Must have correct shebang
|
||||
- No group-writable files
|
||||
- Directories: 750 instead of 755
|
||||
|
||||
### Remediation Guidance
|
||||
|
||||
**For world-writable files (777, 666)**:
|
||||
1. Determine correct permission level
|
||||
2. Apply fix immediately: `chmod 644 <file>` or `chmod 755 <executable>`
|
||||
3. Verify no unauthorized modifications
|
||||
4. Check git history for permission changes
|
||||
5. Document required permissions in README
|
||||
|
||||
**For executables without shebangs**:
|
||||
1. Add appropriate shebang:
|
||||
```bash
|
||||
#!/usr/bin/env bash
|
||||
```
|
||||
2. Verify script runs correctly
|
||||
3. Consider using absolute path if specific version needed
|
||||
|
||||
**For overly permissive configs**:
|
||||
1. Restrict to owner: `chmod 600 <config>`
|
||||
2. Verify application can still read
|
||||
3. Update deployment documentation
|
||||
4. Use principle of least privilege
|
||||
|
||||
**For inconsistent permissions**:
|
||||
1. Establish permission standards
|
||||
2. Document in CONTRIBUTING.md
|
||||
3. Add pre-commit hook to enforce
|
||||
4. Use tools like .editorconfig
|
||||
|
||||
### Security Best Practices
|
||||
|
||||
**General**:
|
||||
- Use most restrictive permissions possible
|
||||
- Never use 777 or 666
|
||||
- Sensitive files: 600 (owner read/write only)
|
||||
- Executables: 755 (everyone execute, owner write)
|
||||
- Configs: 644 (everyone read, owner write) or 600 (owner only)
|
||||
|
||||
**For Scripts**:
|
||||
- Always include shebang
|
||||
- Make executable only if meant to be run directly
|
||||
- Libraries should be 644, not 755
|
||||
- Verify no secrets in scripts
|
||||
|
||||
**For Keys**:
|
||||
- Private keys: 600, or SSH will refuse to use them
|
||||
- Public keys: 644
|
||||
- Key directories: 700
|
||||
- Never group or world readable
|
||||
|
||||
### Output Format
|
||||
|
||||
```json
|
||||
{
|
||||
"scan_type": "permissions",
|
||||
"path": "<path>",
|
||||
"files_checked": <count>,
|
||||
"issues_found": <count>,
|
||||
"severity_breakdown": {
|
||||
"critical": <count>,
|
||||
"high": <count>,
|
||||
"medium": <count>,
|
||||
"low": <count>
|
||||
},
|
||||
"findings": [
|
||||
{
|
||||
"file": "<file_path>",
|
||||
"current_permissions": "<octal>",
|
||||
"current_symbolic": "<symbolic>",
|
||||
"issue": "<issue_description>",
|
||||
"severity": "<severity>",
|
||||
"risk": "<risk_description>",
|
||||
"recommended_permissions": "<octal>",
|
||||
"fix_command": "chmod <perms> <file>"
|
||||
}
|
||||
],
|
||||
"fixes_available": <boolean>,
|
||||
"action_required": <boolean>
|
||||
}
|
||||
```
|
||||
|
||||
**Request**: $ARGUMENTS
|
||||
262
commands/security-scan/check-urls.md
Normal file
262
commands/security-scan/check-urls.md
Normal file
@@ -0,0 +1,262 @@
|
||||
## Operation: Check URL Safety
|
||||
|
||||
Validate URL safety, enforce HTTPS, and detect malicious patterns in URLs and code.
|
||||
|
||||
### Parameters from $ARGUMENTS
|
||||
|
||||
- **path**: Target directory or file to scan (required)
|
||||
- **https-only**: Enforce HTTPS for all URLs (true|false, default: false)
|
||||
- **allow-localhost**: Allow http://localhost URLs (true|false, default: true)
|
||||
- **check-code-patterns**: Check for dangerous code execution patterns (true|false, default: true)
|
||||
|
||||
### URL Safety Checks
|
||||
|
||||
**Protocol Validation**:
|
||||
- HTTPS enforcement (production contexts)
|
||||
- HTTP allowed only for localhost/127.0.0.1
|
||||
- FTP/telnet flagged as insecure
|
||||
- file:// protocol flagged (potential security risk)
|
||||
|
||||
**Malicious Patterns**:
|
||||
- `curl ... | sh` - Remote code execution
|
||||
- `wget ... | bash` - Remote script execution
|
||||
- `eval(fetch(...))` - Dynamic code execution
|
||||
- `exec(...)` with URLs - Command injection risk
|
||||
- `rm -rf` in scripts downloaded from URLs
|
||||
- Obfuscated URLs (base64, hex encoded)
|
||||
|
||||
**Domain Validation**:
|
||||
- Check for typosquatting (common package registries)
|
||||
- Suspicious TLDs (.tk, .ml, .ga, .cf)
|
||||
- IP addresses instead of domains
|
||||
- Shortened URLs (bit.ly, tinyurl) - potential phishing
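
A minimal grep-based sketch of these checks, illustrative only since `url-validator.py` performs the actual scan:

```bash
#!/usr/bin/env bash
# Minimal sketch: pipe-to-shell patterns and non-localhost http:// URLs.
target="${1:-.}"

# curl/wget output piped straight into a shell
grep -rnE --exclude-dir=.git '(curl|wget)[^|;]*[|][[:space:]]*(ba|z)?sh' "$target" || true

# http:// URLs, excluding localhost and the loopback address
grep -rn --exclude-dir=.git 'http://' "$target" \
    | grep -vE 'http://(localhost|127\.0\.0\.1)' || true
```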
|
||||
|
||||
### Workflow
|
||||
|
||||
1. **Parse arguments**
|
||||
```
|
||||
Extract path, https-only, allow-localhost, check-code-patterns
|
||||
Validate path exists
|
||||
Determine scan scope
|
||||
```
|
||||
|
||||
2. **Execute URL validator**
|
||||
```bash
|
||||
Execute .scripts/url-validator.py "$path" "$https_only" "$allow_localhost" "$check_code_patterns"
|
||||
|
||||
Returns:
|
||||
- 0: All URLs safe
|
||||
- 1: Unsafe URLs detected
|
||||
- 2: Validation error
|
||||
```
|
||||
|
||||
3. **Analyze results**
|
||||
```
|
||||
Categorize findings:
|
||||
- CRITICAL: Remote code execution patterns
|
||||
- HIGH: Non-HTTPS in production, obfuscated URLs
|
||||
- MEDIUM: HTTP in non-localhost, suspicious TLDs
|
||||
- LOW: Shortened URLs, IP addresses
|
||||
|
||||
Generate context-aware remediation
|
||||
```
|
||||
|
||||
4. **Format output**
|
||||
```
|
||||
URL Safety Scan Results
|
||||
━━━━━━━━━━━━━━━━━━━━━━
|
||||
Path: <path>
|
||||
URLs Scanned: <count>
|
||||
|
||||
CRITICAL Issues (<count>):
|
||||
❌ <file>:<line>: Remote code execution pattern
|
||||
Pattern: curl https://example.com/script.sh | bash
|
||||
Risk: Executes arbitrary code without verification
|
||||
Remediation: Download, verify, then execute
|
||||
|
||||
HIGH Issues (<count>):
|
||||
⚠️ <file>:<line>: Non-HTTPS URL in production context
|
||||
URL: http://api.example.com
|
||||
Risk: Man-in-the-middle attacks
|
||||
Remediation: Use HTTPS
|
||||
|
||||
Summary:
|
||||
- Total URLs: <count>
|
||||
- Safe: <count>
|
||||
- Unsafe: <count>
|
||||
- Action required: <yes|no>
|
||||
```
|
||||
|
||||
### Dangerous Code Patterns
|
||||
|
||||
**Remote Execution** (CRITICAL):
|
||||
```bash
|
||||
# Dangerous patterns
|
||||
curl https://example.com/install.sh | bash
|
||||
wget -qO- https://get.example.com | sh
|
||||
eval "$(curl -fsSL https://example.com/script)"
|
||||
bash <(curl -s https://example.com/setup.sh)
|
||||
```
|
||||
|
||||
**Dynamic Code Execution** (HIGH):
|
||||
```javascript
|
||||
// Dangerous patterns
|
||||
eval(fetch(url).then(r => r.text()))
|
||||
new Function(await (await fetch(url)).text())()
|
||||
exec(`curl ${url}`)
|
||||
```
|
||||
|
||||
**Command Injection** (HIGH):
|
||||
```bash
|
||||
# Vulnerable patterns
|
||||
wget $USER_INPUT
|
||||
curl "$UNTRUSTED_URL"
|
||||
git clone $URL # without validation
|
||||
```
|
||||
|
||||
### Safe Alternatives
|
||||
|
||||
**Instead of curl | sh**:
|
||||
```bash
|
||||
# Safe: Download, verify, then execute
|
||||
wget https://example.com/install.sh
|
||||
sha256sum -c install.sh.sha256
|
||||
chmod +x install.sh
|
||||
./install.sh
|
||||
```
|
||||
|
||||
**Instead of eval(fetch())**:
|
||||
```javascript
|
||||
// Safe: Fetch as data, validate, then use
|
||||
const response = await fetch(url);
|
||||
const data = await response.json();
|
||||
// Process data, not as code
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Check all URLs, enforce HTTPS
|
||||
/security-scan urls path:. https-only:true
|
||||
|
||||
# Allow localhost HTTP during development
|
||||
/security-scan urls path:. https-only:true allow-localhost:true
|
||||
|
||||
# Check for code execution patterns
|
||||
/security-scan urls path:./scripts/ check-code-patterns:true
|
||||
|
||||
# Scan specific file
|
||||
/security-scan urls path:./install.sh
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
**Path not found**:
|
||||
```
|
||||
ERROR: Path does not exist: <path>
|
||||
Remediation: Verify path and try again
|
||||
```
|
||||
|
||||
**No URLs found**:
|
||||
```
|
||||
INFO: No URLs detected
|
||||
No action required
|
||||
```
|
||||
|
||||
**Python unavailable**:
|
||||
```
|
||||
ERROR: Python3 not available
|
||||
Remediation: Install Python 3.x or skip URL validation
|
||||
```
|
||||
|
||||
### Context-Aware Rules
|
||||
|
||||
**Production contexts** (strict):
|
||||
- package.json scripts
|
||||
- Dockerfiles
|
||||
- CI/CD configs (.github/, .gitlab-ci.yml)
|
||||
- Installation scripts (install.sh, setup.sh)
|
||||
→ Enforce HTTPS, no remote execution
|
||||
|
||||
**Development contexts** (relaxed):
|
||||
- Test files (*test*, *spec*)
|
||||
- Mock data
|
||||
- Local development configs
|
||||
→ Allow HTTP for localhost
|
||||
|
||||
**Documentation contexts** (informational):
|
||||
- README.md
|
||||
- *.md files
|
||||
- Comments
|
||||
→ Flag but don't fail
|
||||
|
||||
### URL Categories
|
||||
|
||||
**Registry URLs** (validate carefully):
|
||||
- npm: https://registry.npmjs.org
|
||||
- PyPI: https://pypi.org
|
||||
- Docker: https://registry.hub.docker.com
|
||||
- GitHub: https://github.com
|
||||
→ Verify exact domain, check for typosquatting
|
||||
|
||||
**CDN URLs** (HTTPS required):
|
||||
- https://cdn.jsdelivr.net
|
||||
- https://unpkg.com
|
||||
- https://cdnjs.cloudflare.com
|
||||
→ Must use HTTPS, verify integrity hashes
|
||||
|
||||
**Shortened URLs** (flag for review):
|
||||
- bit.ly, tinyurl.com, goo.gl
|
||||
→ Cannot verify destination, recommend expanding
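
A minimal sketch for classifying hosts against the categories above, assuming URLs arrive one per line on stdin (for example, from a grep pass that extracts `https?://` links from the tree):

```bash
#!/usr/bin/env bash
# Minimal sketch: flag shortener domains and suspicious TLDs in a URL list.
while read -r url; do
    host="$(echo "$url" | sed -E 's#^[a-z]+://([^/:]+).*#\1#')"
    case "$host" in
        bit.ly|tinyurl.com|goo.gl)  echo "LOW: shortened URL: $url" ;;
        *.tk|*.ml|*.ga|*.cf)        echo "MEDIUM: suspicious TLD: $url" ;;
    esac
done
```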
|
||||
|
||||
### Remediation Guidance
|
||||
|
||||
**For remote code execution**:
|
||||
1. Remove pipe-to-shell patterns
|
||||
2. Download scripts explicitly
|
||||
3. Verify checksums/signatures
|
||||
4. Review code before execution
|
||||
5. Use official package managers when possible
|
||||
|
||||
**For non-HTTPS URLs**:
|
||||
1. Update to HTTPS version
|
||||
2. Verify certificate validity
|
||||
3. Pin certificate if highly sensitive
|
||||
4. Consider using subresource integrity (SRI) for CDNs
|
||||
|
||||
**For suspicious URLs**:
|
||||
1. Verify domain legitimacy
|
||||
2. Check for typosquatting
|
||||
3. Expand shortened URLs
|
||||
4. Review destination manually
|
||||
|
||||
### Output Format
|
||||
|
||||
```json
|
||||
{
|
||||
"scan_type": "urls",
|
||||
"path": "<path>",
|
||||
"urls_scanned": <count>,
|
||||
"unsafe_urls": <count>,
|
||||
"severity_breakdown": {
|
||||
"critical": <count>,
|
||||
"high": <count>,
|
||||
"medium": <count>,
|
||||
"low": <count>
|
||||
},
|
||||
"findings": [
|
||||
{
|
||||
"file": "<file_path>",
|
||||
"line": <line_number>,
|
||||
"url": "<url>",
|
||||
"issue": "<issue_type>",
|
||||
"severity": "<severity>",
|
||||
"risk": "<risk_description>",
|
||||
"remediation": "<action>"
|
||||
}
|
||||
],
|
||||
"action_required": <boolean>
|
||||
}
|
||||
```
|
||||
|
||||
**Request**: $ARGUMENTS
|
||||
469
commands/security-scan/full-audit.md
Normal file
469
commands/security-scan/full-audit.md
Normal file
@@ -0,0 +1,469 @@
|
||||
## Operation: Full Security Audit
|
||||
|
||||
Execute comprehensive security audit combining all security scans: secrets, URLs, files, and permissions.
|
||||
|
||||
### Parameters from $ARGUMENTS
|
||||
|
||||
- **path**: Target directory to audit (required)
|
||||
- **severity**: Minimum severity to report (critical|high|medium|low, default: medium)
|
||||
- **strict**: Enable strict mode for all checks (true|false, default: false)
|
||||
- **format**: Output format (text|json|markdown, default: text)
|
||||
|
||||
### Full Audit Workflow
|
||||
|
||||
1. **Initialize audit**
|
||||
```
|
||||
Validate path exists
|
||||
Parse severity threshold
|
||||
Set strict mode for all sub-scans
|
||||
Initialize results aggregator
|
||||
```
|
||||
|
||||
2. **Execute all security scans**
|
||||
```
|
||||
PARALLEL EXECUTION (where possible):
|
||||
|
||||
┌─ Scan 1: Secret Detection
|
||||
│ Read scan-secrets.md
|
||||
│ Execute with path, recursive:true, severity
|
||||
│ Capture results
|
||||
│
|
||||
├─ Scan 2: URL Safety Check
|
||||
│ Read check-urls.md
|
||||
│ Execute with path, https-only, check-code-patterns
|
||||
│ Capture results
|
||||
│
|
||||
├─ Scan 3: Dangerous Files
|
||||
│ Read scan-files.md
|
||||
│ Execute with path, include-hidden, check-gitignore
|
||||
│ Capture results
|
||||
│
|
||||
└─ Scan 4: Permission Audit
|
||||
Read check-permissions.md
|
||||
Execute with path, strict, check-executables
|
||||
Capture results
|
||||
```
|
||||
|
||||
3. **Aggregate results**
|
||||
```
|
||||
Combine all findings
|
||||
Deduplicate issues
|
||||
Sort by severity:
|
||||
1. CRITICAL issues (block publication)
|
||||
2. HIGH issues (fix before publication)
|
||||
3. MEDIUM issues (recommended fixes)
|
||||
4. LOW issues (nice to have)
|
||||
|
||||
Calculate overall security score:
|
||||
Base score: 100
|
||||
- CRITICAL: -25 points each
|
||||
- HIGH: -10 points each
|
||||
- MEDIUM: -5 points each
|
||||
- LOW: -2 points each
|
||||
Score = max(0, base - deductions)
|
||||
```
|
||||
|
||||
4. **Generate comprehensive report**
|
||||
```
|
||||
FULL SECURITY AUDIT REPORT
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
Target: <path>
|
||||
Scan Date: <timestamp>
|
||||
Severity Threshold: <severity>
|
||||
|
||||
OVERALL SECURITY SCORE: <0-100>/100
|
||||
Rating: <Excellent|Good|Fair|Poor|Critical>
|
||||
Publication Ready: <Yes|No|With Fixes>
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
EXECUTIVE SUMMARY
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
Security Posture: <assessment>
|
||||
Critical Issues: <count> (IMMEDIATE ACTION REQUIRED)
|
||||
High Priority: <count> (FIX BEFORE PUBLICATION)
|
||||
Medium Priority: <count> (RECOMMENDED)
|
||||
Low Priority: <count> (OPTIONAL)
|
||||
|
||||
Action Required: <Yes|No>
|
||||
Estimated Fix Time: <time_estimate>
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
SCAN RESULTS BY LAYER
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
[1] SECRET DETECTION
|
||||
Status: <PASS|FAIL>
|
||||
Secrets Found: <count>
|
||||
Files Scanned: <count>
|
||||
<Details...>
|
||||
|
||||
[2] URL SAFETY
|
||||
Status: <PASS|FAIL>
|
||||
Unsafe URLs: <count>
|
||||
URLs Checked: <count>
|
||||
<Details...>
|
||||
|
||||
[3] DANGEROUS FILES
|
||||
Status: <PASS|FAIL>
|
||||
Dangerous Files: <count>
|
||||
Files Scanned: <count>
|
||||
<Details...>
|
||||
|
||||
[4] FILE PERMISSIONS
|
||||
Status: <PASS|FAIL>
|
||||
Permission Issues: <count>
|
||||
Files Checked: <count>
|
||||
<Details...>
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
CRITICAL ISSUES (IMMEDIATE ACTION)
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
❌ Issue 1: <description>
|
||||
File: <path>:<line>
|
||||
Severity: CRITICAL
|
||||
Risk: <risk_assessment>
|
||||
Remediation: <specific_steps>
|
||||
|
||||
❌ Issue 2: ...
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
HIGH PRIORITY ISSUES
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
⚠️ Issue 1: ...
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
REMEDIATION PLAN
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
Phase 1: Critical Issues (Immediate)
|
||||
□ Remove exposed secrets from .env
|
||||
□ Rotate compromised API keys
|
||||
□ Fix world-writable permissions (777)
|
||||
□ Remove dangerous files from repository
|
||||
|
||||
Phase 2: High Priority (Before Publication)
|
||||
□ Update all HTTP URLs to HTTPS
|
||||
□ Add dangerous files to .gitignore
|
||||
□ Fix executables without shebangs
|
||||
□ Remove remote code execution patterns
|
||||
|
||||
Phase 3: Recommended Improvements
|
||||
□ Restrict config file permissions to 600
|
||||
□ Review and expand shortened URLs
|
||||
□ Add security documentation
|
||||
|
||||
Phase 4: Optional Enhancements
|
||||
□ Implement pre-commit hooks
|
||||
□ Add automated security scanning to CI/CD
|
||||
□ Document security best practices
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
SECURITY RECOMMENDATIONS
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
🔒 Secrets Management
|
||||
- Use environment variables for all secrets
|
||||
- Implement secret rotation policy
|
||||
- Consider using secret management tools
|
||||
(AWS Secrets Manager, HashiCorp Vault)
|
||||
|
||||
🌐 URL Security
|
||||
- Enforce HTTPS for all external URLs
|
||||
- Verify checksums for downloaded scripts
|
||||
- Never pipe remote content to shell
|
||||
|
||||
📁 File Security
|
||||
- Review .gitignore completeness
|
||||
- Remove sensitive files from git history
|
||||
- Implement file scanning in CI/CD
|
||||
|
||||
🔐 Permission Security
|
||||
- Use least privilege principle
|
||||
- Document required permissions
|
||||
- Regular permission audits
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
PUBLICATION READINESS
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
<if score >= 90>
|
||||
✅ READY FOR PUBLICATION
|
||||
Security score is excellent. No critical issues found.
|
||||
All security checks passed. Safe to publish.
|
||||
|
||||
<if 70 <= score < 90>
|
||||
⚠️ READY WITH MINOR FIXES
|
||||
Security score is good but has some issues.
|
||||
Fix high priority issues before publication.
|
||||
Estimated fix time: <time>
|
||||
|
||||
<if score < 70>
|
||||
❌ NOT READY FOR PUBLICATION
|
||||
Critical security issues must be resolved.
|
||||
Publication blocked until critical issues fixed.
|
||||
Do not publish in current state.
|
||||
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
NEXT STEPS
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
1. Address all CRITICAL issues immediately
|
||||
2. Fix HIGH priority issues before publication
|
||||
3. Review and implement recommended improvements
|
||||
4. Re-run full security audit to verify fixes
|
||||
5. Document security practices for maintainers
|
||||
```
|
||||
|
||||
### Security Score Calculation
|
||||
|
||||
```
|
||||
Base Score: 100 points
|
||||
|
||||
Deductions:
|
||||
- CRITICAL issues: -25 points each
|
||||
- HIGH issues: -10 points each
|
||||
- MEDIUM issues: -5 points each
|
||||
- LOW issues: -2 points each
|
||||
|
||||
Final Score: max(0, Base - Deductions)
|
||||
|
||||
Rating Scale:
|
||||
- 90-100: Excellent ⭐⭐⭐⭐⭐ (Publication ready)
|
||||
- 70-89: Good ⭐⭐⭐⭐ (Ready with minor fixes)
|
||||
- 50-69: Fair ⭐⭐⭐ (Needs work)
|
||||
- 30-49: Poor ⭐⭐ (Not ready)
|
||||
- 0-29: Critical ⭐ (Major security issues)
|
||||
```
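
The same arithmetic as a small shell function, shown only to make the deduction rule concrete:

```bash
# Minimal sketch of the score calculation described above.
compute_score() {
    local critical="$1" high="$2" medium="$3" low="$4"
    local deductions=$(( critical * 25 + high * 10 + medium * 5 + low * 2 ))
    local score=$(( 100 - deductions ))
    (( score < 0 )) && score=0
    echo "$score"
}

compute_score 1 2 0 3   # prints 49 (100 - 25 - 20 - 6)
```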
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Full security audit with default settings
|
||||
/security-scan full-security-audit path:.
|
||||
|
||||
# Strict mode - enforce all strict rules
|
||||
/security-scan full-security-audit path:. strict:true
|
||||
|
||||
# Only report critical and high issues
|
||||
/security-scan full-security-audit path:. severity:high
|
||||
|
||||
# JSON output for CI/CD integration
|
||||
/security-scan full-security-audit path:. format:json
|
||||
|
||||
# Markdown report for documentation
|
||||
/security-scan full-security-audit path:. format:markdown
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
**Path not found**:
|
||||
```
|
||||
ERROR: Path does not exist: <path>
|
||||
Remediation: Verify path and try again
|
||||
```
|
||||
|
||||
**Scan failures**:
|
||||
```
|
||||
WARNING: One or more security scans failed
|
||||
Partial results available:
|
||||
- Secrets: ✓ Completed
|
||||
- URLs: ✓ Completed
|
||||
- Files: ✗ Failed
|
||||
- Permissions: ✓ Completed
|
||||
|
||||
Recommendation: Review failures and re-run
|
||||
```
|
||||
|
||||
**All scans passed**:
|
||||
```
|
||||
SUCCESS: Full Security Audit Passed
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
|
||||
Security Score: 100/100 ⭐⭐⭐⭐⭐
|
||||
Rating: Excellent
|
||||
|
||||
All security checks passed with no issues.
|
||||
Your plugin/marketplace is secure and ready for publication.
|
||||
|
||||
Summary:
|
||||
✓ No secrets detected
|
||||
✓ All URLs safe
|
||||
✓ No dangerous files
|
||||
✓ All permissions correct
|
||||
|
||||
Excellent security posture! 🎉
|
||||
```
|
||||
|
||||
### Integration with CI/CD
|
||||
|
||||
**GitHub Actions Example**:
|
||||
```yaml
|
||||
name: Security Audit
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
security:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Run Security Audit
|
||||
run: |
|
||||
/security-scan full-security-audit path:. format:json > security-report.json
|
||||
|
||||
- name: Check Security Score
|
||||
run: |
|
||||
score=$(jq '.security_score' security-report.json)
|
||||
if [ "$score" -lt 70 ]; then
|
||||
echo "Security score too low: $score"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Upload Report
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: security-report
|
||||
path: security-report.json
|
||||
```
|
||||
|
||||
**GitLab CI Example**:
|
||||
```yaml
|
||||
security_audit:
|
||||
stage: test
|
||||
script:
|
||||
- /security-scan full-security-audit path:. format:json
|
||||
only:
|
||||
- main
|
||||
- merge_requests
|
||||
artifacts:
|
||||
reports:
|
||||
security: security-report.json
|
||||
```
|
||||
|
||||
### Report Formats
|
||||
|
||||
**Text Format** (default):
|
||||
- Human-readable console output
|
||||
- Color-coded severity levels
|
||||
- Section dividers for clarity
|
||||
- Suitable for terminal viewing
|
||||
|
||||
**JSON Format**:
|
||||
```json
|
||||
{
|
||||
"scan_type": "full-audit",
|
||||
"timestamp": "<ISO8601>",
|
||||
"path": "<path>",
|
||||
"security_score": <0-100>,
|
||||
"rating": "<rating>",
|
||||
"publication_ready": <boolean>,
|
||||
"scans": {
|
||||
"secrets": { "status": "pass", "issues": [] },
|
||||
"urls": { "status": "fail", "issues": [...] },
|
||||
"files": { "status": "pass", "issues": [] },
|
||||
"permissions": { "status": "pass", "issues": [] }
|
||||
},
|
||||
"severity_breakdown": {
|
||||
"critical": <count>,
|
||||
"high": <count>,
|
||||
"medium": <count>,
|
||||
"low": <count>
|
||||
},
|
||||
"all_findings": [...],
|
||||
"remediation_plan": [...],
|
||||
"recommendations": [...]
|
||||
}
|
||||
```
|
||||
|
||||
**Markdown Format**:
|
||||
- GitHub/GitLab compatible
|
||||
- Can be added to PR comments
|
||||
- Suitable for documentation
|
||||
- Includes tables and checkboxes
|
||||
|
||||
### Time Estimates
|
||||
|
||||
**By Issue Count**:
|
||||
- 0 issues: No time needed ✅
|
||||
- 1-3 CRITICAL: 2-4 hours
|
||||
- 4-10 HIGH: 1-2 hours
|
||||
- 11-20 MEDIUM: 30-60 minutes
|
||||
- 20+ LOW: 15-30 minutes
|
||||
|
||||
**By Issue Type**:
|
||||
- Secret rotation: 30-60 minutes each
|
||||
- URL updates: 5-10 minutes each
|
||||
- File removal: 15-30 minutes (including .gitignore)
|
||||
- Permission fixes: 5 minutes total (batch operation)
|
||||
|
||||
### Remediation Verification
|
||||
|
||||
After fixing issues, re-run audit:
|
||||
|
||||
```bash
|
||||
# Fix issues
|
||||
chmod 755 scripts/*.sh
|
||||
git rm .env
|
||||
echo ".env" >> .gitignore
|
||||
|
||||
# Verify fixes
|
||||
/security-scan full-security-audit path:.
|
||||
|
||||
# Should see improved score
|
||||
```
|
||||
|
||||
### Best Practices
|
||||
|
||||
**Regular Audits**:
|
||||
- Run before each release
|
||||
- Include in CI/CD pipeline
|
||||
- Weekly scans for active development
|
||||
- After adding dependencies
|
||||
|
||||
**Fix Priority**:
|
||||
1. CRITICAL: Drop everything and fix
|
||||
2. HIGH: Fix within 24 hours
|
||||
3. MEDIUM: Fix within 1 week
|
||||
4. LOW: Address when convenient
|
||||
|
||||
**Team Communication**:
|
||||
- Share audit results with team
|
||||
- Document security requirements
|
||||
- Train on secure development
|
||||
- Review security in code reviews
|
||||
|
||||
### Output Format
|
||||
|
||||
```json
|
||||
{
|
||||
"scan_type": "full-audit",
|
||||
"timestamp": "<ISO8601>",
|
||||
"path": "<path>",
|
||||
"security_score": <0-100>,
|
||||
"rating": "<Excellent|Good|Fair|Poor|Critical>",
|
||||
"publication_ready": <boolean>,
|
||||
"estimated_fix_time": "<time_string>",
|
||||
"severity_breakdown": {
|
||||
"critical": <count>,
|
||||
"high": <count>,
|
||||
"medium": <count>,
|
||||
"low": <count>
|
||||
},
|
||||
"scan_results": {
|
||||
"secrets": { "status": "pass|fail", "findings": [...] },
|
||||
"urls": { "status": "pass|fail", "findings": [...] },
|
||||
"files": { "status": "pass|fail", "findings": [...] },
|
||||
"permissions": { "status": "pass|fail", "findings": [...] }
|
||||
},
|
||||
"all_findings": [...],
|
||||
"remediation_plan": [...],
|
||||
"recommendations": [...],
|
||||
"action_required": <boolean>
|
||||
}
|
||||
```
|
||||
|
||||
**Request**: $ARGUMENTS
|
||||
364
commands/security-scan/scan-files.md
Normal file
364
commands/security-scan/scan-files.md
Normal file
@@ -0,0 +1,364 @@
|
||||
## Operation: Scan for Dangerous Files
|
||||
|
||||
Detect dangerous files, sensitive configurations, and files that should not be committed to version control.
|
||||
|
||||
### Parameters from $ARGUMENTS
|
||||
|
||||
- **path**: Target directory to scan (required)
|
||||
- **patterns**: Specific file patterns to check (optional, default: all)
|
||||
- **include-hidden**: Scan hidden files and directories (true|false, default: true)
|
||||
- **check-gitignore**: Verify .gitignore coverage (true|false, default: true)
|
||||
|
||||
### Dangerous File Categories
|
||||
|
||||
**Environment Files** (CRITICAL):
|
||||
- .env, .env.local, .env.production, .env.development
|
||||
- .env.*.local (any environment-specific)
|
||||
- env.sh, setenv.sh
|
||||
→ Often contain secrets, should never be committed
|
||||
|
||||
**Credential Files** (CRITICAL):
|
||||
- credentials.json, credentials.yaml, credentials.yml
|
||||
- secrets.json, secrets.yaml, config/secrets/*
|
||||
- .aws/credentials, .azure/credentials
|
||||
- .gcp/credentials.json, gcloud/credentials
|
||||
→ Direct access credentials, rotate if exposed
|
||||
|
||||
**Private Keys** (CRITICAL):
|
||||
- id_rsa, id_dsa, id_ed25519 (SSH keys)
|
||||
- *.pem, *.key, *.p12, *.pfx (SSL/TLS certificates)
|
||||
- *.jks, *.keystore (Java keystores)
|
||||
- .gnupg/*, .ssh/id_* (GPG and SSH directories)
|
||||
→ Authentication keys, regenerate if exposed
|
||||
|
||||
**Database Files** (HIGH):
|
||||
- *.db, *.sqlite, *.sqlite3
|
||||
- *.sql with INSERT statements (data dumps)
|
||||
- dump.sql, backup.sql
|
||||
- *.mdb, *.accdb (Access databases)
|
||||
→ May contain sensitive data
|
||||
|
||||
**Configuration Files** (MEDIUM):
|
||||
- config/database.yml with passwords
|
||||
- appsettings.json with connection strings
|
||||
- wp-config.php with DB credentials
|
||||
- settings.py with SECRET_KEY
|
||||
→ Review for hardcoded secrets
|
||||
|
||||
**Backup Files** (MEDIUM):
|
||||
- *.bak, *.backup, *.old
|
||||
- *~, *.swp, *.swo (editor backups)
|
||||
- *.orig, *.copy
|
||||
→ May contain previous versions with secrets
|
||||
|
||||
**Log Files** (LOW):
|
||||
- *.log with potential sensitive data
|
||||
- debug.log, error.log
|
||||
- Combined log files (>10MB)
|
||||
→ Review for leaked information
|
||||
|
||||
### Workflow
|
||||
|
||||
1. **Parse arguments**
|
||||
```
|
||||
Extract path, patterns, include-hidden, check-gitignore
|
||||
Validate path exists and is directory
|
||||
Load dangerous file patterns
|
||||
```
|
||||
|
||||
2. **Execute file scanner**
|
||||
```bash
|
||||
Execute .scripts/file-scanner.sh "$path" "$patterns" "$include_hidden" "$check_gitignore"
|
||||
|
||||
Returns:
|
||||
- 0: No dangerous files found
|
||||
- 1: Dangerous files detected
|
||||
- 2: Scan error
|
||||
```
|
||||
|
||||
3. **Process results**
|
||||
```
|
||||
Categorize by risk:
|
||||
- CRITICAL: Private keys, credentials, production env files
|
||||
- HIGH: Database files, config with secrets
|
||||
- MEDIUM: Backup files, test credentials
|
||||
- LOW: Log files, temporary files
|
||||
|
||||
Cross-reference with .gitignore:
|
||||
- Files that SHOULD be in .gitignore but aren't
|
||||
- Already ignored files (informational)
|
||||
```
|
||||
|
||||
4. **Format output**
|
||||
```
|
||||
Dangerous Files Scan Results
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━
|
||||
Path: <path>
|
||||
Files Scanned: <count>
|
||||
|
||||
CRITICAL Files (<count>):
|
||||
❌ .env (157 bytes)
|
||||
Type: Environment file
|
||||
Risk: Contains API keys and secrets
|
||||
Status: NOT in .gitignore ⚠️
|
||||
Remediation: Add to .gitignore, remove from git history
|
||||
|
||||
❌ config/credentials.json (2.3 KB)
|
||||
Type: Credential file
|
||||
Risk: Contains authentication credentials
|
||||
Status: NOT in .gitignore ⚠️
|
||||
Remediation: Remove, rotate credentials, use env vars
|
||||
|
||||
HIGH Files (<count>):
|
||||
⚠️ database/dev.db (45 MB)
|
||||
Type: SQLite database
|
||||
Risk: May contain user data
|
||||
Status: In .gitignore ✓
|
||||
Remediation: Verify .gitignore working
|
||||
|
||||
Summary:
|
||||
- Total dangerous files: <count>
|
||||
- Not in .gitignore: <count>
|
||||
- Action required: <yes|no>
|
||||
```
|
||||
|
||||
### File Pattern Signatures
|
||||
|
||||
**Environment files**:
|
||||
```
|
||||
.env
|
||||
.env.*
|
||||
env.sh
|
||||
setenv.sh
|
||||
.envrc
|
||||
```
|
||||
|
||||
**Credential files**:
|
||||
```
|
||||
*credentials*
|
||||
*secrets*
|
||||
*password*
|
||||
.aws/credentials
|
||||
.azure/credentials
|
||||
.gcp/*credentials*
|
||||
```
|
||||
|
||||
**Private keys**:
|
||||
```
|
||||
id_rsa
|
||||
id_dsa
|
||||
id_ed25519
|
||||
*.pem
|
||||
*.key
|
||||
*.p12
|
||||
*.pfx
|
||||
*.jks
|
||||
*.keystore
|
||||
.gnupg/*
|
||||
```
|
||||
|
||||
**Database files**:
|
||||
```
|
||||
*.db
|
||||
*.sqlite
|
||||
*.sqlite3
|
||||
*.sql (with INSERT/UPDATE)
|
||||
dump.sql
|
||||
*backup*.sql
|
||||
```
|
||||
|
||||
**Backup patterns**:
|
||||
```
|
||||
*.bak
|
||||
*.backup
|
||||
*.old
|
||||
*.orig
|
||||
*.copy
|
||||
*~
|
||||
*.swp
|
||||
*.swo
|
||||
```
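
A name-based sketch that turns a few of these signatures into a single `find` expression; the plugin's `file-scanner.sh` additionally inspects content and `.gitignore` coverage:

```bash
#!/usr/bin/env bash
# Minimal sketch: locate files matching the name signatures above (names only).
target="${1:-.}"

find "$target" \
    \( -name '.env' -o -name '.env.*' -o -name '*credentials*' -o -name '*secrets*' \
       -o -name 'id_rsa' -o -name 'id_ed25519' -o -name '*.pem' -o -name '*.key' \
       -o -name '*.sqlite*' -o -name '*.bak' -o -name '*~' \) \
    ! -path '*/.git/*' -print
```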
|
||||
|
||||
### .gitignore Validation
|
||||
|
||||
**Should be ignored**:
|
||||
```gitignore
|
||||
# Environment
|
||||
.env*
|
||||
!.env.example
|
||||
|
||||
# Credentials
|
||||
credentials.*
|
||||
secrets.*
|
||||
*.pem
|
||||
*.key
|
||||
id_rsa*
|
||||
|
||||
# Databases
|
||||
*.db
|
||||
*.sqlite*
|
||||
dump.sql
|
||||
|
||||
# Backups
|
||||
*.bak
|
||||
*.backup
|
||||
*~
|
||||
```
|
||||
|
||||
**Safe to commit** (examples):
|
||||
```
|
||||
.env.example
|
||||
.env.template
|
||||
credentials.example.json
|
||||
README.md
|
||||
package.json
|
||||
```
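
`git check-ignore` gives a quick answer to whether a given path is already covered; a minimal sketch:

```bash
# Minimal sketch: check whether candidate files are covered by .gitignore.
# git check-ignore exits 0 when the path is ignored, non-zero when it is not.
for f in .env config/credentials.json id_rsa; do
    if git check-ignore -q "$f"; then
        echo "OK:      $f is ignored"
    else
        echo "WARNING: $f is NOT covered by .gitignore"
    fi
done
```

Because `check-ignore` evaluates patterns rather than existing files, it can validate coverage even before a sensitive file is ever created.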
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Scan current directory
|
||||
/security-scan files path:.
|
||||
|
||||
# Check specific patterns only
|
||||
/security-scan files path:. patterns:".env,credentials,*.pem"
|
||||
|
||||
# Include hidden files explicitly
|
||||
/security-scan files path:. include-hidden:true
|
||||
|
||||
# Scan and verify .gitignore coverage
|
||||
/security-scan files path:. check-gitignore:true
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
**Path not found**:
|
||||
```
|
||||
ERROR: Path does not exist: <path>
|
||||
Remediation: Verify path is correct
|
||||
```
|
||||
|
||||
**Path is not directory**:
|
||||
```
|
||||
ERROR: Path is not a directory: <path>
|
||||
Remediation: Provide directory path for file scanning
|
||||
```
|
||||
|
||||
**No .gitignore found**:
|
||||
```
|
||||
WARNING: No .gitignore file found
|
||||
Recommendation: Create .gitignore to prevent committing sensitive files
|
||||
```
|
||||
|
||||
### Remediation Guidance
|
||||
|
||||
**For environment files (.env)**:
|
||||
1. Add to .gitignore immediately
|
||||
2. Remove from git history if committed:
|
||||
```bash
|
||||
git filter-branch --force --index-filter \
|
||||
"git rm --cached --ignore-unmatch .env" \
|
||||
--prune-empty --tag-name-filter cat -- --all
|
||||
```
|
||||
3. Create .env.example with dummy values
|
||||
4. Document environment variables in README
|
||||
|
||||
**For credential files**:
|
||||
1. Remove from repository
|
||||
2. Rotate all exposed credentials
|
||||
3. Use environment variables or secret managers
|
||||
4. Add to .gitignore
|
||||
5. Consider using git-secrets or similar tools
|
||||
|
||||
**For private keys**:
|
||||
1. Regenerate keys immediately
|
||||
2. Remove from repository
|
||||
3. Update deployed systems with new keys
|
||||
4. Add *.pem, *.key, id_rsa to .gitignore
|
||||
5. Audit access logs for unauthorized use
|
||||
|
||||
**For database files**:
|
||||
1. Remove from repository if contains real data
|
||||
2. For test data, ensure no real emails/names
|
||||
3. Add *.db, *.sqlite to .gitignore
|
||||
4. Use schema-only dumps in version control
|
||||
|
||||
**For backup files**:
|
||||
1. Clean up backup files before commit
|
||||
2. Add backup patterns to .gitignore
|
||||
3. Use .gitignore_global for editor backups
|
||||
4. Configure editors to save backups elsewhere
|
||||
|
||||
### Git History Cleanup
|
||||
|
||||
If sensitive files were already committed:
|
||||
|
||||
```bash
|
||||
# Using git filter-repo (recommended)
|
||||
git filter-repo --path .env --invert-paths
|
||||
|
||||
# Using BFG Repo-Cleaner (fast for large repos)
|
||||
bfg --delete-files .env
|
||||
git reflog expire --expire=now --all
|
||||
git gc --prune=now --aggressive
|
||||
|
||||
# Force push (WARNING: destructive)
|
||||
git push origin --force --all
|
||||
```
|
||||
|
||||
### Prevention Strategies
|
||||
|
||||
**Pre-commit hooks**:
|
||||
```bash
|
||||
#!/bin/bash
# .git/hooks/pre-commit
# Block commits that stage dangerous files
if git diff --cached --name-only | grep -E '\.env$|credentials|id_rsa'; then
    echo "ERROR: Attempting to commit sensitive file"
    exit 1
fi
|
||||
```
|
||||
|
||||
**Use git-secrets**:
|
||||
```bash
|
||||
git secrets --install
|
||||
git secrets --register-aws
|
||||
git secrets --add 'credentials\.json'
|
||||
```
|
||||
|
||||
**IDE Configuration**:
|
||||
- Configure .gitignore templates
|
||||
- Use .editorconfig
|
||||
- Set up file watchers for dangerous patterns
|
||||
|
||||
### Output Format
|
||||
|
||||
```json
|
||||
{
|
||||
"scan_type": "files",
|
||||
"path": "<path>",
|
||||
"files_scanned": <count>,
|
||||
"dangerous_files": <count>,
|
||||
"not_in_gitignore": <count>,
|
||||
"severity_breakdown": {
|
||||
"critical": <count>,
|
||||
"high": <count>,
|
||||
"medium": <count>,
|
||||
"low": <count>
|
||||
},
|
||||
"findings": [
|
||||
{
|
||||
"file": "<file_path>",
|
||||
"type": "<file_type>",
|
||||
"size": <size_bytes>,
|
||||
"severity": "<severity>",
|
||||
"risk": "<risk_description>",
|
||||
"in_gitignore": <boolean>,
|
||||
"remediation": "<action>"
|
||||
}
|
||||
],
|
||||
"action_required": <boolean>
|
||||
}
|
||||
```
|
||||
|
||||
**Request**: $ARGUMENTS
|
||||
207
commands/security-scan/scan-secrets.md
Normal file
207
commands/security-scan/scan-secrets.md
Normal file
@@ -0,0 +1,207 @@
|
||||
## Operation: Scan for Exposed Secrets
|
||||
|
||||
Detect exposed secrets, API keys, tokens, passwords, and private keys using 50+ pattern signatures.
|
||||
|
||||
### Parameters from $ARGUMENTS
|
||||
|
||||
- **path**: Target directory or file to scan (required)
|
||||
- **recursive**: Scan subdirectories (true|false, default: true)
|
||||
- **patterns**: Specific pattern categories to check (optional, default: all)
|
||||
- **exclude**: Patterns to exclude from scan (optional)
|
||||
- **severity**: Minimum severity to report (critical|high|medium|low, default: medium)
|
||||
|
||||
### Secret Detection Patterns (50+)
|
||||
|
||||
**API Keys & Tokens**:
|
||||
- Stripe: sk_live_, sk_test_, pk_live_, pk_test_
|
||||
- OpenAI: sk-[a-zA-Z0-9]{32,}
|
||||
- AWS: AKIA[0-9A-Z]{16}
|
||||
- Google: AIza[0-9A-Za-z_-]{35}
|
||||
- GitHub: ghp_, gho_, ghs_, ghu_
|
||||
- Slack: xox[baprs]-[0-9a-zA-Z]{10,}
|
||||
- Twitter: [0-9a-zA-Z]{35,44}
|
||||
- Facebook: EAA[0-9A-Za-z]{90,}
|
||||
|
||||
**Private Keys**:
|
||||
- RSA: BEGIN RSA PRIVATE KEY
|
||||
- Generic: BEGIN PRIVATE KEY
|
||||
- SSH: BEGIN OPENSSH PRIVATE KEY
|
||||
- PGP: BEGIN PGP PRIVATE KEY
|
||||
- DSA: BEGIN DSA PRIVATE KEY
|
||||
- EC: BEGIN EC PRIVATE KEY
|
||||
|
||||
**Credentials**:
|
||||
- Passwords: password\s*[=:]\s*['\"][^'\"]+['\"]
|
||||
- API keys: api[_-]?key\s*[=:]\s*['\"][^'\"]+['\"]
|
||||
- Secrets: secret\s*[=:]\s*['\"][^'\"]+['\"]
|
||||
- Tokens: token\s*[=:]\s*['\"][^'\"]+['\"]
|
||||
- Auth: authorization\s*[=:]\s*['\"]Bearer [^'\"]+['\"]
|
||||
|
||||
**Cloud Provider Credentials**:
|
||||
- AWS Access Key: aws_access_key_id
|
||||
- AWS Secret: aws_secret_access_key
|
||||
- Azure: [0-9a-zA-Z/+]{88}==
|
||||
- GCP Service Account: type.*service_account
|
||||
|
||||
### Workflow
|
||||
|
||||
1. **Parse arguments**
|
||||
```
|
||||
Extract path, recursive, patterns, exclude, severity
|
||||
Validate path exists
|
||||
Determine scan scope (file vs directory)
|
||||
```
|
||||
|
||||
2. **Execute secret scanner**
|
||||
```bash
|
||||
Execute .scripts/secret-scanner.sh "$path" "$recursive" "$patterns" "$exclude" "$severity"
|
||||
|
||||
Returns:
|
||||
- 0: No secrets found
|
||||
- 1: Secrets detected
|
||||
- 2: Scan error
|
||||
```
|
||||
|
||||
3. **Process results**
|
||||
```
|
||||
Parse scanner output
|
||||
Categorize by severity:
|
||||
- CRITICAL: Private keys, production API keys
|
||||
- HIGH: API keys, tokens with broad scope
|
||||
- MEDIUM: Passwords, secrets in config
|
||||
- LOW: Test keys, development credentials
|
||||
|
||||
Generate remediation guidance per finding
|
||||
```
|
||||
|
||||
4. **Format output**
|
||||
```
|
||||
Secrets Scan Results
|
||||
━━━━━━━━━━━━━━━━━━━━
|
||||
Path: <path>
|
||||
Files Scanned: <count>
|
||||
|
||||
CRITICAL Issues (<count>):
|
||||
❌ <file>:<line>: <type> detected
|
||||
Pattern: <pattern_name>
|
||||
Remediation: Remove and rotate immediately
|
||||
|
||||
HIGH Issues (<count>):
|
||||
⚠️ <file>:<line>: <type> detected
|
||||
|
||||
Summary:
|
||||
- Total secrets: <count>
|
||||
- Unique patterns: <count>
|
||||
- Action required: <yes|no>
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Scan current directory recursively
|
||||
/security-scan secrets path:.
|
||||
|
||||
# Scan specific file only
|
||||
/security-scan secrets path:./config/settings.json recursive:false
|
||||
|
||||
# Check only API key patterns
|
||||
/security-scan secrets path:. patterns:"api-keys,tokens"
|
||||
|
||||
# Exclude test directories
|
||||
/security-scan secrets path:. exclude:"test,mock,fixtures"
|
||||
|
||||
# Only critical severity
|
||||
/security-scan secrets path:. severity:critical
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
**Path not found**:
|
||||
```
|
||||
ERROR: Path does not exist: <path>
|
||||
Remediation: Verify path and try again
|
||||
```
|
||||
|
||||
**No patterns matched**:
|
||||
```
|
||||
INFO: No secrets detected
|
||||
All files clean
|
||||
```
|
||||
|
||||
**Scanner unavailable**:
|
||||
```
|
||||
ERROR: Secret scanner script not found
|
||||
Remediation: Verify plugin installation
|
||||
```
|
||||
|
||||
### Severity Levels
|
||||
|
||||
**CRITICAL** (Immediate action required):
|
||||
- Private keys (RSA, SSH, PGP)
|
||||
- Production API keys (live_, prod_)
|
||||
- AWS credentials
|
||||
- Database connection strings with passwords
|
||||
|
||||
**HIGH** (Action required):
|
||||
- API keys (generic)
|
||||
- OAuth tokens
|
||||
- Bearer tokens
|
||||
- Authentication credentials
|
||||
|
||||
**MEDIUM** (Should address):
|
||||
- Passwords in config files
|
||||
- Secret variables
|
||||
- Session tokens
|
||||
- Development credentials in non-test contexts
|
||||
|
||||
**LOW** (Review recommended):
|
||||
- Test API keys
|
||||
- Mock credentials
|
||||
- Example configurations
|
||||
|
||||
### Remediation Guidance
|
||||
|
||||
**For exposed secrets**:
|
||||
1. Remove from code immediately
|
||||
2. Rotate/regenerate the credential
|
||||
3. Use environment variables instead
|
||||
4. Add to .gitignore if file-based
|
||||
5. Review git history for exposure
|
||||
6. Consider using secret management (AWS Secrets Manager, HashiCorp Vault)
|
||||
|
||||
**Prevention**:
|
||||
- Use .env files (never commit)
|
||||
- Use environment variables
|
||||
- Implement pre-commit hooks
|
||||
- Use secret scanning in CI/CD
|
||||
- Educate team on security practices
|
||||
|
||||
### Output Format
|
||||
|
||||
```json
|
||||
{
|
||||
"scan_type": "secrets",
|
||||
"path": "<path>",
|
||||
"files_scanned": <count>,
|
||||
"secrets_found": <count>,
|
||||
"severity_breakdown": {
|
||||
"critical": <count>,
|
||||
"high": <count>,
|
||||
"medium": <count>,
|
||||
"low": <count>
|
||||
},
|
||||
"findings": [
|
||||
{
|
||||
"file": "<file_path>",
|
||||
"line": <line_number>,
|
||||
"type": "<secret_type>",
|
||||
"severity": "<severity>",
|
||||
"pattern": "<pattern_name>",
|
||||
"remediation": "<action>"
|
||||
}
|
||||
],
|
||||
"action_required": <boolean>
|
||||
}
|
||||
```
|
||||
|
||||
**Request**: $ARGUMENTS
|
||||
89
commands/security-scan/skill.md
Normal file
89
commands/security-scan/skill.md
Normal file
@@ -0,0 +1,89 @@
|
||||
---
|
||||
description: Comprehensive security scanning for secrets, vulnerabilities, and unsafe practices
|
||||
---
|
||||
|
||||
You are the Security Scan coordinator, protecting against security vulnerabilities and exposed secrets.
|
||||
|
||||
## Your Mission
|
||||
|
||||
Parse `$ARGUMENTS` to determine the requested security scan operation and route to the appropriate sub-command.
|
||||
|
||||
## Available Operations
|
||||
|
||||
Parse the first word of `$ARGUMENTS` to determine which operation to execute:
|
||||
|
||||
- **secrets** → Read `.claude/commands/security-scan/scan-secrets.md`
|
||||
- **urls** → Read `.claude/commands/security-scan/check-urls.md`
|
||||
- **files** → Read `.claude/commands/security-scan/scan-files.md`
|
||||
- **permissions** → Read `.claude/commands/security-scan/check-permissions.md`
|
||||
- **full-security-audit** → Read `.claude/commands/security-scan/full-audit.md`
|
||||
|
||||
## Argument Format
|
||||
|
||||
```
|
||||
/security-scan <operation> [parameters]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```bash
|
||||
# Scan for exposed secrets
|
||||
/security-scan secrets path:. recursive:true
|
||||
|
||||
# Validate URL safety
|
||||
/security-scan urls path:. https-only:true
|
||||
|
||||
# Detect dangerous files
|
||||
/security-scan files path:. patterns:".env,credentials.json,id_rsa"
|
||||
|
||||
# Check file permissions
|
||||
/security-scan permissions path:. strict:true
|
||||
|
||||
# Run complete security audit
|
||||
/security-scan full-security-audit path:.
|
||||
```
|
||||
|
||||
## Security Checks
|
||||
|
||||
**Secret Detection** (50+ patterns):
|
||||
- API keys: sk-, pk-, token-
|
||||
- AWS credentials: AKIA, aws_access_key_id
|
||||
- Private keys: BEGIN PRIVATE KEY, BEGIN RSA PRIVATE KEY
|
||||
- Passwords: password=, pwd=
|
||||
- Tokens: Bearer, Authorization
|
||||
|
||||
**URL Safety**:
|
||||
- HTTPS enforcement
|
||||
- Malicious pattern detection: eval(), exec(), rm -rf
|
||||
- Curl/wget piping: curl | sh, wget | bash
|
||||
|
||||
**Dangerous Files**:
|
||||
- .env files with secrets
|
||||
- credentials.json, config.json with keys
|
||||
- Private keys: id_rsa, *.pem, *.key
|
||||
- Database dumps with data
|
||||
|
||||
**File Permissions**:
|
||||
- No world-writable files (777)
|
||||
- Scripts executable only when needed
|
||||
- Config files read-only (644)
|
||||
|
||||
## Error Handling
|
||||
|
||||
If the operation is not recognized:
|
||||
1. List all available security operations
|
||||
2. Show security best practices
|
||||
3. Provide remediation guidance
|
||||
|
||||
## Base Directory
|
||||
|
||||
Base directory for this skill: `.claude/commands/security-scan/`
|
||||
|
||||
## Your Task
|
||||
|
||||
1. Parse `$ARGUMENTS` to extract operation and parameters
|
||||
2. Read the corresponding operation file
|
||||
3. Execute security scans with pattern matching
|
||||
4. Return prioritized security findings with remediation steps
|
||||
|
||||
**Current Request**: $ARGUMENTS
|
||||