Initial commit

Zhongwei Li
2025-11-29 17:51:02 +08:00
commit ff1f4bd119
252 changed files with 72682 additions and 0 deletions

@@ -0,0 +1,9 @@
# Assets Directory
Place files that will be used in the output Claude produces:
- Templates
- Configuration files
- Images/logos
- Boilerplate code
These files are NOT loaded into context; they are copied or modified in the output.

@@ -0,0 +1,357 @@
# Security-Enhanced CI/CD Pipeline Template
#
# This template demonstrates security best practices for CI/CD pipelines.
# Adapt this template to your specific security tool and workflow needs.
#
# Key Security Features:
# - SAST (Static Application Security Testing)
# - Dependency vulnerability scanning
# - Secrets detection
# - Infrastructure-as-Code security scanning
# - Container image scanning
# - Security artifact uploading for compliance
name: Security Scan Pipeline
on:
push:
branches: [main, develop]
pull_request:
branches: [main, develop]
schedule:
# Run weekly security scans on Sunday at 2 AM UTC
- cron: '0 2 * * 0'
workflow_dispatch: # Allow manual trigger
# Security: Restrict permissions to minimum required
permissions:
contents: read
security-events: write # For uploading SARIF results
pull-requests: write # For commenting on PRs
env:
# Configuration
SECURITY_SCAN_FAIL_ON: 'critical,high' # Severities that should fail the build (wire into the result-processing steps as needed)
REPORT_DIR: 'security-reports'
jobs:
# Job 1: Static Application Security Testing (SAST)
sast-scan:
name: SAST Security Scan
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Full history for better analysis
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Run SAST Scanner
run: |
# Example: Using Semgrep for SAST
# Ensure the report directory exists before writing results into it
mkdir -p "${{ env.REPORT_DIR }}"
pip install semgrep
semgrep --config=auto \
--json \
--output ${{ env.REPORT_DIR }}/sast-results.json \
. || true
# Alternative: Bandit for Python projects
# pip install bandit
# bandit -r . -f json -o ${{ env.REPORT_DIR }}/bandit-results.json
- name: Process SAST Results
run: |
# Parse results and fail on critical/high severity.
# Semgrep reports ERROR/WARNING/INFO; here ERROR is treated as critical and WARNING as high.
python3 -c "
import json
import sys
with open('${{ env.REPORT_DIR }}/sast-results.json') as f:
results = json.load(f)
critical = len([r for r in results.get('results', []) if r.get('extra', {}).get('severity') == 'ERROR'])
high = len([r for r in results.get('results', []) if r.get('extra', {}).get('severity') == 'WARNING'])
print(f'Critical findings: {critical}')
print(f'High findings: {high}')
if critical > 0:
print('❌ Build failed: Critical security issues found')
sys.exit(1)
elif high > 0:
print('⚠️ Warning: High severity issues found')
# Optionally fail on high severity
# sys.exit(1)
else:
print('✅ No critical security issues found')
"
- name: Upload SAST Results
if: always()
uses: actions/upload-artifact@v4
with:
name: sast-results
path: ${{ env.REPORT_DIR }}/sast-results.json
retention-days: 30
# Job 2: Dependency Vulnerability Scanning
dependency-scan:
name: Dependency Vulnerability Scan
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Scan Python Dependencies
if: hashFiles('requirements.txt') != ''
run: |
mkdir -p "${{ env.REPORT_DIR }}"
pip install safety
safety check \
--json \
--output ${{ env.REPORT_DIR }}/safety-results.json \
|| true
- name: Scan Node Dependencies
if: hashFiles('package.json') != ''
run: |
mkdir -p "${{ env.REPORT_DIR }}"
npm audit --json > ${{ env.REPORT_DIR }}/npm-audit.json || true
- name: Process Dependency Results
run: |
# Check for critical vulnerabilities
if [ -f "${{ env.REPORT_DIR }}/safety-results.json" ]; then
critical_count=$(python3 -c "import json; data=json.load(open('${{ env.REPORT_DIR }}/safety-results.json')); print(len([v for v in data.get('vulnerabilities', []) if v.get('severity', '').lower() == 'critical']))")
echo "Critical vulnerabilities: $critical_count"
if [ "$critical_count" -gt "0" ]; then
echo "❌ Build failed: Critical vulnerabilities in dependencies"
exit 1
fi
fi
- name: Upload Dependency Scan Results
if: always()
uses: actions/upload-artifact@v4
with:
name: dependency-scan-results
path: ${{ env.REPORT_DIR }}/
retention-days: 30
# Job 3: Secrets Detection
secrets-scan:
name: Secrets Detection
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Full history to scan all commits
- name: Run Gitleaks
uses: gitleaks/gitleaks-action@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITLEAKS_ENABLE_SUMMARY: true
- name: Alternative - TruffleHog Scan
if: false # Set to true to enable
run: |
pip install truffleHog
trufflehog --json --regex --entropy=True . \
> ${{ env.REPORT_DIR }}/trufflehog-results.json || true
- name: Upload Secrets Scan Results
if: always()
uses: actions/upload-artifact@v4
with:
name: secrets-scan-results
path: ${{ env.REPORT_DIR }}/
retention-days: 30
# Job 4: Container Image Scanning
container-scan:
name: Container Image Security Scan
runs-on: ubuntu-latest
if: hashFiles('Dockerfile') != ''
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Build Docker Image
run: |
mkdir -p "${{ env.REPORT_DIR }}"
docker build -t app:${{ github.sha }} .
- name: Run Trivy Scanner
uses: aquasecurity/trivy-action@master
with:
image-ref: app:${{ github.sha }}
format: 'sarif'
output: '${{ env.REPORT_DIR }}/trivy-results.sarif'
severity: 'CRITICAL,HIGH'
- name: Upload Trivy Results to GitHub Security
if: always()
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: '${{ env.REPORT_DIR }}/trivy-results.sarif'
- name: Upload Container Scan Results
if: always()
uses: actions/upload-artifact@v4
with:
name: container-scan-results
path: ${{ env.REPORT_DIR }}/
retention-days: 30
# Job 5: Infrastructure-as-Code Security Scanning
iac-scan:
name: IaC Security Scan
runs-on: ubuntu-latest
if: hashFiles('**/*.tf', '**/*.yaml', '**/*.yml') != ''
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Run Checkov
run: |
mkdir -p "${{ env.REPORT_DIR }}"
pip install checkov
checkov -d . \
--output json \
--output-file ${{ env.REPORT_DIR }}/checkov-results.json \
--quiet \
|| true
- name: Run tfsec (for Terraform)
if: hashFiles('**/*.tf') != ''
run: |
curl -s https://raw.githubusercontent.com/aquasecurity/tfsec/master/scripts/install_linux.sh | bash
tfsec . \
--format json \
--out ${{ env.REPORT_DIR }}/tfsec-results.json \
|| true
- name: Process IaC Results
run: |
# Review failed checks (uncomment the exit below to fail the build on them)
if [ -f "${{ env.REPORT_DIR }}/checkov-results.json" ]; then
critical_count=$(python3 -c "import json; data=json.load(open('${{ env.REPORT_DIR }}/checkov-results.json')); print(data.get('summary', {}).get('failed', 0))")
echo "Failed checks: $critical_count"
if [ "$critical_count" -gt "0" ]; then
echo "⚠️ Warning: IaC security issues found"
# Optionally fail the build
# exit 1
fi
fi
- name: Upload IaC Scan Results
if: always()
uses: actions/upload-artifact@v4
with:
name: iac-scan-results
path: ${{ env.REPORT_DIR }}/
retention-days: 30
# Job 6: Security Report Generation and Notification
security-report:
name: Generate Security Report
runs-on: ubuntu-latest
needs: [sast-scan, dependency-scan, secrets-scan]
if: always()
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download All Scan Results
uses: actions/download-artifact@v4
with:
path: all-results/
- name: Generate Consolidated Report
run: |
# Consolidate all security scan results
mkdir -p consolidated-report
# Unquoted heredoc delimiter so $(date ...) is expanded when the report is written
cat > consolidated-report/security-summary.md << EOF
# Security Scan Summary
**Scan Date**: $(date -u +"%Y-%m-%d %H:%M:%S UTC")
**Commit**: ${{ github.sha }}
**Branch**: ${{ github.ref_name }}
## Scan Results
### SAST Scan
See artifacts: `sast-results`
### Dependency Scan
See artifacts: `dependency-scan-results`
### Secrets Scan
See artifacts: `secrets-scan-results`
### Container Scan
See artifacts: `container-scan-results`
### IaC Scan
See artifacts: `iac-scan-results`
---
For detailed results, download scan artifacts from this workflow run.
EOF
- name: Comment on PR (if applicable)
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const report = fs.readFileSync('consolidated-report/security-summary.md', 'utf8');
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: report
});
- name: Upload Consolidated Report
if: always()
uses: actions/upload-artifact@v4
with:
name: consolidated-security-report
path: consolidated-report/
retention-days: 90
# Security Best Practices Demonstrated:
#
# 1. ✅ Minimal permissions (principle of least privilege)
# 2. ✅ Multiple security scan types (defense in depth)
# 3. ✅ Fail-fast on critical findings
# 4. ✅ Secrets detection across full git history
# 5. ✅ Container image scanning before deployment
# 6. ✅ IaC scanning for misconfigurations
# 7. ✅ Artifact retention for compliance audit trail
# 8. ✅ SARIF format for GitHub Security integration
# 9. ✅ Scheduled scans for continuous monitoring
# 10. ✅ PR comments for developer feedback
#
# Compliance Mappings:
# - SOC 2: CC6.1, CC6.6, CC7.2 (Security monitoring and logging)
# - PCI-DSS: 6.2, 6.5 (Secure development practices)
# - NIST: SA-11 (Developer Security Testing)
# - OWASP: Integrated security testing throughout SDLC

@@ -0,0 +1,405 @@
# Grype CI/CD Pipeline Configuration Examples
#
# This file provides example configurations for integrating Grype vulnerability
# scanning into various CI/CD platforms.
# =============================================================================
# GitHub Actions
# =============================================================================
name: Container Security Scan
on:
push:
branches: [main, develop]
pull_request:
branches: [main]
schedule:
# Scan daily for new vulnerabilities
- cron: '0 6 * * *'
jobs:
grype-scan:
name: Grype Vulnerability Scan
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Build Docker image
run: |
docker build -t ${{ github.repository }}:${{ github.sha }} .
- name: Run Grype scan
uses: anchore/scan-action@v3
id: grype
with:
image: ${{ github.repository }}:${{ github.sha }}
fail-build: true
severity-cutoff: high
output-format: sarif
- name: Upload SARIF results to GitHub Security
uses: github/codeql-action/upload-sarif@v3
if: always()
with:
sarif_file: ${{ steps.grype.outputs.sarif }}
- name: Generate human-readable report
if: always()
run: |
# Install the grype CLI (the scan-action step above does not add it to PATH)
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin
grype ${{ github.repository }}:${{ github.sha }} -o table > grype-report.txt
- name: Upload scan report
uses: actions/upload-artifact@v4
if: always()
with:
name: grype-scan-report
path: grype-report.txt
retention-days: 30
# =============================================================================
# GitLab CI
# =============================================================================
# .gitlab-ci.yml
stages:
- build
- scan
- deploy
variables:
IMAGE_NAME: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
GRYPE_VERSION: "latest"
build:
stage: build
image: docker:24
services:
- docker:24-dind
before_script:
# Authenticate to the GitLab registry so the push step succeeds
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
script:
- docker build -t $IMAGE_NAME .
- docker push $IMAGE_NAME
only:
- branches
grype-scan:
stage: scan
image: anchore/grype:$GRYPE_VERSION
script:
- grype $IMAGE_NAME --fail-on high -o json > grype-results.json
- grype $IMAGE_NAME -o table
artifacts:
reports:
container_scanning: grype-results.json
paths:
- grype-results.json
expire_in: 30 days
allow_failure: false
only:
- branches
deploy:
stage: deploy
script:
- echo "Deploying $IMAGE_NAME"
only:
- main
when: on_success
# =============================================================================
# Jenkins Pipeline
# =============================================================================
# Jenkinsfile
pipeline {
agent any
environment {
IMAGE_NAME = "myapp"
IMAGE_TAG = "${env.BUILD_NUMBER}"
GRYPE_VERSION = "latest"
}
stages {
stage('Build') {
steps {
script {
docker.build("${IMAGE_NAME}:${IMAGE_TAG}")
}
}
}
stage('Grype Scan') {
agent {
docker {
image "anchore/grype:${GRYPE_VERSION}"
args '-v /var/run/docker.sock:/var/run/docker.sock'
}
}
steps {
sh """
# Run scan with high severity threshold
grype ${IMAGE_NAME}:${IMAGE_TAG} \
--fail-on high \
-o json > grype-results.json
# Generate human-readable report
grype ${IMAGE_NAME}:${IMAGE_TAG} \
-o table > grype-report.txt
"""
}
post {
always {
archiveArtifacts artifacts: 'grype-*.json,grype-*.txt',
allowEmptyArchive: true
}
failure {
echo 'Grype scan found vulnerabilities above threshold'
}
}
}
stage('Deploy') {
when {
branch 'main'
}
steps {
echo "Deploying ${IMAGE_NAME}:${IMAGE_TAG}"
}
}
}
}
# =============================================================================
# CircleCI
# =============================================================================
# .circleci/config.yml
version: 2.1
orbs:
docker: circleci/docker@2.2.0
jobs:
build-and-scan:
docker:
- image: cimg/base:2024.01
steps:
- checkout
- setup_remote_docker:
docker_layer_caching: true
- run:
name: Build Docker Image
command: |
docker build -t myapp:${CIRCLE_SHA1} .
- run:
name: Install Grype
command: |
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin
- run:
name: Scan with Grype
command: |
grype myapp:${CIRCLE_SHA1} --fail-on critical -o json > grype-results.json
grype myapp:${CIRCLE_SHA1} -o table | tee grype-report.txt
- store_artifacts:
path: grype-results.json
destination: scan-results
- store_artifacts:
path: grype-report.txt
destination: scan-results
workflows:
build-scan-deploy:
jobs:
- build-and-scan:
filters:
branches:
only:
- main
- develop
# =============================================================================
# Azure Pipelines
# =============================================================================
# azure-pipelines.yml
trigger:
branches:
include:
- main
- develop
pool:
vmImage: 'ubuntu-latest'
variables:
imageName: 'myapp'
imageTag: '$(Build.BuildId)'
stages:
- stage: Build
jobs:
- job: BuildImage
steps:
- task: Docker@2
displayName: Build Docker image
inputs:
command: build
dockerfile: Dockerfile
tags: $(imageTag)
- stage: Scan
dependsOn: Build
jobs:
- job: GrypeScan
steps:
- script: |
# Install Grype
curl -sSfL https://raw.githubusercontent.com/anchore/grype/main/install.sh | sh -s -- -b /usr/local/bin
# Run scan
grype $(imageName):$(imageTag) \
--fail-on high \
-o json > $(Build.ArtifactStagingDirectory)/grype-results.json
grype $(imageName):$(imageTag) \
-o table > $(Build.ArtifactStagingDirectory)/grype-report.txt
displayName: 'Run Grype Scan'
- task: PublishBuildArtifacts@1
displayName: 'Publish Scan Results'
inputs:
PathtoPublish: '$(Build.ArtifactStagingDirectory)'
ArtifactName: 'grype-scan-results'
condition: always()
- stage: Deploy
dependsOn: Scan
condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/main'))
jobs:
- job: DeployProduction
steps:
- script: echo "Deploying to production"
displayName: 'Deploy'
# =============================================================================
# Tekton Pipeline
# =============================================================================
# tekton-pipeline.yaml
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: grype-scan-pipeline
spec:
params:
- name: image-name
type: string
description: Name of the image to scan
- name: image-tag
type: string
description: Tag of the image to scan
default: latest
workspaces:
- name: shared-workspace
tasks:
- name: build-image
taskRef:
name: buildah
workspaces:
- name: source
workspace: shared-workspace
params:
- name: IMAGE
value: $(params.image-name):$(params.image-tag)
- name: grype-scan
runAfter:
- build-image
taskRef:
name: grype-scan
params:
- name: IMAGE
value: $(params.image-name):$(params.image-tag)
- name: SEVERITY_THRESHOLD
value: high
workspaces:
- name: scan-results
workspace: shared-workspace
- name: deploy
runAfter:
- grype-scan
taskRef:
name: kubectl-deploy
params:
- name: IMAGE
value: $(params.image-name):$(params.image-tag)
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: grype-scan
spec:
params:
- name: IMAGE
description: Image to scan
- name: SEVERITY_THRESHOLD
description: Fail on this severity or higher
default: high
steps:
- name: scan
image: anchore/grype:latest
script: |
#!/bin/sh
grype $(params.IMAGE) \
--fail-on $(params.SEVERITY_THRESHOLD) \
-o json > $(workspaces.scan-results.path)/grype-results.json
grype $(params.IMAGE) -o table | tee $(workspaces.scan-results.path)/grype-report.txt
workspaces:
- name: scan-results
# =============================================================================
# Best Practices
# =============================================================================
# 1. Update vulnerability database regularly
# - Run grype db update before scans
# - Cache database between pipeline runs
# - Update database at least daily
# 2. Set appropriate fail thresholds
# - Production: --fail-on high (blocks both high and critical findings)
# - Development: --fail-on critical (high findings may be tolerated temporarily)
# - Monitor-only: No fail threshold, just report
# 3. Archive scan results
# - Store JSON for trend analysis
# - Keep reports for compliance audits
# - Retention: 30-90 days minimum
# 4. Integrate with security dashboards
# - Upload SARIF to GitHub Security
# - Send metrics to monitoring systems
# - Alert security team on critical findings
# 5. Scheduled scanning
# - Scan production images daily for new CVEs
# - Re-scan after vulnerability database updates
# - Track vulnerability trends over time
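# =============================================================================
# Sketch: Caching the Grype Database in GitHub Actions
# =============================================================================
# A minimal sketch of best practice 1 above (update and cache the vulnerability
# database between runs). Step names and the cache key are illustrative
# assumptions; ~/.cache/grype/db is the default database location on Linux runners.
#
#   - name: Cache Grype vulnerability database
#     uses: actions/cache@v4
#     with:
#       path: ~/.cache/grype/db
#       key: grype-db-${{ runner.os }}-${{ github.run_id }}
#       restore-keys: |
#         grype-db-${{ runner.os }}-
#   - name: Update Grype vulnerability database
#     run: grype db update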
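# =============================================================================
# Sketch: Alerting the Security Team on Critical Findings
# =============================================================================
# A minimal sketch of best practice 4 above. It assumes a JSON report like the
# grype-results.json produced in the examples and a SLACK_WEBHOOK_URL secret;
# the jq filter and payload are illustrative, not a prescribed format.
#
#   - name: Notify security team
#     if: failure()
#     run: |
#       critical=$(jq '[.matches[] | select(.vulnerability.severity == "Critical")] | length' grype-results.json)
#       curl -X POST -H 'Content-type: application/json' \
#         --data "{\"text\":\"Grype found ${critical} critical vulnerabilities in ${{ github.repository }}\"}" \
#         "${{ secrets.SLACK_WEBHOOK_URL }}"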

@@ -0,0 +1,255 @@
# Grype Configuration File (.grype.yaml)
#
# Place this file in your project root or specify with: grype <target> -c .grype.yaml
#
# Documentation: https://github.com/anchore/grype#configuration
# =============================================================================
# Ignore Rules - Suppress False Positives and Accepted Risks
# =============================================================================
ignore:
# Example 1: Ignore specific CVE globally
- vulnerability: CVE-2021-12345
reason: "False positive - vulnerable code path not used in our application"
# Example 2: Ignore CVE for specific package only
- vulnerability: CVE-2022-67890
package:
name: example-library
version: 1.2.3
reason: "Risk accepted - compensating WAF rules deployed to block exploitation"
# Example 3: Ignore CVE with expiration date (forces re-evaluation)
- vulnerability: CVE-2023-11111
package:
name: lodash
reason: "Temporary acceptance while migration to alternative library is in progress"
expires: 2025-12-31
# Example 4: Ignore by fix state
- fix-state: wont-fix
reason: "Maintainer has stated these will not be fixed"
# Example 5: Ignore vulnerabilities in test dependencies
- package:
name: pytest
type: python
reason: "Test-only dependency, not present in production"
# =============================================================================
# Match Configuration
# =============================================================================
match:
# Match vulnerabilities in OS packages
os:
enabled: true
# Match vulnerabilities in language packages
language:
enabled: true
# Control matching behavior
go:
# Use Go module proxy for additional metadata
use-network: true
main-module-version:
# Use version from go.mod if available
from-contents: true
java:
# Use Maven Central for additional metadata
use-network: true
python:
# Use PyPI for additional metadata
use-network: true
# =============================================================================
# Search Configuration
# =============================================================================
search:
# Search for packages in these locations
scope: all-layers # Options: all-layers, squashed
# Exclude paths from scanning
exclude:
# Exclude documentation directories
- "/usr/share/doc/**"
- "/usr/share/man/**"
# Exclude test directories
- "**/test/**"
- "**/tests/**"
- "**/__tests__/**"
# Exclude development tools not in production
- "**/node_modules/.bin/**"
# Exclude specific files
- "**/*.md"
- "**/*.txt"
# Index archives (tar, zip, jar, etc.)
index-archives: true
# Maximum depth to traverse nested archives
max-depth: 3
# =============================================================================
# Database Configuration
# =============================================================================
db:
# Cache directory for vulnerability database
cache-dir: ~/.grype/db
# Auto-update database
auto-update: true
# Validate database checksum
validate-by-hash-on-start: true
# Update check timeout
update-url-timeout: 30s
# =============================================================================
# Vulnerability Matching Configuration
# =============================================================================
# Adjust matcher configuration
dev:
# Profile memory usage (debugging)
profile-mem: false
# =============================================================================
# Output Configuration
# =============================================================================
output:
# Default output format
# Options: table, json, cyclonedx-json, cyclonedx-xml, sarif, template
format: table
# Show suppressed/ignored vulnerabilities in output
show-suppressed: false
# =============================================================================
# Fail-on Configuration
# =============================================================================
# Uncomment to set default fail-on severity
# fail-on: high # Options: negligible, low, medium, high, critical
# =============================================================================
# Registry Authentication
# =============================================================================
registry:
# Authenticate to private registries
# auth:
# - authority: registry.example.com
# username: user
# password: pass
#
# - authority: gcr.io
# token: <token>
# Use Docker config for authentication
insecure-use-http: false
# =============================================================================
# Example Configurations for Different Use Cases
# =============================================================================
# -----------------------------------------------------------------------------
# Use Case 1: Development Environment (Permissive)
# -----------------------------------------------------------------------------
#
# ignore:
# # Allow medium and below in dev
# - severity: medium
# reason: "Development environment - focus on high/critical only"
#
# fail-on: critical
#
# search:
# exclude:
# - "**/test/**"
# - "**/node_modules/**"
# -----------------------------------------------------------------------------
# Use Case 2: CI/CD Pipeline (Strict)
# -----------------------------------------------------------------------------
#
# fail-on: high
#
# ignore:
# # Only allow documented exceptions
# - vulnerability: CVE-2024-XXXX
# reason: "Documented risk acceptance by Security Team - Ticket SEC-123"
# expires: 2025-06-30
#
# output:
# format: json
# show-suppressed: true
# -----------------------------------------------------------------------------
# Use Case 3: Production Monitoring (Focus on Exploitability)
# -----------------------------------------------------------------------------
#
# match:
# # Focus on findings that already have a fix available
# only-fixed: true # Only show CVEs with available fixes
#
# ignore:
# # Ignore unfixable vulnerabilities with compensating controls
# - fix-state: wont-fix
# reason: "Compensating controls implemented - network isolation, WAF rules"
#
# output:
# format: json
# -----------------------------------------------------------------------------
# Use Case 4: Compliance Scanning (Comprehensive)
# -----------------------------------------------------------------------------
#
# search:
# scope: all-layers
# index-archives: true
# max-depth: 5
#
# output:
# format: cyclonedx-json
# show-suppressed: true
#
# # No ignores - report everything for compliance review
# =============================================================================
# Best Practices
# =============================================================================
# 1. Document all ignore rules with clear reasons
# - Include ticket numbers for risk acceptances
# - Set expiration dates for temporary ignores
# - Review ignores quarterly
# 2. Use package-specific ignores instead of global CVE ignores
# - Reduces risk of suppressing legitimate vulnerabilities in other packages
# - Example: CVE-2021-12345 in package-a (ignored) vs package-b (should alert)
# 3. Exclude non-production paths
# - Test directories, documentation, dev tools
# - Reduces noise and scan time
# 4. Keep configuration in version control
# - Track changes to ignore rules
# - Audit trail for risk acceptances
# - Share consistent configuration across team
# 5. Different configs for different environments
# - Development: More permissive, focus on critical
# - CI/CD: Strict, block on high/critical
# - Production: Monitor all, focus on exploitable CVEs
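# =============================================================================
# Sketch: Package-Scoped Ignore (practice 2 above)
# =============================================================================
# Illustrative only; the CVE ID and package names are placeholders. Scoping the
# ignore to package-a keeps the same CVE alerting if it ever appears in package-b.
#
# ignore:
#   - vulnerability: CVE-2021-12345
#     package:
#       name: package-a
#     reason: "Vulnerable code path not reachable from package-a in this application"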

@@ -0,0 +1,355 @@
# Security Rule Template
#
# This template demonstrates how to structure security rules/policies.
# Adapt this template to your specific security tool (Semgrep, OPA, etc.)
#
# Rule Structure Best Practices:
# - Clear rule ID and metadata
# - Severity classification
# - Framework mappings (OWASP, CWE)
# - Remediation guidance
# - Example vulnerable and fixed code
rules:
# Example Rule 1: SQL Injection Detection
- id: sql-injection-string-concatenation
metadata:
name: "SQL Injection via String Concatenation"
description: "Detects potential SQL injection vulnerabilities from string concatenation in SQL queries"
severity: "HIGH"
category: "security"
subcategory: "injection"
# Security Framework Mappings
owasp:
- "A03:2021 - Injection"
cwe:
- "CWE-89: SQL Injection"
mitre_attack:
- "T1190: Exploit Public-Facing Application"
# Compliance Standards
compliance:
- "PCI-DSS 6.5.1: Injection flaws"
- "NIST 800-53 SI-10: Information Input Validation"
# Confidence and Impact
confidence: "HIGH"
likelihood: "HIGH"
impact: "HIGH"
# References
references:
- "https://owasp.org/www-community/attacks/SQL_Injection"
- "https://cwe.mitre.org/data/definitions/89.html"
- "https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html"
# Languages this rule applies to
languages:
- python
- javascript
- java
- go
# Detection Pattern (example using Semgrep-style syntax)
pattern-either:
- pattern: |
cursor.execute($SQL + $VAR)
- pattern: |
cursor.execute(f"... {$VAR} ...")
- pattern: |
cursor.execute("..." + $VAR + "...")
# What to report when found
message: |
Potential SQL injection vulnerability detected. SQL query is constructed using
string concatenation or f-strings with user input. This allows attackers to
inject malicious SQL code.
Use parameterized queries instead:
- Python: cursor.execute("SELECT * FROM users WHERE id = ?", (user_id,))
- JavaScript: db.query("SELECT * FROM users WHERE id = $1", [userId])
See: https://owasp.org/www-community/attacks/SQL_Injection
# Suggested fix (auto-fix if supported)
fix: |
Use parameterized queries with placeholders
# Example vulnerable code
examples:
- vulnerable: |
# Vulnerable: String concatenation
user_id = request.GET['id']
query = "SELECT * FROM users WHERE id = " + user_id
cursor.execute(query)
- fixed: |
# Fixed: Parameterized query
user_id = request.GET['id']
query = "SELECT * FROM users WHERE id = ?"
cursor.execute(query, (user_id,))
# Example Rule 2: Hardcoded Secrets Detection
- id: hardcoded-secret-credential
metadata:
name: "Hardcoded Secret or Credential"
description: "Detects hardcoded secrets, API keys, passwords, or tokens in source code"
severity: "CRITICAL"
category: "security"
subcategory: "secrets"
owasp:
- "A07:2021 - Identification and Authentication Failures"
cwe:
- "CWE-798: Use of Hard-coded Credentials"
- "CWE-259: Use of Hard-coded Password"
compliance:
- "PCI-DSS 8.2.1: Use of strong cryptography"
- "SOC 2 CC6.1: Logical access controls"
- "GDPR Article 32: Security of processing"
confidence: "MEDIUM"
likelihood: "HIGH"
impact: "CRITICAL"
references:
- "https://cwe.mitre.org/data/definitions/798.html"
- "https://owasp.org/www-community/vulnerabilities/Use_of_hard-coded_password"
languages:
- python
- javascript
- java
- go
- ruby
pattern-either:
- pattern: |
password = "..."
- pattern: |
api_key = "..."
- pattern: |
secret = "..."
- pattern: |
token = "..."
pattern-not: |
$VAR = ""
message: |
Potential hardcoded secret detected. Hardcoding credentials in source code
is a critical security vulnerability that can lead to unauthorized access
if the code is exposed.
Use environment variables or a secrets management system instead:
- Python: os.environ.get('API_KEY')
- Node.js: process.env.API_KEY
- Secrets Manager: AWS Secrets Manager, HashiCorp Vault, etc.
See: https://cwe.mitre.org/data/definitions/798.html
examples:
- vulnerable: |
# Vulnerable: Hardcoded API key
api_key = "sk-1234567890abcdef"
api.authenticate(api_key)
- fixed: |
# Fixed: Environment variable
import os
api_key = os.environ.get('API_KEY')
if not api_key:
raise ValueError("API_KEY environment variable not set")
api.authenticate(api_key)
# Example Rule 3: XSS via Unsafe HTML Rendering
- id: xss-unsafe-html-rendering
metadata:
name: "Cross-Site Scripting (XSS) via Unsafe HTML"
description: "Detects unsafe HTML rendering that could lead to XSS vulnerabilities"
severity: "HIGH"
category: "security"
subcategory: "xss"
owasp:
- "A03:2021 - Injection"
cwe:
- "CWE-79: Cross-site Scripting (XSS)"
- "CWE-80: Improper Neutralization of Script-Related HTML Tags"
compliance:
- "PCI-DSS 6.5.7: Cross-site scripting"
- "NIST 800-53 SI-10: Information Input Validation"
confidence: "HIGH"
likelihood: "MEDIUM"
impact: "HIGH"
references:
- "https://owasp.org/www-community/attacks/xss/"
- "https://cwe.mitre.org/data/definitions/79.html"
- "https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html"
languages:
- javascript
- typescript
- jsx
- tsx
pattern-either:
- pattern: |
dangerouslySetInnerHTML={{__html: $VAR}}
- pattern: |
innerHTML = $VAR
message: |
Potential XSS vulnerability detected. Setting HTML content directly from
user input without sanitization can allow attackers to inject malicious
JavaScript code.
Use one of these safe alternatives:
- React: Use {userInput} for automatic escaping
- DOMPurify: const clean = DOMPurify.sanitize(dirty);
- Framework-specific sanitizers
See: https://owasp.org/www-community/attacks/xss/
examples:
- vulnerable: |
// Vulnerable: Unsanitized HTML
function UserComment({ comment }) {
return <div dangerouslySetInnerHTML={{__html: comment}} />;
}
- fixed: |
// Fixed: Sanitized with DOMPurify
import DOMPurify from 'dompurify';
function UserComment({ comment }) {
const sanitized = DOMPurify.sanitize(comment);
return <div dangerouslySetInnerHTML={{__html: sanitized}} />;
}
# Example Rule 4: Insecure Cryptography
- id: weak-cryptographic-algorithm
metadata:
name: "Weak Cryptographic Algorithm"
description: "Detects use of weak or deprecated cryptographic algorithms"
severity: "HIGH"
category: "security"
subcategory: "cryptography"
owasp:
- "A02:2021 - Cryptographic Failures"
cwe:
- "CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
- "CWE-326: Inadequate Encryption Strength"
compliance:
- "PCI-DSS 4.1: Use strong cryptography"
- "NIST 800-53 SC-13: Cryptographic Protection"
- "GDPR Article 32: Security of processing"
confidence: "HIGH"
likelihood: "MEDIUM"
impact: "HIGH"
references:
- "https://cwe.mitre.org/data/definitions/327.html"
- "https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/09-Testing_for_Weak_Cryptography/"
languages:
- python
- javascript
- java
pattern-either:
- pattern: |
hashlib.md5(...)
- pattern: |
hashlib.sha1(...)
- pattern: |
crypto.createHash('md5')
- pattern: |
crypto.createHash('sha1')
message: |
Weak cryptographic algorithm detected (MD5 or SHA1). These algorithms are
considered cryptographically broken and should not be used for security purposes.
Use strong alternatives:
- For hashing: SHA-256, SHA-384, or SHA-512
- For password hashing: bcrypt, argon2, or PBKDF2
- Python: hashlib.sha256()
- Node.js: crypto.createHash('sha256')
See: https://cwe.mitre.org/data/definitions/327.html
examples:
- vulnerable: |
# Vulnerable: MD5 hash
import hashlib
hash_value = hashlib.md5(data).hexdigest()
- fixed: |
# Fixed: SHA-256 hash
import hashlib
hash_value = hashlib.sha256(data).hexdigest()
# Rule Configuration
configuration:
# Global settings
enabled: true
severity_threshold: "MEDIUM" # Report findings at MEDIUM severity and above
# Performance tuning
max_file_size_kb: 1024
exclude_patterns:
- "test/*"
- "tests/*"
- "node_modules/*"
- "vendor/*"
- "*.min.js"
# False positive reduction
confidence_threshold: "MEDIUM" # Only report findings with MEDIUM confidence or higher
# Rule Metadata Schema
# This section documents the expected structure for rules
metadata_schema:
required:
- id: "Unique identifier for the rule (kebab-case)"
- name: "Human-readable rule name"
- description: "What the rule detects"
- severity: "CRITICAL | HIGH | MEDIUM | LOW | INFO"
- category: "security | best-practice | performance"
optional:
- subcategory: "Specific type (injection, xss, secrets, etc.)"
- owasp: "OWASP Top 10 mappings"
- cwe: "CWE identifier(s)"
- mitre_attack: "MITRE ATT&CK technique(s)"
- compliance: "Compliance standard references"
- confidence: "Detection confidence level"
- likelihood: "Likelihood of exploitation"
- impact: "Potential impact if exploited"
- references: "External documentation links"
# Usage Instructions:
#
# 1. Copy this template when creating new security rules
# 2. Update metadata fields with appropriate framework mappings
# 3. Customize detection patterns for your tool (Semgrep, OPA, etc.)
# 4. Provide clear remediation guidance in the message field
# 5. Include both vulnerable and fixed code examples
# 6. Test rules on real codebases before deployment
#
# Best Practices:
# - Map to multiple frameworks (OWASP, CWE, MITRE ATT&CK)
# - Include compliance standard references
# - Provide actionable remediation guidance
# - Show code examples (vulnerable vs. fixed)
# - Tune confidence levels to reduce false positives
# - Exclude test directories to reduce noise
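# Sketch: Validating Rules Before Deployment
#
# A minimal sketch of usage step 6 above for teams whose rules use Semgrep syntax;
# the security-rules/ path and step name are assumptions. `semgrep --validate`
# checks rule syntax, and `semgrep --test` runs each rule against annotated test files.
#
#   - name: Validate security rules
#     run: |
#       pip install semgrep
#       semgrep --validate --config security-rules/
#       semgrep --test security-rules/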