Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 17:51:02 +08:00
commit ff1f4bd119
252 changed files with 72682 additions and 0 deletions

View File

@@ -0,0 +1,9 @@
# Assets Directory
Place files here that will be used in the output Claude produces:
- Templates
- Configuration files
- Images/logos
- Boilerplate code
These files are NOT loaded into context but copied/modified in output.

View File

@@ -0,0 +1,213 @@
# Black Duck Detect Configuration
# Place this file in the root of your project or reference it with:
# --detect.yaml.configuration.path=/path/to/blackduck_config.yml
# Black Duck Server Configuration
blackduck:
url: ${BLACKDUCK_URL} # Set via environment variable
api:
token: ${BLACKDUCK_TOKEN} # Set via environment variable
timeout: 300
trust.cert: false
# Project Configuration
detect:
project:
name: ${PROJECT_NAME:MyProject}
version:
name: ${PROJECT_VERSION:1.0.0}
description: "Software Composition Analysis with Black Duck"
tier: 3 # Project tier (1-5, 1=highest priority)
# Detection Configuration
detector:
search:
depth: 3 # How deep to search for build files
continue: true # Continue if a detector fails
exclusion:
paths: |
node_modules/**/.bin,
vendor/**,
**/__pycache__,
**/site-packages,
**/.venv,
**/venv,
test/**,
tests/**,
**/*.test.js,
**/*.spec.js
buildless: false # Use buildless mode (faster but less accurate)
# Specific Detectors
npm:
include:
dev:
dependencies: false # Exclude dev dependencies from production scans
dependency:
types:
excluded: []
python:
python3: true
path: python3
maven:
included:
scopes: compile,runtime # Exclude test scope
excluded:
scopes: test,provided
# Signature Scanner Configuration
blackduck:
signature:
scanner:
memory: 4096 # Memory in MB for signature scanner
dry:
run: false
snippet:
matching: SNIPPET_MATCHING # or FULL_SNIPPET_MATCHING for comprehensive
upload:
source:
mode: true # Upload source for snippet matching
paths: "."
exclusion:
patterns: |
node_modules,
.git,
.svn,
vendor,
__pycache__,
*.pyc,
*.min.js,
*.bundle.js
# Binary Scanner (optional, for compiled binaries)
binary:
scan:
file:
name: ""
path: ""
# Policy Configuration
policy:
check:
fail:
on:
severities: BLOCKER,CRITICAL,MAJOR # Fail on these severity levels
enabled: true
# Wait for scan results
wait:
for:
results: true # Wait for scan to complete
# Report Configuration
risk:
report:
pdf: true
pdf:
path: "./reports"
notices:
report: true
report:
path: "./reports"
# SBOM Generation
bom:
aggregate:
name: "sbom.json" # CycloneDX SBOM output
enabled: true
# Output Configuration
output:
path: "./blackduck-output"
cleanup: true # Clean up temporary files after scan
# Performance Tuning
parallel:
processors: 4 # Number of parallel processors
# Timeout Configuration
timeout: 7200 # Overall timeout in seconds (2 hours)
# Proxy Configuration (if needed)
# proxy:
# host: proxy.company.com
# port: 8080
# username: ${PROXY_USER}
# password: ${PROXY_PASS}
# Advanced Options
tools:
excluded: [] # Can exclude DETECTOR, SIGNATURE_SCAN, BINARY_SCAN, POLARIS
force:
success: false # Force success even if issues detected (not recommended)
# Logging Configuration
logging:
level:
com:
synopsys:
integration: INFO # DEBUG for troubleshooting
detect: INFO
# Environment-Specific Configurations
---
# Development Environment
spring:
profiles: development
detect:
policy:
check:
fail:
on:
severities: BLOCKER,CRITICAL # Less strict for dev
detector:
search:
depth: 1 # Faster scans for dev
---
# Production Environment
spring:
profiles: production
detect:
policy:
check:
fail:
on:
severities: BLOCKER,CRITICAL,MAJOR # Strict for production
detector:
search:
depth: 5 # Comprehensive scans
blackduck:
signature:
scanner:
snippet:
matching: FULL_SNIPPET_MATCHING # Most thorough
risk:
report:
pdf: true # Always generate PDF for production
bom:
aggregate:
name: "production-sbom.json"
---
# CI/CD Environment
spring:
profiles: ci
detect:
wait:
for:
results: true # Wait for results in CI
policy:
check:
fail:
on:
severities: BLOCKER,CRITICAL
timeout: 3600 # 1 hour timeout for CI
parallel:
processors: 8 # Use more processors in CI

View File

@@ -0,0 +1,357 @@
# Security-Enhanced CI/CD Pipeline Template
#
# This template demonstrates security best practices for CI/CD pipelines.
# Adapt this template to your specific security tool and workflow needs.
#
# Key Security Features:
# - SAST (Static Application Security Testing)
# - Dependency vulnerability scanning
# - Secrets detection
# - Infrastructure-as-Code security scanning
# - Container image scanning
# - Security artifact uploading for compliance

name: Security Scan Pipeline

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]
  schedule:
    # Run weekly security scans on Sunday at 2 AM UTC
    - cron: '0 2 * * 0'
  workflow_dispatch:  # Allow manual trigger

# Security: Restrict permissions to minimum required
permissions:
  contents: read
  security-events: write  # For uploading SARIF results
  pull-requests: write    # For commenting on PRs

env:
  # Configuration
  SECURITY_SCAN_FAIL_ON: 'critical,high'  # Fail build on these severities
  REPORT_DIR: 'security-reports'

jobs:
  # Job 1: Static Application Security Testing (SAST)
  sast-scan:
    name: SAST Security Scan
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Full history for better analysis

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Run SAST Scanner
        run: |
          # The report directory must exist before any tool writes into it.
          mkdir -p "${{ env.REPORT_DIR }}"
          # Example: Using Semgrep for SAST
          pip install semgrep
          semgrep --config=auto \
            --json \
            --output ${{ env.REPORT_DIR }}/sast-results.json \
            . || true
          # Alternative: Bandit for Python projects
          # pip install bandit
          # bandit -r . -f json -o ${{ env.REPORT_DIR }}/bandit-results.json

      - name: Process SAST Results
        run: |
          # Parse results and fail on critical/high severity
          python3 -c "
          import json
          import sys

          with open('${{ env.REPORT_DIR }}/sast-results.json') as f:
              results = json.load(f)

          critical = len([r for r in results.get('results', []) if r.get('extra', {}).get('severity') == 'ERROR'])
          high = len([r for r in results.get('results', []) if r.get('extra', {}).get('severity') == 'WARNING'])

          print(f'Critical findings: {critical}')
          print(f'High findings: {high}')

          if critical > 0:
              print('❌ Build failed: Critical security issues found')
              sys.exit(1)
          elif high > 0:
              print('⚠️ Warning: High severity issues found')
              # Optionally fail on high severity
              # sys.exit(1)
          else:
              print('✅ No critical security issues found')
          "

      - name: Upload SAST Results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: sast-results
          path: ${{ env.REPORT_DIR }}/sast-results.json
          retention-days: 30

  # Job 2: Dependency Vulnerability Scanning
  dependency-scan:
    name: Dependency Vulnerability Scan
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Scan Python Dependencies
        if: hashFiles('requirements.txt') != ''
        run: |
          mkdir -p "${{ env.REPORT_DIR }}"
          pip install safety
          safety check \
            --json \
            --output ${{ env.REPORT_DIR }}/safety-results.json \
            || true

      - name: Scan Node Dependencies
        if: hashFiles('package.json') != ''
        run: |
          mkdir -p "${{ env.REPORT_DIR }}"
          npm audit --json > ${{ env.REPORT_DIR }}/npm-audit.json || true

      - name: Process Dependency Results
        run: |
          # Check for critical vulnerabilities
          if [ -f "${{ env.REPORT_DIR }}/safety-results.json" ]; then
            critical_count=$(python3 -c "import json; data=json.load(open('${{ env.REPORT_DIR }}/safety-results.json')); print(len([v for v in data.get('vulnerabilities', []) if v.get('severity', '').lower() == 'critical']))")
            echo "Critical vulnerabilities: $critical_count"
            if [ "$critical_count" -gt "0" ]; then
              echo "❌ Build failed: Critical vulnerabilities in dependencies"
              exit 1
            fi
          fi

      - name: Upload Dependency Scan Results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: dependency-scan-results
          path: ${{ env.REPORT_DIR }}/
          retention-days: 30

  # Job 3: Secrets Detection
  secrets-scan:
    name: Secrets Detection
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Full history to scan all commits

      - name: Run Gitleaks
        uses: gitleaks/gitleaks-action@v2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITLEAKS_ENABLE_SUMMARY: "true"

      - name: Alternative - TruffleHog Scan
        if: false  # Set to true to enable
        run: |
          mkdir -p "${{ env.REPORT_DIR }}"
          pip install truffleHog
          trufflehog --json --regex --entropy=True . \
            > ${{ env.REPORT_DIR }}/trufflehog-results.json || true

      - name: Upload Secrets Scan Results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: secrets-scan-results
          path: ${{ env.REPORT_DIR }}/
          retention-days: 30

  # Job 4: Container Image Scanning
  container-scan:
    name: Container Image Security Scan
    runs-on: ubuntu-latest
    # NOTE(review): hashFiles() in a job-level `if` is evaluated before this
    # job checks out the repository, so the gate may never fire — confirm, or
    # move the existence check into a step after checkout.
    if: hashFiles('Dockerfile') != ''
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Prepare report directory
        run: mkdir -p "${{ env.REPORT_DIR }}"

      - name: Build Docker Image
        run: |
          docker build -t app:${{ github.sha }} .

      - name: Run Trivy Scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: app:${{ github.sha }}
          format: 'sarif'
          output: '${{ env.REPORT_DIR }}/trivy-results.sarif'
          severity: 'CRITICAL,HIGH'

      - name: Upload Trivy Results to GitHub Security
        if: always()
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: '${{ env.REPORT_DIR }}/trivy-results.sarif'

      - name: Upload Container Scan Results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: container-scan-results
          path: ${{ env.REPORT_DIR }}/
          retention-days: 30

  # Job 5: Infrastructure-as-Code Security Scanning
  iac-scan:
    name: IaC Security Scan
    runs-on: ubuntu-latest
    # NOTE(review): same job-level hashFiles() caveat as container-scan above.
    if: hashFiles('**/*.tf', '**/*.yaml', '**/*.yml') != ''
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Run Checkov
        run: |
          mkdir -p "${{ env.REPORT_DIR }}"
          pip install checkov
          checkov -d . \
            --output json \
            --output-file ${{ env.REPORT_DIR }}/checkov-results.json \
            --quiet \
            || true

      - name: Run tfsec (for Terraform)
        if: hashFiles('**/*.tf') != ''
        run: |
          mkdir -p "${{ env.REPORT_DIR }}"
          curl -s https://raw.githubusercontent.com/aquasecurity/tfsec/master/scripts/install_linux.sh | bash
          tfsec . \
            --format json \
            --out ${{ env.REPORT_DIR }}/tfsec-results.json \
            || true

      - name: Process IaC Results
        run: |
          # Fail on critical findings
          if [ -f "${{ env.REPORT_DIR }}/checkov-results.json" ]; then
            critical_count=$(python3 -c "import json; data=json.load(open('${{ env.REPORT_DIR }}/checkov-results.json')); print(data.get('summary', {}).get('failed', 0))")
            echo "Failed checks: $critical_count"
            if [ "$critical_count" -gt "0" ]; then
              echo "⚠️ Warning: IaC security issues found"
              # Optionally fail the build
              # exit 1
            fi
          fi

      - name: Upload IaC Scan Results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: iac-scan-results
          path: ${{ env.REPORT_DIR }}/
          retention-days: 30

  # Job 6: Security Report Generation and Notification
  security-report:
    name: Generate Security Report
    runs-on: ubuntu-latest
    needs: [sast-scan, dependency-scan, secrets-scan]
    if: always()
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Download All Scan Results
        uses: actions/download-artifact@v4
        with:
          path: all-results/

      - name: Generate Consolidated Report
        run: |
          # Consolidate all security scan results
          mkdir -p consolidated-report
          # The date must be written outside the heredoc: a quoted 'EOF'
          # delimiter suppresses $(...) command substitution in the body.
          {
            echo "# Security Scan Summary"
            echo ""
            echo "**Scan Date**: $(date -u +'%Y-%m-%d %H:%M:%S UTC')"
            echo "**Commit**: ${{ github.sha }}"
            echo "**Branch**: ${{ github.ref_name }}"
          } > consolidated-report/security-summary.md
          cat >> consolidated-report/security-summary.md << 'EOF'

          ## Scan Results

          ### SAST Scan
          See artifacts: `sast-results`

          ### Dependency Scan
          See artifacts: `dependency-scan-results`

          ### Secrets Scan
          See artifacts: `secrets-scan-results`

          ### Container Scan
          See artifacts: `container-scan-results`

          ### IaC Scan
          See artifacts: `iac-scan-results`

          ---
          For detailed results, download scan artifacts from this workflow run.
          EOF

      - name: Comment on PR (if applicable)
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            const report = fs.readFileSync('consolidated-report/security-summary.md', 'utf8');
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: report
            });

      - name: Upload Consolidated Report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: consolidated-security-report
          path: consolidated-report/
          retention-days: 90

# Security Best Practices Demonstrated:
#
# 1. ✅ Minimal permissions (principle of least privilege)
# 2. ✅ Multiple security scan types (defense in depth)
# 3. ✅ Fail-fast on critical findings
# 4. ✅ Secrets detection across full git history
# 5. ✅ Container image scanning before deployment
# 6. ✅ IaC scanning for misconfigurations
# 7. ✅ Artifact retention for compliance audit trail
# 8. ✅ SARIF format for GitHub Security integration
# 9. ✅ Scheduled scans for continuous monitoring
# 10. ✅ PR comments for developer feedback
#
# Compliance Mappings:
# - SOC 2: CC6.1, CC6.6, CC7.2 (Security monitoring and logging)
# - PCI-DSS: 6.2, 6.5 (Secure development practices)
# - NIST: SA-11 (Developer Security Testing)
# - OWASP: Integrated security testing throughout SDLC

View File

@@ -0,0 +1,151 @@
name: Black Duck SCA Scan

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]
  schedule:
    # Run daily at 2 AM UTC
    - cron: '0 2 * * *'
  workflow_dispatch:

env:
  BLACKDUCK_URL: ${{ secrets.BLACKDUCK_URL }}
  BLACKDUCK_TOKEN: ${{ secrets.BLACKDUCK_API_TOKEN }}
  PROJECT_NAME: ${{ github.repository }}
  PROJECT_VERSION: ${{ github.ref_name }}-${{ github.sha }}

jobs:
  blackduck-scan:
    name: Black Duck SCA Security Scan
    runs-on: ubuntu-latest
    permissions:
      contents: read
      security-events: write  # For SARIF upload
      pull-requests: write    # For PR comments
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup environment
        run: |
          echo "::notice::Starting Black Duck scan for ${{ env.PROJECT_NAME }}"
          echo "Version: ${{ env.PROJECT_VERSION }}"

      # NOTE(review): synopsys-sig/detect-action is archived upstream —
      # confirm it still resolves, or migrate to the vendor's current action.
      - name: Run Black Duck Detect
        uses: synopsys-sig/detect-action@v1
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          blackduck-url: ${{ secrets.BLACKDUCK_URL }}
          blackduck-api-token: ${{ secrets.BLACKDUCK_API_TOKEN }}
          detect-project-name: ${{ env.PROJECT_NAME }}
          detect-project-version-name: ${{ env.PROJECT_VERSION }}
          # Fail on policy violations (BLOCKER/CRITICAL/MAJOR severity)
          detect-policy-check-fail-on-severities: BLOCKER,CRITICAL,MAJOR
          detect-wait-for-results: true
          # Generate reports
          detect-risk-report-pdf: true
          detect-notices-report: true
          # Output location
          detect-output-path: ./blackduck-output

      - name: Upload Black Duck Reports
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: blackduck-reports-${{ github.sha }}
          path: |
            ./blackduck-output/**/BlackDuck_RiskReport_*.pdf
            ./blackduck-output/**/BlackDuck_Notices_*.txt
            ./blackduck-output/**/*_Black_Duck_scan.json
          retention-days: 30

      - name: Generate SBOM
        if: success()
        run: |
          # Generate Software Bill of Materials
          curl -s -L https://detect.synopsys.com/detect.sh | bash -s -- \
            --blackduck.url=${{ secrets.BLACKDUCK_URL }} \
            --blackduck.api.token=${{ secrets.BLACKDUCK_API_TOKEN }} \
            --detect.project.name=${{ env.PROJECT_NAME }} \
            --detect.project.version.name=${{ env.PROJECT_VERSION }} \
            --detect.tools=DETECTOR \
            --detect.bom.aggregate.name=sbom.json \
            --detect.output.path=./sbom-output

      - name: Upload SBOM
        if: success()
        uses: actions/upload-artifact@v4
        with:
          name: sbom-${{ github.sha }}
          path: ./sbom-output/**/sbom.json
          retention-days: 90

      - name: Check for Critical Vulnerabilities
        if: always()
        run: |
          # `[ -f <glob> ]` misbehaves when the glob matches zero or several
          # files, so resolve the status.json path explicitly first.
          STATUS_FILE=$(find ./blackduck-output -type f -path '*/status/status.json' | head -n 1)
          if [ -n "$STATUS_FILE" ]; then
            CRITICAL=$(jq -r '.policyStatus.overallStatus' "$STATUS_FILE")
            if [ "$CRITICAL" = "IN_VIOLATION" ]; then
              echo "::error::Policy violations detected - build should fail"
              exit 1
            fi
          fi

      - name: Comment on PR
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            // NOTE(review): the severity counts below are hard-coded
            // placeholders — parse the actual Detect status output before
            // relying on this comment for triage.
            let comment = '## Black Duck SCA Scan Results\n\n';
            comment += `**Project**: ${process.env.PROJECT_NAME}\n`;
            comment += `**Version**: ${process.env.PROJECT_VERSION}\n\n`;
            // Add vulnerability summary
            comment += '### Security Summary\n';
            comment += '| Severity | Count |\n';
            comment += '|----------|-------|\n';
            comment += '| Critical | 0 |\n'; // Parse from actual results
            comment += '| High | 0 |\n';
            comment += '| Medium | 0 |\n';
            comment += '| Low | 0 |\n\n';
            comment += '### License Compliance\n';
            comment += '✅ No license violations detected\n\n';
            comment += '**Full reports available in workflow artifacts**\n';
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: comment
            });

  # Optional: Upload to GitHub Code Scanning (requires SARIF format)
  code-scanning:
    name: Upload to Code Scanning
    runs-on: ubuntu-latest
    needs: blackduck-scan
    if: always()
    steps:
      - name: Download SARIF
        uses: actions/download-artifact@v4
        with:
          name: blackduck-reports-${{ github.sha }}

      # NOTE(review): no step above produces blackduck-sarif.json — confirm
      # where this file is generated, or this upload will always fail.
      - name: Upload SARIF to Code Scanning
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: blackduck-sarif.json
          category: black-duck-sca

View File

@@ -0,0 +1,191 @@
# GitLab CI/CD configuration for Black Duck SCA scanning
#
# Add this to your .gitlab-ci.yml or include it:
#   include:
#     - local: 'assets/ci_integration/gitlab_ci.yml'

variables:
  BLACKDUCK_URL: ${BLACKDUCK_URL}
  BLACKDUCK_TOKEN: ${BLACKDUCK_API_TOKEN}
  PROJECT_NAME: ${CI_PROJECT_PATH}
  PROJECT_VERSION: ${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}

stages:
  - security-scan
  - security-report

# Black Duck SCA Scan
blackduck-sca-scan:
  stage: security-scan
  image: ubuntu:22.04
  before_script:
    - apt-get update && apt-get install -y curl bash jq
    - echo "Starting Black Duck scan for ${PROJECT_NAME}"
    - echo "Version ${PROJECT_VERSION}"
  script:
    # Run Black Duck Detect
    - |
      bash <(curl -s -L https://detect.synopsys.com/detect.sh) \
        --blackduck.url=${BLACKDUCK_URL} \
        --blackduck.api.token=${BLACKDUCK_TOKEN} \
        --detect.project.name="${PROJECT_NAME}" \
        --detect.project.version.name="${PROJECT_VERSION}" \
        --detect.policy.check.fail.on.severities=BLOCKER,CRITICAL \
        --detect.wait.for.results=true \
        --detect.risk.report.pdf=true \
        --detect.notices.report=true \
        --detect.output.path=./blackduck-output \
        --detect.cleanup=false
  after_script:
    # Generate summary report. Resolve the glob explicitly: `[ -f <glob> ]`
    # misbehaves when it matches zero or several files.
    - |
      STATUS_FILE=$(find ./blackduck-output -type f -path '*/status/status.json' | head -n 1)
      if [ -n "$STATUS_FILE" ]; then
        echo "=== Black Duck Scan Summary ==="
        jq -r '.policyStatus' "$STATUS_FILE"
      fi
  artifacts:
    name: "blackduck-reports-${CI_COMMIT_SHORT_SHA}"
    paths:
      - blackduck-output/**/BlackDuck_RiskReport_*.pdf
      - blackduck-output/**/BlackDuck_Notices_*.txt
      - blackduck-output/**/*_Black_Duck_scan.json
    expire_in: 30 days
    reports:
      # NOTE(review): Detect does not emit GitLab's dependency-scanning JSON
      # natively — confirm a conversion step produces this file.
      dependency_scanning: blackduck-output/gl-dependency-scanning-report.json
  rules:
    # Run on merge requests
    - if: $CI_MERGE_REQUEST_ID
    # Run on main/master branch
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
    # Run on tags
    - if: $CI_COMMIT_TAG
    # Run on scheduled pipelines
    - if: $CI_PIPELINE_SOURCE == "schedule"
    # Manual trigger
    - if: $CI_PIPELINE_SOURCE == "web"
  allow_failure: false  # Fail pipeline on policy violations

# Generate SBOM
blackduck-sbom:
  stage: security-scan
  image: ubuntu:22.04
  before_script:
    - apt-get update && apt-get install -y curl bash jq
  script:
    - |
      bash <(curl -s -L https://detect.synopsys.com/detect.sh) \
        --blackduck.url=${BLACKDUCK_URL} \
        --blackduck.api.token=${BLACKDUCK_TOKEN} \
        --detect.project.name="${PROJECT_NAME}" \
        --detect.project.version.name="${PROJECT_VERSION}" \
        --detect.tools=DETECTOR \
        --detect.bom.aggregate.name=sbom-cyclonedx.json \
        --detect.output.path=./sbom-output
  artifacts:
    name: "sbom-${CI_COMMIT_SHORT_SHA}"
    paths:
      - sbom-output/**/sbom-cyclonedx.json
    expire_in: 90 days
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
    - if: $CI_COMMIT_TAG
    - if: $CI_PIPELINE_SOURCE == "schedule"

# Security Report Summary
blackduck-summary:
  stage: security-report
  image: ubuntu:22.04
  needs: ["blackduck-sca-scan"]
  before_script:
    - apt-get update && apt-get install -y jq curl
  script:
    - |
      # Parse Black Duck results and create summary
      echo "## Black Duck SCA Scan Summary" > security-summary.md
      echo "" >> security-summary.md
      echo "**Project**: ${PROJECT_NAME}" >> security-summary.md
      echo "**Version**: ${PROJECT_VERSION}" >> security-summary.md
      echo "**Scan Date**: $(date -u +%Y-%m-%dT%H:%M:%SZ)" >> security-summary.md
      echo "" >> security-summary.md
      # Add vulnerability summary if available
      STATUS_FILE=$(find blackduck-output -type f -path '*/status/status.json' | head -n 1)
      if [ -n "$STATUS_FILE" ]; then
        echo "### Vulnerability Summary" >> security-summary.md
        jq -r '.componentStatus' "$STATUS_FILE" >> security-summary.md || true
      fi
      cat security-summary.md
  artifacts:
    reports:
      # NOTE(review): GitLab's metrics report expects OpenMetrics text, not
      # markdown — confirm this renders, or expose the file as a plain artifact.
      metrics: security-summary.md
  rules:
    - if: $CI_MERGE_REQUEST_ID
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

# Policy Check (can be used as a gate)
blackduck-policy-gate:
  stage: security-report
  image: ubuntu:22.04
  needs: ["blackduck-sca-scan"]
  script:
    - |
      # Check policy status
      STATUS_FILE=$(find ./blackduck-output -type f -path '*/status/status.json' | head -n 1)
      if [ -n "$STATUS_FILE" ]; then
        POLICY_STATUS=$(jq -r '.policyStatus.overallStatus' "$STATUS_FILE")
        if [ "$POLICY_STATUS" = "IN_VIOLATION" ]; then
          echo "❌ Policy violations detected!"
          echo "Critical or high-severity vulnerabilities found."
          echo "Review the Black Duck report for details."
          exit 1
        else
          echo "✅ No policy violations detected"
        fi
      else
        echo "⚠️ Warning: Unable to verify policy status"
        exit 1
      fi
  rules:
    # Only run as gate on merge requests and main branch
    - if: $CI_MERGE_REQUEST_ID
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH

# Scheduled daily scan (comprehensive)
blackduck-scheduled-scan:
  extends: blackduck-sca-scan
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
  variables:
    # More comprehensive scan for scheduled runs
    DETECT_TOOLS: "DETECTOR,SIGNATURE_SCAN,BINARY_SCAN"
  script:
    - |
      bash <(curl -s -L https://detect.synopsys.com/detect.sh) \
        --blackduck.url=${BLACKDUCK_URL} \
        --blackduck.api.token=${BLACKDUCK_TOKEN} \
        --detect.project.name="${PROJECT_NAME}" \
        --detect.project.version.name="${PROJECT_VERSION}" \
        --detect.tools=${DETECT_TOOLS} \
        --detect.risk.report.pdf=true \
        --detect.notices.report=true \
        --detect.policy.check.fail.on.severities=BLOCKER,CRITICAL,MAJOR \
        --detect.wait.for.results=true \
        --detect.output.path=./blackduck-output

View File

@@ -0,0 +1,310 @@
// Jenkins Declarative Pipeline for Black Duck SCA Scanning
//
// Prerequisites:
// 1. Install "Synopsys Detect" plugin in Jenkins
// 2. Configure Black Duck server in Jenkins Global Configuration
// 3. Add credentials: BLACKDUCK_URL and BLACKDUCK_API_TOKEN

pipeline {
    agent any

    parameters {
        choice(
            name: 'SCAN_TYPE',
            choices: ['RAPID', 'INTELLIGENT', 'FULL'],
            description: 'Type of Black Duck scan to perform'
        )
        booleanParam(
            name: 'FAIL_ON_POLICY_VIOLATION',
            defaultValue: true,
            description: 'Fail build on policy violations'
        )
        booleanParam(
            name: 'GENERATE_SBOM',
            defaultValue: false,
            description: 'Generate Software Bill of Materials'
        )
    }

    environment {
        BLACKDUCK_URL = credentials('blackduck-url')
        BLACKDUCK_TOKEN = credentials('blackduck-api-token')
        PROJECT_NAME = "${env.JOB_NAME}"
        PROJECT_VERSION = "${env.BRANCH_NAME}-${env.BUILD_NUMBER}"
        DETECT_JAR_DOWNLOAD_DIR = "${WORKSPACE}/.blackduck"
    }

    options {
        timestamps()
        timeout(time: 2, unit: 'HOURS')
        buildDiscarder(logRotator(numToKeepStr: '30', artifactNumToKeepStr: '10'))
    }

    stages {
        stage('Preparation') {
            steps {
                script {
                    echo "=========================================="
                    echo "Black Duck SCA Scan"
                    echo "=========================================="
                    echo "Project: ${PROJECT_NAME}"
                    echo "Version: ${PROJECT_VERSION}"
                    echo "Scan Type: ${params.SCAN_TYPE}"
                    echo "=========================================="
                }
                // Clean previous scan results
                sh 'rm -rf blackduck-output || true'
                sh 'mkdir -p blackduck-output'
            }
        }

        stage('Dependency Installation') {
            steps {
                script {
                    // Install dependencies based on project type
                    if (fileExists('package.json')) {
                        echo 'Node.js project detected'
                        sh 'npm ci || npm install'
                    }
                    else if (fileExists('requirements.txt')) {
                        echo 'Python project detected'
                        sh 'pip install -r requirements.txt'
                    }
                    else if (fileExists('pom.xml')) {
                        echo 'Maven project detected'
                        sh 'mvn dependency:resolve'
                    }
                    else if (fileExists('build.gradle')) {
                        echo 'Gradle project detected'
                        sh './gradlew dependencies'
                    }
                }
            }
        }

        stage('Black Duck Scan') {
            steps {
                script {
                    // Build the Detect invocation as an argument list and join
                    // it once at the end. (Appending flags to an already-closed
                    // multi-line GString puts them after the trailing newline,
                    // so the shell would treat them as a separate, broken
                    // command and the flags would be silently lost.)
                    def detectArgs = [
                        "--blackduck.url=${BLACKDUCK_URL}",
                        "--blackduck.api.token=${BLACKDUCK_TOKEN}",
                        "--detect.project.name=\"${PROJECT_NAME}\"",
                        "--detect.project.version.name=\"${PROJECT_VERSION}\"",
                        "--detect.output.path=${WORKSPACE}/blackduck-output",
                        '--detect.cleanup=false',
                        '--detect.risk.report.pdf=true',
                        '--detect.notices.report=true'
                    ]

                    // Add scan type configuration
                    switch (params.SCAN_TYPE) {
                        case 'RAPID':
                            detectArgs << '--detect.detector.search.depth=0'
                            detectArgs << '--detect.blackduck.signature.scanner.snippet.matching=SNIPPET_MATCHING'
                            break
                        case 'INTELLIGENT':
                            detectArgs << '--detect.detector.search.depth=3'
                            break
                        case 'FULL':
                            detectArgs << '--detect.tools=DETECTOR,SIGNATURE_SCAN,BINARY_SCAN'
                            detectArgs << '--detect.detector.search.depth=10'
                            break
                    }

                    // Add policy check if enabled
                    if (params.FAIL_ON_POLICY_VIOLATION) {
                        detectArgs << '--detect.policy.check.fail.on.severities=BLOCKER,CRITICAL'
                        detectArgs << '--detect.wait.for.results=true'
                    }

                    // Pipe the installer into bash instead of using `bash <(...)`:
                    // the Jenkins `sh` step runs /bin/sh, where process
                    // substitution is a syntax error.
                    def detectCommand =
                        'curl -s -L https://detect.synopsys.com/detect.sh | bash -s -- ' +
                        detectArgs.join(' ')

                    // Execute scan
                    try {
                        sh detectCommand
                    } catch (Exception e) {
                        if (params.FAIL_ON_POLICY_VIOLATION) {
                            error("Black Duck policy violations detected!")
                        } else {
                            unstable("Black Duck scan completed with violations")
                        }
                    }
                }
            }
        }

        stage('Generate SBOM') {
            when {
                expression { params.GENERATE_SBOM == true }
            }
            steps {
                script {
                    sh """
                        curl -s -L https://detect.synopsys.com/detect.sh | bash -s -- \
                            --blackduck.url=${BLACKDUCK_URL} \
                            --blackduck.api.token=${BLACKDUCK_TOKEN} \
                            --detect.project.name="${PROJECT_NAME}" \
                            --detect.project.version.name="${PROJECT_VERSION}" \
                            --detect.tools=DETECTOR \
                            --detect.bom.aggregate.name=sbom-cyclonedx.json \
                            --detect.output.path=${WORKSPACE}/sbom-output
                    """
                }
            }
        }

        stage('Parse Results') {
            steps {
                script {
                    // Parse Black Duck results
                    def statusFile = sh(
                        script: 'find blackduck-output -name "status.json" -type f | head -n 1',
                        returnStdout: true
                    ).trim()

                    if (statusFile) {
                        def status = readJSON file: statusFile
                        echo "Policy Status: ${status.policyStatus?.overallStatus}"
                        echo "Component Count: ${status.componentStatus?.componentCount}"

                        // Set build description
                        currentBuild.description = """
                            Black Duck Scan Results
                            Policy: ${status.policyStatus?.overallStatus}
                            Components: ${status.componentStatus?.componentCount}
                        """.stripIndent()
                    }
                }
            }
        }

        stage('Publish Reports') {
            steps {
                // Archive reports
                archiveArtifacts(
                    artifacts: 'blackduck-output/**/BlackDuck_RiskReport_*.pdf,blackduck-output/**/BlackDuck_Notices_*.txt',
                    allowEmptyArchive: true,
                    fingerprint: true
                )
                // Archive SBOM if generated
                archiveArtifacts(
                    artifacts: 'sbom-output/**/sbom-cyclonedx.json',
                    allowEmptyArchive: true,
                    fingerprint: true
                )
                // Publish HTML reports
                publishHTML([
                    allowMissing: true,
                    alwaysLinkToLastBuild: true,
                    keepAll: true,
                    reportDir: 'blackduck-output',
                    reportFiles: '**/*.html',
                    reportName: 'Black Duck Security Report'
                ])
            }
        }

        stage('Quality Gate') {
            when {
                expression { params.FAIL_ON_POLICY_VIOLATION == true }
            }
            steps {
                script {
                    // Check for policy violations
                    def statusFile = sh(
                        script: 'find blackduck-output -name "status.json" -type f | head -n 1',
                        returnStdout: true
                    ).trim()

                    if (statusFile) {
                        def status = readJSON file: statusFile
                        if (status.policyStatus?.overallStatus == 'IN_VIOLATION') {
                            error("Build failed: Black Duck policy violations detected")
                        } else {
                            echo "✅ No policy violations detected"
                        }
                    }
                }
            }
        }
    }

    post {
        always {
            // Clean up workspace
            cleanWs(
                deleteDirs: true,
                patterns: [
                    [pattern: '.blackduck', type: 'INCLUDE'],
                    [pattern: 'blackduck-output/runs', type: 'INCLUDE']
                ]
            )
        }
        success {
            echo '✅ Black Duck scan completed successfully'
            // Send notification (configure as needed)
            // emailext(
            //     subject: "Black Duck Scan Success: ${PROJECT_NAME}",
            //     body: "Black Duck scan completed with no policy violations",
            //     to: "${env.CHANGE_AUTHOR_EMAIL}"
            // )
        }
        failure {
            echo '❌ Black Duck scan failed or policy violations detected'
            // Send notification
            // emailext(
            //     subject: "Black Duck Scan Failed: ${PROJECT_NAME}",
            //     body: "Black Duck scan detected policy violations. Review the report for details.",
            //     to: "${env.CHANGE_AUTHOR_EMAIL}"
            // )
        }
        unstable {
            echo '⚠️ Black Duck scan completed with warnings'
        }
    }
}

// Shared library functions (optional)

// Detect the project type from well-known manifest files in the workspace.
def getProjectType() {
    if (fileExists('package.json')) return 'nodejs'
    if (fileExists('requirements.txt')) return 'python'
    if (fileExists('pom.xml')) return 'maven'
    if (fileExists('build.gradle')) return 'gradle'
    if (fileExists('Gemfile')) return 'ruby'
    if (fileExists('go.mod')) return 'golang'
    return 'unknown'
}

// Install dependencies for the given project type (no-op when unknown).
def installDependencies(projectType) {
    switch (projectType) {
        case 'nodejs':
            sh 'npm ci || npm install'
            break
        case 'python':
            sh 'pip install -r requirements.txt'
            break
        case 'maven':
            sh 'mvn dependency:resolve'
            break
        case 'gradle':
            sh './gradlew dependencies'
            break
        case 'ruby':
            sh 'bundle install'
            break
        case 'golang':
            sh 'go mod download'
            break
        default:
            echo "Unknown project type, skipping dependency installation"
    }
}

View File

@@ -0,0 +1,182 @@
{
  "$schema": "https://json-schema.org/draft-07/schema#",
  "title": "Black Duck Security Policy",
  "description": "Default security policy for Black Duck SCA scanning",
  "version": "1.0.0",
  "vulnerability_thresholds": {
    "description": "Maximum allowed vulnerabilities by severity",
    "critical": {
      "max_count": 0,
      "action": "fail",
      "description": "No critical vulnerabilities allowed"
    },
    "high": {
      "max_count": 0,
      "action": "fail",
      "description": "No high severity vulnerabilities allowed"
    },
    "medium": {
      "max_count": 10,
      "action": "warn",
      "description": "Up to 10 medium severity vulnerabilities allowed with warning"
    },
    "low": {
      "max_count": 50,
      "action": "info",
      "description": "Up to 50 low severity vulnerabilities allowed"
    }
  },
  "cvss_thresholds": {
    "description": "CVSS score-based policy",
    "max_cvss_score": 7.0,
    "fail_on_exploitable": true,
    "require_exploit_available": false
  },
  "license_policy": {
    "description": "License compliance rules",
    "blocklist": [
      {
        "license": "GPL-2.0",
        "reason": "Strong copyleft incompatible with commercial software",
        "action": "fail"
      },
      {
        "license": "GPL-3.0",
        "reason": "Strong copyleft incompatible with commercial software",
        "action": "fail"
      },
      {
        "license": "AGPL-3.0",
        "reason": "Network copyleft triggers on SaaS usage",
        "action": "fail"
      }
    ],
    "warning_list": [
      {
        "license": "LGPL-2.1",
        "reason": "Weak copyleft - verify dynamic linking",
        "action": "warn"
      },
      {
        "license": "LGPL-3.0",
        "reason": "Weak copyleft - verify dynamic linking",
        "action": "warn"
      },
      {
        "license": "MPL-2.0",
        "reason": "File-level copyleft - verify separation",
        "action": "warn"
      }
    ],
    "approved_list": [
      "MIT",
      "Apache-2.0",
      "BSD-2-Clause",
      "BSD-3-Clause",
      "ISC",
      "0BSD",
      "CC0-1.0",
      "Unlicense"
    ],
    "require_approval_for_new_licenses": true,
    "fail_on_unknown_license": true
  },
  "component_policy": {
    "description": "Component usage and quality rules",
    "blocklist": [
      {
        "name": "event-stream",
        "version": "3.3.6",
        "reason": "Known malicious version with cryptocurrency stealer",
        "action": "fail"
      }
    ],
    "quality_requirements": {
      "min_github_stars": 10,
      "min_contributors": 2,
      "max_age_days": 1095,
      "require_active_maintenance": true,
      "max_days_since_update": 730,
      "fail_on_deprecated": true,
      "fail_on_unmaintained": false
    }
  },
  "operational_risk": {
    "description": "Supply chain and operational risk policies",
    "fail_on_unmaintained": false,
    "max_days_inactive": 730,
    "require_repository_url": true,
    "warn_on_single_maintainer": true,
    "fail_on_no_repository": false
  },
  "sbom_requirements": {
    "description": "Software Bill of Materials requirements",
    "require_sbom_generation": true,
    "sbom_format": "CycloneDX",
    "sbom_version": "1.4",
    "include_transitive_dependencies": true,
    "include_license_info": true
  },
  "compliance_requirements": {
    "description": "Regulatory compliance mappings",
    "frameworks": [
      "SOC2",
      "PCI-DSS",
      "GDPR",
      "HIPAA"
    ],
    "require_vulnerability_tracking": true,
    "require_remediation_timeline": true,
    "max_remediation_days": {
      "critical": 7,
      "high": 30,
      "medium": 90,
      "low": 180
    }
  },
  "exclusions": {
    "description": "Global exclusions and exceptions",
    "paths": [
      "test/**",
      "tests/**",
      "**/test/**",
      "**/__tests__/**",
      "**/*.test.js",
      "**/*.spec.js",
      "node_modules/**/.bin/**"
    ],
    "dev_dependencies": {
      "exclude_from_production_scan": true,
      "apply_relaxed_policy": true
    }
  },
  "notification_settings": {
    "description": "Alert and notification configuration",
    "notify_on_new_vulnerabilities": true,
    "notify_on_policy_violation": true,
    "notify_on_license_violation": true,
    "notification_channels": [
      "email",
      "slack",
      "jira"
    ]
  },
  "remediation_guidance": {
    "description": "Remediation policy and guidance",
    "auto_create_tickets": true,
    "ticket_system": "jira",
    "assign_to_component_owner": true,
    "require_risk_acceptance_approval": true,
    "max_risk_acceptance_duration_days": 90
  }
}

View File

@@ -0,0 +1,355 @@
# Security Rule Template
#
# This template demonstrates how to structure security rules/policies.
# Adapt this template to your specific security tool (Semgrep, OPA, etc.)
#
# Rule Structure Best Practices:
# - Clear rule ID and metadata
# - Severity classification
# - Framework mappings (OWASP, CWE)
# - Remediation guidance
# - Example vulnerable and fixed code
rules:
  # Example Rule 1: SQL Injection Detection
  - id: sql-injection-string-concatenation
    metadata:
      name: "SQL Injection via String Concatenation"
      description: "Detects potential SQL injection vulnerabilities from string concatenation in SQL queries"
      severity: "HIGH"
      category: "security"
      subcategory: "injection"
      # Security Framework Mappings
      owasp:
        - "A03:2021 - Injection"
      cwe:
        - "CWE-89: SQL Injection"
      mitre_attack:
        - "T1190: Exploit Public-Facing Application"
      # Compliance Standards
      compliance:
        - "PCI-DSS 6.5.1: Injection flaws"
        - "NIST 800-53 SI-10: Information Input Validation"
      # Confidence and Impact
      confidence: "HIGH"
      likelihood: "HIGH"
      impact: "HIGH"
      # References
      references:
        - "https://owasp.org/www-community/attacks/SQL_Injection"
        - "https://cwe.mitre.org/data/definitions/89.html"
        - "https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html"
    # Languages this rule applies to
    # NOTE(review): the patterns below are Python-centric (cursor.execute,
    # f-strings) while languages also lists javascript/java/go — add
    # per-language patterns or trim this list. TODO confirm intent.
    languages:
      - python
      - javascript
      - java
      - go
    # Detection Pattern (example using Semgrep-style syntax)
    # $SQL / $VAR are metavariables: each matches an arbitrary expression
    # at that position; "..." matches any surrounding content.
    pattern-either:
      - pattern: |
          cursor.execute($SQL + $VAR)
      - pattern: |
          cursor.execute(f"... {$VAR} ...")
      - pattern: |
          cursor.execute("..." + $VAR + "...")
    # What to report when found
    message: |
      Potential SQL injection vulnerability detected. SQL query is constructed using
      string concatenation or f-strings with user input. This allows attackers to
      inject malicious SQL code.
      Use parameterized queries instead:
      - Python: cursor.execute("SELECT * FROM users WHERE id = ?", (user_id,))
      - JavaScript: db.query("SELECT * FROM users WHERE id = $1", [userId])
      See: https://owasp.org/www-community/attacks/SQL_Injection
    # Suggested fix (auto-fix if supported)
    # NOTE(review): in real Semgrep, `fix:` must be literal replacement text,
    # not prose guidance — adapt before enabling autofix. TODO confirm for
    # the target tool.
    fix: |
      Use parameterized queries with placeholders
    # Example vulnerable code
    examples:
      - vulnerable: |
          # Vulnerable: String concatenation
          user_id = request.GET['id']
          query = "SELECT * FROM users WHERE id = " + user_id
          cursor.execute(query)
      - fixed: |
          # Fixed: Parameterized query
          user_id = request.GET['id']
          query = "SELECT * FROM users WHERE id = ?"
          cursor.execute(query, (user_id,))
# Example Rule 2: Hardcoded Secrets Detection
- id: hardcoded-secret-credential
metadata:
name: "Hardcoded Secret or Credential"
description: "Detects hardcoded secrets, API keys, passwords, or tokens in source code"
severity: "CRITICAL"
category: "security"
subcategory: "secrets"
owasp:
- "A07:2021 - Identification and Authentication Failures"
cwe:
- "CWE-798: Use of Hard-coded Credentials"
- "CWE-259: Use of Hard-coded Password"
compliance:
- "PCI-DSS 8.2.1: Use of strong cryptography"
- "SOC 2 CC6.1: Logical access controls"
- "GDPR Article 32: Security of processing"
confidence: "MEDIUM"
likelihood: "HIGH"
impact: "CRITICAL"
references:
- "https://cwe.mitre.org/data/definitions/798.html"
- "https://owasp.org/www-community/vulnerabilities/Use_of_hard-coded_password"
languages:
- python
- javascript
- java
- go
- ruby
pattern-either:
- pattern: |
password = "..."
- pattern: |
api_key = "..."
- pattern: |
secret = "..."
- pattern: |
token = "..."
pattern-not: |
$VAR = ""
message: |
Potential hardcoded secret detected. Hardcoding credentials in source code
is a critical security vulnerability that can lead to unauthorized access
if the code is exposed.
Use environment variables or a secrets management system instead:
- Python: os.environ.get('API_KEY')
- Node.js: process.env.API_KEY
- Secrets Manager: AWS Secrets Manager, HashiCorp Vault, etc.
See: https://cwe.mitre.org/data/definitions/798.html
examples:
- vulnerable: |
# Vulnerable: Hardcoded API key
api_key = "sk-1234567890abcdef"
api.authenticate(api_key)
- fixed: |
# Fixed: Environment variable
import os
api_key = os.environ.get('API_KEY')
if not api_key:
raise ValueError("API_KEY environment variable not set")
api.authenticate(api_key)
  # Example Rule 3: XSS via Unsafe HTML Rendering
  - id: xss-unsafe-html-rendering
    metadata:
      name: "Cross-Site Scripting (XSS) via Unsafe HTML"
      description: "Detects unsafe HTML rendering that could lead to XSS vulnerabilities"
      severity: "HIGH"
      category: "security"
      subcategory: "xss"
      owasp:
        - "A03:2021 - Injection"
      cwe:
        - "CWE-79: Cross-site Scripting (XSS)"
        - "CWE-80: Improper Neutralization of Script-Related HTML Tags"
      compliance:
        - "PCI-DSS 6.5.7: Cross-site scripting"
        - "NIST 800-53 SI-10: Information Input Validation"
      confidence: "HIGH"
      likelihood: "MEDIUM"
      impact: "HIGH"
      references:
        - "https://owasp.org/www-community/attacks/xss/"
        - "https://cwe.mitre.org/data/definitions/79.html"
        - "https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html"
    # NOTE(review): language identifiers vary by tool — Semgrep, for example,
    # has no separate "jsx"/"tsx" ids ("javascript"/"typescript" cover them).
    # TODO confirm against the target scanner's supported-language list.
    languages:
      - javascript
      - typescript
      - jsx
      - tsx
    # Matches React's dangerouslySetInnerHTML and raw DOM innerHTML writes.
    pattern-either:
      - pattern: |
          dangerouslySetInnerHTML={{__html: $VAR}}
      - pattern: |
          innerHTML = $VAR
    message: |
      Potential XSS vulnerability detected. Setting HTML content directly from
      user input without sanitization can allow attackers to inject malicious
      JavaScript code.
      Use one of these safe alternatives:
      - React: Use {userInput} for automatic escaping
      - DOMPurify: const clean = DOMPurify.sanitize(dirty);
      - Framework-specific sanitizers
      See: https://owasp.org/www-community/attacks/xss/
    examples:
      - vulnerable: |
          // Vulnerable: Unsanitized HTML
          function UserComment({ comment }) {
            return <div dangerouslySetInnerHTML={{__html: comment}} />;
          }
      - fixed: |
          // Fixed: Sanitized with DOMPurify
          import DOMPurify from 'dompurify';
          function UserComment({ comment }) {
            const sanitized = DOMPurify.sanitize(comment);
            return <div dangerouslySetInnerHTML={{__html: sanitized}} />;
          }
# Example Rule 4: Insecure Cryptography
- id: weak-cryptographic-algorithm
metadata:
name: "Weak Cryptographic Algorithm"
description: "Detects use of weak or deprecated cryptographic algorithms"
severity: "HIGH"
category: "security"
subcategory: "cryptography"
owasp:
- "A02:2021 - Cryptographic Failures"
cwe:
- "CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
- "CWE-326: Inadequate Encryption Strength"
compliance:
- "PCI-DSS 4.1: Use strong cryptography"
- "NIST 800-53 SC-13: Cryptographic Protection"
- "GDPR Article 32: Security of processing"
confidence: "HIGH"
likelihood: "MEDIUM"
impact: "HIGH"
references:
- "https://cwe.mitre.org/data/definitions/327.html"
- "https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/09-Testing_for_Weak_Cryptography/"
languages:
- python
- javascript
- java
pattern-either:
- pattern: |
hashlib.md5(...)
- pattern: |
hashlib.sha1(...)
- pattern: |
crypto.createHash('md5')
- pattern: |
crypto.createHash('sha1')
message: |
Weak cryptographic algorithm detected (MD5 or SHA1). These algorithms are
considered cryptographically broken and should not be used for security purposes.
Use strong alternatives:
- For hashing: SHA-256, SHA-384, or SHA-512
- For password hashing: bcrypt, argon2, or PBKDF2
- Python: hashlib.sha256()
- Node.js: crypto.createHash('sha256')
See: https://cwe.mitre.org/data/definitions/327.html
examples:
- vulnerable: |
# Vulnerable: MD5 hash
import hashlib
hash_value = hashlib.md5(data).hexdigest()
- fixed: |
# Fixed: SHA-256 hash
import hashlib
hash_value = hashlib.sha256(data).hexdigest()
# Rule Configuration
configuration:
  # Global settings
  enabled: true
  severity_threshold: "MEDIUM"  # Report findings at MEDIUM severity and above
  # Performance tuning
  max_file_size_kb: 1024  # Skip files larger than this (minified bundles, vendored blobs)
  exclude_patterns:
    # NOTE(review): single-star globs ("test/*") match one path level only in
    # many tools; use "test/**" if nested test directories should also be
    # excluded — TODO confirm the consuming tool's glob semantics.
    - "test/*"
    - "tests/*"
    - "node_modules/*"
    - "vendor/*"
    - "*.min.js"
  # False positive reduction
  confidence_threshold: "MEDIUM"  # Only report findings with MEDIUM confidence or higher
# Rule Metadata Schema
# This section documents the expected structure for rules
metadata_schema:
  # Each list entry is a single-key mapping: field name -> human-readable
  # description of that field (documentation data, not an enforced schema).
  required:
    - id: "Unique identifier for the rule (kebab-case)"
    - name: "Human-readable rule name"
    - description: "What the rule detects"
    - severity: "CRITICAL | HIGH | MEDIUM | LOW | INFO"
    - category: "security | best-practice | performance"
  optional:
    - subcategory: "Specific type (injection, xss, secrets, etc.)"
    - owasp: "OWASP Top 10 mappings"
    - cwe: "CWE identifier(s)"
    - mitre_attack: "MITRE ATT&CK technique(s)"
    - compliance: "Compliance standard references"
    - confidence: "Detection confidence level"
    - likelihood: "Likelihood of exploitation"
    - impact: "Potential impact if exploited"
    - references: "External documentation links"
# Usage Instructions:
#
# 1. Copy this template when creating new security rules
# 2. Update metadata fields with appropriate framework mappings
# 3. Customize detection patterns for your tool (Semgrep, OPA, etc.)
# 4. Provide clear remediation guidance in the message field
# 5. Include both vulnerable and fixed code examples
# 6. Test rules on real codebases before deployment
#
# Best Practices:
# - Map to multiple frameworks (OWASP, CWE, MITRE ATT&CK)
# - Include compliance standard references
# - Provide actionable remediation guidance
# - Show code examples (vulnerable vs. fixed)
# - Tune confidence levels to reduce false positives
# - Exclude test directories to reduce noise