Initial commit

This commit is contained in:
Zhongwei Li
2025-11-30 08:48:52 +08:00
commit 6ec3196ecc
434 changed files with 125248 additions and 0 deletions

View File

@@ -0,0 +1,269 @@
#!/usr/bin/env python3
"""
Cloudflare Worker Deployment Utility
Automates Cloudflare Worker deployments with wrangler.toml configuration handling,
multi-environment support, and comprehensive error handling.
Usage:
python cloudflare-deploy.py --env production --dry-run
python cloudflare-deploy.py --project ./my-worker --env staging
"""
import argparse
import json
import subprocess
import sys
from pathlib import Path
from typing import Dict, List, Optional, Tuple
class CloudflareDeployError(Exception):
    """Custom exception for Cloudflare deployment errors.

    Raised for validation failures, a missing wrangler CLI, unreadable
    configuration, and failed deploy commands, so callers can catch one
    domain-specific type at the CLI boundary.
    """
    pass
class CloudflareDeploy:
    """Handle Cloudflare Worker deployments with the wrangler CLI.

    Wraps ``wrangler deploy``: validates the project layout, reads the worker
    name from wrangler.toml, builds the deploy command (environment/dry-run
    flags), runs it in the project directory, and reports the outcome.
    """

    def __init__(self, project_dir: Path, env: Optional[str] = None,
                 dry_run: bool = False, verbose: bool = False):
        """
        Initialize CloudflareDeploy.

        Args:
            project_dir: Path to Worker project directory
            env: Environment name (production, staging, dev)
            dry_run: Preview deployment without actually deploying
            verbose: Enable verbose output
        """
        self.project_dir = Path(project_dir).resolve()
        self.env = env
        self.dry_run = dry_run
        self.verbose = verbose
        self.wrangler_toml = self.project_dir / "wrangler.toml"

    def validate_project(self) -> bool:
        """
        Validate project directory and wrangler.toml existence.

        Returns:
            True if valid

        Raises:
            CloudflareDeployError: If validation fails
        """
        if not self.project_dir.exists():
            raise CloudflareDeployError(
                f"Project directory does not exist: {self.project_dir}"
            )
        if not self.wrangler_toml.exists():
            raise CloudflareDeployError(
                f"wrangler.toml not found in: {self.project_dir}"
            )
        return True

    def check_wrangler_installed(self) -> bool:
        """
        Check if wrangler CLI is installed.

        Returns:
            True if installed, False otherwise
        """
        try:
            result = subprocess.run(
                ["wrangler", "--version"],
                capture_output=True,
                text=True,
                check=True
            )
            if self.verbose:
                print(f"Wrangler version: {result.stdout.strip()}")
            return True
        except (subprocess.CalledProcessError, FileNotFoundError):
            # Missing binary or non-zero exit: either way wrangler is unusable.
            return False

    def run_command(self, cmd: List[str], check: bool = True) -> Tuple[int, str, str]:
        """
        Run shell command and capture output.

        Args:
            cmd: Command and arguments as list
            check: Raise exception on non-zero exit code

        Returns:
            Tuple of (exit_code, stdout, stderr)

        Raises:
            CloudflareDeployError: If command fails and check=True
        """
        if self.verbose:
            print(f"Running: {' '.join(cmd)}")
        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                cwd=self.project_dir,
                check=check
            )
            return result.returncode, result.stdout, result.stderr
        except subprocess.CalledProcessError as e:
            if check:
                raise CloudflareDeployError(
                    f"Command failed: {' '.join(cmd)}\n{e.stderr}"
                ) from e
            # Reached when the callable raises despite check=False (e.g. a
            # mocked subprocess.run); report the failure as a return value.
            return e.returncode, e.stdout, e.stderr

    def get_worker_name(self) -> str:
        """
        Extract worker name from wrangler.toml.

        Returns:
            Worker name

        Raises:
            CloudflareDeployError: If name cannot be extracted
        """
        try:
            with open(self.wrangler_toml, 'r') as f:
                for line in f:
                    stripped = line.strip()
                    if not stripped.startswith('name'):
                        continue
                    # Parse: name = "worker-name". Split on the first '='
                    # only, so values containing '=' survive, and require the
                    # key to be exactly `name` so keys such as `name_prefix`
                    # are not mistaken for it. (The previous split('=')[1]
                    # also raised IndexError on a bare `name` line.)
                    key, sep, value = stripped.partition('=')
                    if sep and key.strip() == 'name':
                        return value.strip().strip('"\'')
        except OSError as e:
            raise CloudflareDeployError(f"Failed to read worker name: {e}") from e
        raise CloudflareDeployError("Worker name not found in wrangler.toml")

    def build_deploy_command(self) -> List[str]:
        """
        Build wrangler deploy command with appropriate flags.

        Returns:
            Command as list of strings
        """
        cmd = ["wrangler", "deploy"]
        if self.env:
            cmd.extend(["--env", self.env])
        if self.dry_run:
            cmd.append("--dry-run")
        return cmd

    def deploy(self) -> bool:
        """
        Execute deployment.

        Returns:
            True if successful

        Raises:
            CloudflareDeployError: If deployment fails
        """
        # Validate before touching the network/CLI.
        self.validate_project()
        if not self.check_wrangler_installed():
            raise CloudflareDeployError(
                "wrangler CLI not installed. Install: npm install -g wrangler"
            )
        worker_name = self.get_worker_name()
        env_suffix = f" ({self.env})" if self.env else ""
        mode = "DRY RUN" if self.dry_run else "DEPLOY"
        print(f"\n{mode}: {worker_name}{env_suffix}")
        print(f"Project: {self.project_dir}\n")
        # Build and run command (run_command raises on failure by default).
        cmd = self.build_deploy_command()
        exit_code, stdout, stderr = self.run_command(cmd)
        # Forward wrangler's own output to the user.
        if stdout:
            print(stdout)
        if stderr:
            print(stderr, file=sys.stderr)
        if exit_code == 0:
            status = "would be deployed" if self.dry_run else "deployed successfully"
            print(f"\n✓ Worker {status}")
            return True
        else:
            # Safety net: only reachable if run_command was called with a
            # non-raising configuration.
            raise CloudflareDeployError("Deployment failed")
def main():
    """CLI entry point: parse flags, run the deployment, map errors to exit codes."""
    parser = argparse.ArgumentParser(
        description="Deploy Cloudflare Worker with wrangler",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python cloudflare-deploy.py
  python cloudflare-deploy.py --env production
  python cloudflare-deploy.py --project ./my-worker --env staging
  python cloudflare-deploy.py --dry-run
  python cloudflare-deploy.py --env prod --verbose
"""
    )
    parser.add_argument(
        "--project", type=str, default=".",
        help="Path to Worker project directory (default: current directory)")
    parser.add_argument(
        "--env", type=str, choices=["production", "staging", "dev"],
        help="Environment to deploy to (production, staging, dev)")
    parser.add_argument(
        "--dry-run", action="store_true",
        help="Preview deployment without actually deploying")
    parser.add_argument(
        "--verbose", "-v", action="store_true",
        help="Enable verbose output")
    options = parser.parse_args()

    try:
        worker = CloudflareDeploy(
            project_dir=options.project,
            env=options.env,
            dry_run=options.dry_run,
            verbose=options.verbose,
        )
        # deploy() returns True on success and raises on failure.
        sys.exit(0 if worker.deploy() else 1)
    except CloudflareDeployError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    except KeyboardInterrupt:
        # Conventional exit code for SIGINT.
        print("\nDeployment cancelled by user", file=sys.stderr)
        sys.exit(130)
    except Exception as e:
        print(f"Unexpected error: {e}", file=sys.stderr)
        sys.exit(1)
# Run the CLI only when executed directly, not when imported (e.g. by tests).
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,320 @@
#!/usr/bin/env python3
"""
Dockerfile Optimization Analyzer
Analyzes Dockerfiles for optimization opportunities including multi-stage builds,
security issues, size reduction, and best practices.
Usage:
python docker-optimize.py Dockerfile
python docker-optimize.py --json Dockerfile
python docker-optimize.py --verbose Dockerfile
"""
import argparse
import json
import re
import sys
from pathlib import Path
from typing import Dict, List, Optional, Tuple
class DockerfileAnalyzer:
    """Analyze a Dockerfile for optimization opportunities.

    Each check appends findings to one of two lists:
      - ``issues``: problems with a severity ('error' or 'warning')
      - ``suggestions``: advisory improvements without a severity
    Call :meth:`analyze` to run every check and get a summary dict.
    """

    def __init__(self, dockerfile_path: Path, verbose: bool = False):
        """
        Initialize analyzer.

        Args:
            dockerfile_path: Path to Dockerfile
            verbose: Enable verbose output
        """
        self.dockerfile_path = Path(dockerfile_path)
        self.verbose = verbose
        self.lines = []        # raw lines, populated by load_dockerfile()
        self.issues = []       # findings with a severity
        self.suggestions = []  # advisory findings

    def load_dockerfile(self) -> bool:
        """
        Load and parse Dockerfile.

        Returns:
            True if loaded successfully

        Raises:
            FileNotFoundError: If Dockerfile doesn't exist
        """
        if not self.dockerfile_path.exists():
            raise FileNotFoundError(f"Dockerfile not found: {self.dockerfile_path}")
        with open(self.dockerfile_path, 'r') as f:
            self.lines = f.readlines()
        return True

    def analyze_base_image(self) -> None:
        """Check base image for pinned tags and slimmer variants."""
        for i, line in enumerate(self.lines, 1):
            line = line.strip()
            if line.startswith('FROM'):
                # Flag :latest, or an untagged image: no ':' at all and not a
                # build-stage reference ('AS') or digest ('@'). Fix: the
                # previous check looked for ': ' (colon + space), which never
                # occurs in a tag, so properly tagged images were flagged too.
                if ':latest' in line or (':' not in line and 'AS' not in line and '@' not in line):
                    self.issues.append({
                        'line': i,
                        'severity': 'warning',
                        'category': 'base_image',
                        'message': 'Base image uses :latest or no tag',
                        'suggestion': 'Use specific version tags for reproducibility'
                    })
                # Size hint: Node images have much smaller Alpine variants.
                if 'node' in line.lower() and 'alpine' not in line.lower():
                    self.suggestions.append({
                        'line': i,
                        'category': 'size',
                        'message': 'Consider using Alpine variant',
                        'suggestion': 'node:20-alpine is ~10x smaller than node:20'
                    })

    def analyze_multi_stage(self) -> None:
        """Check if multi-stage build is used."""
        from_count = sum(1 for line in self.lines if line.strip().startswith('FROM'))
        if from_count == 1:
            # Heuristic: build tooling in a single-stage image ends up in the
            # final image where it is dead weight.
            has_build_tools = any(
                any(tool in line.lower() for tool in ['gcc', 'make', 'build-essential', 'npm install', 'pip install'])
                for line in self.lines
            )
            if has_build_tools:
                self.issues.append({
                    'line': 0,
                    'severity': 'warning',
                    'category': 'optimization',
                    'message': 'Single-stage build with build tools',
                    'suggestion': 'Use multi-stage build to exclude build dependencies from final image'
                })

    def analyze_layer_caching(self) -> None:
        """Check that dependency manifests are copied before the source tree."""
        copy_lines = []
        for i, line in enumerate(self.lines, 1):
            stripped = line.strip()
            if stripped.startswith('COPY'):
                copy_lines.append((i, stripped))
        # Copying manifests separately lets the install layer stay cached
        # across source-only changes.
        has_package_copy = any('package.json' in line or 'requirements.txt' in line or 'go.mod' in line
                               for _, line in copy_lines)
        has_source_copy = any('COPY . .' in line or 'COPY ./' in line
                              for _, line in copy_lines)
        if has_source_copy and not has_package_copy:
            self.issues.append({
                'line': 0,
                'severity': 'warning',
                'category': 'caching',
                'message': 'Source copied before dependencies',
                'suggestion': 'Copy dependency files first (package.json, requirements.txt) then run install, then copy source'
            })

    def analyze_security(self) -> None:
        """Check for root user and secret-looking build variables."""
        has_user = any(line.strip().startswith('USER') and 'root' not in line.lower()
                       for line in self.lines)
        if not has_user:
            self.issues.append({
                'line': 0,
                'severity': 'error',
                'category': 'security',
                'message': 'Container runs as root',
                'suggestion': 'Create and use non-root user with USER instruction'
            })
        # Check for secrets baked into the image via ENV/ARG.
        for i, line in enumerate(self.lines, 1):
            if any(secret in line.upper() for secret in ['PASSWORD', 'SECRET', 'TOKEN', 'API_KEY']):
                if 'ENV' in line or 'ARG' in line:
                    self.issues.append({
                        'line': i,
                        'severity': 'error',
                        'category': 'security',
                        'message': 'Potential secret in Dockerfile',
                        'suggestion': 'Use build-time arguments or runtime environment variables'
                    })

    def analyze_apt_cache(self) -> None:
        """Check for apt cache cleanup in the same layer as the install."""
        # NOTE(review): this is a per-line check; RUN instructions split with
        # backslash continuations are not recognized — confirm acceptable.
        for i, line in enumerate(self.lines, 1):
            if 'apt-get install' in line.lower() or 'apt install' in line.lower():
                if 'rm -rf /var/lib/apt/lists/*' not in line:
                    self.suggestions.append({
                        'line': i,
                        'category': 'size',
                        'message': 'apt cache not cleaned in same layer',
                        'suggestion': 'Add && rm -rf /var/lib/apt/lists/* to reduce image size'
                    })

    def analyze_combine_run(self) -> None:
        """Check for multiple consecutive RUN commands."""

        def _flag(start_line: int, count: int) -> None:
            # One suggestion per streak of 2+ RUN instructions.
            self.suggestions.append({
                'line': start_line,
                'category': 'layers',
                'message': f'{count} consecutive RUN commands',
                'suggestion': 'Combine related RUN commands with && to reduce layers'
            })

        consecutive_runs = 0
        first_run_line = 0
        for i, line in enumerate(self.lines, 1):
            if line.strip().startswith('RUN'):
                if consecutive_runs == 0:
                    first_run_line = i
                consecutive_runs += 1
            else:
                if consecutive_runs > 1:
                    _flag(first_run_line, consecutive_runs)
                consecutive_runs = 0
        # Fix: a streak reaching end-of-file was previously never reported
        # because it was only flushed when a non-RUN line followed.
        if consecutive_runs > 1:
            _flag(first_run_line, consecutive_runs)

    def analyze_workdir(self) -> None:
        """Check for WORKDIR usage."""
        has_workdir = any(line.strip().startswith('WORKDIR') for line in self.lines)
        if not has_workdir:
            self.suggestions.append({
                'line': 0,
                'category': 'best_practice',
                'message': 'No WORKDIR specified',
                'suggestion': 'Use WORKDIR to set working directory instead of cd commands'
            })

    def analyze(self) -> Dict:
        """
        Run all analyses.

        Returns:
            Analysis results dictionary with 'dockerfile', 'total_lines',
            'issues', 'suggestions' and a 'summary' of counts.
        """
        self.load_dockerfile()
        self.analyze_base_image()
        self.analyze_multi_stage()
        self.analyze_layer_caching()
        self.analyze_security()
        self.analyze_apt_cache()
        self.analyze_combine_run()
        self.analyze_workdir()
        return {
            'dockerfile': str(self.dockerfile_path),
            'total_lines': len(self.lines),
            'issues': self.issues,
            'suggestions': self.suggestions,
            'summary': {
                'errors': len([i for i in self.issues if i.get('severity') == 'error']),
                'warnings': len([i for i in self.issues if i.get('severity') == 'warning']),
                'suggestions': len(self.suggestions)
            }
        }

    def print_results(self, results: Dict) -> None:
        """
        Print analysis results in human-readable format.

        Args:
            results: Analysis results from analyze()
        """
        print(f"\nDockerfile Analysis: {results['dockerfile']}")
        print(f"Total lines: {results['total_lines']}")
        print(f"\nSummary:")
        print(f"  Errors: {results['summary']['errors']}")
        print(f"  Warnings: {results['summary']['warnings']}")
        print(f"  Suggestions: {results['summary']['suggestions']}")
        if results['issues']:
            print(f"\n{'='*60}")
            print("ISSUES:")
            print('='*60)
            for issue in results['issues']:
                severity = issue.get('severity', 'info').upper()
                # Line 0 marks a file-wide (general) finding.
                line_info = f"Line {issue['line']}" if issue['line'] > 0 else "General"
                print(f"\n[{severity}] {line_info} - {issue['category']}")
                print(f"  {issue['message']}")
                # Indent to match the message line (previously flush-left).
                print(f"  {issue['suggestion']}")
        if results['suggestions']:
            print(f"\n{'='*60}")
            print("SUGGESTIONS:")
            print('='*60)
            for sugg in results['suggestions']:
                line_info = f"Line {sugg['line']}" if sugg['line'] > 0 else "General"
                print(f"\n{line_info} - {sugg['category']}")
                print(f"  {sugg['message']}")
                print(f"  {sugg['suggestion']}")
        print()
def main():
    """CLI entry point: parse arguments, run the analysis, report, set exit code."""
    parser = argparse.ArgumentParser(
        description="Analyze Dockerfile for optimization opportunities",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("dockerfile", type=str, help="Path to Dockerfile")
    parser.add_argument("--json", action="store_true",
                        help="Output results as JSON")
    parser.add_argument("--verbose", "-v", action="store_true",
                        help="Enable verbose output")
    options = parser.parse_args()

    try:
        inspector = DockerfileAnalyzer(
            dockerfile_path=options.dockerfile,
            verbose=options.verbose,
        )
        report = inspector.analyze()
        if options.json:
            print(json.dumps(report, indent=2))
        else:
            inspector.print_results(report)
        # Non-zero exit when hard errors were found (useful in CI gates).
        if report['summary']['errors'] > 0:
            sys.exit(1)
    except FileNotFoundError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Unexpected error: {e}", file=sys.stderr)
        sys.exit(1)
# Run the CLI only when executed directly, not when imported (e.g. by tests).
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,20 @@
# DevOps Skill Dependencies
# Python 3.10+ required
# No runtime Python package dependencies - the tools use only the standard library
# Testing dependencies (dev)
pytest>=8.0.0
pytest-cov>=4.1.0
pytest-mock>=3.12.0
# Note: This skill requires various CLI tools depending on platform:
#
# Cloudflare:
# - wrangler CLI: npm install -g wrangler
#
# Docker:
# - docker CLI: https://docs.docker.com/get-docker/
#
# Google Cloud:
# - gcloud CLI: https://cloud.google.com/sdk/docs/install

View File

@@ -0,0 +1,3 @@
pytest>=7.0.0
pytest-cov>=4.0.0
pytest-mock>=3.10.0

View File

@@ -0,0 +1,285 @@
"""
Tests for cloudflare-deploy.py
Run with: pytest test_cloudflare_deploy.py -v
"""
import pytest
import subprocess
from pathlib import Path
from unittest.mock import Mock, patch, mock_open
import sys
import os
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from cloudflare_deploy import CloudflareDeploy, CloudflareDeployError
@pytest.fixture
def temp_project(tmp_path):
    """Provide a throwaway Worker project containing a minimal wrangler.toml."""
    project_dir = tmp_path / "test-worker"
    project_dir.mkdir()
    config_lines = [
        'name = "test-worker"',
        'main = "src/index.ts"',
        'compatibility_date = "2024-01-01"',
    ]
    (project_dir / "wrangler.toml").write_text("\n" + "\n".join(config_lines) + "\n")
    return project_dir
@pytest.fixture
def deployer(temp_project):
    """A CloudflareDeploy wired to the temp project, targeting staging."""
    return CloudflareDeploy(project_dir=temp_project, env="staging",
                            dry_run=False, verbose=False)
class TestCloudflareDeployInit:
    """Constructor behaviour: defaults and explicit options."""

    def test_init_with_defaults(self, temp_project):
        created = CloudflareDeploy(project_dir=temp_project)
        # project_dir is resolved to an absolute path on construction.
        assert created.project_dir == temp_project.resolve()
        assert created.env is None
        assert created.dry_run is False
        assert created.verbose is False

    def test_init_with_custom_params(self, temp_project):
        created = CloudflareDeploy(
            project_dir=temp_project,
            env="production",
            dry_run=True,
            verbose=True,
        )
        assert created.env == "production"
        assert created.dry_run is True
        assert created.verbose is True
class TestValidateProject:
    """Project directory and wrangler.toml validation."""

    def test_validate_existing_project(self, deployer):
        assert deployer.validate_project() is True

    def test_validate_nonexistent_project(self, tmp_path):
        missing = CloudflareDeploy(project_dir=tmp_path / "nonexistent")
        with pytest.raises(CloudflareDeployError, match="does not exist"):
            missing.validate_project()

    def test_validate_missing_wrangler_toml(self, tmp_path):
        bare_dir = tmp_path / "no-toml"
        bare_dir.mkdir()
        no_config = CloudflareDeploy(project_dir=bare_dir)
        with pytest.raises(CloudflareDeployError, match="wrangler.toml not found"):
            no_config.validate_project()
class TestCheckWranglerInstalled:
    """Test wrangler CLI detection"""

    # subprocess.run is patched in every test so no real wrangler binary
    # is needed on the machine running the suite.
    @patch('subprocess.run')
    def test_wrangler_installed(self, mock_run, deployer):
        mock_run.return_value = Mock(
            returncode=0,
            stdout="wrangler 3.0.0",
            stderr=""
        )
        assert deployer.check_wrangler_installed() is True

    @patch('subprocess.run')
    def test_wrangler_not_installed(self, mock_run, deployer):
        # FileNotFoundError is what subprocess raises for a missing binary.
        mock_run.side_effect = FileNotFoundError()
        assert deployer.check_wrangler_installed() is False

    @patch('subprocess.run')
    def test_wrangler_command_fails(self, mock_run, deployer):
        # Binary present but exits non-zero; check=True turns that into
        # CalledProcessError, which the method should translate to False.
        mock_run.side_effect = subprocess.CalledProcessError(1, "wrangler")
        assert deployer.check_wrangler_installed() is False
class TestGetWorkerName:
    """Worker-name extraction from wrangler.toml."""

    def test_get_worker_name_success(self, deployer):
        assert deployer.get_worker_name() == "test-worker"

    def test_get_worker_name_no_name(self, tmp_path):
        workdir = tmp_path / "no-name"
        workdir.mkdir()
        (workdir / "wrangler.toml").write_text("main = 'index.ts'")
        target = CloudflareDeploy(project_dir=workdir)
        with pytest.raises(CloudflareDeployError, match="Worker name not found"):
            target.get_worker_name()

    def test_get_worker_name_with_quotes(self, tmp_path):
        workdir = tmp_path / "quoted"
        workdir.mkdir()
        (workdir / "wrangler.toml").write_text('name = "my-worker"\n')
        assert CloudflareDeploy(project_dir=workdir).get_worker_name() == "my-worker"

    def test_get_worker_name_single_quotes(self, tmp_path):
        workdir = tmp_path / "single-quotes"
        workdir.mkdir()
        (workdir / "wrangler.toml").write_text("name = 'my-worker'\n")
        assert CloudflareDeploy(project_dir=workdir).get_worker_name() == "my-worker"
class TestBuildDeployCommand:
    """Deploy command-line construction for every flag combination."""

    def test_basic_command(self, temp_project):
        built = CloudflareDeploy(project_dir=temp_project).build_deploy_command()
        assert built == ["wrangler", "deploy"]

    def test_command_with_env(self, temp_project):
        built = CloudflareDeploy(
            project_dir=temp_project, env="production"
        ).build_deploy_command()
        assert built == ["wrangler", "deploy", "--env", "production"]

    def test_command_with_dry_run(self, temp_project):
        built = CloudflareDeploy(
            project_dir=temp_project, dry_run=True
        ).build_deploy_command()
        assert built == ["wrangler", "deploy", "--dry-run"]

    def test_command_with_env_and_dry_run(self, temp_project):
        built = CloudflareDeploy(
            project_dir=temp_project, env="staging", dry_run=True
        ).build_deploy_command()
        assert built == ["wrangler", "deploy", "--env", "staging", "--dry-run"]
class TestRunCommand:
    """Test command execution"""

    # subprocess.run is patched throughout: nothing is actually executed.
    @patch('subprocess.run')
    def test_run_command_success(self, mock_run, deployer):
        mock_run.return_value = Mock(
            returncode=0,
            stdout="Success",
            stderr=""
        )
        exit_code, stdout, stderr = deployer.run_command(["echo", "test"])
        assert exit_code == 0
        assert stdout == "Success"
        assert stderr == ""
        mock_run.assert_called_once()

    @patch('subprocess.run')
    def test_run_command_failure_with_check(self, mock_run, deployer):
        # check=True: a CalledProcessError must surface as the domain error.
        mock_run.side_effect = subprocess.CalledProcessError(
            1, "cmd", stderr="Error"
        )
        with pytest.raises(CloudflareDeployError, match="Command failed"):
            deployer.run_command(["false"], check=True)

    @patch('subprocess.run')
    def test_run_command_failure_no_check(self, mock_run, deployer):
        # The mock raises even though check=False; run_command is expected to
        # swallow the exception and report the exit code as a return value.
        mock_run.side_effect = subprocess.CalledProcessError(
            1, "cmd", output="", stderr="Error"
        )
        exit_code, stdout, stderr = deployer.run_command(["false"], check=False)
        assert exit_code == 1
class TestDeploy:
    """Test full deployment flow"""

    # With stacked @patch decorators the bottom-most decorator maps to the
    # FIRST mock parameter — here run_command -> mock_run_cmd.
    @patch.object(CloudflareDeploy, 'check_wrangler_installed')
    @patch.object(CloudflareDeploy, 'run_command')
    def test_deploy_success(self, mock_run_cmd, mock_check_wrangler, deployer):
        mock_check_wrangler.return_value = True
        mock_run_cmd.return_value = (0, "Deployed successfully", "")
        result = deployer.deploy()
        assert result is True
        mock_check_wrangler.assert_called_once()
        mock_run_cmd.assert_called_once()

    @patch.object(CloudflareDeploy, 'check_wrangler_installed')
    def test_deploy_wrangler_not_installed(self, mock_check_wrangler, deployer):
        mock_check_wrangler.return_value = False
        with pytest.raises(CloudflareDeployError, match="wrangler CLI not installed"):
            deployer.deploy()

    @patch.object(CloudflareDeploy, 'check_wrangler_installed')
    @patch.object(CloudflareDeploy, 'run_command')
    def test_deploy_command_fails(self, mock_run_cmd, mock_check_wrangler, deployer):
        mock_check_wrangler.return_value = True
        # run_command raising is how a failed wrangler invocation surfaces.
        mock_run_cmd.side_effect = CloudflareDeployError("Deploy failed")
        with pytest.raises(CloudflareDeployError, match="Deploy failed"):
            deployer.deploy()

    def test_deploy_invalid_project(self, tmp_path):
        # No wrangler.toml / missing dir: validation fails before any CLI call.
        deployer = CloudflareDeploy(project_dir=tmp_path / "nonexistent")
        with pytest.raises(CloudflareDeployError):
            deployer.deploy()
class TestIntegration:
    """Integration tests"""

    # Bottom-most @patch maps to the first mock parameter (run_command).
    @patch.object(CloudflareDeploy, 'check_wrangler_installed')
    @patch.object(CloudflareDeploy, 'run_command')
    def test_full_deployment_flow(self, mock_run_cmd, mock_check_wrangler, temp_project):
        mock_check_wrangler.return_value = True
        mock_run_cmd.return_value = (0, "Success", "")
        deployer = CloudflareDeploy(
            project_dir=temp_project,
            env="production",
            dry_run=False,
            verbose=True
        )
        result = deployer.deploy()
        assert result is True
        assert mock_run_cmd.call_count == 1
        # Verify correct command was built (first positional arg of the call).
        call_args = mock_run_cmd.call_args[0][0]
        assert "wrangler" in call_args
        assert "deploy" in call_args
        assert "--env" in call_args
        assert "production" in call_args
# Allow running this test module directly without invoking pytest manually.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])

View File

@@ -0,0 +1,436 @@
"""
Tests for docker-optimize.py
Run with: pytest test_docker_optimize.py -v
"""
import pytest
import json
from pathlib import Path
import sys
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from docker_optimize import DockerfileAnalyzer
@pytest.fixture
def temp_dockerfile(tmp_path):
    """Path for a scratch Dockerfile inside pytest's tmp dir (file not created yet)."""
    return tmp_path / "Dockerfile"
def write_dockerfile(filepath, content):
    """Write *content* to *filepath*, creating or overwriting the Dockerfile."""
    Path(filepath).write_text(content)
class TestDockerfileAnalyzerInit:
    """Analyzer construction leaves all result containers empty."""

    def test_init(self, temp_dockerfile):
        write_dockerfile(temp_dockerfile, "FROM node:20\n")
        fresh = DockerfileAnalyzer(temp_dockerfile)
        assert fresh.dockerfile_path == temp_dockerfile
        assert fresh.verbose is False
        assert fresh.lines == []
        assert fresh.issues == []
        assert fresh.suggestions == []
class TestLoadDockerfile:
    """Loading an existing vs. a missing Dockerfile."""

    def test_load_success(self, temp_dockerfile):
        write_dockerfile(temp_dockerfile, "FROM node:20\nWORKDIR /app\n")
        loader = DockerfileAnalyzer(temp_dockerfile)
        assert loader.load_dockerfile() is True
        assert len(loader.lines) == 2

    def test_load_nonexistent(self, tmp_path):
        with pytest.raises(FileNotFoundError):
            DockerfileAnalyzer(tmp_path / "nonexistent").load_dockerfile()
class TestAnalyzeBaseImage:
    """Test base image analysis"""

    def test_latest_tag(self, temp_dockerfile):
        write_dockerfile(temp_dockerfile, "FROM node:latest\n")
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_base_image()
        assert len(analyzer.issues) == 1
        assert analyzer.issues[0]['category'] == 'base_image'
        assert 'latest' in analyzer.issues[0]['message']

    def test_no_tag(self, temp_dockerfile):
        write_dockerfile(temp_dockerfile, "FROM node\n")
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_base_image()
        assert len(analyzer.issues) == 1
        assert 'no tag' in analyzer.issues[0]['message']

    def test_specific_tag(self, temp_dockerfile):
        # NOTE(review): expects a pinned tag to produce no base_image issue.
        # This only holds if the analyzer tests for ':' (not ': ') when
        # deciding whether a tag is present — verify against the analyzer.
        write_dockerfile(temp_dockerfile, "FROM node:20-alpine\n")
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_base_image()
        # Should have no issues with specific tag
        base_image_issues = [i for i in analyzer.issues if i['category'] == 'base_image']
        assert len(base_image_issues) == 0

    def test_non_alpine_suggestion(self, temp_dockerfile):
        write_dockerfile(temp_dockerfile, "FROM node:20\n")
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_base_image()
        assert len(analyzer.suggestions) >= 1
        assert any('Alpine' in s['message'] for s in analyzer.suggestions)
class TestAnalyzeMultiStage:
    """Test multi-stage build analysis"""

    def test_single_stage_with_build_tools(self, temp_dockerfile):
        # One FROM plus an `npm install` should trigger the warning.
        content = """
FROM node:20
WORKDIR /app
COPY package.json .
RUN npm install
COPY . .
CMD ["node", "server.js"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_multi_stage()
        assert len(analyzer.issues) == 1
        assert analyzer.issues[0]['category'] == 'optimization'
        assert 'multi-stage' in analyzer.issues[0]['message'].lower()

    def test_multi_stage_no_issues(self, temp_dockerfile):
        # Two FROM instructions: the check is skipped entirely.
        content = """
FROM node:20 AS build
WORKDIR /app
COPY package.json .
RUN npm install
COPY . .
RUN npm run build
FROM node:20-alpine
WORKDIR /app
COPY --from=build /app/dist ./dist
CMD ["node", "dist/server.js"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_multi_stage()
        multi_stage_issues = [i for i in analyzer.issues if i['category'] == 'optimization']
        assert len(multi_stage_issues) == 0
class TestAnalyzeLayerCaching:
    """Test layer caching analysis"""

    def test_source_before_dependencies(self, temp_dockerfile):
        # `COPY . .` without a preceding manifest copy busts the install cache.
        content = """
FROM node:20
WORKDIR /app
COPY . .
RUN npm install
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_layer_caching()
        assert len(analyzer.issues) == 1
        assert analyzer.issues[0]['category'] == 'caching'

    def test_correct_order(self, temp_dockerfile):
        # Manifest copied first: the install layer can be cached.
        content = """
FROM node:20
WORKDIR /app
COPY package.json .
RUN npm install
COPY . .
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_layer_caching()
        caching_issues = [i for i in analyzer.issues if i['category'] == 'caching']
        assert len(caching_issues) == 0
class TestAnalyzeSecurity:
    """Test security analysis"""

    def test_no_user_instruction(self, temp_dockerfile):
        # No USER line anywhere: the container runs as root.
        content = """
FROM node:20
WORKDIR /app
COPY . .
CMD ["node", "server.js"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_security()
        assert len(analyzer.issues) >= 1
        security_issues = [i for i in analyzer.issues if i['category'] == 'security']
        assert any('root' in i['message'] for i in security_issues)

    def test_with_user_instruction(self, temp_dockerfile):
        content = """
FROM node:20
WORKDIR /app
COPY . .
USER node
CMD ["node", "server.js"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_security()
        # Should not have root user issue
        root_issues = [i for i in analyzer.issues
                       if i['category'] == 'security' and 'root' in i['message']]
        assert len(root_issues) == 0

    def test_detect_secrets(self, temp_dockerfile):
        # Secret-looking names in ENV lines must be flagged.
        content = """
FROM node:20
ENV API_KEY=secret123
ENV PASSWORD=mypassword
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_security()
        secret_issues = [i for i in analyzer.issues
                         if i['category'] == 'security' and 'secret' in i['message'].lower()]
        assert len(secret_issues) >= 1
class TestAnalyzeAptCache:
    """Test apt cache cleanup analysis"""

    def test_apt_without_cleanup(self, temp_dockerfile):
        # Install without cleaning the apt lists in the same layer.
        content = """
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y curl
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_apt_cache()
        assert len(analyzer.suggestions) >= 1
        assert any('apt cache' in s['message'] for s in analyzer.suggestions)

    def test_apt_with_cleanup(self, temp_dockerfile):
        # Cleanup appended on the same line: no suggestion expected.
        content = """
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_apt_cache()
        apt_suggestions = [s for s in analyzer.suggestions if 'apt cache' in s['message']]
        assert len(apt_suggestions) == 0
class TestAnalyzeCombineRun:
    """Test RUN command combination analysis"""

    def test_consecutive_runs(self, temp_dockerfile):
        # NOTE(review): the RUN streak here ends at end-of-file; this test
        # only passes if the analyzer flushes a trailing streak after the
        # loop, not just when a non-RUN line follows — verify.
        content = """
FROM node:20
RUN apt-get update
RUN apt-get install -y curl
RUN apt-get clean
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_combine_run()
        assert len(analyzer.suggestions) >= 1
        assert any('consecutive' in s['message'] for s in analyzer.suggestions)

    def test_non_consecutive_runs(self, temp_dockerfile):
        # RUNs separated by a COPY: no streak, no suggestion.
        content = """
FROM node:20
RUN apt-get update
COPY package.json .
RUN npm install
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_combine_run()
        consecutive_suggestions = [s for s in analyzer.suggestions
                                   if 'consecutive' in s['message']]
        assert len(consecutive_suggestions) == 0
class TestAnalyzeWorkdir:
    """Test WORKDIR analysis"""

    def test_no_workdir(self, temp_dockerfile):
        content = """
FROM node:20
COPY . /app
CMD ["node", "/app/server.js"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_workdir()
        assert len(analyzer.suggestions) >= 1
        assert any('WORKDIR' in s['message'] for s in analyzer.suggestions)

    def test_with_workdir(self, temp_dockerfile):
        content = """
FROM node:20
WORKDIR /app
COPY . .
CMD ["node", "server.js"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        analyzer.load_dockerfile()
        analyzer.analyze_workdir()
        workdir_suggestions = [s for s in analyzer.suggestions if 'WORKDIR' in s['message']]
        assert len(workdir_suggestions) == 0
class TestFullAnalyze:
    """Test complete analysis"""

    def test_analyze_poor_dockerfile(self, temp_dockerfile):
        # :latest tag, source-before-deps copy, no USER, no WORKDIR.
        content = """
FROM node:latest
COPY . .
RUN npm install
CMD ["node", "server.js"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        results = analyzer.analyze()
        assert 'dockerfile' in results
        assert 'total_lines' in results
        assert 'issues' in results
        assert 'suggestions' in results
        assert 'summary' in results
        # Should have multiple issues and suggestions
        assert results['summary']['warnings'] > 0
        assert results['summary']['suggestions'] > 0

    def test_analyze_good_dockerfile(self, temp_dockerfile):
        # Multi-stage, pinned alpine tags, manifest-first copy, non-root USER.
        content = """
FROM node:20-alpine AS build
WORKDIR /app
COPY package.json .
RUN npm ci --only=production
COPY . .
RUN npm run build
FROM node:20-alpine
WORKDIR /app
COPY --from=build /app/dist ./dist
COPY --from=build /app/node_modules ./node_modules
USER node
EXPOSE 3000
CMD ["node", "dist/server.js"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        results = analyzer.analyze()
        # Should have minimal issues
        assert results['summary']['errors'] == 0
        # May have some suggestions, but fewer issues overall
class TestPrintResults:
    """Test results printing"""

    def test_print_results(self, temp_dockerfile, capsys):
        # Smoke test: capture stdout and check the report sections appear.
        content = "FROM node:latest\n"
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile)
        results = analyzer.analyze()
        analyzer.print_results(results)
        captured = capsys.readouterr()
        assert "Dockerfile Analysis" in captured.out
        assert "Summary:" in captured.out
        assert "ISSUES:" in captured.out or "SUGGESTIONS:" in captured.out
class TestIntegration:
    """Integration tests"""

    def test_full_analysis_workflow(self, temp_dockerfile):
        # A deliberately bad Dockerfile: root user, secret in ENV,
        # single stage with pip install, source copied wholesale.
        content = """
FROM python:3.11
COPY . /app
RUN pip install -r /app/requirements.txt
ENV API_KEY=secret
CMD ["python", "/app/app.py"]
"""
        write_dockerfile(temp_dockerfile, content)
        analyzer = DockerfileAnalyzer(temp_dockerfile, verbose=True)
        results = analyzer.analyze()
        # Verify all expected checks ran
        assert len(analyzer.issues) > 0
        assert len(analyzer.suggestions) > 0
        # Should flag multiple categories
        categories = {i['category'] for i in analyzer.issues}
        assert 'security' in categories
        # Verify summary calculations
        total_findings = (results['summary']['errors'] +
                          results['summary']['warnings'] +
                          results['summary']['suggestions'])
        assert total_findings > 0
# Allow running this test module directly without invoking pytest manually.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])