Initial commit

This commit is contained in:
Zhongwei Li
2025-11-29 18:00:18 +08:00
commit 765529cd13
69 changed files with 18291 additions and 0 deletions

View File

@@ -0,0 +1,495 @@
#!/usr/bin/env -S uv run --script --quiet
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "httpx>=0.27.0",
# ]
# ///
"""
Convert Python scripts to uv single-file format (PEP 723)
Purpose: script-conversion-automation
Team: devops
Author: devops@spaceships.work
Converts existing Python scripts to use inline dependency management.
Reads dependencies from requirements.txt or detects from imports.
Usage:
convert_to_uv.py <script.py>
convert_to_uv.py <script.py> --requirements requirements.txt
convert_to_uv.py <script.py> --detect-imports
convert_to_uv.py <script.py> --dry-run
Examples:
# Convert script with requirements.txt in same directory
convert_to_uv.py my_script.py
# Convert script with specific requirements file
convert_to_uv.py my_script.py --requirements ../requirements.txt
# Detect dependencies from imports (basic detection)
convert_to_uv.py my_script.py --detect-imports
# Preview conversion without creating file
convert_to_uv.py my_script.py --dry-run
# Specify output filename
convert_to_uv.py my_script.py --output my_script_new.py
"""
import ast
import re
import sys
import tomllib
from pathlib import Path
# Common import name -> PyPI package name mappings.
# PyPI distribution names often differ from the module name you import;
# this table covers frequent cases where simple underscore->hyphen
# normalization is not enough (used by normalize_package_name).
IMPORT_TO_PACKAGE = {
    'cv2': 'opencv-python',
    'PIL': 'Pillow',
    'yaml': 'PyYAML',
    'bs4': 'beautifulsoup4',
    'sklearn': 'scikit-learn',
    'dotenv': 'python-dotenv',
    'claude_agent_sdk': 'claude-agent-sdk',
}
def get_pypi_latest_version(package_name: str) -> str | None:
    """Return the latest released version of *package_name* from PyPI.

    Queries the PyPI JSON API and returns a version string such as
    "1.2.3", or None when the package cannot be resolved (network
    failure, unknown package, non-200 response, ...).
    """
    try:
        # Local import: the script stays usable even in offline mode.
        import httpx
        with httpx.Client(timeout=5.0) as client:
            resp = client.get(f"https://pypi.org/pypi/{package_name}/json")
            if resp.status_code == 200:
                return resp.json()['info']['version']
    except Exception:
        # Best-effort lookup: any failure means "version unknown".
        pass
    return None
def find_version_in_project(package_name: str, script_path: Path) -> str | None:
"""
Look for version constraint in project's pyproject.toml.
Searches up directory tree from script location.
"""
current = script_path.parent
# Search up to 3 levels for pyproject.toml
for _ in range(3):
pyproject_path = current / "pyproject.toml"
if pyproject_path.exists():
try:
content = pyproject_path.read_text(encoding='utf-8')
data = tomllib.loads(content)
# Check [project.dependencies]
deps = data.get('project', {}).get('dependencies', [])
for dep in deps:
if isinstance(dep, str) and dep.startswith(package_name):
# Extract version constraint
# e.g., "package>=1.0.0" -> ">=1.0.0"
version_part = dep[len(package_name):].strip()
if version_part:
return version_part
# Check [tool.uv.sources] or other sections if needed
# (could expand this to check dev-dependencies, etc.)
except Exception:
pass
# Move up one directory
parent = current.parent
if parent == current: # Reached filesystem root
break
current = parent
return None
def normalize_package_name(import_name: str, use_pypi: bool = True) -> str:
    """Map an import name to its likely PyPI distribution name.

    Resolution order:
      1. the curated IMPORT_TO_PACKAGE table,
      2. underscore->hyphen normalization (validated against PyPI when
         *use_pypi* is true, trusted as-is otherwise),
      3. the import name unchanged.
    """
    known = IMPORT_TO_PACKAGE.get(import_name)
    if known is not None:
        return known

    if '_' in import_name:
        candidate = import_name.replace('_', '-')
        if not use_pypi:
            # Offline mode: assume the conventional hyphenated name.
            return candidate
        if get_pypi_latest_version(candidate):
            return candidate

    # Nothing better found - keep the import name.
    return import_name
def resolve_package_version(
    import_name: str,
    script_path: Path,
    use_pypi: bool = True
) -> str:
    """Resolve an import name to a "package>=X.Y.Z"-style requirement.

    Tries, in order: the project's pyproject.toml constraint, the latest
    PyPI release (pinned as a ">=" minimum), and finally the bare
    package name so uv resolves the version itself.
    """
    package_name = normalize_package_name(import_name, use_pypi)

    # 1) A constraint declared by the surrounding project wins.
    constraint = find_version_in_project(package_name, script_path)
    if constraint:
        return f"{package_name}{constraint}"

    # 2) Otherwise pin a minimum at the current PyPI release.
    if use_pypi:
        latest = get_pypi_latest_version(package_name)
        if latest:
            return f"{package_name}>={latest}"

    # 3) Unversioned fallback - uv will pick a version.
    return package_name
def has_pep723_metadata(content: str) -> bool:
    """Return True when *content* already contains a PEP 723 metadata block."""
    block_re = re.compile(
        r'# /// script\r?\n((?:#.*(?:\r?\n|(?=\r?\n?#\s*///)))+)(?:\r?\n)?#\s*///',
        re.MULTILINE,
    )
    return block_re.search(content) is not None
def read_requirements_file(req_path: Path) -> list[str]:
    """Read dependency specifiers from a requirements.txt-style file.

    Returns the requirement lines, skipping blanks, comments, and every
    pip option line (anything starting with '-': -e/--editable,
    -r/--requirement, --index-url, -c/--constraint, --hash, ...) since
    options are not valid PEP 723 dependency entries.  The previous
    version only skipped -e and -r, so lines like "--index-url ..."
    leaked into the generated metadata.  Returns [] when the file is
    missing or unreadable.
    """
    if not req_path.exists():
        return []
    try:
        content = req_path.read_text(encoding='utf-8')
    except (UnicodeDecodeError, OSError) as e:
        print(f"Warning: Could not read {req_path}: {e}", file=sys.stderr)
        return []
    deps = []
    for line in content.splitlines():
        line = line.strip()
        # Skip empty lines and comments
        if not line or line.startswith('#'):
            continue
        # Skip pip options; they are not dependency specifiers.
        if line.startswith('-'):
            continue
        deps.append(line)
    return deps
def detect_imports(content: str) -> list[str]:
    """Detect third-party top-level imports in *content* (basic heuristic).

    Returns sorted import names (module names, not PyPI package names -
    callers are expected to map them).  Returns [] when the script does
    not parse as Python.
    """
    try:
        tree = ast.parse(content)
    except SyntaxError as e:
        print(f"Warning: Could not parse script for imports: {e}", file=sys.stderr)
        return []

    found: set[str] = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            # Keep only the top-level module of dotted imports.
            found.update(alias.name.split('.')[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module:
            found.add(node.module.split('.')[0])

    # Drop well-known standard-library modules (intentionally incomplete).
    stdlib_modules = {
        'abc', 'argparse', 'ast', 'asyncio', 'base64', 'collections', 'contextlib',
        'copy', 'csv', 'dataclasses', 'datetime', 'decimal', 'email', 'enum', 'functools',
        'glob', 'hashlib', 'http', 'inspect', 'io', 'itertools', 'json', 'logging',
        'math', 'multiprocessing', 'operator', 'os', 'pathlib', 'pickle', 'platform',
        'pprint', 'queue', 're', 'secrets', 'shutil', 'socket', 'sqlite3', 'ssl',
        'string', 'subprocess', 'sys', 'tempfile', 'threading', 'time', 'tomllib',
        'traceback', 'typing', 'unittest', 'urllib', 'uuid', 'warnings', 'weakref',
        'xml', 'zipfile', 'zoneinfo'
    }
    return sorted(name for name in found if name not in stdlib_modules)
def generate_header(
    dependencies: list[str],
    python_version: str = ">=3.11",
    quiet: bool = False
) -> str:
    """Build the uv shebang plus PEP 723 metadata header.

    *dependencies* are requirement strings written into the TOML array;
    *quiet* appends --quiet to the shebang.  The result ends with a
    trailing newline so the script body can be appended directly.
    """
    shebang = "#!/usr/bin/env -S uv run --script"
    if quiet:
        shebang += " --quiet"

    if not dependencies:
        deps_section = "# dependencies = []"
    else:
        # One quoted requirement per commented line, trailing commas.
        entries = ",\n# ".join(f'"{dep}"' for dep in dependencies)
        deps_section = f"# dependencies = [\n# {entries},\n# ]"

    return (
        f"{shebang}\n"
        "# /// script\n"
        f'# requires-python = "{python_version}"\n'
        f"{deps_section}\n"
        "# ///\n"
    )
def convert_script(
    script_path: Path,
    output_path: Path | None = None,
    requirements_path: Path | None = None,
    detect_imports_flag: bool = False,
    dry_run: bool = False,
    python_version: str = ">=3.11",
    quiet_mode: bool = False,
    use_pypi: bool = True
) -> bool:
    """
    Convert script to uv single-file (PEP 723) format.

    Gathers dependencies (explicit requirements file, then a sibling
    requirements.txt, then optionally import detection), prepends a
    generated shebang + metadata header, and writes the result to
    *output_path* (default: <stem>_uv<suffix> next to the input).
    Progress is reported on stdout, errors on stderr.

    Returns True if successful, False otherwise.
    """
    # Read original script
    try:
        content = script_path.read_text(encoding='utf-8')
    except (FileNotFoundError, PermissionError, OSError, UnicodeDecodeError) as e:
        print(f"Error: Cannot read {script_path}: {e}", file=sys.stderr)
        return False
    # Refuse to convert twice: an existing metadata block means the
    # script is already in uv format.
    if has_pep723_metadata(content):
        print(f"Error: {script_path} already has PEP 723 metadata", file=sys.stderr)
        print(" Use validate_script.py to check the existing metadata", file=sys.stderr)
        return False
    # Determine dependencies
    dependencies = []
    if requirements_path:
        # Use specified requirements file
        dependencies = read_requirements_file(requirements_path)
        if dependencies:
            print(f"Found {len(dependencies)} dependencies in {requirements_path}")
    else:
        # Fall back to a requirements.txt next to the script, if any.
        default_req = script_path.parent / "requirements.txt"
        if default_req.exists():
            dependencies = read_requirements_file(default_req)
            if dependencies:
                print(f"Found {len(dependencies)} dependencies in {default_req}")
    # Optionally detect imports
    if detect_imports_flag:
        detected = detect_imports(content)
        if detected:
            print(f"Detected imports: {', '.join(detected)}")
            print("Resolving versions...")
            # Detected imports are only used when no requirements file
            # supplied dependencies - requirements take precedence.
            if not dependencies:
                # Use smart version resolution (project pin -> PyPI -> bare name).
                resolved = []
                for imp in detected:
                    # Normalize package name first (for the report below;
                    # resolve_package_version normalizes again internally).
                    normalized_pkg = normalize_package_name(imp, use_pypi)
                    # Then resolve version
                    dep = resolve_package_version(imp, script_path, use_pypi=use_pypi)
                    resolved.append(dep)
                    # Show what was resolved.
                    # NOTE(review): the messages below render as
                    # 'name''package' with no separator - looks like an
                    # arrow between the quotes was lost; confirm intended text.
                    if imp in IMPORT_TO_PACKAGE:
                        print(f" - Mapped '{imp}''{IMPORT_TO_PACKAGE[imp]}' (known mapping)")
                    elif imp != normalized_pkg:
                        print(f" - Normalized '{imp}''{normalized_pkg}' (auto-detected)")
                    if '>=' in dep:
                        version = dep.split('>=')[1]
                        # Re-checks pyproject to label the source of the pin.
                        source = "from project" if find_version_in_project(dep.split('>=')[0], script_path) else "from PyPI"
                        print(f" - Resolved version: {version} {source}")
                    else:
                        print(f" - Using package: {dep} (uv will resolve version)")
                dependencies = resolved
    # Generate header
    header = generate_header(dependencies, python_version, quiet_mode)
    # Remove old shebang if present (the generated header carries its own).
    lines = content.split('\n')
    if lines and lines[0].startswith('#!'):
        # Skip old shebang
        content_without_shebang = '\n'.join(lines[1:])
    else:
        content_without_shebang = content
    # Combine header and content
    new_content = header + content_without_shebang
    # Determine output path
    if output_path is None:
        # Default: add _uv before extension
        stem = script_path.stem
        suffix = script_path.suffix
        output_path = script_path.parent / f"{stem}_uv{suffix}"
    # Dry run - preview only, nothing is written.
    if dry_run:
        print("\n" + "=" * 60)
        print("DRY RUN - Preview of converted script:")
        print("=" * 60)
        print(new_content[:500])  # Show first 500 chars
        if len(new_content) > 500:
            print(f"\n... ({len(new_content) - 500} more characters)")
        print("=" * 60)
        print(f"Would create: {output_path}")
        return True
    # Write output
    try:
        output_path.write_text(new_content, encoding='utf-8')
        print(f"✓ Created: {output_path}")
        # Make executable so the uv shebang can be used directly.
        import stat
        current_permissions = output_path.stat().st_mode
        output_path.chmod(current_permissions | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
        print("✓ Made executable")
        # Print next steps
        print("\nNext steps:")
        print(f" 1. Review dependencies in {output_path}")
        print(" 2. Add version constraints if needed")
        print(f" 3. Test: {output_path}")
        print(f" 4. Validate: validate_script.py {output_path}")
        return True
    except (PermissionError, OSError) as e:
        print(f"Error: Cannot write {output_path}: {e}", file=sys.stderr)
        return False
def main():
    """Command-line entry point: parse arguments and run the conversion."""
    import argparse

    # Argument order is kept as-is because it determines --help output.
    parser = argparse.ArgumentParser(
        description="Convert Python scripts to uv single-file format",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    parser.add_argument('script', help='Python script to convert')
    parser.add_argument(
        '--requirements', '-r',
        help='Path to requirements.txt (default: look in same directory)')
    parser.add_argument(
        '--detect-imports', '-d',
        action='store_true',
        help='Detect dependencies from imports (basic detection)')
    parser.add_argument(
        '--output', '-o',
        help='Output filename (default: <script>_uv.py)')
    parser.add_argument(
        '--python-version', '-p',
        default='>=3.11',
        help='Python version constraint (default: >=3.11)')
    parser.add_argument(
        '--quiet', '-q',
        action='store_true',
        help='Add --quiet flag to shebang')
    parser.add_argument(
        '--dry-run', '-n',
        action='store_true',
        help='Preview conversion without creating file')
    parser.add_argument(
        '--no-pypi',
        action='store_true',
        help='Skip PyPI queries for version resolution (faster, offline)')
    args = parser.parse_args()

    source = Path(args.script)
    if not source.exists():
        print(f"Error: File not found: {source}", file=sys.stderr)
        sys.exit(1)

    ok = convert_script(
        script_path=source,
        output_path=Path(args.output) if args.output else None,
        requirements_path=Path(args.requirements) if args.requirements else None,
        detect_imports_flag=args.detect_imports,
        dry_run=args.dry_run,
        python_version=args.python_version,
        quiet_mode=args.quiet,
        use_pypi=not args.no_pypi,
    )
    sys.exit(0 if ok else 1)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,356 @@
#!/usr/bin/env -S uv run --script --quiet
# /// script
# requires-python = ">=3.11"
# dependencies = []
# ///
"""
Validate PEP 723 inline script metadata
Checks Python scripts for:
- Valid PEP 723 metadata block
- Required fields (requires-python, dependencies)
- TOML syntax validity
- Shebang presence and format
- Security issues
Usage:
validate_script.py <script.py>
validate_script.py --strict <script.py>
validate_script.py --force <script>
Examples:
# Basic validation
validate_script.py my_script.py
# Strict mode (all best practices)
validate_script.py --strict my_script.py
# Validate executable Python script without .py extension
validate_script.py my_script
# Force validation, skip extension check
validate_script.py --force my_script
# Validate all scripts in directory
find . -name '*.py' -exec python validate_script.py {} \\;
"""
import ast
import os
import re
import sys
import tomllib
from dataclasses import dataclass
from pathlib import Path
@dataclass
class ValidationResult:
    """Outcome of validating one script: feature flags plus collected messages."""
    valid: bool           # overall verdict; False when any error was recorded
    has_metadata: bool    # PEP 723 metadata block present
    has_shebang: bool     # first line starts with '#!'
    has_docstring: bool   # module-level docstring present
    warnings: list[str]   # non-fatal findings (style, heuristic security checks)
    errors: list[str]     # fatal findings that make the script invalid
def extract_metadata_block(content: str) -> str | None:
"""Extract PEP 723 metadata block"""
# Match metadata block with CRLF tolerance and flexible whitespace
# Uses lookahead to allow last metadata line without trailing newline
pattern = r'# /// script\r?\n((?:#.*(?:\r?\n|(?=\r?\n?#\s*///)))+)(?:\r?\n)?#\s*///'
match = re.search(pattern, content, re.MULTILINE)
if not match:
return None
# Extract TOML content (remove leading # and optional whitespace from each line)
lines = match.group(1).splitlines()
toml_lines = []
for line in lines:
if line.startswith('#'):
# Strip '#' followed by optional space or tab
stripped = re.sub(r'^#[ \t]?', '', line)
toml_lines.append(stripped)
else:
# Preserve non-comment lines (shouldn't occur with our regex but be safe)
toml_lines.append(line)
return '\n'.join(toml_lines)
def validate_toml_syntax(toml_content: str) -> list[str]:
"""Validate TOML syntax using structured parsing"""
errors = []
# Parse TOML content
try:
data = tomllib.loads(toml_content)
except tomllib.TOMLDecodeError as e:
errors.append(f"Invalid TOML syntax: {e}")
return errors
# Validate required fields
if 'requires-python' not in data:
errors.append("Missing 'requires-python' field")
elif not isinstance(data['requires-python'], str):
errors.append("'requires-python' must be a string")
if 'dependencies' not in data:
errors.append("Missing 'dependencies' field")
else:
dependencies = data['dependencies']
# Dependencies should be a list/array
if not isinstance(dependencies, list):
errors.append("'dependencies' must be an array/list")
else:
# Validate each dependency item
for idx, dep in enumerate(dependencies):
if not isinstance(dep, str):
errors.append(
f"Dependency at index {idx} must be a string, got {type(dep).__name__}")
return errors
def check_shebang(content: str) -> tuple[bool, list[str]]:
    """Check the script's shebang line.

    Returns (has_shebang, warnings).  An empty file now reports the
    "Empty file" message: the previous `if not lines` guard was
    unreachable because ''.split('\\n') returns [''], never an empty
    list.  A present but non-recommended shebang yields a warning
    suggesting the uv form.
    """
    warnings = []
    # ''.split('\n') == [''] - test the content itself for emptiness.
    if not content:
        return False, ["Empty file"]
    first_line = content.split('\n')[0]
    if not first_line.startswith('#!'):
        return False, []
    # Check for recommended shebangs
    recommended = [
        '#!/usr/bin/env -S uv run --script',
        '#!/usr/bin/env -S uv run --script --quiet',
    ]
    if first_line not in recommended:
        warnings.append(f"Shebang not recommended. Use: {recommended[0]}")
    return True, warnings
def check_security_issues(content: str) -> list[str]:
    """Heuristic scan for common security smells; returns warning strings."""
    findings: list[str] = []

    # Hardcoded-credential patterns: assignment to a quoted literal.
    secret_patterns = [
        (r'password\s*=\s*["\']', "Possible hardcoded password"),
        (r'api[_-]?key\s*=\s*["\']', "Possible hardcoded API key"),
        (r'secret\s*=\s*["\']', "Possible hardcoded secret"),
        (r'token\s*=\s*["\']', "Possible hardcoded token"),
    ]
    findings.extend(
        f"Security: {message}"
        for pattern, message in secret_patterns
        if re.search(pattern, content, re.IGNORECASE)
    )

    # shell=True invites command injection via string interpolation.
    if re.search(r'shell\s*=\s*True', content):
        findings.append(
            "Security: subprocess.run with shell=True (command injection risk)")

    # eval/exec on any input is a code-injection hazard.
    if re.search(r'\b(eval|exec)\s*\(', content):
        findings.append(
            "Security: Use of eval() or exec() (code injection risk)")
    return findings
def is_valid_python_file(script_path: Path) -> tuple[bool, str]:
    """Decide whether a non-.py file should be treated as Python.

    Accepts files that are executable with a Python shebang, or whose
    contents parse as Python.

    Returns:
        Tuple of (is_valid, reason) where reason describes why it's
        valid or invalid.
    """
    try:
        content = script_path.read_text(encoding='utf-8')
    except (FileNotFoundError, PermissionError, OSError, UnicodeDecodeError) as e:
        return False, f"Cannot read file: {e}"

    # Executable bit + '#!...python...' first line is accepted outright.
    first_line = content.split('\n')[0]
    shebang_is_python = first_line.startswith('#!') and 'python' in first_line.lower()
    if shebang_is_python and os.access(script_path, os.X_OK):
        return True, "executable with Python shebang"

    # Otherwise fall back to a full syntax check.
    try:
        ast.parse(content)
    except SyntaxError as e:
        return False, f"not valid Python: {e}"
    return True, "valid Python syntax"
def validate_script(script_path: Path, strict: bool = False) -> ValidationResult:
    """Run all checks on *script_path* and return a ValidationResult.

    *strict* additionally requires a shebang (error) and recommends a
    module docstring (warning).  Security findings are always warnings,
    never errors, because they are heuristic.
    """
    result = ValidationResult(
        valid=True,
        has_metadata=False,
        has_shebang=False,
        has_docstring=False,
        warnings=[],
        errors=[],
    )

    try:
        content = script_path.read_text(encoding='utf-8')
    except (FileNotFoundError, PermissionError, OSError, UnicodeDecodeError) as e:
        result.errors.append(f"Failed to read file: {e}")
        result.valid = False
        return result

    # Shebang check (warning unless strict).
    result.has_shebang, shebang_warnings = check_shebang(content)
    result.warnings.extend(shebang_warnings)
    if strict and not result.has_shebang:
        result.errors.append("Missing shebang (required in strict mode)")
        result.valid = False

    # Metadata block - without it there is nothing more to validate.
    metadata = extract_metadata_block(content)
    result.has_metadata = metadata is not None
    if metadata is None:
        result.errors.append("No PEP 723 metadata block found")
        result.valid = False
        return result

    toml_errors = validate_toml_syntax(metadata)
    if toml_errors:
        result.errors.extend(toml_errors)
        result.valid = False

    # Module docstring, via AST so stray strings elsewhere don't count.
    try:
        result.has_docstring = ast.get_docstring(ast.parse(content)) is not None
    except SyntaxError as e:
        result.has_docstring = False
        result.warnings.append(
            f"Could not parse file for docstring check: {e}")
    if strict and not result.has_docstring:
        result.warnings.append(
            "Missing module docstring (recommended in strict mode)")

    # Heuristic security scan (warnings only, never errors).
    result.warnings.extend(check_security_issues(content))
    return result
def main():
    """Command-line entry point: parse args, gate on file type, validate,
    print a report, and exit 0 (valid) or 1 (invalid / not a Python file)."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Validate PEP 723 script metadata",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__
    )
    parser.add_argument('script', help='Python script to validate')
    parser.add_argument('--strict', action='store_true',
                        help='Enable strict validation')
    parser.add_argument('--force', action='store_true',
                        help='Skip Python file extension check')
    args = parser.parse_args()
    script_path = Path(args.script)
    if not script_path.exists():
        print(f"Error: File not found: {script_path}", file=sys.stderr)
        sys.exit(1)
    # Files without a .py extension must either be forced through or
    # pass the heuristic Python-file check.
    if script_path.suffix != '.py':
        if args.force:
            print(
                f"Warning: File lacks .py extension, but --force was specified", file=sys.stderr)
        else:
            # Check if it's a valid Python file by other means
            is_valid, reason = is_valid_python_file(script_path)
            if not is_valid:
                print(
                    f"Error: Not a Python file: {script_path}", file=sys.stderr)
                print(f" Reason: {reason}", file=sys.stderr)
                print(f" Hint: File must either:", file=sys.stderr)
                print(f" - Have a .py extension, OR", file=sys.stderr)
                print(f" - Be executable with a Python shebang, OR",
                      file=sys.stderr)
                print(f" - Contain valid Python syntax", file=sys.stderr)
                print(f" Use --force to skip this check", file=sys.stderr)
                sys.exit(1)
            else:
                print(
                    f"Info: File accepted as Python ({reason})", file=sys.stderr)
    # Validate
    result = validate_script(script_path, strict=args.strict)
    # Print results as a check-mark report on stdout.
    print(f"Validating: {script_path}")
    print("=" * 60)
    if result.has_shebang:
        print("✓ Has shebang")
    else:
        print("✗ Missing shebang")
    if result.has_metadata:
        print("✓ Has PEP 723 metadata")
    else:
        print("✗ Missing PEP 723 metadata")
    if result.has_docstring:
        print("✓ Has docstring")
    else:
        print("○ No docstring")
    # NOTE(review): the items below print with no bullet/indent prefix -
    # possibly a marker character was lost in extraction; confirm.
    if result.warnings:
        print("\nWarnings:")
        for warning in result.warnings:
            print(f"{warning}")
    if result.errors:
        print("\nErrors:")
        for error in result.errors:
            print(f"{error}")
    print("\n" + "=" * 60)
    # Exit status mirrors the validity verdict (0 = valid, 1 = invalid).
    if result.valid:
        print("Status: ✓ VALID")
        sys.exit(0)
    else:
        print("Status: ✗ INVALID")
        sys.exit(1)


if __name__ == "__main__":
    main()