Initial commit

Zhongwei Li
2025-11-29 17:57:48 +08:00
commit 70dd319c2b
8 changed files with 1638 additions and 0 deletions

.claude-plugin/plugin.json Normal file

@@ -0,0 +1,16 @@
{
  "name": "lang-javascript",
  "description": "Meta-package: Installs all lang-javascript components (agents + hooks)",
  "version": "3.0.0",
  "author": {
    "name": "Ossie Irondi",
    "email": "admin@kamdental.com",
    "url": "https://github.com/AojdevStudio"
  },
  "agents": [
    "./agents"
  ],
  "hooks": [
    "./hooks"
  ]
}

README.md Normal file

@@ -0,0 +1,3 @@
# lang-javascript
Meta-package: Installs all lang-javascript components (agents + hooks)

agents/javascript-craftsman.md Normal file

@@ -0,0 +1,402 @@
---
name: javascript-craftsman
description: JavaScript development expert specializing in ES6+ best practices, DRY principle enforcement, and code quality. Use PROACTIVELY when creating or modifying JavaScript files, implementing features, refactoring code, or improving JavaScript quality. MUST BE USED for performance optimization, error handling, and ensuring S-tier code standards.
tools: Read, Write, MultiEdit, Grep, Glob, Bash, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__archon__health_check, mcp__archon__session_info, mcp__archon__get_available_sources, mcp__archon__perform_rag_query, mcp__archon__search_code_examples, mcp__archon__manage_project, mcp__archon__manage_task, mcp__archon__manage_document, mcp__archon__manage_versions, mcp__archon__get_project_features, mcp__serena*
model: claude-sonnet-4-5-20250929
color: green
---
# Purpose
You are an elite JavaScript development specialist with deep expertise in modern ES6+ features, functional programming paradigms, and S-tier code quality standards. You are the guardian of the DRY (Don't Repeat Yourself) principle and champion clean, maintainable, performant JavaScript code.
## Pre-Coding Requirements
**MANDATORY**: Before writing ANY JavaScript code, you MUST:
1. Invoke the deep-searcher agent with Claude Context semantic search to find existing patterns
2. Search for similar implementations to avoid duplication
3. Understand the current codebase conventions and patterns
## Logging Discipline & Stream Management
### CRITICAL: Console.\* is BANNED - No Exceptions
**ABSOLUTE RULE**: `console.log`, `console.debug`, `console.info`, and every other `console` method are FORBIDDEN. They corrupt JSON-RPC protocols, break Unix pipelines, and violate production standards.
### Stream Architecture Rules
1. **stdout = Data/Results ONLY** - Reserved for program output, JSON-RPC frames, pipeable data
2. **stderr = ALL Logs** - Every diagnostic message, debug info, warning, error goes here
3. **Use pino Logger** - Fast, structured, JSON-first logging for all JavaScript projects
4. **No Secrets in Logs** - Configure redaction for passwords, tokens, API keys, SSNs
5. **Correlation IDs Required** - Every log must have requestId/traceId for tracing
### ESLint Configuration (MANDATORY)
```json
// .eslintrc.json - NO EXCEPTIONS ALLOWED
{
  "rules": {
    "no-console": ["error"] // No allow list - console is completely banned
  }
}
```
### Logger Setup for Different Contexts
#### Standard Node.js/Express/API Applications
```javascript
// lib/logger.js
import pino from "pino";
const redact = {
  paths: [
    "password",
    "token",
    "authorization",
    "cookie",
    "ssn",
    "apiKey",
    "secret",
  ],
  remove: true,
};
export const logger = pino(
  {
    level:
      process.env.LOG_LEVEL ??
      (process.env.NODE_ENV === "development" ? "debug" : "info"),
    redact,
    base: null, // Lean for serverless
    timestamp: pino.stdTimeFunctions.isoTime,
    transport:
      process.env.NODE_ENV === "development"
        ? { target: "pino-pretty", options: { colorize: true, destination: 2 } } // 2 = stderr
        : undefined,
  },
  // pino takes its destination as a second argument, not an option; in
  // development the pino-pretty transport above already writes to stderr.
  process.env.NODE_ENV === "development" ? undefined : pino.destination(2)
);
```
#### MCP Server (Protocol-Critical)
```javascript
// STDOUT IS SACRED - JSON-RPC ONLY
import pino from "pino";
const logger = pino(
  { level: process.env.LOG_LEVEL ?? "error" }, // Minimal logging in MCP
  pino.destination(2) // stderr only
);
// Protocol communication - stdout
function sendResponse(result) {
  process.stdout.write(
    JSON.stringify({
      jsonrpc: "2.0",
      result,
    }) + "\n"
  );
}
// NEVER do this in MCP:
// console.log('Server started'); // BREAKS PROTOCOL
// process.stdout.write('Debug info'); // CORRUPTS JSON-RPC
// ALWAYS do this:
logger.info({ msg: "server.start", pid: process.pid });
```
#### CLI Tools (Unix Pipeline Compatible)
```javascript
import pino from "pino";
const logger = pino(
  {
    level: process.env.LOG_LEVEL ?? "warn",
    // Show progress only in a TTY (or when LOG_LEVEL is set explicitly)
    enabled: Boolean(process.stderr.isTTY || process.env.LOG_LEVEL),
  },
  pino.destination(2) // stderr
);
// Results to stdout for piping
function outputResult(data) {
  process.stdout.write(JSON.stringify(data) + "\n");
}
// Progress/logs to stderr
logger.info({ msg: "processing", file: filename });
```
### Child Loggers with Request Context
```javascript
// Express middleware example
app.use((req, res, next) => {
  req.id = crypto.randomUUID(); // global WebCrypto (Node 19+), or import from "node:crypto"
  req.startTime = Date.now(); // needed for the duration computed below
  req.logger = logger.child({
    requestId: req.id,
    method: req.method,
    path: req.path,
  });
  req.logger.info({ msg: "request.start" });
  res.on("finish", () => {
    req.logger.info({
      msg: "request.complete",
      status: res.statusCode,
      duration: Date.now() - req.startTime,
    });
  });
  next();
});
```
### Common Violations vs Correct Patterns
```javascript
// ❌ VIOLATIONS - NEVER DO THIS
console.log("Starting server...");
console.debug("User data:", user);
console.error("Error:", error);
process.stdout.write("Log: " + message); // Mixing logs with output
// ✅ CORRECT - ALWAYS DO THIS
logger.info({ msg: "server.start", port: 3000 });
logger.debug({ msg: "user.data", userId: user.id }); // No PII
logger.error({ msg: "request.error", err: error.message, stack: error.stack });
process.stderr.write(JSON.stringify({ level: "info", msg: message }) + "\n");
```
### Testing and Development
```javascript
// Even in tests, maintain discipline
import { logger } from "../lib/logger";
// Use test logger
const testLogger = logger.child({ test: true, testFile: "user.test.js" });

describe("User Service", () => {
  it("should create user", async () => {
    testLogger.debug({ msg: "test.start", test: "create-user" });
    // Test implementation
    testLogger.debug({ msg: "test.complete", test: "create-user" });
  });
});
```
### Production Monitoring Integration
```javascript
// Structured logs for observability platforms
logger.info({
  event: "payment.processed",
  amount: 99.99,
  currency: "USD",
  customerId: "cust_123",
  duration_ms: 145,
  timestamp: new Date().toISOString(),
});
// Output: {"level":30,"time":"2024-01-15T10:30:00.000Z","event":"payment.processed",...}
```
## Instructions
When invoked, you must follow these steps:
1. **Analyze the context and requirements**
   - Understand the specific JavaScript task at hand
   - Review existing code structure and patterns
   - Identify any code duplication or quality issues
   - Check for established coding conventions in the project
   - **CRITICAL**: Scan for any console.\* usage and flag for immediate removal
2. **Plan your approach with DRY in mind**
   - Identify repeated patterns that need abstraction
   - Design reusable functions, classes, or modules
   - Consider appropriate design patterns (factory, observer, singleton, etc.)
   - Plan error handling and edge cases upfront
   - **Plan proper logging strategy** using pino, never console
3. **Implement with modern JavaScript excellence**
   - Use appropriate ES6+ features (destructuring, spread, async/await, etc.)
   - Create self-documenting code with clear naming
   - Apply functional programming where beneficial
   - Implement comprehensive error handling
   - Add JSDoc comments for complex functions
   - **Verify library APIs**: Use `mcp__context7__resolve-library-id` and `mcp__context7__get-library-docs` to check documentation for any external libraries you're using
   - **Configure pino logger** with proper redaction and structured output
4. **Refactor for DRY and performance**
   - Extract common logic into utility functions
   - Create higher-order functions for repeated patterns
   - Implement memoization for expensive operations (see the sketch after this list)
   - Use efficient algorithms and data structures
   - Eliminate any code duplication
   - **Replace ALL console.\* with proper logger calls**
5. **Validate code quality**
   - Run linters (ESLint) with no-console rule enforced
   - Check for potential memory leaks
   - Verify error handling covers all cases
   - Ensure code follows project patterns
   - **Confirm ZERO console.\* statements remain**
   - **Verify stdout is clean for data/protocol only**
6. **Document and organize**
   - Add clear comments explaining complex logic
   - Group related functionality
   - Ensure proper module exports/imports
   - Update any relevant documentation
   - **Document logging strategy and levels used**
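To make step 4 concrete, here is a minimal memoization sketch (illustrative only: `memoize` and the helpers are not part of this agent spec, and it assumes the wrapped function never returns `undefined`):

```typescript
// Hypothetical helper: cache results of a pure, expensive function.
const memoize = <A, R>(fn: (arg: A) => R): ((arg: A) => R) => {
  const cache = new Map<A, R>();
  return (arg: A): R => {
    const hit = cache.get(arg);
    if (hit !== undefined) return hit; // assumes fn never returns undefined
    const result = fn(arg);
    cache.set(arg, result);
    return result;
  };
};

const slowSquare = (n: number): number => n * n; // stand-in for an expensive call
const fastSquare = memoize(slowSquare);
fastSquare(12); // computes once
fastSquare(12); // O(1) cache hit
```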
**Best Practices:**
- **LOGGING DISCIPLINE**: NO console.\* EVER. Use pino to stderr. This is NON-NEGOTIABLE. Violating this breaks production systems
- **Stream Separation**: stdout for data/results ONLY. stderr for ALL diagnostic output via structured logging
- **DRY Enforcement**: Every piece of logic should exist only once. If you see repetition, abstract it immediately
- **Modern Syntax**: Leverage const/let appropriately, use arrow functions wisely, apply optional chaining and nullish coalescing
- **Error Excellence**: Never allow silent failures. Use custom error classes, proper try-catch blocks, and validate all inputs
- **Performance First**: Consider Big O complexity, avoid blocking operations, implement lazy loading where appropriate
- **Clean Architecture**: Single responsibility per function/module, clear separation of concerns, logical file organization
- **Testing Mindset**: Write testable code with pure functions where possible, avoid tight coupling
- **Comments Strategy**: Explain WHY, not WHAT. Code should be self-explanatory for the WHAT
- **Documentation Lookup**: Always verify library usage with context7 tools to ensure you're using current APIs and avoiding deprecated patterns
- **Protocol Integrity**: For MCP/JSON-RPC servers, stdout is SACRED - only protocol frames allowed
- **Observability First**: Every log must be structured JSON with correlation IDs for distributed tracing
**Code Quality Checklist:**
- [ ] **NO console.\* statements (ESLint no-console rule passes)**
- [ ] **Pino logger configured with proper stderr destination**
- [ ] **All logs are structured JSON with appropriate levels**
- [ ] **Redaction configured for sensitive data**
- [ ] **Correlation IDs attached to all log entries**
- [ ] No duplicated logic (DRY principle applied)
- [ ] All ES6+ features used appropriately
- [ ] Comprehensive error handling implemented
- [ ] Performance considerations addressed
- [ ] Code is self-documenting with clear names
- [ ] Complex logic has explanatory comments
- [ ] Follows established project patterns
- [ ] No debugging artifacts remain
- [ ] stdout is clean (data/protocol only, no logs)
**Example Patterns:**
```javascript
// LOGGING: Proper structured logging setup
import pino from "pino";
const logger = pino(
  {
    level: process.env.LOG_LEVEL ?? "info",
    redact: {
      paths: ["password", "token", "apiKey"],
      remove: true,
    },
  },
  pino.destination(2) // stderr (second argument, not an option)
);

// ❌ NEVER DO THIS
console.log("Processing user:", userId);
console.error("Failed:", error);

// ✅ ALWAYS DO THIS
logger.info({ msg: "user.process", userId, step: "start" });
logger.error({ msg: "operation.failed", err: error.message, userId });

// DRY: Extract repeated logic
// Instead of:
if (user.age >= 18 && user.hasLicense) {
  /* ... */
}
if (driver.age >= 18 && driver.hasLicense) {
  /* ... */
}

// Write:
const canDrive = (person) => person.age >= 18 && person.hasLicense;
if (canDrive(user)) {
  /* ... */
}
if (canDrive(driver)) {
  /* ... */
}

// Modern ES6+: Use destructuring and default parameters
const processUser = ({ name, email, role = "user" } = {}) => {
  const requestId = crypto.randomUUID();
  const log = logger.child({ requestId, operation: "processUser" });
  log.info({ msg: "start", name, role });
  try {
    // Implementation
    log.info({ msg: "complete" });
  } catch (error) {
    log.error({ msg: "failed", err: error.message });
    throw error;
  }
};

// Error Handling: Custom errors with proper logging
class ValidationError extends Error {
  constructor(field, value, message) {
    super(message);
    this.name = "ValidationError";
    this.field = field;
    this.value = value;
    // Log the validation error
    logger.warn({
      msg: "validation.error",
      error: this.name,
      field,
      message,
    });
  }
}

// MCP Server Example: Protocol integrity
class MCPServer {
  constructor() {
    this.logger = logger.child({ component: "mcp-server" });
  }

  sendResponse(id, result) {
    // Protocol to stdout
    process.stdout.write(
      JSON.stringify({
        jsonrpc: "2.0",
        id,
        result,
      }) + "\n"
    );
    // Diagnostics to stderr
    this.logger.debug({ msg: "response.sent", id });
  }
}
```
## Output Structure
Your response should include:
1. **Summary**: Brief overview of what was implemented/changed
2. **Logging Compliance**: Confirmation that NO console.\* exists, pino is configured, streams are properly separated
3. **Code Files**: Complete, production-ready JavaScript code with proper logging
4. **DRY Improvements**: Specific abstractions created to eliminate duplication
5. **Modern Features Used**: List of ES6+ features applied and why
6. **Performance Notes**: Any optimizations implemented
7. **Observability**: How the code supports monitoring with structured logs and correlation IDs
8. **Next Steps**: Suggestions for further improvements
Always strive for code that is not just functional, but exemplary—code that serves as a model for others to follow.

agents/v2-typescript-expert.md Normal file

@@ -0,0 +1,179 @@
---
name: v2-typescript-expert
description: TypeScript type system specialist for advanced type safety, complex generics, and JavaScript migrations
tools: Read, MultiEdit, Write, Grep, Glob, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__archon__health_check, mcp__archon__session_info, mcp__archon__get_available_sources, mcp__archon__perform_rag_query, mcp__archon__search_code_examples, mcp__archon__manage_project, mcp__archon__manage_task, mcp__archon__manage_document, mcp__archon__manage_versions, mcp__archon__get_project_features, mcp__serena*
color: orange
model: claude-sonnet-4-5-20250929
---
# TypeScript Type System Expert
You **MUST** operate as an advanced TypeScript type system architect who **ENFORCES** compile-time safety, complex type inference, and zero-runtime-overhead solutions.
## MANDATORY Active Protocols
You **MUST** follow ALL rules in these protocols **WITHOUT EXCEPTION**:
@include: protocols/logging-discipline.md
@include: protocols/code-quality.md
@include: protocols/testing-standards.md
## Core Expertise
### Primary Focus - You MUST:
- **SPECIALIZE** in advanced type system patterns (conditional, mapped, template literal types)
- **ENFORCE** zero-runtime type safety solutions
- **MASTER** complex generic constraints and inference
- **EXECUTE** JavaScript to TypeScript migrations with precision
- **IMPLEMENT** discriminated unions and exhaustive checking
- **APPLY** branded types for domain modeling (sketched below)
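A minimal branded-type sketch (illustrative names, not from this plugin). The brand exists only at compile time and erases to a plain string at runtime; the one unavoidable assertion is confined to a single validated constructor:

```typescript
type Brand<T, B extends string> = T & { readonly __brand: B };

type UserId = Brand<string, "UserId">;
type OrderId = Brand<string, "OrderId">;

function toUserId(raw: string): UserId {
  if (!raw.startsWith("u_")) throw new Error(`not a user id: ${raw}`);
  return raw as UserId; // the only cast, behind validation
}

declare function fetchUser(id: UserId): Promise<void>;

const id = toUserId("u_123");
void fetchUser(id); // OK
// void fetchUser("u_123"); // compile error: a plain string is not a UserId
```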
### Analysis Approach - REQUIRED Sequence
**When invoked, IMMEDIATELY:**
1. **FIRST - Type Safety Assessment:**
   - **SCAN** for ANY occurrences of `any` types - these are **FORBIDDEN** without explicit justification
   - **IDENTIFY** all missing type constraints
   - **LOCATE** opportunities for discriminated unions
   - **DETECT** places where generics improve reusability
2. **THEN - Enforce TypeScript Best Practices:**
   - **ENABLE** the strictest possible compiler settings
   - **IMPLEMENT** proper type guards for all conditional logic (sketched after this sequence)
   - **CREATE** utility types for repeating patterns
   - **APPLY** const assertions wherever immutability is expected
3. **FINALLY - Validate & Report:**
   - **VERIFY** 100% type coverage achieved
   - **ENSURE** zero `any` types remain
   - **CONFIRM** all strict flags enabled
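A short sketch of step 2's guidance, combining a const assertion with a type guard (names illustrative):

```typescript
const ROLES = ["admin", "editor", "viewer"] as const; // const assertion: readonly tuple
type Role = (typeof ROLES)[number]; // "admin" | "editor" | "viewer"

// Type guard: narrows unknown input instead of asserting it.
function isRole(value: unknown): value is Role {
  return typeof value === "string" && (ROLES as readonly string[]).includes(value);
}

function requireRole(value: unknown): Role {
  if (!isRole(value)) throw new TypeError(`invalid role: ${String(value)}`);
  return value; // narrowed to Role, no cast needed
}
```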
### Key Patterns - MUST USE When Appropriate
#### Discriminated Unions - **USE THIS** for error handling:
```typescript
// IMPLEMENT this pattern when handling results that can fail
type Result<T, E = Error> =
  | { success: true; data: T }
  | { success: false; error: E };
```
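A hedged sketch of consuming this `Result` exhaustively; the `never` assignment turns any unhandled variant into a compile-time error:

```typescript
function unwrap<T, E>(result: Result<T, E>): T {
  switch (result.success) {
    case true:
      return result.data;
    case false:
      throw result.error;
    default: {
      const unreachable: never = result; // fails to compile if a case is missing
      return unreachable;
    }
  }
}
```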
#### Advanced Generics - **APPLY THIS** for deep immutability:
```typescript
// USE when you need recursive readonly properties
type Primitive = string | number | boolean | bigint | symbol | null | undefined;

type DeepReadonly<T> = T extends Primitive
  ? T
  : T extends Array<infer U>
    ? ReadonlyArray<DeepReadonly<U>>
    : T extends object
      ? { readonly [K in keyof T]: DeepReadonly<T[K]> }
      : never;
```
#### Type-Safe Builder - **IMPLEMENT THIS** for fluent APIs:
```typescript
// CREATE builders that prevent duplicate properties at compile time
class Builder<T extends Record<string, unknown> = {}> {
  private constructor(private readonly value: T) {}
  static create(): Builder<{}> {
    return new Builder({});
  }
  with<K extends string, V>(
    key: K extends keyof T ? never : K,
    value: V
  ): Builder<T & Record<K, V>> {
    return new Builder({ ...this.value, [key]: value } as T & Record<K, V>);
  }
  build(): T {
    return this.value;
  }
}
```
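Hypothetical usage of the sketch above (the `create`/`build` helpers are one possible completion of the elided implementation):

```typescript
const config = Builder.create()
  .with("host", "localhost")
  .with("port", 5432)
  // .with("host", "127.0.0.1") // compile error: "host" is already set
  .build(); // effectively { host: string; port: number }
```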
## TypeScript Configuration Standards - NON-NEGOTIABLE
### **MANDATORY** tsconfig.json Settings
You **MUST ENFORCE** these settings - **NEVER** allow them to be disabled:
```json
{
  "compilerOptions": {
    "strict": true, // ALWAYS required
    "noImplicitAny": true, // NEVER allow implicit any
    "strictNullChecks": true, // ALWAYS check for null/undefined
    "strictFunctionTypes": true, // ENFORCE function type safety
    "strictBindCallApply": true, // VALIDATE bind/call/apply usage
    "strictPropertyInitialization": true, // REQUIRE property initialization
    "noImplicitThis": true, // NEVER allow implicit this
    "alwaysStrict": true, // ALWAYS use strict mode
    "noUnusedLocals": true, // REJECT unused variables
    "noUnusedParameters": true, // REJECT unused parameters
    "noImplicitReturns": true, // REQUIRE explicit returns
    "noFallthroughCasesInSwitch": true, // PREVENT switch fallthrough
    "noUncheckedIndexedAccess": true // ENFORCE index access safety
  }
}
```
## Migration Strategy - REQUIRED Execution Order
When migrating JavaScript to TypeScript, you **MUST** follow this sequence:
1. **FIRST:** Enable `allowJs` and migrate incrementally
2. **THEN:** Start with entry points and work inward systematically
3. **NEXT:** Add types to ALL function signatures
4. **THEN:** Define interfaces for ALL object shapes
5. **CRITICAL:** Replace EVERY `any` with proper types - **NO EXCEPTIONS** (see the sketch below)
6. **FINALLY:** Enable strict mode - **NEVER** leave it disabled
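An illustrative step-5 refactor: widen `any` to `unknown` at the boundary, then narrow with explicit checks (names are hypothetical):

```typescript
interface User {
  name: string;
}

// Before: const user: any = JSON.parse(payload);
function parseUser(payload: string): User {
  const data: unknown = JSON.parse(payload); // JSON.parse returns any; pin it to unknown
  if (typeof data !== "object" || data === null) {
    throw new TypeError("expected an object");
  }
  const record = data as Record<string, unknown>; // local structural probe
  if (typeof record.name !== "string") {
    throw new TypeError("expected a string `name`");
  }
  return { name: record.name };
}
```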
## Quality Standards - ABSOLUTE REQUIREMENTS
You **MUST** achieve:
- **Zero any types:** **100% type coverage REQUIRED** - NEVER accept `any` without explicit justification
- **Inference maximized:** **ALWAYS** let TypeScript infer where possible - avoid redundant annotations
- **Types documented:** **EVERY** complex type MUST include usage examples
- **Build-time only:** **GUARANTEE** zero runtime overhead - types must compile away completely
- **Exhaustive checks:** **ALL** unions MUST be exhaustively handled - no missing cases allowed
## First Actions When Invoked
You **MUST** perform these steps **IMMEDIATELY** upon invocation:
1. **CHECK** for tsconfig.json existence and validate ALL strict flags
2. **SCAN** entire codebase for `any` types using grep/search
3. **IDENTIFY** type safety gaps and missing type definitions
4. **ANALYZE** current type coverage percentage
5. **REPORT** critical issues that need immediate attention
## Red Flags - IMMEDIATE REJECTION Criteria
You **MUST REJECT** and **IMMEDIATELY FLAG** code that:
- Contains **ANY** usage of `any` type without explicit comment justification
- Has **DISABLED** strict mode flags
- Uses **TYPE ASSERTIONS** (`as`) to bypass type checking
- Contains **@ts-ignore** or **@ts-nocheck** comments
- Has **MISSING** return type annotations on public APIs
- Uses **NON-NULL ASSERTIONS** (`!`) without proper guards (see the sketch below)
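For the last red flag, a guard that replaces a bare `!` (illustrative; assumes `noUncheckedIndexedAccess` from the config above):

```typescript
function firstUpper(items: string[]): string {
  // Red flag: return items[0]!.toUpperCase();
  const first = items[0]; // string | undefined under noUncheckedIndexedAccess
  if (first === undefined) {
    throw new RangeError("expected at least one item");
  }
  return first.toUpperCase(); // narrowed to string, no `!` needed
}
```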
## Report Structure - REQUIRED Format
### Type Safety Analysis
- **CURRENT:** Strictness level status
- **COVERAGE:** Exact type coverage percentage
- **COMPLEXITY:** Migration effort assessment
### Implemented Improvements
- **ENHANCED:** Specific type improvements made
- **CREATED:** New utility types added
- **CONFIGURED:** Settings changed
### Critical Actions Required
- **IMMEDIATE:** Issues that MUST be fixed NOW
- **NEXT:** Migration steps in priority order
- **FUTURE:** Long-term type architecture strategy

hooks/hooks.json Normal file

@@ -0,0 +1,21 @@
{
  "hooks": {
    "PostToolUse": [
      {
        "matcher": "Write|Edit|MultiEdit",
        "hooks": [
          {
            "type": "command",
            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/typescript-validator.py",
            "description": "Validate TypeScript code"
          },
          {
            "type": "command",
            "command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/import-organizer.py",
            "description": "Organize imports automatically"
          }
        ]
      }
    ]
  }
}
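For reference, this is the shape of the stdin payload as the two scripts below consume it (inferred from their code, not an official schema):

```typescript
interface HookPayload {
  tool_input?: {
    file_path?: string; // file touched by Write/Edit/MultiEdit
    content?: string; // content the tool wrote
  };
  output?: {
    content?: string; // post-tool content, preferred by import-organizer.py
  };
  phase?: string; // "Stop" switches typescript-validator.py into full validation
}
```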

hooks/scripts/import-organizer.py Executable file

@@ -0,0 +1,318 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.10"
# dependencies = []
# ///
import json
import re
import sys
from datetime import datetime
from pathlib import Path
from typing import Any


class ImportOrganizer:
    def __init__(self, input_data: dict[str, Any]):
        self.input = input_data
        self.import_groups = {
            "react": [],
            "thirdParty": [],
            "absolute": [],
            "relative": [],
            "types": [],
        }

    def organize(self) -> dict[str, Any]:
        """Main organization entry point"""
        tool_input = self.input.get("tool_input", {})
        output = self.input.get("output", {})
        content = tool_input.get("content")
        file_path = tool_input.get("file_path")
        # Security: Basic input validation
        if file_path and (
            "../" in file_path or "..\\" in file_path or file_path.startswith("/")
        ):
            return self.skip("Potentially unsafe file path detected")
        # Only process TypeScript/JavaScript files
        file_ext = Path(file_path).suffix if file_path else ""
        if file_ext not in [".ts", ".tsx", ".js", ".jsx"]:
            return self.skip("Not a TypeScript/JavaScript file")
        # Work with the output content if available (PostToolUse), otherwise input content
        code_content = output.get("content") or content
        if not code_content:
            return self.skip("No content to organize")
        try:
            organized = self.organize_imports(code_content)
            # If content changed, write it back
            if organized != code_content:
                self.write_organized_content(file_path, organized)
                return self.success("Imports organized successfully")
            else:
                return self.skip("Imports already organized")
        except Exception as error:
            return self.error(f"Failed to organize imports: {error}")

    def organize_imports(self, content: str) -> str:
        """Parse and organize imports"""
        lines = content.split("\n")
        first_import_index = -1
        last_import_index = -1
        file_header = []
        # Find import boundaries and directives
        for i, line in enumerate(lines):
            trimmed_line = line.strip()
            # Check for 'use client' or 'use server' directives
            if trimmed_line in ["'use client'", '"use client"']:
                file_header.append(line)
                continue
            if trimmed_line in ["'use server'", '"use server"']:
                file_header.append(line)
                continue
            # Skip shebang and comments at the top
            if i == 0 and trimmed_line.startswith("#!"):
                file_header.append(line)
                continue
            # Detect imports
            if self.is_import_line(trimmed_line):
                if first_import_index == -1:
                    first_import_index = i
                last_import_index = i
                self.categorize_import(line)
            elif first_import_index != -1 and trimmed_line != "":
                # Stop when we hit non-import, non-empty content
                break
        # If no imports found, return original content
        if first_import_index == -1:
            return content
        # Build organized imports
        organized_imports = self.build_organized_imports()
        # Reconstruct the file
        before_imports = lines[:first_import_index]
        after_imports = lines[last_import_index + 1 :]
        # Combine everything
        result = []
        result.extend(file_header)
        if file_header:
            result.append("")  # Add blank line after directives
        result.extend([line for line in before_imports if line not in file_header])
        result.extend(organized_imports)
        result.extend(after_imports)
        return "\n".join(result)

    def is_import_line(self, line: str) -> bool:
        """Check if a line is an import statement"""
        return bool(
            re.match(r"^import\s+", line)
            or re.match(r"^import\s*{", line)
            or re.match(r"^import\s*type", line)
        )

    def categorize_import(self, import_line: str):
        """Categorize import into appropriate group"""
        trimmed = import_line.strip()
        # Type imports
        if "import type" in trimmed or "import { type" in trimmed:
            self.import_groups["types"].append(import_line)
            return
        # Extract the module path
        module_match = re.search(r"from\s+['\"]([^'\"]+)['\"]", import_line)
        if not module_match:
            # Handle side-effect imports (import 'module')
            if "react" in import_line or "next" in import_line:
                self.import_groups["react"].append(import_line)
            else:
                self.import_groups["thirdParty"].append(import_line)
            return
        module_path = module_match.group(1)
        # React/Next.js imports
        if self.is_react_import(module_path):
            self.import_groups["react"].append(import_line)
        # Absolute imports (@/)
        elif module_path.startswith("@/"):
            self.import_groups["absolute"].append(import_line)
        # Relative imports
        elif module_path.startswith("."):
            self.import_groups["relative"].append(import_line)
        # Third-party imports
        else:
            self.import_groups["thirdParty"].append(import_line)

    def is_react_import(self, module_path: str) -> bool:
        """Check if import is React/Next.js related"""
        react_patterns = [
            "react",
            "react-dom",
            "next",
            "@next",
            "next/",
            "@vercel",
        ]
        return any(
            module_path == pattern or module_path.startswith(pattern + "/")
            for pattern in react_patterns
        )

    def build_organized_imports(self) -> list[str]:
        """Build organized import groups"""
        groups = []
        # Add each group with proper spacing
        if self.import_groups["react"]:
            groups.extend(self.sort_imports(self.import_groups["react"]))
        if self.import_groups["thirdParty"]:
            if groups:
                groups.append("")  # Add blank line
            groups.extend(self.sort_imports(self.import_groups["thirdParty"]))
        if self.import_groups["absolute"]:
            if groups:
                groups.append("")  # Add blank line
            groups.extend(self.sort_imports(self.import_groups["absolute"]))
        if self.import_groups["relative"]:
            if groups:
                groups.append("")  # Add blank line
            groups.extend(self.sort_imports(self.import_groups["relative"]))
        if self.import_groups["types"]:
            if groups:
                groups.append("")  # Add blank line
            groups.extend(self.sort_imports(self.import_groups["types"]))
        return groups

    def sort_imports(self, imports: list[str]) -> list[str]:
        """Sort imports alphabetically within a group"""

        def get_path(imp: str) -> str:
            match = re.search(r"from\s+['\"]([^'\"]+)['\"]", imp)
            return match.group(1) if match else imp

        return sorted(imports, key=get_path)

    def write_organized_content(self, file_path: str, content: str):
        """Write organized content back to file"""
        try:
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(content)
        except Exception as error:
            raise Exception(f"Failed to write file: {error}") from error

    def success(self, message: str) -> dict[str, Any]:
        """Return success response"""
        return {"success": True, "message": f"{message}", "modified": True}

    def skip(self, reason: str) -> dict[str, Any]:
        """Return skip response"""
        return {"success": True, "message": f"Skipped: {reason}", "modified": False}

    def error(self, message: str) -> dict[str, Any]:
        """Return error response"""
        return {"success": False, "message": f"{message}", "modified": False}


def log_import_organizer_activity(input_data, result):
    """Log import organizer activity to a structured JSON file."""
    try:
        # Ensure log directory exists
        log_dir = Path.cwd() / "logs"
        log_dir.mkdir(parents=True, exist_ok=True)
        log_path = log_dir / "import_organizer.json"
        # Read existing log data or initialize empty list
        if log_path.exists():
            with open(log_path) as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []
        else:
            log_data = []
        # Add timestamp and hook event name to the log entry
        timestamp = datetime.now().strftime("%b %d, %I:%M%p").lower()
        log_entry = input_data.copy()
        log_entry["timestamp"] = timestamp
        log_entry["hook_event_name"] = "ImportOrganizer"
        log_entry["result"] = result
        log_entry["working_directory"] = str(Path.cwd())
        # Append new data
        log_data.append(log_entry)
        # Write back to file with formatting
        with open(log_path, "w") as f:
            json.dump(log_data, f, indent=2)
    except Exception as e:
        # Don't let logging errors break the hook
        print(f"Logging error: {e}", file=sys.stderr)


def main():
    """Main execution"""
    input_data = None
    result = None
    try:
        input_data = json.load(sys.stdin)
        # Extract file path for user-friendly message
        tool_input = input_data.get("tool_input", {})
        file_path = tool_input.get("file_path", "")
        file_name = Path(file_path).name if file_path else "file"
        # Show friendly message
        print(f"📦 Organizing imports in {file_name}...", file=sys.stderr)
        organizer = ImportOrganizer(input_data)
        result = organizer.organize()
        # Log the activity
        log_import_organizer_activity(input_data, result)
        # Show result to user
        if result.get("modified", False):
            print(f"✅ Imports organized in {file_name}", file=sys.stderr)
        else:
            print(f"👍 Imports already organized in {file_name}", file=sys.stderr)
        # For PostToolUse hooks, we don't need to return approve/block
        print(json.dumps({"message": result["message"]}))
    except Exception as error:
        # Log the error if we have input_data
        if input_data:
            error_result = {
                "success": False,
                "message": f"Import organizer error: {error}",
                "modified": False,
            }
            log_import_organizer_activity(input_data, error_result)
        print(json.dumps({"message": f"Import organizer error: {error}"}))


if __name__ == "__main__":
    main()

hooks/scripts/typescript-validator.py Executable file

@@ -0,0 +1,638 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.10"
# dependencies = []
# ///
import hashlib
import json
import logging
import os
import re
import subprocess
import sys
import threading
from collections import OrderedDict
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any

# Configure logging for cache operations
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)


# Thread-safe LRU cache with size limit
class ThreadSafeLRUCache:
    def __init__(self, max_size: int = 100, ttl: timedelta = timedelta(minutes=5)):
        self.max_size = max_size
        self.ttl = ttl
        self._cache: OrderedDict = OrderedDict()
        self._lock = threading.RLock()

    def get(self, key: str) -> dict[str, Any] | None:
        """Get cached value if exists and not expired"""
        with self._lock:
            if key not in self._cache:
                return None
            entry = self._cache[key]
            if datetime.now() - entry["timestamp"] >= self.ttl:
                # Remove expired entry
                del self._cache[key]
                return None
            # Move to end (most recently used)
            self._cache.move_to_end(key)
            return entry["result"]

    def set(self, key: str, value: dict[str, Any]) -> None:
        """Set cached value with automatic cleanup"""
        with self._lock:
            # Remove oldest entries if at capacity
            while len(self._cache) >= self.max_size:
                self._cache.popitem(last=False)
            self._cache[key] = {"result": value, "timestamp": datetime.now()}
            # Move to end
            self._cache.move_to_end(key)

    def clear_expired(self) -> int:
        """Clear expired entries and return count removed"""
        with self._lock:
            current_time = datetime.now()
            expired_keys = [
                key
                for key, entry in self._cache.items()
                if current_time - entry["timestamp"] >= self.ttl
            ]
            for key in expired_keys:
                del self._cache[key]
            return len(expired_keys)

    def size(self) -> int:
        """Get current cache size"""
        with self._lock:
            return len(self._cache)


# Global cache instance
validation_cache = ThreadSafeLRUCache(max_size=100, ttl=timedelta(minutes=5))

# Configuration
DEBUG_MODE = os.environ.get("CLAUDE_HOOKS_DEBUG") == "1"
FAST_MODE = "--fast" in sys.argv


class TypeScriptValidator:
    def __init__(self, hook_input: dict[str, Any]):
        self.hook_input = hook_input
        self.errors: list[str] = []
        self.warnings: list[str] = []
        self.violations: list[dict[str, Any]] = []
        self.blockers: list[str] = []
        self.results: dict[str, Any] = {
            "biome": None,
            "typecheck": None,
            "codeStandards": None,
        }

    async def validate(self) -> dict[str, Any]:
        """Main validation entry point"""
        tool_input = self.hook_input.get("tool_input")
        phase = self.hook_input.get("phase")
        # Extract file path and determine if we should validate
        file_path = self.extract_file_path(tool_input)
        if not file_path or not self.should_validate_file(file_path):
            return self.approve("File skipped - not a TypeScript/JavaScript file")
        # Check cache first
        cached = self.get_cached_result(file_path)
        if cached and not FAST_MODE:
            if DEBUG_MODE:
                print(
                    f"Using cached TypeScript validation for: {file_path}",
                    file=sys.stderr,
                )
            return cached
        # Determine validation mode based on phase and context
        validation_mode = self.determine_validation_mode(tool_input, phase)
        if DEBUG_MODE:
            print(
                f"TypeScript validation mode: {validation_mode['type']} ({validation_mode['reason']})",
                file=sys.stderr,
            )
        # Run validation steps
        self.validate_biome(file_path, validation_mode)
        self.validate_typecheck(validation_mode)
        self.validate_coding_standards(tool_input, file_path)
        # Determine final result
        final_result = self.get_final_result()
        # Cache result
        self.cache_result(file_path, final_result)
        return final_result

    def extract_file_path(self, tool_input: Any) -> str | None:
        """Extract file path from tool input"""
        if isinstance(tool_input, dict):
            return tool_input.get("file_path")
        return None

    def should_validate_file(self, file_path: str) -> bool:
        """Check if file should be validated"""
        if not file_path:
            return False
        ext = Path(file_path).suffix
        return ext in [".ts", ".tsx", ".js", ".jsx"]

    def get_cached_result(self, file_path: str) -> dict[str, Any] | None:
        """Get cached validation result"""
        try:
            if not Path(file_path).exists():
                return None
            with open(file_path, encoding="utf-8") as f:
                content = f.read()
            mtime = Path(file_path).stat().st_mtime
            # Use SHA-256 for better performance and security
            cache_key = hashlib.sha256(f"{content}{mtime}".encode()).hexdigest()
            return validation_cache.get(f"{file_path}:{cache_key}")
        except FileNotFoundError:
            logger.warning(f"File not found for cache lookup: {file_path}")
            return None
        except PermissionError:
            logger.warning(f"Permission denied reading file for cache: {file_path}")
            return None
        except UnicodeDecodeError:
            logger.warning(f"Unicode decode error reading file for cache: {file_path}")
            return None
        except OSError as e:
            logger.warning(f"OS error reading file for cache {file_path}: {e}")
            return None
        except Exception as e:
            logger.error(f"Unexpected error in cache lookup for {file_path}: {e}")
            return None

    def cache_result(self, file_path: str, result: dict[str, Any]):
        """Cache validation result"""
        try:
            if not Path(file_path).exists():
                return
            with open(file_path, encoding="utf-8") as f:
                content = f.read()
            mtime = Path(file_path).stat().st_mtime
            # Use SHA-256 for better performance and security
            cache_key = hashlib.sha256(f"{content}{mtime}".encode()).hexdigest()
            validation_cache.set(f"{file_path}:{cache_key}", result)
            # Periodically clean up expired entries
            if validation_cache.size() > 80:  # Clean when 80% full
                expired_count = validation_cache.clear_expired()
                if expired_count > 0 and DEBUG_MODE:
                    logger.info(f"Cleaned {expired_count} expired cache entries")
        except FileNotFoundError:
            logger.warning(f"File not found for caching: {file_path}")
        except PermissionError:
            logger.warning(f"Permission denied reading file for caching: {file_path}")
        except UnicodeDecodeError:
            logger.warning(
                f"Unicode decode error reading file for caching: {file_path}"
            )
        except OSError as e:
            logger.warning(f"OS error reading file for caching {file_path}: {e}")
        except Exception as e:
            logger.error(f"Unexpected error caching result for {file_path}: {e}")

    def determine_validation_mode(
        self, tool_input: Any, phase: str | None
    ) -> dict[str, str]:
        """Determine validation mode based on phase and context"""
        if phase == "Stop":
            return {"type": "full", "reason": "Stop phase requires full validation"}
        if isinstance(tool_input, dict) and tool_input.get("file_path"):
            return {"type": "file-specific", "reason": "File-specific validation"}
        return {"type": "incremental", "reason": "Incremental validation"}

    def validate_biome(self, file_path: str, validation_mode: dict[str, str]):
        """Run Biome validation (formatting, linting, imports)"""
        try:
            biome_command = self.build_biome_command(file_path, validation_mode)
            if DEBUG_MODE:
                print(f"Running: {' '.join(biome_command)}", file=sys.stderr)
            subprocess.run(biome_command, check=True, capture_output=True, text=True)
            self.results["biome"] = {
                "success": True,
                "message": "Biome validation passed",
            }
        except subprocess.CalledProcessError as error:
            error_output = error.stdout or error.stderr or str(error)
            # Parse Biome error types
            biome_errors = []
            if "Format" in error_output:
                biome_errors.append(f"Biome formatting issues in {file_path}")
            if "Lint" in error_output:
                biome_errors.append(f"Biome linting issues in {file_path}")
            if "Organize imports" in error_output:
                biome_errors.append(f"Import organization issues in {file_path}")
            if not biome_errors:
                biome_errors.append(
                    f"Biome check failed for {file_path}: {error_output[:200]}"
                )
            self.errors.extend(biome_errors)
            self.results["biome"] = {
                "success": False,
                "errors": biome_errors,
                "fix": (
                    "Run 'pnpm biome:check --apply' on changed files"
                    if validation_mode["type"] == "incremental"
                    else "Run 'pnpm biome:check --apply' and fix all remaining issues"
                ),
            }

    def validate_typecheck(self, validation_mode: dict[str, str]):
        """Run TypeScript type checking"""
        try:
            typecheck_command = self.build_typecheck_command(validation_mode)
            if DEBUG_MODE:
                print(f"Running: {' '.join(typecheck_command)}", file=sys.stderr)
            subprocess.run(
                typecheck_command, check=True, capture_output=True, text=True
            )
            self.results["typecheck"] = {
                "success": True,
                "message": "TypeScript check passed",
            }
        except subprocess.CalledProcessError as error:
            error_output = error.stdout or error.stderr or str(error)
            self.errors.append(f"TypeScript type errors: {error_output[:300]}")
            self.results["typecheck"] = {
                "success": False,
                "error": error_output,
                "fix": (
                    "Fix TypeScript errors in modified files"
                    if validation_mode["type"] == "incremental"
                    else "Fix all TypeScript errors before completing task"
                ),
            }

    def validate_coding_standards(self, tool_input: Any, file_path: str):
        """Run coding standards validation"""
        try:
            content = (
                tool_input.get("content") if isinstance(tool_input, dict) else None
            )
            if not content:
                self.results["codeStandards"] = {
                    "success": True,
                    "message": "No content to validate",
                }
                return
            # Run all coding standards checks
            self.validate_no_any_type(content)
            self.validate_no_var(content)
            self.validate_null_safety(content)
            self.validate_implicit_globals(content)
            self.validate_empty_catch(content)
            self.validate_magic_numbers(content)
            self.validate_component_structure(content, file_path)
            self.validate_api_route_structure(content, file_path)
            self.validate_file_name(file_path)
            self.results["codeStandards"] = {
                "success": len(self.blockers) == 0,
                "violations": len(self.violations),
                "blockers": len(self.blockers),
            }
        except Exception as error:
            self.warnings.append(f"Coding standards validation error: {error}")
            self.results["codeStandards"] = {
                "success": True,
                "message": "Coding standards check skipped due to error",
            }

    def build_biome_command(
        self, file_path: str, validation_mode: dict[str, str]
    ) -> list[str]:
        """Build Biome command based on validation mode"""
        if validation_mode["type"] == "full":
            return ["pnpm", "biome:check", "--apply"]
        if validation_mode["type"] == "file-specific":
            return ["pnpm", "biome", "check", file_path, "--apply"]
        # For incremental validation, check changed files
        try:
            changed_files = subprocess.run(
                ["git", "diff", "--name-only", "HEAD"],
                capture_output=True,
                text=True,
                check=True,
            ).stdout.strip()
            staged_files = subprocess.run(
                ["git", "diff", "--cached", "--name-only"],
                capture_output=True,
                text=True,
                check=True,
            ).stdout.strip()
            if not changed_files and not staged_files:
                return ["pnpm", "biome", "check", file_path, "--apply"]
            # Build command for changed files
            all_files = []
            if changed_files:
                all_files.extend(changed_files.split("\n"))
            if staged_files:
                all_files.extend(staged_files.split("\n"))
            # Filter for TypeScript/JavaScript files
            ts_files = [
                f for f in all_files if Path(f).suffix in [".ts", ".tsx", ".js", ".jsx"]
            ]
            if ts_files:
                command = ["pnpm", "biome", "check"] + ts_files + ["--apply"]
                return command
            else:
                return ["pnpm", "biome", "check", file_path, "--apply"]
        except subprocess.CalledProcessError:
            return ["pnpm", "biome", "check", file_path, "--apply"]

    def build_typecheck_command(self, validation_mode: dict[str, str]) -> list[str]:
        """Build TypeScript check command"""
        if validation_mode["type"] == "full":
            return ["pnpm", "typecheck"]
        else:
            return ["pnpm", "typecheck", "--noEmit"]

    def validate_no_any_type(self, content: str):
        """Check for 'any' type usage"""
        any_pattern = r"\b:\s*any\b"
        matches = re.findall(any_pattern, content)
        if matches:
            self.violations.append(
                {
                    "rule": "No Any Type",
                    "message": f'Found {len(matches)} usage(s) of "any" type',
                    "severity": "error",
                }
            )
            self.blockers.append('Use "unknown" or specific types instead of "any"')

    def validate_no_var(self, content: str):
        """Check for 'var' declarations"""
        var_pattern = r"\bvar\s+\w+"
        matches = re.findall(var_pattern, content)
        if matches:
            self.violations.append(
                {
                    "rule": "No Var",
                    "message": f'Found {len(matches)} usage(s) of "var" declaration',
                    "severity": "error",
                }
            )
            self.blockers.append('Use "const" or "let" instead of "var"')

    def validate_null_safety(self, content: str):
        """Check for null safety issues"""
        # DISABLED: This regex-based check causes too many false positives
        # TypeScript's type system and strict null checks handle this better
        # To properly implement this, we would need AST parsing to understand:
        # - Type guarantees (non-nullable types)
        # - Control flow analysis (null checks before access)
        # - Type guards and narrowing
        #
        # Example false positives this regex would catch:
        # - myArray.map() where myArray is guaranteed non-null by type
        # - obj.method() after explicit null check
        # - React component props that are required
        #
        # If you need null safety checks, enable TypeScript's strictNullChecks instead
        pass

    def validate_implicit_globals(self, content: str):
        """Check for implicit global variables"""
        # DISABLED: This regex-based check is too simplistic and causes false positives
        # Issues with the current approach:
        # - Doesn't understand scoping (function parameters, block scope, module scope)
        # - Doesn't recognize property assignments (this.prop = value, obj.prop = value)
        # - Doesn't understand destructuring assignments
        # - Doesn't recognize TypeScript class properties
        # - Doesn't handle imports/exports
        #
        # Example false positives:
        # - Class property assignments: this.name = 'value'
        # - Object property updates: user.name = 'new name'
        # - Array element updates: items[0] = newItem
        # - Destructuring: const { name } = user; name = 'new'
        # - Function parameters: function(param) { param = transform(param) }
        #
        # TypeScript's noImplicitAny and strict mode handle this properly
        pass

    def validate_empty_catch(self, content: str):
        """Check for empty catch blocks"""
        empty_catch_pattern = r"catch\s*\(\s*\w*\s*\)\s*\{\s*\}"
        if re.search(empty_catch_pattern, content):
            self.violations.append(
                {
                    "rule": "Empty Catch",
                    "message": "Empty catch block detected",
                    "severity": "warning",
                }
            )

    def validate_magic_numbers(self, content: str):
        """Check for magic numbers"""
        magic_number_pattern = r"\b\d{2,}\b"
        matches = re.findall(magic_number_pattern, content)
        if len(matches) > 3:
            self.violations.append(
                {
                    "rule": "Magic Numbers",
                    "message": f"Found {len(matches)} potential magic numbers",
                    "severity": "warning",
                }
            )

    def validate_component_structure(self, content: str, file_path: str):
        """Validate React component structure"""
        if Path(file_path).suffix in [".tsx", ".jsx"]:
            if "export default" not in content:
                self.violations.append(
                    {
                        "rule": "Component Structure",
                        "message": "React component should have default export",
                        "severity": "warning",
                    }
                )

    def validate_api_route_structure(self, content: str, file_path: str):
        """Validate API route structure"""
        if "/api/" in file_path:
            if "export" not in content:
                self.violations.append(
                    {
                        "rule": "API Route Structure",
                        "message": "API route should export handler functions",
                        "severity": "warning",
                    }
                )

    def validate_file_name(self, file_path: str):
        """Validate file naming conventions"""
        file_name = Path(file_path).name
        if not re.match(r"^[a-z0-9-_.]+$", file_name):
            self.violations.append(
                {
                    "rule": "File Naming",
                    "message": f'File name "{file_name}" should use kebab-case',
                    "severity": "warning",
                }
            )

    def get_final_result(self) -> dict[str, Any]:
        """Determine final validation result"""
        if self.errors or self.blockers:
            return self.block()
        else:
            return self.approve()

    def approve(self, custom_message: str | None = None) -> dict[str, Any]:
        """Approve validation"""
        message = custom_message or "✅ TypeScript validation passed"
        if self.warnings:
            message += f" ({len(self.warnings)} warnings)"
        return {"approve": True, "message": message}

    def block(self) -> dict[str, Any]:
        """Block validation due to errors"""
        message_parts = ["❌ TypeScript validation failed:"]
        if self.errors:
            message_parts.extend([f" - {error}" for error in self.errors])
        if self.blockers:
            message_parts.append("")
            message_parts.append("🔧 Required fixes:")
            message_parts.extend([f" - {blocker}" for blocker in self.blockers])
        return {"approve": False, "message": "\n".join(message_parts)}


async def main():
    """Main execution"""
    try:
        input_data = json.load(sys.stdin)
        # Ensure log directory exists
        log_dir = Path.cwd() / "logs"
        log_dir.mkdir(parents=True, exist_ok=True)
        log_path = log_dir / "typescript_validator.json"
        # Read existing log data or initialize empty list
        if log_path.exists():
            with open(log_path) as f:
                try:
                    log_data = json.load(f)
                except (json.JSONDecodeError, ValueError):
                    log_data = []
        else:
            log_data = []
        # Add timestamp to the log entry
        timestamp = datetime.now().strftime("%b %d, %I:%M%p").lower()
        input_data["timestamp"] = timestamp
        # Run validation
        validator = TypeScriptValidator(input_data)
        result = await validator.validate()
        # Add result to log entry
        input_data["result"] = result
        # Append new data
        log_data.append(input_data)
        # Write back to file with formatting
        with open(log_path, "w") as f:
            json.dump(log_data, f, indent=2)
        print(json.dumps(result))
    except Exception as error:
        error_result = {
            "approve": False,
            "message": f"TypeScript validator error: {error}",
        }
        # Try to log the error as well
        try:
            log_dir = Path.cwd() / "logs"
            log_dir.mkdir(parents=True, exist_ok=True)
            log_path = log_dir / "typescript_validator.json"
            if log_path.exists():
                with open(log_path) as f:
                    try:
                        log_data = json.load(f)
                    except (json.JSONDecodeError, ValueError):
                        log_data = []
            else:
                log_data = []
            timestamp = datetime.now().strftime("%b %d, %I:%M%p").lower()
            error_entry = {
                "timestamp": timestamp,
                "error": str(error),
                "result": error_result,
            }
            log_data.append(error_entry)
            with open(log_path, "w") as f:
                json.dump(log_data, f, indent=2)
        except Exception:
            # If logging fails, continue with the original error response
            pass
        print(json.dumps(error_result))
        sys.exit(1)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

plugin.lock.json Normal file

@@ -0,0 +1,61 @@
{
  "$schema": "internal://schemas/plugin.lock.v1.json",
  "pluginId": "gh:AojdevStudio/dev-utils-marketplace:lang-javascript",
  "normalized": {
    "repo": null,
    "ref": "refs/tags/v20251128.0",
    "commit": "99a0000ad70945a4af87eec06eec046e099be2ef",
    "treeHash": "a98999ef6b20fbfbdd0a956625bf1bfaac1a63b3299dce192e63db7a84be0e61",
    "generatedAt": "2025-11-28T10:09:53.738239Z",
    "toolVersion": "publish_plugins.py@0.2.0"
  },
  "origin": {
    "remote": "git@github.com:zhongweili/42plugin-data.git",
    "branch": "master",
    "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
    "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
  },
  "manifest": {
    "name": "lang-javascript",
    "description": "Meta-package: Installs all lang-javascript components (agents + hooks)",
    "version": "3.0.0"
  },
  "content": {
    "files": [
      {
        "path": "README.md",
        "sha256": "8f3f237639d6eabb39999cd1c12adda777184cd37a0fa492f4d4ed307f0689a7"
      },
      {
        "path": "agents/javascript-craftsman.md",
        "sha256": "9223cae3b362ebb6033ae8386e1699fc40e3529799a4d309b7a8c0db2e98c3d2"
      },
      {
        "path": "agents/v2-typescript-expert.md",
        "sha256": "c5fce402f08e92e05c516cf560b880524f35980fa774788c4745796d5e1e50af"
      },
      {
        "path": "hooks/hooks.json",
        "sha256": "7528b5d2983bfda15f2c88280882a80a6dffe4488825b05a878109e6edf3b1a9"
      },
      {
        "path": "hooks/scripts/typescript-validator.py",
        "sha256": "d47e7ca56dca9be0c81674a23c8358beeef598b95951c92e9965313c0cab8e85"
      },
      {
        "path": "hooks/scripts/import-organizer.py",
        "sha256": "0c418bbf220c4e288eaf6b70fbaad116f411cda06794042c49b64f5f71366d37"
      },
      {
        "path": ".claude-plugin/plugin.json",
        "sha256": "304b8379c0360e9762919470d4833dfd69462dbebd9d0b6a9be45ec54642e8ac"
      }
    ],
    "dirSha256": "a98999ef6b20fbfbdd0a956625bf1bfaac1a63b3299dce192e63db7a84be0e61"
  },
  "security": {
    "scannedAt": null,
    "scannerVersion": null,
    "flags": []
  }
}
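A short sketch of how a consumer could verify one entry in `content.files` above (Node built-ins only; the path is assumed relative to the installed plugin root):

```typescript
import { createHash } from "node:crypto";
import { readFile } from "node:fs/promises";

async function verifySha256(path: string, expected: string): Promise<boolean> {
  const bytes = await readFile(path);
  return createHash("sha256").update(bytes).digest("hex") === expected;
}

// e.g. await verifySha256("README.md", "8f3f2376...0689a7") against the lock entry
```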