Initial commit
100
skills/subagent-prompt-construction/scripts/count-artifacts.sh
Executable file
@@ -0,0 +1,100 @@
#!/usr/bin/env bash
# count-artifacts.sh - Count lines in skill artifacts

set -euo pipefail

SKILL_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"

echo "=== Artifact Line Count Report ==="
echo ""

total_lines=0

# SKILL.md
if [[ -f "$SKILL_DIR/SKILL.md" ]]; then
    lines=$(wc -l < "$SKILL_DIR/SKILL.md")
    total_lines=$((total_lines + lines))
    echo "SKILL.md: $lines lines"
    if [[ $lines -gt 40 ]]; then
        echo "  ⚠️ WARNING: Exceeds 40-line target ($(($lines - 40)) over)"
    else
        echo "  ✅ Within 40-line target"
    fi
    echo ""
fi

# Examples
echo "Examples:"
for file in "$SKILL_DIR"/examples/*.md; do
    if [[ -f "$file" ]]; then
        lines=$(wc -l < "$file")
        total_lines=$((total_lines + lines))
        basename=$(basename "$file")
        echo "  $basename: $lines lines"
        if [[ $lines -gt 150 ]]; then
            echo "    ⚠️ WARNING: Exceeds 150-line target ($(($lines - 150)) over)"
        else
            echo "    ✅ Within 150-line target"
        fi
    fi
done
echo ""

# Templates
echo "Templates:"
for file in "$SKILL_DIR"/templates/*.md; do
    if [[ -f "$file" ]]; then
        lines=$(wc -l < "$file")
        total_lines=$((total_lines + lines))
        basename=$(basename "$file")
        echo "  $basename: $lines lines"
    fi
done
echo ""

# Reference
echo "Reference:"
for file in "$SKILL_DIR"/reference/*.md; do
    if [[ -f "$file" ]]; then
        lines=$(wc -l < "$file")
        total_lines=$((total_lines + lines))
        basename=$(basename "$file")
        echo "  $basename: $lines lines"
    fi
done
echo ""

# Case Studies
echo "Case Studies:"
for file in "$SKILL_DIR"/reference/case-studies/*.md; do
    if [[ -f "$file" ]]; then
        lines=$(wc -l < "$file")
        total_lines=$((total_lines + lines))
        basename=$(basename "$file")
        echo "  $basename: $lines lines"
    fi
done
echo ""

echo "=== Summary ==="
echo "Total lines: $total_lines"
echo ""

# Compactness validation
compact_lines=0
if [[ -f "$SKILL_DIR/SKILL.md" ]]; then
    compact_lines=$((compact_lines + $(wc -l < "$SKILL_DIR/SKILL.md")))
fi
for file in "$SKILL_DIR"/examples/*.md; do
    if [[ -f "$file" ]]; then
        compact_lines=$((compact_lines + $(wc -l < "$file")))
    fi
done

echo "Compactness check (SKILL.md + examples):"
echo "  Total: $compact_lines lines"
if [[ $compact_lines -le 190 ]]; then
    echo "  ✅ Meets compactness target (≤190 lines for SKILL.md ≤40 + examples ≤150)"
else
    echo "  ⚠️ Exceeds compactness target ($((compact_lines - 190)) lines over)"
fi
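
The report is informational only: it always exits 0, so warnings do not fail a build. A minimal invocation sketch, assuming the script is run from the skill root with its executable bit intact:

cd skills/subagent-prompt-construction
./scripts/count-artifacts.sh
# show only over-budget artifacts, if any
./scripts/count-artifacts.sh | grep WARNING || true
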
133
skills/subagent-prompt-construction/scripts/extract-patterns.py
Executable file
@@ -0,0 +1,133 @@
#!/usr/bin/env python3
"""extract-patterns.py - Extract and summarize patterns from reference directory"""

import json
import re
from pathlib import Path
from typing import Dict, List


def extract_patterns(reference_dir: Path) -> Dict:
    """Extract patterns from reference/patterns.md"""
    patterns_file = reference_dir / "patterns.md"

    if not patterns_file.exists():
        return {"error": "patterns.md not found"}

    content = patterns_file.read_text()

    patterns = []

    # Extract pattern sections
    pattern_regex = r"## Pattern \d+: (.+?)\n\n\*\*Use case\*\*: (.+?)\n\n\*\*Structure\*\*:\n```\n(.+?)\n```"

    for match in re.finditer(pattern_regex, content, re.DOTALL):
        name = match.group(1).strip()
        use_case = match.group(2).strip()
        structure = match.group(3).strip()

        patterns.append({
            "name": name,
            "use_case": use_case,
            "structure": structure
        })

    return {
        "patterns_count": len(patterns),
        "patterns": patterns
    }


def extract_integration_patterns(reference_dir: Path) -> Dict:
    """Extract integration patterns from reference/integration-patterns.md"""
    integration_file = reference_dir / "integration-patterns.md"

    if not integration_file.exists():
        return {"error": "integration-patterns.md not found"}

    content = integration_file.read_text()

    integrations = []

    # Extract integration sections
    integration_regex = r"## \d+\. (.+?)\n\n\*\*Pattern\*\*:\n```\n(.+?)\n```"

    for match in re.finditer(integration_regex, content, re.DOTALL):
        name = match.group(1).strip()
        pattern = match.group(2).strip()

        integrations.append({
            "name": name,
            "pattern": pattern
        })

    return {
        "integration_patterns_count": len(integrations),
        "integration_patterns": integrations
    }


def extract_symbols(reference_dir: Path) -> Dict:
    """Extract symbolic language operators from reference/symbolic-language.md"""
    symbols_file = reference_dir / "symbolic-language.md"

    if not symbols_file.exists():
        return {"error": "symbolic-language.md not found"}

    content = symbols_file.read_text()

    # Count sections; the fixed character offsets assume the file lists logic
    # operators, then quantifiers, then set operations, in that order
    logic_ops = len(re.findall(r"### .+? \(.+?\)\n\*\*Symbol\*\*: `(.+?)`", content[:2000]))
    quantifiers = len(re.findall(r"### .+?\n\*\*Symbol\*\*: `(.+?)`", content[2000:4000]))
    set_ops = len(re.findall(r"### .+?\n\*\*Symbol\*\*: `(.+?)`", content[4000:6000]))

    return {
        "logic_operators": logic_ops,
        "quantifiers": quantifiers,
        "set_operations": set_ops,
        "total_symbols": logic_ops + quantifiers + set_ops
    }


def main():
    """Main entry point"""
    skill_dir = Path(__file__).parent.parent
    reference_dir = skill_dir / "reference"

    if not reference_dir.exists():
        print(json.dumps({"error": "reference directory not found"}, indent=2))
        return

    # Extract all patterns
    patterns = extract_patterns(reference_dir)
    integrations = extract_integration_patterns(reference_dir)
    symbols = extract_symbols(reference_dir)

    # Combine results
    result = {
        "skill": "subagent-prompt-construction",
        "patterns": patterns,
        "integration_patterns": integrations,
        "symbolic_language": symbols,
        "summary": {
            "total_patterns": patterns.get("patterns_count", 0),
            "total_integration_patterns": integrations.get("integration_patterns_count", 0),
            "total_symbols": symbols.get("total_symbols", 0)
        }
    }

    # Save to inventory
    inventory_dir = skill_dir / "inventory"
    inventory_dir.mkdir(exist_ok=True)

    output_file = inventory_dir / "patterns-summary.json"
    output_file.write_text(json.dumps(result, indent=2))

    print(f"✅ Patterns extracted to {output_file}")
    print(f"   - {result['summary']['total_patterns']} core patterns")
    print(f"   - {result['summary']['total_integration_patterns']} integration patterns")
    print(f"   - {result['summary']['total_symbols']} symbolic operators")


if __name__ == "__main__":
    main()
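
pattern_regex implies a specific section shape in reference/patterns.md: a "## Pattern N: <name>" heading, a "**Use case**:" line, and a fenced block under "**Structure**:". A throwaway sketch for checking that shape against the parser, using a scratch copy so the real reference/ tree is untouched (the sample section content below is invented for illustration, not taken from the repository):

cd skills/subagent-prompt-construction
tmp=$(mktemp -d)
mkdir -p "$tmp/scripts" "$tmp/reference"
cp scripts/extract-patterns.py "$tmp/scripts/"
cat > "$tmp/reference/patterns.md" <<'EOF'
## Pattern 1: Example planner-executor split

**Use case**: Illustrative only

**Structure**:
```
planner → executor → verifier
```
EOF
python3 "$tmp/scripts/extract-patterns.py"
cat "$tmp/inventory/patterns-summary.json"

The missing integration-patterns.md and symbolic-language.md simply show up as error entries in the JSON; the summary counts fall back to 0 via .get().
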
122
skills/subagent-prompt-construction/scripts/generate-frontmatter.py
Executable file
@@ -0,0 +1,122 @@
#!/usr/bin/env python3
"""generate-frontmatter.py - Generate skill frontmatter inventory"""

import json
import re
from pathlib import Path
from typing import Dict


def extract_frontmatter(skill_md: Path) -> Dict:
    """Extract YAML frontmatter from SKILL.md"""
    if not skill_md.exists():
        return {"error": "SKILL.md not found"}

    content = skill_md.read_text()

    # Extract frontmatter between --- delimiters
    match = re.search(r"^---\n(.+?)\n---", content, re.DOTALL | re.MULTILINE)
    if not match:
        return {"error": "No frontmatter found"}

    frontmatter_text = match.group(1)

    # Parse YAML-style frontmatter
    frontmatter = {}
    for line in frontmatter_text.split("\n"):
        if ":" in line:
            key, value = line.split(":", 1)
            key = key.strip()
            value = value.strip()

            # Try to parse as number or boolean
            if value.replace(".", "").isdigit():
                value = float(value) if "." in value else int(value)
            elif value.lower() in ["true", "false"]:
                value = value.lower() == "true"
            elif value.endswith("%"):
                value = int(value[:-1])

            frontmatter[key] = value

    return frontmatter


def extract_lambda_contract(skill_md: Path) -> str:
    """Extract lambda contract from SKILL.md"""
    if not skill_md.exists():
        return ""

    content = skill_md.read_text()

    # Find lambda contract (starts with λ)
    match = re.search(r"^λ\(.+?\).*$", content, re.MULTILINE)
    if match:
        return match.group(0)

    return ""


def main():
    """Main entry point"""
    skill_dir = Path(__file__).parent.parent
    skill_md = skill_dir / "SKILL.md"

    if not skill_md.exists():
        print(json.dumps({"error": "SKILL.md not found"}, indent=2))
        return

    # Extract frontmatter and lambda contract
    frontmatter = extract_frontmatter(skill_md)
    lambda_contract = extract_lambda_contract(skill_md)

    # Calculate metrics
    skill_lines = len(skill_md.read_text().split("\n"))

    # Count examples
    examples_dir = skill_dir / "examples"
    examples_count = len(list(examples_dir.glob("*.md"))) if examples_dir.exists() else 0

    # Count reference files
    reference_dir = skill_dir / "reference"
    reference_count = len(list(reference_dir.glob("*.md"))) if reference_dir.exists() else 0

    # Count case studies
    case_studies_dir = reference_dir / "case-studies" if reference_dir.exists() else None
    case_studies_count = len(list(case_studies_dir.glob("*.md"))) if case_studies_dir and case_studies_dir.exists() else 0

    # Combine results
    result = {
        "skill": "subagent-prompt-construction",
        "frontmatter": frontmatter,
        "lambda_contract": lambda_contract,
        "metrics": {
            "skill_md_lines": skill_lines,
            "examples_count": examples_count,
            "reference_files_count": reference_count,
            "case_studies_count": case_studies_count
        },
        "compliance": {
            "skill_md_under_40_lines": skill_lines <= 40,
            "has_lambda_contract": len(lambda_contract) > 0,
            "has_examples": examples_count > 0,
            "has_reference": reference_count > 0
        }
    }

    # Save to inventory
    inventory_dir = skill_dir / "inventory"
    inventory_dir.mkdir(exist_ok=True)

    output_file = inventory_dir / "skill-frontmatter.json"
    output_file.write_text(json.dumps(result, indent=2))

    print(f"✅ Frontmatter extracted to {output_file}")
    print(f"   - SKILL.md: {skill_lines} lines ({'✅' if skill_lines <= 40 else '⚠️ over'})")
    print(f"   - Examples: {examples_count}")
    print(f"   - Reference files: {reference_count}")
    print(f"   - Case studies: {case_studies_count}")


if __name__ == "__main__":
    main()
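
The parser only understands a narrow SKILL.md shape: a ---‑delimited block of simple key: value lines at the top, plus one line starting with "λ(" somewhere in the body. A scratch-tree sketch (the field names and the contract line below are invented for illustration; they are not the real SKILL.md contents):

cd skills/subagent-prompt-construction
tmp=$(mktemp -d)
mkdir -p "$tmp/scripts"
cp scripts/generate-frontmatter.py "$tmp/scripts/"
cat > "$tmp/SKILL.md" <<'EOF'
---
name: subagent-prompt-construction
maturity: 0.8
validated: true
---
λ(task, context) → subagent_prompt
EOF
python3 "$tmp/scripts/generate-frontmatter.py"
cat "$tmp/inventory/skill-frontmatter.json"

Numeric-looking values ("0.8") come back as floats, "true"/"false" as booleans, and everything else as strings, per the ad-hoc parsing above.
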
183
skills/subagent-prompt-construction/scripts/validate-skill.sh
Executable file
@@ -0,0 +1,183 @@
#!/usr/bin/env bash
# validate-skill.sh - Validate skill structure and meta-objective compliance

set -euo pipefail

SKILL_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
INVENTORY_DIR="$SKILL_DIR/inventory"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Validation results
ERRORS=0
WARNINGS=0

echo "=== Skill Validation Report ==="
echo ""

# 1. Directory structure validation
echo "1. Directory Structure:"
required_dirs=("templates" "examples" "reference" "reference/case-studies" "scripts" "inventory")
for dir in "${required_dirs[@]}"; do
    if [[ -d "$SKILL_DIR/$dir" ]]; then
        echo -e "  ${GREEN}✅${NC} $dir/"
    else
        echo -e "  ${RED}❌${NC} $dir/ (missing)"
        ERRORS=$((ERRORS + 1))
    fi
done
echo ""

# 2. Required files validation
echo "2. Required Files:"
required_files=("SKILL.md" "templates/subagent-template.md" "examples/phase-planner-executor.md")
for file in "${required_files[@]}"; do
    if [[ -f "$SKILL_DIR/$file" ]]; then
        echo -e "  ${GREEN}✅${NC} $file"
    else
        echo -e "  ${RED}❌${NC} $file (missing)"
        ERRORS=$((ERRORS + 1))
    fi
done
echo ""

# 3. Compactness validation
echo "3. Compactness Constraints:"

if [[ -f "$SKILL_DIR/SKILL.md" ]]; then
    skill_lines=$(wc -l < "$SKILL_DIR/SKILL.md")
    if [[ $skill_lines -le 40 ]]; then
        echo -e "  ${GREEN}✅${NC} SKILL.md: $skill_lines lines (≤40)"
    else
        echo -e "  ${RED}❌${NC} SKILL.md: $skill_lines lines (exceeds 40 by $(($skill_lines - 40)))"
        ERRORS=$((ERRORS + 1))
    fi
fi

for file in "$SKILL_DIR"/examples/*.md; do
    if [[ -f "$file" ]]; then
        lines=$(wc -l < "$file")
        basename=$(basename "$file")
        if [[ $lines -le 150 ]]; then
            echo -e "  ${GREEN}✅${NC} examples/$basename: $lines lines (≤150)"
        else
            echo -e "  ${YELLOW}⚠️${NC} examples/$basename: $lines lines (exceeds 150 by $(($lines - 150)))"
            WARNINGS=$((WARNINGS + 1))
        fi
    fi
done
echo ""

# 4. Lambda contract validation
echo "4. Lambda Contract:"
if [[ -f "$SKILL_DIR/SKILL.md" ]]; then
    if grep -q "^λ(" "$SKILL_DIR/SKILL.md"; then
        echo -e "  ${GREEN}✅${NC} Lambda contract found"
    else
        echo -e "  ${RED}❌${NC} Lambda contract missing"
        ERRORS=$((ERRORS + 1))
    fi
fi
echo ""

# 5. Reference files validation
echo "5. Reference Documentation:"
reference_files=("patterns.md" "integration-patterns.md" "symbolic-language.md")
for file in "${reference_files[@]}"; do
    if [[ -f "$SKILL_DIR/reference/$file" ]]; then
        lines=$(wc -l < "$SKILL_DIR/reference/$file")
        echo -e "  ${GREEN}✅${NC} reference/$file ($lines lines)"
    else
        echo -e "  ${YELLOW}⚠️${NC} reference/$file (missing)"
        WARNINGS=$((WARNINGS + 1))
    fi
done
echo ""

# 6. Case studies validation
echo "6. Case Studies:"
# "|| true" keeps set -e/pipefail from aborting the whole run when the directory is missing
case_study_count=$(find "$SKILL_DIR/reference/case-studies" -name "*.md" 2>/dev/null | wc -l || true)
if [[ $case_study_count -gt 0 ]]; then
    echo -e "  ${GREEN}✅${NC} $case_study_count case study file(s) found"
else
    echo -e "  ${YELLOW}⚠️${NC} No case studies found"
    WARNINGS=$((WARNINGS + 1))
fi
echo ""

# 7. Scripts validation
echo "7. Automation Scripts:"
script_count=$(find "$SKILL_DIR/scripts" -name "*.sh" -o -name "*.py" 2>/dev/null | wc -l || true)
if [[ $script_count -ge 4 ]]; then
    echo -e "  ${GREEN}✅${NC} $script_count script(s) found (≥4)"
else
    echo -e "  ${YELLOW}⚠️${NC} $script_count script(s) found (target: ≥4)"
    WARNINGS=$((WARNINGS + 1))
fi

# List scripts
for script in "$SKILL_DIR"/scripts/*.{sh,py}; do
    if [[ -f "$script" ]]; then
        basename=$(basename "$script")
        echo "  - $basename"
    fi
done
echo ""

# 8. Meta-objective compliance (from config.json if available)
echo "8. Meta-Objective Compliance:"

config_file="$SKILL_DIR/experiment-config.json"
if [[ -f "$config_file" ]]; then
    echo -e "  ${GREEN}✅${NC} experiment-config.json found"

    # Check V_meta and V_instance (grep -P needs GNU grep; comparisons need bc)
    v_meta=$(grep -oP '"v_meta":\s*\K[0-9.]+' "$config_file" || echo "0")
    v_instance=$(grep -oP '"v_instance":\s*\K[0-9.]+' "$config_file" || echo "0")

    echo "  V_meta: $v_meta (target: ≥0.75)"
    echo "  V_instance: $v_instance (target: ≥0.80)"

    if (( $(echo "$v_instance >= 0.80" | bc -l) )); then
        echo -e "  ${GREEN}✅${NC} V_instance meets threshold"
    else
        echo -e "  ${YELLOW}⚠️${NC} V_instance below threshold"
        WARNINGS=$((WARNINGS + 1))
    fi

    if (( $(echo "$v_meta >= 0.75" | bc -l) )); then
        echo -e "  ${GREEN}✅${NC} V_meta meets threshold"
    else
        echo -e "  ${YELLOW}⚠️${NC} V_meta below threshold (near convergence)"
        WARNINGS=$((WARNINGS + 1))
    fi
else
    echo -e "  ${YELLOW}⚠️${NC} experiment-config.json not found"
    WARNINGS=$((WARNINGS + 1))
fi
echo ""

# Summary
echo "=== Validation Summary ==="
echo ""
if [[ $ERRORS -eq 0 ]]; then
    echo -e "${GREEN}✅ All critical validations passed${NC}"
else
    echo -e "${RED}❌ $ERRORS critical error(s) found${NC}"
fi

if [[ $WARNINGS -gt 0 ]]; then
    echo -e "${YELLOW}⚠️ $WARNINGS warning(s) found${NC}"
fi
echo ""

# Exit code
if [[ $ERRORS -gt 0 ]]; then
    exit 1
else
    exit 0
fi
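
One way this might be wired into a pre-commit hook or CI step, relying only on the exit code the script already defines (critical errors exit 1, warnings alone still exit 0); a minimal sketch:

cd skills/subagent-prompt-construction
if ./scripts/validate-skill.sh; then
    echo "skill structure OK"
else
    echo "fix critical errors before committing" >&2
    exit 1
fi
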