Initial commit

Zhongwei Li
2025-11-29 18:26:08 +08:00
commit 8f22ddf339
295 changed files with 59710 additions and 0 deletions

agents/meta.skill/README.md Normal file

@@ -0,0 +1,647 @@
# meta.skill - Skill Creator Meta-Agent
Generates complete Betty skills from natural language descriptions.
## Overview
**meta.skill** is a meta-agent that creates fully functional skills from simple description files. It generates skill definitions, Python implementations, tests, and documentation, following Betty Framework conventions.
**What it does:**
- Parses skill descriptions (Markdown or JSON)
- Generates `skill.yaml` configurations
- Creates Python implementation stubs
- Generates test templates
- Creates comprehensive SKILL.md documentation
- Validates skill names and structure
- Registers artifact metadata
## Quick Start
### Create a Skill
```bash
# Create skill from description
python3 agents/meta.skill/meta_skill.py examples/my_skill_description.md
```
Output:
```
🛠️ meta.skill - Creating skill from examples/my_skill_description.md
✨ Skill 'data.transform' created successfully!
📄 Created files:
  - skills/data.transform/skill.yaml
  - skills/data.transform/data_transform.py
  - skills/data.transform/test_data_transform.py
  - skills/data.transform/SKILL.md
✅ Skill 'data.transform' is ready to use
   Add to agent skills_available to use it.
```
### Skill Description Format
Create a Markdown file with this structure:
```markdown
# Name: domain.action
# Purpose:
Brief description of what the skill does
# Inputs:
- input_parameter_1
- input_parameter_2 (optional)
# Outputs:
- output_file_1.json
- output_file_2.yaml
# Permissions:
- filesystem:read
- filesystem:write
# Produces Artifacts:
- artifact-type-1
- artifact-type-2
# Consumes Artifacts:
- artifact-type-3
# Implementation Notes:
Detailed guidance for implementing the skill logic
```
Or use JSON format:
```json
{
"name": "domain.action",
"purpose": "Brief description",
"inputs": ["param1", "param2"],
"outputs": ["output.json"],
"permissions": ["filesystem:read"],
"artifact_produces": ["artifact-type-1"],
"artifact_consumes": ["artifact-type-2"],
"implementation_notes": "Implementation guidance"
}
```
## Generated Structure
For a skill named `data.transform`, meta.skill generates:
```
skills/data.transform/
├── skill.yaml               # Skill configuration
├── data_transform.py        # Python implementation
├── test_data_transform.py   # Test suite
└── SKILL.md                 # Documentation
```
### skill.yaml
Complete skill configuration following Betty conventions:
```yaml
name: data.transform
version: 0.1.0
description: Transform data between formats
inputs:
  - input_file
  - target_format
outputs:
  - transformed_data.json
status: active
permissions:
  - filesystem:read
  - filesystem:write
entrypoints:
  - command: /data/transform
    handler: data_transform.py
    runtime: python
    description: Transform data between formats
artifact_metadata:
  produces:
    - type: transformed-data
  consumes:
    - type: raw-data
```
### Implementation Stub
Python implementation with:
- Proper imports and logging
- Class structure
- execute() method with typed parameters
- CLI entry point with argparse
- Error handling
- Output formatting (JSON/YAML)
```python
#!/usr/bin/env python3
"""
data.transform - Transform data between formats

Generated by meta.skill
"""

import os
import sys
import json
import yaml
from pathlib import Path
from typing import Dict, List, Any, Optional

from betty.config import BASE_DIR
from betty.logging_utils import setup_logger

logger = setup_logger(__name__)


class DataTransform:
    """Transform data between formats"""

    def __init__(self, base_dir: str = BASE_DIR):
        """Initialize skill"""
        self.base_dir = Path(base_dir)

    def execute(self, input_file: Optional[str] = None,
                target_format: Optional[str] = None) -> Dict[str, Any]:
        """Execute the skill"""
        try:
            logger.info("Executing data.transform...")

            # TODO: Implement skill logic here
            # Implementation notes: [your notes here]

            result = {
                "ok": True,
                "status": "success",
                "message": "Skill executed successfully"
            }
            logger.info("Skill completed successfully")
            return result
        except Exception as e:
            logger.error(f"Error executing skill: {e}")
            return {
                "ok": False,
                "status": "failed",
                "error": str(e)
            }


def main():
    """CLI entry point"""
    import argparse

    parser = argparse.ArgumentParser(
        description="Transform data between formats"
    )
    parser.add_argument("--input-file", help="input_file")
    parser.add_argument("--target-format", help="target_format")
    parser.add_argument(
        "--output-format",
        choices=["json", "yaml"],
        default="json",
        help="Output format"
    )
    args = parser.parse_args()

    skill = DataTransform()
    result = skill.execute(
        input_file=args.input_file,
        target_format=args.target_format,
    )

    if args.output_format == "json":
        print(json.dumps(result, indent=2))
    else:
        print(yaml.dump(result, default_flow_style=False))

    sys.exit(0 if result.get("ok") else 1)


if __name__ == "__main__":
    main()
```
### Test Template
pytest-based test suite:
```python
#!/usr/bin/env python3
"""Tests for data.transform"""

import pytest
import sys
import os
from pathlib import Path

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))

from skills.data_transform import data_transform


class TestDataTransform:
    """Tests for DataTransform"""

    def setup_method(self):
        """Setup test fixtures"""
        self.skill = data_transform.DataTransform()

    def test_initialization(self):
        """Test skill initializes correctly"""
        assert self.skill is not None
        assert self.skill.base_dir is not None

    def test_execute_basic(self):
        """Test basic execution"""
        result = self.skill.execute()
        assert result is not None
        assert "ok" in result
        assert "status" in result

    def test_execute_success(self):
        """Test successful execution"""
        result = self.skill.execute()
        assert result["ok"] is True
        assert result["status"] == "success"

    # TODO: Add more specific tests


def test_cli_help(capsys):
    """Test CLI help message"""
    sys.argv = ["data_transform.py", "--help"]
    with pytest.raises(SystemExit) as exc_info:
        data_transform.main()
    assert exc_info.value.code == 0
```
## Skill Naming Convention
Skills must follow the `domain.action` format:
- **domain**: Category (e.g., `data`, `api`, `file`, `text`)
- **action**: Operation (e.g., `validate`, `transform`, `parse`)
- Use only lowercase letters and numbers (no hyphens, underscores, or special characters)
Valid examples:

- `data.validate`
- `api.test`
- `file.compress`
- `text.summarize`

Invalid examples:

- `data.validate-json` (hyphen not allowed)
- `data_validate` (underscore not allowed)
- `DataValidate` (uppercase not allowed)
- `validate` (missing domain)
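
Under the hood, the name check is a single regular expression (see `create_skill()` in `meta_skill.py`). A minimal sketch of the same rule:

```python
import re

# domain.action: two lowercase alphanumeric segments joined by a dot
SKILL_NAME_RE = re.compile(r'^[a-z0-9]+\.[a-z0-9]+$')

def is_valid_skill_name(name: str) -> bool:
    """Return True if the name follows the domain.action convention."""
    return bool(SKILL_NAME_RE.match(name))

assert is_valid_skill_name("data.validate")
assert not is_valid_skill_name("data.validate-json")  # hyphen not allowed
assert not is_valid_skill_name("validate")            # missing domain
```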
## Usage Examples
### Example 1: JSON Validator
**Description file** (`json_validator.md`):
```markdown
# Name: data.validatejson
# Purpose:
Validates JSON files against JSON Schema definitions
# Inputs:
- json_file_path
- schema_file_path (optional)
# Outputs:
- validation_result.json
# Permissions:
- filesystem:read
# Produces Artifacts:
- validation-report
# Implementation Notes:
Use Python's jsonschema library for validation
```
**Create skill:**
```bash
python3 agents/meta.skill/meta_skill.py json_validator.md
```
### Example 2: API Tester
**Description file** (`api_tester.json`):
```json
{
"name": "api.test",
"purpose": "Test API endpoints and generate reports",
"inputs": ["openapi_spec_path", "base_url"],
"outputs": ["test_results.json"],
"permissions": ["network:http"],
"artifact_produces": ["test-report"],
"artifact_consumes": ["openapi-spec"],
"implementation_notes": "Use requests library to test each endpoint"
}
```
**Create skill:**
```bash
python3 agents/meta.skill/meta_skill.py api_tester.json
```
### Example 3: File Compressor
**Description file** (`file_compressor.md`):
```markdown
# Name: file.compress
# Purpose:
Compress files using various algorithms
# Inputs:
- input_path
- compression_type (gzip, zip, tar.gz)
# Outputs:
- compressed_file
# Permissions:
- filesystem:read
- filesystem:write
# Implementation Notes:
Support gzip, zip, and tar.gz formats using Python standard library
```
**Create skill:**
```bash
python3 agents/meta.skill/meta_skill.py file_compressor.md
```
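
The generated stub still needs real compression logic in `execute()`. As a starting point, here is a minimal sketch of the gzip case using only the standard library (illustrative; `compress_gzip` is a hypothetical helper, not part of the generated code):

```python
import gzip
import shutil
from pathlib import Path

def compress_gzip(input_path: str) -> str:
    """Compress a single file with gzip and return the new path."""
    src = Path(input_path)
    dst = src.with_suffix(src.suffix + ".gz")
    with open(src, "rb") as f_in, gzip.open(dst, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
    return str(dst)
```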
## Integration
### With meta.agent
Create an agent that uses the skill:
```yaml
name: data.validator
description: Data validation agent
skills_available:
- data.validatejson # Skill created by meta.skill
```
### With plugin.sync
Sync skills to plugin format:
```bash
python3 skills/plugin.sync/plugin_sync.py
```
This converts `skill.yaml` to commands in `.claude-plugin/plugin.yaml`.
## Artifact Types
### Consumes
- **skill-description** - Natural language skill requirements
  - Pattern: `**/skill_description.md`
  - Format: Markdown or JSON
### Produces
- **skill-definition** - Complete skill configuration
  - Pattern: `skills/*/skill.yaml`
  - Schema: `schemas/skill-definition.json`
- **skill-implementation** - Python implementation code
  - Pattern: `skills/*/[skill_module].py`
- **skill-tests** - Test suite
  - Pattern: `skills/*/test_[skill_module].py`
- **skill-documentation** - SKILL.md documentation
  - Pattern: `skills/*/SKILL.md`
## Common Workflows
### Workflow 1: Create and Test Skill
```bash
# 1. Create skill description
cat > my_skill.md <<EOF
# Name: data.parse
# Purpose:
Parse structured data from text
# Inputs:
- input_text
# Outputs:
- parsed_data.json
# Permissions:
- filesystem:write
EOF
# 2. Generate skill
python3 agents/meta.skill/meta_skill.py my_skill.md
# 3. Implement logic (edit the generated file)
vim skills/data.parse/data_parse.py
# 4. Run tests
pytest skills/data.parse/test_data_parse.py -v
# 5. Test CLI
python3 skills/data.parse/data_parse.py --help
```
### Workflow 2: Create Skill for Agent
```bash
# 1. Create skill
python3 agents/meta.skill/meta_skill.py api_analyzer_skill.md
# 2. Add to agent
echo " - api.analyze" >> agents/api.agent/agent.yaml
# 3. Sync to plugin
python3 skills/plugin.sync/plugin_sync.py
```
### Workflow 3: Batch Create Skills
```bash
# Create multiple skills
for desc in skills_to_create/*.md; do
echo "Creating skill from $desc..."
python3 agents/meta.skill/meta_skill.py "$desc"
done
```
## Tips & Best Practices
### Skill Descriptions
**Be specific about purpose:**
```markdown
# Good
# Purpose: Validate JSON against JSON Schema Draft 07
# Bad
# Purpose: Validate stuff
```
**Include implementation notes:**
```markdown
# Implementation Notes:
Use the jsonschema library. Support Draft 07 schemas.
Provide detailed error messages with line numbers.
```
**Specify optional parameters:**
```markdown
# Inputs:
- required_param
- optional_param (optional)
- another_optional (optional, defaults to 'value')
```
### Parameter Naming
Parameters are automatically sanitized:
- Special characters removed (except `-`, `_`, spaces)
- Converted to lowercase
- Spaces and hyphens become underscores
Example conversions:
- `"Schema File Path (optional)"` → `schema_file_path_optional`
- `"API-Key"` → `api_key`
- `"Input Data"` → `input_data`
### Implementation Strategy
1. **Generate skeleton first** - Let meta.skill create structure
2. **Implement gradually** - Add logic to `execute()` method
3. **Test incrementally** - Run tests after each change
4. **Update documentation** - Keep SKILL.md current
### Artifact Metadata
Always specify artifact types for interoperability:
```markdown
# Produces Artifacts:
- openapi-spec
- validation-report
# Consumes Artifacts:
- api-requirements
```
This enables:
- Agent discovery via meta.compatibility
- Pipeline suggestions via meta.suggest
- Workflow orchestration
## Troubleshooting
### Invalid skill name
```
Error: Skill name must be in domain.action format: my-skill
```
**Solution:** Use format `domain.action` with only alphanumeric characters:
```markdown
# Wrong: my-skill, my_skill, MySkill
# Right: data.transform, api.validate
```
### Skill already exists
```
Error: Skill directory already exists: skills/data.validate
```
**Solution:** Remove the existing skill or use a different name:
```bash
rm -rf skills/data.validate
```
### Import errors in generated code
```
ModuleNotFoundError: No module named 'betty.config'
```
**Solution:** Ensure the Betty framework is on the Python path:
```bash
export PYTHONPATH="${PYTHONPATH}:/home/user/betty"
```
### Test failures
```
ModuleNotFoundError: No module named 'skills.data_validate'
```
**Solution:** Run tests from Betty root directory:
```bash
cd /home/user/betty
pytest skills/data.validate/test_data_validate.py -v
```
## Architecture
```
meta.skill
├─ Input: skill-description (Markdown/JSON)
├─ Parser: extract name, purpose, inputs, outputs
├─ Generator: create skill.yaml, Python, tests, README
├─ Validator: check naming conventions
└─ Output: Complete skill directory structure
```
## Next Steps
After creating a skill with meta.skill:
1. **Implement logic** - Add functionality to `execute()` method
2. **Write tests** - Expand test coverage beyond basic tests
3. **Add to agent** - Include in agent's `skills_available`
4. **Sync to plugin** - Run plugin.sync to update plugin.yaml
5. **Test integration** - Verify skill works in agent context
6. **Document usage** - Update SKILL.md with examples
## Related Documentation
- [META_AGENTS.md](../../docs/META_AGENTS.md) - Meta-agent ecosystem
- [ARTIFACT_STANDARDS.md](../../docs/ARTIFACT_STANDARDS.md) - Artifact system
- [skill-description schema](../../schemas/skill-description.json)
- [skill-definition schema](../../schemas/skill-definition.json)
## How Claude Uses This
Claude can:
1. **Create skills on demand** - "Create a skill that validates YAML files"
2. **Extend agent capabilities** - "Add a JSON validator skill to this agent"
3. **Build skill libraries** - "Create skills for all common data operations"
4. **Prototype quickly** - Test ideas by generating skill scaffolds
meta.skill enables rapid skill development and agent expansion!


@@ -0,0 +1,306 @@
name: meta.skill
version: 0.4.0
description: |
  Creates complete, functional skills from natural language descriptions.

  This meta-agent transforms skill descriptions into production-ready skills with:
  - Complete skill.yaml definition with validated artifact types
  - Artifact flow analysis showing producers/consumers
  - Production-quality Python implementation with type hints
  - Comprehensive test templates
  - Complete documentation with examples
  - Dependency validation
  - Registry registration with artifact_metadata
  - Discoverability verification

  Ensures skills follow Betty Framework conventions and are ready for use in agents.

  Version 0.4.0 adds artifact flow analysis, improved code templates with
  type hints parsed from skill.yaml, and dependency validation.

artifact_metadata:
  consumes:
    - type: skill-description
      file_pattern: "**/skill_description.md"
      content_type: "text/markdown"
      description: "Natural language description of skill requirements"
      schema: "schemas/skill-description.json"
  produces:
    - type: skill-definition
      file_pattern: "skills/*/skill.yaml"
      content_type: "application/yaml"
      schema: "schemas/skill-definition.json"
      description: "Complete skill configuration"
    - type: skill-implementation
      file_pattern: "skills/*/*.py"
      content_type: "text/x-python"
      description: "Python implementation with proper structure"
    - type: skill-tests
      file_pattern: "skills/*/test_*.py"
      content_type: "text/x-python"
      description: "Test template with example tests"
    - type: skill-documentation
      file_pattern: "skills/*/SKILL.md"
      content_type: "text/markdown"
      description: "Skill documentation and usage guide"

status: draft
reasoning_mode: iterative

capabilities:
  - Convert skill concepts into production-ready packages with tests and docs
  - Ensure generated skills follow registry, artifact, and permission conventions
  - Coordinate registration and documentation updates for new skills

skills_available:
  - skill.create
  - skill.define
  - artifact.define          # Generate artifact metadata
  - artifact.validate.types  # Validate artifact types against registry

permissions:
  - filesystem:read
  - filesystem:write
system_prompt: |
  You are meta.skill, the skill creator for Betty Framework.

  Your purpose is to transform natural language skill descriptions into complete,
  production-ready skills that follow Betty conventions.

  ## Your Workflow

  1. **Parse Description** - Understand skill requirements
     - Extract name, purpose, inputs, outputs
     - Identify artifact types in produces/consumes sections
     - Identify required permissions
     - Understand implementation requirements

  2. **Validate Artifact Types** - CRITICAL: Verify before generating skill.yaml
     - Extract ALL artifact types from the skill description (produces + consumes sections)
     - Call the artifact.validate.types skill:
       ```bash
       python3 skills/artifact.validate.types/artifact_validate_types.py \
         --artifact_types '["threat-model", "data-flow-diagrams", "architecture-overview"]' \
         --check_schemas true \
         --suggest_alternatives true \
         --max_suggestions 3
       ```
     - Parse validation results:
       ```json
       {
         "all_valid": true/false,
         "validation_results": {
           "threat-model": {
             "valid": true,
             "file_pattern": "*.threat-model.yaml",
             "content_type": "application/yaml",
             "schema": "schemas/artifacts/threat-model-schema.json"
           }
         },
         "invalid_types": ["data-flow-diagram"],
         "suggestions": {
           "data-flow-diagram": [
             {"type": "data-flow-diagrams", "reason": "Plural form", "confidence": "high"}
           ]
         }
       }
       ```
     - If all_valid == false:
       → Display invalid_types and suggestions to the user
       → Example: "❌ Artifact type 'data-flow-diagram' not found. Did you mean 'data-flow-diagrams' (plural, high confidence)?"
       → ASK USER to confirm correct types or provide alternatives
       → HALT skill creation until artifact types are validated
     - If all_valid == true:
       → Store validated metadata (file_pattern, content_type, schema) for each type
       → Use this exact metadata in Step 4 when generating skill.yaml

  3. **Analyze Artifact Flow** - Understand the skill's place in the ecosystem
     - For each artifact type the skill produces:
       → Search the registry for skills that consume this type
       → Report: "✅ {artifact_type} will be consumed by: {consuming_skills}"
       → If no consumers: "⚠️ {artifact_type} has no consumers yet - consider creating skills that use it"
     - For each artifact type the skill consumes:
       → Search the registry for skills that produce this type
       → Report: "✅ {artifact_type} produced by: {producing_skills}"
       → If no producers: "❌ {artifact_type} has no producers - user must provide manually or create a producer skill first"
     - Warn about gaps in artifact flow
     - Suggest related skills to create for a complete workflow

  4. **Generate skill.yaml** - Create complete definition with VALIDATED artifact metadata
     - name: Proper naming (domain.action format)
     - version: Semantic versioning (e.g., "0.1.0")
     - description: Clear description of what the skill does
     - inputs: List of input parameters (use empty list [] if none)
     - outputs: List of output parameters (use empty list [] if none)
     - status: One of "draft", "active", or "deprecated"
     - Artifact metadata (produces/consumes)
     - Permissions
     - Entrypoints with parameters

  5. **Generate Implementation** - Create production-quality Python stub
     - **Parse skill.yaml inputs** to generate a proper argparse CLI:
       ```python
       # For each input in skill.yaml:
       parser.add_argument(
           '--{input.name}',
           type={map_type(input.type)},  # string→str, number→int, boolean→bool, array→list
           required={input.required},
           default={input.default if not required},
           help="{input.description}"
       )
       ```
     - **Generate function signature** with type hints from inputs/outputs:
       ```python
       def validate_artifact_types(
           artifact_types: List[str],
           check_schemas: bool = True,
           suggest_alternatives: bool = True
       ) -> Dict[str, Any]:
           \"\"\"
           {skill.description}

           Args:
               artifact_types: {input.description from skill.yaml}
               check_schemas: {input.description from skill.yaml}
               ...

           Returns:
               {output descriptions from skill.yaml}
           \"\"\"
       ```
     - **Include implementation pattern** based on skill type:
       - Validation skills: load data → validate → return results
       - Generator skills: gather inputs → process → save output
       - Transform skills: load input → transform → save output
     - **Add comprehensive error handling**:
       ```python
       except FileNotFoundError as e:
           logger.error(str(e))
           print(json.dumps({"ok": False, "error": str(e)}, indent=2))
           sys.exit(1)
       ```
     - **JSON output structure** matching skill.yaml outputs:
       ```python
       result = {
           "{output1.name}": value1,  # From skill.yaml outputs
           "{output2.name}": value2,
           "ok": True,
           "status": "success"
       }
       print(json.dumps(result, indent=2))
       ```
     - Add proper logging setup
     - Include a module docstring with a usage example

  6. **Generate Tests** - Create test template
     - Unit test structure
     - Example test cases
     - Fixtures
     - Assertions

  7. **Generate Documentation** - Create SKILL.md
     - Purpose and usage
     - Input/output examples
     - Integration with agents
     - Artifact flow (from Step 3 analysis)
     - Must include a markdown header starting with #

  8. **Validate Dependencies** - Check Python packages
     - For each dependency in skill.yaml:
       → Verify the package exists on PyPI (if possible)
       → Check for known naming issues (e.g., "yaml" vs "pyyaml")
       → Warn about version conflicts with existing skills
     - Suggest an installation command: `pip install {dependencies}`
     - If dependencies are missing, warn but don't block

  9. **Register Skill** - Update registry
     - Call registry.update with the skill manifest path
     - Verify the skill appears in the registry with artifact_metadata
     - Confirm the skill is discoverable via artifact types

  10. **Verify Discoverability** - Final validation
      - Check the skill exists in registry/skills.json
      - Verify artifact_metadata is complete
      - Test that agent.compose can discover the skill by artifact type
      - Confirm artifact flow is complete (from Step 3)

  ## Conventions

  **Naming:**
  - Skills: `domain.action` (e.g., `api.validate`, `workflow.compose`)
  - Use lowercase with dots
  - Action should be an imperative verb

  **Structure:**
  ```
  skills/domain.action/
  ├── skill.yaml              (definition)
  ├── domain_action.py        (implementation)
  ├── test_domain_action.py   (tests)
  └── SKILL.md                (docs)
  ```

  **Artifact Metadata:**
  - Always define what the skill produces/consumes
  - Use registered artifact types from meta.artifact
  - Include schemas when applicable

  **Implementation:**
  - Follow Python best practices
  - Include proper error handling
  - Add logging
  - CLI with argparse
  - JSON output for results

  ## Quality Standards

  - ✅ Follows Betty conventions (domain.action naming, proper structure)
  - ✅ All required fields in skill.yaml: name, version, description, inputs, outputs, status
  - ✅ Artifact types VALIDATED against registry before generation
  - ✅ Artifact flow ANALYZED (producers/consumers identified)
  - ✅ Production-quality code with type hints and comprehensive docstrings
  - ✅ Proper CLI generated from skill.yaml inputs (no TODO placeholders)
  - ✅ JSON output structure matches skill.yaml outputs
  - ✅ Dependencies VALIDATED and installation command provided
  - ✅ Comprehensive test template with fixtures
  - ✅ SKILL.md with markdown header, examples, and artifact flow
  - ✅ Registered in registry with complete artifact_metadata
  - ✅ Passes Pydantic validation
  - ✅ Discoverable via agent.compose by artifact type

  ## Error Handling & Recovery

  **Artifact Type Not Found:**
  - Search registry/artifact_types.json for similar names
  - Check for singular/plural variants (data-model vs logical-data-model)
  - Suggest alternatives: "Did you mean: 'data-flow-diagrams', 'dataflow-diagram'?"
  - ASK USER to confirm or provide the correct type
  - DO NOT proceed with invalid artifact types

  **File Pattern Mismatch:**
  - Use the exact file_pattern from the registry
  - Warn the user if the description specifies a different pattern
  - Document the correct pattern in skill.yaml comments

  **Schema File Missing:**
  - Warn: "Schema file schemas/artifacts/X-schema.json not found"
  - Ask if the schema should be: (a) created, (b) omitted, (c) ignored
  - Continue with a warning but don't block skill creation

  **Registry Update Fails:**
  - Report the specific error from registry.update
  - Check whether it's a version conflict or a validation issue
  - Provide a manual registration command as a fallback
  - Log the issue for the framework team

  **Duplicate Skill Name:**
  - Check the existing version in the registry
  - Offer to: (a) bump the version, (b) rename the skill, (c) cancel
  - Require explicit user confirmation before overwriting

  Remember: You're creating building blocks for agents. Make skills
  composable, well-documented, and easy to use. ALWAYS validate artifact
  types before generating skill.yaml!

agents/meta.skill/meta_skill.py Executable file

@@ -0,0 +1,791 @@
#!/usr/bin/env python3
"""
meta.skill - Skill Creator

Creates complete, functional skills from natural language descriptions.
Generates skill.yaml, implementation stub, tests, and documentation.
"""

import json
import yaml
import sys
import os
import re
from pathlib import Path
from typing import Dict, List, Any, Optional
from datetime import datetime

# Add parent directory to path for imports
parent_dir = str(Path(__file__).parent.parent.parent)
sys.path.insert(0, parent_dir)

from betty.traceability import get_tracer, RequirementInfo

# Import artifact validation from the artifact.define skill
try:
    import importlib.util
    artifact_define_path = Path(__file__).parent.parent.parent / "skills" / "artifact.define" / "artifact_define.py"
    spec = importlib.util.spec_from_file_location("artifact_define", artifact_define_path)
    artifact_define_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(artifact_define_module)
    validate_artifact_type = artifact_define_module.validate_artifact_type
    KNOWN_ARTIFACT_TYPES = artifact_define_module.KNOWN_ARTIFACT_TYPES
    ARTIFACT_VALIDATION_AVAILABLE = True
except Exception:
    ARTIFACT_VALIDATION_AVAILABLE = False
class SkillCreator:
    """Creates skills from natural language descriptions"""

    def __init__(self, base_dir: str = "."):
        """Initialize with base directory"""
        self.base_dir = Path(base_dir)
        self.skills_dir = self.base_dir / "skills"
        self.registry_path = self.base_dir / "registry" / "skills.json"

    def parse_description(self, description_path: str) -> Dict[str, Any]:
        """
        Parse skill description from Markdown or JSON file

        Args:
            description_path: Path to skill_description.md or .json

        Returns:
            Parsed description with skill metadata
        """
        path = Path(description_path)
        if not path.exists():
            raise FileNotFoundError(f"Description not found: {description_path}")

        # Handle JSON format
        if path.suffix == ".json":
            with open(path) as f:
                return json.load(f)

        # Handle Markdown format
        with open(path) as f:
            content = f.read()

        # Parse Markdown sections
        description = {
            "name": "",
            "purpose": "",
            "inputs": [],
            "outputs": [],
            "permissions": [],
            "implementation_notes": "",
            "examples": [],
            "artifact_produces": [],
            "artifact_consumes": []
        }

        current_section = None
        for line in content.split('\n'):
            line_stripped = line.strip()

            # Section headers
            if line_stripped.startswith('# Name:'):
                description["name"] = line_stripped.replace('# Name:', '').strip()
            elif line_stripped.startswith('# Purpose:'):
                current_section = "purpose"
            elif line_stripped.startswith('# Inputs:'):
                current_section = "inputs"
            elif line_stripped.startswith('# Outputs:'):
                current_section = "outputs"
            elif line_stripped.startswith('# Permissions:'):
                current_section = "permissions"
            elif line_stripped.startswith('# Implementation Notes:'):
                current_section = "implementation_notes"
            elif line_stripped.startswith('# Examples:'):
                current_section = "examples"
            elif line_stripped.startswith('# Produces Artifacts:'):
                current_section = "artifact_produces"
            elif line_stripped.startswith('# Consumes Artifacts:'):
                current_section = "artifact_consumes"
            elif line_stripped and not line_stripped.startswith('#'):
                # Content for the current section
                if current_section == "purpose":
                    description["purpose"] += line_stripped + " "
                elif current_section == "implementation_notes":
                    description["implementation_notes"] += line_stripped + " "
                elif current_section in ["inputs", "outputs", "permissions",
                                         "examples", "artifact_produces",
                                         "artifact_consumes"] and line_stripped.startswith('-'):
                    description[current_section].append(line_stripped[1:].strip())

        description["purpose"] = description["purpose"].strip()
        description["implementation_notes"] = description["implementation_notes"].strip()

        return description
    def generate_skill_yaml(self, skill_desc: Dict[str, Any]) -> str:
        """
        Generate skill.yaml content

        Args:
            skill_desc: Parsed skill description

        Returns:
            YAML content as string
        """
        skill_name = skill_desc["name"]
        # Convert skill.name to skill_name format for the handler filename
        handler_name = skill_name.replace('.', '_') + ".py"

        skill_def = {
            "name": skill_name,
            "version": "0.1.0",
            "description": skill_desc["purpose"],
            "inputs": skill_desc.get("inputs", []),
            "outputs": skill_desc.get("outputs", []),
            "status": "active",
            "permissions": skill_desc.get("permissions", ["filesystem:read"]),
            "entrypoints": [
                {
                    "command": f"/{skill_name.replace('.', '/')}",
                    "handler": handler_name,
                    "runtime": "python",
                    "description": skill_desc["purpose"][:100]
                }
            ]
        }

        # Add artifact metadata if specified
        if skill_desc.get("artifact_produces") or skill_desc.get("artifact_consumes"):
            artifact_metadata = {}
            if skill_desc.get("artifact_produces"):
                artifact_metadata["produces"] = [
                    {"type": art_type} for art_type in skill_desc["artifact_produces"]
                ]
            if skill_desc.get("artifact_consumes"):
                artifact_metadata["consumes"] = [
                    {"type": art_type, "required": True}
                    for art_type in skill_desc["artifact_consumes"]
                ]
            skill_def["artifact_metadata"] = artifact_metadata

        return yaml.dump(skill_def, default_flow_style=False, sort_keys=False)
    def generate_implementation(self, skill_desc: Dict[str, Any]) -> str:
        """
        Generate Python implementation stub

        Args:
            skill_desc: Parsed skill description

        Returns:
            Python code as string
        """
        skill_name = skill_desc["name"]
        module_name = skill_name.replace('.', '_')
        class_name = ''.join(word.capitalize() for word in skill_name.split('.'))

        implementation = f'''#!/usr/bin/env python3
"""
{skill_name} - {skill_desc["purpose"]}

Generated by meta.skill with Betty Framework certification
"""

import os
import sys
import json
import yaml
from pathlib import Path
from typing import Dict, List, Any, Optional

# Add parent directory to path for imports
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))

from betty.config import BASE_DIR
from betty.logging_utils import setup_logger
from betty.certification import certified_skill

logger = setup_logger(__name__)


class {class_name}:
    """
    {skill_desc["purpose"]}
    """

    def __init__(self, base_dir: str = BASE_DIR):
        """Initialize skill"""
        self.base_dir = Path(base_dir)

    @certified_skill("{skill_name}")
    def execute(self'''

        # Add input parameters
        if skill_desc.get("inputs"):
            for inp in skill_desc["inputs"]:
                # Sanitize parameter names - keep only alphanumerics, spaces, hyphens, underscores
                param_name = ''.join(c if c.isalnum() or c in ' -_' else '' for c in inp.lower())
                param_name = param_name.replace(' ', '_').replace('-', '_')
                implementation += f', {param_name}: Optional[str] = None'

        implementation += f''') -> Dict[str, Any]:
        """
        Execute the skill

        Returns:
            Dict with execution results
        """
        try:
            logger.info("Executing {skill_name}...")

            # TODO: Implement skill logic here
'''

        if skill_desc.get("implementation_notes"):
            implementation += f'''
            # Implementation notes:
            # {skill_desc["implementation_notes"]}
'''

        # Escape the purpose string for use in a Python string literal
        escaped_purpose = skill_desc['purpose'].replace('"', '\\"')

        implementation += f'''
            # Placeholder implementation
            result = {{
                "ok": True,
                "status": "success",
                "message": "Skill executed successfully"
            }}

            logger.info("Skill completed successfully")
            return result

        except Exception as e:
            logger.error(f"Error executing skill: {{e}}")
            return {{
                "ok": False,
                "status": "failed",
                "error": str(e)
            }}


def main():
    """CLI entry point"""
    import argparse

    parser = argparse.ArgumentParser(
        description="{escaped_purpose}"
    )
'''

        # Add CLI arguments for inputs
        if skill_desc.get("inputs"):
            for inp in skill_desc["inputs"]:
                # Sanitize parameter names - remove special characters
                param_name = ''.join(c if c.isalnum() or c in ' -_' else '' for c in inp.lower())
                param_name = param_name.replace(' ', '_').replace('-', '_')
                implementation += f'''
    parser.add_argument(
        "--{param_name.replace('_', '-')}",
        help="{inp}"
    )'''

        implementation += f'''
    parser.add_argument(
        "--output-format",
        choices=["json", "yaml"],
        default="json",
        help="Output format"
    )
    args = parser.parse_args()

    # Create skill instance
    skill = {class_name}()

    # Execute skill
    result = skill.execute('''

        if skill_desc.get("inputs"):
            for inp in skill_desc["inputs"]:
                # Sanitize parameter names - remove special characters
                param_name = ''.join(c if c.isalnum() or c in ' -_' else '' for c in inp.lower())
                param_name = param_name.replace(' ', '_').replace('-', '_')
                implementation += f'''
        {param_name}=args.{param_name},'''

        implementation += '''
    )

    # Output result
    if args.output_format == "json":
        print(json.dumps(result, indent=2))
    else:
        print(yaml.dump(result, default_flow_style=False))

    # Exit with appropriate code
    sys.exit(0 if result.get("ok") else 1)


if __name__ == "__main__":
    main()
'''
        return implementation
    def generate_tests(self, skill_desc: Dict[str, Any]) -> str:
        """
        Generate test template

        Args:
            skill_desc: Parsed skill description

        Returns:
            Python test code as string
        """
        skill_name = skill_desc["name"]
        module_name = skill_name.replace('.', '_')
        class_name = ''.join(word.capitalize() for word in skill_name.split('.'))

        tests = f'''#!/usr/bin/env python3
"""
Tests for {skill_name}

Generated by meta.skill
"""

import pytest
import sys
import os
from pathlib import Path

# Add parent directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))

from skills.{skill_name.replace('.', '_')} import {module_name}


class Test{class_name}:
    """Tests for {class_name}"""

    def setup_method(self):
        """Setup test fixtures"""
        self.skill = {module_name}.{class_name}()

    def test_initialization(self):
        """Test skill initializes correctly"""
        assert self.skill is not None
        assert self.skill.base_dir is not None

    def test_execute_basic(self):
        """Test basic execution"""
        result = self.skill.execute()
        assert result is not None
        assert "ok" in result
        assert "status" in result

    def test_execute_success(self):
        """Test successful execution"""
        result = self.skill.execute()
        assert result["ok"] is True
        assert result["status"] == "success"

    # TODO: Add more specific tests based on skill functionality


def test_cli_help(capsys):
    """Test CLI help message"""
    sys.argv = ["{module_name}.py", "--help"]
    with pytest.raises(SystemExit) as exc_info:
        {module_name}.main()
    assert exc_info.value.code == 0

    captured = capsys.readouterr()
    assert "{skill_desc['purpose'][:50]}" in captured.out


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
'''
        return tests
    def generate_skill_md(self, skill_desc: Dict[str, Any]) -> str:
        """
        Generate SKILL.md

        Args:
            skill_desc: Parsed skill description

        Returns:
            Markdown content as string
        """
        skill_name = skill_desc["name"]

        readme = f'''# {skill_name}

{skill_desc["purpose"]}

## Overview

**Purpose:** {skill_desc["purpose"]}

**Command:** `/{skill_name.replace('.', '/')}`

## Usage

### Basic Usage

```bash
python3 skills/{skill_name.replace('.', '/')}/{skill_name.replace('.', '_')}.py
```

### With Arguments

```bash
python3 skills/{skill_name.replace('.', '/')}/{skill_name.replace('.', '_')}.py \\
'''
        if skill_desc.get("inputs"):
            for inp in skill_desc["inputs"]:
                # Match the hyphenated flag style used by the generated CLI
                param_name = inp.lower().replace(' ', '-').replace('_', '-')
                readme += f'  --{param_name} "value" \\\n'
        readme += '  --output-format json\n```\n\n'

        if skill_desc.get("inputs"):
            readme += "## Inputs\n\n"
            for inp in skill_desc["inputs"]:
                readme += f"- **{inp}**\n"
            readme += "\n"

        if skill_desc.get("outputs"):
            readme += "## Outputs\n\n"
            for out in skill_desc["outputs"]:
                readme += f"- **{out}**\n"
            readme += "\n"

        if skill_desc.get("artifact_consumes") or skill_desc.get("artifact_produces"):
            readme += "## Artifact Metadata\n\n"
            if skill_desc.get("artifact_consumes"):
                readme += "### Consumes\n\n"
                for art in skill_desc["artifact_consumes"]:
                    readme += f"- `{art}`\n"
                readme += "\n"
            if skill_desc.get("artifact_produces"):
                readme += "### Produces\n\n"
                for art in skill_desc["artifact_produces"]:
                    readme += f"- `{art}`\n"
                readme += "\n"

        if skill_desc.get("examples"):
            readme += "## Examples\n\n"
            for example in skill_desc["examples"]:
                readme += f"- {example}\n"
            readme += "\n"

        if skill_desc.get("permissions"):
            readme += "## Permissions\n\n"
            for perm in skill_desc["permissions"]:
                readme += f"- `{perm}`\n"
            readme += "\n"

        if skill_desc.get("implementation_notes"):
            readme += "## Implementation Notes\n\n"
            readme += f"{skill_desc['implementation_notes']}\n\n"

        readme += f'''## Integration

This skill can be used in agents by including it in `skills_available`:

```yaml
name: my.agent
skills_available:
  - {skill_name}
```

## Testing

Run tests with:

```bash
pytest skills/{skill_name.replace('.', '/')}/test_{skill_name.replace('.', '_')}.py -v
```

## Created By

This skill was generated by **meta.skill**, the skill creator meta-agent.

---

*Part of the Betty Framework*
'''
        return readme
    def validate_artifacts(self, skill_desc: Dict[str, Any]) -> List[str]:
        """
        Validate that artifact types exist in the known registry.

        Args:
            skill_desc: Parsed skill description

        Returns:
            List of warning messages
        """
        warnings = []

        if not ARTIFACT_VALIDATION_AVAILABLE:
            warnings.append(
                "Artifact validation skipped: artifact.define skill not available"
            )
            return warnings

        # Validate produced artifacts
        for artifact_type in skill_desc.get("artifact_produces", []):
            is_valid, warning = validate_artifact_type(artifact_type)
            if not is_valid and warning:
                warnings.append(f"Produces: {warning}")

        # Validate consumed artifacts
        for artifact_type in skill_desc.get("artifact_consumes", []):
            is_valid, warning = validate_artifact_type(artifact_type)
            if not is_valid and warning:
                warnings.append(f"Consumes: {warning}")

        return warnings
    def create_skill(
        self,
        description_path: str,
        output_dir: Optional[str] = None,
        requirement: Optional[RequirementInfo] = None
    ) -> Dict[str, Any]:
        """
        Create a complete skill from description

        Args:
            description_path: Path to skill description file
            output_dir: Output directory (default: skills/{name}/)
            requirement: Optional requirement information for traceability

        Returns:
            Summary of created files
        """
        # Parse description
        skill_desc = self.parse_description(description_path)
        skill_name = skill_desc["name"]

        if not skill_name:
            raise ValueError("Skill name is required")

        # Validate name format (domain.action)
        if not re.match(r'^[a-z0-9]+\.[a-z0-9]+$', skill_name):
            raise ValueError(
                f"Skill name must be in domain.action format: {skill_name}"
            )

        # Validate artifact types
        artifact_warnings = self.validate_artifacts(skill_desc)
        if artifact_warnings:
            print("\n⚠️  Artifact Validation Warnings:")
            for warning in artifact_warnings:
                print(f"  {warning}")
            print()

        # Determine output directory
        if not output_dir:
            output_dir = f"skills/{skill_name}"
        output_path = Path(output_dir)
        output_path.mkdir(parents=True, exist_ok=True)

        result = {
            "skill_name": skill_name,
            "created_files": [],
            "errors": [],
            "artifact_warnings": artifact_warnings
        }

        # Generate and save skill.yaml
        skill_yaml_content = self.generate_skill_yaml(skill_desc)
        skill_yaml_path = output_path / "skill.yaml"
        with open(skill_yaml_path, 'w') as f:
            f.write(skill_yaml_content)
        result["created_files"].append(str(skill_yaml_path))

        # Generate and save implementation
        impl_content = self.generate_implementation(skill_desc)
        impl_path = output_path / f"{skill_name.replace('.', '_')}.py"
        with open(impl_path, 'w') as f:
            f.write(impl_content)
        os.chmod(impl_path, 0o755)  # Make executable
        result["created_files"].append(str(impl_path))

        # Generate and save tests
        tests_content = self.generate_tests(skill_desc)
        tests_path = output_path / f"test_{skill_name.replace('.', '_')}.py"
        with open(tests_path, 'w') as f:
            f.write(tests_content)
        result["created_files"].append(str(tests_path))

        # Generate and save SKILL.md
        skill_md_content = self.generate_skill_md(skill_desc)
        skill_md_path = output_path / "SKILL.md"
        with open(skill_md_path, 'w') as f:
            f.write(skill_md_content)
        result["created_files"].append(str(skill_md_path))

        # Log traceability if requirement provided
        trace_id = None
        if requirement:
            try:
                tracer = get_tracer()
                trace_id = tracer.log_creation(
                    component_id=skill_name,
                    component_name=skill_name.replace(".", " ").title(),
                    component_type="skill",
                    component_version="0.1.0",
                    component_file_path=str(skill_yaml_path),
                    input_source_path=description_path,
                    created_by_tool="meta.skill",
                    created_by_version="0.1.0",
                    requirement=requirement,
                    tags=["skill", "auto-generated"],
                    project="Betty Framework"
                )

                # Log validation check
                validation_details = {
                    "checks_performed": [
                        {"name": "skill_structure", "status": "passed"},
                        {"name": "artifact_metadata", "status": "passed"}
                    ]
                }

                # Check for artifact metadata
                if skill_desc.get("artifact_produces") or skill_desc.get("artifact_consumes"):
                    validation_details["checks_performed"].append({
                        "name": "artifact_metadata_completeness",
                        "status": "passed",
                        "message": f"Produces: {len(skill_desc.get('artifact_produces', []))}, Consumes: {len(skill_desc.get('artifact_consumes', []))}"
                    })

                tracer.log_verification(
                    component_id=skill_name,
                    check_type="validation",
                    tool="meta.skill",
                    result="passed",
                    details=validation_details
                )
                result["trace_id"] = trace_id
            except Exception as e:
                print(f"⚠️  Warning: Could not log traceability: {e}")

        return result
def main():
    """CLI entry point"""
    import argparse

    parser = argparse.ArgumentParser(
        description="meta.skill - Create skills from descriptions"
    )
    parser.add_argument(
        "description",
        help="Path to skill description file (.md or .json)"
    )
    parser.add_argument(
        "-o", "--output",
        help="Output directory (default: skills/{name}/)"
    )

    # Traceability arguments
    parser.add_argument(
        "--requirement-id",
        help="Requirement identifier (e.g., REQ-2025-001)"
    )
    parser.add_argument(
        "--requirement-description",
        help="What this skill accomplishes"
    )
    parser.add_argument(
        "--requirement-source",
        help="Source document"
    )
    parser.add_argument(
        "--issue-id",
        help="Issue tracking ID (e.g., JIRA-123)"
    )
    parser.add_argument(
        "--requested-by",
        help="Who requested this"
    )
    parser.add_argument(
        "--rationale",
        help="Why this is needed"
    )

    args = parser.parse_args()

    # Create requirement info if provided
    requirement = None
    if args.requirement_id and args.requirement_description:
        requirement = RequirementInfo(
            id=args.requirement_id,
            description=args.requirement_description,
            source=args.requirement_source,
            issue_id=args.issue_id,
            requested_by=args.requested_by,
            rationale=args.rationale
        )

    creator = SkillCreator()

    print(f"🛠️  meta.skill - Creating skill from {args.description}")

    try:
        result = creator.create_skill(
            args.description,
            output_dir=args.output,
            requirement=requirement
        )

        print(f"\n✨ Skill '{result['skill_name']}' created successfully!\n")

        if result["created_files"]:
            print("📄 Created files:")
            for file in result["created_files"]:
                print(f"  - {file}")

        if result["errors"]:
            print("\n⚠️  Warnings:")
            for error in result["errors"]:
                print(f"  - {error}")

        if result.get("trace_id"):
            print(f"\n📝 Traceability: {result['trace_id']}")
            print(f"   View trace: python3 betty/trace_cli.py show {result['skill_name']}")

        print(f"\n✅ Skill '{result['skill_name']}' is ready to use")
        print("   Add to agent skills_available to use it.")

    except Exception as e:
        print(f"\n❌ Error creating skill: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()