Initial commit

Zhongwei Li
2025-11-29 18:26:08 +08:00
commit 8f22ddf339
295 changed files with 59710 additions and 0 deletions


@@ -0,0 +1,82 @@
# test.example
A simple test skill for validating the meta.create orchestrator workflow
## Overview
**Purpose:** A simple test skill for validating the meta.create orchestrator workflow
**Command:** `/test/example`
## Usage
### Basic Usage
```bash
python3 skills/test/example/test_example.py
```
### With Arguments
```bash
python3 skills/test/example/test_example.py \
  --input-data "value" \
  --output-format json
```
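With the placeholder implementation in this commit, a successful run prints the following (in `json` mode):
```json
{
  "ok": true,
  "status": "success",
  "message": "Skill executed successfully"
}
```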
## Inputs
- **input_data** (string): Test input data
## Outputs
- **output_result** (string): Processed result
## Artifact Metadata
### Consumes
- `test.input`
### Produces
- `test.result`
## Examples
- Process test data and produce test results
- Validate meta.create orchestration workflow
## Permissions
- `filesystem:read`
## Implementation Notes
This is a minimal test skill to verify that meta.create can properly orchestrate the creation of skills, check for duplicates, and validate compatibility.
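As an illustration of the compatibility side, a check like the following could decide whether two skills can be chained from their `artifact_metadata` blocks. This is a minimal sketch assuming manifests parsed into plain dicts, not the framework's actual validator:
```python
from typing import Any, Dict

def can_chain(producer: Dict[str, Any], consumer: Dict[str, Any]) -> bool:
    """Illustrative compatibility check over artifact_metadata blocks.

    Assumes manifests shaped like this skill's YAML manifest; the real
    meta.create validator is not shown in this commit.
    """
    produced = {
        a["type"] for a in producer.get("artifact_metadata", {}).get("produces", [])
    }
    consumed = consumer.get("artifact_metadata", {}).get("consumes", [])
    # Every *required* consumed type must be produced upstream.
    return all(a["type"] in produced for a in consumed if a.get("required"))
```
For this skill, any producer declaring `test.input` under `produces` would satisfy the required `test.input` it consumes.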
## Integration
This skill can be used in agents by including it in `skills_available`:
```yaml
name: my.agent
skills_available:
- test.example
```
## Testing
Run tests with:
```bash
pytest skills/test/example/test_test_example.py -v
```
## Created By
This skill was generated by **meta.skill**, the skill creator meta-agent.
---
*Part of the Betty Framework*


@@ -0,0 +1 @@
# Auto-generated package initializer for skills.


@@ -0,0 +1,21 @@
name: test.example
version: 0.1.0
description: A simple test skill for validating the meta.create orchestrator workflow
inputs:
  - input_data (string) - Test input data
outputs:
  - output_result (string) - Processed result
status: active
permissions:
  - filesystem:read
entrypoints:
  - command: /test/example
    handler: test_example.py
    runtime: python
    description: A simple test skill for validating the meta.create orchestrator workflow
artifact_metadata:
  produces:
    - type: test.result
  consumes:
    - type: test.input
      required: true
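A quick way to sanity-check a manifest like this is to parse it with PyYAML (which the skill already depends on). A sketch, assuming the file is saved as `skill.yaml` next to the handler; the commit view does not show the manifest's actual filename:
```python
from pathlib import Path

import yaml

# Hypothetical path; the commit view does not show the manifest's filename.
manifest = yaml.safe_load(Path("skills/test/example/skill.yaml").read_text())

assert manifest["name"] == "test.example"
assert manifest["entrypoints"][0]["handler"] == "test_example.py"
print(f"{manifest['name']} v{manifest['version']}: {manifest['status']}")
```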


@@ -0,0 +1,107 @@
#!/usr/bin/env python3
"""
test.example - A simple test skill for validating the meta.create orchestrator workflow

Generated by meta.skill with Betty Framework certification
"""

import json
import sys
from pathlib import Path
from typing import Any, Dict, Optional

import yaml

from betty.certification import certified_skill
from betty.config import BASE_DIR
from betty.logging_utils import setup_logger

logger = setup_logger(__name__)


class TestExample:
    """A simple test skill for validating the meta.create orchestrator workflow."""

    def __init__(self, base_dir: str = BASE_DIR):
        """Initialize the skill."""
        self.base_dir = Path(base_dir)

    @certified_skill("test.example")
    def execute(self, input_data: Optional[str] = None) -> Dict[str, Any]:
        """
        Execute the skill.

        Returns:
            Dict with execution results
        """
        try:
            logger.info("Executing test.example...")

            # TODO: Implement skill logic here
            # Implementation notes: this is a minimal test skill to verify
            # that meta.create can properly orchestrate the creation of
            # skills, check for duplicates, and validate compatibility.

            # Placeholder implementation
            result = {
                "ok": True,
                "status": "success",
                "message": "Skill executed successfully",
            }
            logger.info("Skill completed successfully")
            return result
        except Exception as e:
            logger.error(f"Error executing skill: {e}")
            return {
                "ok": False,
                "status": "failed",
                "error": str(e),
            }


def main():
    """CLI entry point."""
    import argparse

    parser = argparse.ArgumentParser(
        description="A simple test skill for validating the meta.create orchestrator workflow"
    )
    parser.add_argument(
        "--input-data",
        help="Test input data (string)",
    )
    parser.add_argument(
        "--output-format",
        choices=["json", "yaml"],
        default="json",
        help="Output format",
    )
    args = parser.parse_args()

    # Create the skill instance and execute it
    skill = TestExample()
    result = skill.execute(input_data=args.input_data)

    # Emit the result in the requested format
    if args.output_format == "json":
        print(json.dumps(result, indent=2))
    else:
        print(yaml.dump(result, default_flow_style=False))

    # Exit code mirrors the "ok" flag
    sys.exit(0 if result.get("ok") else 1)


if __name__ == "__main__":
    main()
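Besides the CLI, the class can be used directly; a short sketch, assuming the package layout used by the tests below:
```python
from skills.test.example import test_example

skill = test_example.TestExample()
result = skill.execute(input_data="hello")  # placeholder logic ignores the input
assert result["ok"] and result["status"] == "success"
```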


@@ -0,0 +1,62 @@
#!/usr/bin/env python3
"""
Tests for test.example

Generated by meta.skill
"""

import os
import sys

import pytest

# Add the repository root to the import path so skills.test.example resolves
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")))

from skills.test.example import test_example


class TestTestExample:
    """Tests for TestExample"""

    def setup_method(self):
        """Set up test fixtures."""
        self.skill = test_example.TestExample()

    def test_initialization(self):
        """Test that the skill initializes correctly."""
        assert self.skill is not None
        assert self.skill.base_dir is not None

    def test_execute_basic(self):
        """Test basic execution."""
        result = self.skill.execute()
        assert result is not None
        assert "ok" in result
        assert "status" in result

    def test_execute_success(self):
        """Test successful execution."""
        result = self.skill.execute()
        assert result["ok"] is True
        assert result["status"] == "success"

    # TODO: Add more specific tests based on skill functionality


def test_cli_help(capsys):
    """Test the CLI help message."""
    sys.argv = ["test_example.py", "--help"]
    with pytest.raises(SystemExit) as exc_info:
        test_example.main()
    assert exc_info.value.code == 0
    captured = capsys.readouterr()
    assert "A simple test skill for validating the meta.create" in captured.out


if __name__ == "__main__":
    pytest.main([__file__, "-v"])