Initial commit

Zhongwei Li
2025-11-29 18:26:08 +08:00
commit 8f22ddf339
295 changed files with 59710 additions and 0 deletions

skills/api.test/README.md

@@ -0,0 +1,83 @@
# api.test
Test REST API endpoints by executing HTTP requests and validating responses against expected outcomes
## Overview
**Purpose:** Test REST API endpoints by executing HTTP requests and validating responses against expected outcomes
**Command:** `/api/test`
## Usage
### Basic Usage
```bash
python3 skills/api.test/api_test.py
```
### With Arguments
```bash
python3 skills/api.test/api_test.py \
  --api-spec-path "value" \
  --base-url "value" \
  --test-scenarios-path-optional "value" \
  --auth-config-path-optional "value" \
  --output-format json
```
## Inputs
- **api_spec_path**
- **base_url**
- **test_scenarios_path (optional)**
- **auth_config_path (optional)**
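The manifest does not fix a format for the scenario or auth-config files. A plausible shape for a `test_scenarios_path` file, sketched in YAML with purely illustrative field names, might be:
```yaml
# Hypothetical scenario file -- field names are illustrative, not a spec
scenarios:
  - name: list-users
    method: GET
    path: /users
    expect_status: 200
    max_ms: 500          # fail if the response takes longer than this
  - name: create-user
    method: POST
    path: /users
    body: {name: "Ada"}
    expect_status: 201
```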
## Outputs
- **test_results.json**
- **test_report.html**
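Per the implementation notes below, `test_results.json` should carry totals, per-test details, and coverage. The placeholder handler does not emit it yet; an assumed shape (all values are dummies) could be:
```json
{
  "total": 2,
  "passed": 1,
  "failed": 1,
  "results": [
    {"name": "list-users", "ok": true, "status_code": 200, "elapsed_ms": 42}
  ],
  "coverage_percent": 50.0
}
```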
## Artifact Metadata
### Produces
- `test-result`
- `test-report`
## Permissions
- `network:http`
- `filesystem:read`
- `filesystem:write`
## Implementation Notes
Support multiple HTTP methods: GET, POST, PUT, PATCH, DELETE, HEAD, OPTIONS.

Test scenarios should validate:
- Response status codes
- Response headers
- Response body structure and content
- Response time/performance
- Authentication/authorization
- Error handling

Features:
- Load test scenarios from OpenAPI/Swagger specs
- Support various authentication methods (Bearer, Basic, API Key, OAuth2)
- Execute tests in sequence or parallel
- Generate detailed HTML reports with pass/fail visualization
- Support environment variables for configuration
- Retry failed tests with exponential backoff
- Collect performance metrics (response time, throughput)

Output should include:
- Total tests run
- Passed/failed counts
- Individual test results with request/response details
- Performance statistics
- Coverage metrics (% of endpoints tested)
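As a rough sketch of the request/validation/backoff loop described above, assuming the `requests` library (not currently a dependency of the generated handler) and the hypothetical scenario format from the Inputs section:
```python
import time
import requests  # assumed dependency; the generated skill does not import it yet

def run_scenario(scenario: dict, base_url: str, max_retries: int = 3) -> dict:
    """Execute one hypothetical scenario and validate the response."""
    url = base_url.rstrip("/") + scenario["path"]
    for attempt in range(max_retries):
        try:
            resp = requests.request(
                scenario.get("method", "GET"),
                url,
                headers=scenario.get("headers"),
                json=scenario.get("body"),
                timeout=scenario.get("timeout", 10),
            )
            break
        except requests.RequestException:
            if attempt == max_retries - 1:
                raise
            time.sleep(2 ** attempt)  # exponential backoff between attempts

    failures = []
    # Validate status code, headers, and response time against expectations
    if resp.status_code != scenario.get("expect_status", 200):
        failures.append(f"unexpected status {resp.status_code}")
    for name, value in scenario.get("expect_headers", {}).items():
        if resp.headers.get(name) != value:
            failures.append(f"header mismatch: {name}")
    elapsed_ms = resp.elapsed.total_seconds() * 1000
    if elapsed_ms > scenario.get("max_ms", 5000):
        failures.append(f"slow response ({elapsed_ms:.0f} ms)")
    return {"ok": not failures, "failures": failures, "elapsed_ms": elapsed_ms}
```
A full implementation would also check the response body against an expected structure and aggregate these per-scenario dicts into `test_results.json`.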
## Integration
This skill can be used in agents by including it in `skills_available`:
```yaml
name: my.agent
skills_available:
- api.test
```
## Testing
Run tests with:
```bash
pytest skills/api.test/test_api_test.py -v
```
## Created By
This skill was generated by **meta.skill**, the skill creator meta-agent.
---
*Part of the Betty Framework*


@@ -0,0 +1 @@
# Auto-generated package initializer for skills.

skills/api.test/api_test.py

@@ -0,0 +1,120 @@
#!/usr/bin/env python3
"""
api.test - Test REST API endpoints by executing HTTP requests and validating responses against expected outcomes

Generated by meta.skill
"""

import os
import sys
import json
import yaml
from pathlib import Path
from typing import Dict, List, Any, Optional

from betty.config import BASE_DIR
from betty.logging_utils import setup_logger

logger = setup_logger(__name__)


class ApiTest:
    """
    Test REST API endpoints by executing HTTP requests and validating
    responses against expected outcomes
    """

    def __init__(self, base_dir: str = BASE_DIR):
        """Initialize skill"""
        self.base_dir = Path(base_dir)

    def execute(
        self,
        api_spec_path: Optional[str] = None,
        base_url: Optional[str] = None,
        test_scenarios_path_optional: Optional[str] = None,
        auth_config_path_optional: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Execute the skill

        Returns:
            Dict with execution results
        """
        try:
            logger.info("Executing api.test...")

            # TODO: Implement skill logic here
            # Implementation notes:
            # - Support multiple HTTP methods: GET, POST, PUT, PATCH,
            #   DELETE, HEAD, OPTIONS
            # - Test scenarios should validate: response status codes,
            #   headers, body structure and content, response
            #   time/performance, authentication/authorization, error handling
            # - Features: load test scenarios from OpenAPI/Swagger specs;
            #   support Bearer, Basic, API Key, and OAuth2 authentication;
            #   execute tests in sequence or parallel; generate detailed HTML
            #   reports with pass/fail visualization; support environment
            #   variables for configuration; retry failed tests with
            #   exponential backoff; collect performance metrics
            #   (response time, throughput)
            # - Output should include: total tests run, passed/failed counts,
            #   individual test results with request/response details,
            #   performance statistics, coverage metrics (% of endpoints tested)

            # Placeholder implementation
            result = {
                "ok": True,
                "status": "success",
                "message": "Skill executed successfully"
            }

            logger.info("Skill completed successfully")
            return result

        except Exception as e:
            logger.error(f"Error executing skill: {e}")
            return {
                "ok": False,
                "status": "failed",
                "error": str(e)
            }


def main():
    """CLI entry point"""
    import argparse

    parser = argparse.ArgumentParser(
        description="Test REST API endpoints by executing HTTP requests and validating responses against expected outcomes"
    )
    parser.add_argument("--api-spec-path", help="api_spec_path")
    parser.add_argument("--base-url", help="base_url")
    parser.add_argument(
        "--test-scenarios-path-optional",
        help="test_scenarios_path (optional)"
    )
    parser.add_argument(
        "--auth-config-path-optional",
        help="auth_config_path (optional)"
    )
    parser.add_argument(
        "--output-format",
        choices=["json", "yaml"],
        default="json",
        help="Output format"
    )
    args = parser.parse_args()

    # Create skill instance
    skill = ApiTest()

    # Execute skill
    result = skill.execute(
        api_spec_path=args.api_spec_path,
        base_url=args.base_url,
        test_scenarios_path_optional=args.test_scenarios_path_optional,
        auth_config_path_optional=args.auth_config_path_optional,
    )

    # Output result
    if args.output_format == "json":
        print(json.dumps(result, indent=2))
    else:
        print(yaml.dump(result, default_flow_style=False))

    # Exit with appropriate code
    sys.exit(0 if result.get("ok") else 1)


if __name__ == "__main__":
    main()


@@ -0,0 +1,27 @@
name: api.test
version: 0.1.0
description: Test REST API endpoints by executing HTTP requests and validating responses
  against expected outcomes
inputs:
  - api_spec_path
  - base_url
  - test_scenarios_path (optional)
  - auth_config_path (optional)
outputs:
  - test_results.json
  - test_report.html
status: active
permissions:
  - network:http
  - filesystem:read
  - filesystem:write
entrypoints:
  - command: /api/test
    handler: api_test.py
    runtime: python
    description: Test REST API endpoints by executing HTTP requests and validating responses
      against expected outcomes
artifact_metadata:
  produces:
    - type: test-result
    - type: test-report


@@ -0,0 +1,62 @@
#!/usr/bin/env python3
"""
Tests for api.test

Generated by meta.skill
"""

import pytest
import sys
import os
from pathlib import Path

# The skill directory name (skills/api.test) contains a dot, so it cannot be
# imported as a regular package; add the skill directory itself to the path
# and import the module directly.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

import api_test


class TestApiTest:
    """Tests for ApiTest"""

    def setup_method(self):
        """Setup test fixtures"""
        self.skill = api_test.ApiTest()

    def test_initialization(self):
        """Test skill initializes correctly"""
        assert self.skill is not None
        assert self.skill.base_dir is not None

    def test_execute_basic(self):
        """Test basic execution"""
        result = self.skill.execute()
        assert result is not None
        assert "ok" in result
        assert "status" in result

    def test_execute_success(self):
        """Test successful execution"""
        result = self.skill.execute()
        assert result["ok"] is True
        assert result["status"] == "success"

    # TODO: Add more specific tests based on skill functionality


def test_cli_help(capsys):
    """Test CLI help message"""
    sys.argv = ["api_test.py", "--help"]
    with pytest.raises(SystemExit) as exc_info:
        api_test.main()
    assert exc_info.value.code == 0
    captured = capsys.readouterr()
    assert "Test REST API endpoints by executing HTTP requests" in captured.out


if __name__ == "__main__":
    pytest.main([__file__, "-v"])