commit 097db974e4613a2ca000c2857cd42f25b8bad192
Author: Zhongwei Li
Date:   Sun Nov 30 08:42:37 2025 +0800

    Initial commit

diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json
new file mode 100644
index 0000000..3e57052
--- /dev/null
+++ b/.claude-plugin/plugin.json
@@ -0,0 +1,12 @@
+{
+  "name": "mcp-skill-creator",
+  "description": "Create workflow-optimized skills from MCP servers. Transform MCP tools into personalized skills with progressive disclosure, parallel execution, and embedded user preferences. Based on Anthropic's MCP + Code Execution best practices.",
+  "version": "0.0.0-2025.11.28",
+  "author": {
+    "name": "Meta-Skills Collection",
+    "email": "onlrrr@gmail.com"
+  },
+  "skills": [
+    "./skills/mcp-skill-creator"
+  ]
+}
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3f5b0da
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+# mcp-skill-creator
+
+Create workflow-optimized skills from MCP servers. Transform MCP tools into personalized skills with progressive disclosure, parallel execution, and embedded user preferences. Based on Anthropic's MCP + Code Execution best practices.
diff --git a/plugin.lock.json b/plugin.lock.json
new file mode 100644
index 0000000..1ff0619
--- /dev/null
+++ b/plugin.lock.json
@@ -0,0 +1,68 @@
+{
+  "$schema": "internal://schemas/plugin.lock.v1.json",
+  "pluginId": "gh:nemori-ai/skills:mcp-skill-creator",
+  "normalized": {
+    "repo": null,
+    "ref": "refs/tags/v20251128.0",
+    "commit": "4fe9a796199a264d199fc49b31bce34dbec94653",
+    "treeHash": "f11b997af2799468ec157fe6e16c4c56e08a75c3c7781d3822ed64858bcbe35a",
+    "generatedAt": "2025-11-28T10:27:18.244532Z",
+    "toolVersion": "publish_plugins.py@0.2.0"
+  },
+  "origin": {
+    "remote": "git@github.com:zhongweili/42plugin-data.git",
+    "branch": "master",
+    "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
+    "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
+  },
+  "manifest": {
+    "name": "mcp-skill-creator",
+    "description": "Create workflow-optimized skills from MCP servers. Transform MCP tools into personalized skills with progressive disclosure, parallel execution, and embedded user preferences. Based on Anthropic's MCP + Code Execution best practices."
+  },
+  "content": {
+    "files": [
+      {
+        "path": "README.md",
+        "sha256": "a7e9a24e4a055c659a0857d8437a38fc299b73344659633b61087bbdef79c02b"
+      },
+      {
+        "path": ".claude-plugin/plugin.json",
+        "sha256": "88b82153dbc5f6516dca023720700ef52a38fc866a080fe25dfe2e32abe8b068"
+      },
+      {
+        "path": "skills/mcp-skill-creator/SKILL.md",
+        "sha256": "f1a46837885844641245978d6036c8663ade8deb914b8a5d3366802ddacb039f"
+      },
+      {
+        "path": "skills/mcp-skill-creator/LICENSE.txt",
+        "sha256": "fd6432f2e4aef05eeb89a06d97a65b90cd5b012555f7656d28b7f30b509f0739"
+      },
+      {
+        "path": "skills/mcp-skill-creator/references/example-config.json",
+        "sha256": "49534d18c7ab8c21985018adc9e94532b0991972842b51c7a7c599f8bcb7da7f"
+      },
+      {
+        "path": "skills/mcp-skill-creator/references/quick-start.md",
+        "sha256": "94fd4497c98074079ecb009d0960b1b071c41086e3735ca0814b307695ec12b1"
+      },
+      {
+        "path": "skills/mcp-skill-creator/references/mcp-best-practices.md",
+        "sha256": "47bb07ea3015965efc2ce78857fc6c2007bd2932d9cec567fc9f70087c94fe95"
+      },
+      {
+        "path": "skills/mcp-skill-creator/scripts/mcp_introspector.py",
+        "sha256": "f0f15f9933c55a9c3d61e0fe2b54c96dccd487d76603b410ac363d5a56fa0ab6"
+      },
+      {
+        "path": "skills/mcp-skill-creator/scripts/generate_mcp_wrappers.py",
+        "sha256": "b6403c14c1c4b6c5da970e0b85a2a6db27fe3b9055e6fd0443fed79fa340bc0d"
+      }
+    ],
+    "dirSha256": "f11b997af2799468ec157fe6e16c4c56e08a75c3c7781d3822ed64858bcbe35a"
+  },
+  "security": {
+    "scannedAt": null,
+    "scannerVersion": null,
+    "flags": []
+  }
+}
\ No newline at end of file
diff --git a/skills/mcp-skill-creator/LICENSE.txt b/skills/mcp-skill-creator/LICENSE.txt
new file mode 100644
index 0000000..b3f929d
--- /dev/null
+++ b/skills/mcp-skill-creator/LICENSE.txt
@@ -0,0 +1,28 @@
+MIT License
+
+Copyright (c) 2025 MCP Skill Creator Contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---
+
+This skill implements concepts from Anthropic's MCP engineering blog post:
+https://www.anthropic.com/engineering/code-execution-with-mcp
+
+The MCP (Model Context Protocol) is an open standard developed by Anthropic.
diff --git a/skills/mcp-skill-creator/SKILL.md b/skills/mcp-skill-creator/SKILL.md
new file mode 100644
index 0000000..cea9cd5
--- /dev/null
+++ b/skills/mcp-skill-creator/SKILL.md
@@ -0,0 +1,672 @@
+---
+name: mcp-skill-creator
+description: Meta-skill for creating workflow-optimized skills from MCP servers. Use when users want to create a custom skill that integrates one or more MCP servers into a specialized workflow. The user provides MCP server configurations and describes their work scenario (workflow, preferences, SOPs), and this skill generates a new skill with optimized scripts following Anthropic's MCP + code execution best practices.
+---
+
+# MCP-Powered Skill Creator
+
+This meta-skill creates workflow-optimized skills from MCP servers using code execution patterns inspired by [Anthropic's MCP engineering practices](https://www.anthropic.com/engineering/code-execution-with-mcp).
+
+## Core Concept
+
+Transform MCP servers into specialized, personalized workflow skills by:
+
+1. **Progressive Disclosure**: Generate code APIs for MCP tools, loaded on-demand (not all upfront)
+2. **Context Efficiency**: Process data in execution environment, minimize token usage
+3. **Workflow Optimization**: Combine MCP calls with parallel execution, filtering, control flow
+4. **Personalization**: Embed user preferences, SOPs, and domain knowledge into the skill
+
+## When to Use This Skill
+
+Use this skill when a user wants to:
+- Create a custom skill from one or more MCP servers
+- Optimize a workflow that involves multiple MCP tool calls
+- Build reusable automation scripts for specific work scenarios
+- Capture personal SOPs and preferences into a skill
+
+## ⚠️ IMPORTANT: Before You Start
+
+**ALWAYS check and install dependencies FIRST before doing anything else:**
+
+```bash
+python3 -c "import mcp; print('✓ MCP SDK is installed')" 2>/dev/null || pip3 install mcp --break-system-packages
+```
+
+**Automatic Installation Process:**
+1. First, check if MCP SDK is installed
+2. If not installed, **automatically install it** using `pip3 install mcp --break-system-packages`
+3. Verify installation succeeded before continuing
+4. Then proceed with skill creation
+
+**DO NOT ask the user to manually install dependencies** - you should handle this automatically as part of the skill creation process.
+
+**Why this matters**: The introspector and generated scripts require the `mcp` package. Installing it upfront ensures a smooth workflow.
+
+## Skill Creation Process
+
+Follow these steps to create an MCP-powered skill. This process combines programmatic MCP infrastructure generation with LLM-driven skill design, following skill-creator principles.
+
+### Overview of Steps
+
+0. **Prerequisites** - Install required dependencies
+1. **Gather Input** - Collect MCP servers and workflow description from user
+2. **Generate MCP Infrastructure** - Use scripts to introspect servers and create wrappers (programmatic)
+3. **Understand the Workflow** - Analyze user's scenario with concrete examples (LLM-driven)
+4. **Plan Skill Contents** - Determine what scripts, references, and guidance to include (LLM-driven)
+5. **Implement the Skill** - Write workflow scripts and SKILL.md with embedded preferences (LLM-driven)
+6. **Package and Deliver** - Create distributable .skill file
+
+### Step 0: Prerequisites (Automatic)
+
+**You should automatically check and install the MCP SDK if needed:**
+
+```bash
+python3 -c "import mcp; print('✓ MCP SDK is installed')" 2>/dev/null || pip3 install mcp --break-system-packages
+```
+
+**Process:**
+1. Check if MCP SDK is already installed
+2. If not, install it automatically with `pip3 install mcp --break-system-packages`
+3. Verify installation succeeded
+4. Inform the user that dependencies have been installed
+
+**Why needed**: The introspector and generated scripts use the `mcp` package to connect to MCP servers.
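+
+For reference, the same check can also be done from Python at the top of a generated script. This is a minimal sketch rather than part of the generated infrastructure; it assumes `pip` is available to the active interpreter and that `--break-system-packages` is acceptable, exactly as in the bash one-liner above:
+
+```python
+import importlib
+import importlib.util
+import subprocess
+import sys
+
+
+def ensure_mcp_sdk() -> None:
+    """Check for the MCP SDK and install it automatically if missing."""
+    if importlib.util.find_spec("mcp") is not None:
+        print("✓ MCP SDK is installed")
+        return
+    # Mirror the bash one-liner: install without asking the user to do it manually.
+    subprocess.run(
+        [sys.executable, "-m", "pip", "install", "mcp", "--break-system-packages"],
+        check=True,
+    )
+    importlib.invalidate_caches()  # make the freshly installed package importable
+    if importlib.util.find_spec("mcp") is None:
+        raise RuntimeError("MCP SDK installation did not succeed")
+    print("✓ MCP SDK installed")
+
+
+if __name__ == "__main__":
+    ensure_mcp_sdk()
+```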
+
+**DO NOT ask the user to manually install** - handle this automatically as part of the workflow.
+
+### Step 1: Gather Input
+
+Before starting, collect the following from the user:
+
+**MCP Server Configurations** (required):
+```json
+{
+  "mcp_servers": [
+    {
+      "name": "server-name",
+      "command": ["npx", "-y", "@modelcontextprotocol/server-..."]
+    }
+  ]
+}
+```
+
+**Workflow Description** (required):
+- Clear description of the workflow steps
+- Can be a numbered list, a sequential narrative, or structured steps
+- Example: "First I visit the official website, then check ProductHunt, then search Twitter/Reddit, finally generate a report"
+
+**User Preferences** (optional):
+- How they like to work
+- What data they prioritize
+- Quality standards
+- Example: "I prefer quantitative metrics over qualitative descriptions"
+
+**Standard Operating Procedures** (optional):
+- Company-specific practices
+- Domain knowledge
+- Best practices
+- Example: "Always verify information from at least 3 sources"
+
+If the user provides a single configuration file, parse it to extract these components.
+
+### Step 2: Generate MCP Infrastructure (Programmatic)
+
+This step generates the MCP client infrastructure and tool discovery utilities.
+
+#### 2.1 Introspect MCP Servers
+
+Use `scripts/mcp_introspector.py` to discover available tools:
+
+```bash
+# Create MCP config file in skill directory
+echo '{
+  "servers": [
+    {
+      "name": "filesystem",
+      "command": ["npx", "-y", "@modelcontextprotocol/server-filesystem", "/path/to/dir"]
+    }
+  ]
+}' > <skill_dir>/mcp_config.json
+
+# Run introspection
+python scripts/mcp_introspector.py <skill_dir>/mcp_config.json introspection.json
+```
+
+This produces a JSON file with all available tools, their parameters, and descriptions.
+
+#### 2.2 Generate MCP Client and Tool Discovery
+
+Use `scripts/generate_mcp_wrappers.py` to create the infrastructure:
+
+```bash
+python scripts/generate_mcp_wrappers.py introspection.json
+```
+
+This creates:
+- `scripts/mcp_client.py` - **Working** MCP client with proper connection management
+- `scripts/list_mcp_tools.py` - **Dynamic tool discovery** (Progressive Disclosure)
+- `scripts/tools/<server_name>/` - (Optional) Type-safe wrappers for each tool
+
+**Generated structure**:
+```
+<skill_dir>/
+├── mcp_config.json          # Server configuration
+├── scripts/
+│   ├── mcp_client.py        # ✅ Working implementation
+│   ├── list_mcp_tools.py    # 🆕 View tool docs on-demand
+│   └── workflows/           # (You'll create these)
+│       └── your_workflow.py
+```
+
+#### 2.3 How to View MCP Tool Documentation
+
+**Progressive Disclosure** means tools are discovered on-demand, not pre-loaded. Three ways to view docs:
+
+1. **Dynamic Query** (Recommended):
+   ```bash
+   cd <skill_dir>/scripts
+   python list_mcp_tools.py
+   ```
+   Shows all available tools with parameters and descriptions.
+
+2. **Generate Static Reference**:
+   ```bash
+   python list_mcp_tools.py > references/mcp_tools_reference.txt
+   ```
+   Save for offline reference.
+
+3. **In SKILL.md**:
+   List only the most commonly used tools; full docs are available via method 1 or 2.
+
+**Key Insight**: You don't need wrapper files for each tool. Just use `call_mcp_tool()` directly:
+```python
+from mcp_client import call_mcp_tool
+
+result = await call_mcp_tool('filesystem', 'search_files', {
+    'path': '/path',
+    'pattern': 'myfile'
+})
+```
+
+### Step 3: Understand the Workflow (LLM-Driven)
+
+Now analyze the user's workflow description to understand what this skill needs to accomplish. Similar to skill-creator's Step 1.
+
+**Ask clarifying questions if needed**:
+- "What specific data sources do you use?"
+- "How do you handle cases where data is missing?"
+- "What does the final output look like?"
+- "Are there any steps that must happen sequentially vs in parallel?"
+
+**Identify workflow characteristics**:
+- Which steps are data fetching operations (candidates for parallelization)
+- Which steps are data processing (candidates for execution environment filtering)
+- Which steps have complex control flow (loops, conditionals, polling)
+- What intermediate state needs to be preserved
+
+**Example Analysis**:
+
+User says: "I research products by checking the official site, ProductHunt, Twitter, and Reddit, then create a report"
+
+Analysis:
+- 4 data fetch operations (parallel opportunity)
+- 1 aggregation step (data filtering opportunity)
+- 1 output generation (report creation)
+- Sequential dependency: Fetch all → Aggregate → Generate report
+
+### Step 4: Plan Skill Contents (LLM-Driven)
+
+Based on the workflow analysis, determine what to include in the skill. Follow skill-creator principles.
+
+#### 4.1 Decide on Scripts vs Guidance
+
+**Create scripts when**:
+- Same code would be rewritten repeatedly
+- Complex workflow with multiple MCP calls
+- Parallel execution can improve performance
+- Data filtering reduces context usage significantly
+- Polling/monitoring patterns
+
+**Use text guidance when**:
+- Workflow varies significantly each time
+- Simple single-tool operations
+- User needs flexibility in approach
+- Context helps more than code
+
+#### 4.2 Plan Script Structure
+
+For each workflow script to create, determine:
+
+**Script purpose**: What part of the workflow does it handle?
+
+**MCP tools needed**: Which tools from which servers?
+
+**Optimization patterns**:
+- Parallel execution: `asyncio.gather()` for independent fetches
+- Data filtering: Process in execution environment before returning
+- Control flow: Loops, conditionals, error handling
+- State persistence: Save intermediate results to filesystem
+
+**Parameters**: What inputs does the script need?
+
+**Output**: What does it return? (Prefer summaries over full data)
+
+#### 4.3 Plan SKILL.md Structure
+
+Determine what goes in SKILL.md:
+
+**Essential**:
+- Workflow overview (user's mental model)
+- When to use each script
+- User preferences (embedded as guidance)
+- SOPs (embedded as procedural instructions)
+- Available MCP tools (reference, not full docs)
+
+**Optional references/**:
+- Detailed MCP tool catalog
+- Complex schemas or API documentation
+- Additional examples
+- Troubleshooting guides
+
+### Step 5: Implement the Skill (LLM-Driven)
+
+Now create the actual skill files. This is where you write code and documentation.
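+
+Before the individual patterns in 5.1, here is a minimal end-to-end sketch of what a workflow script can look like when it calls the infrastructure generated in Step 2 directly. It assumes the generated `call_mcp_tool(server, tool, arguments)` API shown in section 2.3; the server names, tool names, and the assumption that each call returns a list of items are illustrative placeholders, not real MCP servers:
+
+```python
+import asyncio
+
+from mcp_client import call_mcp_tool  # generated in Step 2
+
+
+async def research_product(product_name: str) -> dict:
+    """Fetch two illustrative sources in parallel and return a compact summary."""
+    # Independent fetches run concurrently (Pattern 1 below covers this in detail).
+    twitter_raw, reddit_raw = await asyncio.gather(
+        call_mcp_tool("twitter", "search_tweets", {"query": product_name, "limit": 50}),
+        call_mcp_tool("reddit", "search_posts", {"query": product_name, "limit": 50}),
+    )
+
+    # Filter in the execution environment so raw results never enter model context.
+    mentions = [item for item in list(twitter_raw) + list(reddit_raw)
+                if product_name.lower() in str(item).lower()]
+
+    # Return a summary, not the raw data.
+    return {"total_mentions": len(mentions), "sample": mentions[:5]}
+
+
+if __name__ == "__main__":
+    print(asyncio.run(research_product("ExampleProduct")))
+```
+
+The patterns below refine this same shape: gather independent calls in parallel, filter locally, and return a summary instead of raw data.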
+
+#### 5.1 Write Workflow Scripts
+
+For each planned script, create `scripts/workflows/<workflow_name>.py`:
+
+**Follow these patterns from Anthropic's MCP best practices**:
+
+**Pattern 1: Parallel Fetch + Aggregate**
+```python
+async def research_pipeline(product_url: str, product_name: str) -> dict:
+    """Complete research workflow with parallel data gathering"""
+
+    # Parallel fetch from multiple sources
+    official_task = google_devtools.fetch_page(product_url)
+    twitter_task = x_com.search_tweets(f'"{product_name}"')
+    reddit_task = reddit.search_discussions(product_name)
+
+    # Execute concurrently (3x faster than sequential)
+    official, twitter, reddit = await asyncio.gather(
+        official_task, twitter_task, reddit_task
+    )
+
+    # Filter and aggregate in execution environment
+    # (keeps raw data out of context)
+    key_features = extract_features(official, top_n=10)
+    sentiment = analyze_sentiment([twitter, reddit])
+    highlights = extract_highlights(twitter + reddit, top_n=5)
+
+    # Return summary (not full data)
+    return {
+        'key_features': key_features,
+        'sentiment': sentiment,
+        'highlights': highlights,
+        'source_count': len(twitter) + len(reddit)
+    }
+```
+
+**Pattern 2: Polling/Monitoring**
+```python
+async def wait_for_deployment(channel: str, keyword: str, timeout: int = 300):
+    """Poll Slack channel for deployment completion"""
+    start = time.time()
+
+    while time.time() - start < timeout:
+        messages = await slack.get_channel_history(channel, limit=10)
+
+        if any(keyword in m['text'].lower() for m in messages):
+            return {'status': 'complete', 'message': messages[0]}
+
+        await asyncio.sleep(10)
+
+    return {'status': 'timeout'}
+```
+
+**Pattern 3: Bulk Processing**
+```python
+async def sync_contacts(sheet_id: str, crm_object: str):
+    """Sync contacts from sheet to CRM (privacy-preserving)"""
+
+    # Load data once
+    contacts = await google_sheets.get_sheet(sheet_id)
+
+    # Filter in execution environment (not in context)
+    valid = [c for c in contacts if validate_email(c['email'])]
+
+    # Batch update (PII never enters model context)
+    results = []
+    for batch in chunked(valid, batch_size=50):
+        batch_results = await asyncio.gather(*[
+            crm.update_record(crm_object, contact)
+            for contact in batch
+        ])
+        results.extend(batch_results)
+
+    # Return summary only
+    return {
+        'processed': len(valid),
+        'successful': sum(1 for r in results if r['success'])
+    }
+```
+
+**Key Principles for Scripts**:
+- Use `async`/`await` for IO-bound MCP calls
+- Combine related operations into single scripts
+- Filter/aggregate data before returning to model
+- Return summaries, not raw data
+- Include type hints and docstrings
+- Add helper functions for data processing
+
+#### 5.2 Write SKILL.md
+
+Create the SKILL.md following skill-creator structure, with MCP-specific additions.
+
+**YAML Frontmatter**:
+```yaml
+---
+name: [skill-name]
+description: [What the skill does and when to use it]
+---
+```
+
+**Body Structure**:
+
+```markdown
+# [Skill Name]
+
+[Overview of what this skill does]
+
+## Prerequisites
+
+This skill requires the MCP SDK. **The scripts will automatically check and install it if needed.**
+
+If you want to manually verify or install:
+
+```bash
+python3 -c "import mcp; print('✓ MCP SDK ready!')" 2>/dev/null || pip3 install mcp --break-system-packages
+```
+
+**Why needed**: This skill uses MCP tools to [brief explanation of what MCP servers do]. The workflow scripts require the `mcp` package to connect to MCP servers.
+
+**Note**: When you run any workflow script, it will automatically check for MCP SDK and display a helpful error message if not installed.
+
+## Workflow Overview
+
+[User's workflow steps in their own language]
+
+[USER PREFERENCES - Embedded as guidance]
+When using this skill:
+- [Preference 1]
+- [Preference 2]
+
+[SOPs - Embedded as procedural instructions]
+Standard procedure:
+1. [SOP step 1]
+2. [SOP step 2]
+
+## Quick Start
+
+**Before running workflows, ensure MCP SDK is installed** (see Prerequisites above).
+
+[Simple example of using the main workflow script]
+
+## Available Workflows
+
+### [Primary Workflow Script]
+
+**Use when**: [Scenario]
+
+**Location**: `scripts/workflows/