From 5a32a6e3b6161fcbefdf55d160da1853e78b104d Mon Sep 17 00:00:00 2001 From: Zhongwei Li Date: Sat, 29 Nov 2025 18:48:35 +0800 Subject: [PATCH] Initial commit --- .claude-plugin/plugin.json | 12 ++ README.md | 3 + plugin.lock.json | 53 +++++ skills/local-brain/SKILL.md | 185 ++++++++++++++++++ .../local-brain/references/CLI_REFERENCE.md | 57 ++++++ skills/local-brain/references/HOOKS.md | 97 +++++++++ 6 files changed, 407 insertions(+) create mode 100644 .claude-plugin/plugin.json create mode 100644 README.md create mode 100644 plugin.lock.json create mode 100644 skills/local-brain/SKILL.md create mode 100644 skills/local-brain/references/CLI_REFERENCE.md create mode 100644 skills/local-brain/references/HOOKS.md diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json new file mode 100644 index 0000000..fefced7 --- /dev/null +++ b/.claude-plugin/plugin.json @@ -0,0 +1,12 @@ +{ + "name": "local-brain", + "description": "Offload routine tasks (code review, doc analysis, planning) to local Ollama LLM models", + "version": "0.1.0", + "author": { + "name": "Ismael Martinez Ramos", + "email": "ismaelmartinez@gmail.com" + }, + "skills": [ + "./skills" + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..68247bf --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# local-brain + +Offload routine tasks (code review, doc analysis, planning) to local Ollama LLM models diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..1e0c4bd --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,53 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:IsmaelMartinez/local-brain:local-brain", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "74a529386f29f60c6b02cff4a601e82711396c2e", + "treeHash": "1d6c125098995655f04c75eb801e354b900f139d39e7484a344ee0e7904d1c7b", + "generatedAt": "2025-11-28T10:11:42.527122Z", + "toolVersion": 
"publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "local-brain", + "description": "Offload routine tasks (code review, doc analysis, planning) to local Ollama LLM models", + "version": "0.1.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "d1c84cf17936e832e15d06af6c3aa33065a4ba3b4725e1e2aef79a19a75a306a" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "ee9e366bb22c5a1a038a3262e8e2e253b73d4e7e74fc0c20aeaa64de78df955c" + }, + { + "path": "skills/local-brain/SKILL.md", + "sha256": "ae7e451d7ffa6ad1fba19811f1c5207e242f3b94781f1943425d2cf04a9eb788" + }, + { + "path": "skills/local-brain/references/CLI_REFERENCE.md", + "sha256": "bea3765a6f2ee6eeaf40b81ad68c033f6b7bb6d5091348ba6077bcc09085b7ba" + }, + { + "path": "skills/local-brain/references/HOOKS.md", + "sha256": "6f154a51f33850f564151d74cedec4ee897ffa797ff24527d62ff5701a44ef2f" + } + ], + "dirSha256": "1d6c125098995655f04c75eb801e354b900f139d39e7484a344ee0e7904d1c7b" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/local-brain/SKILL.md b/skills/local-brain/SKILL.md new file mode 100644 index 0000000..30142e3 --- /dev/null +++ b/skills/local-brain/SKILL.md @@ -0,0 +1,185 @@ +--- +name: local-brain +description: Delegate code reviews, document analysis, and planning tasks to local Ollama LLM models to reduce context usage. Supports lightweight hooks (ai, ai-cmd, ai-explain) for quick operations and heavyweight agent for multi-file reviews. Use when users request code reviews, design document summaries, ticket/issue triage, documentation analysis, planning, or routine pattern matching. Ideal for routine analysis that doesn't require cloud-scale reasoning. 
Do NOT use for complex multi-step reasoning requiring extensive codebase context or security-critical decisions. +--- + +# Local Brain - Context Offloading Skill + +Tiered system for offloading work to local Ollama models, preserving main agent context. + +## Tiers + +**Tier 1 - Hooks** (fastest, direct bash): +- `ai` - Quick Q&A +- `ai-cmd` - Command generation +- `ai-explain` - Explain last command + +**Tier 2 - local-brain binary** (structured reviews): +- Single/multiple file reviews +- Directory reviews with patterns +- Git diff reviews +- Structured Markdown output + +**Tier 3 - Subagent** (heavyweight, multi-file): +- Orchestrates multiple local-brain calls +- Handles complex multi-file analysis +- Coordinates multiple review tasks + +## Decision Logic + +Use this flowchart to select the right tier: + +``` +User request + ↓ +Is it a quick question/explanation? + → YES: Use Tier 1 (hooks) + → NO: Continue + ↓ + Is it 1-3 files for review? + → YES: Use Tier 2 (local-brain binary directly) + → NO: Continue + ↓ + Multiple files OR multiple review tasks? + → YES: Use Tier 3 (spawn subagent) +``` + +## Prerequisites + +- **Ollama** running locally with at least one model +- **local-brain** binary installed +- **Hooks** defined in `~/.zshrc` (ai, ai-cmd, ai-explain) + +Check prerequisites: `which local-brain && ollama ps` + +See [CLI_REFERENCE.md](references/CLI_REFERENCE.md) for installation and [HOOKS.md](references/HOOKS.md) for hook details. + +## Tier 1: Lightweight Hooks + +### When to Use +- Quick factual questions +- Command generation +- Explaining last command/output +- NO file reading needed + +### Usage + +**Quick Q&A:** +```bash +ai "brief question" +``` + +**Command generation:** +```bash +ai-cmd "task description" +``` + +**Explain last command:** +```bash +ai-explain +``` + +See [HOOKS.md](references/HOOKS.md) for detailed hook documentation. 
+ +## Tier 2: Direct local-brain Binary + +### When to Use +- Review 1-3 specific files +- Single directory review +- Single git diff review +- Want structured Markdown output + +### Usage + +**IMPORTANT:** Do NOT read file contents first - that defeats the purpose of context offloading. + +1. Verify files exist: `ls path/to/file` (do NOT use Read tool) +2. Run local-brain directly: + +```bash +# Single file +local-brain --files path/to/file + +# Multiple files +local-brain --files path/file1,path/file2 + +# Directory +local-brain --dir src --pattern "*.rs" + +# Git diff +local-brain --git-diff + +# With task type +local-brain --task quick-review --files path/to/file +``` + +3. Parse and present the Markdown output sections: + - Issues Found + - Simplifications + - Consider Later + - Other Observations + +## Tier 3: Heavyweight Subagent + +### When to Use +- Multiple directories to review +- Multiple separate review tasks +- Need to coordinate multiple local-brain calls +- Complex multi-step analysis + +### Usage + +Spawn subagent using Task tool with `subagent_type=general-purpose` and `model=haiku`: + +**Example prompt:** +``` +Review multiple files using local-brain without reading them into context. + +IMPORTANT: Do NOT read file contents - offload to local-brain. + +Prerequisites verified: +- local-brain: [path] +- Ollama: [status] + +Tasks: +1. Review [file1] with local-brain --files [file1] +2. Review [file2] with local-brain --files [file2] +3. Review [dir] with local-brain --dir [dir] --pattern "*.ext" + +For each review: +- Execute local-brain command +- Parse Markdown output +- Extract key findings + +Return consolidated summary: +1. Critical issues across all files +2. Common patterns found +3. Recommended priority actions + +Return complete analysis in final message. +``` + +### Subagent Responsibilities +1. Execute multiple local-brain commands +2. Parse each Markdown output +3. Consolidate findings +4. 
Return structured summary + +## Output Handling + +All tiers produce different outputs: + +**Tier 1 (hooks):** Plain text responses +**Tier 2 (binary):** Structured Markdown with sections +**Tier 3 (subagent):** Consolidated cross-file analysis + +After receiving results: +- Highlight critical items from "Issues Found" +- Summarize simplification opportunities +- Distinguish urgent vs. later improvements +- Ask if user wants to address specific findings + +## References + +- [CLI_REFERENCE.md](references/CLI_REFERENCE.md) - Installation, flags, troubleshooting +- [HOOKS.md](references/HOOKS.md) - Detailed hook documentation and usage diff --git a/skills/local-brain/references/CLI_REFERENCE.md b/skills/local-brain/references/CLI_REFERENCE.md new file mode 100644 index 0000000..93f862b --- /dev/null +++ b/skills/local-brain/references/CLI_REFERENCE.md @@ -0,0 +1,57 @@ +# Local Brain CLI Reference + +## Installation + +```bash +# Via cargo-binstall (recommended) +cargo binstall local-brain + +# Or from source +cargo install --git https://github.com/IsmaelMartinez/local-brain +``` + +## Command Line Flags + +### Input Modes + +- `--files <files>` - Comma-separated list of files to review +- `--git-diff` - Review all changed files in git working directory +- `--dir <dir> --pattern <pattern>` - Review files in directory matching glob pattern + +### Optional Flags + +- `--task <task>` - Task-based model selection: quick-review, thorough-review, security, documentation, architecture, refactoring +- `--model <model>` - Override default model (e.g., qwen2.5-coder:3b) +- `--kind <kind>` - Document type: code, design-doc, ticket, other +- `--review-focus <focus>` - Review focus: refactoring, readability, performance, risk, general +- `--dry-run` - Test without calling Ollama (validate inputs only) + +## Environment Variables + +- `OLLAMA_HOST` - Ollama server URL (default: `http://localhost:11434`) +- `MODEL_NAME` - Default model to use + +## Output Format + +Structured Markdown with sections: +- **Issues Found** - Problems 
requiring attention (with line numbers) +- **Simplifications** - Opportunities to reduce complexity +- **Consider Later** - Non-critical improvements +- **Other Observations** - General notes + +## Troubleshooting + +### Ollama Not Running +``` +Error: Failed to send request to Ollama +``` +Start Ollama: `ollama serve` + +### Model Not Found +``` +Error: model 'xyz' not found +``` +Pull a model: `ollama pull qwen2.5-coder:3b` + +### No Output +Ensure Ollama has enough RAM for the model. Check `ollama ps` for running models. diff --git a/skills/local-brain/references/HOOKS.md b/skills/local-brain/references/HOOKS.md new file mode 100644 index 0000000..cd3d132 --- /dev/null +++ b/skills/local-brain/references/HOOKS.md @@ -0,0 +1,97 @@ +# Local AI Hooks + +Lightweight shell functions for quick AI operations. Defined in `~/.zshrc`. + +## Available Hooks + +### ai - Concise Q&A + +Quick, concise question answering using local Ollama model. + +**Usage:** +```bash +ai "your question here" +``` + +**Example:** +```bash +ai "what does the grep -r flag do?" +ai "what is a closure in rust?" +``` + +**Output:** Concise, direct answers without lengthy explanations + +**When to use:** +- Quick factual questions +- Need brief, to-the-point answers +- Want to minimize token usage + +### ai-cmd - Command Generation + +Generate shell commands from natural language task descriptions. + +**Usage:** +```bash +ai-cmd "what you want to do" +``` + +**Example:** +```bash +ai-cmd "find all rust files modified in the last week" +ai-cmd "count lines in all python files" +``` + +**Output:** Shell commands only, no commentary + +**When to use:** +- Need shell command for a specific task +- Don't remember exact syntax +- Want safe command suggestions + +### ai-explain - Detailed Explanation + +Explain the last command, or provide detailed explanation of a given string. 
+ +**Usage:** +```bash +# Explain last command and recent output +ai-explain + +# Explain a specific string (code, concept, etc) +ai-explain "string to explain" +``` + +**Example:** +```bash +# Explain last command +ai-explain + +# Explain a code pattern +ai-explain "async/await in Rust" + +# Explain an error message +ai-explain "connection refused on port 8080" +``` + +**Output:** Detailed technical explanation with context, implications, and solutions + +**When to use:** +- Understand what last command did or what went wrong +- Need detailed explanation of a concept or error +- Want suggestions for fixing/improving + +## Default Model + +All hooks use: `deepseek-coder-v2-8k` (configurable via `OLLAMA_MODEL` env var) + +## Decision Matrix: Hooks vs local-brain Binary + +| Scenario | Tool | Reason | +|----------|------|--------| +| Quick question | `ai` hook | Fastest, no file reading | +| Generate command | `ai-cmd` hook | Direct shell output | +| Explain last command | `ai-explain` hook | Auto-captures context | +| Review single file | `local-brain` binary | Structured output | +| Review multiple files | `local-brain` binary | Handles multiple files | +| Review directory | `local-brain` binary | Directory traversal | +| Git diff review | `local-brain` binary | Git integration |