Initial commit

Zhongwei Li
2025-11-29 18:26:40 +08:00
commit 4c495cb871
11 changed files with 1003 additions and 0 deletions

.claude-plugin/plugin.json Normal file

@@ -0,0 +1,14 @@
{
"name": "ultralytics-dev",
"description": "Auto-formatting hooks for Python, JavaScript, Markdown, and Bash. Includes MCP servers for Slack message search, MongoDB queries, and Linear tracking, with usage skills.",
"version": "1.2.2",
"author": {
"name": "Fatih Akyon"
},
"skills": [
"./skills"
],
"hooks": [
"./hooks"
]
}

README.md Normal file

@@ -0,0 +1,3 @@
# ultralytics-dev
Auto-formatting hooks for Python, JavaScript, Markdown, and Bash. Includes MCP servers for Slack message search, MongoDB queries, and Linear tracking, with usage skills.

hooks/hooks.json Normal file

@@ -0,0 +1,36 @@
{
"description": "Code formatting and quality hooks for Ultralytics development",
"hooks": {
"PostToolUse": [
{
"matcher": "Edit|MultiEdit|Write|Task",
"hooks": [
{
"type": "command",
"command": "file_path=$(jq -r '.tool_input.file_path // empty' 2>/dev/null); if [[ -n \"$file_path\" && -f \"$file_path\" ]]; then case \"$file_path\" in *.py|*.js|*.jsx|*.ts|*.tsx) if [[ \"$OSTYPE\" == \"darwin\"* ]]; then sed -i '' 's/^[[:space:]]*$//g' \"$file_path\" 2>/dev/null || true; else sed -i 's/^[[:space:]]*$//g' \"$file_path\" 2>/dev/null || true; fi ;; esac; fi"
},
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/format_python_docstrings.py"
},
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/python_code_quality.py"
},
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/prettier_formatting.py"
},
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/markdown_formatting.py"
},
{
"type": "command",
"command": "${CLAUDE_PLUGIN_ROOT}/hooks/scripts/bash_formatting.py"
}
]
}
]
}
}
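For reference, the inline sed command in the first hook strips whitespace from otherwise-empty lines in edited source files; a rough Python equivalent (illustrative only, not part of the plugin) would be:

```python
# Rough, illustrative Python equivalent of the inline sed expression above:
# lines containing only spaces or tabs are replaced with truly empty lines.
import re
from pathlib import Path

def clear_whitespace_only_lines(path: str) -> None:
    p = Path(path)
    p.write_text(re.sub(r"^[ \t]+$", "", p.read_text(), flags=re.MULTILINE))
```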

hooks/scripts/bash_formatting.py Normal file

@@ -0,0 +1,61 @@
#!/usr/bin/env python3
"""
PostToolUse hook: Auto-format Bash/Shell scripts with prettier-plugin-sh
"""
import json
import shutil
import subprocess
import sys
from pathlib import Path
def check_prettier_version() -> bool:
"""Check if prettier is installed and warn if version differs from 3.6.2."""
if not shutil.which('npx'):
return False
try:
result = subprocess.run(['npx', 'prettier', '--version'],
capture_output=True, text=True, check=False, timeout=5)
if result.returncode == 0:
version = result.stdout.strip()
if '3.6.2' not in version:
print(f"⚠️ Prettier version mismatch: expected 3.6.2, found {version}")
return True
except Exception:
pass
return False
def main():
try:
data = json.load(sys.stdin)
file_path = data.get("tool_input", {}).get("file_path", "")
if not file_path.endswith(('.sh', '.bash')):
sys.exit(0)
sh_file = Path(file_path)
if not sh_file.exists():
sys.exit(0)
# Check if prettier is available
if not check_prettier_version():
sys.exit(0)
        # Try prettier with prettier-plugin-sh; handle any failure gracefully.
        # Note: with shell=True the command must be a single string, otherwise
        # the plugin path and the $(npm root -g) expansion are silently lost.
        try:
            command = (
                "npx prettier --write --list-different --print-width 120 "
                "--plugin=$(npm root -g)/prettier-plugin-sh/lib/index.cjs "
                f'"{sh_file}"'
            )
            subprocess.run(command, shell=True, capture_output=True, check=False,
                           cwd=sh_file.parent, timeout=10)
        except Exception:
            pass  # Silently handle any failure (missing plugin, timeout, etc.)
except Exception:
pass
sys.exit(0)
if __name__ == "__main__":
main()
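Each hook script reads the PostToolUse payload from stdin, so one way to smoke-test a script locally is to pipe a payload into it. The payload shape below is assumed from the scripts' use of `tool_input.file_path`, and the file path is illustrative:

```python
# Hypothetical local smoke test: pipe a PostToolUse-style payload into the
# hook. The payload keys mirror what the scripts read; the path is made up.
import json
import subprocess

payload = json.dumps({"tool_input": {"file_path": "scripts/example.sh"}})
subprocess.run(
    ["python3", "hooks/scripts/bash_formatting.py"],
    input=payload,
    text=True,
    check=False,
)
```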

hooks/scripts/format_python_docstrings.py Normal file

@@ -0,0 +1,264 @@
#!/usr/bin/env python3
"""Format Python docstrings in Google style without external dependencies."""
from __future__ import annotations
import ast
import json
import re
import sys
import textwrap
from pathlib import Path
def is_google_docstring(docstring: str) -> bool:
"""Check if docstring is Google-style format."""
google_sections = (
'Args:', 'Arguments:', 'Attributes:', 'Example:', 'Examples:',
'Note:', 'Notes:', 'Returns:', 'Return:', 'Raises:', 'Raise:',
'Yields:', 'Yield:', 'References:', 'See Also:', 'Todo:', 'Todos:'
)
return any(f'\n {section}' in docstring for section in google_sections)
def wrap_text(text: str, width: int = 120, initial_indent: str = '', subsequent_indent: str = '') -> str:
"""Wrap text intelligently, preserving code blocks, tables, and lists."""
lines = text.split('\n')
result = []
in_code_block = False
for line in lines:
# Detect code blocks
if line.strip().startswith('```'):
in_code_block = not in_code_block
result.append(line)
continue
if in_code_block or line.startswith(' ' * 8) or line.startswith('\t'):
result.append(line)
continue
# Preserve table rows, lists, and tree diagrams
        if any(line.strip().startswith(x) for x in ['|', '-', '*', '+', '├', '└', '│']):
result.append(line)
continue
# Preserve URLs on their own
if re.match(r'^\s*(https?://|www\.)', line):
result.append(line)
continue
# Wrap regular text
if line.strip():
wrapped = textwrap.fill(
line.strip(),
width=width,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
break_long_words=False,
break_on_hyphens=False
)
result.append(wrapped)
else:
result.append('')
return '\n'.join(result)
def format_docstring(docstring: str) -> str:
"""Format a single docstring in Google style."""
if not docstring or not docstring.strip():
return docstring
if not is_google_docstring(docstring):
return docstring
lines = docstring.split('\n')
if not lines:
return docstring
# Extract content and indentation
indent = len(lines[0]) - len(lines[0].lstrip())
base_indent = ' ' * indent
# Process lines
result = []
i = 0
# Summary line
summary = lines[0].strip()
if summary:
# Capitalize first word if not URL
if summary and not summary[0].isupper() and not summary.startswith(('http', 'www', '@')):
summary = summary[0].upper() + summary[1:]
# Add period if missing
if summary and not summary.endswith(('.', '!', '?', ':')):
summary += '.'
result.append(base_indent + summary)
i = 1
# Skip blank lines after summary
while i < len(lines) and not lines[i].strip():
i += 1
# Process remaining sections
while i < len(lines):
line = lines[i]
section_match = re.match(r'^(\s*)([A-Za-z\s]+):\s*$', line)
if section_match:
# Section header
section_name = section_match.group(2).strip()
# Normalize section names
section_name = {
'Arguments': 'Args',
'Return': 'Returns',
'Yield': 'Yields',
'Raise': 'Raises',
'Example': 'Examples',
'Note': 'Notes',
'Todo': 'Todos',
'See Also': 'References'
}.get(section_name, section_name)
result.append(base_indent + section_name + ':')
i += 1
# Process section content
while i < len(lines):
line = lines[i]
if not line.strip():
result.append('')
i += 1
break
# Check if next section starts
if re.match(r'^(\s*)([A-Za-z\s]+):\s*$', line):
break
# Preserve indentation for parameters and code
if line.strip():
# Parameter line (name: description)
param_match = re.match(r'^(\s+)(\w+)\s*\(([^)]+)\):\s*(.*)$', line)
if param_match:
spaces, name, type_str, desc = param_match.groups()
param_indent = base_indent + ' '
result.append(f'{param_indent}{name} ({type_str}): {desc}')
else:
result.append(line)
else:
result.append(line)
i += 1
else:
result.append(line)
i += 1
# Remove trailing blank lines but keep structure
while result and not result[-1].strip():
result.pop()
return '\n'.join(result)
class DocstringVisitor(ast.NodeVisitor):
"""Find all docstrings in a Python file."""
def __init__(self):
self.docstrings = []
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
"""Visit function definitions."""
if ast.get_docstring(node):
self.docstrings.append((node.lineno - 1, ast.get_docstring(node)))
self.generic_visit(node)
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
"""Visit async function definitions."""
if ast.get_docstring(node):
self.docstrings.append((node.lineno - 1, ast.get_docstring(node)))
self.generic_visit(node)
def visit_ClassDef(self, node: ast.ClassDef) -> None:
"""Visit class definitions."""
if ast.get_docstring(node):
self.docstrings.append((node.lineno - 1, ast.get_docstring(node)))
self.generic_visit(node)
def format_python_file(content: str) -> str:
"""Format all docstrings in Python file content."""
try:
tree = ast.parse(content)
except SyntaxError:
return content
visitor = DocstringVisitor()
visitor.visit(tree)
if not visitor.docstrings:
return content
lines = content.split('\n')
# Format docstrings (iterate in reverse to maintain line numbers)
for line_num, docstring in reversed(visitor.docstrings):
formatted = format_docstring(docstring)
if formatted != docstring:
# Find and replace the docstring in the source
# This is a simplified approach - find the docstring literal in source
for i in range(line_num, min(line_num + 50, len(lines))):
if '"""' in lines[i] or "'''" in lines[i]:
quote = '"""' if '"""' in lines[i] else "'''"
# Simple replacement for single-line docstrings in source
if lines[i].count(quote) == 2:
indent = len(lines[i]) - len(lines[i].lstrip())
lines[i] = ' ' * indent + quote + formatted + quote
break
return '\n'.join(lines)
def read_python_path() -> Path | None:
"""Read the Python path from stdin payload.
Returns:
(Path | None): Python file path when present and valid.
"""
try:
data = json.load(sys.stdin)
except Exception:
return None
file_path = data.get("tool_input", {}).get("file_path", "")
path = Path(file_path) if file_path else None
if not path or path.suffix != ".py" or not path.exists():
return None
if any(p in path.parts for p in ['.venv', 'venv', 'site-packages', '__pycache__']):
return None
return path
def main() -> None:
"""Format Python docstrings in files."""
python_file = read_python_path()
if python_file:
try:
content = python_file.read_text()
formatted = format_python_file(content)
if formatted != content:
python_file.write_text(formatted)
print(f"Formatted: {python_file}")
except Exception as e:
# Block on unexpected errors during formatting
error_msg = f'ERROR formatting Python docstrings ❌ {python_file}: {e}'
print(error_msg, file=sys.stderr)
output = {
'systemMessage': f'Docstring formatting failed for {python_file.name}',
'hookSpecificOutput': {'hookEventName': 'PostToolUse', 'decision': 'block', 'reason': error_msg},
}
print(json.dumps(output))
sys.exit(2)
sys.exit(0)
if __name__ == '__main__':
main()
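A rough, illustrative before/after for the docstring formatter above (behavior inferred from the code; exact spacing may differ):

```python
# Illustrative input for format_docstring(): a Google-style docstring whose
# summary lacks capitalization and a period, and whose section header uses
# the "Arguments:" alias.
before = (
    "compute the sum of two numbers\n"
    "\n"
    "    Arguments:\n"
    "        a (int): first value\n"
    "        b (int): second value\n"
)
# format_docstring(before) capitalizes the summary, appends a trailing
# period, and normalizes the "Arguments:" header to "Args:".
```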

hooks/scripts/markdown_formatting.py Normal file

@@ -0,0 +1,296 @@
#!/usr/bin/env python3
"""
PostToolUse hook: Format Markdown files and embedded code blocks.
Inspired by https://github.com/ultralytics/actions/blob/main/actions/update_markdown_code_blocks.py
"""
from __future__ import annotations
import hashlib
import json
import re
import shutil
import subprocess
import sys
from pathlib import Path
from tempfile import TemporaryDirectory
PYTHON_BLOCK_PATTERN = r"^( *)```(?:python|py|\{[ ]*\.py[ ]*\.annotate[ ]*\})\n(.*?)\n\1```"
BASH_BLOCK_PATTERN = r"^( *)```(?:bash|sh|shell)\n(.*?)\n\1```"
LANGUAGE_TAGS = {"python": ["python", "py", "{ .py .annotate }"], "bash": ["bash", "sh", "shell"]}
def check_prettier_version() -> bool:
"""Check if prettier is installed and warn if version differs from 3.6.2."""
if not shutil.which("npx"):
return False
try:
result = subprocess.run(["npx", "prettier", "--version"],
capture_output=True, text=True, check=False, timeout=5)
if result.returncode == 0:
version = result.stdout.strip()
if "3.6.2" not in version:
print(f"⚠️ Prettier version mismatch: expected 3.6.2, found {version}")
return True
except Exception:
pass
return False
def extract_code_blocks(markdown_content: str) -> dict[str, list[tuple[str, str]]]:
"""Extract code blocks from markdown content.
Args:
markdown_content (str): Markdown text to inspect.
Returns:
(dict): Mapping of language names to lists of (indentation, block) pairs.
"""
python_blocks = re.compile(PYTHON_BLOCK_PATTERN, re.DOTALL | re.MULTILINE).findall(markdown_content)
bash_blocks = re.compile(BASH_BLOCK_PATTERN, re.DOTALL | re.MULTILINE).findall(markdown_content)
return {"python": python_blocks, "bash": bash_blocks}
def remove_indentation(code_block: str, num_spaces: int) -> str:
"""Remove indentation from a block of code.
Args:
code_block (str): Code snippet to adjust.
num_spaces (int): Leading space count to strip.
Returns:
(str): Code with indentation removed.
"""
lines = code_block.split("\n")
stripped_lines = [line[num_spaces:] if len(line) >= num_spaces else line for line in lines]
return "\n".join(stripped_lines)
def add_indentation(code_block: str, num_spaces: int) -> str:
"""Add indentation back to non-empty lines in a code block.
Args:
code_block (str): Code snippet to indent.
num_spaces (int): Space count to prefix.
Returns:
(str): Code with indentation restored.
"""
indent = " " * num_spaces
lines = code_block.split("\n")
return "\n".join([indent + line if line.strip() else line for line in lines])
def format_code_with_ruff(temp_dir: Path) -> None:
"""Format Python files in a temporary directory with Ruff.
Args:
temp_dir (Path): Directory containing extracted Python blocks.
"""
try:
subprocess.run(["ruff", "format", "--line-length=120", str(temp_dir)], check=True)
print("Completed ruff format ✅")
except Exception as exc:
print(f"ERROR running ruff format ❌ {exc}")
try:
subprocess.run(
[
"ruff",
"check",
"--fix",
"--extend-select=F,I,D,UP,RUF,FA",
"--target-version=py39",
"--ignore=D100,D101,D103,D104,D203,D205,D212,D213,D401,D406,D407,D413,F821,F841,RUF001,RUF002,RUF012",
str(temp_dir),
],
check=True,
)
print("Completed ruff check ✅")
except Exception as exc:
print(f"ERROR running ruff check ❌ {exc}")
def format_bash_with_prettier(temp_dir: Path) -> None:
"""Format Bash files in a temporary directory with prettier-plugin-sh.
Args:
temp_dir (Path): Directory containing extracted Bash blocks.
"""
try:
result = subprocess.run(
"npx prettier --write --print-width 120 --plugin=$(npm root -g)/prettier-plugin-sh/lib/index.cjs ./**/*.sh",
shell=True,
capture_output=True,
text=True,
cwd=temp_dir,
)
if result.returncode != 0:
print(f"ERROR running prettier-plugin-sh ❌ {result.stderr}")
else:
print("Completed bash formatting ✅")
except Exception as exc:
print(f"ERROR running prettier-plugin-sh ❌ {exc}")
def generate_temp_filename(file_path: Path, index: int, code_type: str) -> str:
"""Generate a deterministic filename for a temporary code block.
Args:
file_path (Path): Source markdown path.
index (int): Block index for uniqueness.
code_type (str): Language identifier.
Returns:
(str): Safe filename for the temporary code file.
"""
stem = file_path.stem
code_letter = code_type[0]
path_part = str(file_path.parent).replace("/", "_").replace("\\", "_").replace(" ", "-")
hash_val = hashlib.md5(f"{file_path}_{index}".encode(), usedforsecurity=False).hexdigest()[:6]
ext = ".py" if code_type == "python" else ".sh"
filename = f"{stem}_{path_part}_{code_letter}{index}_{hash_val}{ext}"
return re.sub(r"[^\w\-.]", "_", filename)
def process_markdown_file(
file_path: Path,
temp_dir: Path,
process_python: bool = True,
process_bash: bool = True,
) -> tuple[str, list[tuple[int, str, Path, str]]]:
"""Extract code blocks from a markdown file and store them as temporary files.
Args:
file_path (Path): Markdown path to process.
temp_dir (Path): Directory to store temporary files.
process_python (bool, optional): Enable Python block extraction.
process_bash (bool, optional): Enable Bash block extraction.
Returns:
markdown_content (str): Original markdown content.
temp_files (list): Extracted block metadata.
"""
try:
markdown_content = file_path.read_text()
except Exception as exc:
print(f"Error reading file {file_path}: {exc}")
return "", []
code_blocks_by_type = extract_code_blocks(markdown_content)
temp_files: list[tuple[int, str, Path, str]] = []
code_types: list[tuple[str, int]] = []
if process_python:
code_types.append(("python", 0))
if process_bash:
code_types.append(("bash", 1000))
for code_type, offset in code_types:
for i, (indentation, code_block) in enumerate(code_blocks_by_type[code_type]):
num_spaces = len(indentation)
code_without_indentation = remove_indentation(code_block, num_spaces)
temp_file_path = temp_dir / generate_temp_filename(file_path, i + offset, code_type)
try:
temp_file_path.write_text(code_without_indentation)
except Exception as exc:
print(f"Error writing temp file {temp_file_path}: {exc}")
continue
temp_files.append((num_spaces, code_block, temp_file_path, code_type))
return markdown_content, temp_files
def update_markdown_file(file_path: Path, markdown_content: str, temp_files: list[tuple[int, str, Path, str]]) -> None:
"""Replace markdown code blocks with formatted versions.
Args:
file_path (Path): Markdown file to update.
markdown_content (str): Original content.
temp_files (list): Metadata for formatted code blocks.
"""
for num_spaces, original_code_block, temp_file_path, code_type in temp_files:
try:
formatted_code = temp_file_path.read_text().rstrip("\n")
except Exception as exc:
print(f"Error reading temp file {temp_file_path}: {exc}")
continue
formatted_code_with_indentation = add_indentation(formatted_code, num_spaces)
for lang in LANGUAGE_TAGS[code_type]:
markdown_content = markdown_content.replace(
f"{' ' * num_spaces}```{lang}\n{original_code_block}\n{' ' * num_spaces}```",
f"{' ' * num_spaces}```{lang}\n{formatted_code_with_indentation}\n{' ' * num_spaces}```",
)
try:
file_path.write_text(markdown_content)
except Exception as exc:
print(f"Error writing file {file_path}: {exc}")
def run_prettier(markdown_file: Path) -> None:
"""Format a markdown file with Prettier when available.
Args:
markdown_file (Path): Markdown file to format.
"""
if not check_prettier_version():
return
is_docs = "docs" in markdown_file.parts and "reference" not in markdown_file.parts
command = ["npx", "prettier", "--write", "--list-different", str(markdown_file)]
if is_docs:
command = ["npx", "prettier", "--tab-width", "4", "--write", "--list-different", str(markdown_file)]
subprocess.run(command, capture_output=True, check=False, cwd=markdown_file.parent)
def format_markdown_file(markdown_file: Path) -> None:
"""Format markdown-embedded code and run Prettier on the file.
Args:
markdown_file (Path): Markdown file to process.
"""
with TemporaryDirectory() as tmp_dir_name:
temp_dir = Path(tmp_dir_name)
markdown_content, temp_files = process_markdown_file(markdown_file, temp_dir)
if not temp_files:
run_prettier(markdown_file)
return
has_python = any(code_type == "python" for *_, code_type in temp_files)
has_bash = any(code_type == "bash" for *_, code_type in temp_files)
if has_python:
format_code_with_ruff(temp_dir)
if has_bash:
format_bash_with_prettier(temp_dir)
update_markdown_file(markdown_file, markdown_content, temp_files)
run_prettier(markdown_file)
def read_markdown_path() -> Path | None:
"""Read the markdown path from stdin payload.
Returns:
markdown_path (Path | None): Markdown path when present and valid.
"""
try:
data = json.load(sys.stdin)
except Exception:
return None
file_path = data.get("tool_input", {}).get("file_path", "")
path = Path(file_path) if file_path else None
if not path or path.suffix.lower() != ".md" or not path.exists():
return None
return path
def main() -> None:
"""Run markdown formatting hook."""
markdown_file = read_markdown_path()
if markdown_file:
format_markdown_file(markdown_file)
sys.exit(0)
if __name__ == "__main__":
main()
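A small, illustrative exercise of the block-extraction step above (run inside the script's module, with a made-up markdown snippet):

```python
# Illustrative only: call extract_code_blocks() from the script above on a
# tiny, made-up markdown string. Each match is an (indentation, code) pair.
fence = "`" * 3
sample = f"Intro text\n\n{fence}python\nx=1\nprint( x )\n{fence}\n"
blocks = extract_code_blocks(sample)
print(blocks["python"])  # [('', 'x=1\nprint( x )')]
print(blocks["bash"])    # []
```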

hooks/scripts/prettier_formatting.py Normal file

@@ -0,0 +1,66 @@
#!/usr/bin/env python3
"""
PostToolUse hook: Auto-format JS/TS/CSS/JSON/YAML/HTML/Vue/Svelte files with prettier
"""
import json
import re
import shutil
import subprocess
import sys
from pathlib import Path
# File extensions that prettier handles
PRETTIER_EXTENSIONS = {'.js', '.jsx', '.ts', '.tsx', '.css', '.less', '.scss',
'.json', '.yml', '.yaml', '.html', '.vue', '.svelte'}
LOCK_FILE_PATTERN = re.compile(r'.*lock\.(json|yaml|yml)$|.*\.lock$')
def check_prettier_version() -> bool:
"""Check if prettier is installed and warn if version differs from 3.6.2."""
if not shutil.which('npx'):
return False
try:
result = subprocess.run(['npx', 'prettier', '--version'],
capture_output=True, text=True, check=False, timeout=5)
if result.returncode == 0:
version = result.stdout.strip()
if '3.6.2' not in version:
print(f"⚠️ Prettier version mismatch: expected 3.6.2, found {version}")
return True
except Exception:
pass
return False
def main():
try:
data = json.load(sys.stdin)
file_path = data.get("tool_input", {}).get("file_path", "")
if not file_path:
sys.exit(0)
        target_file = Path(file_path)
        if not target_file.exists() or target_file.suffix not in PRETTIER_EXTENSIONS:
            sys.exit(0)
        # Skip lock files and model.json
        if LOCK_FILE_PATTERN.match(target_file.name) or target_file.name == 'model.json':
            sys.exit(0)
        # Check if prettier is available
        if not check_prettier_version():
            sys.exit(0)
        # Run prettier
        subprocess.run([
            'npx', 'prettier', '--write', '--list-different', '--print-width', '120', str(target_file)
        ], capture_output=True, check=False, cwd=target_file.parent)
except Exception:
pass
sys.exit(0)
if __name__ == "__main__":
main()
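A quick, illustrative check of the lock-file guard used above (file names are examples):

```python
# Lock files are skipped by the hook; ordinary JSON/YAML files are not.
import re

LOCK_FILE_PATTERN = re.compile(r".*lock\.(json|yaml|yml)$|.*\.lock$")
for name in ("package-lock.json", "yarn.lock", "pnpm-lock.yaml", "config.json"):
    print(name, bool(LOCK_FILE_PATTERN.match(name)))
# package-lock.json True, yarn.lock True, pnpm-lock.yaml True, config.json False
```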

hooks/scripts/python_code_quality.py Normal file

@@ -0,0 +1,87 @@
#!/usr/bin/env python3
"""
PostToolUse hook: Auto-format Python files with ruff and block on errors
Inspired by onuralpszr's pre-commit hook: https://github.com/onuralpszr/onuralpszr/blob/main/configs/git-hooks/pre-commit-line-120
"""
import json
import shutil
import subprocess
import sys
from pathlib import Path
def main():
try:
data = json.load(sys.stdin)
# Get file path from tool input
file_path = data.get("tool_input", {}).get("file_path", "")
# Only process Python files
if not file_path.endswith('.py'):
sys.exit(0)
# Check if ruff is available - silent exit if not (no blocking)
if not shutil.which('ruff'):
sys.exit(0)
# Get directory containing the Python file
py_file = Path(file_path)
if not py_file.exists():
sys.exit(0)
work_dir = py_file.parent
# Run ruff check with fixes - capture output to check for errors
check_result = subprocess.run([
'ruff', 'check',
'--fix',
'--extend-select', 'F,I,D,UP,RUF,FA',
'--target-version', 'py39',
'--ignore', 'D100,D104,D203,D205,D212,D213,D401,D406,D407,D413,RUF001,RUF002,RUF012',
str(py_file)
], capture_output=True, text=True, cwd=work_dir)
# Block only if ruff check finds unfixable errors
if check_result.returncode != 0:
error_output = check_result.stdout.strip() or check_result.stderr.strip()
error_msg = f'ERROR running ruff check ❌ {error_output}'
print(error_msg, file=sys.stderr)
output = {
'systemMessage': f'Ruff errors detected in {py_file.name}',
'hookSpecificOutput': {'hookEventName': 'PostToolUse', 'decision': 'block', 'reason': error_msg},
}
print(json.dumps(output))
sys.exit(2)
# Run ruff format
format_result = subprocess.run([
'ruff', 'format',
'--line-length', '120',
str(py_file)
], capture_output=True, text=True, cwd=work_dir)
# Block only if ruff format fails (unlikely but possible)
if format_result.returncode != 0:
error_msg = f'ERROR running ruff format ❌ {format_result.stderr.strip()}'
print(error_msg, file=sys.stderr)
output = {
'systemMessage': f'Ruff format failed for {py_file.name}',
'hookSpecificOutput': {'hookEventName': 'PostToolUse', 'decision': 'block', 'reason': error_msg},
}
print(json.dumps(output))
sys.exit(2)
except Exception as e:
# Block on unexpected errors
error_msg = f'Python code quality hook error: {e}'
print(error_msg, file=sys.stderr)
sys.exit(2)
# Success - no errors
sys.exit(0)
if __name__ == '__main__':
main()
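For reference, when ruff reports unfixable issues the hook emits a blocking decision shaped like the following (keys taken from the script above, values illustrative):

```python
# Illustrative shape of the blocking output printed by the hook on failure.
example_output = {
    "systemMessage": "Ruff errors detected in example.py",
    "hookSpecificOutput": {
        "hookEventName": "PostToolUse",
        "decision": "block",
        "reason": "ERROR running ruff check ❌ example.py:3:1: F401 ...",
    },
}
```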

plugin.lock.json Normal file

@@ -0,0 +1,73 @@
{
"$schema": "internal://schemas/plugin.lock.v1.json",
"pluginId": "gh:fcakyon/claude-codex-settings:plugins/ultralytics-dev",
"normalized": {
"repo": null,
"ref": "refs/tags/v20251128.0",
"commit": "6b4b1cec691752d433324f6ebfa2f25c7ecf5815",
"treeHash": "44d2d29556549b74b39821b3ac0c714042729643ccb62c80506e400f0799b587",
"generatedAt": "2025-11-28T10:16:50.519426Z",
"toolVersion": "publish_plugins.py@0.2.0"
},
"origin": {
"remote": "git@github.com:zhongweili/42plugin-data.git",
"branch": "master",
"commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
"repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
},
"manifest": {
"name": "ultralytics-dev",
"description": "Auto-formatting hooks for Python, JavaScript, Markdown, and Bash. Includes MCP servers for Slack message search, MongoDB queries, and Linear tracking, with usage skills.",
"version": "1.2.2"
},
"content": {
"files": [
{
"path": "README.md",
"sha256": "31943459fef7521ca6f43fec210a1715f402f525ab9a29e04dc7d0a6885ccba9"
},
{
"path": "hooks/hooks.json",
"sha256": "f7e97693b5cbabf55e5dc07feb8977d7cb08c31e56aaab39bc995618eb98a02d"
},
{
"path": "hooks/scripts/prettier_formatting.py",
"sha256": "97dd221ad05b3cad1751039257cf4826148a871bae3c46e194a81af3e6da11f4"
},
{
"path": "hooks/scripts/markdown_formatting.py",
"sha256": "e677705642a693393558ab0d614dc1949677593f3630630469b30c1c807f530a"
},
{
"path": "hooks/scripts/format_python_docstrings.py",
"sha256": "3a5eb04f3e7052ab4b86778ad92b306e39d1e446a7c1f473ca9e3a9cf61944d1"
},
{
"path": "hooks/scripts/python_code_quality.py",
"sha256": "01bcd2c7de166620a92a2e02c87d9b4ba9ff1d48489f42f0e31561f99f84a608"
},
{
"path": "hooks/scripts/bash_formatting.py",
"sha256": "b64e1cdacd6db8231be9743c8c36f3ced84916536af10a94929ba0e7823ab075"
},
{
"path": ".claude-plugin/plugin.json",
"sha256": "6de22c270f012bfde04c172a34b8cb8f716fbd34f53514dfe6ee7097ccef3d42"
},
{
"path": "skills/slack-usage/SKILL.md",
"sha256": "87f5a9a2373be7d3257b6f191c428daffd9daee681d90248288dc4220fcaef7d"
},
{
"path": "skills/mongodb-usage/SKILL.md",
"sha256": "66e5d6c1d76781c430719c6867f3ad8cee10dec8d11b1da60908f565037fbb1d"
}
],
"dirSha256": "44d2d29556549b74b39821b3ac0c714042729643ccb62c80506e400f0799b587"
},
"security": {
"scannedAt": null,
"scannerVersion": null,
"flags": []
}
}
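A minimal sketch (not part of the plugin) of how the per-file hashes recorded in this lock file could be verified, assuming it runs from the plugin root where plugin.lock.json and the listed files live:

```python
# Minimal verification sketch: recompute each file's sha256 and compare it
# against the entry recorded in plugin.lock.json.
import hashlib
import json
from pathlib import Path

lock = json.loads(Path("plugin.lock.json").read_text())
for entry in lock["content"]["files"]:
    digest = hashlib.sha256(Path(entry["path"]).read_bytes()).hexdigest()
    status = "ok" if digest == entry["sha256"] else "MISMATCH"
    print(f"{status}  {entry['path']}")
```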

skills/mongodb-usage/SKILL.md Normal file

@@ -0,0 +1,44 @@
---
name: mongodb-usage
description: This skill should be used when the user asks to "query MongoDB", "show database collections", "get collection schema", "list MongoDB databases", "search records in MongoDB", or "check database indexes".
---
# MongoDB MCP Usage
Use the MongoDB MCP server to integrate database queries into workflows.
## Read-Only Access
MongoDB MCP is configured in read-only mode: only queries and data retrieval are supported; write, update, and delete operations are not available.
## Database Queries
Use `mcp__mongodb__*` tools for:
- Listing databases
- Viewing collection schemas
- Querying collection data
- Analyzing indexes
## Integration Pattern
1. List available databases with `mcp__mongodb__list_databases`
2. Explore collections with `mcp__mongodb__list_collections`
3. Get schema information with `mcp__mongodb__get_collection_schema`
4. Query data as needed for analysis
5. Format results for user consumption
## Environment Variables
MongoDB MCP requires:
- `MONGODB_URI` - Connection string (mongodb://...)
Configure it in your shell environment before using the plugin.
## Cost Considerations
- Minimize database calls when possible
- Use schema queries before running analysis queries
- Cache results locally if multiple calls are needed
- Prefer aggregation pipelines for complex operations

skills/slack-usage/SKILL.md Normal file

@@ -0,0 +1,59 @@
---
name: slack-usage
description: This skill should be used when the user asks to "search Slack for messages", "find Slack messages about X", "get channel history", "look up conversation in Slack", or "find what someone said in Slack".
---
# Slack MCP Usage
Use the Slack MCP server to integrate Slack message search and channel access into workflows.
## Critical Rules
**Always use `mcp__slack__slack_search_messages` first for all message searches.** Only use `mcp__slack__slack_get_channel_history` when explicitly asked for channel history.
This pattern prioritizes the more efficient search interface and prevents unnecessary full channel scans.
## Search Messages (Recommended)
Use `mcp__slack__slack_search_messages` for finding specific messages:
```
Query: Find messages about deployment status
Tool: mcp__slack__slack_search_messages
Parameters: query="deployment status"
```
**Best for:**
- Keyword searches
- Finding messages from specific users
- Searching across channels
- Time-bounded queries
## Channel History (When Needed)
Use `mcp__slack__slack_get_channel_history` only when user explicitly requests:
- "Get recent messages from #engineering"
- "Show me the channel history"
- "List all messages in this channel"
**Note:** This scans the entire channel, so use it sparingly.
## Integration Pattern
In commands and agents:
1. Check if user wants search or history
2. Default to `mcp__slack__slack_search_messages` for keywords
3. Use `mcp__slack__slack_get_channel_history` only if explicitly requested
4. Format results clearly with message content and metadata
## Environment Variables
Slack MCP requires:
- `SLACK_BOT_TOKEN` - Bot user token (xoxb-...)
- `SLACK_USER_TOKEN` - User token (xoxp-...)
Configure these in your shell environment before using the plugin.