Initial commit
15
.claude-plugin/plugin.json
Normal file
@@ -0,0 +1,15 @@
{
  "name": "cc-plus",
  "description": "Essential productivity features for Claude Code: session backup, PR creation, and more",
  "version": "0.1.0",
  "author": {
    "name": "YeonGyu Kim",
    "email": "public.kim.yeon.gyu@gmail.com"
  },
  "skills": [
    "./skills"
  ],
  "commands": [
    "./commands"
  ]
}
3
README.md
Normal file
@@ -0,0 +1,3 @@
# cc-plus

Essential productivity features for Claude Code: session backup, PR creation, and more
32
commands/share.md
Normal file
@@ -0,0 +1,32 @@
---
name: share
description: Export the current Claude Code session as a readable markdown file. Quick shortcut for the share-session skill.
---

# Share Session Command

Export the current Claude Code conversation session to a shareable markdown file.

## Instructions

Execute the share-session skill to convert this session into markdown format:

1. Use the Skill tool to invoke the share-session skill
2. The skill will automatically:
   - Create a session-identifier todo with the current context
   - Search for the session using fuzzy matching
   - Convert the transcript to markdown with full statistics
   - Save to `/tmp/claude-code-sessions/` with a timestamp
   - Copy the file path to the clipboard
   - Display the cost breakdown and session metrics

## Output

You will receive:

- 📄 Markdown file path (copied to clipboard)
- 💰 Total session cost breakdown
- 📊 Token usage statistics
- ⏱️ Session timeline metrics
- 🎯 Cache hit rate percentage

Simply invoke the skill - no additional parameters needed.
69
plugin.lock.json
Normal file
@@ -0,0 +1,69 @@
{
  "$schema": "internal://schemas/plugin.lock.v1.json",
  "pluginId": "gh:code-yeongyu/cc-plus:claude-code-plugins/cc-plus",
  "normalized": {
    "repo": null,
    "ref": "refs/tags/v20251128.0",
    "commit": "ec77a3526c84f4a538c72afb0a7d9f266dd64f31",
    "treeHash": "965a8c08ddfb85dbcb896b9ca3558c18271df657e3f952e55d04fabe3f5ada57",
    "generatedAt": "2025-11-28T10:15:42.797541Z",
    "toolVersion": "publish_plugins.py@0.2.0"
  },
  "origin": {
    "remote": "git@github.com:zhongweili/42plugin-data.git",
    "branch": "master",
    "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
    "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
  },
  "manifest": {
    "name": "cc-plus",
    "description": "Essential productivity features for Claude Code: session backup, PR creation, and more",
    "version": "0.1.0"
  },
  "content": {
    "files": [
      {
        "path": "README.md",
        "sha256": "27bb872630b57021e081afd30ae0358873f58454be805b0a571a0213bc6c0ae3"
      },
      {
        "path": ".claude-plugin/plugin.json",
        "sha256": "939e09deaf9c4b3f8de44eb3bf30dc8f38a7739f564b7d075d722a3de855b074"
      },
      {
        "path": "commands/share.md",
        "sha256": "75c97fd4b1a5ecae116eeea747b22209d5c7385975b29d5225158515b5438e0a"
      },
      {
        "path": "skills/share-session/SKILL.md",
        "sha256": "9093c742a24f58a86de6e5ecb438b2e10eb8d711bd40c9accd396fc98edcb0c9"
      },
      {
        "path": "skills/share-session/scripts/transcript_to_markdown.py",
        "sha256": "b57385356a6be68b1098334ae151b83da43f132243e32b2ed9bc449e2f5b1f19"
      },
      {
        "path": "skills/share-session/scripts/share_session.py",
        "sha256": "dd485d8477df10730153016cdeb96bd40a8109efabdd95c83c9d73a9dcc3dbc9"
      },
      {
        "path": "skills/pr-creator/SKILL.md",
        "sha256": "47ffdbb6ee488472e0422a12c1b8d18bfe7435cb9d3f2dec6bf36e2a877ae64e"
      },
      {
        "path": "skills/pr-creator/scripts/detect_pr_template.py",
        "sha256": "427c0812f9230afb6882530afe2b7679f803656cacc7d39004f88602de3b8f46"
      },
      {
        "path": "skills/pr-creator/scripts/get_git_status.py",
        "sha256": "f2ad83ae054da9f15cf6c81ef8cc6e30a6bd396b68f68cfc171b616d858fe26d"
      }
    ],
    "dirSha256": "965a8c08ddfb85dbcb896b9ca3558c18271df657e3f952e55d04fabe3f5ada57"
  },
  "security": {
    "scannedAt": null,
    "scannerVersion": null,
    "flags": []
  }
}
285
skills/pr-creator/SKILL.md
Normal file
@@ -0,0 +1,285 @@
---
name: pr-creator
description: "GitHub Pull Request creation specialist. Analyzes user requirements to create PRs with structured titles and bodies matching the user's query language. Handles git change analysis, PR draft creation, user confirmation, and final PR creation via gh CLI."
---

# PR Creator

Workflow for creating GitHub Pull Requests. Analyzes changes, writes clear PRs matching the user's query language, and creates them via gh CLI after user confirmation.

**Language Policy:** Always match the user's query language. If the user writes in Korean, write the PR in Korean. If the user writes in English, write the PR in English.

## Workflow

### 0. Create TODO list for tracking

**CRITICAL: ALWAYS start by creating a TODO list using the TodoWrite tool.**

Create todos for all workflow steps:

```json
{
  "todos": [
    {"content": "Gather git status", "status": "pending", "activeForm": "Gathering git status"},
    {"content": "Analyze changes deeply", "status": "pending", "activeForm": "Analyzing changes deeply"},
    {"content": "Extract or ask for ticket ID", "status": "pending", "activeForm": "Extracting or asking for ticket ID"},
    {"content": "Write PR title", "status": "pending", "activeForm": "Writing PR title"},
    {"content": "Detect PR template", "status": "pending", "activeForm": "Detecting PR template"},
    {"content": "Write PR body", "status": "pending", "activeForm": "Writing PR body"},
    {"content": "Save PR draft and get user confirmation", "status": "pending", "activeForm": "Saving PR draft and getting user confirmation"},
    {"content": "Create PR via gh CLI", "status": "pending", "activeForm": "Creating PR via gh CLI"}
  ]
}
```

Update each todo's status to "in_progress" when starting that step, and "completed" when finished.

### 1. Gather git status

Run the parallel git status script to gather all repository information:

```bash
python scripts/get_git_status.py
```

This script executes these commands in parallel using asyncio:

- `git status` - current branch and changes
- `git diff` - unstaged changes
- `git diff --staged` - staged changes
- `git log --oneline -10` - recent commit history
- `git branch --show-current` - current branch name

The script returns JSON output with all results. Parse this to understand the current state.
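Each top-level key maps to the captured output of one command. A representative shape, taken from the script source included later in this commit (the values shown are illustrative):

```json
{
  "status": {"stdout": "On branch main\nnothing to commit...", "stderr": "", "returncode": 0, "success": true},
  "diff": {"stdout": "", "stderr": "", "returncode": 0, "success": true},
  "diff_staged": {"stdout": "", "stderr": "", "returncode": 0, "success": true},
  "log": {"stdout": "ec77a35 Initial commit", "stderr": "", "returncode": 0, "success": true},
  "current_branch": {"stdout": "main", "stderr": "", "returncode": 0, "success": true}
}
```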
### 2. Analyze changes deeply

Use ultrathink to analyze:

- What problem was solved?
- What was implemented/added/modified?
- Why was this change needed?
- What technical approach was taken?
- What is the user's core intent to emphasize?

### 3. Extract or ask for ticket ID

**Try to extract the ticket ID from the branch name first:**

Common patterns to detect:

- `feature/TICKET-123-description` → `TICKET-123`
- `bugfix/PROJ-456-fix-something` → `PROJ-456`
- `feat/ABC-789` → `ABC-789`
- `username/ISSUE-123-description` → `ISSUE-123`

Regular expression pattern: `([A-Z]+-\d+)` or `([A-Z]+\d+)`
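As a minimal sketch, the extraction with the first pattern looks like this (the helper name is hypothetical; the skill applies the pattern inline):

```python
import re


def extract_ticket_id(branch: str) -> str | None:
    # Hypothetical helper: search the branch name for an uppercase
    # project key followed by a hyphen and digits, e.g. "TICKET-123".
    match = re.search(r"([A-Z]+-\d+)", branch)
    return match.group(1) if match else None


assert extract_ticket_id("feature/TICKET-123-description") == "TICKET-123"
assert extract_ticket_id("main") is None
```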
**If the ticket ID is not found in the branch name OR the user hasn't explicitly mentioned a ticket ID:**

Use AskUserQuestion to ask:

```json
{
  "questions": [{
    "question": "이 작업과 관련된 티켓 ID가 있나요?",
    "header": "티켓 ID",
    "multiSelect": false,
    "options": [
      {"label": "있음", "description": "티켓 ID를 입력하세요 (예: TICKET-123)"},
      {"label": "없음", "description": "관련 티켓이 없습니다"}
    ]
  }]
}
```

**If the user query is in English:**

```json
{
  "questions": [{
    "question": "Is there a ticket ID related to this work?",
    "header": "Ticket ID",
    "multiSelect": false,
    "options": [
      {"label": "Yes", "description": "Enter ticket ID (e.g., TICKET-123)"},
      {"label": "No", "description": "No related ticket"}
    ]
  }]
}
```

**Skip asking if:**

- The user explicitly mentioned a ticket ID in their request
- The user explicitly said "no ticket" or "티켓 없음"

### 4. Write PR title

**Format:**

- **If ticket ID exists**: `[TICKET-ID] {descriptive title}`
- **If no ticket ID**: `{descriptive title}`

**Required rules:**

- **Ticket ID prefix**: If a ticket ID was found or provided, add `[TICKET-ID]` at the start
- **Noun form ending**: End the title with a noun form in the user's query language
- **Specific and clear**: Immediately understandable
- **Emphasize the core**: Reflect what the user wants to highlight
- **Around 50-60 characters** (excluding the ticket ID prefix): Keep it concise but informative

**Prohibited:**

- NO conventional commit prefixes (`fix:`, `feat:`, `refactor:`)
- NO vague expressions like "bug fix", "improvement"
- NO technical term lists only

**Good examples:**

- "[PROJ-123] Fix dmypy incorrectly inferring all Django models as Reel type"
- "[ISSUE-456] Implement Instagram Reels bulk collection API"
- "[TICKET-789] Improve response speed by adding search result caching"
- "Fix dmypy incorrectly inferring all Django models as Reel type" (no ticket)

**Bad examples:**

- "[PROJ-123] fix: type error"
- "[ISSUE-456] bug fix"
- "performance improvement"
- "PROJ-123: Fix type error" (wrong format - must use brackets)
### 5. Detect PR template

**CRITICAL: ALWAYS run the PR template detection script. DO NOT skip this step.**

**You MUST run this Python script to detect PR templates:**

```bash
python scripts/detect_pr_template.py
```

**Why this is critical:**

- Each repository may have its own PR template structure
- Using the repository's template ensures consistency with team conventions
- The script handles git worktree environments correctly
- Skipping this step results in an incorrect PR format

**What the script does:**

- Searches for PULL_REQUEST_TEMPLATE.md in standard locations (.github/, docs/, root)
- Works correctly in git worktree environments
- Returns JSON with the template path and content if found

**IMPORTANT: You must parse the JSON output and follow the instructions:**

- If `"found": true` → **USE THE TEMPLATE STRUCTURE** from the `"content"` field. DO NOT use the fallback structure.
- If `"found": false` → Use the fallback structure below

**Example output when found:**

```json
{
  "found": true,
  "path": ".github/PULL_REQUEST_TEMPLATE.md",
  "content": "## Description\n\n## Changes\n..."
}
```

**What to do:** Extract the `"content"` field and use that exact structure for the PR body.

### 6. Write PR body

**If a template was found:** Follow the repository's template structure exactly. Fill in each section based on the analyzed changes.

**If no template was found (fallback):** Use the structure matching the user's query language.

**Structure template:**

```markdown
## Background
{Explain why this work was needed. Describe the problem situation or requirements specifically}

## Changes
{Describe what and how things were changed. Include the technical approach}
- List major changes as bullet points
- Balance technical details with understandable explanations

## Testing
{How to verify this PR works properly. Omit this section if not applicable}
1. Specific test steps
2. Expected results
3. Commands to run automated tests if available

## Review Notes
{Parts that reviewers should pay special attention to, or additional context. Omit if not applicable}
- Areas of special focus
- Potential risks or trade-offs
- Future improvement opportunities

## Screenshots
{If there are UI changes or visual demonstrations. Omit if not applicable}
```

**Note:** Write the entire PR content in the same language as the user's query. If the user writes in Korean, use Korean section titles and content. If in English, use English.

### 7. Save PR draft

Save the draft to a temporary file:

```bash
/tmp/pull-request-{topic}-{timestamp}.md
```

Show the content to the user and ask for confirmation in their language.

### 8. Create PR

**Only proceed after user approval.**

1. Check for uncommitted changes:

```bash
git status
```

2. If needed, create a new branch:

```bash
git checkout -b {username}/{branch-name}
```

3. If there are unstaged changes, create a commit (use the existing git commit workflow)

4. Push to the remote:

```bash
git push -u origin {branch-name}
```

5. Create the PR using the gh CLI:

```bash
gh pr create --title "{title}" --body "$(cat /tmp/pull-request-{topic}-{timestamp}.md)" --base main
```

6. Return the PR URL to the user

## Key considerations

**User intent understanding:**

- Identify what the user wants to emphasize
- Focus on business value or problem solving over technical achievement
- Clearly communicate "why this is important"

**Balance technical accuracy and readability:**

- Use technical terms precisely, but add context
- Wrap code changes in backticks
- Add simple analogies or explanations for complex concepts

**Reviewer perspective:**

- Structure for quick understanding
- Emphasize important changes
- Provide clear testing methods

## Prohibited actions

- NO conventional commit prefixes in the PR title
- NO vague or generic descriptions
- NO technical term lists only
- NO PR creation without user confirmation
- NO PR creation with uncommitted changes

## Final checklist

Before creating the PR, verify:

- [ ] Title matches the user's query language?
- [ ] Title is specific and clear?
- [ ] Body follows the required structure?
- [ ] Balance between technical details and explanations is appropriate?
- [ ] User's core intent is well communicated?
- [ ] All changes are committed?
- [ ] User confirmation received?
54
skills/pr-creator/scripts/detect_pr_template.py
Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env python3
"""Detect PR template in git repository with worktree support."""

import asyncio
import json
import sys
from pathlib import Path


async def run_git_command(command: str) -> tuple[str, int]:
    process = await asyncio.create_subprocess_shell(
        command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, _ = await process.communicate()
    return stdout.decode("utf-8").strip(), process.returncode or 0


async def find_pr_template() -> dict[str, str | bool | None]:
    # Resolve the repository root; --show-toplevel also works inside worktrees.
    git_root_output, returncode = await run_git_command("git rev-parse --show-toplevel")

    if returncode != 0:
        return {"error": "Not a git repository", "found": False}

    git_root = Path(git_root_output)

    # Standard template locations, checked in priority order.
    template_locations = [
        git_root / ".github" / "PULL_REQUEST_TEMPLATE.md",
        git_root / ".github" / "pull_request_template.md",
        git_root / "docs" / "PULL_REQUEST_TEMPLATE.md",
        git_root / "PULL_REQUEST_TEMPLATE.md",
    ]

    for template_path in template_locations:
        if template_path.exists() and template_path.is_file():
            content = template_path.read_text(encoding="utf-8")
            return {"found": True, "path": str(template_path), "content": content}

    return {"found": False}


async def main() -> int:
    try:
        result = await find_pr_template()
        print(json.dumps(result, indent=2, ensure_ascii=False))
        return 0
    except Exception as e:
        print(json.dumps({"error": str(e), "type": type(e).__name__, "found": False}), file=sys.stderr)
        return 1


if __name__ == "__main__":
    sys.exit(asyncio.run(main()))
67
skills/pr-creator/scripts/get_git_status.py
Normal file
@@ -0,0 +1,67 @@
#!/usr/bin/env python3
"""Parallel git status fetcher using asyncio subprocess."""

import asyncio
import json
import sys
from pathlib import Path
from typing import Any


async def run_git_command(command: str) -> tuple[str, str, int]:
    process = await asyncio.create_subprocess_shell(
        command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await process.communicate()
    return (
        stdout.decode("utf-8", errors="replace").strip(),
        stderr.decode("utf-8", errors="replace").strip(),
        process.returncode or 0,
    )


async def gather_git_status() -> dict[str, Any]:
    # Each command runs concurrently; the keys become the JSON output sections.
    commands = {
        "status": "git status",
        "diff": "git diff",
        "diff_staged": "git diff --staged",
        "log": "git log --oneline -10",
        "current_branch": "git branch --show-current",
    }

    tasks = {key: run_git_command(cmd) for key, cmd in commands.items()}
    results = await asyncio.gather(*[tasks[key] for key in commands.keys()])

    output = {}
    for (key, _), (stdout, stderr, returncode) in zip(commands.items(), results):
        output[key] = {"stdout": stdout, "stderr": stderr, "returncode": returncode, "success": returncode == 0}

    return output


async def main() -> int:
    try:
        # Bail out early when not inside a git repository.
        check_process = await asyncio.create_subprocess_shell(
            "git rev-parse --git-dir",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        await check_process.communicate()

        if check_process.returncode != 0:
            print(json.dumps({"error": "Not a git repository", "cwd": str(Path.cwd())}), file=sys.stderr)
            return 1

        results = await gather_git_status()
        print(json.dumps(results, indent=2, ensure_ascii=False))
        return 0

    except Exception as e:
        print(json.dumps({"error": str(e), "type": type(e).__name__}), file=sys.stderr)
        return 1


if __name__ == "__main__":
    sys.exit(asyncio.run(main()))
228
skills/share-session/SKILL.md
Normal file
@@ -0,0 +1,228 @@
---
name: share-session
description: Convert and share Claude Code conversation sessions as readable markdown files. Use when the user wants to share a session transcript, export conversation history, or create a shareable markdown document from a Claude Code session. Triggered by requests like "share this session", "export conversation", "convert session to markdown".
---

# Share Session

## Overview

Convert Claude Code sessions into readable markdown format for easy sharing. This skill finds sessions by fuzzy-matching todo items and generates well-formatted markdown documents.
If this skill is loaded by the user's explicit request with no further comments, simply execute the following workflow.
## Workflow

### Step 1: CRITICAL - Create Todo for Session Identification

**MANDATORY**: You MUST use the TodoWrite tool to create a todo item that describes **what this session is about**.

**IMPORTANT**: You do NOT need to know the session ID. Describe the session content instead.

**CORRECT Usage:**

```python
TodoWrite(todos=[{
    "content": "share this session about ccusage integration and time tracking",
    "status": "in_progress",
    "activeForm": "Sharing session"
}])
```

**Good Examples (describe the session topic):**

- ✅ "share this session about ccusage integration"
- ✅ "export conversation on implementing time tracking"
- ✅ "share current session with share-session improvements"

**Bad Examples (using a session ID directly):**

- ❌ "get session id of 62d3a2b2-102c-43d3-8414-0a30d7a5e5e0" (you don't know the session ID yet!)
- ❌ "export 62d3a2b2" (session ID unknown)

**How it works:**

1. You create a todo with a **session description**
2. Claude Code saves the todo as: `~/.claude/todos/{SESSION-ID}.json`
3. The script searches todo **content** using fuzzy matching (60% threshold; see the sketch below)
4. The script extracts the SESSION-ID from the matching todo **filename**
5. The script uses that SESSION-ID to find the transcript
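A condensed sketch of that lookup, mirroring the full `share_session.py` included later in this commit (error handling omitted):

```python
from pathlib import Path

import orjson
from thefuzz import fuzz


def find_session_id(query: str, todos_dir: Path) -> str | None:
    # The session ID lives in the todo *filename*; the match score
    # comes from fuzzy-comparing the query against each todo's content.
    best_score, best_id = 0, None
    for todo_file in todos_dir.glob("*.json"):
        for todo in orjson.loads(todo_file.read_bytes()):
            score = fuzz.partial_ratio(query.lower(), todo.get("content", "").lower())
            if score > best_score:
                best_score = score
                best_id = todo_file.name.removesuffix(".json").split("-agent-")[0]
    return best_id if best_score >= 60 else None
```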
**Why this is required:**

- Without a todo, the script has no way to identify which session to export
- The todo file name is the ONLY place where the session ID is stored
- Fuzzy matching allows flexible queries ("share this session" matches multiple variations)

**Common mistakes:**

- ❌ Forgetting to call TodoWrite before running the script
- ❌ Using a session ID in the todo content (you don't know it yet!)
- ❌ The query in Step 2 doesn't match the todo content at all

### Step 2: Run share_session.py

**IMPORTANT**: Always use the ABSOLUTE path to the script:

```bash
uv run --script /Users/yeongyu/local-workspaces/advanced-claude-code/claude-code-plugins/cc-plus/skills/share-session/scripts/share_session.py "your search query"
```

**The search query should match your todo content from Step 1.**

The script automatically:

- Searches todos using fuzzy matching (60% threshold)
- Locates the transcript at `~/.claude/projects/*/{session-id}.jsonl`
- Merges pre-compact backups if they exist
- **Fetches accurate cost/token data from ccusage** (NOT LiteLLM)
- Converts to markdown with full statistics
- **Truncates before the /share command** (excludes the share request itself)
- Saves to `/tmp/claude-code-sessions/{session-id}-{timestamp}.md`
- Copies the file path to the clipboard
- Displays a success message with the cost breakdown

### Step 3: Output

The script displays:

```
✅ Markdown saved to:
/tmp/claude-code-sessions/{session-id}-{timestamp}.md

💰 Total Session Cost: $X.XXXXXX

📋 The path has been copied to your clipboard.
```
## Generated Markdown Format

The script generates comprehensive markdown with the following (an abridged sample follows the lists below):

**Session Metadata:**

- 📊 Session ID, generation timestamp, message count
- 🔄 Models used (from ccusage data)

**Content:**

- 💬 User messages with timestamps (meta messages filtered)
- 🤖 Assistant responses with timestamps
- 🧠 Thinking process (when available, shown as nested quotes)
- 🔧 Tool usage details (collapsed in `<details>` tags)
- 🚀 Subagent calls (Task tool usage)

**Cost & Token Statistics (from ccusage):**

- 💰 Total session cost (accurate calculation from ccusage)
- 📊 Token breakdown:
  - Input tokens
  - Output tokens
  - Cache creation tokens
  - Cache read tokens
  - Total tokens
- 🎯 Cache hit rate percentage
- 📉 Average cost per message

**Session Timeline (NEW):**

- ⏱️ **Total Session Time**: First message → Last message
- 🟢 **LLM Active Time**: User question → Last assistant response (per turn)
- 🟡 **LLM Idle Time**: Last assistant → Next user question
- 📊 **LLM Utilization**: (Active Time / Total Time) × 100%

**Special Features:**

- 📦 Compact markers shown for merged pre-compact backups
- 🔪 Auto-truncates before the `/share` command (excludes the export request itself)
- 🔄 Multi-model support (tracks different models per message)
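An abridged sketch of how a generated file begins, assuming ccusage data was available (placeholders stand in for real values; the exact lines come from `convert_transcript_to_markdown` in the script):

```markdown
# 🤖 Claude Code Session Transcript

**Session ID**: `{session-id}`
**Generated**: {timestamp}
**Messages**: {count}

## 📊 Session Statistics

**Models Used**: {model}

### Token Usage

- **Input Tokens**: {n}
- **Output Tokens**: {n}
...
```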
## Script

### share_session.py

**The only script you need.** It does everything from search to markdown generation.

**Usage:**

```bash
uv run --script /Users/yeongyu/local-workspaces/advanced-claude-code/claude-code-plugins/cc-plus/skills/share-session/scripts/share_session.py <query>
```

**Dependencies (auto-installed by uv):**

- `orjson`: Fast JSON parsing
- `thefuzz`: Fuzzy string matching for todo search
- `rich`: Terminal formatting and progress display

**Complete features:**

- ✅ Fuzzy search through todo files (60% threshold)
- ✅ Automatic pre-compact backup merging
- ✅ **Accurate cost/token data from ccusage** (via `bunx --bun ccusage session -i`)
- ✅ **Turn-based time tracking**:
  - LLM Active Time (user → last assistant per turn)
  - LLM Idle Time (last assistant → next user)
  - Utilization percentage
- ✅ Auto-truncation before the `/share` command
- ✅ Multi-model session support (from ccusage data)
- ✅ Clipboard integration (macOS `pbcopy`)
- ✅ Rich terminal output with colored progress
- ✅ TypedDict-based type safety

**Output:** File path (stdout) + clipboard

**Exit codes:**

- 0: Success
- 1: Session not found or conversion failed

**Performance:**

- Typical execution: 2-5 seconds
- Timeout: 30 seconds (for the ccusage call)

## Error Handling

**No session found:**

- ❌ **Cause**: Todo item not created, or the query doesn't match
- ✅ **Solution**:
  1. Verify you called `TodoWrite` in Step 1
  2. Check that the query matches the todo content (60% fuzzy threshold)
  3. Try the exact session ID if known

**Transcript not found:**

- ❌ **Cause**: Session ID extracted but the transcript is missing
- ✅ **Solution**:
  1. Confirm the session ID is correct
  2. Check that the `~/.claude/projects/` directory exists
  3. Look for the `{session-id}.jsonl` file
  4. Check pre-compact backups at `~/.claude/pre-compact-session-histories/`

**ccusage data fetch failed:**

- ⚠️ **Symptom**: "Could not fetch session usage data from ccusage"
- ❌ **Possible causes**:
  1. The `ccusage` command is not available (check `bunx --bun ccusage --version`)
  2. The session ID is not found in the ccusage database
  3. A JSON parsing error in the ccusage output
- ✅ **Impact**: Markdown is still generated, but without cost/token statistics
- ✅ **Fallback**: A warning message is displayed and conversion continues

**Conversion failed:**

- ❌ **Cause**: JSONL parsing or markdown generation error
- ✅ **Solution**:
  1. Check that the transcript file is valid JSONL (each line = valid JSON)
  2. Review the error message from stderr
  3. Check for corrupted transcript data

**Clipboard copy failed:**

- ⚠️ **Symptom**: "Warning: Could not copy to clipboard"
- ❌ **Cause**: The `pbcopy` command failed (macOS only)
- ✅ **Impact**: Non-critical - the file path is still shown in stdout
- ✅ **Workaround**: Manually copy the displayed path

## Troubleshooting

**Script says "No session found" even though the todo exists:**

```bash
# Check if the todo file exists
ls -la ~/.claude/todos/ | grep $(date +%Y-%m-%d)

# Verify the todo content
cat ~/.claude/todos/{session-id}*.json | jq .
```

**Want to export a specific session by ID:**

```bash
# Create a todo with the exact session ID
TodoWrite(todos=[{"content": "export {exact-session-id}", "status": "in_progress", "activeForm": "Exporting"}])

# Then run with the session ID
uv run --script ... "{exact-session-id}"
```

**ccusage returns wrong data:**

- Verify the ccusage version: `bunx --bun ccusage --version`
- Test ccusage directly: `bunx --bun ccusage session -i {session-id} --json`
- Check if the session exists: `bunx --bun ccusage session`
701
skills/share-session/scripts/share_session.py
Executable file
@@ -0,0 +1,701 @@
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = "~=3.12"
# dependencies = [
#     "orjson",
#     "thefuzz",
#     "rich",
# ]
# ///

from __future__ import annotations

import subprocess
import sys
from datetime import datetime
from pathlib import Path
from typing import Any, NotRequired, TypedDict

import orjson
from rich.console import Console
from thefuzz import fuzz

console = Console()


class SessionUsageData(TypedDict):
    sessionId: str
    inputTokens: int
    outputTokens: int
    cacheCreationTokens: int
    cacheReadTokens: int
    totalTokens: int
    totalCost: float
    lastActivity: str
    modelsUsed: list[str]
    modelBreakdowns: NotRequired[list[dict[str, Any]]]


def extract_session_id_from_filename(filename: str) -> str | None:
    # Todo files are named "{session-id}.json" or "{session-id}-agent-{...}.json".
    if not filename.endswith(".json"):
        return None
    parts = filename.replace(".json", "").split("-agent-")
    if len(parts) >= 1:
        return parts[0]
    return None


def search_todos(query: str, todos_dir: Path) -> str | None:
    # Return the session ID whose todo content best matches the query
    # (fuzzy partial ratio, 60% threshold).
    if not todos_dir.exists():
        return None

    best_match_score = 0
    best_session_id = None

    for todo_file in todos_dir.glob("*.json"):
        try:
            todos = orjson.loads(todo_file.read_bytes())
        except (orjson.JSONDecodeError, OSError):
            continue

        for todo in todos:
            content = todo.get("content", "")
            score = fuzz.partial_ratio(query.lower(), content.lower())

            if score > best_match_score:
                best_match_score = score
                best_session_id = extract_session_id_from_filename(todo_file.name)

    if best_match_score >= 60:
        return best_session_id
    return None


def find_transcript_path(session_id: str) -> Path | None:
    projects_dir = Path.home() / ".claude" / "projects"
    if not projects_dir.exists():
        return None

    for project_dir in projects_dir.iterdir():
        if not project_dir.is_dir():
            continue

        transcript_file = project_dir / f"{session_id}.jsonl"
        if transcript_file.exists():
            return transcript_file

    return None


def find_pre_compact_backups(session_id: str) -> list[Path]:
    backup_dir = Path.home() / ".claude" / "pre-compact-session-histories"
    if not backup_dir.exists():
        return []

    backups = list(backup_dir.glob(f"{session_id}-*.jsonl"))
    backups.sort(key=lambda p: p.name)
    return backups


def create_merged_transcript(session_id: str, current_transcript: Path) -> Path | None:
    # Concatenate pre-compact backups plus the current transcript, inserting a
    # "compact_marker" line at each boundary so the markdown can show where
    # compaction happened.
    backups = find_pre_compact_backups(session_id)

    if not backups:
        return current_transcript

    merged_dir = Path("/tmp/claude-code-merged-transcripts")
    merged_dir.mkdir(parents=True, exist_ok=True)

    timestamp_str = datetime.now().strftime("%Y%m%d-%H%M%S")
    merged_file = merged_dir / f"{session_id}-merged-{timestamp_str}.jsonl"

    try:
        with merged_file.open("w", encoding="utf-8") as outfile:
            for i, backup_path in enumerate(backups):
                if i > 0:
                    compact_marker = orjson.dumps({"type": "compact_marker"}).decode()
                    outfile.write(compact_marker + "\n")

                with backup_path.open("r", encoding="utf-8") as infile:
                    outfile.write(infile.read())

            compact_marker = orjson.dumps({"type": "compact_marker"}).decode()
            outfile.write(compact_marker + "\n")

            with current_transcript.open("r", encoding="utf-8") as infile:
                outfile.write(infile.read())

        return merged_file
    except Exception as e:
        console.print(f"[yellow]Warning: Failed to merge transcripts: {e}[/yellow]")
        return current_transcript


def fetch_session_usage_from_ccusage(session_id: str) -> SessionUsageData | None:
    try:
        result = subprocess.run(
            ["bunx", "--bun", "ccusage", "session", "-i", session_id, "--json"],
            capture_output=True,
            text=True,
            check=True,
            timeout=30,
        )

        data = orjson.loads(result.stdout)

        if not isinstance(data, dict):
            console.print("[red]Error: Unexpected ccusage output format[/red]")
            return None

        total_input = 0
        total_output = 0
        total_cache_creation = 0
        total_cache_read = 0
        models_used_set: set[str] = set()

        entries = data.get("entries", [])
        for entry in entries:
            total_input += entry.get("inputTokens", 0)
            total_output += entry.get("outputTokens", 0)
            total_cache_creation += entry.get("cacheCreationTokens", 0)
            total_cache_read += entry.get("cacheReadTokens", 0)

            model = entry.get("model")
            if model:
                models_used_set.add(model)

        total_tokens = data.get("totalTokens", 0)
        total_cost = data.get("totalCost", 0.0)

        last_activity = ""
        if entries:
            last_entry = entries[-1]
            last_timestamp = last_entry.get("timestamp", "")
            if last_timestamp:
                last_activity = last_timestamp.split("T")[0]

        return SessionUsageData(
            sessionId=data.get("sessionId", session_id),
            inputTokens=total_input,
            outputTokens=total_output,
            cacheCreationTokens=total_cache_creation,
            cacheReadTokens=total_cache_read,
            totalTokens=total_tokens,
            totalCost=total_cost,
            lastActivity=last_activity,
            modelsUsed=sorted(models_used_set),
        )

    except subprocess.TimeoutExpired:
        console.print("[red]Error: ccusage command timed out[/red]")
        return None
    except subprocess.CalledProcessError as e:
        console.print(f"[red]Error calling ccusage: {e.stderr}[/red]")
        return None
    except Exception as e:
        console.print(f"[red]Error fetching session usage: {e}[/red]")
        return None


def escape_xml_tags(text: str) -> str:
    """Escape XML/HTML tags so they display in markdown."""
    return text.replace("<", r"\<").replace(">", r"\>")


def parse_timestamp(ts: str) -> str:
    try:
        dt = datetime.fromisoformat(ts.replace("Z", "+00:00"))
        local_dt = dt.astimezone()
        return local_dt.strftime("%Y-%m-%d %H:%M:%S %Z")
    except (ValueError, AttributeError):
        return ts


def parse_timestamp_to_datetime(ts: str) -> datetime | None:
    try:
        return datetime.fromisoformat(ts.replace("Z", "+00:00"))
    except (ValueError, AttributeError):
        return None


def format_duration(seconds: float) -> str:
    if seconds < 60:
        return f"{seconds:.0f}s"
    elif seconds < 3600:
        minutes = seconds // 60
        secs = seconds % 60
        return f"{int(minutes)}m {int(secs)}s"
    else:
        hours = seconds // 3600
        minutes = (seconds % 3600) // 60
        secs = seconds % 60
        if secs > 0:
            return f"{int(hours)}h {int(minutes)}m {int(secs)}s"
        else:
            return f"{int(hours)}h {int(minutes)}m"


def format_tool_parameters(params: dict[str, Any]) -> str:
    if not params:
        return "_No parameters_"

    lines = []
    for key, value in params.items():
        match value:
            case str():
                if len(value) > 100:
                    lines.append(f"**{key}**: `{value[:100]}...`")
                else:
                    lines.append(f"**{key}**: `{value}`")
            case list() | dict():
                value_json = orjson.dumps(value, option=orjson.OPT_INDENT_2).decode()
                if len(value_json) > 100:
                    lines.append(f"**{key}**: `{value_json[:100]}...`")
                else:
                    lines.append(f"**{key}**: `{value_json}`")
            case _:
                lines.append(f"**{key}**: `{value}`")
    return "\n\n".join(lines)


def extract_text_from_message(msg: dict[str, Any]) -> str:
    message_data = msg.get("message", {})
    content_items = message_data.get("content", [])

    match content_items:
        case str():
            return content_items
        case list():
            text_items = [
                item.get("text", "") for item in content_items if isinstance(item, dict) and item.get("type") == "text"
            ]
            return " ".join(text_items)
        case _:
            return ""


def is_warmup_message(messages: list[dict[str, Any]]) -> bool:
    for msg in messages:
        if msg.get("type") == "user":
            text_content = extract_text_from_message(msg)
            return text_content.strip().lower() == "warmup"
    return False


def filter_warmup_pair(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    # Drop the first user/assistant pair (the "warmup" exchange).
    first_user_found = False
    first_assistant_found = False
    filtered_messages = []

    for msg in messages:
        msg_type = msg.get("type")

        if msg_type == "user" and not first_user_found:
            first_user_found = True
            continue

        if msg_type == "assistant" and first_user_found and not first_assistant_found:
            first_assistant_found = True
            continue

        if first_user_found:
            filtered_messages.append(msg)

    return filtered_messages


def find_last_share_command_index(messages: list[dict[str, Any]]) -> int | None:
    # Walk backwards to find the most recent /share (or /cc-plus:share) invocation.
    for i in range(len(messages) - 1, -1, -1):
        msg = messages[i]

        if msg.get("type") == "compact_marker":
            continue

        if msg.get("type") != "user":
            continue

        message_data = msg.get("message", {})
        content = message_data.get("content", [])

        match content:
            case str():
                if (
                    "<command-name>/share</command-name>" in content
                    or "<command-name>/cc-plus:share</command-name>" in content
                ):
                    return i
            case list():
                for item in content:
                    if isinstance(item, dict):
                        text = item.get("text", "")
                        if (
                            "<command-name>/share</command-name>" in text
                            or "<command-name>/cc-plus:share</command-name>" in text
                        ):
                            return i

    return None


def convert_transcript_to_markdown(
    transcript_path: Path, output_path: Path, session_id: str, usage_data: SessionUsageData | None
) -> None:
    if not transcript_path.exists():
        console.print(f"[red]Error: Transcript file not found: {transcript_path}[/red]")
        sys.exit(1)

    messages: list[dict[str, Any]] = []

    with transcript_path.open("rb") as f:
        for line in f:
            if line.strip():
                try:
                    data = orjson.loads(line)
                    if data.get("type") in ("user", "assistant", "compact_marker"):
                        messages.append(data)
                except orjson.JSONDecodeError:
                    continue

    if not messages:
        console.print("[yellow]No messages found in transcript[/yellow]")
        sys.exit(0)

    if is_warmup_message(messages):
        messages = filter_warmup_pair(messages)

    last_share_index = find_last_share_command_index(messages)
    if last_share_index is not None:
        messages = messages[:last_share_index]
        console.print(
            f"[yellow]📍 Truncating before /share command (excluded message #{last_share_index + 1})[/yellow]"
        )

    first_timestamp: datetime | None = None
    last_timestamp: datetime | None = None

    for msg in messages:
        msg_type = msg.get("type")
        if msg_type == "compact_marker":
            continue

        timestamp_str = msg.get("timestamp", "")
        timestamp_dt = parse_timestamp_to_datetime(timestamp_str)

        if timestamp_dt:
            if first_timestamp is None:
                first_timestamp = timestamp_dt
            last_timestamp = timestamp_dt

    total_session_time = 0.0
    if first_timestamp and last_timestamp:
        total_session_time = (last_timestamp - first_timestamp).total_seconds()

    class Turn(TypedDict):
        user_timestamp: datetime | None
        last_assistant_timestamp: datetime | None

    turns: list[Turn] = []
    current_turn: Turn | None = None

    # Group messages into turns: each user message opens a turn, and the
    # last assistant message before the next user message closes it.
    for msg in messages:
        msg_type = msg.get("type")
        if msg_type == "compact_marker":
            continue

        timestamp_str = msg.get("timestamp", "")
        timestamp_dt = parse_timestamp_to_datetime(timestamp_str)

        match msg_type:
            case "user":
                if current_turn:
                    turns.append(current_turn)

                current_turn = Turn(user_timestamp=timestamp_dt, last_assistant_timestamp=None)

            case "assistant":
                if current_turn and timestamp_dt:
                    current_turn["last_assistant_timestamp"] = timestamp_dt

    if current_turn:
        turns.append(current_turn)

    llm_active_time_total = 0.0
    llm_idle_time_total = 0.0

    for i, turn in enumerate(turns):
        user_ts = turn["user_timestamp"]
        last_asst_ts = turn["last_assistant_timestamp"]

        if user_ts and last_asst_ts:
            active_duration = (last_asst_ts - user_ts).total_seconds()
            llm_active_time_total += active_duration

        if i + 1 < len(turns):
            next_turn = turns[i + 1]
            next_user_ts = next_turn["user_timestamp"]
            if last_asst_ts and next_user_ts:
                idle_duration = (next_user_ts - last_asst_ts).total_seconds()
                llm_idle_time_total += idle_duration

    md_lines = [
        "# 🤖 Claude Code Session Transcript",
        "",
        f"**Session ID**: `{session_id}`",
        f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        f"**Messages**: {len(messages)}",
        "",
        "## 📊 Session Statistics",
        "",
    ]

    if usage_data:
        models_str = ", ".join(usage_data["modelsUsed"])
        md_lines.extend(
            [
                f"**Models Used**: {models_str}",
                "",
                "### Token Usage",
                "",
                f"- **Input Tokens**: {usage_data['inputTokens']:,}",
                f"- **Output Tokens**: {usage_data['outputTokens']:,}",
                f"- **Cache Creation**: {usage_data['cacheCreationTokens']:,}",
                f"- **Cache Read**: {usage_data['cacheReadTokens']:,}",
                f"- **Total Tokens**: {usage_data['totalTokens']:,}",
                "",
                "### 💰 Cost Estimate",
                "",
                f"- **Total Cost**: ${usage_data['totalCost']:.6f}",
            ]
        )

        if usage_data["totalTokens"] > 0 and usage_data["cacheReadTokens"] > 0:
            cache_hit_rate = (usage_data["cacheReadTokens"] / usage_data["totalTokens"]) * 100
            md_lines.append(f"- **Cache Hit Rate**: {cache_hit_rate:.2f}%")

        if usage_data["totalCost"] > 0:
            assistant_count = len([m for m in messages if m.get("type") == "assistant"])
            if assistant_count > 0:
                avg_cost_per_msg = usage_data["totalCost"] / assistant_count
                md_lines.append(f"- **Average Cost per Message**: ${avg_cost_per_msg:.6f}")
    else:
        md_lines.append("**Warning**: Session usage data not available from ccusage")

    if total_session_time > 0:
        md_lines.extend(["", "### ⏱️ Session Timeline", ""])
        md_lines.append(f"- **Total Session Time**: {format_duration(total_session_time)}")

        if llm_active_time_total > 0:
            md_lines.append(f"- **LLM Active Time**: {format_duration(llm_active_time_total)}")

        if llm_idle_time_total > 0:
            md_lines.append(f"- **LLM Idle Time**: {format_duration(llm_idle_time_total)}")

        if llm_active_time_total > 0 and total_session_time > 0:
            utilization = (llm_active_time_total / total_session_time) * 100
            md_lines.append(f"- **LLM Utilization**: {utilization:.1f}%")

    md_lines.extend(["", "---", ""])

    for i, msg in enumerate(messages, 1):
        msg_type = msg.get("type")

        if msg_type == "compact_marker":
            md_lines.extend(["---", "", "## 📦 [COMPACTED]", "", "---", ""])
            continue

        timestamp = parse_timestamp(msg.get("timestamp", ""))
        message_data = msg.get("message", {})
        role = message_data.get("role", msg_type)

        if role == "user":
            is_meta = msg.get("isMeta", False)
            content_items = message_data.get("content", [])

            if is_meta:
                continue
            else:
                match content_items:
                    case str():
                        escaped_content = escape_xml_tags(content_items)
                        quoted_lines = [f"> {line}" if line else ">" for line in escaped_content.split("\n")]
                        md_lines.extend(
                            [
                                f"## 💬 User #{i}",
                                f"**Time**: {timestamp}",
                                "",
                            ]
                        )
                        md_lines.extend(quoted_lines)
                        md_lines.append("")
                    case list():
                        text_items = [
                            item.get("text", "")
                            for item in content_items
                            if isinstance(item, dict) and item.get("type") == "text"
                        ]
                        if text_items:
                            md_lines.extend(
                                [
                                    f"## 💬 User #{i}",
                                    f"**Time**: {timestamp}",
                                    "",
                                ]
                            )
                            for text in text_items:
                                escaped_text = escape_xml_tags(text)
                                quoted_lines = [f"> {line}" if line else ">" for line in escaped_text.split("\n")]
                                md_lines.extend(quoted_lines)
                            md_lines.append("")

        else:
            md_lines.extend(
                [
                    f"## 🤖 Assistant #{i}",
                    f"**Time**: {timestamp}",
                    "",
                ]
            )

            content = message_data.get("content", [])
            match content:
                case str():
                    md_lines.extend([content, ""])
                case list():
                    for item in content:
                        if not isinstance(item, dict):
                            continue

                        item_type = item.get("type")

                        match item_type:
                            case "text":
                                text = item.get("text", "")
                                if text.strip():
                                    quoted_lines = [f"> {line}" if line else ">" for line in text.split("\n")]
                                    md_lines.extend(quoted_lines)
                                    md_lines.append("")

                            case "thinking":
                                thinking = item.get("thinking", "")
                                if thinking.strip():
                                    md_lines.append("> ")
                                    md_lines.append(">> 🧠 Thinking")
                                    thinking_lines = [f">> {line}" if line else ">>" for line in thinking.split("\n")]
                                    md_lines.extend(thinking_lines)
                                    md_lines.append(">")

                            case "tool_use":
                                tool_name = item.get("name", "unknown")
                                tool_input = item.get("input", {})

                                is_subagent = tool_name == "Task"
                                subagent_type = tool_input.get("subagent_type", "") if is_subagent else ""

                                if is_subagent:
                                    tool_display = f"🚀 Subagent: {subagent_type}"
                                else:
                                    tool_display = f"🔧 Tool: {tool_name}"

                                md_lines.extend(
                                    [
                                        "<details>",
                                        f"<summary>{tool_display}</summary>",
                                        "",
                                        format_tool_parameters(tool_input),
                                        "",
                                        "</details>",
                                        "",
                                    ]
                                )

        md_lines.extend(["---", ""])

    markdown_content = "\n".join(md_lines)

    # Collapse consecutive horizontal rules left behind by skipped messages.
    while "\n---\n\n---\n" in markdown_content:
        markdown_content = markdown_content.replace("\n---\n\n---\n", "\n---\n")

    output_path.write_text(markdown_content, encoding="utf-8")
    console.print(f"[green]✅ Markdown saved to: {output_path}[/green]")

    if usage_data:
        console.print(f"\n[bold green]💰 Total Session Cost: ${usage_data['totalCost']:.6f}[/bold green]")


def convert_to_markdown(session_id: str) -> Path | None:
    transcript_path = find_transcript_path(session_id)
    if not transcript_path:
        console.print(f"[red]Error: Transcript not found for session {session_id}[/red]")
        return None

    merged_transcript_path = create_merged_transcript(session_id, transcript_path)
    if not merged_transcript_path:
        console.print("[red]Error: Failed to create merged transcript[/red]")
        return None

    backups = find_pre_compact_backups(session_id)
    if backups:
        console.print(f"[cyan]Found {len(backups)} pre-compact backup(s). Merging...[/cyan]")

    console.print("[cyan]Fetching session usage from ccusage...[/cyan]")
    usage_data = fetch_session_usage_from_ccusage(session_id)

    if usage_data:
        console.print("[green]✓ Session usage data loaded[/green]")
    else:
        console.print("[yellow]⚠ Could not fetch session usage data from ccusage[/yellow]")

    output_dir = Path("/tmp/claude-code-sessions")
    output_dir.mkdir(parents=True, exist_ok=True)

    date_str = datetime.now().strftime("%Y%m%d-%H%M%S")
    output_file = output_dir / f"{session_id}-{date_str}.md"

    try:
        convert_transcript_to_markdown(merged_transcript_path, output_file, session_id, usage_data)
        return output_file
    except Exception as e:
        console.print(f"[red]Error during conversion: {e}[/red]")
        return None


def copy_to_clipboard(text: str) -> None:
    try:
        subprocess.run(["pbcopy"], input=text.encode(), check=True)
    except Exception as e:
        console.print(f"[yellow]Warning: Could not copy to clipboard: {e}[/yellow]")


def main() -> None:
    if len(sys.argv) < 2:
        console.print("[red]Usage: share_session.py <query>[/red]")
        sys.exit(1)

    query = " ".join(sys.argv[1:])
    console.print(f"[cyan]Searching for session matching: {query}[/cyan]")

    todos_dir = Path.home() / ".claude" / "todos"
    session_id = search_todos(query, todos_dir)

    if not session_id:
        console.print(f"[red]No session found matching: {query}[/red]")
        sys.exit(1)

    console.print(f"[green]✓ Found session: {session_id}[/green]")

    output_path = convert_to_markdown(session_id)

    if not output_path:
        sys.exit(1)

    copy_to_clipboard(str(output_path))

    console.print("\n[green]✅ Markdown saved to:[/green]")
    console.print(f"[bold]{output_path}[/bold]")
    console.print("\n[cyan]📋 The path has been copied to your clipboard.[/cyan]")

    print(output_path)
    sys.exit(0)


if __name__ == "__main__":
    main()
496
skills/share-session/scripts/transcript_to_markdown.py
Executable file
@@ -0,0 +1,496 @@
|
||||
#!/usr/bin/env -S uv run --script
|
||||
# /// script
|
||||
# requires-python = "~=3.12"
|
||||
# dependencies = [
|
||||
# "orjson",
|
||||
# "rich",
|
||||
# "typer",
|
||||
# "httpx",
|
||||
# ]
|
||||
# ///
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, NotRequired, TypedDict
|
||||
|
||||
import httpx # pyright: ignore[reportMissingImports]
|
||||
import orjson # pyright: ignore[reportMissingImports]
|
||||
import typer # pyright: ignore[reportMissingImports]
|
||||
from rich.console import Console # pyright: ignore[reportMissingImports]
|
||||
from rich.markdown import Markdown # pyright: ignore[reportMissingImports]
|
||||
|
||||
app = typer.Typer()
|
||||
console = Console()
|
||||
|
||||
LITELLM_PRICING_URL = (
|
||||
"https://raw.githubusercontent.com/BerriAI/litellm/refs/heads/main/model_prices_and_context_window.json"
|
||||
)
|
||||
|
||||
|
||||
class ModelPricing(TypedDict):
|
||||
input_cost_per_token: NotRequired[float]
|
||||
output_cost_per_token: NotRequired[float]
|
||||
cache_creation_input_token_cost: NotRequired[float]
|
||||
cache_read_input_token_cost: NotRequired[float]
|
||||
litellm_provider: str
|
||||
mode: str
|
||||
|
||||
|
||||
def escape_xml_tags(text: str) -> str:
|
||||
"""Escape XML/HTML tags so they display in markdown."""
|
||||
return text.replace("<", r"\<").replace(">", r"\>")
|
||||
|
||||
|
||||
def parse_timestamp(ts: str) -> str:
|
||||
try:
|
||||
dt = datetime.fromisoformat(ts.replace("Z", "+00:00"))
|
||||
local_dt = dt.astimezone()
|
||||
return local_dt.strftime("%Y-%m-%d %H:%M:%S %Z")
|
||||
except (ValueError, AttributeError):
|
||||
return ts
|
||||
|
||||
|
||||
def parse_timestamp_to_datetime(ts: str) -> datetime | None:
|
||||
try:
|
||||
return datetime.fromisoformat(ts.replace("Z", "+00:00"))
|
||||
except (ValueError, AttributeError):
|
||||
return None
|
||||
|
||||
|
||||
def format_duration(seconds: float) -> str:
|
||||
if seconds < 60:
|
||||
return f"{seconds:.0f}s"
|
||||
elif seconds < 3600:
|
||||
minutes = seconds // 60
|
||||
secs = seconds % 60
|
||||
return f"{int(minutes)}m {int(secs)}s"
|
||||
else:
|
||||
hours = seconds // 3600
|
||||
minutes = (seconds % 3600) // 60
|
||||
secs = seconds % 60
|
||||
if secs > 0:
|
||||
return f"{int(hours)}h {int(minutes)}m {int(secs)}s"
|
||||
else:
|
||||
return f"{int(hours)}h {int(minutes)}m"
|
||||
|
||||
|
||||
def format_tool_parameters(params: dict[str, Any]) -> str:
|
||||
if not params:
|
||||
return "_No parameters_"
|
||||
|
||||
lines = []
|
||||
for key, value in params.items():
|
||||
match value:
|
||||
case str():
|
||||
if len(value) > 100:
|
||||
lines.append(f"**{key}**: `{value[:100]}...`")
|
||||
else:
|
||||
lines.append(f"**{key}**: `{value}`")
|
||||
case list() | dict():
|
||||
value_json = orjson.dumps(value, option=orjson.OPT_INDENT_2).decode()
|
||||
if len(value_json) > 100:
|
||||
lines.append(f"**{key}**: `{value_json[:100]}...`")
|
||||
else:
|
||||
lines.append(f"**{key}**: `{value_json}`")
|
||||
case _:
|
||||
lines.append(f"**{key}**: `{value}`")
|
||||
return "\n\n".join(lines)
|
||||
|
||||
|
||||
def format_tool_result(result: dict[str, Any]) -> str:
|
||||
content = result.get("content", "")
|
||||
is_error = result.get("is_error", False)
|
||||
|
||||
if is_error:
|
||||
return f"❌ **Error**:\n```\n{content}\n```"
|
||||
|
||||
match content:
|
||||
case str():
|
||||
return f"```\n{content}\n```"
|
||||
case _:
|
||||
return f"```json\n{orjson.dumps(content, option=orjson.OPT_INDENT_2).decode()}\n```"
|
||||
|
||||
|
||||


def build_tool_map(messages: list[dict[str, Any]]) -> dict[str, dict[str, Any]]:
    """Index tool_result blocks by their tool_use_id.

    In Claude Code transcripts, tool results come back as user messages whose
    content list contains tool_result items.
    """
    tool_map: dict[str, dict[str, Any]] = {}

    for msg in messages:
        if msg.get("type") == "user":
            content = msg.get("message", {}).get("content", [])
            if isinstance(content, list):
                for item in content:
                    if isinstance(item, dict) and item.get("type") == "tool_result":
                        tool_use_id = item.get("tool_use_id")
                        if tool_use_id:
                            tool_map[tool_use_id] = item

    return tool_map


def fetch_pricing_data() -> dict[str, ModelPricing]:
    """Download the LiteLLM per-token pricing table."""
    with httpx.Client(timeout=30.0) as client:
        response = client.get(LITELLM_PRICING_URL)
        response.raise_for_status()
        data = orjson.loads(response.content)
        # "sample_spec" is a documentation entry in the LiteLLM file, not a model.
        if "sample_spec" in data:
            del data["sample_spec"]
        return data
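

# Usage sketch (model name is illustrative): fetch_pricing_data() returns the
# full table, so fetch_pricing_data().get("claude-3-5-haiku-20241022") yields
# that model's ModelPricing dict, or None when LiteLLM does not list the model.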


def find_last_share_command_index(messages: list[dict[str, Any]]) -> int | None:
    """Return the index of the most recent /share command message, if any.

    The scan runs backwards so only the latest invocation matches; compact
    markers are skipped.
    """
    for i in range(len(messages) - 1, -1, -1):
        msg = messages[i]

        if msg.get("type") == "compact_marker":
            continue

        if msg.get("type") != "user":
            continue

        message_data = msg.get("message", {})
        content = message_data.get("content", [])

        match content:
            case str():
                if (
                    "<command-name>/share</command-name>" in content
                    or "<command-name>/cc-plus:share</command-name>" in content
                ):
                    return i
            case list():
                for item in content:
                    if isinstance(item, dict):
                        text = item.get("text", "")
                        if (
                            "<command-name>/share</command-name>" in text
                            or "<command-name>/cc-plus:share</command-name>" in text
                        ):
                            return i

    return None
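

# A /share invocation appears in the transcript roughly as (shape inferred
# from the string checks above):
#   {"type": "user", "message": {"content": "<command-name>/share</command-name> ..."}}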


def calculate_message_cost(
    usage: dict[str, Any], model: str, pricing_data: dict[str, ModelPricing]
) -> tuple[float, dict[str, int]]:
    """Return (cost in USD, token breakdown) for one message's usage block.

    Models missing from the pricing table cost $0, since no rates are known.
    """
    pricing = pricing_data.get(model)
    if not pricing:
        return 0.0, {}

    input_tokens = usage.get("input_tokens", 0)
    output_tokens = usage.get("output_tokens", 0)
    cache_creation_tokens = usage.get("cache_creation_input_tokens", 0)
    cache_read_tokens = usage.get("cache_read_input_tokens", 0)

    input_rate = pricing.get("input_cost_per_token", 0.0)
    output_rate = pricing.get("output_cost_per_token", 0.0)
    cache_creation_rate = pricing.get("cache_creation_input_token_cost", 0.0)
    cache_read_rate = pricing.get("cache_read_input_token_cost", 0.0)

    total_cost = (
        input_tokens * input_rate
        + output_tokens * output_rate
        + cache_creation_tokens * cache_creation_rate
        + cache_read_tokens * cache_read_rate
    )

    token_breakdown = {
        "input": input_tokens,
        "output": output_tokens,
        "cache_creation": cache_creation_tokens,
        "cache_read": cache_read_tokens,
    }

    return total_cost, token_breakdown
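

# Worked example with invented rates: 1,000 input tokens at $3 per million and
# 500 output tokens at $15 per million, with no cache traffic, cost
#   1000 * 3e-06 + 500 * 1.5e-05 = 0.003 + 0.0075 = $0.0105.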


def convert_transcript_to_markdown(
    transcript_path: Path, output_path: Path | None = None, preview: bool = False
) -> None:
    """Convert a Claude Code JSONL transcript into a markdown report.

    When preview is True, nothing is written to disk.
    """
    if not transcript_path.exists():
        console.print(f"[red]Error: Transcript file not found: {transcript_path}[/red]")
        sys.exit(1)

    messages: list[dict[str, Any]] = []

    # The transcript is JSONL: one JSON object per line. Only conversation
    # records are kept; malformed lines are skipped.
    with transcript_path.open("rb") as f:
        for line in f:
            if line.strip():
                try:
                    data = orjson.loads(line)
                    if data.get("type") in ("user", "assistant", "compact_marker"):
                        messages.append(data)
                except orjson.JSONDecodeError:
                    continue

    if not messages:
        console.print("[yellow]No messages found in transcript[/yellow]")
        sys.exit(0)

    # Cut the export off just before the most recent /share invocation so the
    # share command itself does not appear in the shared transcript.
    last_share_index = find_last_share_command_index(messages)
    if last_share_index is not None:
        messages = messages[:last_share_index]
        console.print(
            f"[yellow]📍 Truncating before /share command (excluded message #{last_share_index + 1})[/yellow]"
        )

    build_tool_map(messages)  # tool-result index; not consumed by the renderer below yet

    console.print("[cyan]Fetching pricing data...[/cyan]")
    try:
        pricing_data = fetch_pricing_data()
        console.print("[green]✓ Pricing data loaded[/green]")
    except Exception as e:
        # Pricing is best-effort: without it, costs simply report as $0.
        console.print(f"[yellow]⚠ Could not fetch pricing data: {e}[/yellow]")
        pricing_data = {}
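
    # A minimal assistant record carrying usage data looks roughly like this
    # (field names match what the accumulation loop below reads):
    #   {"type": "assistant", "timestamp": "2024-01-15T10:30:05Z",
    #    "message": {"model": "...", "usage": {"input_tokens": 12,
    #      "output_tokens": 345, "cache_creation_input_tokens": 0,
    #      "cache_read_input_tokens": 0}}}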

    total_cost = 0.0
    total_input_tokens = 0
    total_output_tokens = 0
    total_cache_creation_tokens = 0
    total_cache_read_tokens = 0
    models_used: dict[str, int] = {}

    first_timestamp: datetime | None = None
    last_timestamp: datetime | None = None
    last_user_timestamp: datetime | None = None
    # llm_time_seconds approximates time-to-first-assistant-response summed
    # over user turns, not total generation time.
    llm_time_seconds = 0.0
    llm_started = False

    for msg in messages:
        msg_type = msg.get("type")
        if msg_type == "compact_marker":
            continue

        timestamp_str = msg.get("timestamp", "")
        timestamp_dt = parse_timestamp_to_datetime(timestamp_str)

        if timestamp_dt:
            if first_timestamp is None:
                first_timestamp = timestamp_dt
            last_timestamp = timestamp_dt

        if msg_type == "user":
            last_user_timestamp = timestamp_dt
            llm_started = False
        elif msg_type == "assistant":
            # Attribute the gap between a user message and the first assistant
            # reply that follows it to LLM time.
            if last_user_timestamp and timestamp_dt and not llm_started:
                llm_duration = (timestamp_dt - last_user_timestamp).total_seconds()
                llm_time_seconds += llm_duration
                llm_started = True

            message_data = msg.get("message", {})
            usage = message_data.get("usage")
            if usage:
                model = message_data.get("model", "unknown")
                models_used[model] = models_used.get(model, 0) + 1

                cost, breakdown = calculate_message_cost(usage, model, pricing_data)
                total_cost += cost
                total_input_tokens += breakdown.get("input", 0)
                total_output_tokens += breakdown.get("output", 0)
                total_cache_creation_tokens += breakdown.get("cache_creation", 0)
                total_cache_read_tokens += breakdown.get("cache_read", 0)

    total_tokens = total_input_tokens + total_output_tokens + total_cache_creation_tokens + total_cache_read_tokens

    total_session_time = 0.0
    if first_timestamp and last_timestamp:
        total_session_time = (last_timestamp - first_timestamp).total_seconds()

    md_lines = [
        "# 🤖 Claude Code Session Transcript",
        "",
        f"**Session ID**: `{messages[0].get('sessionId', 'unknown')}`",
        f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
        f"**Messages**: {len(messages)}",
        "",
        "## 📊 Session Statistics",
        "",
        f"**Models Used**: {', '.join(f'{model} ({count})' for model, count in models_used.items())}",
        "",
        "### Token Usage",
        "",
        f"- **Input Tokens**: {total_input_tokens:,}",
        f"- **Output Tokens**: {total_output_tokens:,}",
        f"- **Cache Creation**: {total_cache_creation_tokens:,}",
        f"- **Cache Read**: {total_cache_read_tokens:,}",
        f"- **Total Tokens**: {total_tokens:,}",
        "",
        "### 💰 Cost Estimate",
        "",
        f"- **Total Cost**: ${total_cost:.6f}",
    ]

    if total_tokens > 0 and total_cache_read_tokens > 0:
        cache_hit_rate = (total_cache_read_tokens / total_tokens) * 100
        md_lines.append(f"- **Cache Hit Rate**: {cache_hit_rate:.2f}%")

    if total_cost > 0:
        assistant_count = len([m for m in messages if m.get("type") == "assistant"])
        if assistant_count > 0:
            avg_cost_per_msg = total_cost / assistant_count
            md_lines.append(f"- **Average Cost per Message**: ${avg_cost_per_msg:.6f}")

    if total_session_time > 0:
        md_lines.extend(["", "### ⏱️ Session Timeline", ""])
        md_lines.append(f"- **Total Session Time**: {format_duration(total_session_time)}")
        md_lines.append(f"- **LLM Active Time**: {format_duration(llm_time_seconds)}")

        wait_time = total_session_time - llm_time_seconds
        if wait_time > 0:
            md_lines.append(f"- **Wait Time**: {format_duration(wait_time)}")

        # total_session_time is already known to be positive here, so the
        # division is safe.
        utilization = (llm_time_seconds / total_session_time) * 100
        md_lines.append(f"- **LLM Utilization**: {utilization:.1f}%")

    md_lines.extend(["", "---", ""])
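
    # For the cache hit rate above: e.g. 800,000 cache-read tokens out of
    # 1,000,000 total tokens gives (800_000 / 1_000_000) * 100 = 80.00%.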

    for i, msg in enumerate(messages, 1):
        msg_type = msg.get("type")

        if msg_type == "compact_marker":
            md_lines.extend(["---", "", "## 📦 [COMPACTED]", "", "---", ""])
            continue

        timestamp = parse_timestamp(msg.get("timestamp", ""))
        message_data = msg.get("message", {})
        role = message_data.get("role", msg_type)

        if role == "user":
            is_meta = msg.get("isMeta", False)
            content_items = message_data.get("content", [])

            if is_meta:
                # Meta messages (e.g. injected context) are not part of the
                # visible conversation, so they are skipped.
                continue

            match content_items:
                case str():
                    escaped_content = escape_xml_tags(content_items)
                    quoted_lines = [f"> {line}" if line else ">" for line in escaped_content.split("\n")]
                    md_lines.extend(
                        [
                            f"## 💬 User #{i}",
                            f"**Time**: {timestamp}",
                            "",
                        ]
                    )
                    md_lines.extend(quoted_lines)
                    md_lines.append("")
                case list():
                    text_items = [
                        item.get("text", "")
                        for item in content_items
                        if isinstance(item, dict) and item.get("type") == "text"
                    ]
                    if text_items:
                        md_lines.extend(
                            [
                                f"## 💬 User #{i}",
                                f"**Time**: {timestamp}",
                                "",
                            ]
                        )
                        for text in text_items:
                            escaped_text = escape_xml_tags(text)
                            quoted_lines = [f"> {line}" if line else ">" for line in escaped_text.split("\n")]
                            md_lines.extend(quoted_lines)
                            md_lines.append("")

        else:
            md_lines.extend(
                [
                    f"## 🤖 Assistant #{i}",
                    f"**Time**: {timestamp}",
                    "",
                ]
            )

            content = message_data.get("content", [])
            match content:
                case str():
                    md_lines.extend([content, ""])
                case list():
                    for item in content:
                        if not isinstance(item, dict):
                            continue

                        item_type = item.get("type")

                        match item_type:
                            case "text":
                                text = item.get("text", "")
                                if text.strip():
                                    quoted_lines = [f"> {line}" if line else ">" for line in text.split("\n")]
                                    md_lines.extend(quoted_lines)
                                    md_lines.append("")

                            case "thinking":
                                # Thinking blocks render as a nested blockquote
                                # under the assistant message.
                                thinking = item.get("thinking", "")
                                if thinking.strip():
                                    md_lines.append("> ")
                                    md_lines.append(">> 🧠 Thinking")
                                    thinking_lines = [f">> {line}" if line else ">>" for line in thinking.split("\n")]
                                    md_lines.extend(thinking_lines)
                                    md_lines.append(">")

                            case "tool_use":
                                tool_name = item.get("name", "unknown")
                                tool_input = item.get("input", {})

                                # Task tool calls launch subagents; label them
                                # by their subagent type instead.
                                is_subagent = tool_name == "Task"
                                subagent_type = tool_input.get("subagent_type", "") if is_subagent else ""

                                if is_subagent:
                                    tool_display = f"🚀 Subagent: {subagent_type}"
                                else:
                                    tool_display = f"🔧 Tool: {tool_name}"

                                # Only the call is rendered; tool results
                                # (format_tool_result) are not emitted here.
                                md_lines.extend(
                                    [
                                        "<details>",
                                        f"<summary>{tool_display}</summary>",
                                        "",
                                        format_tool_parameters(tool_input),
                                        "",
                                        "</details>",
                                        "",
                                    ]
                                )

        md_lines.extend(["---", ""])

    markdown_content = "\n".join(md_lines)

    # Collapse runs of consecutive horizontal rules left behind by skipped
    # messages.
    while "\n---\n\n---\n" in markdown_content:
        markdown_content = markdown_content.replace("\n---\n\n---\n", "\n---\n")

    if not preview:
        if output_path:
            output_path.write_text(markdown_content, encoding="utf-8")
            console.print(f"[green]✅ Markdown saved to: {output_path}[/green]")
        else:
            default_output = transcript_path.with_suffix(".md")
            default_output.write_text(markdown_content, encoding="utf-8")
            console.print(f"[green]✅ Markdown saved to: {default_output}[/green]")

    console.print(f"\n[bold green]💰 Total Session Cost: ${total_cost:.6f}[/bold green]")
    console.print("\n[cyan]Preview:[/cyan]")
    console.print(Markdown(markdown_content[:1000] + "\n\n... (truncated)"))


@app.command()
def main(
    transcript: Path = typer.Argument(..., help="Path to transcript JSONL file"),
    output: Path | None = typer.Option(None, "--output", "-o", help="Output markdown file path"),
    preview: bool = typer.Option(False, "--preview", "-p", help="Preview in terminal instead of saving"),
) -> None:
    if preview:
        console.print("[yellow]Preview mode - content will not be saved[/yellow]\n")

    convert_transcript_to_markdown(transcript, output, preview=preview)


if __name__ == "__main__":
    app()
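

# Example invocation (the script name and paths are illustrative; Claude Code
# typically stores session transcripts as JSONL under ~/.claude/projects/):
#   python transcript_to_markdown.py <session>.jsonl -o session.md
#   python transcript_to_markdown.py <session>.jsonl --preview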