{ "$schema": "internal://schemas/plugin.lock.v1.json", "pluginId": "gh:BrandCast-Signage/agent-benchmark-kit:", "normalized": { "repo": null, "ref": "refs/tags/v20251128.0", "commit": "e7c681e83c110648ad1ddceb8cae60f7ae04e4c9", "treeHash": "59f8d4027c637fe883a9887c114f35ba94d6c4a51a815411d0bfd2241e9beb06", "generatedAt": "2025-11-28T10:09:58.720123Z", "toolVersion": "publish_plugins.py@0.2.0" }, "origin": { "remote": "git@github.com:zhongweili/42plugin-data.git", "branch": "master", "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" }, "manifest": { "name": "agent-benchmark-kit", "description": "Automated quality assurance for Claude Code agents using LLM-as-judge evaluation", "version": "1.0.0" }, "content": { "files": [ { "path": "README.md", "sha256": "f7cc203719c7c97c5a236ba87948e02efd7530938e8ed408b7f4f3c07dc9daa2" }, { "path": "agents/benchmark-orchestrator.md", "sha256": "1e08a57094c189cbe63c2acb0daeb6c4012102d029d704593d0e1d3ae9d18aa8" }, { "path": "agents/test-suite-creator.md", "sha256": "dcdaa412b8a686e12eb8cad3128fa7e51a5d5b7eebe5c9775a197ae06b294fb3" }, { "path": "agents/benchmark-judge.md", "sha256": "125e533e3e5cd80205113d8672b7bdfa4f8136259847b7010bbbebbb9d4298b5" }, { "path": ".claude-plugin/plugin.json", "sha256": "403861f56874cfe2018bc334edbd392c2fa65516e8a77acabef9201fb6d879d1" }, { "path": "commands/benchmark-agent.md", "sha256": "2f198c0a949d5c8a9fce854cf050d4c6228ac349cdbf5d628041d1e7a51ec081" } ], "dirSha256": "59f8d4027c637fe883a9887c114f35ba94d6c4a51a815411d0bfd2241e9beb06" }, "security": { "scannedAt": null, "scannerVersion": null, "flags": [] } }