{
  "$schema": "internal://schemas/plugin.lock.v1.json",
  "pluginId": "gh:tachyon-beep/skillpacks:plugins/yzmir-deep-rl",
  "normalized": {
    "repo": null,
    "ref": "refs/tags/v20251128.0",
    "commit": "3d8031d752213bc80717c580e222f473351373d2",
    "treeHash": "41151bbe20e784f5cca14bec2f65c61f9769537086b42ffbc7aa6067e21a69de",
    "generatedAt": "2025-11-28T10:28:33.623266Z",
    "toolVersion": "publish_plugins.py@0.2.0"
  },
  "origin": {
    "remote": "git@github.com:zhongweili/42plugin-data.git",
    "branch": "master",
    "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390",
    "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data"
  },
  "manifest": {
    "name": "yzmir-deep-rl",
    "description": "Reinforcement learning - DQN, PPO, SAC, reward shaping, exploration - 13 skills",
    "version": "1.0.2"
  },
  "content": {
    "files": [
      {
        "path": "README.md",
        "sha256": "d3584c0c0ab08be14709fdffbfb1e6badc8a6ac092ab3540bc0a76cdb5b331f2"
      },
      {
        "path": ".claude-plugin/plugin.json",
        "sha256": "93693f5ca21f6ca04afb2fd2c73b3ae300523bbf1f44e502ad80a82818fe5476"
      },
      {
        "path": "skills/using-deep-rl/rl-evaluation.md",
        "sha256": "5af74db50916b96effa9639c55b38c0fb1ef6049cd1da9d8eaaea19105ddfde6"
      },
      {
        "path": "skills/using-deep-rl/offline-rl.md",
        "sha256": "f8b7ed67a1d1ab93e1c7ac0ce9eff2b5987da51ccb37fe22e128b9bc2bf1ed56"
      },
      {
        "path": "skills/using-deep-rl/model-based-rl.md",
        "sha256": "50cc7046715ffb1b33e7d0361eb3db1dd92dd6cb0b794d5f50d54b97504d263f"
      },
      {
        "path": "skills/using-deep-rl/exploration-strategies.md",
        "sha256": "51a818fc79a89de7db65cc718e16bcdb46ed3033089f1c3c5f3745a22e59ba96"
      },
      {
        "path": "skills/using-deep-rl/rl-debugging.md",
        "sha256": "051394537aacae53245c015a9190fc6228cec9c0bc8c0d64086c565d4375877e"
      },
      {
        "path": "skills/using-deep-rl/actor-critic-methods.md",
        "sha256": "4daabcfd84d320c290ae6242a6a778d8e201279a23526472900fb2f039286516"
      },
      {
        "path": "skills/using-deep-rl/reward-shaping-engineering.md",
        "sha256": "69169218da0d054a0a62a4d711a7b68120cd74310ca24111d7d7722846ed2382"
      },
      {
        "path": "skills/using-deep-rl/multi-agent-rl.md",
        "sha256": "38aabcd45ccd6054fedec10987fcf4f8981da206940cbe4bf2f7389204fdfc4a"
      },
      {
        "path": "skills/using-deep-rl/policy-gradient-methods.md",
        "sha256": "eaf348cb0dbb58d7f91662bdd9ada19e7249c967afae6818d52aa2a35566dac0"
      },
      {
        "path": "skills/using-deep-rl/rl-foundations.md",
        "sha256": "e1ee576785a65d8c957a6418c60a9ab4da5c68b4dc60d874eb92fcc99419dfb6"
      },
      {
        "path": "skills/using-deep-rl/rl-environments.md",
        "sha256": "7567f7c9be5ec0dd5b56e77643c976bff3c777a84404b4b33b32174a58a63ce0"
      },
      {
        "path": "skills/using-deep-rl/SKILL.md",
        "sha256": "0f3b040a56864f8bc2865da041015e7b8c50808b557d146551db74b0cceed7e4"
      },
      {
        "path": "skills/using-deep-rl/value-based-methods.md",
        "sha256": "2e375f87591925741dfe67c931051aa9c0d23b19821c80dcbe162371d65dd057"
      }
    ],
    "dirSha256": "41151bbe20e784f5cca14bec2f65c61f9769537086b42ffbc7aa6067e21a69de"
  },
  "security": {
    "scannedAt": null,
    "scannerVersion": null,
    "flags": []
  }
}