From 058ce5ee4acf6c044c8ebb8d606260dfebe4c013 Mon Sep 17 00:00:00 2001
From: Zhongwei Li
Date: Sat, 29 Nov 2025 18:51:42 +0800
Subject: [PATCH] Initial commit

---
 .claude-plugin/plugin.json                |  15 ++
 README.md                                 |   3 +
 commands/explain-model.md                 |  15 ++
 plugin.lock.json                          |  73 ++++++++
 skills/model-explainability-tool/SKILL.md |  57 +++++++
 .../assets/README.md                      |   7 +
 .../assets/example_explanation.json       |  95 +++++++++++
 .../assets/explanation_template.html      | 160 ++++++++++++++++++
 .../assets/visualization_styles.css       | 118 +++++++++++++
 .../references/README.md                  |   8 +
 .../scripts/README.md                     |   7 +
 11 files changed, 558 insertions(+)
 create mode 100644 .claude-plugin/plugin.json
 create mode 100644 README.md
 create mode 100644 commands/explain-model.md
 create mode 100644 plugin.lock.json
 create mode 100644 skills/model-explainability-tool/SKILL.md
 create mode 100644 skills/model-explainability-tool/assets/README.md
 create mode 100644 skills/model-explainability-tool/assets/example_explanation.json
 create mode 100644 skills/model-explainability-tool/assets/explanation_template.html
 create mode 100644 skills/model-explainability-tool/assets/visualization_styles.css
 create mode 100644 skills/model-explainability-tool/references/README.md
 create mode 100644 skills/model-explainability-tool/scripts/README.md

diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json
new file mode 100644
index 0000000..b2dd86b
--- /dev/null
+++ b/.claude-plugin/plugin.json
@@ -0,0 +1,15 @@
+{
+  "name": "model-explainability-tool",
+  "description": "Model interpretability and explainability",
+  "version": "1.0.0",
+  "author": {
+    "name": "Claude Code Plugins",
+    "email": "[email protected]"
+  },
+  "skills": [
+    "./skills"
+  ],
+  "commands": [
+    "./commands"
+  ]
+}
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..30fd6ff
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+# model-explainability-tool
+
+Model interpretability and explainability
diff --git a/commands/explain-model.md b/commands/explain-model.md
new file mode 100644
index 0000000..7e9e4fa
--- /dev/null
+++ b/commands/explain-model.md
@@ -0,0 +1,15 @@
+---
+description: Explain a machine learning model's predictions and feature importance
+---
+
+# Model Explanation Executor
+
+You are an ML interpretability specialist. When this command is invoked:
+
+1. Analyze the current context to identify the model, data, and explanation goal
+2. Generate code that applies a suitable technique (e.g., SHAP, LIME, permutation importance)
+3. Include data validation and error handling
+4. Report feature importances, performance metrics, and key insights
+5. Save explanation artifacts and generate documentation
+
+Support modern ML frameworks and interpretability best practices.
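The command above operates at the prompt level. For concreteness, here is a minimal sketch of the kind of code step 2 might generate for a tree-based classifier; the dataset, column names, and model choice are illustrative stand-ins for this sketch, not files or APIs shipped by the plugin:

```python
import numpy as np
import pandas as pd
import shap  # pip install shap
from sklearn.ensemble import RandomForestClassifier

# Illustrative stand-in for a real churn dataset.
rng = np.random.default_rng(0)
X = pd.DataFrame({
    "monthly_charges": rng.uniform(20, 120, 500),
    "tenure_months": rng.integers(1, 72, 500),
})
y = ((X["monthly_charges"] > 80) & (X["tenure_months"] < 12)).astype(int)

model = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)

# TreeExplainer is the usual SHAP path for tree ensembles.
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X)

# Older shap releases return a list (one array per class); newer ones
# return a single (samples, features, classes) array. Handle both.
vals = shap_values[1] if isinstance(shap_values, list) else shap_values[..., 1]
importance = pd.Series(np.abs(vals).mean(axis=0), index=X.columns)
print(importance.sort_values(ascending=False))
```

Mean absolute SHAP values per column give the importance ranking that steps 4 and 5 would report and save.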
diff --git a/plugin.lock.json b/plugin.lock.json new file mode 100644 index 0000000..3fba798 --- /dev/null +++ b/plugin.lock.json @@ -0,0 +1,73 @@ +{ + "$schema": "internal://schemas/plugin.lock.v1.json", + "pluginId": "gh:jeremylongshore/claude-code-plugins-plus:plugins/ai-ml/model-explainability-tool", + "normalized": { + "repo": null, + "ref": "refs/tags/v20251128.0", + "commit": "032ded864621262df912c0d3eebec954fcca1d2a", + "treeHash": "666fd883b97b0910449a10adedec2618b2ad6728800a759cf95982852b38d146", + "generatedAt": "2025-11-28T10:18:35.343469Z", + "toolVersion": "publish_plugins.py@0.2.0" + }, + "origin": { + "remote": "git@github.com:zhongweili/42plugin-data.git", + "branch": "master", + "commit": "aa1497ed0949fd50e99e70d6324a29c5b34f9390", + "repoRoot": "/Users/zhongweili/projects/openmind/42plugin-data" + }, + "manifest": { + "name": "model-explainability-tool", + "description": "Model interpretability and explainability", + "version": "1.0.0" + }, + "content": { + "files": [ + { + "path": "README.md", + "sha256": "9be9ea1d530d4982f0d23bdce87b95aa4575d6132faecc19caf9d3b2bea3a1ff" + }, + { + "path": ".claude-plugin/plugin.json", + "sha256": "8a3ec94dc71bad6be32545fda3503d77bc318bc403cfa8bb0264faedf924de44" + }, + { + "path": "commands/explain-model.md", + "sha256": "043efb83e2f02fc6d0869c8a3a7388d6e49f6c809292b93dd6a97a1b142e5647" + }, + { + "path": "skills/model-explainability-tool/SKILL.md", + "sha256": "c15bf83863df7ebbfbd99d38c26cb6a549b93ffaab5ddf2dcd31fb24ca993536" + }, + { + "path": "skills/model-explainability-tool/references/README.md", + "sha256": "46a35476962dfc41038c2963a959567e6e32b503e72c46cc1148075a999c7d77" + }, + { + "path": "skills/model-explainability-tool/scripts/README.md", + "sha256": "a64617c7e25724b9a0388742281cb759dfa894121d369bea510500d275bf3b9d" + }, + { + "path": "skills/model-explainability-tool/assets/visualization_styles.css", + "sha256": "d5997d7461926a243e83df4baf2b2e349e6c75f08b2ff0bcb3dc74d86f8f481a" + }, + { + "path": "skills/model-explainability-tool/assets/explanation_template.html", + "sha256": "ae73a8b03ba2ac6f75d7404d7e4d100457d367306bab35544d4d496c7a758daf" + }, + { + "path": "skills/model-explainability-tool/assets/README.md", + "sha256": "481630c5c17a13e48c3d3dda1b4e2536d1cb29d9274267e4bfc2d5b91f006a6b" + }, + { + "path": "skills/model-explainability-tool/assets/example_explanation.json", + "sha256": "e89686306514086222f1dad8038d3fc1aa8f2a162d10de4c6dbe97d7e2ccbca7" + } + ], + "dirSha256": "666fd883b97b0910449a10adedec2618b2ad6728800a759cf95982852b38d146" + }, + "security": { + "scannedAt": null, + "scannerVersion": null, + "flags": [] + } +} \ No newline at end of file diff --git a/skills/model-explainability-tool/SKILL.md b/skills/model-explainability-tool/SKILL.md new file mode 100644 index 0000000..72c8bf1 --- /dev/null +++ b/skills/model-explainability-tool/SKILL.md @@ -0,0 +1,57 @@ +--- +name: explaining-machine-learning-models +description: | + This skill enables Claude to provide interpretability and explainability for machine learning models. It is triggered when the user requests explanations for model predictions, insights into feature importance, or help understanding model behavior. The skill leverages techniques like SHAP and LIME to generate explanations. It is useful when debugging model performance, ensuring fairness, or communicating model insights to stakeholders. Use this skill when the user mentions "explain model", "interpret model", "feature importance", "SHAP values", or "LIME explanations". 
+allowed-tools: Read, Write, Edit, Grep, Glob, Bash +version: 1.0.0 +--- + +## Overview + +This skill empowers Claude to analyze and explain machine learning models. It helps users understand why a model makes certain predictions, identify the most influential features, and gain insights into the model's overall behavior. + +## How It Works + +1. **Analyze Context**: Claude analyzes the user's request and the available model data. +2. **Select Explanation Technique**: Claude chooses the most appropriate explanation technique (e.g., SHAP, LIME) based on the model type and the user's needs. +3. **Generate Explanations**: Claude uses the selected technique to generate explanations for model predictions. +4. **Present Results**: Claude presents the explanations in a clear and concise format, highlighting key insights and feature importances. + +## When to Use This Skill + +This skill activates when you need to: +- Understand why a machine learning model made a specific prediction. +- Identify the most important features influencing a model's output. +- Debug model performance issues by identifying unexpected feature interactions. +- Communicate model insights to non-technical stakeholders. +- Ensure fairness and transparency in model predictions. + +## Examples + +### Example 1: Understanding Loan Application Decisions + +User request: "Explain why this loan application was rejected." + +The skill will: +1. Analyze the loan application data and the model's prediction. +2. Calculate SHAP values to determine the contribution of each feature to the rejection decision. +3. Present the results, highlighting the features that most strongly influenced the outcome, such as credit score or debt-to-income ratio. + +### Example 2: Identifying Key Factors in Customer Churn + +User request: "Interpret the customer churn model and identify the most important factors." + +The skill will: +1. Analyze the customer churn model and its predictions. +2. Use LIME to generate local explanations for individual customer churn predictions. +3. Aggregate the LIME explanations to identify the most important features driving churn, such as customer tenure or service usage. + +## Best Practices + +- **Model Type**: Choose the explanation technique that is most appropriate for the model type (e.g., tree-based models, neural networks). +- **Data Preprocessing**: Ensure that the data used for explanation is properly preprocessed and aligned with the model's input format. +- **Visualization**: Use visualizations to effectively communicate model insights and feature importances. + +## Integration + +This skill integrates with other data analysis and visualization plugins to provide a comprehensive model understanding workflow. It can be used in conjunction with data cleaning and preprocessing plugins to ensure data quality and with visualization tools to present the explanation results in an informative way. \ No newline at end of file diff --git a/skills/model-explainability-tool/assets/README.md b/skills/model-explainability-tool/assets/README.md new file mode 100644 index 0000000..4306f16 --- /dev/null +++ b/skills/model-explainability-tool/assets/README.md @@ -0,0 +1,7 @@ +# Assets + +Bundled resources for model-explainability-tool skill + +- [ ] explanation_template.html: HTML template for displaying model explanations in a user-friendly format. +- [ ] example_explanation.json: Example JSON output of a model explanation. 
+- [ ] visualization_styles.css: CSS file for styling visualizations of feature importance and model predictions.
diff --git a/skills/model-explainability-tool/assets/example_explanation.json b/skills/model-explainability-tool/assets/example_explanation.json
new file mode 100644
index 0000000..fcbbe32
--- /dev/null
+++ b/skills/model-explainability-tool/assets/example_explanation.json
@@ -0,0 +1,95 @@
+{
+  "_comment": "Example JSON output for a model explanation. This is a template for the model-explainability-tool plugin.",
+  "model_id": "model_v3.2",
+  "model_type": "Classification",
+  "dataset_used": "customer_churn_dataset.csv",
+  "explanation_type": "SHAP",
+  "explanation_timestamp": "2024-01-26T10:30:00Z",
+  "global_explanation": {
+    "_comment": "Global feature importance ranking.",
+    "feature_importance": [
+      {
+        "feature": "contract_length",
+        "importance_score": 0.35,
+        "description": "Length of the customer's contract (e.g., monthly, yearly)."
+      },
+      {
+        "feature": "monthly_charges",
+        "importance_score": 0.28,
+        "description": "The customer's monthly bill amount."
+      },
+      {
+        "feature": "total_charges",
+        "importance_score": 0.22,
+        "description": "Total amount the customer has paid."
+      },
+      {
+        "feature": "internet_service",
+        "importance_score": 0.10,
+        "description": "Type of internet service the customer has (e.g., DSL, Fiber optic)."
+      },
+      {
+        "feature": "online_security",
+        "importance_score": 0.05,
+        "description": "Whether the customer has online security."
+      }
+    ],
+    "summary": "The model's predictions are most influenced by contract length, monthly charges, and total charges. Internet service and online security have a smaller but still measurable impact."
+  },
+  "local_explanation": {
+    "_comment": "Explanation for a specific instance/prediction. Positive contributions push the prediction toward the predicted class.",
+    "instance_id": "customer_123",
+    "predicted_class": "Churn",
+    "prediction_probability": 0.85,
+    "feature_contributions": [
+      {
+        "feature": "contract_length",
+        "contribution": 0.40,
+        "value": "Month-to-month",
+        "reason": "Month-to-month contracts are highly correlated with churn."
+      },
+      {
+        "feature": "monthly_charges",
+        "contribution": 0.25,
+        "value": 75.50,
+        "reason": "Higher monthly charges increase the likelihood of churn."
+      },
+      {
+        "feature": "total_charges",
+        "contribution": 0.10,
+        "value": 200.00,
+        "reason": "Relatively low total charges suggest the customer is new and more likely to churn."
+      },
+      {
+        "feature": "internet_service",
+        "contribution": 0.05,
+        "value": "Fiber optic",
+        "reason": "Fiber optic service is associated with higher churn rates in this dataset."
+      },
+      {
+        "feature": "online_security",
+        "contribution": 0.02,
+        "value": "No",
+        "reason": "Lack of online security slightly increases churn risk."
+      }
+    ],
+    "summary": "This customer is predicted to churn primarily due to their month-to-month contract and high monthly charges. The relatively low total charges also contribute to the prediction."
+  },
+  "fairness_metrics": {
+    "_comment": "Metrics for assessing fairness across different groups.",
+    "protected_attribute": "gender",
+    "metric": "Disparate Impact",
+    "value": 0.95,
+    "threshold": 0.8,
+    "status": "Acceptable",
+    "summary": "The model exhibits acceptable disparate impact across genders, as the value (0.95) is above the threshold (0.8)."
+ }, + "data_bias_detection": { + "_comment": "Results of data bias detection.", + "potential_bias": "Unequal representation of geographic regions in the training data.", + "recommendations": [ + "Collect more data from underrepresented regions.", + "Use re-weighting techniques to balance the data." + ] + } +} \ No newline at end of file diff --git a/skills/model-explainability-tool/assets/explanation_template.html b/skills/model-explainability-tool/assets/explanation_template.html new file mode 100644 index 0000000..6b7ebf1 --- /dev/null +++ b/skills/model-explainability-tool/assets/explanation_template.html @@ -0,0 +1,160 @@ + + + + + + Model Explanation + + + +
+    <div class="container">
+        <header>
+            <h1>Model Explanation</h1>
+        </header>
+
+        <section class="model-summary">
+            <h2>Model Summary</h2>
+            <p><strong>Model Type:</strong> {{model_type}}</p>
+            <p><strong>Model Description:</strong> {{model_description}}</p>
+        </section>
+
+        <section class="feature-importance">
+            <h2>Feature Importance</h2>
+            <p>The table below shows the importance of each feature in the model:</p>
+            <table>
+                <thead>
+                    <tr>
+                        <th>Feature</th>
+                        <th>Importance</th>
+                    </tr>
+                </thead>
+                <tbody>
+                    {{feature_importance_table}}
+                </tbody>
+            </table>
+        </section>
+
+        <section class="example-explanation">
+            <h2>Example Prediction Explanation</h2>
+            <p><strong>Input Data:</strong></p>
+            <pre>{{example_input_data}}</pre>
+            <p><strong>Prediction:</strong></p>
+            <p>{{example_prediction}}</p>
+            <p><strong>Explanation:</strong></p>
+            <p>{{example_explanation}}</p>
+        </section>
+
+        <section class="overall-insights">
+            <h2>Overall Insights</h2>
+            <p>{{overall_insights}}</p>
+        </section>
+    </div>
+</body>
+</html>
+ + \ No newline at end of file diff --git a/skills/model-explainability-tool/assets/visualization_styles.css b/skills/model-explainability-tool/assets/visualization_styles.css new file mode 100644 index 0000000..190a445 --- /dev/null +++ b/skills/model-explainability-tool/assets/visualization_styles.css @@ -0,0 +1,118 @@ +/* + visualization_styles.css - CSS file for styling visualizations of feature importance and model predictions. + + This file provides the styling for visualizations generated by the model-explainability-tool plugin. + It includes styles for feature importance charts, prediction breakdowns, and other interpretability + visualizations. + + Feel free to customize these styles to match your application's design. + + Table of Contents: + 1. General Styles + 2. Feature Importance Chart Styles + 3. Prediction Breakdown Styles + 4. [Add more sections as needed] +*/ + +/* 1. General Styles */ + +body { + font-family: sans-serif; + margin: 0; + padding: 0; +} + +.visualization-container { + margin: 20px; + padding: 20px; + border: 1px solid #ccc; + border-radius: 5px; +} + +.visualization-title { + font-size: 1.5em; + font-weight: bold; + margin-bottom: 10px; +} + +.visualization-description { + font-size: 1em; + color: #555; + margin-bottom: 20px; +} + +/* 2. Feature Importance Chart Styles */ + +.feature-importance-chart { + /* Styles for the chart container */ + width: 100%; + height: 400px; /* Adjust as needed */ +} + +.feature-importance-bar { + /* Styles for individual bars in the chart */ + background-color: #4CAF50; /* Green */ + color: white; + padding: 5px; + margin-bottom: 2px; + text-align: right; +} + +.feature-name { + /* Styles for feature names displayed in the chart */ + float: left; +} + +.feature-importance-value { + /* Styles for feature importance values */ + float: right; +} + +/* 3. Prediction Breakdown Styles */ + +.prediction-breakdown { + /* Styles for the overall prediction breakdown container */ +} + +.prediction-breakdown-item { + /* Styles for individual items in the breakdown */ + margin-bottom: 5px; +} + +.prediction-breakdown-label { + /* Styles for labels in the breakdown */ + font-weight: bold; +} + +.prediction-breakdown-value { + /* Styles for values in the breakdown */ + color: #007bff; /* Blue */ +} + +/* 4. [Add more sections as needed. Example: Styles for specific model types] */ +/* Example: Styles for tree-based models */ + +.tree-node { + /* Styles for nodes in a decision tree visualization */ + border: 1px solid #ddd; + padding: 5px; + margin: 2px; + border-radius: 3px; +} + +/* Placeholders for future customization */ + +/* Example: Add styles for different color palettes */ +/* .color-palette-1 { ... } */ + +/* Add responsive styles for different screen sizes */ +@media (max-width: 768px) { + .visualization-container { + margin: 10px; + padding: 10px; + } + + .feature-importance-chart { + height: 300px; /* Adjust height for smaller screens */ + } +} \ No newline at end of file diff --git a/skills/model-explainability-tool/references/README.md b/skills/model-explainability-tool/references/README.md new file mode 100644 index 0000000..938d0d9 --- /dev/null +++ b/skills/model-explainability-tool/references/README.md @@ -0,0 +1,8 @@ +# References + +Bundled resources for model-explainability-tool skill + +- [ ] shap_documentation.md: Documentation for the SHAP library, including usage examples and API reference. +- [ ] lime_documentation.md: Documentation for the LIME library, including usage examples and API reference. 
+- [ ] model_explanation_best_practices.md: A guide to best practices for explaining machine learning models, including fairness considerations and interpretability metrics. +- [ ] example_model_schema.json: Example schema for the machine learning model input data. diff --git a/skills/model-explainability-tool/scripts/README.md b/skills/model-explainability-tool/scripts/README.md new file mode 100644 index 0000000..47dd57e --- /dev/null +++ b/skills/model-explainability-tool/scripts/README.md @@ -0,0 +1,7 @@ +# Scripts + +Bundled resources for model-explainability-tool skill + +- [ ] explain_model.py: Script to execute model explanation using SHAP or LIME, taking model and data as input. +- [ ] feature_importance.py: Script to calculate and visualize feature importance scores. +- [ ] data_preprocessing.py: Script to preprocess data for model explanation.
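All three scripts are unchecked items, so no implementation ships in this commit. As a rough sketch of what explain_model.py could look like given its one-line description (the CLI shape, pickled-model input, and JSON output are assumptions of this sketch, not the plugin's actual interface):

```python
#!/usr/bin/env python3
"""Illustrative sketch of explain_model.py: SHAP explanations for a saved model."""
import argparse
import pickle

import pandas as pd
import shap  # pip install shap


def main() -> None:
    parser = argparse.ArgumentParser(description="Explain model predictions with SHAP")
    parser.add_argument("model_path", help="Path to a pickled scikit-learn model")
    parser.add_argument("data_path", help="CSV of feature rows to explain")
    parser.add_argument("--output", default="explanation.json", help="Output file")
    args = parser.parse_args()

    with open(args.model_path, "rb") as f:
        model = pickle.load(f)
    data = pd.read_csv(args.data_path)

    # Basic data validation before attempting an explanation.
    if data.empty:
        raise SystemExit("error: no rows to explain")
    if data.isnull().any().any():
        raise SystemExit("error: input has missing values; preprocess first")

    # shap.Explainer selects an appropriate algorithm for the given
    # prediction function and background data.
    explainer = shap.Explainer(model.predict, data)
    explanation = explainer(data)

    # Persist per-row, per-feature attributions as a JSON artifact.
    pd.DataFrame(explanation.values, columns=data.columns).to_json(
        args.output, orient="records"
    )
    print(f"Saved SHAP values for {len(data)} rows to {args.output}")


if __name__ == "__main__":
    main()
```

The companion scripts would plausibly follow the same pattern: feature_importance.py aggregating mean absolute SHAP values per feature, and data_preprocessing.py imputing and encoding columns before explanation.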