Initial commit
This commit is contained in:
186
commands/commit-analysis/.scripts/atomicity-checker.py
Executable file
186
commands/commit-analysis/.scripts/atomicity-checker.py
Executable file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/env python3
|
||||
# Script: atomicity-checker.py
|
||||
# Purpose: Assess if changes form an atomic commit or should be split
|
||||
# Author: Git Commit Assistant Plugin
|
||||
# Version: 1.0.0
|
||||
#
|
||||
# Usage:
|
||||
# git diff HEAD | ./atomicity-checker.py
|
||||
#
|
||||
# Returns:
|
||||
# JSON: {"atomic": true/false, "reasoning": "...", "recommendations": [...]}
|
||||
#
|
||||
# Exit Codes:
|
||||
# 0 - Success
|
||||
# 1 - No input
|
||||
# 2 - Analysis error
|
||||
|
||||
import sys
|
||||
import re
|
||||
import json
|
||||
from collections import defaultdict
|
||||
|
||||
def analyze_atomicity(diff_content):
    """
    Decide whether a diff represents a single logical (atomic) commit.

    Heuristics for "atomic": one detected commit type, one scope (module),
    a modest file count (<= 10), and a modest change size.

    Args:
        diff_content: unified diff text, as produced by `git diff`.

    Returns:
        dict with keys 'atomic', 'reasoning', 'checks', 'metrics' and
        'recommendations' (split strategies, empty when atomic).
    """
    files = []
    types_detected = set()
    scopes_detected = set()
    file_changes = defaultdict(lambda: {'additions': 0, 'deletions': 0})
    current_file = None

    for line in diff_content.split('\n'):
        # File header: "+++ b/<path>" opens a new file's hunks.
        if line.startswith('+++ '):
            path = line[4:].strip()
            if path != '/dev/null' and path.startswith('b/'):
                path = path[2:]
                files.append(path)
                current_file = path

                # Infer a commit type from the file path alone.
                if '.test.' in path or '.spec.' in path:
                    types_detected.add('test')
                elif path.endswith('.md'):
                    types_detected.add('docs')
                elif 'package.json' in path or 'pom.xml' in path:
                    types_detected.add('build')
                elif '.github/workflows' in path or '.gitlab-ci' in path:
                    types_detected.add('ci')

                # Scope = first directory component under src/, if any.
                scope_match = re.match(r'src/([^/]+)/', path)
                if scope_match:
                    scopes_detected.add(scope_match.group(1))

        # Tally added/removed lines against the file currently open.
        if current_file:
            if line.startswith('+') and not line.startswith('+++'):
                file_changes[current_file]['additions'] += 1
            elif line.startswith('-') and not line.startswith('---'):
                file_changes[current_file]['deletions'] += 1

        # Keyword sniffing on '+' lines (file headers included, as before).
        if line.startswith('+'):
            if 'export function' in line or 'export class' in line:
                types_detected.add('feat')
            elif 'fix' in line.lower() or 'error' in line.lower():
                types_detected.add('fix')
            elif 'refactor' in line.lower() or 'rename' in line.lower():
                types_detected.add('refactor')

    # Aggregate metrics.
    total_files = len(files)
    total_additions = sum(stats['additions'] for stats in file_changes.values())
    total_deletions = sum(stats['deletions'] for stats in file_changes.values())
    total_changes = total_additions + total_deletions
    num_types = len(types_detected)
    num_scopes = len(scopes_detected)

    # Individual atomicity checks, reported back to the caller.
    checks = {
        'single_type': num_types <= 1,
        'single_scope': num_scopes <= 1,
        'reasonable_file_count': total_files <= 10,
        'reasonable_change_size': total_changes <= 500,
        'cohesive': num_types <= 1 and num_scopes <= 1
    }

    # The verdict only gates on type, scope and file count; change size is
    # reported but not enforced.
    is_atomic = (
        (checks['single_type'] or num_types == 0)
        and (checks['single_scope'] or num_scopes == 0)
        and checks['reasonable_file_count']
    )

    # Human-readable reasoning string.
    if is_atomic:
        reasoning = f"Changes are atomic: {total_files} files, "
        if num_types <= 1:
            reasoning += f"single type ({list(types_detected)[0] if types_detected else 'unknown'}), "
        if num_scopes <= 1:
            reasoning += f"single scope ({list(scopes_detected)[0] if scopes_detected else 'root'}). "
        reasoning += "Forms a cohesive logical unit."
    else:
        issues = []
        if num_types > 1:
            issues.append(f"multiple types ({', '.join(types_detected)})")
        if num_scopes > 1:
            issues.append(f"multiple scopes ({', '.join(list(scopes_detected)[:3])})")
        if total_files > 10:
            issues.append(f"many files ({total_files})")
        reasoning = f"Changes are NOT atomic: {', '.join(issues)}. Should be split into focused commits."

    # Split strategies, only offered when the diff is not atomic.
    recommendations = []
    if not is_atomic:
        if num_types > 1:
            recommendations.append({
                'strategy': 'Split by type',
                'description': f"Create separate commits for each type: {', '.join(types_detected)}"
            })
        if num_scopes > 1:
            recommendations.append({
                'strategy': 'Split by scope',
                'description': f"Create separate commits for each module: {', '.join(list(scopes_detected)[:3])}"
            })
        if total_files > 15:
            recommendations.append({
                'strategy': 'Split by feature',
                'description': 'Break into smaller logical units (5-10 files per commit)'
            })

    return {
        'atomic': is_atomic,
        'reasoning': reasoning,
        'checks': checks,
        'metrics': {
            'total_files': total_files,
            'total_additions': total_additions,
            'total_deletions': total_deletions,
            'total_changes': total_changes,
            'types_detected': list(types_detected),
            'scopes_detected': list(scopes_detected),
            'num_types': num_types,
            'num_scopes': num_scopes
        },
        'recommendations': recommendations if not is_atomic else []
    }
|
||||
|
||||
def main():
    """Entry point: read a diff from stdin and print the atomicity analysis as JSON."""
    diff_content = sys.stdin.read()

    # Empty input is its own failure mode (exit 1), distinct from an
    # analysis error (exit 2).
    if not diff_content or not diff_content.strip():
        print(json.dumps({
            'error': 'No diff content provided',
            'atomic': None
        }))
        sys.exit(1)

    try:
        result = analyze_atomicity(diff_content)
    except Exception as exc:  # surface any analysis failure as JSON + exit 2
        print(json.dumps({
            'error': str(exc),
            'atomic': None
        }))
        sys.exit(2)

    print(json.dumps(result, indent=2))
    sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
103
commands/commit-analysis/.scripts/git-diff-analyzer.sh
Executable file
103
commands/commit-analysis/.scripts/git-diff-analyzer.sh
Executable file
@@ -0,0 +1,103 @@
|
||||
#!/bin/bash
# Script: git-diff-analyzer.sh
# Purpose: Parse git diff output for detailed file and line change analysis
# Author: Git Commit Assistant Plugin
# Version: 1.0.0
#
# Usage:
#   git diff HEAD | ./git-diff-analyzer.sh
#
# Returns:
#   JSON with file details, line counts, and change summaries
#
# Exit Codes:
#   0 - Success
#   1 - No input
#   2 - Analysis error

# Read the entire diff from stdin.
diff_content=$(cat)

if [ -z "$diff_content" ]; then
    echo '{"error": "No diff content provided"}'
    exit 1
fi

# Counters; per-file stats are keyed "<file>,additions|deletions|status".
total_files=0
total_additions=0
total_deletions=0
declare -A file_stats

# Parse the diff line by line.  A file pair looks like:
#   --- a/<old>    (or "--- /dev/null" when the file is newly added)
#   +++ b/<new>    (or "+++ /dev/null" when the file was deleted)
#
# FIX: the original "new file" branch repeated the exact regex of the first
# branch and was unreachable (status "A" was never assigned), and the
# "deleted file" branch keyed off a whole-diff substring check and never
# incremented total_files.  Added/deleted files are now detected by pairing
# each "+++"/"---" header with its /dev/null counterpart.
current_file=""
old_is_devnull=false
pending_old=""
while IFS= read -r line; do
    if [[ "$line" == "--- /dev/null" ]]; then
        # Old side missing: the next "+++ b/<path>" is a newly added file.
        old_is_devnull=true
        pending_old=""
    elif [[ "$line" =~ ^---\ a/(.+)$ ]]; then
        old_is_devnull=false
        pending_old="${BASH_REMATCH[1]}"
    elif [[ "$line" == "+++ /dev/null" ]]; then
        # New side missing: the old path recorded above was deleted.
        if [ -n "$pending_old" ]; then
            current_file="$pending_old"
            ((total_files++))
            file_stats["$current_file,additions"]=0
            file_stats["$current_file,deletions"]=0
            file_stats["$current_file,status"]="D"
        fi
        pending_old=""
    elif [[ "$line" =~ ^\+\+\+\ b/(.+)$ ]]; then
        current_file="${BASH_REMATCH[1]}"
        ((total_files++))
        file_stats["$current_file,additions"]=0
        file_stats["$current_file,deletions"]=0
        if [ "$old_is_devnull" = true ]; then
            file_stats["$current_file,status"]="A"
        else
            file_stats["$current_file,status"]="M"
        fi
        old_is_devnull=false
        pending_old=""
    # Added line ("+x" but not a "+++" header).
    elif [[ "$line" =~ ^\+[^+] ]] && [ -n "$current_file" ]; then
        ((total_additions++))
        ((file_stats["$current_file,additions"]++))
    # Removed line ("-x" but not a "---" header).
    elif [[ "$line" =~ ^-[^-] ]] && [ -n "$current_file" ]; then
        ((total_deletions++))
        ((file_stats["$current_file,deletions"]++))
    fi
done <<< "$diff_content"

# Build JSON output.
# NOTE(review): file names are not JSON-escaped; a path containing a quote
# or backslash would produce invalid JSON -- confirm input is trusted.
echo "{"
echo "  \"summary\": {"
echo "    \"total_files\": $total_files,"
echo "    \"total_additions\": $total_additions,"
echo "    \"total_deletions\": $total_deletions,"
echo "    \"net_change\": $((total_additions - total_deletions))"
echo "  },"
echo "  \"files\": ["

# Emit one JSON object per file (iterate only the ",status" keys so each
# file appears exactly once).
first=true
for key in "${!file_stats[@]}"; do
    if [[ "$key" == *",status" ]]; then
        file="${key%,status}"
        status="${file_stats[$key]}"
        additions=${file_stats["$file,additions"]:-0}
        deletions=${file_stats["$file,deletions"]:-0}

        if [ "$first" = true ]; then
            first=false
        else
            echo ","
        fi

        echo -n "    {"
        echo -n "\"file\": \"$file\", "
        echo -n "\"status\": \"$status\", "
        echo -n "\"additions\": $additions, "
        echo -n "\"deletions\": $deletions, "
        echo -n "\"net\": $((additions - deletions))"
        echo -n "}"
    fi
done

echo ""
echo "  ]"
echo "}"

exit 0
|
||||
131
commands/commit-analysis/.scripts/scope-identifier.sh
Executable file
131
commands/commit-analysis/.scripts/scope-identifier.sh
Executable file
@@ -0,0 +1,131 @@
|
||||
#!/bin/bash
# Script: scope-identifier.sh
# Purpose: Identify primary scope (module/component) from file paths
# Author: Git Commit Assistant Plugin
# Version: 1.0.0
#
# Usage:
#   git diff HEAD --name-only | ./scope-identifier.sh
#   ./scope-identifier.sh < files.txt
#
# Returns:
#   JSON: {"scope": "auth", "confidence": "high", "affected_areas": {...}}
#
# Exit Codes:
#   0 - Success
#   1 - No input
#   2 - Analysis error

# Read file paths from stdin.  Blank lines are skipped here (the original
# stored them, which inflated the denominator of the confidence percentage).
files=()
while IFS= read -r line; do
    [ -n "$line" ] && files+=("$line")
done

if [ ${#files[@]} -eq 0 ]; then
    echo '{"error": "No files provided", "scope": null}'
    exit 1
fi

# How many files fall under each detected scope.
declare -A scope_counts

# Map each path to a scope via ordered patterns (first match wins).
for file in "${files[@]}"; do
    scope=""

    # Pattern 1: src/<scope>/*
    if [[ "$file" =~ ^src/([^/]+)/ ]]; then
        scope="${BASH_REMATCH[1]}"
    # Pattern 2: components/<Component> -- PascalCase converted to kebab-case
    elif [[ "$file" =~ components/([^/]+) ]]; then
        component="${BASH_REMATCH[1]}"
        scope=$(echo "$component" | sed 's/\([A-Z]\)/-\1/g' | tr '[:upper:]' '[:lower:]' | sed 's/^-//')
    # Pattern 3: tests/<module> -- strip trailing .test/.spec suffixes
    elif [[ "$file" =~ tests?/([^/]+) ]]; then
        scope="${BASH_REMATCH[1]}"
        scope=$(echo "$scope" | sed 's/\.test.*$//' | sed 's/\.spec.*$//')
    # Pattern 4: docs/ or doc/
    elif [[ "$file" =~ ^docs?/ ]]; then
        scope="docs"
    # Pattern 5: CI workflow definitions
    elif [[ "$file" =~ \.github/workflows ]]; then
        scope="ci"
    # Pattern 6: build/tool configuration files
    elif [[ "$file" =~ (package\.json|tsconfig\.json|.*\.config\.(js|ts|json)) ]]; then
        scope="config"
    # Pattern 7: root README
    elif [[ "$file" == "README.md" ]]; then
        scope="docs"
    fi

    if [ -n "$scope" ]; then
        scope_counts[$scope]=$(( ${scope_counts[$scope]:-0} + 1 ))
    fi
done

# Primary scope = the one covering the most files.
primary_scope=""
max_count=0
for scope in "${!scope_counts[@]}"; do
    count=${scope_counts[$scope]}
    if [ "$count" -gt "$max_count" ]; then
        max_count=$count
        primary_scope="$scope"
    fi
done

# Confidence from the share of files under the primary scope.
confidence="low"
total_files=${#files[@]}
if [ -n "$primary_scope" ]; then
    primary_percentage=$((max_count * 100 / total_files))
    if [ "$primary_percentage" -ge 80 ]; then
        confidence="high"
    elif [ "$primary_percentage" -ge 50 ]; then
        confidence="medium"
    fi
fi

# Per-scope file counts as a JSON object.
affected_areas="{"
first=true
for scope in "${!scope_counts[@]}"; do
    if [ "$first" = true ]; then
        first=false
    else
        affected_areas+=","
    fi
    affected_areas+="\"$scope\":${scope_counts[$scope]}"
done
affected_areas+="}"

# Human-readable reasoning.
if [ -n "$primary_scope" ]; then
    reasoning="Primary scope '$primary_scope' identified from $max_count of $total_files files ($primary_percentage%)."
else
    reasoning="Unable to identify clear scope. Files span multiple unrelated areas."
fi

# FIX: the original emitted `"scope": ,` (invalid JSON) when no scope was
# identified; emit an explicit JSON null instead.
if [ -n "$primary_scope" ]; then
    scope_json="\"$primary_scope\""
else
    scope_json="null"
fi

# Output JSON
cat <<EOF
{
  "scope": $scope_json,
  "confidence": "$confidence",
  "reasoning": "$reasoning",
  "affected_areas": $affected_areas,
  "total_files": $total_files,
  "primary_file_count": $max_count,
  "primary_percentage": ${primary_percentage:-0}
}
EOF

exit 0
|
||||
246
commands/commit-analysis/.scripts/type-detector.py
Executable file
246
commands/commit-analysis/.scripts/type-detector.py
Executable file
@@ -0,0 +1,246 @@
|
||||
#!/usr/bin/env python3
|
||||
# Script: type-detector.py
|
||||
# Purpose: Detect conventional commit type from git diff analysis
|
||||
# Author: Git Commit Assistant Plugin
|
||||
# Version: 1.0.0
|
||||
#
|
||||
# Usage:
|
||||
# git diff HEAD | ./type-detector.py
|
||||
# ./type-detector.py < diff.txt
|
||||
#
|
||||
# Returns:
|
||||
# JSON: {"type": "feat", "confidence": "high", "reasoning": "..."}
|
||||
#
|
||||
# Exit Codes:
|
||||
# 0 - Success
|
||||
# 1 - No input provided
|
||||
# 2 - Analysis error
|
||||
|
||||
import sys
|
||||
import re
|
||||
import json
|
||||
|
||||
def detect_type_from_diff(diff_content):
    """
    Detect the conventional commit type for a diff via a priority decision tree.

    Priority order:
    1. feat - new files/functions
    2. fix - bug fixes/error handling
    3. docs - documentation only
    4. style - formatting only
    5. test - test files only
    6. build - dependencies
    7. ci - CI/CD configs
    8. perf - performance
    9. refactor - code restructuring
    10. chore - fallback

    Args:
        diff_content: unified diff text, as produced by `git diff`.

    Returns:
        dict with 'type', 'confidence', 'reasoning' and 'indicators'.
    """
    lines = diff_content.split('\n')

    # Signal counters/flags gathered in a single pass over the diff.
    indicators = {
        'new_files': 0,
        'new_exports': 0,
        'bug_keywords': 0,
        'error_handling': 0,
        'docs_only': True,
        'test_only': True,
        'formatting_only': True,
        'build_files': 0,
        'ci_files': 0,
        'perf_keywords': 0,
        'refactor_keywords': 0
    }

    # True while the most recent "---" header was /dev/null, i.e. the next
    # "+++ b/<path>" header introduces a brand-new file.
    # FIX: the original counted EVERY "+++ b/" header as a new file, which
    # classified virtually any diff as feat.
    old_side_is_devnull = False

    for line in lines:
        if line.startswith('--- '):
            old_side_is_devnull = line[4:].strip() == '/dev/null'

        # New file indicator: "+++ b/<path>" paired with "--- /dev/null".
        if line.startswith('+++ b/'):
            if old_side_is_devnull:
                indicators['new_files'] += 1
            old_side_is_devnull = False

        # New exports (feat indicator)
        if line.startswith('+') and ('export function' in line or 'export class' in line or 'export const' in line):
            indicators['new_exports'] += 1

        # Bug fix keywords
        if line.startswith('+') and any(kw in line.lower() for kw in ['fix', 'resolve', 'correct', 'handle error']):
            indicators['bug_keywords'] += 1

        # Error handling constructs (fix indicator).
        # FIX: the original had an unterminated string here ('if (! in line),
        # which made the whole file a SyntaxError; corrected to 'if (!'.
        if line.startswith('+') and ('try {' in line or 'catch' in line or 'if (!' in line or 'throw' in line):
            indicators['error_handling'] += 1

        # docs_only: falsified by any changed file that is not markdown.
        # NOTE(review): the `'# '` exemption looks intended to skip header
        # noise -- confirm against real callers.
        if line.startswith('+++') and not line.endswith('.md') and '# ' not in line:
            if '/dev/null' not in line:
                indicators['docs_only'] = False

        # test_only: falsified by any changed file without a test marker.
        if line.startswith('+++'):
            if not ('.test.' in line or '.spec.' in line or '_test' in line):
                if '/dev/null' not in line:
                    indicators['test_only'] = False

        # formatting_only: falsified by any added line with real content.
        if line.startswith('+') and len(line.strip()) > 1:
            stripped = line[1:].strip()
            if stripped and not stripped.isspace():
                indicators['formatting_only'] = False

        # Build files (package.json, etc.)
        if 'package.json' in line or 'pom.xml' in line or 'build.gradle' in line:
            indicators['build_files'] += 1

        # CI configuration files
        if '.github/workflows' in line or '.gitlab-ci' in line or 'Jenkinsfile' in line:
            indicators['ci_files'] += 1

        # Performance keywords
        if line.startswith('+') and any(kw in line.lower() for kw in ['optimize', 'cache', 'memoize', 'performance']):
            indicators['perf_keywords'] += 1

        # Refactor keywords
        if line.startswith('+') and any(kw in line.lower() for kw in ['extract', 'rename', 'simplify', 'reorganize']):
            indicators['refactor_keywords'] += 1

    # --- Decision tree (first match wins) ---

    # 1. feat: new files or several new exports
    if indicators['new_files'] > 0 or indicators['new_exports'] > 2:
        return {
            'type': 'feat',
            'confidence': 'high' if indicators['new_files'] > 0 else 'medium',
            'reasoning': f"New files ({indicators['new_files']}) or new exports ({indicators['new_exports']}) detected. Indicates new feature.",
            'indicators': {
                'new_files': indicators['new_files'],
                'new_exports': indicators['new_exports']
            }
        }

    # 2. fix: error handling or bug-fix keywords
    if indicators['error_handling'] > 2 or indicators['bug_keywords'] > 1:
        return {
            'type': 'fix',
            'confidence': 'high',
            'reasoning': f"Error handling ({indicators['error_handling']}) or bug fix keywords ({indicators['bug_keywords']}) found. Indicates bug fix.",
            'indicators': {
                'error_handling': indicators['error_handling'],
                'bug_keywords': indicators['bug_keywords']
            }
        }

    # 3. docs: only markdown files touched
    if indicators['docs_only']:
        return {
            'type': 'docs',
            'confidence': 'high',
            'reasoning': "Only documentation files (.md) changed. Pure documentation update.",
            'indicators': {}
        }

    # 4. style: only whitespace/formatting additions
    if indicators['formatting_only']:
        return {
            'type': 'style',
            'confidence': 'high',
            'reasoning': "Only formatting/whitespace changes detected. No logic changes.",
            'indicators': {}
        }

    # 5. test: only test files touched
    if indicators['test_only']:
        return {
            'type': 'test',
            'confidence': 'high',
            'reasoning': "Only test files changed. Test additions or updates.",
            'indicators': {}
        }

    # 6. build: dependency/build manifests mentioned
    if indicators['build_files'] > 0:
        return {
            'type': 'build',
            'confidence': 'high',
            'reasoning': f"Build files ({indicators['build_files']}) changed. Dependency or build system updates.",
            'indicators': {
                'build_files': indicators['build_files']
            }
        }

    # 7. ci: CI/CD configuration mentioned
    if indicators['ci_files'] > 0:
        return {
            'type': 'ci',
            'confidence': 'high',
            'reasoning': f"CI/CD configuration files ({indicators['ci_files']}) changed.",
            'indicators': {
                'ci_files': indicators['ci_files']
            }
        }

    # 8. perf: performance keywords
    if indicators['perf_keywords'] > 2:
        return {
            'type': 'perf',
            'confidence': 'medium',
            'reasoning': f"Performance-related keywords ({indicators['perf_keywords']}) found.",
            'indicators': {
                'perf_keywords': indicators['perf_keywords']
            }
        }

    # 9. refactor: refactoring keywords
    if indicators['refactor_keywords'] > 2:
        return {
            'type': 'refactor',
            'confidence': 'medium',
            'reasoning': f"Refactoring keywords ({indicators['refactor_keywords']}) found.",
            'indicators': {
                'refactor_keywords': indicators['refactor_keywords']
            }
        }

    # 10. chore: nothing matched
    return {
        'type': 'chore',
        'confidence': 'low',
        'reasoning': "Changes don't match specific patterns. Defaulting to chore.",
        'indicators': {}
    }
|
||||
|
||||
def main():
    """Entry point: read a diff from stdin and print the detected type as JSON."""
    diff_content = sys.stdin.read()

    # Distinguish "nothing to analyze" (exit 1) from analysis failure (exit 2).
    if not diff_content or not diff_content.strip():
        print(json.dumps({
            'error': 'No diff content provided',
            'type': None,
            'confidence': None
        }))
        sys.exit(1)

    try:
        result = detect_type_from_diff(diff_content)
    except Exception as exc:  # surface any analysis failure as JSON + exit 2
        print(json.dumps({
            'error': str(exc),
            'type': None,
            'confidence': None
        }))
        sys.exit(2)

    print(json.dumps(result, indent=2))
    sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
Reference in New Issue
Block a user