Initial commit
skills/product-design/functions/component_mapper.py (294 lines, executable file)
@@ -0,0 +1,294 @@
#!/usr/bin/env python3
"""
Map Figma components to codebase components using Code Connect data and fuzzy matching.
"""

import json
import argparse
import os
from typing import Dict, List, Any
from difflib import SequenceMatcher


def calculate_similarity(str1: str, str2: str) -> float:
    """Calculate similarity ratio between two strings."""
    return SequenceMatcher(None, str1.lower(), str2.lower()).ratio()
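
# Illustrative values (not part of the original file): the ratio is
# case-insensitive, so calculate_similarity('Button', 'button') == 1.0,
# while calculate_similarity('Button', 'ButtonGroup') is roughly 0.71.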


def find_component_files(project_root: str, extensions: List[str] = None) -> List[Dict[str, str]]:
    """
    Find all component files in project.

    Args:
        project_root: Project root directory
        extensions: File extensions to search (default: ['tsx', 'jsx', 'vue', 'svelte'])

    Returns:
        List of component file info (path, name)
    """
    if extensions is None:
        extensions = ['tsx', 'jsx', 'vue', 'svelte']

    components = []

    for root, dirs, files in os.walk(project_root):
        # Skip node_modules, dist, build directories
        dirs[:] = [d for d in dirs if d not in ['node_modules', 'dist', 'build', '.git', '.next']]

        for file in files:
            if any(file.endswith(f'.{ext}') for ext in extensions):
                full_path = os.path.join(root, file)
                rel_path = os.path.relpath(full_path, project_root)

                # Extract component name (filename without extension)
                comp_name = os.path.splitext(file)[0]

                # Skip test files, stories, etc.
                if any(suffix in comp_name.lower() for suffix in ['.test', '.spec', '.stories', '.story']):
                    continue

                components.append({
                    'name': comp_name,
                    'path': rel_path,
                    'full_path': full_path
                })

    return components
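
# A minimal usage sketch with hypothetical paths:
#
#     comps = find_component_files('./my-app')
#     # -> [{'name': 'Button',
#     #      'path': 'src/components/Button.tsx',
#     #      'full_path': './my-app/src/components/Button.tsx'}, ...]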


def fuzzy_match_component(figma_name: str, codebase_components: List[Dict[str, str]],
                          threshold: float = 0.6) -> List[Dict[str, Any]]:
    """
    Fuzzy match Figma component name to codebase components.

    Args:
        figma_name: Figma component name
        codebase_components: List of codebase component info
        threshold: Minimum similarity threshold

    Returns:
        List of matches with confidence scores
    """
    matches = []

    # Clean Figma name (remove variant info)
    # "Button/Primary/Large" → "Button"
    base_name = figma_name.split('/')[0].strip()

    for comp in codebase_components:
        comp_name = comp['name']
        similarity = calculate_similarity(base_name, comp_name)

        if similarity >= threshold:
            matches.append({
                'figma_name': figma_name,
                'code_component': comp_name,
                'code_path': comp['path'],
                'confidence': round(similarity, 3),
                'match_type': 'fuzzy'
            })

    # Sort by confidence
    matches.sort(key=lambda x: x['confidence'], reverse=True)

    return matches
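
# Example of the returned shape, assuming a hypothetical inventory entry
# named 'Button' (values are illustrative):
#
#     fuzzy_match_component('Button/Primary/Large', comps)
#     # -> [{'figma_name': 'Button/Primary/Large', 'code_component': 'Button',
#     #      'code_path': 'src/components/Button.tsx', 'confidence': 1.0,
#     #      'match_type': 'fuzzy'}]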


def extract_variant_mapping(figma_name: str) -> Dict[str, str]:
    """
    Extract variant information from Figma component name.

    Examples:
        "Button/Primary/Large" → {"variant": "primary", "size": "lg"}
        "Card/Elevated" → {"type": "elevated"}

    Args:
        figma_name: Figma component name with variants

    Returns:
        Dictionary of variant properties
    """
    parts = [p.strip() for p in figma_name.split('/')]

    if len(parts) == 1:
        return {}

    # Base component is first part
    variants = parts[1:]

    # Map common variant patterns
    mapping = {}

    for variant in variants:
        variant_lower = variant.lower()

        # Size variants
        if variant_lower in ['small', 'sm', 'xs', 'tiny']:
            mapping['size'] = 'sm'
        elif variant_lower in ['medium', 'md', 'base']:
            mapping['size'] = 'md'
        elif variant_lower in ['large', 'lg']:
            mapping['size'] = 'lg'
        elif variant_lower in ['xl', 'xlarge', 'extra-large']:
            mapping['size'] = 'xl'

        # Style variants
        elif variant_lower in ['primary', 'main']:
            mapping['variant'] = 'primary'
        elif variant_lower in ['secondary', 'outline', 'outlined']:
            mapping['variant'] = 'secondary'
        elif variant_lower in ['tertiary', 'ghost', 'link', 'text']:
            mapping['variant'] = 'ghost'

        # State variants
        elif variant_lower in ['disabled', 'inactive']:
            mapping['state'] = 'disabled'
        elif variant_lower in ['loading', 'busy']:
            mapping['state'] = 'loading'

        # Type variants
        elif variant_lower in ['solid', 'filled']:
            mapping['type'] = 'solid'
        elif variant_lower in ['elevated', 'raised']:
            mapping['type'] = 'elevated'
        elif variant_lower in ['flat', 'plain']:
            mapping['type'] = 'flat'

        # If no pattern matches, use as generic variant
        else:
            if 'variant' not in mapping:
                mapping['variant'] = variant_lower

    return mapping
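
# Worked examples, following the branches above:
#
#     extract_variant_mapping('Button/Primary/Large')  # -> {'variant': 'primary', 'size': 'lg'}
#     extract_variant_mapping('Card/Elevated')         # -> {'type': 'elevated'}
#     extract_variant_mapping('Button')                # -> {}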


def map_components(figma_components: List[Dict[str, Any]],
                   code_connect_map: Dict[str, Any],
                   project_root: str) -> Dict[str, Any]:
    """
    Main mapping function: map Figma components to codebase components.

    Args:
        figma_components: List of Figma components from design_analyzer
        code_connect_map: Figma Code Connect mappings
        project_root: Project root directory for component search

    Returns:
        Component mappings with confidence scores
    """
    # Find all component files in codebase
    codebase_components = find_component_files(project_root)

    mappings = {
        'mapped': [],
        'unmapped': [],
        'low_confidence': [],
        'summary': {}
    }

    for figma_comp in figma_components:
        comp_id = figma_comp.get('id')
        comp_name = figma_comp.get('name')

        # Check Code Connect first (highest confidence)
        if comp_id and comp_id in code_connect_map:
            code_connect_data = code_connect_map[comp_id]
            mappings['mapped'].append({
                'figma_id': comp_id,
                'figma_name': comp_name,
                'code_component': code_connect_data.get('codeConnectName'),
                'code_path': code_connect_data.get('codeConnectSrc'),
                'confidence': 1.0,
                'match_type': 'code_connect',
                'props_mapping': extract_variant_mapping(comp_name)
            })
        else:
            # Fallback to fuzzy matching
            matches = fuzzy_match_component(comp_name, codebase_components, threshold=0.6)

            if matches and matches[0]['confidence'] >= 0.8:
                # High confidence match
                best_match = matches[0]
                best_match['figma_id'] = comp_id
                best_match['props_mapping'] = extract_variant_mapping(comp_name)
                mappings['mapped'].append(best_match)

            elif matches:
                # Low confidence match (manual review needed)
                for match in matches[:3]:  # Top 3 matches
                    match['figma_id'] = comp_id
                    match['props_mapping'] = extract_variant_mapping(comp_name)
                    mappings['low_confidence'].append(match)

            else:
                # No match found
                mappings['unmapped'].append({
                    'figma_id': comp_id,
                    'figma_name': comp_name,
                    'recommendation': 'Create new component',
                    'props_mapping': extract_variant_mapping(comp_name)
                })

    # Generate summary
    total = len(figma_components)
    mappings['summary'] = {
        'total_figma_components': total,
        'mapped_count': len(mappings['mapped']),
        'low_confidence_count': len(mappings['low_confidence']),
        'unmapped_count': len(mappings['unmapped']),
        'mapping_coverage': f"{(len(mappings['mapped']) / max(total, 1)) * 100:.1f}%"
    }

    return mappings


def main():
    parser = argparse.ArgumentParser(
        description='Map Figma components to codebase components'
    )
    parser.add_argument(
        '--figma-components',
        required=True,
        help='Path to JSON file with Figma components (from design_analyzer)'
    )
    parser.add_argument(
        '--code-connect-map',
        help='Path to Code Connect map JSON (optional)'
    )
    parser.add_argument(
        '--project-root',
        required=True,
        help='Project root directory'
    )
    parser.add_argument(
        '--output',
        help='Output file path (default: stdout)'
    )

    args = parser.parse_args()

    # Load Figma components
    with open(args.figma_components, 'r') as f:
        figma_components = json.load(f)

    # Load Code Connect map if provided
    code_connect_map = {}
    if args.code_connect_map:
        with open(args.code_connect_map, 'r') as f:
            code_connect_map = json.load(f)

    # Run mapping
    mappings = map_components(figma_components, code_connect_map, args.project_root)

    # Output results
    output_json = json.dumps(mappings, indent=2)

    if args.output:
        with open(args.output, 'w') as f:
            f.write(output_json)
    else:
        print(output_json)


if __name__ == '__main__':
    main()
skills/product-design/functions/design_analyzer.py (445 lines, executable file)
@@ -0,0 +1,445 @@
#!/usr/bin/env python3
"""
Analyze Figma design data and extract patterns, components, and tokens.
Compares against existing UI kit to identify new components and potential reuse opportunities.
"""

import json
import argparse
from typing import Dict, List, Any
from difflib import SequenceMatcher


def calculate_similarity(str1: str, str2: str) -> float:
    """
    Calculate similarity ratio between two strings.

    Args:
        str1: First string
        str2: Second string

    Returns:
        float: Similarity ratio (0.0 to 1.0)
    """
    return SequenceMatcher(None, str1.lower(), str2.lower()).ratio()


def extract_components_from_metadata(metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Extract component information from Figma metadata.

    Args:
        metadata: Figma MCP get_metadata response or manual structure

    Returns:
        List of components with their properties
    """
    components = []

    def traverse_nodes(node, depth=0):
        """Recursively traverse Figma node tree."""
        if not isinstance(node, dict):
            return

        node_type = node.get('type', '')
        node_name = node.get('name', 'Unnamed')
        node_id = node.get('id', '')

        # Identify components (COMPONENT, COMPONENT_SET, or instances)
        if node_type in ['COMPONENT', 'COMPONENT_SET', 'INSTANCE']:
            components.append({
                'id': node_id,
                'name': node_name,
                'type': node_type,
                'depth': depth,
                'properties': extract_node_properties(node)
            })

        # Traverse children
        children = node.get('children', [])
        for child in children:
            traverse_nodes(child, depth + 1)

    # Handle both MCP format and manual format
    if 'document' in metadata:
        traverse_nodes(metadata['document'])
    elif 'nodes' in metadata:
        for node in metadata['nodes']:
            traverse_nodes(node)
    elif isinstance(metadata, dict):
        traverse_nodes(metadata)

    return components
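
# A minimal sketch of input and output, using hypothetical node data:
#
#     meta = {'document': {'type': 'CANVAS', 'children': [
#         {'id': '1:2', 'type': 'COMPONENT', 'name': 'Button/Primary'}]}}
#     extract_components_from_metadata(meta)
#     # -> [{'id': '1:2', 'name': 'Button/Primary', 'type': 'COMPONENT',
#     #      'depth': 1, 'properties': {}}]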


def extract_node_properties(node: Dict[str, Any]) -> Dict[str, Any]:
    """
    Extract relevant properties from Figma node.

    Args:
        node: Figma node data

    Returns:
        Dictionary of extracted properties
    """
    properties = {}

    # Extract layout properties
    if 'layoutMode' in node:
        properties['layout'] = {
            'mode': node.get('layoutMode'),
            'direction': node.get('layoutDirection'),
            'gap': node.get('itemSpacing'),
            'padding': {
                'top': node.get('paddingTop'),
                'right': node.get('paddingRight'),
                'bottom': node.get('paddingBottom'),
                'left': node.get('paddingLeft')
            }
        }

    # Extract sizing
    if 'absoluteBoundingBox' in node:
        bbox = node['absoluteBoundingBox']
        properties['size'] = {
            'width': bbox.get('width'),
            'height': bbox.get('height')
        }

    # Extract variant properties
    if 'componentProperties' in node:
        properties['variants'] = node['componentProperties']

    return properties


def categorize_component_by_name(component_name: str) -> str:
    """
    Categorize component by atomic design level based on name patterns.

    Args:
        component_name: Component name from Figma

    Returns:
        'atom', 'molecule', 'organism', or 'template'
    """
    name_lower = component_name.lower()

    # Atoms: Basic elements
    atoms = ['button', 'input', 'icon', 'text', 'badge', 'avatar', 'checkbox',
             'radio', 'switch', 'label', 'link', 'image']

    # Molecules: Simple combinations
    molecules = ['field', 'card', 'list-item', 'menu-item', 'tab', 'breadcrumb',
                 'tooltip', 'dropdown', 'search', 'pagination']

    # Organisms: Complex components
    organisms = ['header', 'footer', 'sidebar', 'navigation', 'modal', 'form',
                 'table', 'dashboard', 'profile', 'chart', 'grid']

    for atom in atoms:
        if atom in name_lower:
            return 'atom'

    for molecule in molecules:
        if molecule in name_lower:
            return 'molecule'

    for organism in organisms:
        if organism in name_lower:
            return 'organism'

    # Default to molecule if unclear
    return 'molecule'
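
# Illustrative results of the substring checks above:
#
#     categorize_component_by_name('Primary Button')  # -> 'atom'
#     categorize_component_by_name('Search Field')    # -> 'molecule'
#     categorize_component_by_name('Site Header')     # -> 'organism'
#     categorize_component_by_name('Hero Section')    # -> 'molecule' (default)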


def find_similar_components(new_component: Dict[str, Any],
                            ui_kit_inventory: List[Dict[str, Any]],
                            threshold: float = 0.7) -> List[Dict[str, Any]]:
    """
    Find similar components in existing UI kit.

    Args:
        new_component: Component from Figma design
        ui_kit_inventory: List of existing UI kit components
        threshold: Similarity threshold (0.0 to 1.0)

    Returns:
        List of similar components with similarity scores
    """
    similar = []
    new_name = new_component.get('name', '')

    for existing in ui_kit_inventory:
        existing_name = existing.get('name', '')
        similarity = calculate_similarity(new_name, existing_name)

        if similarity >= threshold:
            similar.append({
                'name': existing_name,
                'path': existing.get('path', ''),
                'similarity': similarity,
                'recommendation': generate_recommendation(similarity, new_name, existing_name)
            })

    # Sort by similarity descending
    similar.sort(key=lambda x: x['similarity'], reverse=True)

    return similar


def generate_recommendation(similarity: float, new_name: str, existing_name: str) -> str:
    """
    Generate recommendation based on similarity score.

    Args:
        similarity: Similarity ratio
        new_name: New component name
        existing_name: Existing component name

    Returns:
        Recommendation string
    """
    if similarity >= 0.9:
        return f"Very similar to {existing_name}. Consider reusing existing component."
    elif similarity >= 0.7:
        return f"Similar to {existing_name}. Consider extending with new variant/prop."
    else:
        return f"Some similarity to {existing_name}. Review for potential shared patterns."


def analyze_design(figma_data: Dict[str, Any],
                   ui_kit_inventory: Dict[str, Any]) -> Dict[str, Any]:
    """
    Main analysis function: extract patterns from Figma and compare with UI kit.

    Args:
        figma_data: Combined Figma MCP data (metadata, variables, code_connect_map)
        ui_kit_inventory: Current UI kit inventory

    Returns:
        Analysis results with new tokens, components, similarities, breaking changes
    """
    results = {
        'new_tokens': [],
        'new_components': [],
        'similar_components': [],
        'breaking_changes': [],
        'summary': {}
    }

    # Extract components from Figma metadata
    metadata = figma_data.get('metadata', {})
    figma_components = extract_components_from_metadata(metadata)

    # Extract existing UI kit components
    existing_components = ui_kit_inventory.get('components', [])

    # Analyze each Figma component
    for figma_comp in figma_components:
        comp_name = figma_comp.get('name', '')

        # Skip system components (starting with _, . or #)
        if comp_name.startswith(('_', '.', '#')):
            continue

        # Find similar components
        similar = find_similar_components(figma_comp, existing_components, threshold=0.7)

        if similar:
            # Component has similarities - potential reuse
            results['similar_components'].append({
                'figma_component': comp_name,
                'figma_id': figma_comp.get('id'),
                'category': categorize_component_by_name(comp_name),
                'similar_to': similar,
                'properties': figma_comp.get('properties', {})
            })
        else:
            # New component - needs creation
            results['new_components'].append({
                'name': comp_name,
                'id': figma_comp.get('id'),
                'category': categorize_component_by_name(comp_name),
                'properties': figma_comp.get('properties', {}),
                'depth': figma_comp.get('depth', 0)
            })

    # Analyze design tokens from variables
    variables = figma_data.get('variables', {})
    if variables:
        results['new_tokens'] = analyze_tokens(variables, ui_kit_inventory)

    # Analyze breaking changes
    code_connect_map = figma_data.get('code_connect_map', {})
    if code_connect_map:
        results['breaking_changes'] = detect_breaking_changes(
            figma_components,
            code_connect_map,
            existing_components
        )

    # Generate summary
    results['summary'] = {
        'total_figma_components': len(figma_components),
        'new_components_count': len(results['new_components']),
        'similar_components_count': len(results['similar_components']),
        'new_tokens_count': len(results['new_tokens']),
        'breaking_changes_count': len(results['breaking_changes']),
        'reuse_potential': f"{(len(results['similar_components']) / max(len(figma_components), 1)) * 100:.1f}%"
    }

    return results


def analyze_tokens(variables: Dict[str, Any],
                   ui_kit_inventory: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Analyze design tokens from Figma variables.

    Args:
        variables: Figma variables data
        ui_kit_inventory: Current UI kit inventory with existing tokens

    Returns:
        List of new tokens not in current inventory
    """
    new_tokens = []
    existing_tokens = ui_kit_inventory.get('tokens', {})

    # Handle different variable formats
    for var_name, var_data in variables.items():
        if isinstance(var_data, dict):
            value = var_data.get('$value') or var_data.get('value')
            var_type = var_data.get('$type') or var_data.get('type')
        else:
            value = var_data
            var_type = infer_token_type(var_name, value)

        # Check if token exists
        if var_name not in existing_tokens:
            new_tokens.append({
                'name': var_name,
                'value': value,
                'type': var_type,
                'status': 'new'
            })

    return new_tokens


def infer_token_type(name: str, value: Any) -> str:
    """
    Infer token type from name and value.

    Args:
        name: Token name
        value: Token value

    Returns:
        Token type string
    """
    name_lower = name.lower()

    if 'color' in name_lower or (isinstance(value, str) and value.startswith('#')):
        return 'color'
    elif 'spacing' in name_lower or 'gap' in name_lower or 'padding' in name_lower:
        return 'dimension'
    elif 'font' in name_lower or 'typography' in name_lower:
        return 'typography'
    elif 'radius' in name_lower or 'border' in name_lower:
        return 'dimension'
    elif 'shadow' in name_lower:
        return 'shadow'
    else:
        return 'unknown'
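
# Examples consistent with the rules above (token names are illustrative):
#
#     infer_token_type('color/brand/primary', '#3B82F6')  # -> 'color'
#     infer_token_type('spacing/md', '16px')              # -> 'dimension'
#     infer_token_type('radius/lg', '12px')               # -> 'dimension'
#     infer_token_type('elevation/1', '0 1px 2px')        # -> 'unknown'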


def detect_breaking_changes(figma_components: List[Dict[str, Any]],
                            code_connect_map: Dict[str, Any],
                            existing_components: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Detect breaking changes in component mappings.

    Args:
        figma_components: Components from Figma
        code_connect_map: Figma Code Connect mappings
        existing_components: Existing UI kit components

    Returns:
        List of breaking changes detected
    """
    breaking_changes = []

    for figma_comp in figma_components:
        comp_id = figma_comp.get('id')
        comp_name = figma_comp.get('name')

        # Check if component was previously mapped
        if comp_id in code_connect_map:
            mapping = code_connect_map[comp_id]
            mapped_path = mapping.get('codeConnectSrc')

            # Check if mapped component still exists
            exists = any(
                existing.get('path') == mapped_path
                for existing in existing_components
            )

            if not exists:
                breaking_changes.append({
                    'figma_component': comp_name,
                    'figma_id': comp_id,
                    'previous_mapping': mapped_path,
                    'issue': 'Mapped component no longer exists in codebase',
                    'recommendation': 'Re-map to new component or create new implementation'
                })

    return breaking_changes


def main():
    parser = argparse.ArgumentParser(
        description='Analyze Figma design data and compare with UI kit'
    )
    parser.add_argument(
        '--figma-data',
        required=True,
        help='Path to JSON file with Figma MCP data'
    )
    parser.add_argument(
        '--ui-kit-inventory',
        required=True,
        help='Path to UI kit inventory JSON file'
    )
    parser.add_argument(
        '--output',
        help='Output file path (default: stdout)'
    )

    args = parser.parse_args()

    # Load Figma data
    with open(args.figma_data, 'r') as f:
        figma_data = json.load(f)

    # Load UI kit inventory
    with open(args.ui_kit_inventory, 'r') as f:
        ui_kit_inventory = json.load(f)

    # Run analysis
    results = analyze_design(figma_data, ui_kit_inventory)

    # Output results
    output_json = json.dumps(results, indent=2)

    if args.output:
        with open(args.output, 'w') as f:
            f.write(output_json)
    else:
        print(output_json)


if __name__ == '__main__':
    main()
skills/product-design/functions/design_system_auditor.py (359 lines, executable file)
@@ -0,0 +1,359 @@
#!/usr/bin/env python3
"""
Audit design system for drift between Figma design and code implementation.
Compares tokens, components, and generates recommendations.
"""

import json
import argparse
from typing import Dict, List, Any


def audit_token_alignment(figma_tokens: Dict[str, Any],
                          code_tokens: Dict[str, Any]) -> Dict[str, Any]:
    """
    Audit token alignment between Figma and code.

    Args:
        figma_tokens: Tokens from Figma (DTCG format)
        code_tokens: Tokens from code (design-tokens.json)

    Returns:
        Alignment report with drift analysis
    """
    def flatten_tokens(tokens, prefix=''):
        """Flatten nested tokens to dot notation."""
        flat = {}
        for key, value in tokens.items():
            path = f"{prefix}.{key}" if prefix else key
            if isinstance(value, dict) and '$value' in value:
                flat[path] = value
            elif isinstance(value, dict):
                flat.update(flatten_tokens(value, path))
        return flat

    figma_flat = flatten_tokens(figma_tokens)
    code_flat = flatten_tokens(code_tokens)

    alignment = {
        'in_sync': [],
        'drift_detected': [],
        'missing_in_code': [],
        'unused_in_design': []
    }

    # Compare Figma tokens with code
    for token_path, figma_data in figma_flat.items():
        figma_value = figma_data.get('$value')

        if token_path in code_flat:
            code_value = code_flat[token_path].get('$value')

            if figma_value == code_value:
                alignment['in_sync'].append({
                    'path': token_path,
                    'value': figma_value
                })
            else:
                alignment['drift_detected'].append({
                    'path': token_path,
                    'figma_value': figma_value,
                    'code_value': code_value,
                    'type': figma_data.get('$type')
                })
        else:
            alignment['missing_in_code'].append({
                'path': token_path,
                'value': figma_value,
                'type': figma_data.get('$type')
            })

    # Find tokens in code but not in Figma
    for token_path in code_flat.keys():
        if token_path not in figma_flat:
            alignment['unused_in_design'].append({
                'path': token_path,
                'value': code_flat[token_path].get('$value'),
                'type': code_flat[token_path].get('$type')
            })

    return alignment
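
# Sketch of the report shape for two hypothetical DTCG token trees:
#
#     figma = {'color': {'brand': {'$value': '#FF0000', '$type': 'color'}}}
#     code = {'color': {'brand': {'$value': '#EE0000', '$type': 'color'}}}
#     audit_token_alignment(figma, code)['drift_detected']
#     # -> [{'path': 'color.brand', 'figma_value': '#FF0000',
#     #      'code_value': '#EE0000', 'type': 'color'}]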


def analyze_component_reuse(figma_components: List[Dict[str, Any]],
                            component_mappings: Dict[str, Any]) -> List[Dict[str, Any]]:
    """
    Analyze component reuse opportunities.

    Args:
        figma_components: Components from design_analyzer
        component_mappings: Mappings from component_mapper

    Returns:
        List of reuse opportunities
    """
    opportunities = []

    # Get similar components from mappings
    similar_components = component_mappings.get('low_confidence', [])

    for similar in similar_components:
        confidence = similar.get('confidence', 0)
        figma_name = similar.get('figma_name')
        code_component = similar.get('code_component')

        if confidence >= 0.7:
            # Strong similarity - suggest extending existing
            opportunities.append({
                'figma_component': figma_name,
                'existing_component': code_component,
                'code_path': similar.get('code_path'),
                'similarity': confidence,
                'recommendation': f"Extend {code_component} with new variant/prop instead of creating new component",
                'estimated_time_saved': '2-3 hours'
            })
        elif confidence >= 0.5:
            # Moderate similarity - suggest reviewing for shared patterns
            opportunities.append({
                'figma_component': figma_name,
                'existing_component': code_component,
                'code_path': similar.get('code_path'),
                'similarity': confidence,
                'recommendation': f"Review {code_component} for shared patterns before implementing",
                'estimated_time_saved': '1-2 hours'
            })

    return opportunities


def audit_tailwind_config(tokens: Dict[str, Any], tailwind_config_path: str = None) -> Dict[str, Any]:
    """
    Audit Tailwind config alignment with design tokens.

    Args:
        tokens: Design tokens (DTCG format)
        tailwind_config_path: Path to tailwind.config.js (optional)

    Returns:
        Tailwind alignment report
    """
    # This is a simplified version - real implementation would parse tailwind.config.js
    # For now, return structure for manual audit

    alignment = {
        'status': 'manual_audit_required',
        'recommendations': []
    }

    def flatten_tokens(tokens, prefix=''):
        # Same helper as in audit_token_alignment: flatten nested tokens to dot notation
        flat = {}
        for key, value in tokens.items():
            path = f"{prefix}.{key}" if prefix else key
            if isinstance(value, dict) and '$value' in value:
                flat[path] = value
            elif isinstance(value, dict):
                flat.update(flatten_tokens(value, path))
        return flat

    flat_tokens = flatten_tokens(tokens)

    # Generate recommendations based on token types
    color_tokens = [t for t in flat_tokens.keys() if t.startswith('color.')]
    spacing_tokens = [t for t in flat_tokens.keys() if t.startswith('spacing.')]
    typography_tokens = [t for t in flat_tokens.keys() if t.startswith('typography.')]

    if color_tokens:
        alignment['recommendations'].append({
            'category': 'colors',
            'action': f'Add {len(color_tokens)} color tokens to Tailwind theme.extend.colors',
            'example': f'"{color_tokens[0]}": "var(--{color_tokens[0].replace(".", "-")})"'
        })

    if spacing_tokens:
        alignment['recommendations'].append({
            'category': 'spacing',
            'action': f'Add {len(spacing_tokens)} spacing tokens to Tailwind theme.extend.spacing',
            'example': f'"{spacing_tokens[0].split(".")[-1]}": "var(--{spacing_tokens[0].replace(".", "-")})"'
        })

    if typography_tokens:
        alignment['recommendations'].append({
            'category': 'typography',
            'action': f'Add {len(typography_tokens)} typography tokens to Tailwind theme.extend.fontSize',
            'example': 'Use Style Dictionary to generate Tailwind @theme directive'
        })

    return alignment
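
# With a flattened token name like 'color.brand.primary', the generated
# 'example' string for the colors category would read (illustrative):
#
#     "color.brand.primary": "var(--color-brand-primary)"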


def generate_audit_summary(token_alignment: Dict[str, Any],
                           component_reuse: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    Generate overall audit summary with priority levels.

    Args:
        token_alignment: Token alignment report
        component_reuse: Component reuse opportunities

    Returns:
        Summary with priority levels and recommendations
    """
    total_tokens = (
        len(token_alignment['in_sync']) +
        len(token_alignment['drift_detected']) +
        len(token_alignment['missing_in_code']) +
        len(token_alignment['unused_in_design'])
    )

    drift_count = len(token_alignment['drift_detected'])
    missing_count = len(token_alignment['missing_in_code'])

    # Determine priority
    if drift_count > 10 or (drift_count / max(total_tokens, 1)) > 0.2:
        priority = 'critical'
    elif drift_count > 5 or missing_count > 10:
        priority = 'high'
    elif drift_count > 0 or missing_count > 0:
        priority = 'medium'
    else:
        priority = 'low'

    summary = {
        'token_health': {
            'total': total_tokens,
            'in_sync': len(token_alignment['in_sync']),
            'drift_detected': drift_count,
            'missing_in_code': missing_count,
            'unused_in_design': len(token_alignment['unused_in_design']),
            'sync_percentage': f"{(len(token_alignment['in_sync']) / max(total_tokens, 1)) * 100:.1f}%"
        },
        'component_reuse': {
            'opportunities_found': len(component_reuse),
            'estimated_time_savings': f"{len(component_reuse) * 2}-{len(component_reuse) * 3} hours"
        },
        'priority': priority,
        'top_recommendations': generate_top_recommendations(
            token_alignment,
            component_reuse,
            priority
        )
    }

    return summary


def generate_top_recommendations(token_alignment: Dict[str, Any],
                                 component_reuse: List[Dict[str, Any]],
                                 priority: str) -> List[str]:
    """Generate top 3-5 recommendations based on audit results."""
    recommendations = []

    drift_count = len(token_alignment['drift_detected'])
    missing_count = len(token_alignment['missing_in_code'])

    if drift_count > 0:
        recommendations.append(
            f"⚠️ Fix {drift_count} drifted tokens - update design-tokens.json with Figma values"
        )

    if missing_count > 0:
        recommendations.append(
            f"➕ Add {missing_count} new tokens to design system - run Style Dictionary build after"
        )

    if len(token_alignment['unused_in_design']) > 5:
        recommendations.append(
            f"🗑️ Clean up {len(token_alignment['unused_in_design'])} unused tokens in codebase"
        )

    if component_reuse:
        top_reuse = component_reuse[0]
        recommendations.append(
            f"♻️ Reuse opportunity: Extend {top_reuse['existing_component']} instead of creating {top_reuse['figma_component']}"
        )

    if priority == 'low':
        recommendations.append("✅ Design system is well-aligned - good maintenance!")

    return recommendations[:5]


def audit_design_system(figma_data: Dict[str, Any],
                        code_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Main audit function: comprehensive design system health check.

    Args:
        figma_data: Combined Figma data (tokens, components, mappings)
        code_data: Combined code data (design-tokens.json, ui-kit-inventory, etc.)

    Returns:
        Complete audit report with recommendations
    """
    # Extract data
    figma_tokens = figma_data.get('tokens', {})
    figma_components = figma_data.get('components', [])
    component_mappings = figma_data.get('component_mappings', {})

    code_tokens = code_data.get('design_tokens', {})
    ui_kit_inventory = code_data.get('ui_kit_inventory', {})

    # Run audits
    token_alignment = audit_token_alignment(figma_tokens, code_tokens)
    component_reuse = analyze_component_reuse(figma_components, component_mappings)
    tailwind_alignment = audit_tailwind_config(code_tokens)

    # Generate summary
    summary = generate_audit_summary(token_alignment, component_reuse)

    return {
        'token_alignment': token_alignment,
        'component_reuse_opportunities': component_reuse,
        'tailwind_alignment': tailwind_alignment,
        'summary': summary
    }


def main():
    parser = argparse.ArgumentParser(
        description='Audit design system for drift and reuse opportunities'
    )
    parser.add_argument(
        '--figma-data',
        required=True,
        help='Path to JSON file with Figma data (tokens, components, mappings)'
    )
    parser.add_argument(
        '--code-data',
        required=True,
        help='Path to JSON file with code data (design-tokens.json, ui-kit-inventory)'
    )
    parser.add_argument(
        '--output',
        help='Output file path (default: stdout)'
    )

    args = parser.parse_args()

    # Load data
    with open(args.figma_data, 'r') as f:
        figma_data = json.load(f)

    with open(args.code_data, 'r') as f:
        code_data = json.load(f)

    # Run audit
    audit_results = audit_design_system(figma_data, code_data)

    # Output results
    output_json = json.dumps(audit_results, indent=2)

    if args.output:
        with open(args.output, 'w') as f:
            f.write(output_json)
    else:
        print(output_json)


if __name__ == '__main__':
    main()
skills/product-design/functions/figma_mcp_client.py (332 lines, normal file)
@@ -0,0 +1,332 @@
#!/usr/bin/env python3
"""
Figma MCP Client - Direct Python interface to Figma Desktop MCP server.

This module provides a simple async interface to Figma's Model Context Protocol
server running locally at http://127.0.0.1:3845/mcp

Usage:
    async with FigmaMCPClient() as client:
        # Get design tokens
        tokens = await client.get_variable_defs()

        # Get component metadata
        metadata = await client.get_metadata(node_id="1:23")

        # Get code mappings
        mappings = await client.get_code_connect_map()

Requirements:
    - Figma Desktop app must be running
    - MCP server enabled in Figma Preferences
    - User logged into Figma
    - pip install mcp
"""

import json
import logging
from typing import Optional, Dict, Any, List

try:
    from mcp import ClientSession
    from mcp.client.streamable_http import streamablehttp_client
except ImportError as e:
    raise ImportError(
        "MCP SDK not installed. Install with: pip install mcp"
    ) from e


logger = logging.getLogger(__name__)


class FigmaMCPError(Exception):
    """Base exception for Figma MCP client errors."""
    pass


class FigmaNotRunningError(FigmaMCPError):
    """Raised when Figma Desktop is not running or MCP server not enabled."""
    pass


class FigmaMCPClient:
    """
    Async client for Figma Desktop MCP server.

    Provides direct access to Figma's design data through the Model Context Protocol.
    Use as async context manager to ensure proper connection lifecycle.

    Example:
        async with FigmaMCPClient() as client:
            variables = await client.get_variable_defs()
            print(f"Found {len(variables)} design tokens")
    """

    def __init__(self, mcp_url: str = "http://127.0.0.1:3845/mcp"):
        """
        Initialize Figma MCP client.

        Args:
            mcp_url: URL of Figma Desktop MCP server (default: http://127.0.0.1:3845/mcp)
        """
        self.mcp_url = mcp_url
        self.session = None
        self.transport = None
        self.session_context = None

    async def __aenter__(self):
        """Async context manager entry - establishes MCP connection."""
        try:
            # Connect to Figma MCP server
            self.transport = streamablehttp_client(self.mcp_url)
            self.read_stream, self.write_stream, _ = await self.transport.__aenter__()

            # Create MCP session
            self.session_context = ClientSession(self.read_stream, self.write_stream)
            self.session = await self.session_context.__aenter__()

            # Initialize MCP protocol
            init_result = await self.session.initialize()
            logger.info(
                f"Connected to {init_result.serverInfo.name} "
                f"v{init_result.serverInfo.version}"
            )

            return self

        except Exception as e:
            logger.error(f"Failed to connect to Figma MCP server: {e}")
            raise FigmaNotRunningError(
                "Could not connect to Figma Desktop MCP server. "
                "Please ensure:\n"
                "  1. Figma Desktop app is running\n"
                "  2. MCP server is enabled in Figma → Preferences\n"
                "  3. You are logged into Figma\n"
                f"Error: {e}"
            ) from e

    async def __aexit__(self, *args):
        """Async context manager exit - closes MCP connection."""
        try:
            if self.session_context:
                await self.session_context.__aexit__(*args)
            if self.transport:
                await self.transport.__aexit__(*args)
            logger.info("Disconnected from Figma MCP server")
        except Exception as e:
            logger.warning(f"Error during disconnect: {e}")

    async def _call_tool(self, tool_name: str, params: Optional[Dict[str, Any]] = None) -> Any:
        """
        Internal method to call MCP tool and extract content.

        Args:
            tool_name: Name of the MCP tool to call
            params: Tool parameters

        Returns:
            Tool response content (parsed as JSON if possible)
        """
        if not self.session:
            raise FigmaMCPError("Client not connected. Use 'async with FigmaMCPClient()'")

        try:
            result = await self.session.call_tool(tool_name, params or {})

            # Extract content from MCP response
            if result.content and len(result.content) > 0:
                content_item = result.content[0]

                # Handle different content types
                if hasattr(content_item, 'text'):
                    # Text content (most common)
                    content = content_item.text

                    # Try to parse as JSON
                    try:
                        return json.loads(content)
                    except (json.JSONDecodeError, TypeError):
                        # Return raw text if not JSON
                        return content

                elif hasattr(content_item, 'data'):
                    # Image or binary content
                    return content_item.data

                else:
                    # Unknown content type - return as-is
                    return content_item

            return None

        except Exception as e:
            logger.error(f"Error calling {tool_name}: {e}")
            raise FigmaMCPError(f"Failed to call {tool_name}: {e}") from e

    async def get_metadata(self, node_id: Optional[str] = None) -> Dict[str, Any]:
        """
        Get metadata for a node or page in XML format.

        Includes node IDs, layer types, names, positions, and sizes.
        Use this to discover component structure before fetching full details.

        Args:
            node_id: Specific node or page ID (e.g., "1:23" or "0:1")
                If None, uses currently selected node in Figma

        Returns:
            Metadata dictionary with node structure

        Example:
            metadata = await client.get_metadata(node_id="0:1")
            # Parse to find component node IDs
        """
        params = {"nodeId": node_id} if node_id else {}
        return await self._call_tool("get_metadata", params)

    async def get_variable_defs(self, node_id: Optional[str] = None) -> Dict[str, str]:
        """
        Get design token variable definitions.

        Returns mapping of variable names to values.

        Args:
            node_id: Specific node ID (if None, uses currently selected)

        Returns:
            Dictionary mapping variable names to values
            Example: {'icon/default/secondary': '#949494', 'spacing/md': '16px'}

        Example:
            tokens = await client.get_variable_defs()
            for name, value in tokens.items():
                print(f"{name}: {value}")
        """
        params = {"nodeId": node_id} if node_id else {}
        return await self._call_tool("get_variable_defs", params)

    async def get_code_connect_map(self, node_id: Optional[str] = None) -> Dict[str, Dict[str, str]]:
        """
        Get mapping of Figma components to code components.

        Requires Figma Enterprise plan with Code Connect configured.

        Args:
            node_id: Specific node ID (if None, uses currently selected)

        Returns:
            Dictionary mapping node IDs to code locations
            Example: {
                '1:2': {
                    'codeConnectSrc': 'https://github.com/foo/components/Button.tsx',
                    'codeConnectName': 'Button'
                }
            }

        Example:
            mappings = await client.get_code_connect_map()
            for node_id, mapping in mappings.items():
                print(f"{node_id} → {mapping['codeConnectName']}")
        """
        params = {"nodeId": node_id} if node_id else {}
        return await self._call_tool("get_code_connect_map", params)

    async def get_design_context(self, node_id: Optional[str] = None) -> str:
        """
        Generate UI code for a component.

        Returns React/Vue/HTML implementation code for the selected component.
        Use sparingly - can return large responses (50-100k tokens).

        Args:
            node_id: Specific node ID (if None, uses currently selected)

        Returns:
            UI code as string (React/Vue/HTML)

        Example:
            code = await client.get_design_context(node_id="1:23")
            # Returns React component code
        """
        params = {"nodeId": node_id} if node_id else {}
        return await self._call_tool("get_design_context", params)

    async def get_screenshot(self, node_id: Optional[str] = None) -> str:
        """
        Generate screenshot for a component.

        Args:
            node_id: Specific node ID (if None, uses currently selected)

        Returns:
            Screenshot image data (format depends on Figma response)

        Example:
            screenshot = await client.get_screenshot(node_id="1:23")
            # Save or process screenshot data
        """
        params = {"nodeId": node_id} if node_id else {}
        return await self._call_tool("get_screenshot", params)

    async def create_design_system_rules(self) -> str:
        """
        Generate design system rules for the repository.

        Returns:
            Prompt for design system rules generation

        Example:
            rules = await client.create_design_system_rules()
        """
        return await self._call_tool("create_design_system_rules")

    async def list_available_tools(self) -> List[str]:
        """
        List all available MCP tools.

        Useful for debugging or discovering what Figma MCP supports.

        Returns:
            List of tool names

        Example:
            tools = await client.list_available_tools()
            print(f"Available: {', '.join(tools)}")
        """
        if not self.session:
            raise FigmaMCPError("Client not connected")

        result = await self.session.list_tools()
        return [tool.name for tool in result.tools]


# Convenience functions for simple use cases
async def get_figma_variables() -> Dict[str, str]:
    """
    Quick helper to fetch Figma design tokens.

    Returns:
        Dictionary of variable name → value mappings

    Example:
        tokens = await get_figma_variables()
    """
    async with FigmaMCPClient() as client:
        return await client.get_variable_defs()


async def get_figma_metadata(node_id: Optional[str] = None) -> Dict[str, Any]:
    """
    Quick helper to fetch Figma node metadata.

    Args:
        node_id: Specific node ID (if None, uses currently selected)

    Returns:
        Metadata dictionary

    Example:
        metadata = await get_figma_metadata(node_id="0:1")
    """
    async with FigmaMCPClient() as client:
        return await client.get_metadata(node_id)
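
# A minimal sketch of driving the helpers above from a synchronous script
# (module name assumed to match this file):
#
#     import asyncio
#     from figma_mcp_client import get_figma_variables
#
#     tokens = asyncio.run(get_figma_variables())
#     print(f"Fetched {len(tokens)} design tokens")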
skills/product-design/functions/implementation_planner.py (391 lines, executable file)
@@ -0,0 +1,391 @@
#!/usr/bin/env python3
"""
Generate implementation task documentation from design review analysis.
Creates phased breakdown with acceptance criteria and complexity estimates.
"""

import json
import argparse
from datetime import datetime
from typing import Dict, List, Any


def estimate_complexity(component_category: str, has_variants: bool, breaking_change: bool) -> tuple:
    """
    Estimate implementation complexity and time.

    Args:
        component_category: atom, molecule, organism, template
        has_variants: Whether component has variants/props
        breaking_change: Whether this is a breaking change

    Returns:
        Tuple of (complexity_level, estimated_hours)
    """
    base_hours = {
        'atom': 2,
        'molecule': 3,
        'organism': 5,
        'template': 8
    }

    hours = base_hours.get(component_category, 3)

    if has_variants:
        hours += 1

    if breaking_change:
        hours += 2

    if hours <= 2:
        complexity = 'Low'
    elif hours <= 4:
        complexity = 'Medium'
    else:
        complexity = 'High'

    return complexity, hours
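
# Worked example of the estimate above: an organism with variants that is
# also a breaking change costs 5 + 1 + 2 = 8 hours -> ('High', 8), while a
# plain atom stays at 2 hours -> ('Low', 2).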


def generate_token_phase(new_tokens: List[Dict[str, Any]],
                         modified_tokens: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Generate Phase 1: Design Tokens implementation plan."""
    total_tokens = len(new_tokens) + len(modified_tokens)
    hours = max(1, total_tokens // 10 + 1)  # 10 tokens per hour estimate

    subtasks = [
        f"Add {len(new_tokens)} new tokens to design-tokens.json" if new_tokens else None,
        f"Update {len(modified_tokens)} modified tokens" if modified_tokens else None,
        "Run Style Dictionary build to generate platform outputs",
        "Update Tailwind @theme with new variables",
        "Verify token availability in Storybook tokens page"
    ]

    acceptance_criteria = [
        f"All {total_tokens} new/modified tokens available in Tailwind utilities",
        "No breaking changes to existing token references",
        "Style Dictionary build completes without errors",
        "Storybook tokens page shows all additions"
    ]

    return {
        'name': 'Design Tokens',
        'priority': 'High',
        'estimated_hours': hours,
        'description': f'Add and update {total_tokens} design tokens',
        'subtasks': [task for task in subtasks if task],
        'acceptance_criteria': acceptance_criteria,
        'files_to_modify': [
            '.agent/design-system/design-tokens.json',
            'tailwind.config.js (or CSS @theme)',
            'Storybook tokens documentation'
        ]
    }


def generate_component_phase(component: Dict[str, Any], phase_number: int) -> Dict[str, Any]:
    """Generate component implementation phase."""
    # New components carry 'name'; similar components from design_analyzer
    # carry 'figma_component' instead, so fall back to it.
    comp_name = component.get('name') or component.get('figma_component')
    category = component.get('category', 'molecule')
    properties = component.get('properties', {})
    similar_to = component.get('similar_to', [])

    has_variants = bool(properties.get('variants'))
    breaking_change = component.get('breaking_change', False)

    complexity, hours = estimate_complexity(category, has_variants, breaking_change)

    # Determine approach
    if similar_to and similar_to[0]['similarity'] >= 0.7:
        approach = f"Extend existing {similar_to[0]['name']} component"
        action = 'extend'
    else:
        approach = f"Create new {category} component"
        action = 'create'

    # Generate subtasks based on action
    if action == 'extend':
        subtasks = [
            f"Add new variant props to {similar_to[0]['name']}",
            "Update TypeScript interface with new props",
            "Add styles for new variants",
            "Update existing tests",
            "Add Storybook stories for new variants"
        ]
        files = [
            similar_to[0].get('path', f'src/components/{category}/{comp_name}.tsx'),
            f"src/components/{category}/{comp_name}.test.tsx",
            f"src/components/{category}/{comp_name}.stories.tsx"
        ]
    else:
        subtasks = [
            f"Create {comp_name} component file",
            "Implement TypeScript props interface",
            "Add styles (CSS modules/Tailwind)",
            "Write unit tests",
            "Create Storybook stories",
            "Add barrel export (index.ts)"
        ]
        files = [
            f"src/components/{category}/{comp_name}.tsx",
            f"src/components/{category}/{comp_name}.test.tsx",
            f"src/components/{category}/{comp_name}.stories.tsx",
            f"src/components/{category}/index.ts"
        ]

    acceptance_criteria = [
        f"{comp_name} renders correctly with all variants",
        "100% test coverage for new props/variants" if action == 'extend' else "90%+ test coverage",
        "Storybook shows all component states",
        "No visual regression in existing components" if action == 'extend' else "Passes visual regression tests",
        "Accessibility audit passes (a11y addon)"
    ]

    if breaking_change:
        acceptance_criteria.insert(0, "Migration guide created for breaking changes")
        subtasks.append("Create migration documentation")

    return {
        'number': phase_number,
        'name': comp_name,
        'category': category,
        'priority': 'High' if breaking_change else 'Medium',
        'complexity': complexity,
        'estimated_hours': hours,
        'approach': approach,
        'subtasks': subtasks,
        'files_to_modify': files,
        'acceptance_criteria': acceptance_criteria,
        'breaking_change': breaking_change
    }


def generate_task_document(task_id: str,
                           feature_name: str,
                           analysis_results: Dict[str, Any],
                           review_reference: str) -> str:
    """
    Generate complete Navigator task document.

    Args:
        task_id: Task identifier (e.g., "TASK-16")
        feature_name: Feature name (e.g., "Dashboard Redesign")
        analysis_results: Combined analysis from all functions
        review_reference: Path to design review report

    Returns:
        Markdown task document
    """
    date = datetime.now().strftime('%Y-%m-%d')

    # Extract data
    new_tokens = analysis_results.get('new_tokens', [])
    modified_tokens = analysis_results.get('token_diff', {}).get('modified', [])
    new_components = analysis_results.get('new_components', [])
    similar_components = analysis_results.get('similar_components', [])
    breaking_changes = analysis_results.get('breaking_changes', [])

    # Generate phases
    phases = []

    # Phase 1: Always start with tokens if any exist
    if new_tokens or modified_tokens:
        phases.append(generate_token_phase(new_tokens, modified_tokens))

    # Phase 2+: Component implementations
    for i, comp in enumerate(new_components + similar_components, start=2):
        phases.append(generate_component_phase(comp, i))

    # Calculate totals
    total_hours = sum(phase.get('estimated_hours', 0) for phase in phases)
    total_complexity = 'High' if total_hours > 10 else 'Medium' if total_hours > 5 else 'Low'

    # Build markdown document
    doc = f"""# {task_id}: {feature_name} Implementation

**Created**: {date}
**Status**: Ready for Development
**Priority**: High
**Complexity**: {total_complexity}
**Estimated Time**: {total_hours} hours

---

## Context

Implement {feature_name} from Figma mockup with design system integration.

**Design Review**: `{review_reference}`

---

## Overview

**Changes Required**:
- Design Tokens: {len(new_tokens)} new, {len(modified_tokens)} modified
- Components: {len(new_components)} new, {len(similar_components)} to extend
- Breaking Changes: {len(breaking_changes)}

**Implementation Strategy**: Phased approach following atomic design hierarchy

---

## Implementation Phases

"""

    # Add each phase
    for i, phase in enumerate(phases, start=1):
        doc += f"""### Phase {i}: {phase['name']}

**Priority**: {phase['priority']}
**Complexity**: {phase.get('complexity', 'Medium')}
**Estimated Time**: {phase['estimated_hours']} hours

#### Approach
{phase.get('approach', phase.get('description', 'Implement component following project patterns'))}

#### Subtasks
"""
        for subtask in phase['subtasks']:
            doc += f"- {subtask}\n"

        doc += """
#### Files to Modify
"""
        for file in phase.get('files_to_modify', []):
            doc += f"- `{file}`\n"

        doc += """
**Acceptance Criteria**:
"""
        for criterion in phase['acceptance_criteria']:
            doc += f"- [ ] {criterion}\n"

        doc += "\n---\n\n"

    # Add testing strategy
    doc += f"""## Testing Strategy

### Unit Tests
- All new/modified components
- Test all variants and props
- Error states and edge cases
- Target: 90%+ coverage

### Visual Regression
- Chromatic for all component stories
- Test all variants and states
- Verify no regressions in existing components

### Integration Tests
- Test component composition
- Verify design token usage
- Test responsive behavior

### Accessibility
- Run a11y addon in Storybook
- Keyboard navigation testing
- Screen reader verification
- WCAG 2.2 Level AA compliance

---

## Rollout Plan

1. **Phase 1: Tokens** (no visual changes, safe to deploy)
2. **Phase 2-N: Components** (incremental deployment)
   - Deploy each component after testing
   - Monitor for issues before next phase
3. **Final: Integration** (full feature integration)

**Rollback Strategy**: Each phase is independent and can be reverted

---

## Success Metrics

- [ ] 100% design fidelity vs Figma mockup
- [ ] All acceptance criteria met
- [ ] No visual regressions
- [ ] All accessibility checks pass
- [ ] Performance budget maintained (no layout shifts)

---

## Design System Impact

**UI Kit Inventory**: Update after each component completion

**Token Additions**: {len(new_tokens)} new tokens added to design system

**Component Reuse**: {len(similar_components)} opportunities to extend existing components

---

## Notes

{f"⚠️ **Breaking Changes**: {len(breaking_changes)} component(s) require migration - see phase details" if breaking_changes else "✅ No breaking changes - backward compatible implementation"}

---

**Last Updated**: {date}
**Navigator Version**: 3.2.0
"""

    return doc


def main():
    parser = argparse.ArgumentParser(
        description='Generate implementation task document from design review'
    )
    parser.add_argument(
        '--task-id',
        required=True,
        help='Task identifier (e.g., TASK-16)'
    )
    parser.add_argument(
        '--feature-name',
        required=True,
        help='Feature name (e.g., "Dashboard Redesign")'
    )
    parser.add_argument(
        '--analysis-results',
        required=True,
        help='Path to JSON file with combined analysis results'
    )
    parser.add_argument(
        '--review-reference',
        required=True,
        help='Path to design review report'
    )
    parser.add_argument(
        '--output',
        help='Output file path (default: stdout)'
    )

    args = parser.parse_args()

    # Load analysis results
    with open(args.analysis_results, 'r') as f:
        analysis_results = json.load(f)

    # Generate task document
    task_doc = generate_task_document(
        args.task_id,
        args.feature_name,
        analysis_results,
        args.review_reference
    )

    # Output
    if args.output:
        with open(args.output, 'w') as f:
            f.write(task_doc)
    else:
        print(task_doc)


if __name__ == '__main__':
    main()
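
# Example invocation (illustrative: the script filename and file paths here
# are assumed, the flags are as defined above):
#   python generate_task_document.py --task-id TASK-16 \
#       --feature-name "Dashboard Redesign" \
#       --analysis-results analysis_results.json \
#       --review-reference docs/design-reviews/dashboard-redesign.md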
51
skills/product-design/functions/test_mcp_connection.py
Normal file
@@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""
Test Figma MCP connection - Quick validation script.

Tests connection to Figma Desktop MCP server and lists available tools.
"""
import asyncio
import sys

try:
    from figma_mcp_client import FigmaMCPClient, FigmaNotRunningError
except ImportError:
    print("❌ Error: figma_mcp_client not found")
    print("   Ensure you're in the correct directory: skills/product-design/functions/")
    sys.exit(1)


async def test_connection():
    """Test Figma MCP connection."""
    try:
        async with FigmaMCPClient() as client:
            # List available tools
            tools = await client.list_available_tools()

            print("✅ Successfully connected to Figma MCP server")
            print(f"   Found {len(tools)} tools:")
            for tool in tools:
                print(f"   - {tool}")

            return True

    except FigmaNotRunningError as e:
        print("❌ Figma Desktop not running or MCP not enabled")
        print(f"   {e}")
        return False

    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        import traceback
        traceback.print_exc()
        return False


async def main():
    """Main entry point."""
    success = await test_connection()
    sys.exit(0 if success else 1)


if __name__ == "__main__":
    asyncio.run(main())
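
# Run from skills/product-design/functions/ with Figma Desktop open and its
# MCP server enabled; the script exits 0 on success and 1 on failure:
#   python test_mcp_connection.py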
394
skills/product-design/functions/token_extractor.py
Executable file
@@ -0,0 +1,394 @@
#!/usr/bin/env python3
"""
Extract design tokens from Figma variables and convert to DTCG format.
Compares with existing tokens and generates diff summary.
"""

import json
import sys
import argparse
from typing import Dict, List, Any, Tuple


def normalize_token_name(figma_name: str) -> str:
    """
    Normalize Figma variable name to DTCG semantic naming.

    Examples:
        "Primary 500" → "color.primary.500"
        "Spacing MD" → "spacing.md"
        "Font Heading Large" → "typography.heading.large"

    Args:
        figma_name: Original Figma variable name

    Returns:
        Normalized DTCG token path
    """
    name = figma_name.strip()

    # Convert to lowercase and split
    parts = name.lower().replace('-', ' ').replace('_', ' ').split()

    # Detect token type from name
    if any(keyword in parts for keyword in ['color', 'colour']):
        token_type = 'color'
        parts = [p for p in parts if p not in ['color', 'colour']]
    elif any(keyword in parts for keyword in ['spacing', 'space', 'gap', 'padding', 'margin']):
        token_type = 'spacing'
        parts = [p for p in parts if p not in ['spacing', 'space', 'gap', 'padding', 'margin']]
    elif any(keyword in parts for keyword in ['font', 'typography', 'text']):
        token_type = 'typography'
        parts = [p for p in parts if p not in ['font', 'typography', 'text']]
    elif any(keyword in parts for keyword in ['radius', 'border']):
        token_type = 'radius'
        parts = [p for p in parts if p not in ['radius', 'border']]
    elif any(keyword in parts for keyword in ['shadow', 'elevation']):
        token_type = 'shadow'
        parts = [p for p in parts if p not in ['shadow', 'elevation']]
    else:
        # Infer from first part
        first_part = parts[0] if parts else ''
        if first_part in ['primary', 'secondary', 'success', 'error', 'warning', 'info']:
            token_type = 'color'
        elif first_part in ['xs', 'sm', 'md', 'lg', 'xl', '2xl', '3xl']:
            token_type = 'spacing'
        else:
            token_type = 'other'

    # Build token path
    if parts:
        return f"{token_type}.{'.'.join(parts)}"
    else:
        return token_type


def detect_token_type(name: str, value: Any) -> str:
    """
    Detect DTCG token type from name and value.

    Args:
        name: Token name
        value: Token value

    Returns:
        DTCG type string
    """
    name_lower = name.lower()

    # Check by name first
    if 'color' in name_lower or 'colour' in name_lower:
        return 'color'
    elif 'spacing' in name_lower or 'gap' in name_lower or 'padding' in name_lower or 'margin' in name_lower:
        return 'dimension'
    elif 'font' in name_lower or 'typography' in name_lower:
        if isinstance(value, dict):
            return 'typography'
        else:
            return 'fontFamily' if 'family' in name_lower else 'dimension'
    elif 'radius' in name_lower or 'border' in name_lower:
        return 'dimension'
    elif 'shadow' in name_lower or 'elevation' in name_lower:
        return 'shadow'
    elif 'duration' in name_lower or 'transition' in name_lower:
        return 'duration'
    elif 'opacity' in name_lower or 'alpha' in name_lower:
        return 'number'

    # Infer from value
    if isinstance(value, str):
        if value.startswith('#') or value.startswith('rgb'):
            return 'color'
        elif value.endswith('px') or value.endswith('rem') or value.endswith('em'):
            return 'dimension'
        elif value.endswith('ms') or value.endswith('s'):
            return 'duration'
    elif isinstance(value, (int, float)):
        return 'number'
    elif isinstance(value, dict):
        if 'fontFamily' in value or 'fontSize' in value:
            return 'typography'
        elif 'x' in value and 'y' in value:
            return 'shadow'

    return 'other'
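
# Illustrative expectations for the rules above (a sketch, not exhaustive):
#   detect_token_type('Brand Blue', '#0055FF') -> 'color'      (value heuristic)
#   detect_token_type('gap-md', '16px')        -> 'dimension'  (name heuristic)
#   detect_token_type('fade duration', 200)    -> 'duration'   (name heuristic)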


def convert_to_dtcg(figma_variables: Dict[str, Any]) -> Dict[str, Any]:
    """
    Convert Figma variables to DTCG format.

    Args:
        figma_variables: Figma get_variable_defs response

    Returns:
        DTCG formatted tokens
    """
    dtcg_tokens = {}

    for var_name, var_data in figma_variables.items():
        # Extract value and type
        if isinstance(var_data, dict):
            value = var_data.get('$value') or var_data.get('value')
            var_type = var_data.get('$type') or var_data.get('type')
            description = var_data.get('$description') or var_data.get('description', '')
        else:
            value = var_data
            var_type = None
            description = ''

        # Detect type if not provided
        if not var_type:
            var_type = detect_token_type(var_name, value)

        # Normalize token name to DTCG path
        token_path = normalize_token_name(var_name)

        # Build nested structure
        path_parts = token_path.split('.')
        current = dtcg_tokens

        for i, part in enumerate(path_parts):
            if i == len(path_parts) - 1:
                # Last part - add token definition
                current[part] = {
                    '$value': value,
                    '$type': var_type
                }
                if description:
                    current[part]['$description'] = description
            else:
                # Intermediate path - create nested dict
                if part not in current:
                    current[part] = {}
                current = current[part]

    return dtcg_tokens
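
# Sketch of the transformation on a single flat variable:
#   convert_to_dtcg({"Primary 500": "#3B82F6"})
#   -> {"color": {"primary": {"500": {"$value": "#3B82F6", "$type": "color"}}}}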


def generate_diff(new_tokens: Dict[str, Any],
                  existing_tokens: Dict[str, Any]) -> Dict[str, List[Dict[str, Any]]]:
    """
    Generate diff between new and existing tokens.

    Args:
        new_tokens: New tokens from Figma (DTCG format)
        existing_tokens: Existing tokens from design-tokens.json

    Returns:
        Diff summary with added, modified, removed, unchanged
    """
    diff = {
        'added': [],
        'modified': [],
        'removed': [],
        'unchanged': []
    }

    # Flatten tokens for comparison
    new_flat = flatten_tokens(new_tokens)
    existing_flat = flatten_tokens(existing_tokens)

    # Find added and modified
    for token_path, token_data in new_flat.items():
        if token_path not in existing_flat:
            diff['added'].append({
                'path': token_path,
                'value': token_data.get('$value'),
                'type': token_data.get('$type')
            })
        else:
            existing_value = existing_flat[token_path].get('$value')
            new_value = token_data.get('$value')

            if existing_value != new_value:
                diff['modified'].append({
                    'path': token_path,
                    'old_value': existing_value,
                    'new_value': new_value,
                    'type': token_data.get('$type')
                })
            else:
                diff['unchanged'].append({
                    'path': token_path,
                    'value': new_value
                })

    # Find removed
    for token_path, token_data in existing_flat.items():
        if token_path not in new_flat:
            diff['removed'].append({
                'path': token_path,
                'value': token_data.get('$value'),
                'type': token_data.get('$type')
            })

    return diff
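
# Note: comparison is keyed by flattened path, so a token renamed in Figma
# surfaces as one 'added' plus one 'removed' entry; only a value change on
# the same path lands in 'modified'.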


def flatten_tokens(tokens: Dict[str, Any], prefix: str = '') -> Dict[str, Any]:
    """
    Flatten nested DTCG tokens to dot notation paths.

    Args:
        tokens: Nested DTCG token structure
        prefix: Current path prefix

    Returns:
        Flattened dictionary with dot notation keys
    """
    flat = {}

    for key, value in tokens.items():
        current_path = f"{prefix}.{key}" if prefix else key

        if isinstance(value, dict) and '$value' in value:
            # This is a token definition
            flat[current_path] = value
        elif isinstance(value, dict):
            # This is a nested group
            flat.update(flatten_tokens(value, current_path))

    return flat
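
# Round-trip sketch (inverse of the nesting built by convert_to_dtcg):
#   flatten_tokens({"color": {"primary": {"500": {"$value": "#3B82F6", "$type": "color"}}}})
#   -> {"color.primary.500": {"$value": "#3B82F6", "$type": "color"}}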


def generate_summary(diff: Dict[str, List[Dict[str, Any]]]) -> Dict[str, Any]:
    """
    Generate summary statistics from diff.

    Args:
        diff: Token diff

    Returns:
        Summary statistics
    """
    # Tokens present in the Figma export: added + modified + unchanged
    total_new = len(diff['added']) + len(diff['modified']) + len(diff['unchanged'])
    # Tokens present in the existing file: modified + removed + unchanged
    total_existing = len(diff['modified']) + len(diff['removed']) + len(diff['unchanged'])

    return {
        'total_new_tokens': total_new,
        'total_existing_tokens': total_existing,
        'added_count': len(diff['added']),
        'modified_count': len(diff['modified']),
        'removed_count': len(diff['removed']),
        'unchanged_count': len(diff['unchanged']),
        'sync_status': 'in_sync' if len(diff['added']) == 0 and len(diff['modified']) == 0 and len(diff['removed']) == 0 else 'drift_detected',
        'drift_percentage': f"{((len(diff['modified']) + len(diff['removed'])) / max(total_existing, 1)) * 100:.1f}%"
    }
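
# Worked sketch of the arithmetic: with 2 added, 1 modified, 1 removed and
# 6 unchanged tokens:
#   total_existing   = 1 + 1 + 6 = 8
#   drift_percentage = (1 + 1) / 8 * 100 -> '25.0%'
#   sync_status      = 'drift_detected'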


def extract_tokens(figma_variables: Dict[str, Any],
                   existing_tokens: Dict[str, Any] = None) -> Dict[str, Any]:
    """
    Main extraction function: convert Figma variables to DTCG and generate diff.

    Args:
        figma_variables: Figma get_variable_defs response
        existing_tokens: Current design-tokens.json (optional)

    Returns:
        Extraction results with DTCG tokens, diff, and summary
    """
    # Convert to DTCG format
    dtcg_tokens = convert_to_dtcg(figma_variables)

    # Generate diff if existing tokens provided
    if existing_tokens:
        diff = generate_diff(dtcg_tokens, existing_tokens)
        summary = generate_summary(diff)
    else:
        # No existing tokens - all are new
        flat = flatten_tokens(dtcg_tokens)
        diff = {
            'added': [
                {
                    'path': path,
                    'value': data.get('$value'),
                    'type': data.get('$type')
                }
                for path, data in flat.items()
            ],
            'modified': [],
            'removed': [],
            'unchanged': []
        }
        summary = {
            'total_new_tokens': len(flat),
            'total_existing_tokens': 0,
            'added_count': len(flat),
            'modified_count': 0,
            'removed_count': 0,
            'unchanged_count': 0,
            'sync_status': 'initial_extraction',
            'drift_percentage': '0.0%'
        }

    return {
        'dtcg_tokens': dtcg_tokens,
        'diff': diff,
        'summary': summary
    }
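
# Minimal programmatic sketch (no existing tokens supplied):
#   results = extract_tokens({"Primary 500": "#3B82F6"})
#   results['summary']['sync_status']  -> 'initial_extraction'
#   results['summary']['added_count']  -> 1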


def main():
    parser = argparse.ArgumentParser(
        description='Extract design tokens from Figma and convert to DTCG format'
    )
    parser.add_argument(
        '--figma-variables',
        required=True,
        help='Path to JSON file with Figma variables (get_variable_defs response)'
    )
    parser.add_argument(
        '--existing-tokens',
        help='Path to existing design-tokens.json (optional)'
    )
    parser.add_argument(
        '--output',
        help='Output file path (default: stdout)'
    )
    parser.add_argument(
        '--format',
        choices=['full', 'tokens-only', 'diff-only'],
        default='full',
        help='Output format (default: full)'
    )

    args = parser.parse_args()

    # Load Figma variables
    with open(args.figma_variables, 'r') as f:
        figma_variables = json.load(f)

    # Load existing tokens if provided
    existing_tokens = None
    if args.existing_tokens:
        with open(args.existing_tokens, 'r') as f:
            existing_tokens = json.load(f)

    # Run extraction
    results = extract_tokens(figma_variables, existing_tokens)

    # Format output based on --format flag
    if args.format == 'tokens-only':
        output = results['dtcg_tokens']
    elif args.format == 'diff-only':
        output = {
            'diff': results['diff'],
            'summary': results['summary']
        }
    else:
        output = results

    output_json = json.dumps(output, indent=2)

    # Write output
    if args.output:
        with open(args.output, 'w') as f:
            f.write(output_json)
    else:
        print(output_json)


if __name__ == '__main__':
    main()
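
# Example invocation (illustrative input file names; the flags are as
# defined above):
#   python token_extractor.py --figma-variables figma_vars.json \
#       --existing-tokens design-tokens.json --format diff-only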