Initial commit
309
skills/torch_geometric/scripts/benchmark_model.py
Normal file
@@ -0,0 +1,309 @@
#!/usr/bin/env python3
"""
Benchmark GNN models on standard datasets.

This script provides a simple way to benchmark different GNN architectures
on common datasets and compare their performance.

Usage:
    python benchmark_model.py --models gcn gat --dataset Cora
    python benchmark_model.py --models gcn --dataset Cora --epochs 200 --runs 10
"""

import argparse
import time

import numpy as np
import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid, TUDataset
from torch_geometric.loader import DataLoader
from torch_geometric.nn import GCNConv, GATConv, SAGEConv, global_mean_pool


class GCN(torch.nn.Module):
    """Two-layer GCN; pools to a graph embedding when a batch vector is given."""

    def __init__(self, num_features, hidden_channels, num_classes, dropout=0.5):
        super().__init__()
        self.conv1 = GCNConv(num_features, hidden_channels)
        self.conv2 = GCNConv(hidden_channels, num_classes)
        self.dropout = dropout

    def forward(self, x, edge_index, batch=None):
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        if batch is not None:
            x = global_mean_pool(x, batch)
        return F.log_softmax(x, dim=1)


class GAT(torch.nn.Module):
    """Two-layer GAT with multi-head attention in the first layer."""

    def __init__(self, num_features, hidden_channels, num_classes, heads=8, dropout=0.6):
        super().__init__()
        self.conv1 = GATConv(num_features, hidden_channels, heads=heads, dropout=dropout)
        self.conv2 = GATConv(hidden_channels * heads, num_classes, heads=1,
                             concat=False, dropout=dropout)
        self.dropout = dropout

    def forward(self, x, edge_index, batch=None):
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.elu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        if batch is not None:
            x = global_mean_pool(x, batch)
        return F.log_softmax(x, dim=1)


class GraphSAGE(torch.nn.Module):
    """Two-layer GraphSAGE with mean aggregation."""

    def __init__(self, num_features, hidden_channels, num_classes, dropout=0.5):
        super().__init__()
        self.conv1 = SAGEConv(num_features, hidden_channels)
        self.conv2 = SAGEConv(hidden_channels, num_classes)
        self.dropout = dropout

    def forward(self, x, edge_index, batch=None):
        x = self.conv1(x, edge_index)
        x = F.relu(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        if batch is not None:
            x = global_mean_pool(x, batch)
        return F.log_softmax(x, dim=1)


MODELS = {
    'gcn': GCN,
    'gat': GAT,
    'graphsage': GraphSAGE,
}


def train_node_classification(model, data, optimizer):
    """Train for node classification (one full-batch epoch)."""
    model.train()
    optimizer.zero_grad()
    out = model(data.x, data.edge_index)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def test_node_classification(model, data):
    """Return train/val/test accuracy for node classification."""
    model.eval()
    out = model(data.x, data.edge_index)
    pred = out.argmax(dim=1)

    accs = []
    for mask in [data.train_mask, data.val_mask, data.test_mask]:
        correct = (pred[mask] == data.y[mask]).sum()
        accs.append(float(correct) / int(mask.sum()))

    return accs


def train_graph_classification(model, loader, optimizer, device):
    """Train for graph classification (one epoch over the loader)."""
    model.train()
    total_loss = 0

    for data in loader:
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.edge_index, data.batch)
        loss = F.nll_loss(out, data.y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * data.num_graphs

    return total_loss / len(loader.dataset)


@torch.no_grad()
def test_graph_classification(model, loader, device):
    """Return accuracy for graph classification."""
    model.eval()
    correct = 0

    for data in loader:
        data = data.to(device)
        out = model(data.x, data.edge_index, data.batch)
        pred = out.argmax(dim=1)
        correct += (pred == data.y).sum().item()

    return correct / len(loader.dataset)


def benchmark_node_classification(model_name, dataset_name, epochs, lr, weight_decay, device):
    """Benchmark a model on node classification."""
    # Load dataset
    dataset = Planetoid(root=f'/tmp/{dataset_name}', name=dataset_name)
    data = dataset[0].to(device)

    # Create model
    model_class = MODELS[model_name]
    model = model_class(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes
    ).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

    # Training: report the test accuracy at the epoch with the best validation accuracy
    start_time = time.time()
    best_val_acc = 0
    best_test_acc = 0

    for epoch in range(1, epochs + 1):
        loss = train_node_classification(model, data, optimizer)
        train_acc, val_acc, test_acc = test_node_classification(model, data)

        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc

    train_time = time.time() - start_time

    return {
        'train_acc': train_acc,
        'val_acc': best_val_acc,
        'test_acc': best_test_acc,
        'train_time': train_time,
    }


def benchmark_graph_classification(model_name, dataset_name, epochs, lr, device):
    """Benchmark a model on graph classification."""
    # Load dataset
    dataset = TUDataset(root=f'/tmp/{dataset_name}', name=dataset_name)

    # Shuffle, then use an 80/20 train/test split
    dataset = dataset.shuffle()
    train_dataset = dataset[:int(len(dataset) * 0.8)]
    test_dataset = dataset[int(len(dataset) * 0.8):]

    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=32)

    # Create model
    model_class = MODELS[model_name]
    model = model_class(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes
    ).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Training
    start_time = time.time()

    for epoch in range(1, epochs + 1):
        loss = train_graph_classification(model, train_loader, optimizer, device)

    # Final evaluation
    train_acc = test_graph_classification(model, train_loader, device)
    test_acc = test_graph_classification(model, test_loader, device)
    train_time = time.time() - start_time

    return {
        'train_acc': train_acc,
        'test_acc': test_acc,
        'train_time': train_time,
    }


def run_benchmark(args):
    """Run benchmark experiments."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"Using device: {device}")

    # Determine task type from the dataset name
    if args.dataset in ['Cora', 'CiteSeer', 'PubMed']:
        task = 'node_classification'
    else:
        task = 'graph_classification'

    print(f"\nDataset: {args.dataset}")
    print(f"Task: {task}")
    print(f"Models: {', '.join(args.models)}")
    print(f"Epochs: {args.epochs}")
    print(f"Runs: {args.runs}")
    print("=" * 60)

    results = {model: [] for model in args.models}

    # Run experiments
    for run in range(args.runs):
        print(f"\nRun {run + 1}/{args.runs}")
        print("-" * 60)

        for model_name in args.models:
            if model_name not in MODELS:
                print(f"Unknown model: {model_name}")
                continue

            print(f"  Training {model_name.upper()}...", end=" ")

            try:
                if task == 'node_classification':
                    result = benchmark_node_classification(
                        model_name, args.dataset, args.epochs,
                        args.lr, args.weight_decay, device
                    )
                else:
                    result = benchmark_graph_classification(
                        model_name, args.dataset, args.epochs, args.lr, device
                    )
                print(f"Test Acc: {result['test_acc']:.4f}, "
                      f"Time: {result['train_time']:.2f}s")
                results[model_name].append(result)
            except Exception as e:
                print(f"Error: {e}")

    # Print summary
    print("\n" + "=" * 60)
    print("BENCHMARK RESULTS")
    print("=" * 60)

    for model_name in args.models:
        if not results[model_name]:
            continue

        test_accs = [r['test_acc'] for r in results[model_name]]
        times = [r['train_time'] for r in results[model_name]]

        print(f"\n{model_name.upper()}")
        print(f"  Test Accuracy: {np.mean(test_accs):.4f} ± {np.std(test_accs):.4f}")
        print(f"  Training Time: {np.mean(times):.2f} ± {np.std(times):.2f}s")


def main():
    parser = argparse.ArgumentParser(description="Benchmark GNN models")
    parser.add_argument('--models', nargs='+', default=['gcn'],
                        help='Model types to benchmark (gcn, gat, graphsage)')
    parser.add_argument('--dataset', type=str, default='Cora',
                        help='Dataset name (Cora, CiteSeer, PubMed, ENZYMES, PROTEINS)')
    parser.add_argument('--epochs', type=int, default=200,
                        help='Number of training epochs')
    parser.add_argument('--runs', type=int, default=5,
                        help='Number of runs to average over')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='Learning rate')
    parser.add_argument('--weight-decay', type=float, default=5e-4,
                        help='Weight decay for node classification')

    args = parser.parse_args()
    run_benchmark(args)


if __name__ == '__main__':
    main()
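The benchmark entry points are plain functions, so they can also be driven from Python rather than the CLI. A minimal sketch, assuming the scripts directory is on sys.path so the file imports as benchmark_model (that module path is an assumption, not something this commit sets up):

# Hypothetical programmatic use of benchmark_model.py (module name assumed).
import torch

from benchmark_model import benchmark_node_classification

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# One run of the two-layer GCN on Cora with the script's default hyperparameters.
result = benchmark_node_classification(
    model_name='gcn', dataset_name='Cora', epochs=200,
    lr=0.01, weight_decay=5e-4, device=device,
)
print(f"test acc: {result['test_acc']:.4f} in {result['train_time']:.1f}s")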
529
skills/torch_geometric/scripts/create_gnn_template.py
Normal file
@@ -0,0 +1,529 @@
#!/usr/bin/env python3
"""
Generate boilerplate code for common GNN architectures in PyTorch Geometric.

This script creates ready-to-use GNN model templates with training loops,
evaluation metrics, and proper data handling.

Usage:
    python create_gnn_template.py --model gcn --task node_classification --output my_model.py
    python create_gnn_template.py --model gat --task graph_classification --output graph_classifier.py
"""

import argparse
from pathlib import Path


TEMPLATES = {
    'node_classification': {
        'gcn': '''import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.datasets import Planetoid


class GCN(torch.nn.Module):
    """Graph Convolutional Network for node classification."""

    def __init__(self, num_features, hidden_channels, num_classes, num_layers=2, dropout=0.5):
        super().__init__()
        self.convs = torch.nn.ModuleList()

        # First layer
        self.convs.append(GCNConv(num_features, hidden_channels))

        # Hidden layers
        for _ in range(num_layers - 2):
            self.convs.append(GCNConv(hidden_channels, hidden_channels))

        # Output layer
        self.convs.append(GCNConv(hidden_channels, num_classes))

        self.dropout = dropout

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        # Apply conv layers with ReLU and dropout
        for conv in self.convs[:-1]:
            x = conv(x, edge_index)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)

        # Final layer without activation
        x = self.convs[-1](x, edge_index)
        return F.log_softmax(x, dim=1)


def train(model, data, optimizer):
    """Train the model for one epoch."""
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def test(model, data):
    """Evaluate the model."""
    model.eval()
    out = model(data)
    pred = out.argmax(dim=1)

    accs = []
    for mask in [data.train_mask, data.val_mask, data.test_mask]:
        correct = (pred[mask] == data.y[mask]).sum()
        accs.append(int(correct) / int(mask.sum()))

    return accs


def main():
    # Load dataset
    dataset = Planetoid(root='/tmp/Cora', name='Cora')
    data = dataset[0]

    # Create model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GCN(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes,
        num_layers=3,
        dropout=0.5
    ).to(device)
    data = data.to(device)

    # Setup optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

    # Training loop
    print("Training GCN model...")
    best_val_acc = 0
    best_test_acc = 0
    for epoch in range(1, 201):
        loss = train(model, data, optimizer)
        train_acc, val_acc, test_acc = test(model, data)

        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc

        if epoch % 10 == 0:
            print(f'Epoch {epoch:03d}, Loss: {loss:.4f}, '
                  f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {test_acc:.4f}')

    print(f'\\nBest Test Accuracy: {best_test_acc:.4f}')


if __name__ == '__main__':
    main()
''',

        'gat': '''import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv
from torch_geometric.datasets import Planetoid


class GAT(torch.nn.Module):
    """Graph Attention Network for node classification."""

    def __init__(self, num_features, hidden_channels, num_classes, heads=8, dropout=0.6):
        super().__init__()

        self.conv1 = GATConv(num_features, hidden_channels, heads=heads, dropout=dropout)
        self.conv2 = GATConv(hidden_channels * heads, num_classes, heads=1,
                             concat=False, dropout=dropout)

        self.dropout = dropout

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.elu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)

        return F.log_softmax(x, dim=1)


def train(model, data, optimizer):
    """Train the model for one epoch."""
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def test(model, data):
    """Evaluate the model."""
    model.eval()
    out = model(data)
    pred = out.argmax(dim=1)

    accs = []
    for mask in [data.train_mask, data.val_mask, data.test_mask]:
        correct = (pred[mask] == data.y[mask]).sum()
        accs.append(int(correct) / int(mask.sum()))

    return accs


def main():
    # Load dataset
    dataset = Planetoid(root='/tmp/Cora', name='Cora')
    data = dataset[0]

    # Create model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GAT(
        num_features=dataset.num_features,
        hidden_channels=8,
        num_classes=dataset.num_classes,
        heads=8,
        dropout=0.6
    ).to(device)
    data = data.to(device)

    # Setup optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)

    # Training loop
    print("Training GAT model...")
    best_val_acc = 0
    best_test_acc = 0
    for epoch in range(1, 201):
        loss = train(model, data, optimizer)
        train_acc, val_acc, test_acc = test(model, data)

        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc

        if epoch % 10 == 0:
            print(f'Epoch {epoch:03d}, Loss: {loss:.4f}, '
                  f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {test_acc:.4f}')

    print(f'\\nBest Test Accuracy: {best_test_acc:.4f}')


if __name__ == '__main__':
    main()
''',

        'graphsage': '''import torch
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
from torch_geometric.datasets import Planetoid


class GraphSAGE(torch.nn.Module):
    """GraphSAGE for node classification."""

    def __init__(self, num_features, hidden_channels, num_classes, num_layers=2, dropout=0.5):
        super().__init__()
        self.convs = torch.nn.ModuleList()

        self.convs.append(SAGEConv(num_features, hidden_channels))
        for _ in range(num_layers - 2):
            self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, num_classes))

        self.dropout = dropout

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        for conv in self.convs[:-1]:
            x = conv(x, edge_index)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)

        x = self.convs[-1](x, edge_index)
        return F.log_softmax(x, dim=1)


def train(model, data, optimizer):
    model.train()
    optimizer.zero_grad()
    out = model(data)
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    return loss.item()


@torch.no_grad()
def test(model, data):
    model.eval()
    out = model(data)
    pred = out.argmax(dim=1)

    accs = []
    for mask in [data.train_mask, data.val_mask, data.test_mask]:
        correct = (pred[mask] == data.y[mask]).sum()
        accs.append(int(correct) / int(mask.sum()))

    return accs


def main():
    dataset = Planetoid(root='/tmp/Cora', name='Cora')
    data = dataset[0]

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GraphSAGE(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes,
        num_layers=2,
        dropout=0.5
    ).to(device)
    data = data.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

    print("Training GraphSAGE model...")
    best_val_acc = 0
    best_test_acc = 0
    for epoch in range(1, 201):
        loss = train(model, data, optimizer)
        train_acc, val_acc, test_acc = test(model, data)

        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc

        if epoch % 10 == 0:
            print(f'Epoch {epoch:03d}, Loss: {loss:.4f}, '
                  f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {test_acc:.4f}')

    print(f'\\nBest Test Accuracy: {best_test_acc:.4f}')


if __name__ == '__main__':
    main()
''',
    },

    'graph_classification': {
        'gin': '''import torch
import torch.nn.functional as F
from torch_geometric.nn import GINConv, global_add_pool
from torch_geometric.datasets import TUDataset
from torch_geometric.loader import DataLoader


class GIN(torch.nn.Module):
    """Graph Isomorphism Network for graph classification."""

    def __init__(self, num_features, hidden_channels, num_classes, num_layers=3, dropout=0.5):
        super().__init__()

        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        # Create MLP for first layer
        nn = torch.nn.Sequential(
            torch.nn.Linear(num_features, hidden_channels),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_channels, hidden_channels)
        )
        self.convs.append(GINConv(nn))
        self.batch_norms.append(torch.nn.BatchNorm1d(hidden_channels))

        # Hidden layers
        for _ in range(num_layers - 2):
            nn = torch.nn.Sequential(
                torch.nn.Linear(hidden_channels, hidden_channels),
                torch.nn.ReLU(),
                torch.nn.Linear(hidden_channels, hidden_channels)
            )
            self.convs.append(GINConv(nn))
            self.batch_norms.append(torch.nn.BatchNorm1d(hidden_channels))

        # Output MLP
        self.lin = torch.nn.Linear(hidden_channels, num_classes)
        self.dropout = dropout

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        for conv, batch_norm in zip(self.convs, self.batch_norms):
            x = conv(x, edge_index)
            x = batch_norm(x)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)

        # Global pooling
        x = global_add_pool(x, batch)

        # Output layer
        x = self.lin(x)
        return F.log_softmax(x, dim=1)


def train(model, loader, optimizer, device):
    """Train the model for one epoch."""
    model.train()
    total_loss = 0

    for data in loader:
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)
        loss = F.nll_loss(out, data.y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item() * data.num_graphs

    return total_loss / len(loader.dataset)


@torch.no_grad()
def test(model, loader, device):
    """Evaluate the model."""
    model.eval()
    correct = 0

    for data in loader:
        data = data.to(device)
        out = model(data)
        pred = out.argmax(dim=1)
        correct += (pred == data.y).sum().item()

    return correct / len(loader.dataset)


def main():
    # Load dataset
    dataset = TUDataset(root='/tmp/ENZYMES', name='ENZYMES')
    print(f"Dataset: {dataset}")
    print(f"Number of graphs: {len(dataset)}")
    print(f"Number of features: {dataset.num_features}")
    print(f"Number of classes: {dataset.num_classes}")

    # Shuffle and split
    dataset = dataset.shuffle()
    train_dataset = dataset[:int(len(dataset) * 0.8)]
    test_dataset = dataset[int(len(dataset) * 0.8):]

    # Create data loaders
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=32)

    # Create model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GIN(
        num_features=dataset.num_features,
        hidden_channels=64,
        num_classes=dataset.num_classes,
        num_layers=3,
        dropout=0.5
    ).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    # Training loop
    print("\\nTraining GIN model...")
    for epoch in range(1, 101):
        loss = train(model, train_loader, optimizer, device)
        train_acc = test(model, train_loader, device)
        test_acc = test(model, test_loader, device)

        if epoch % 10 == 0:
            print(f'Epoch {epoch:03d}, Loss: {loss:.4f}, '
                  f'Train Acc: {train_acc:.4f}, Test Acc: {test_acc:.4f}')


if __name__ == '__main__':
    main()
''',
    },
}


def generate_template(model_type: str, task: str, output_path: str):
    """Generate a GNN template file."""
    if task not in TEMPLATES:
        raise ValueError(f"Unknown task: {task}. Available: {list(TEMPLATES.keys())}")

    if model_type not in TEMPLATES[task]:
        raise ValueError(f"Model {model_type} not available for task {task}. "
                         f"Available: {list(TEMPLATES[task].keys())}")

    template = TEMPLATES[task][model_type]

    # Write to file, creating parent directories if necessary
    output_file = Path(output_path)
    output_file.parent.mkdir(parents=True, exist_ok=True)

    with open(output_file, 'w') as f:
        f.write(template)

    print(f"✓ Generated {model_type.upper()} template for {task}")
    print(f"  Saved to: {output_path}")
    print("\nTo run the template:")
    print(f"  python {output_path}")


def list_templates():
    """List all available templates."""
    print("Available GNN Templates")
    print("=" * 50)
    for task, models in TEMPLATES.items():
        print(f"\n{task.upper()}")
        print("-" * 50)
        for model in models.keys():
            print(f"  - {model}")
    print()


def main():
    parser = argparse.ArgumentParser(
        description="Generate GNN model templates",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python create_gnn_template.py --model gcn --task node_classification --output gcn_model.py
  python create_gnn_template.py --model gin --task graph_classification --output gin_model.py
  python create_gnn_template.py --list
"""
    )

    parser.add_argument('--model', type=str,
                        help='Model type (gcn, gat, graphsage, gin)')
    parser.add_argument('--task', type=str,
                        help='Task type (node_classification, graph_classification)')
    parser.add_argument('--output', type=str, default='gnn_model.py',
                        help='Output file path (default: gnn_model.py)')
    parser.add_argument('--list', action='store_true',
                        help='List all available templates')

    args = parser.parse_args()

    if args.list:
        list_templates()
        return

    if not args.model or not args.task:
        parser.print_help()
        print("\n" + "=" * 50)
        list_templates()
        return

    try:
        generate_template(args.model, args.task, args.output)
    except ValueError as e:
        print(f"Error: {e}")
        print("\nUse --list to see available templates")


if __name__ == '__main__':
    main()
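generate_template and list_templates are ordinary functions, so the generator can also be called from other tooling. A small sketch under the same path assumption as above (the file importable as create_gnn_template; the output path is just an illustrative choice):

# Hypothetical programmatic use of create_gnn_template.py (module name assumed).
from create_gnn_template import generate_template, list_templates

list_templates()  # print the available task/model combinations
# Writes the GIN graph-classification template; parent dirs are created as needed.
generate_template('gin', 'graph_classification', 'generated/gin_model.py')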
313
skills/torch_geometric/scripts/visualize_graph.py
Normal file
@@ -0,0 +1,313 @@
#!/usr/bin/env python3
"""
Visualize PyTorch Geometric graph structures using networkx and matplotlib.

This script provides utilities to visualize Data objects, including:
- Graph structure (nodes and edges)
- Node features (as colors)
- Edge attributes (as edge colors/widths)
- Community/cluster assignments

Usage:
    python visualize_graph.py --dataset Cora --output graph.png

Or import and use:
    from scripts.visualize_graph import visualize_data
    visualize_data(data, title="My Graph", show_labels=True)
"""

import argparse
from typing import Optional

import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import torch


def visualize_data(
    data,
    title: str = "Graph Visualization",
    node_color_attr: Optional[str] = None,
    edge_color_attr: Optional[str] = None,
    show_labels: bool = False,
    node_size: int = 300,
    figsize: tuple = (12, 10),
    layout: str = "spring",
    output_path: Optional[str] = None,
    max_nodes: Optional[int] = None,
):
    """
    Visualize a PyTorch Geometric Data object.

    Args:
        data: PyTorch Geometric Data object
        title: Plot title
        node_color_attr: Data attribute to use for node colors (e.g., 'y', 'train_mask')
        edge_color_attr: Data attribute to use for edge colors
        show_labels: Whether to show node labels
        node_size: Size of nodes in visualization
        figsize: Figure size (width, height)
        layout: Graph layout algorithm ('spring', 'circular', 'kamada_kawai', 'spectral')
        output_path: Path to save figure (if None, displays interactively)
        max_nodes: Maximum number of nodes to visualize (samples if exceeded)
    """
    # Sample nodes if the graph is too large to draw legibly
    if max_nodes and data.num_nodes > max_nodes:
        print(f"Graph has {data.num_nodes} nodes. Sampling {max_nodes} nodes for visualization.")
        node_indices = torch.randperm(data.num_nodes)[:max_nodes]
        data = data.subgraph(node_indices)

    # Convert to a networkx graph (directed if any edge lacks its reverse)
    G = nx.Graph() if is_undirected(data.edge_index) else nx.DiGraph()

    # Add nodes
    G.add_nodes_from(range(data.num_nodes))

    # Add edges
    edge_index = data.edge_index.cpu().numpy()
    edges = list(zip(edge_index[0], edge_index[1]))
    G.add_edges_from(edges)

    # Set up figure
    fig, ax = plt.subplots(figsize=figsize)

    # Choose layout
    if layout == "spring":
        pos = nx.spring_layout(G, k=0.5, iterations=50)
    elif layout == "circular":
        pos = nx.circular_layout(G)
    elif layout == "kamada_kawai":
        pos = nx.kamada_kawai_layout(G)
    elif layout == "spectral":
        pos = nx.spectral_layout(G)
    else:
        raise ValueError(f"Unknown layout: {layout}")

    # Determine node colors
    if node_color_attr and hasattr(data, node_color_attr):
        node_colors = getattr(data, node_color_attr).cpu().numpy()
        if node_colors.dtype == bool:
            node_colors = node_colors.astype(int)
        if len(node_colors.shape) > 1:
            # Multi-dimensional features: use the first dimension
            node_colors = node_colors[:, 0]
    else:
        node_colors = 'skyblue'

    # Determine edge colors
    if edge_color_attr and hasattr(data, edge_color_attr):
        edge_colors = getattr(data, edge_color_attr).cpu().numpy()
        if len(edge_colors.shape) > 1:
            edge_colors = edge_colors[:, 0]
    else:
        edge_colors = 'gray'

    # Draw graph
    nx.draw_networkx_nodes(
        G, pos,
        node_color=node_colors,
        node_size=node_size,
        cmap=plt.cm.viridis,
        ax=ax
    )

    nx.draw_networkx_edges(
        G, pos,
        edge_color=edge_colors,
        alpha=0.3,
        arrows=isinstance(G, nx.DiGraph),
        arrowsize=10,
        ax=ax
    )

    if show_labels:
        nx.draw_networkx_labels(G, pos, font_size=8, ax=ax)

    ax.set_title(title, fontsize=16, fontweight='bold')
    ax.axis('off')

    # Add colorbar if using numeric node colors
    if node_color_attr and isinstance(node_colors, np.ndarray):
        sm = plt.cm.ScalarMappable(
            cmap=plt.cm.viridis,
            norm=plt.Normalize(vmin=node_colors.min(), vmax=node_colors.max())
        )
        sm.set_array([])
        cbar = plt.colorbar(sm, ax=ax, fraction=0.046, pad=0.04)
        cbar.set_label(node_color_attr, rotation=270, labelpad=20)

    plt.tight_layout()

    if output_path:
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Figure saved to {output_path}")
    else:
        plt.show()

    plt.close()


def is_undirected(edge_index):
    """Check if the graph is undirected (torch_geometric.utils.is_undirected
    offers the same check; this local version avoids the extra import)."""
    row, col = edge_index

    # Create a set of edges and reverse edges
    edges = set(zip(row.tolist(), col.tolist()))
    reverse_edges = set(zip(col.tolist(), row.tolist()))

    # The graph is undirected iff every edge has its reverse
    return edges == reverse_edges


def plot_degree_distribution(data, output_path: Optional[str] = None):
    """Plot the degree distribution of the graph."""
    from torch_geometric.utils import degree

    row, col = data.edge_index
    deg = degree(col, data.num_nodes).cpu().numpy()

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))

    # Histogram
    ax1.hist(deg, bins=50, edgecolor='black', alpha=0.7)
    ax1.set_xlabel('Degree', fontsize=12)
    ax1.set_ylabel('Frequency', fontsize=12)
    ax1.set_title('Degree Distribution', fontsize=14, fontweight='bold')
    ax1.grid(alpha=0.3)

    # Log-log plot
    unique_degrees, counts = np.unique(deg, return_counts=True)
    ax2.loglog(unique_degrees, counts, 'o-', alpha=0.7)
    ax2.set_xlabel('Degree (log scale)', fontsize=12)
    ax2.set_ylabel('Frequency (log scale)', fontsize=12)
    ax2.set_title('Degree Distribution (Log-Log)', fontsize=14, fontweight='bold')
    ax2.grid(alpha=0.3)

    plt.tight_layout()

    if output_path:
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Degree distribution saved to {output_path}")
    else:
        plt.show()

    plt.close()


def plot_graph_statistics(data, output_path: Optional[str] = None):
    """Plot various graph statistics."""
    from torch_geometric.utils import degree, contains_self_loops, is_undirected as check_undirected

    # Compute statistics
    row, col = data.edge_index
    deg = degree(col, data.num_nodes).cpu().numpy()

    stats = {
        'Nodes': data.num_nodes,
        'Edges': data.num_edges,
        'Avg Degree': deg.mean(),
        'Max Degree': deg.max(),
        'Self-loops': contains_self_loops(data.edge_index),
        'Undirected': check_undirected(data.edge_index),
    }

    if hasattr(data, 'num_node_features'):
        stats['Node Features'] = data.num_node_features
    if hasattr(data, 'num_edge_features') and data.edge_attr is not None:
        stats['Edge Features'] = data.num_edge_features
    if hasattr(data, 'y'):
        if data.y.dim() == 1:
            stats['Classes'] = int(data.y.max().item()) + 1

    # Create text plot
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.axis('off')

    text = "Graph Statistics\n" + "=" * 40 + "\n\n"
    for key, value in stats.items():
        text += f"{key:20s}: {value}\n"

    ax.text(0.1, 0.5, text, fontsize=14, family='monospace',
            verticalalignment='center', transform=ax.transAxes)

    plt.tight_layout()

    if output_path:
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Statistics saved to {output_path}")
    else:
        plt.show()

    plt.close()

    # Print to console as well
    print("\n" + text)


def main():
    parser = argparse.ArgumentParser(description="Visualize PyTorch Geometric graphs")
    parser.add_argument('--dataset', type=str, default='Cora',
                        help='Dataset name (e.g., Cora, CiteSeer, ENZYMES)')
    parser.add_argument('--output', type=str, default=None,
                        help='Output file path for visualization')
    parser.add_argument('--node-color', type=str, default='y',
                        help='Attribute to use for node colors')
    parser.add_argument('--layout', type=str, default='spring',
                        choices=['spring', 'circular', 'kamada_kawai', 'spectral'],
                        help='Graph layout algorithm')
    parser.add_argument('--show-labels', action='store_true',
                        help='Show node labels')
    parser.add_argument('--max-nodes', type=int, default=500,
                        help='Maximum nodes to visualize')
    parser.add_argument('--stats', action='store_true',
                        help='Show graph statistics')
    parser.add_argument('--degree', action='store_true',
                        help='Show degree distribution')

    args = parser.parse_args()

    # Load dataset
    print(f"Loading dataset: {args.dataset}")

    try:
        # Try Planetoid datasets first
        from torch_geometric.datasets import Planetoid
        dataset = Planetoid(root=f'/tmp/{args.dataset}', name=args.dataset)
        data = dataset[0]
    except Exception:
        try:
            # Fall back to TUDataset
            from torch_geometric.datasets import TUDataset
            dataset = TUDataset(root=f'/tmp/{args.dataset}', name=args.dataset)
            data = dataset[0]
        except Exception as e:
            print(f"Error loading dataset: {e}")
            print("Supported datasets: Cora, CiteSeer, PubMed, ENZYMES, PROTEINS, etc.")
            return

    print(f"Loaded {args.dataset}: {data.num_nodes} nodes, {data.num_edges} edges")

    # Generate visualizations
    if args.stats:
        stats_output = args.output.replace('.png', '_stats.png') if args.output else None
        plot_graph_statistics(data, stats_output)

    if args.degree:
        degree_output = args.output.replace('.png', '_degree.png') if args.output else None
        plot_degree_distribution(data, degree_output)

    # Main visualization
    visualize_data(
        data,
        title=f"{args.dataset} Graph",
        node_color_attr=args.node_color,
        show_labels=args.show_labels,
        layout=args.layout,
        output_path=args.output,
        max_nodes=args.max_nodes
    )


if __name__ == '__main__':
    main()
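Since visualize_data only needs a Data object, it also works on hand-built graphs, which is handy for smoke-testing the plotting options without downloading a dataset. A minimal sketch, again assuming the file imports as visualize_graph (the output filename is an arbitrary example):

# Hypothetical library-style use of visualize_graph.py (module name assumed).
import torch
from torch_geometric.data import Data

from visualize_graph import visualize_data

# A 4-node undirected path graph; PyG stores both directions of each edge.
edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                           [1, 0, 2, 1, 3, 2]])
data = Data(edge_index=edge_index, num_nodes=4, y=torch.tensor([0, 0, 1, 1]))

visualize_data(data, title="Toy path graph", node_color_attr='y',
               show_labels=True, output_path='toy_graph.png')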