Initial commit

skills/pytorch-lightning/scripts/quick_trainer_setup.py (new file, 454 lines)
@@ -0,0 +1,454 @@
"""
Quick Trainer Setup Examples for PyTorch Lightning.

This script provides ready-to-use Trainer configurations for common use cases.
Copy and modify these configurations for your specific needs.
"""

import lightning as L
from lightning.pytorch.callbacks import (
    ModelCheckpoint,
    EarlyStopping,
    LearningRateMonitor,
    DeviceStatsMonitor,
    RichProgressBar,
)
from lightning.pytorch import loggers as pl_loggers
from lightning.pytorch.strategies import DDPStrategy, FSDPStrategy


# =============================================================================
# 1. BASIC TRAINING (Single GPU/CPU)
# =============================================================================

def basic_trainer():
    """
    Simple trainer for quick prototyping.
    Use for: Small models, debugging, single GPU training
    """
    trainer = L.Trainer(
        max_epochs=10,
        accelerator="auto",  # Automatically select GPU/CPU
        devices="auto",  # Use all available devices
        enable_progress_bar=True,
        logger=True,
    )
    return trainer


# =============================================================================
# 2. DEBUGGING CONFIGURATION
# =============================================================================

def debug_trainer():
    """
    Trainer for debugging with fast dev run and anomaly detection.
    Use for: Finding bugs, testing code quickly
    """
    trainer = L.Trainer(
        fast_dev_run=True,  # Run 1 batch through train/val/test
        accelerator="cpu",  # Use CPU for easier debugging
        detect_anomaly=True,  # Detect NaN/Inf in gradients
        log_every_n_steps=1,  # Log every step
        enable_progress_bar=True,
    )
    return trainer


# =============================================================================
# 3. PRODUCTION TRAINING (Single GPU)
# =============================================================================

def production_single_gpu_trainer(
    max_epochs=100,
    log_dir="logs",
    checkpoint_dir="checkpoints"
):
    """
    Production-ready trainer for single GPU with checkpointing and logging.
    Use for: Final training runs on single GPU
    """
    # Callbacks
    checkpoint_callback = ModelCheckpoint(
        dirpath=checkpoint_dir,
        filename="{epoch:02d}-{val_loss:.2f}",
        monitor="val_loss",
        mode="min",
        save_top_k=3,
        save_last=True,
        verbose=True,
    )

    early_stop_callback = EarlyStopping(
        monitor="val_loss",
        patience=10,
        mode="min",
        verbose=True,
    )

    lr_monitor = LearningRateMonitor(logging_interval="step")

    # Logger
    tb_logger = pl_loggers.TensorBoardLogger(
        save_dir=log_dir,
        name="my_model",
    )

    # Trainer
    trainer = L.Trainer(
        max_epochs=max_epochs,
        accelerator="gpu",
        devices=1,
        precision="16-mixed",  # Mixed precision for speed
        callbacks=[
            checkpoint_callback,
            early_stop_callback,
            lr_monitor,
        ],
        logger=tb_logger,
        log_every_n_steps=50,
        gradient_clip_val=1.0,  # Clip gradients
        enable_progress_bar=True,
    )

    return trainer
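
# Typical usage (a sketch; `MyModel` and `MyDataModule` stand in for your own
# LightningModule and LightningDataModule and are not defined in this script):
#
#     model = MyModel()
#     dm = MyDataModule()
#     trainer = production_single_gpu_trainer(max_epochs=100)
#     trainer.fit(model, datamodule=dm)
#     trainer.test(datamodule=dm, ckpt_path="best")  # evaluate the best checkpoint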


# =============================================================================
# 4. MULTI-GPU TRAINING (DDP)
# =============================================================================

def multi_gpu_ddp_trainer(
    max_epochs=100,
    num_gpus=4,
    log_dir="logs",
    checkpoint_dir="checkpoints"
):
    """
    Multi-GPU training with Distributed Data Parallel.
    Use for: Models <500M parameters, standard deep learning models
    """
    # Callbacks
    checkpoint_callback = ModelCheckpoint(
        dirpath=checkpoint_dir,
        filename="{epoch:02d}-{val_loss:.2f}",
        monitor="val_loss",
        mode="min",
        save_top_k=3,
        save_last=True,
    )

    early_stop_callback = EarlyStopping(
        monitor="val_loss",
        patience=10,
        mode="min",
    )

    lr_monitor = LearningRateMonitor(logging_interval="step")

    # Logger
    wandb_logger = pl_loggers.WandbLogger(
        project="my-project",
        save_dir=log_dir,
    )

    # Trainer
    trainer = L.Trainer(
        max_epochs=max_epochs,
        accelerator="gpu",
        devices=num_gpus,
        strategy=DDPStrategy(
            find_unused_parameters=False,
            gradient_as_bucket_view=True,
        ),
        precision="16-mixed",
        callbacks=[
            checkpoint_callback,
            early_stop_callback,
            lr_monitor,
        ],
        logger=wandb_logger,
        log_every_n_steps=50,
        gradient_clip_val=1.0,
        sync_batchnorm=True,  # Sync batch norm across GPUs
    )

    return trainer
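
# Launch note: with DDPStrategy, calling trainer.fit() starts one process per
# GPU, and each process re-runs this script, so keep model and data setup behind
# an `if __name__ == "__main__":` guard. A sketch (`MyModel` and `MyDataModule`
# are placeholders):
#
#     if __name__ == "__main__":
#         model = MyModel()
#         dm = MyDataModule()
#         trainer = multi_gpu_ddp_trainer(num_gpus=4)
#         trainer.fit(model, datamodule=dm)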


# =============================================================================
# 5. LARGE MODEL TRAINING (FSDP)
# =============================================================================

def large_model_fsdp_trainer(
    max_epochs=100,
    num_gpus=8,
    log_dir="logs",
    checkpoint_dir="checkpoints"
):
    """
    Training for large models (500M+ parameters) with FSDP.
    Use for: Large transformers, models that don't fit on a single GPU
    """
    import torch.nn as nn

    # Callbacks
    checkpoint_callback = ModelCheckpoint(
        dirpath=checkpoint_dir,
        filename="{epoch:02d}-{val_loss:.2f}",
        monitor="val_loss",
        mode="min",
        save_top_k=3,
        save_last=True,
    )

    lr_monitor = LearningRateMonitor(logging_interval="step")

    # Logger
    wandb_logger = pl_loggers.WandbLogger(
        project="large-model",
        save_dir=log_dir,
    )

    # Trainer with FSDP
    trainer = L.Trainer(
        max_epochs=max_epochs,
        accelerator="gpu",
        devices=num_gpus,
        strategy=FSDPStrategy(
            sharding_strategy="FULL_SHARD",
            activation_checkpointing_policy={
                nn.TransformerEncoderLayer,
                nn.TransformerDecoderLayer,
            },
            cpu_offload=False,  # Set True if GPU memory insufficient
        ),
        precision="bf16-mixed",  # BFloat16 for A100/H100
        callbacks=[
            checkpoint_callback,
            lr_monitor,
        ],
        logger=wandb_logger,
        log_every_n_steps=10,
        gradient_clip_val=1.0,
        accumulate_grad_batches=4,  # Gradient accumulation
    )

    return trainer
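
# For models built from custom blocks, FSDP also needs an auto-wrap policy so it
# knows which submodules to shard as units. A minimal sketch (reusing
# nn.TransformerEncoderLayer as the wrapped block; substitute your own class):

def fsdp_auto_wrap_strategy():
    """Return an FSDPStrategy that shards each transformer encoder layer."""
    import torch.nn as nn

    return FSDPStrategy(
        sharding_strategy="FULL_SHARD",
        auto_wrap_policy={nn.TransformerEncoderLayer},  # wrap each layer as an FSDP unit
    )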


# =============================================================================
# 6. VERY LARGE MODEL TRAINING (DeepSpeed)
# =============================================================================

def deepspeed_trainer(
    max_epochs=100,
    num_gpus=8,
    stage=3,
    log_dir="logs",
    checkpoint_dir="checkpoints"
):
    """
    Training for very large models with DeepSpeed.
    Use for: Models >10B parameters, maximum memory efficiency
    """
    # Callbacks
    checkpoint_callback = ModelCheckpoint(
        dirpath=checkpoint_dir,
        filename="{epoch:02d}-{step:06d}",
        save_top_k=-1,  # no metric is monitored here, so keep all periodic checkpoints
        save_last=True,
        every_n_train_steps=1000,  # Save every N steps
    )

    lr_monitor = LearningRateMonitor(logging_interval="step")

    # Logger
    wandb_logger = pl_loggers.WandbLogger(
        project="very-large-model",
        save_dir=log_dir,
    )

    # Select DeepSpeed stage
    strategy = f"deepspeed_stage_{stage}"

    # Trainer
    trainer = L.Trainer(
        max_epochs=max_epochs,
        accelerator="gpu",
        devices=num_gpus,
        strategy=strategy,
        precision="16-mixed",
        callbacks=[
            checkpoint_callback,
            lr_monitor,
        ],
        logger=wandb_logger,
        log_every_n_steps=10,
        gradient_clip_val=1.0,
        accumulate_grad_batches=4,
    )

    return trainer
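
# For finer control than the "deepspeed_stage_N" shortcut strings, the strategy
# object can be configured directly. A sketch of ZeRO Stage 3 with CPU offload
# (requires the `deepspeed` package to be installed):

def deepspeed_offload_strategy():
    """Return a DeepSpeedStrategy with optimizer and parameter offloading."""
    from lightning.pytorch.strategies import DeepSpeedStrategy

    return DeepSpeedStrategy(
        stage=3,                  # ZeRO Stage 3: shard optimizer states, gradients, parameters
        offload_optimizer=True,   # keep optimizer states in CPU memory
        offload_parameters=True,  # keep sharded parameters in CPU memory
    )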


# =============================================================================
# 7. HYPERPARAMETER TUNING
# =============================================================================

def hyperparameter_tuning_trainer(max_epochs=50):
    """
    Lightweight trainer for hyperparameter search.
    Use for: Quick experiments, hyperparameter tuning
    """
    trainer = L.Trainer(
        max_epochs=max_epochs,
        accelerator="auto",
        devices=1,
        enable_checkpointing=False,  # Don't save checkpoints
        logger=False,  # Disable logging
        enable_progress_bar=False,
        limit_train_batches=0.5,  # Use 50% of training data
        limit_val_batches=0.5,  # Use 50% of validation data
    )
    return trainer
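
# A minimal sweep sketch built on this trainer (`MyModel` and `dm` are
# placeholders for your own LightningModule and LightningDataModule):
#
#     results = {}
#     for lr in [1e-4, 3e-4, 1e-3]:
#         model = MyModel(learning_rate=lr)
#         trainer = hyperparameter_tuning_trainer(max_epochs=20)
#         trainer.fit(model, datamodule=dm)
#         results[lr] = trainer.callback_metrics.get("val_loss")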


# =============================================================================
# 8. OVERFITTING TEST
# =============================================================================

def overfit_test_trainer(num_batches=10):
    """
    Trainer for overfitting on a small subset to verify model capacity.
    Use for: Testing if model can learn, debugging
    """
    trainer = L.Trainer(
        max_epochs=100,
        accelerator="auto",
        devices=1,
        overfit_batches=num_batches,  # Overfit on N batches
        log_every_n_steps=1,
        enable_progress_bar=True,
    )
    return trainer


# =============================================================================
# 9. TIME-LIMITED TRAINING (SLURM)
# =============================================================================

def time_limited_trainer(
    max_time_hours=23.5,
    max_epochs=1000,
    checkpoint_dir="checkpoints"
):
    """
    Training with a time limit for SLURM clusters.
    Use for: Cluster jobs with time limits
    """
    from datetime import timedelta

    checkpoint_callback = ModelCheckpoint(
        dirpath=checkpoint_dir,
        save_top_k=-1,  # no metric is monitored here, so keep all periodic checkpoints
        save_last=True,  # Important for resuming
        every_n_epochs=5,
    )

    trainer = L.Trainer(
        max_epochs=max_epochs,
        max_time=timedelta(hours=max_time_hours),
        accelerator="gpu",
        devices="auto",
        callbacks=[checkpoint_callback],
        log_every_n_steps=50,
    )

    return trainer
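
# When the job is resubmitted, resume from the last checkpoint written by the
# callback above (a sketch; `model` and `dm` are placeholders):
#
#     trainer = time_limited_trainer(max_time_hours=23.5)
#     trainer.fit(model, datamodule=dm, ckpt_path="last")  # or an explicit path to last.ckpt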


# =============================================================================
# 10. REPRODUCIBLE RESEARCH
# =============================================================================

def reproducible_trainer(seed=42, max_epochs=100):
    """
    Fully reproducible trainer for research papers.
    Use for: Publications, reproducible results
    """
    # Set seed
    L.seed_everything(seed, workers=True)

    # Callbacks
    checkpoint_callback = ModelCheckpoint(
        dirpath="checkpoints",
        filename="{epoch:02d}-{val_loss:.2f}",
        monitor="val_loss",
        mode="min",
        save_top_k=3,
        save_last=True,
    )

    # Trainer
    trainer = L.Trainer(
        max_epochs=max_epochs,
        accelerator="gpu",
        devices=1,
        precision="32-true",  # Full precision for reproducibility
        deterministic=True,  # Use deterministic algorithms
        benchmark=False,  # Disable cudnn benchmarking
        callbacks=[checkpoint_callback],
        log_every_n_steps=50,
    )

    return trainer
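
# Some CUDA kernels are only deterministic when the cuBLAS workspace is pinned.
# If `deterministic=True` raises an error mentioning CUBLAS_WORKSPACE_CONFIG,
# set the variable before CUDA is initialized (sketch):
#
#     import os
#     os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
#     trainer = reproducible_trainer(seed=42)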


# =============================================================================
# USAGE EXAMPLES
# =============================================================================

if __name__ == "__main__":
    print("PyTorch Lightning Trainer Configurations\n")

    # Example 1: Basic training
    print("1. Basic Trainer:")
    trainer = basic_trainer()
    print(f" - Max epochs: {trainer.max_epochs}")
    print(f" - Accelerator: {trainer.accelerator}")
    print()

    # Example 2: Debug training
    print("2. Debug Trainer:")
    trainer = debug_trainer()
    print(f" - Fast dev run: {trainer.fast_dev_run}")
    print(f" - Detect anomaly: {trainer.detect_anomaly}")
    print()

    # Example 3: Production single GPU
    print("3. Production Single GPU Trainer:")
    trainer = production_single_gpu_trainer(max_epochs=100)
    print(f" - Max epochs: {trainer.max_epochs}")
    print(f" - Precision: {trainer.precision}")
    print(f" - Callbacks: {len(trainer.callbacks)}")
    print()

    # Example 4: Multi-GPU DDP
    print("4. Multi-GPU DDP Trainer:")
    trainer = multi_gpu_ddp_trainer(num_gpus=4)
    print(f" - Strategy: {trainer.strategy}")
    print(f" - Devices: {trainer.num_devices}")
    print()

    # Example 5: FSDP for large models
    print("5. FSDP Trainer for Large Models:")
    trainer = large_model_fsdp_trainer(num_gpus=8)
    print(f" - Strategy: {trainer.strategy}")
    print(f" - Precision: {trainer.precision}")
    print()

    print("\nTo use these configurations:")
    print("1. Import the desired function")
    print("2. Create trainer: trainer = production_single_gpu_trainer()")
    print("3. Train model: trainer.fit(model, datamodule=dm)")

skills/pytorch-lightning/scripts/template_datamodule.py (new file, 328 lines)
@@ -0,0 +1,328 @@
"""
Template for creating a PyTorch Lightning DataModule.

This template provides a complete boilerplate for building a LightningDataModule
with all essential methods and best practices for data handling.
"""

import lightning as L
from torch.utils.data import Dataset, DataLoader, random_split
import torch


class CustomDataset(Dataset):
    """
    Custom Dataset implementation.

    Replace this with your actual dataset implementation.
    """

    def __init__(self, data_path, transform=None):
        """
        Initialize the dataset.

        Args:
            data_path: Path to data directory
            transform: Optional transforms to apply
        """
        self.data_path = data_path
        self.transform = transform

        # Load your data here
        # self.data = load_data(data_path)
        # self.labels = load_labels(data_path)

        # Placeholder data
        self.data = torch.randn(1000, 3, 224, 224)
        self.labels = torch.randint(0, 10, (1000,))

    def __len__(self):
        """Return the size of the dataset."""
        return len(self.data)

    def __getitem__(self, idx):
        """
        Get a single item from the dataset.

        Args:
            idx: Index of the item

        Returns:
            Tuple of (data, label)
        """
        sample = self.data[idx]
        label = self.labels[idx]

        if self.transform:
            sample = self.transform(sample)

        return sample, label
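

class TransformedSubset(Dataset):
    """
    Wrap a dataset split (e.g. a Subset from random_split) with its own transform.

    A minimal sketch of one way to give the train and validation splits different
    transforms: build the base dataset without a transform, split it, then wrap
    each split with this class (see the note in TemplateDataModule.setup below).
    """

    def __init__(self, subset, transform=None):
        self.subset = subset
        self.transform = transform

    def __len__(self):
        return len(self.subset)

    def __getitem__(self, idx):
        sample, label = self.subset[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample, label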


class TemplateDataModule(L.LightningDataModule):
    """
    Template LightningDataModule for data handling.

    This class encapsulates all data processing steps:
    1. Download/prepare data (prepare_data)
    2. Create datasets (setup)
    3. Create dataloaders (train/val/test/predict_dataloader)

    Args:
        data_dir: Directory containing the data
        batch_size: Batch size for dataloaders
        num_workers: Number of workers for data loading
        train_val_split: Train/validation split ratio
        pin_memory: Whether to pin memory for faster GPU transfer
    """

    def __init__(
        self,
        data_dir: str = "./data",
        batch_size: int = 32,
        num_workers: int = 4,
        train_val_split: float = 0.8,
        pin_memory: bool = True,
    ):
        super().__init__()

        # Save hyperparameters
        self.save_hyperparameters()

        # Initialize as None (will be set in setup)
        self.train_dataset = None
        self.val_dataset = None
        self.test_dataset = None
        self.predict_dataset = None

    def prepare_data(self):
        """
        Download and prepare data.

        This method is called only once and on a single process.
        Do not set state here (e.g., self.x = y) because it's not
        transferred to other processes.

        Use this for:
        - Downloading datasets
        - Tokenizing text
        - Saving processed data to disk
        """
        # Example: Download data if not exists
        # if not os.path.exists(self.hparams.data_dir):
        #     download_dataset(self.hparams.data_dir)

        # Example: Process and save data
        # process_and_save(self.hparams.data_dir)

        pass

    def setup(self, stage: str = None):
        """
        Create datasets for each stage.

        This method is called on every process in distributed training.
        Set state here (e.g., self.train_dataset = ...).

        Args:
            stage: Current stage ('fit', 'validate', 'test', or 'predict')
        """
        # Define transforms
        train_transform = self._get_train_transforms()
        test_transform = self._get_test_transforms()

        # Setup for training and validation
        if stage == "fit" or stage is None:
            # Load full dataset
            full_dataset = CustomDataset(
                self.hparams.data_dir, transform=train_transform
            )

            # Split into train and validation
            train_size = int(self.hparams.train_val_split * len(full_dataset))
            val_size = len(full_dataset) - train_size

            self.train_dataset, self.val_dataset = random_split(
                full_dataset,
                [train_size, val_size],
                generator=torch.Generator().manual_seed(42),
            )

            # Apply test transforms to the validation set
            # (Note: random_split does not support different transforms per split;
            # the TransformedSubset sketch above shows one way to wrap each split.)

        # Setup for testing
        if stage == "test" or stage is None:
            self.test_dataset = CustomDataset(
                self.hparams.data_dir, transform=test_transform
            )

        # Setup for prediction
        if stage == "predict":
            self.predict_dataset = CustomDataset(
                self.hparams.data_dir, transform=test_transform
            )

    def _get_train_transforms(self):
        """
        Define training transforms/augmentations.

        Returns:
            Training transforms
        """
        # Example with torchvision:
        # from torchvision import transforms
        # return transforms.Compose([
        #     transforms.RandomHorizontalFlip(),
        #     transforms.RandomRotation(10),
        #     transforms.Normalize(mean=[0.485, 0.456, 0.406],
        #                          std=[0.229, 0.224, 0.225])
        # ])

        return None

    def _get_test_transforms(self):
        """
        Define test/validation transforms (no augmentation).

        Returns:
            Test/validation transforms
        """
        # Example with torchvision:
        # from torchvision import transforms
        # return transforms.Compose([
        #     transforms.Normalize(mean=[0.485, 0.456, 0.406],
        #                          std=[0.229, 0.224, 0.225])
        # ])

        return None

    def train_dataloader(self):
        """
        Create training dataloader.

        Returns:
            Training DataLoader
        """
        return DataLoader(
            self.train_dataset,
            batch_size=self.hparams.batch_size,
            shuffle=True,
            num_workers=self.hparams.num_workers,
            pin_memory=self.hparams.pin_memory,
            persistent_workers=self.hparams.num_workers > 0,
            drop_last=True,  # Drop last incomplete batch
        )

    def val_dataloader(self):
        """
        Create validation dataloader.

        Returns:
            Validation DataLoader
        """
        return DataLoader(
            self.val_dataset,
            batch_size=self.hparams.batch_size,
            shuffle=False,
            num_workers=self.hparams.num_workers,
            pin_memory=self.hparams.pin_memory,
            persistent_workers=self.hparams.num_workers > 0,
        )

    def test_dataloader(self):
        """
        Create test dataloader.

        Returns:
            Test DataLoader
        """
        return DataLoader(
            self.test_dataset,
            batch_size=self.hparams.batch_size,
            shuffle=False,
            num_workers=self.hparams.num_workers,
        )

    def predict_dataloader(self):
        """
        Create prediction dataloader.

        Returns:
            Prediction DataLoader
        """
        return DataLoader(
            self.predict_dataset,
            batch_size=self.hparams.batch_size,
            shuffle=False,
            num_workers=self.hparams.num_workers,
        )

    # Optional: State management for checkpointing

    def state_dict(self):
        """
        Save DataModule state for checkpointing.

        Returns:
            State dictionary
        """
        return {"train_val_split": self.hparams.train_val_split}

    def load_state_dict(self, state_dict):
        """
        Restore DataModule state from checkpoint.

        Args:
            state_dict: State dictionary
        """
        self.hparams.train_val_split = state_dict["train_val_split"]

    # Optional: Teardown for cleanup

    def teardown(self, stage: str = None):
        """
        Cleanup after training/testing/prediction.

        Args:
            stage: Current stage ('fit', 'validate', 'test', or 'predict')
        """
        # Clean up resources
        if stage == "fit":
            self.train_dataset = None
            self.val_dataset = None
        elif stage == "test":
            self.test_dataset = None
        elif stage == "predict":
            self.predict_dataset = None


# Example usage
if __name__ == "__main__":
    # Create DataModule
    dm = TemplateDataModule(
        data_dir="./data",
        batch_size=64,
        num_workers=8,
        train_val_split=0.8,
    )

    # Setup for training
    dm.prepare_data()
    dm.setup(stage="fit")

    # Get dataloaders
    train_loader = dm.train_dataloader()
    val_loader = dm.val_dataloader()

    print(f"Train dataset size: {len(dm.train_dataset)}")
    print(f"Validation dataset size: {len(dm.val_dataset)}")
    print(f"Train batches: {len(train_loader)}")
    print(f"Validation batches: {len(val_loader)}")

    # Example: Use with Trainer
    # from template_lightning_module import TemplateLightningModule
    # model = TemplateLightningModule()
    # trainer = L.Trainer(max_epochs=10)
    # trainer.fit(model, datamodule=dm)

skills/pytorch-lightning/scripts/template_lightning_module.py (new file, 219 lines)
@@ -0,0 +1,219 @@
"""
Template for creating a PyTorch Lightning Module.

This template provides a complete boilerplate for building a LightningModule
with all essential methods and best practices.
"""

import lightning as L
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau


class TemplateLightningModule(L.LightningModule):
    """
    Template LightningModule for building deep learning models.

    Args:
        learning_rate: Learning rate for optimizer
        hidden_dim: Hidden dimension size
        dropout: Dropout probability
    """

    def __init__(
        self,
        learning_rate: float = 0.001,
        hidden_dim: int = 256,
        dropout: float = 0.1,
    ):
        super().__init__()

        # Save hyperparameters (accessible via self.hparams)
        self.save_hyperparameters()

        # Define your model architecture
        self.model = nn.Sequential(
            nn.Linear(784, self.hparams.hidden_dim),
            nn.ReLU(),
            nn.Dropout(self.hparams.dropout),
            nn.Linear(self.hparams.hidden_dim, 10),
        )

        # Optional: Define metrics
        # from torchmetrics import Accuracy
        # self.train_accuracy = Accuracy(task="multiclass", num_classes=10)
        # self.val_accuracy = Accuracy(task="multiclass", num_classes=10)

    def forward(self, x):
        """
        Forward pass of the model.

        Args:
            x: Input tensor

        Returns:
            Model output
        """
        return self.model(x)

    def training_step(self, batch, batch_idx):
        """
        Training step (called for each training batch).

        Args:
            batch: Current batch of data
            batch_idx: Index of the current batch

        Returns:
            Loss tensor
        """
        x, y = batch

        # Forward pass
        logits = self(x)
        loss = F.cross_entropy(logits, y)

        # Calculate accuracy (optional)
        preds = torch.argmax(logits, dim=1)
        acc = (preds == y).float().mean()

        # Log metrics
        self.log("train/loss", loss, on_step=True, on_epoch=True, prog_bar=True)
        self.log("train/acc", acc, on_step=True, on_epoch=True)
        self.log("learning_rate", self.optimizers().param_groups[0]["lr"])

        return loss

    def validation_step(self, batch, batch_idx):
        """
        Validation step (called for each validation batch).

        Args:
            batch: Current batch of data
            batch_idx: Index of the current batch
        """
        x, y = batch

        # Forward pass
        logits = self(x)
        loss = F.cross_entropy(logits, y)

        # Calculate accuracy
        preds = torch.argmax(logits, dim=1)
        acc = (preds == y).float().mean()

        # Log metrics (automatically aggregated across batches)
        self.log("val/loss", loss, on_epoch=True, prog_bar=True, sync_dist=True)
        self.log("val/acc", acc, on_epoch=True, prog_bar=True, sync_dist=True)

    def test_step(self, batch, batch_idx):
        """
        Test step (called for each test batch).

        Args:
            batch: Current batch of data
            batch_idx: Index of the current batch
        """
        x, y = batch

        # Forward pass
        logits = self(x)
        loss = F.cross_entropy(logits, y)

        # Calculate accuracy
        preds = torch.argmax(logits, dim=1)
        acc = (preds == y).float().mean()

        # Log metrics
        self.log("test/loss", loss, on_epoch=True)
        self.log("test/acc", acc, on_epoch=True)

    def predict_step(self, batch, batch_idx, dataloader_idx=0):
        """
        Prediction step (called for each prediction batch).

        Args:
            batch: Current batch of data
            batch_idx: Index of the current batch
            dataloader_idx: Index of the dataloader (if multiple)

        Returns:
            Predictions
        """
        x, y = batch
        logits = self(x)
        preds = torch.argmax(logits, dim=1)
        return preds

    def configure_optimizers(self):
        """
        Configure optimizers and learning rate schedulers.

        Returns:
            Optimizer and scheduler configuration
        """
        # Define optimizer
        optimizer = Adam(
            self.parameters(),
            lr=self.hparams.learning_rate,
            weight_decay=1e-5,
        )

        # Define scheduler
        scheduler = ReduceLROnPlateau(
            optimizer,
            mode="min",
            factor=0.5,
            patience=5,
            verbose=True,
        )

        # Return configuration
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "monitor": "val/loss",
                "interval": "epoch",
                "frequency": 1,
            },
        }

    # Optional: Add custom methods for model-specific logic

    def on_train_epoch_end(self):
        """Called at the end of each training epoch."""
        # Example: Log custom metrics
        pass

    def on_validation_epoch_end(self):
        """Called at the end of each validation epoch."""
        # Example: Compute epoch-level metrics
        pass


# Example usage
if __name__ == "__main__":
    # Create model
    model = TemplateLightningModule(
        learning_rate=0.001,
        hidden_dim=256,
        dropout=0.1,
    )

    # Create trainer
    trainer = L.Trainer(
        max_epochs=10,
        accelerator="auto",
        devices="auto",
        logger=True,
    )

    # Train (you need to provide train_dataloader and val_dataloader)
    # trainer.fit(model, train_dataloader, val_dataloader)

    print(f"Model created with {sum(p.numel() for p in model.parameters()):,} parameters")
    print(f"Hyperparameters: {model.hparams}")