Initial commit
257
skills/scikit-learn/scripts/classification_pipeline.py
Normal file
@@ -0,0 +1,257 @@
"""
Complete classification pipeline example with preprocessing, model training,
hyperparameter tuning, and evaluation.
"""

import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
    classification_report, confusion_matrix, roc_auc_score,
    accuracy_score, precision_score, recall_score, f1_score
)
import warnings
warnings.filterwarnings('ignore')


def create_preprocessing_pipeline(numeric_features, categorical_features):
    """
    Create a preprocessing pipeline for mixed data types.

    Parameters:
    -----------
    numeric_features : list
        List of numeric feature column names
    categorical_features : list
        List of categorical feature column names

    Returns:
    --------
    ColumnTransformer
        Preprocessing pipeline
    """
    # Numeric preprocessing
    numeric_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())
    ])

    # Categorical preprocessing
    categorical_transformer = Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
        ('onehot', OneHotEncoder(handle_unknown='ignore', sparse_output=False))
    ])

    # Combine transformers
    preprocessor = ColumnTransformer(
        transformers=[
            ('num', numeric_transformer, numeric_features),
            ('cat', categorical_transformer, categorical_features)
        ]
    )

    return preprocessor
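# A minimal standalone sketch of how the preprocessor can be used outside the full
# pipeline below ('age', 'income', 'city', and df are hypothetical names for illustration;
# they are not defined in this script):
#   preprocessor = create_preprocessing_pipeline(['age', 'income'], ['city'])
#   X_ready = preprocessor.fit_transform(df)   # imputes, scales, and one-hot encodes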
def train_and_evaluate_model(X, y, numeric_features, categorical_features,
                             test_size=0.2, random_state=42):
    """
    Complete pipeline: preprocess, train, tune, and evaluate a classifier.

    Parameters:
    -----------
    X : DataFrame or array
        Feature matrix
    y : Series or array
        Target variable
    numeric_features : list
        List of numeric feature names
    categorical_features : list
        List of categorical feature names
    test_size : float
        Proportion of data for testing
    random_state : int
        Random seed

    Returns:
    --------
    dict
        Dictionary containing trained model, predictions, and metrics
    """
    # Split data with stratification
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, stratify=y, random_state=random_state
    )

    print(f"Training set size: {len(X_train)}")
    print(f"Test set size: {len(X_test)}")
    print(f"Class distribution in training: {pd.Series(y_train).value_counts().to_dict()}")

    # Create preprocessor
    preprocessor = create_preprocessing_pipeline(numeric_features, categorical_features)

    # Define models to compare
    models = {
        'Logistic Regression': Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', LogisticRegression(max_iter=1000, random_state=random_state))
        ]),
        'Random Forest': Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', RandomForestClassifier(n_estimators=100, random_state=random_state))
        ]),
        'Gradient Boosting': Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', GradientBoostingClassifier(n_estimators=100, random_state=random_state))
        ])
    }
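    # Note: because the preprocessor is a step inside each Pipeline, cross_val_score and
    # GridSearchCV below refit the imputer, scaler, and encoder on the training folds only,
    # so no preprocessing statistics leak from the validation folds.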
    # Compare models using cross-validation
    print("\n" + "="*60)
    print("Model Comparison (5-Fold Cross-Validation)")
    print("="*60)

    cv_results = {}
    for name, model in models.items():
        scores = cross_val_score(model, X_train, y_train, cv=5, scoring='accuracy')
        cv_results[name] = scores.mean()
        print(f"{name:20s}: {scores.mean():.4f} (+/- {scores.std() * 2:.4f})")

    # Select best model based on CV
    best_model_name = max(cv_results, key=cv_results.get)
    best_model = models[best_model_name]

    print(f"\nBest model: {best_model_name}")

    # Hyperparameter tuning for best model
    if best_model_name == 'Random Forest':
        param_grid = {
            'classifier__n_estimators': [100, 200],
            'classifier__max_depth': [10, 20, None],
            'classifier__min_samples_split': [2, 5]
        }
    elif best_model_name == 'Gradient Boosting':
        param_grid = {
            'classifier__n_estimators': [100, 200],
            'classifier__learning_rate': [0.01, 0.1],
            'classifier__max_depth': [3, 5]
        }
    else:  # Logistic Regression
        param_grid = {
            'classifier__C': [0.1, 1.0, 10.0],
            'classifier__penalty': ['l2']
        }
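    # Note: the 'classifier__<param>' names follow scikit-learn's double-underscore
    # convention, which routes each grid value to the Pipeline step registered as 'classifier'.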
    print("\n" + "="*60)
    print("Hyperparameter Tuning")
    print("="*60)

    grid_search = GridSearchCV(
        best_model, param_grid, cv=5, scoring='accuracy',
        n_jobs=-1, verbose=0
    )

    grid_search.fit(X_train, y_train)

    print(f"Best parameters: {grid_search.best_params_}")
    print(f"Best CV score: {grid_search.best_score_:.4f}")

    # Evaluate on test set
    tuned_model = grid_search.best_estimator_
    y_pred = tuned_model.predict(X_test)
    y_pred_proba = tuned_model.predict_proba(X_test)

    print("\n" + "="*60)
    print("Test Set Evaluation")
    print("="*60)

    # Calculate metrics
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred, average='weighted')
    recall = recall_score(y_test, y_pred, average='weighted')
    f1 = f1_score(y_test, y_pred, average='weighted')

    print(f"Accuracy: {accuracy:.4f}")
    print(f"Precision: {precision:.4f}")
    print(f"Recall: {recall:.4f}")
    print(f"F1-Score: {f1:.4f}")
    # ROC AUC (if binary classification)
    if len(np.unique(y)) == 2:
        roc_auc = roc_auc_score(y_test, y_pred_proba[:, 1])
        print(f"ROC AUC: {roc_auc:.4f}")

    print("\n" + "="*60)
    print("Classification Report")
    print("="*60)
    print(classification_report(y_test, y_pred))

    print("\n" + "="*60)
    print("Confusion Matrix")
    print("="*60)
    print(confusion_matrix(y_test, y_pred))

    # Feature importance (if available)
    if hasattr(tuned_model.named_steps['classifier'], 'feature_importances_'):
        print("\n" + "="*60)
        print("Top 10 Most Important Features")
        print("="*60)

        feature_names = tuned_model.named_steps['preprocessor'].get_feature_names_out()
        importances = tuned_model.named_steps['classifier'].feature_importances_

        feature_importance_df = pd.DataFrame({
            'feature': feature_names,
            'importance': importances
        }).sort_values('importance', ascending=False).head(10)

        print(feature_importance_df.to_string(index=False))

    return {
        'model': tuned_model,
        'y_test': y_test,
        'y_pred': y_pred,
        'y_pred_proba': y_pred_proba,
        'metrics': {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1
        }
    }
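# The returned 'model' is the fitted Pipeline (preprocessing + tuned classifier), so it can
# score raw feature frames directly. A hedged reuse sketch (new_df is a hypothetical
# DataFrame with the same columns as X; it is not defined in this script):
#   results = train_and_evaluate_model(X, y, numeric_features, categorical_features)
#   new_predictions = results['model'].predict(new_df)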
# Example usage
if __name__ == "__main__":
    # Load example dataset
    from sklearn.datasets import load_breast_cancer

    # Load data
    data = load_breast_cancer()
    X = pd.DataFrame(data.data, columns=data.feature_names)
    y = data.target

    # For demonstration, treat all features as numeric
    numeric_features = X.columns.tolist()
    categorical_features = []

    print("="*60)
    print("Classification Pipeline Example")
    print("Dataset: Breast Cancer Wisconsin")
    print("="*60)

    # Run complete pipeline
    results = train_and_evaluate_model(
        X, y, numeric_features, categorical_features,
        test_size=0.2, random_state=42
    )

    print("\n" + "="*60)
    print("Pipeline Complete!")
    print("="*60)
386
skills/scikit-learn/scripts/clustering_analysis.py
Normal file
@@ -0,0 +1,386 @@
"""
Clustering analysis example with multiple algorithms, evaluation, and visualization.
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
from sklearn.mixture import GaussianMixture
from sklearn.metrics import (
    silhouette_score, calinski_harabasz_score, davies_bouldin_score
)
import warnings
warnings.filterwarnings('ignore')


def preprocess_for_clustering(X, scale=True, pca_components=None):
    """
    Preprocess data for clustering.

    Parameters:
    -----------
    X : array-like
        Feature matrix
    scale : bool
        Whether to standardize features
    pca_components : int or None
        Number of PCA components (None to skip PCA)

    Returns:
    --------
    array
        Preprocessed data
    """
    X_processed = X.copy()

    if scale:
        scaler = StandardScaler()
        X_processed = scaler.fit_transform(X_processed)

    if pca_components is not None:
        pca = PCA(n_components=pca_components)
        X_processed = pca.fit_transform(X_processed)
        print(f"PCA: Explained variance ratio = {pca.explained_variance_ratio_.sum():.3f}")

    return X_processed
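# Note: scaling matters for the distance-based methods used below. Without it, features
# measured on larger numeric ranges dominate the Euclidean distances that K-Means,
# Ward agglomerative clustering, and DBSCAN rely on.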
def find_optimal_k_kmeans(X, k_range=range(2, 11)):
    """
    Find optimal K for K-Means using the elbow method and silhouette score.

    Parameters:
    -----------
    X : array-like
        Feature matrix (should be scaled)
    k_range : range
        Range of K values to test

    Returns:
    --------
    dict
        Dictionary with inertia and silhouette scores for each K
    """
    inertias = []
    silhouette_scores = []

    for k in k_range:
        kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)
        labels = kmeans.fit_predict(X)

        inertias.append(kmeans.inertia_)
        silhouette_scores.append(silhouette_score(X, labels))

    # Plot results
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))

    # Elbow plot
    ax1.plot(k_range, inertias, 'bo-')
    ax1.set_xlabel('Number of clusters (K)')
    ax1.set_ylabel('Inertia')
    ax1.set_title('Elbow Method')
    ax1.grid(True)

    # Silhouette plot
    ax2.plot(k_range, silhouette_scores, 'ro-')
    ax2.set_xlabel('Number of clusters (K)')
    ax2.set_ylabel('Silhouette Score')
    ax2.set_title('Silhouette Analysis')
    ax2.grid(True)

    plt.tight_layout()
    plt.savefig('clustering_optimization.png', dpi=300, bbox_inches='tight')
    print("Saved: clustering_optimization.png")
    plt.close()

    # Find best K based on silhouette score
    best_k = k_range[np.argmax(silhouette_scores)]
    print(f"\nRecommended K based on silhouette score: {best_k}")

    return {
        'k_values': list(k_range),
        'inertias': inertias,
        'silhouette_scores': silhouette_scores,
        'best_k': best_k
    }
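# Interpretation notes: inertia (the within-cluster sum of squared distances) generally keeps
# shrinking as K grows, so the elbow plot looks for the K where the improvement levels off;
# the silhouette score ranges from -1 to 1, with higher values indicating tighter,
# better-separated clusters.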
def compare_clustering_algorithms(X, n_clusters=3):
    """
    Compare different clustering algorithms.

    Parameters:
    -----------
    X : array-like
        Feature matrix (should be scaled)
    n_clusters : int
        Number of clusters

    Returns:
    --------
    dict
        Dictionary with results for each algorithm
    """
    print("="*60)
    print(f"Comparing Clustering Algorithms (n_clusters={n_clusters})")
    print("="*60)

    algorithms = {
        'K-Means': KMeans(n_clusters=n_clusters, random_state=42, n_init=10),
        'Agglomerative': AgglomerativeClustering(n_clusters=n_clusters, linkage='ward'),
        'Gaussian Mixture': GaussianMixture(n_components=n_clusters, random_state=42)
    }

    # DBSCAN doesn't require n_clusters
    # We'll add it separately
    dbscan = DBSCAN(eps=0.5, min_samples=5)
    dbscan_labels = dbscan.fit_predict(X)

    results = {}

    for name, algorithm in algorithms.items():
        labels = algorithm.fit_predict(X)

        # Calculate metrics
        silhouette = silhouette_score(X, labels)
        calinski = calinski_harabasz_score(X, labels)
        davies = davies_bouldin_score(X, labels)

        results[name] = {
            'labels': labels,
            'n_clusters': n_clusters,
            'silhouette': silhouette,
            'calinski_harabasz': calinski,
            'davies_bouldin': davies
        }

        print(f"\n{name}:")
        print(f"  Silhouette Score: {silhouette:.4f} (higher is better)")
        print(f"  Calinski-Harabasz: {calinski:.4f} (higher is better)")
        print(f"  Davies-Bouldin: {davies:.4f} (lower is better)")

    # DBSCAN results
    n_clusters_dbscan = len(set(dbscan_labels)) - (1 if -1 in dbscan_labels else 0)
    n_noise = list(dbscan_labels).count(-1)

    if n_clusters_dbscan > 1:
        # Only calculate metrics if we have multiple clusters
        mask = dbscan_labels != -1  # Exclude noise
        if mask.sum() > 0:
            silhouette = silhouette_score(X[mask], dbscan_labels[mask])
            calinski = calinski_harabasz_score(X[mask], dbscan_labels[mask])
            davies = davies_bouldin_score(X[mask], dbscan_labels[mask])

            results['DBSCAN'] = {
                'labels': dbscan_labels,
                'n_clusters': n_clusters_dbscan,
                'n_noise': n_noise,
                'silhouette': silhouette,
                'calinski_harabasz': calinski,
                'davies_bouldin': davies
            }

            print(f"\nDBSCAN:")
            print(f"  Clusters found: {n_clusters_dbscan}")
            print(f"  Noise points: {n_noise}")
            print(f"  Silhouette Score: {silhouette:.4f} (higher is better)")
            print(f"  Calinski-Harabasz: {calinski:.4f} (higher is better)")
            print(f"  Davies-Bouldin: {davies:.4f} (lower is better)")
    else:
        print(f"\nDBSCAN:")
        print(f"  Clusters found: {n_clusters_dbscan}")
        print(f"  Noise points: {n_noise}")
        print("  Note: Insufficient clusters for metric calculation")

    return results
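# Note: eps=0.5 and min_samples=5 are reasonable starting points for standardized data,
# but DBSCAN is sensitive to both settings; if it reports a single cluster or mostly
# noise, those values need tuning for the dataset at hand.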
def visualize_clusters(X, results, true_labels=None):
    """
    Visualize clustering results using PCA for 2D projection.

    Parameters:
    -----------
    X : array-like
        Feature matrix
    results : dict
        Dictionary with clustering results
    true_labels : array-like or None
        True labels (if available) for comparison
    """
    # Reduce to 2D using PCA
    pca = PCA(n_components=2)
    X_2d = pca.fit_transform(X)

    # Determine number of subplots
    n_plots = len(results)
    if true_labels is not None:
        n_plots += 1

    n_cols = min(3, n_plots)
    n_rows = (n_plots + n_cols - 1) // n_cols

    fig, axes = plt.subplots(n_rows, n_cols, figsize=(5*n_cols, 4*n_rows))
    if n_plots == 1:
        axes = np.array([axes])
    axes = axes.flatten()

    plot_idx = 0

    # Plot true labels if available
    if true_labels is not None:
        ax = axes[plot_idx]
        scatter = ax.scatter(X_2d[:, 0], X_2d[:, 1], c=true_labels, cmap='viridis', alpha=0.6)
        ax.set_title('True Labels')
        ax.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%})')
        ax.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%})')
        plt.colorbar(scatter, ax=ax)
        plot_idx += 1

    # Plot clustering results
    for name, result in results.items():
        ax = axes[plot_idx]
        labels = result['labels']

        scatter = ax.scatter(X_2d[:, 0], X_2d[:, 1], c=labels, cmap='viridis', alpha=0.6)

        # Highlight noise points for DBSCAN
        if name == 'DBSCAN' and -1 in labels:
            noise_mask = labels == -1
            ax.scatter(X_2d[noise_mask, 0], X_2d[noise_mask, 1],
                       c='red', marker='x', s=100, label='Noise', alpha=0.8)
            ax.legend()

        title = f"{name} (K={result['n_clusters']})"
        if 'silhouette' in result:
            title += f"\nSilhouette: {result['silhouette']:.3f}"
        ax.set_title(title)
        ax.set_xlabel(f'PC1 ({pca.explained_variance_ratio_[0]:.2%})')
        ax.set_ylabel(f'PC2 ({pca.explained_variance_ratio_[1]:.2%})')
        plt.colorbar(scatter, ax=ax)

        plot_idx += 1

    # Hide unused subplots
    for idx in range(plot_idx, len(axes)):
        axes[idx].axis('off')

    plt.tight_layout()
    plt.savefig('clustering_results.png', dpi=300, bbox_inches='tight')
    print("\nSaved: clustering_results.png")
    plt.close()


def complete_clustering_analysis(X, true_labels=None, scale=True,
                                 find_k=True, k_range=range(2, 11), n_clusters=3):
    """
    Complete clustering analysis workflow.

    Parameters:
    -----------
    X : array-like
        Feature matrix
    true_labels : array-like or None
        True labels (for comparison only, not used in clustering)
    scale : bool
        Whether to scale features
    find_k : bool
        Whether to search for optimal K
    k_range : range
        Range of K values to test
    n_clusters : int
        Number of clusters to use in comparison

    Returns:
    --------
    dict
        Dictionary with all analysis results
    """
    print("="*60)
    print("Clustering Analysis")
    print("="*60)
    print(f"Data shape: {X.shape}")

    # Preprocess data
    X_processed = preprocess_for_clustering(X, scale=scale)

    # Find optimal K if requested
    optimization_results = None
    if find_k:
        print("\n" + "="*60)
        print("Finding Optimal Number of Clusters")
        print("="*60)
        optimization_results = find_optimal_k_kmeans(X_processed, k_range=k_range)

    # Use recommended K
    if optimization_results:
        n_clusters = optimization_results['best_k']

    # Compare clustering algorithms
    comparison_results = compare_clustering_algorithms(X_processed, n_clusters=n_clusters)

    # Visualize results
    print("\n" + "="*60)
    print("Visualizing Results")
    print("="*60)
    visualize_clusters(X_processed, comparison_results, true_labels=true_labels)

    return {
        'X_processed': X_processed,
        'optimization': optimization_results,
        'comparison': comparison_results
    }
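# A hedged sketch of how the returned dictionary can be inspected after a run (the keys
# mirror the structure built above):
#   results = complete_clustering_analysis(X, scale=True, find_k=True)
#   kmeans_labels = results['comparison']['K-Means']['labels']
#   best_k = results['optimization']['best_k'] if results['optimization'] else None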
# Example usage
if __name__ == "__main__":
    from sklearn.datasets import load_iris, make_blobs

    print("="*60)
    print("Example 1: Iris Dataset")
    print("="*60)

    # Load Iris dataset
    iris = load_iris()
    X_iris = iris.data
    y_iris = iris.target

    results_iris = complete_clustering_analysis(
        X_iris,
        true_labels=y_iris,
        scale=True,
        find_k=True,
        k_range=range(2, 8),
        n_clusters=3
    )

    print("\n" + "="*60)
    print("Example 2: Synthetic Dataset with Noise")
    print("="*60)

    # Create synthetic dataset
    X_synth, y_synth = make_blobs(
        n_samples=500, n_features=2, centers=4,
        cluster_std=0.5, random_state=42
    )

    # Add noise points (seed the global RNG so the noise is reproducible,
    # matching the random_state=42 used elsewhere in this script)
    np.random.seed(42)
    noise = np.random.randn(50, 2) * 3
    X_synth = np.vstack([X_synth, noise])
    y_synth_with_noise = np.concatenate([y_synth, np.full(50, -1)])

    results_synth = complete_clustering_analysis(
        X_synth,
        true_labels=y_synth_with_noise,
        scale=True,
        find_k=True,
        k_range=range(2, 8),
        n_clusters=4
    )

    print("\n" + "="*60)
    print("Analysis Complete!")
    print("="*60)