Initial commit

Committed by Zhongwei Li on 2025-11-29 18:47:40 +08:00 (commit 14c678ceac)
22 changed files with 7501 additions and 0 deletions

tests/test_helpers.py (new file, 180 lines)

@@ -0,0 +1,180 @@
#!/usr/bin/env python3
"""
Tests for helper utilities.
"""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent / 'scripts'))
from utils.helpers import *


def test_format_bytes():
    """Test byte formatting."""
    assert format_bytes(0) == "0.0 B"
    assert format_bytes(512) == "512.0 B"
    assert format_bytes(1024) == "1.0 KB"
    assert format_bytes(1048576) == "1.0 MB"
    assert format_bytes(1073741824) == "1.0 GB"
    print("✓ format_bytes() passed")
    return True


def test_format_duration():
    """Test duration formatting."""
    assert format_duration(30) == "30s"
    assert format_duration(65) == "1m 5s"
    assert format_duration(3600) == "1h"
    assert format_duration(3665) == "1h 1m"
    assert format_duration(7265) == "2h 1m"
    print("✓ format_duration() passed")
    return True


def test_format_percentage():
    """Test percentage formatting."""
    assert format_percentage(45.567) == "45.6%"
    assert format_percentage(100) == "100.0%"
    assert format_percentage(0.123, decimals=2) == "0.12%"
    print("✓ format_percentage() passed")
    return True


def test_calculate_load_score():
    """Test load score calculation."""
    score = calculate_load_score(50, 50, 50)
    assert 0 <= score <= 1
    assert abs(score - 0.5) < 0.01
    score_low = calculate_load_score(20, 30, 25)
    score_high = calculate_load_score(80, 85, 90)
    assert score_low < score_high
    print("✓ calculate_load_score() passed")
    return True
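
# The assertions above effectively pin down the scoring model: an equal-weight
# average of CPU, memory, and disk percentages, normalized to the 0-1 range.
# A minimal sketch of a compatible implementation (an assumption, not
# necessarily what utils/helpers.py actually does):
#
#     def calculate_load_score(cpu_pct, mem_pct, disk_pct):
#         return (cpu_pct + mem_pct + disk_pct) / 300.0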


def test_classify_load_status():
    """Test load status classification."""
    assert classify_load_status(0.2) == "low"
    assert classify_load_status(0.5) == "moderate"
    assert classify_load_status(0.8) == "high"
    print("✓ classify_load_status() passed")
    return True


def test_classify_latency():
    """Test latency classification."""
    status, desc = classify_latency(25)
    assert status == "excellent"
    assert "interactive" in desc.lower()
    status, desc = classify_latency(150)
    assert status == "fair"
    print("✓ classify_latency() passed")
    return True
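
# The classification tests imply threshold bands along these lines (cutoffs
# are assumptions consistent with the assertions, not confirmed values):
#
#     def classify_load_status(score):    # 0.2 -> low, 0.5 -> moderate, 0.8 -> high
#         if score < 0.4:
#             return "low"
#         return "moderate" if score < 0.7 else "high"
#
#     def classify_latency(ms):           # 25 -> excellent, 150 -> fair
#         if ms < 50:
#             return "excellent", "great for interactive use"
#         if ms < 100:
#             return "good", "fine for most tasks"
#         if ms < 200:
#             return "fair", "noticeable lag"
#         return "poor", "expect delays"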


def test_parse_disk_usage():
    """Test disk usage parsing."""
    sample_output = """Filesystem Size Used Avail Use% Mounted on
/dev/sda1 100G 45G 50G 45% /"""
    result = parse_disk_usage(sample_output)
    assert result['filesystem'] == '/dev/sda1'
    assert result['size'] == '100G'
    assert result['used'] == '45G'
    assert result['use_pct'] == 45
    print("✓ parse_disk_usage() passed")
    return True


def test_parse_cpu_load():
    """Test CPU load parsing."""
    sample_output = "19:43:41 up 5 days, 2:15, 3 users, load average: 0.45, 0.38, 0.32"
    result = parse_cpu_load(sample_output)
    assert result['load_1min'] == 0.45
    assert result['load_5min'] == 0.38
    assert result['load_15min'] == 0.32
    print("✓ parse_cpu_load() passed")
    return True
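
# parse_cpu_load() presumably extracts the three load averages from `uptime`
# output; a regex sketch that would satisfy this test (assumed, not the
# confirmed implementation):
#
#     m = re.search(r"load average:\s*([\d.]+),\s*([\d.]+),\s*([\d.]+)", output)
#     return {'load_1min': float(m.group(1)),
#             'load_5min': float(m.group(2)),
#             'load_15min': float(m.group(3))}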


def test_get_timestamp():
    """Test timestamp generation."""
    ts_iso = get_timestamp(iso=True)
    assert 'T' in ts_iso
    assert 'Z' in ts_iso
    ts_human = get_timestamp(iso=False)
    assert ' ' in ts_human
    assert len(ts_human) == 19  # YYYY-MM-DD HH:MM:SS
    print("✓ get_timestamp() passed")
    return True


def test_validate_path():
    """Test path validation."""
    assert validate_path("/tmp", must_exist=True)
    # A nonexistent path must fail when existence is required.
    assert not validate_path("/nonexistent_path_12345", must_exist=True)
    print("✓ validate_path() passed")
    return True


def test_safe_execute():
    """Test safe execution wrapper."""
    # Should return result on success
    result = safe_execute(int, "42")
    assert result == 42
    # Should return default on failure
    result = safe_execute(int, "not_a_number", default=0)
    assert result == 0
    print("✓ safe_execute() passed")
    return True
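
# safe_execute() behaves like a classic guard wrapper; a sketch that would
# satisfy this test (assumed signature):
#
#     def safe_execute(func, *args, default=None, **kwargs):
#         try:
#             return func(*args, **kwargs)
#         except Exception:
#             return default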


def main():
    """Run all helper tests."""
    print("=" * 70)
    print("HELPER TESTS")
    print("=" * 70)
    tests = [
        test_format_bytes,
        test_format_duration,
        test_format_percentage,
        test_calculate_load_score,
        test_classify_load_status,
        test_classify_latency,
        test_parse_disk_usage,
        test_parse_cpu_load,
        test_get_timestamp,
        test_validate_path,
        test_safe_execute,
    ]
    passed = 0
    for test in tests:
        try:
            if test():
                passed += 1
        except Exception as e:
            print(f"{test.__name__} failed: {e}")
    print(f"\nResults: {passed}/{len(tests)} passed")
    return passed == len(tests)


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
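
For orientation, the assertions in this file fully determine the behavior of
the two formatting helpers it exercises most. A minimal sketch that would pass
them (assumed implementations; the real utils/helpers.py ships elsewhere in
this commit):

def format_bytes(n: float) -> str:
    """Binary units, one decimal place: 1024 -> '1.0 KB'."""
    for unit in ("B", "KB", "MB", "GB", "TB"):
        if n < 1024 or unit == "TB":
            return f"{n:.1f} {unit}"
        n /= 1024


def format_duration(seconds: int) -> str:
    """Drop zero parts; above an hour, drop seconds: 3665 -> '1h 1m'."""
    h, rem = divmod(int(seconds), 3600)
    m, s = divmod(rem, 60)
    if h:
        return f"{h}h {m}m" if m else f"{h}h"
    if m:
        return f"{m}m {s}s" if s else f"{m}m"
    return f"{s}s"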

tests/test_integration.py (new file, 346 lines)

@@ -0,0 +1,346 @@
#!/usr/bin/env python3
"""
Integration tests for Tailscale SSH Sync Agent.
Tests complete workflows from query to result.
"""
import sys
from pathlib import Path
# Add scripts to path
sys.path.insert(0, str(Path(__file__).parent.parent / 'scripts'))
from sshsync_wrapper import get_host_status, list_hosts, get_groups
from tailscale_manager import get_tailscale_status, get_network_summary
from load_balancer import format_load_report, MachineMetrics
from utils.helpers import (
    format_bytes, format_duration, format_percentage,
    calculate_load_score, classify_load_status, classify_latency,
)


def test_host_status_basic():
    """Test get_host_status() without errors."""
    print("\n✓ Testing get_host_status()...")
    try:
        result = get_host_status()
        # Validations
        assert 'hosts' in result, "Missing 'hosts' in result"
        assert isinstance(result.get('hosts', []), list), "'hosts' must be list"
        # Should have basic counts even if no hosts configured
        assert 'total_count' in result, "Missing 'total_count'"
        assert 'online_count' in result, "Missing 'online_count'"
        assert 'offline_count' in result, "Missing 'offline_count'"
        print(f" ✓ Found {result.get('total_count', 0)} hosts")
        print(f" ✓ Online: {result.get('online_count', 0)}")
        print(f" ✓ Offline: {result.get('offline_count', 0)}")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        import traceback
        traceback.print_exc()
        return False


def test_list_hosts():
    """Test list_hosts() function."""
    print("\n✓ Testing list_hosts()...")
    try:
        result = list_hosts(with_status=False)
        assert 'hosts' in result, "Missing 'hosts' in result"
        assert 'count' in result, "Missing 'count' in result"
        assert isinstance(result['hosts'], list), "'hosts' must be list"
        print(" ✓ List hosts working")
        print(f" ✓ Found {result['count']} configured hosts")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        return False


def test_get_groups():
    """Test get_groups() function."""
    print("\n✓ Testing get_groups()...")
    try:
        groups = get_groups()
        assert isinstance(groups, dict), "Groups must be dict"
        print(" ✓ Groups config loaded")
        print(f" ✓ Found {len(groups)} groups")
        for group, hosts in list(groups.items())[:3]:  # Show first 3
            print(f" - {group}: {len(hosts)} hosts")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        return False


def test_tailscale_status():
    """Test Tailscale status check."""
    print("\n✓ Testing get_tailscale_status()...")
    try:
        status = get_tailscale_status()
        assert isinstance(status, dict), "Status must be dict"
        assert 'connected' in status, "Missing 'connected' field"
        if status.get('connected'):
            print(" ✓ Tailscale connected")
            print(f" ✓ Peers: {status.get('total_count', 0)} total, {status.get('online_count', 0)} online")
        else:
            print(f" Tailscale not connected: {status.get('error', 'Unknown')}")
            print(" (This is OK if Tailscale is not installed/configured)")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        return False


def test_network_summary():
    """Test network summary generation."""
    print("\n✓ Testing get_network_summary()...")
    try:
        summary = get_network_summary()
        assert isinstance(summary, str), "Summary must be string"
        assert len(summary) > 0, "Summary cannot be empty"
        print(" ✓ Network summary generated:")
        for line in summary.split('\n'):
            print(f" {line}")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        return False


def test_format_helpers():
    """Test formatting helper functions."""
    print("\n✓ Testing format helpers...")
    try:
        # Test format_bytes
        assert format_bytes(1024) == "1.0 KB", "format_bytes failed for 1024"
        assert format_bytes(12582912) == "12.0 MB", "format_bytes failed for 12MB"
        # Test format_duration
        assert format_duration(65) == "1m 5s", "format_duration failed for 65s"
        assert format_duration(3665) == "1h 1m", "format_duration failed for 1h+"
        # Test format_percentage
        assert format_percentage(45.567) == "45.6%", "format_percentage failed"
        print(f" ✓ format_bytes(12582912) = {format_bytes(12582912)}")
        print(f" ✓ format_duration(3665) = {format_duration(3665)}")
        print(f" ✓ format_percentage(45.567) = {format_percentage(45.567)}")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        return False


def test_load_score_calculation():
    """Test load score calculation."""
    print("\n✓ Testing calculate_load_score()...")
    try:
        # Test various scenarios
        score1 = calculate_load_score(45, 60, 40)
        assert 0 <= score1 <= 1, "Score must be 0-1"
        assert abs(score1 - 0.49) < 0.01, f"Expected ~0.49, got {score1}"
        score2 = calculate_load_score(20, 35, 30)
        assert score2 < score1, "Lower usage should have lower score"
        score3 = calculate_load_score(85, 70, 65)
        assert score3 > score1, "Higher usage should have higher score"
        print(f" ✓ Low load (20%, 35%, 30%): {score2:.2f}")
        print(f" ✓ Med load (45%, 60%, 40%): {score1:.2f}")
        print(f" ✓ High load (85%, 70%, 65%): {score3:.2f}")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        return False


def test_load_classification():
    """Test load status classification."""
    print("\n✓ Testing classify_load_status()...")
    try:
        assert classify_load_status(0.28) == "low", "0.28 should be 'low'"
        assert classify_load_status(0.55) == "moderate", "0.55 should be 'moderate'"
        assert classify_load_status(0.82) == "high", "0.82 should be 'high'"
        print(f" ✓ Score 0.28 = {classify_load_status(0.28)}")
        print(f" ✓ Score 0.55 = {classify_load_status(0.55)}")
        print(f" ✓ Score 0.82 = {classify_load_status(0.82)}")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        return False
def test_latency_classification():
"""Test network latency classification."""
print("\n✓ Testing classify_latency()...")
try:
status1, desc1 = classify_latency(25)
assert status1 == "excellent", "25ms should be 'excellent'"
status2, desc2 = classify_latency(75)
assert status2 == "good", "75ms should be 'good'"
status3, desc3 = classify_latency(150)
assert status3 == "fair", "150ms should be 'fair'"
status4, desc4 = classify_latency(250)
assert status4 == "poor", "250ms should be 'poor'"
print(f" ✓ 25ms: {status1} - {desc1}")
print(f" ✓ 75ms: {status2} - {desc2}")
print(f" ✓ 150ms: {status3} - {desc3}")
print(f" ✓ 250ms: {status4} - {desc4}")
return True
except Exception as e:
print(f" ✗ FAILED: {e}")
return False
def test_load_report_formatting():
"""Test load report formatting."""
print("\n✓ Testing format_load_report()...")
try:
metrics = MachineMetrics(
host='web-01',
cpu_pct=45.0,
mem_pct=60.0,
disk_pct=40.0,
load_score=0.49,
status='moderate'
)
report = format_load_report(metrics)
assert 'web-01' in report, "Report must include hostname"
assert '0.49' in report, "Report must include load score"
assert 'moderate' in report, "Report must include status"
print(f" ✓ Report generated:")
for line in report.split('\n'):
print(f" {line}")
return True
except Exception as e:
print(f" ✗ FAILED: {e}")
return False


def test_dry_run_execution():
    """Test dry-run mode for operations."""
    print("\n✓ Testing dry-run execution...")
    try:
        from sshsync_wrapper import execute_on_all
        result = execute_on_all("uptime", dry_run=True)
        assert result.get('dry_run') is True, "Must indicate dry-run mode"
        assert 'command' in result, "Must include command"
        assert 'message' in result, "Must include message"
        print(" ✓ Dry-run mode working")
        print(f" ✓ Command: {result.get('command')}")
        print(f" ✓ Message: {result.get('message')}")
        return True
    except Exception as e:
        print(f" ✗ FAILED: {e}")
        return False


def main():
    """Run all integration tests."""
    print("=" * 70)
    print("INTEGRATION TESTS - Tailscale SSH Sync Agent")
    print("=" * 70)
    tests = [
        ("Host status check", test_host_status_basic),
        ("List hosts", test_list_hosts),
        ("Get groups", test_get_groups),
        ("Tailscale status", test_tailscale_status),
        ("Network summary", test_network_summary),
        ("Format helpers", test_format_helpers),
        ("Load score calculation", test_load_score_calculation),
        ("Load classification", test_load_classification),
        ("Latency classification", test_latency_classification),
        ("Load report formatting", test_load_report_formatting),
        ("Dry-run execution", test_dry_run_execution),
    ]
    results = []
    for test_name, test_func in tests:
        passed = test_func()
        results.append((test_name, passed))
    # Summary
    print("\n" + "=" * 70)
    print("SUMMARY")
    print("=" * 70)
    for test_name, passed in results:
        status = "✅ PASS" if passed else "❌ FAIL"
        print(f"{status}: {test_name}")
    passed_count = sum(1 for _, p in results if p)
    total_count = len(results)
    print(f"\nResults: {passed_count}/{total_count} passed")
    if passed_count == total_count:
        print("\n🎉 All tests passed!")
    else:
        print(f"\n⚠️ {total_count - passed_count} test(s) failed")
    return passed_count == total_count


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
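
test_dry_run_execution() pins down a useful contract: mutating wrappers must
short-circuit before touching the network. A sketch of the dry-run branch it
exercises (assumed shape; the test only checks for these keys):

def execute_on_all(command: str, dry_run: bool = False) -> dict:
    """Run `command` on every configured host (sketch: dry-run branch only)."""
    if dry_run:
        return {
            'dry_run': True,
            'command': command,
            'message': f"Dry run: would execute {command!r} on all hosts",
        }
    raise NotImplementedError("sketch: only the dry-run branch is shown")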

tests/test_validation.py (new file, 177 lines)

@@ -0,0 +1,177 @@
#!/usr/bin/env python3
"""
Tests for validators.
"""
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent / 'scripts'))
from utils.validators import *


def test_validate_host():
    """Test host validation."""
    # Valid host
    assert validate_host("web-01") == "web-01"
    assert validate_host(" web-01 ") == "web-01"  # Strips whitespace
    # With valid list
    assert validate_host("web-01", ["web-01", "web-02"]) == "web-01"
    # Invalid format
    try:
        validate_host("web@01")  # Invalid character
        assert False, "Should have raised ValidationError"
    except ValidationError:
        pass
    print("✓ validate_host() passed")
    return True


def test_validate_group():
    """Test group validation."""
    # Valid group
    assert validate_group("production") == "production"
    assert validate_group("PRODUCTION") == "production"  # Lowercase normalization
    # With valid list
    assert validate_group("production", ["production", "staging"]) == "production"
    # Invalid
    try:
        validate_group("invalid!", ["production"])
        assert False, "Should have raised ValidationError"
    except ValidationError:
        pass
    print("✓ validate_group() passed")
    return True


def test_validate_path_exists():
    """Test path existence validation."""
    # Valid path
    path = validate_path_exists("/tmp", must_be_dir=True)
    assert isinstance(path, Path)
    # Invalid path
    try:
        validate_path_exists("/nonexistent_12345")
        assert False, "Should have raised ValidationError"
    except ValidationError:
        pass
    print("✓ validate_path_exists() passed")
    return True


def test_validate_timeout():
    """Test timeout validation."""
    # Valid timeouts
    assert validate_timeout(10) == 10
    assert validate_timeout(1) == 1
    assert validate_timeout(600) == 600
    # Too low
    try:
        validate_timeout(0)
        assert False, "Should have raised ValidationError"
    except ValidationError:
        pass
    # Too high
    try:
        validate_timeout(1000)
        assert False, "Should have raised ValidationError"
    except ValidationError:
        pass
    print("✓ validate_timeout() passed")
    return True


def test_validate_command():
    """Test command validation."""
    # Safe commands
    assert validate_command("ls -la") == "ls -la"
    assert validate_command("uptime") == "uptime"
    # Dangerous commands (should fail without allow_dangerous)
    try:
        validate_command("rm -rf /")
        assert False, "Should have blocked dangerous command"
    except ValidationError:
        pass
    # But should work with allow_dangerous
    assert validate_command("rm -rf /tmp/test", allow_dangerous=True)
    print("✓ validate_command() passed")
    return True


def test_validate_hosts_list():
    """Test list validation."""
    # Valid list
    hosts = validate_hosts_list(["web-01", "web-02"])
    assert len(hosts) == 2
    assert "web-01" in hosts
    # Empty list
    try:
        validate_hosts_list([])
        assert False, "Should have raised ValidationError for empty list"
    except ValidationError:
        pass
    print("✓ validate_hosts_list() passed")
    return True


def test_get_invalid_hosts():
    """Test finding invalid hosts."""
    # Test with mix of valid and invalid
    # (This would require actual SSH config, so we test the function exists)
    result = get_invalid_hosts(["web-01", "nonexistent-host-12345"])
    assert isinstance(result, list)
    print("✓ get_invalid_hosts() passed")
    return True


def main():
    """Run all validation tests."""
    print("=" * 70)
    print("VALIDATION TESTS")
    print("=" * 70)
    tests = [
        test_validate_host,
        test_validate_group,
        test_validate_path_exists,
        test_validate_timeout,
        test_validate_command,
        test_validate_hosts_list,
        test_get_invalid_hosts,
    ]
    passed = 0
    for test in tests:
        try:
            if test():
                passed += 1
        except Exception as e:
            print(f"{test.__name__} failed: {e}")
            import traceback
            traceback.print_exc()
    print(f"\nResults: {passed}/{len(tests)} passed")
    return passed == len(tests)


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
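
Taken together, the bounds these tests probe (1-600 seconds accepted, 0 and
1000 rejected; hostnames limited to safe characters) suggest validators along
these lines. A sketch under assumed limits, including the ValidationError type
the tests rely on:

import re


class ValidationError(ValueError):
    """Raised when user-supplied input fails validation."""


def validate_timeout(seconds: int, minimum: int = 1, maximum: int = 600) -> int:
    # Bounds are assumptions consistent with the tests, not confirmed values.
    if not minimum <= seconds <= maximum:
        raise ValidationError(f"timeout must be {minimum}-{maximum}s, got {seconds}")
    return seconds


def validate_host(host: str, valid_hosts: list | None = None) -> str:
    # Strip whitespace, then allow only hostname-safe characters.
    host = host.strip()
    if not re.fullmatch(r"[A-Za-z0-9._-]+", host):
        raise ValidationError(f"invalid host name: {host!r}")
    if valid_hosts is not None and host not in valid_hosts:
        raise ValidationError(f"unknown host: {host!r}")
    return host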