Files
tyto/backend/internal/alerts/manager_test.go
vikingowl f4dbc55851 feat: add dashboard customization, alerts, PWA, and mobile support
Dashboard Editor & Layout:
- Full-screen visual editor for reorganizing cards
- Drag-and-drop cards between sections
- Toggle card visibility with persistence to localStorage
- Reset to default layout option

Alerts System:
- Threshold-based alerts for CPU, memory, temperature, disk, GPU
- Alert manager with duration requirements
- AlertsCard component with settings UI
- API endpoints for alerts CRUD

New Collectors:
- Docker container monitoring with parallel stats fetching
- Systemd service status via D-Bus
- Historical metrics storage (1 hour at 1s intervals)

PWA Support:
- Service worker with offline caching
- Web app manifest with SVG icons
- iOS PWA meta tags

Mobile Responsive:
- Collapsible hamburger menu on mobile
- Adaptive grid layouts for all screen sizes
- Touch-friendly hover states
- Safe area insets for notched devices

UI Enhancements:
- Light/dark theme toggle with persistence
- Keyboard shortcuts (T=theme, R=refresh, ?=help)
- Per-process expandable details in ProcessesCard
- Sparkline charts for historical data

Performance Fixes:
- Buffered SSE channels to prevent blocking
- Parallel Docker stats collection with timeout
- D-Bus timeout for systemd collector

Tests:
- Unit tests for CPU, memory, network collectors
- Alert manager tests

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-28 05:35:28 +01:00

345 lines
7.5 KiB
Go

package alerts
import (
"testing"
"time"
"system-monitor/internal/models"
)
// TestNewManager verifies that a freshly constructed manager comes with
// default thresholds configured and no active alerts.
func TestNewManager(t *testing.T) {
	m := NewManager()
	if m == nil {
		t.Fatal("Expected non-nil manager")
	}

	// A new manager must ship with a non-empty default threshold set.
	cfg := m.GetConfig()
	if len(cfg.Thresholds) == 0 {
		t.Error("Expected default thresholds")
	}

	// Nothing has been checked yet, so no alert may be active.
	active := m.GetActiveAlerts()
	if len(active) != 0 {
		t.Errorf("Expected no active alerts, got %d", len(active))
	}
}
// TestCheckMetrics_TriggersWarning verifies that CPU usage above the warning
// threshold but below the critical one raises exactly one warning-severity
// CPU alert, and that the alert is then reported as active.
func TestCheckMetrics_TriggersWarning(t *testing.T) {
	manager := NewManager()

	// Set a low threshold for testing.
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0, // Immediate trigger
			},
		},
	})

	// Create metrics with high CPU.
	metrics := models.AllMetrics{
		CPU: models.CPUStats{
			TotalUsage: 60, // Above warning, below critical
		},
	}

	newAlerts := manager.CheckMetrics(metrics)
	// Fatalf rather than Errorf+guard: the assertions below are meaningless
	// unless exactly one alert was returned.
	if len(newAlerts) != 1 {
		t.Fatalf("Expected 1 new alert, got %d", len(newAlerts))
	}
	if newAlerts[0].Severity != models.AlertSeverityWarning {
		t.Errorf("Expected warning severity, got %s", newAlerts[0].Severity)
	}
	if newAlerts[0].Type != models.AlertTypeCPU {
		t.Errorf("Expected CPU alert type, got %s", newAlerts[0].Type)
	}

	// The newly triggered alert must also appear in the active set.
	active := manager.GetActiveAlerts()
	if len(active) != 1 {
		t.Errorf("Expected 1 active alert, got %d", len(active))
	}
}
// TestCheckMetrics_TriggersCritical verifies that CPU usage above the
// critical threshold raises a critical-severity alert.
func TestCheckMetrics_TriggersCritical(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	metrics := models.AllMetrics{
		CPU: models.CPUStats{
			TotalUsage: 95, // Above critical
		},
	}

	newAlerts := manager.CheckMetrics(metrics)
	// Fatalf rather than Errorf+guard: the severity assertion below requires
	// exactly one alert to exist.
	if len(newAlerts) != 1 {
		t.Fatalf("Expected 1 new alert, got %d", len(newAlerts))
	}
	if newAlerts[0].Severity != models.AlertSeverityCritical {
		t.Errorf("Expected critical severity, got %s", newAlerts[0].Severity)
	}
}
// TestCheckMetrics_ResolvesAlert verifies the full trigger-then-resolve
// cycle: a high reading activates an alert, a subsequent low reading clears
// it, and the resolved alert is moved into the history.
func TestCheckMetrics_ResolvesAlert(t *testing.T) {
	m := NewManager()
	m.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// Trigger an alert with a reading above the warning threshold.
	m.CheckMetrics(models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 60},
	})
	if len(m.GetActiveAlerts()) != 1 {
		t.Error("Expected active alert after high CPU")
	}

	// A reading back under the threshold should resolve it.
	m.CheckMetrics(models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 30},
	})
	if len(m.GetActiveAlerts()) != 0 {
		t.Error("Expected no active alerts after low CPU")
	}

	// The resolved alert should have been archived to history.
	hist := m.GetAlertHistory()
	if len(hist) != 1 {
		t.Errorf("Expected 1 alert in history, got %d", len(hist))
	}
}
// TestCheckMetrics_DisabledThreshold verifies that a disabled threshold
// never fires, even when the metric value is far beyond its critical level.
func TestCheckMetrics_DisabledThreshold(t *testing.T) {
	m := NewManager()
	m.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         false, // Disabled
				DurationSeconds: 0,
			},
		},
	})

	// 95% CPU would normally be critical, but the threshold is off.
	got := m.CheckMetrics(models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 95},
	})
	if len(got) != 0 {
		t.Error("Expected no alerts for disabled threshold")
	}
}
// TestCheckMetrics_DurationRequirement verifies that a threshold with a
// non-zero duration does not fire on the first breaching sample.
//
// NOTE(review): only the "not yet" side is covered here; asserting the
// eventual trigger would need a controllable clock — confirm whether the
// manager supports one.
func TestCheckMetrics_DurationRequirement(t *testing.T) {
	m := NewManager()
	m.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 60, // Must exceed for 60 seconds
			},
		},
	})

	// First breaching sample: must not trigger immediately.
	got := m.CheckMetrics(models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 60},
	})
	if len(got) != 0 {
		t.Error("Expected no alerts before duration requirement met")
	}
}
// TestAcknowledgeAlert verifies that an active alert can be acknowledged by
// ID, that the acknowledged flag is visible on the active alert, and that
// acknowledging an unknown ID fails.
func TestAcknowledgeAlert(t *testing.T) {
	m := NewManager()
	m.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// Raise a single alert to acknowledge.
	raised := m.CheckMetrics(models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 60},
	})
	if len(raised) != 1 {
		t.Fatalf("Expected 1 new alert, got %d", len(raised))
	}

	// Acknowledge the alert using the ID from the returned alert.
	if ok := m.AcknowledgeAlert(raised[0].ID); !ok {
		t.Error("Expected acknowledge to succeed")
	}

	// The alert stays active but must now carry the acknowledged flag.
	active := m.GetActiveAlerts()
	if len(active) != 1 {
		t.Fatal("Expected 1 active alert after acknowledge")
	}
	if !active[0].Acknowledged {
		t.Error("Expected alert to be acknowledged")
	}

	// An unknown ID must be rejected.
	if ok := m.AcknowledgeAlert("non-existent"); ok {
		t.Error("Expected acknowledge to fail for non-existent alert")
	}
}
// TestCheckMetrics_MemoryAlert verifies that memory usage above the warning
// threshold (computed from Used/Total) raises a memory-type alert.
func TestCheckMetrics_MemoryAlert(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeMemory,
				WarningValue:    80,
				CriticalValue:   95,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// 90% memory usage (900/1000): above warning, below critical.
	metrics := models.AllMetrics{
		Memory: models.MemoryStats{
			Total: 1000,
			Used:  900,
		},
	}

	newAlerts := manager.CheckMetrics(metrics)
	// Fatalf rather than Errorf+guard: the type assertion below requires
	// exactly one alert to exist.
	if len(newAlerts) != 1 {
		t.Fatalf("Expected 1 memory alert, got %d", len(newAlerts))
	}
	if newAlerts[0].Type != models.AlertTypeMemory {
		t.Errorf("Expected memory alert type, got %s", newAlerts[0].Type)
	}
}
// TestCheckMetrics_DiskAlert verifies that only mounts exceeding the disk
// threshold raise alerts: one mount above 80% and one below should yield
// exactly one alert.
func TestCheckMetrics_DiskAlert(t *testing.T) {
	m := NewManager()
	m.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeDisk,
				WarningValue:    80,
				CriticalValue:   95,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// "/" breaches the warning level; "/home" stays well under it.
	got := m.CheckMetrics(models.AllMetrics{
		Disk: models.DiskStats{
			Mounts: []models.MountStats{
				{MountPoint: "/", UsedPercent: 85},
				{MountPoint: "/home", UsedPercent: 50},
			},
		},
	})
	if len(got) != 1 {
		t.Errorf("Expected 1 disk alert, got %d", len(got))
	}
}
// TestGetAlertHistory_MaxLimit verifies that the alert history is capped at
// maxHistory entries even after more alerts than that have been resolved.
func TestGetAlertHistory_MaxLimit(t *testing.T) {
	m := NewManager()
	m.maxHistory = 5 // Reduce for testing
	m.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// Trigger and resolve more alerts than the history can hold.
	for round := 0; round < 10; round++ {
		m.CheckMetrics(models.AllMetrics{CPU: models.CPUStats{TotalUsage: 60}})
		m.CheckMetrics(models.AllMetrics{CPU: models.CPUStats{TotalUsage: 30}})
		time.Sleep(time.Millisecond) // Ensure unique timestamps
	}

	if hist := m.GetAlertHistory(); len(hist) > 5 {
		t.Errorf("Expected max 5 alerts in history, got %d", len(hist))
	}
}