tyto/backend/internal/alerts/manager_test.go

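// Tests for the alerts Manager: default configuration, CPU/memory/disk
// threshold evaluation, the alert lifecycle (trigger, resolve, acknowledge),
// duration requirements, and the alert-history size cap.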
package alerts

import (
	"testing"
	"time"

	"tyto/internal/models"
)

func TestNewManager(t *testing.T) {
	manager := NewManager()
	if manager == nil {
		t.Fatal("Expected non-nil manager")
	}

	config := manager.GetConfig()
	if len(config.Thresholds) == 0 {
		t.Error("Expected default thresholds")
	}

	alerts := manager.GetActiveAlerts()
	if len(alerts) != 0 {
		t.Errorf("Expected no active alerts, got %d", len(alerts))
	}
}

func TestCheckMetrics_TriggersWarning(t *testing.T) {
	manager := NewManager()

	// Set a low threshold for testing
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0, // Immediate trigger
			},
		},
	})

	// Create metrics with high CPU
	metrics := models.AllMetrics{
		CPU: models.CPUStats{
			TotalUsage: 60, // Above warning, below critical
		},
	}

	newAlerts := manager.CheckMetrics(metrics)
	if len(newAlerts) != 1 {
		t.Fatalf("Expected 1 new alert, got %d", len(newAlerts))
	}
	if newAlerts[0].Severity != models.AlertSeverityWarning {
		t.Errorf("Expected warning severity, got %s", newAlerts[0].Severity)
	}
	if newAlerts[0].Type != models.AlertTypeCPU {
		t.Errorf("Expected CPU alert type, got %s", newAlerts[0].Type)
	}

	// Check active alerts
	active := manager.GetActiveAlerts()
	if len(active) != 1 {
		t.Errorf("Expected 1 active alert, got %d", len(active))
	}
}

func TestCheckMetrics_TriggersCritical(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	metrics := models.AllMetrics{
		CPU: models.CPUStats{
			TotalUsage: 95, // Above critical
		},
	}

	newAlerts := manager.CheckMetrics(metrics)
	if len(newAlerts) != 1 {
		t.Fatalf("Expected 1 new alert, got %d", len(newAlerts))
	}
	if newAlerts[0].Severity != models.AlertSeverityCritical {
		t.Errorf("Expected critical severity, got %s", newAlerts[0].Severity)
	}
}

func TestCheckMetrics_ResolvesAlert(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// Trigger an alert
	highCPU := models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 60},
	}
	manager.CheckMetrics(highCPU)
	if len(manager.GetActiveAlerts()) != 1 {
		t.Error("Expected active alert after high CPU")
	}

	// Resolve the alert with low CPU
	lowCPU := models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 30},
	}
	manager.CheckMetrics(lowCPU)
	if len(manager.GetActiveAlerts()) != 0 {
		t.Error("Expected no active alerts after low CPU")
	}

	// Should be in history
	history := manager.GetAlertHistory()
	if len(history) != 1 {
		t.Errorf("Expected 1 alert in history, got %d", len(history))
	}
}

func TestCheckMetrics_DisabledThreshold(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         false, // Disabled
				DurationSeconds: 0,
			},
		},
	})

	metrics := models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 95},
	}

	newAlerts := manager.CheckMetrics(metrics)
	if len(newAlerts) != 0 {
		t.Error("Expected no alerts for disabled threshold")
	}
}

func TestCheckMetrics_DurationRequirement(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 60, // Must exceed for 60 seconds
			},
		},
	})

	metrics := models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 60},
	}

	// First check - should not trigger immediately
	newAlerts := manager.CheckMetrics(metrics)
	if len(newAlerts) != 0 {
		t.Error("Expected no alerts before duration requirement met")
	}
}

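// Note: asserting that the alert eventually fires once DurationSeconds
// elapses would mean sleeping out the full window or injecting a fake clock
// into Manager; neither is assumed to be available here, so only the
// immediate-call case is covered above.
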
func TestAcknowledgeAlert(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	metrics := models.AllMetrics{
		CPU: models.CPUStats{TotalUsage: 60},
	}
	newAlerts := manager.CheckMetrics(metrics)
	if len(newAlerts) != 1 {
		t.Fatalf("Expected 1 new alert, got %d", len(newAlerts))
	}
	alertID := newAlerts[0].ID

	// Acknowledge the alert using the ID from the returned alert
	success := manager.AcknowledgeAlert(alertID)
	if !success {
		t.Error("Expected acknowledge to succeed")
	}

	// The alert stays active but is marked acknowledged
	alerts := manager.GetActiveAlerts()
	if len(alerts) != 1 {
		t.Fatal("Expected 1 active alert after acknowledge")
	}
	if !alerts[0].Acknowledged {
		t.Error("Expected alert to be acknowledged")
	}

	// Acknowledging a non-existent alert must fail
	success = manager.AcknowledgeAlert("non-existent")
	if success {
		t.Error("Expected acknowledge to fail for non-existent alert")
	}
}

func TestCheckMetrics_MemoryAlert(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeMemory,
				WarningValue:    80,
				CriticalValue:   95,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// 90% memory usage: above the 80% warning, below the 95% critical
	metrics := models.AllMetrics{
		Memory: models.MemoryStats{
			Total: 1000,
			Used:  900,
		},
	}

	newAlerts := manager.CheckMetrics(metrics)
	if len(newAlerts) != 1 {
		t.Fatalf("Expected 1 memory alert, got %d", len(newAlerts))
	}
	if newAlerts[0].Type != models.AlertTypeMemory {
		t.Errorf("Expected memory alert type, got %s", newAlerts[0].Type)
	}
}

func TestCheckMetrics_DiskAlert(t *testing.T) {
	manager := NewManager()
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeDisk,
				WarningValue:    80,
				CriticalValue:   95,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// Only "/" (85%) exceeds the 80% warning threshold; "/home" (50%) stays
	// below it, so exactly one alert is expected.
	metrics := models.AllMetrics{
		Disk: models.DiskStats{
			Mounts: []models.MountStats{
				{MountPoint: "/", UsedPercent: 85},
				{MountPoint: "/home", UsedPercent: 50},
			},
		},
	}

	newAlerts := manager.CheckMetrics(metrics)
	if len(newAlerts) != 1 {
		t.Errorf("Expected 1 disk alert, got %d", len(newAlerts))
	}
}

func TestGetAlertHistory_MaxLimit(t *testing.T) {
	manager := NewManager()
	manager.maxHistory = 5 // Reduce for testing
	manager.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            models.AlertTypeCPU,
				WarningValue:    50,
				CriticalValue:   90,
				Enabled:         true,
				DurationSeconds: 0,
			},
		},
	})

	// Trigger and resolve multiple alerts
	for i := 0; i < 10; i++ {
		high := models.AllMetrics{CPU: models.CPUStats{TotalUsage: 60}}
		manager.CheckMetrics(high)

		low := models.AllMetrics{CPU: models.CPUStats{TotalUsage: 30}}
		manager.CheckMetrics(low)

		time.Sleep(time.Millisecond) // Ensure unique timestamps
	}

	history := manager.GetAlertHistory()
	if len(history) > 5 {
		t.Errorf("Expected max 5 alerts in history, got %d", len(history))
	}
}
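
// Every test above hand-builds the same single-threshold AlertConfig. A helper
// along these lines could cut that duplication. This is a sketch only, written
// against the models API as used in these tests; the parameter types
// (models.AlertType for the kind, float64 thresholds, int duration) and the
// *Manager return are assumptions, not confirmed signatures.
//
// Usage: manager := newManagerWithThreshold(models.AlertTypeCPU, 50, 90, 0)
func newManagerWithThreshold(alertType models.AlertType, warning, critical float64, duration int) *Manager {
	m := NewManager()
	m.SetConfig(models.AlertConfig{
		Thresholds: []models.AlertThreshold{
			{
				Type:            alertType,
				WarningValue:    warning,
				CriticalValue:   critical,
				Enabled:         true,
				DurationSeconds: duration,
			},
		},
	})
	return m
}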