- Rename project from system-monitor to Tyto (barn owl themed) - Update Go module name and all import paths - Update Docker container names (tyto-backend, tyto-frontend) - Update localStorage keys (tyto-settings, tyto-hosts) - Create barn owl SVG favicon and PWA icons (192, 512) - Update header with owl logo icon - Update manifest.json and app.html with Tyto branding Named after Tyto alba, the barn owl — nature's silent, watchful guardian 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
202 lines
4.9 KiB
Go
202 lines
4.9 KiB
Go
package collectors
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"net"
|
|
"net/http"
|
|
"time"
|
|
|
|
"tyto/internal/models"
|
|
)
|
|
|
|
// DockerCollector gathers container metrics by speaking HTTP directly to
// the Docker daemon over its unix socket (no docker client dependency).
type DockerCollector struct {
	client     *http.Client // HTTP client whose transport dials the unix socket
	available  bool         // whether the daemon answered the last /version probe
	socketPath string       // unix socket path, e.g. /var/run/docker.sock
}
|
|
|
|
func NewDockerCollector(socketPath string) *DockerCollector {
|
|
if socketPath == "" {
|
|
socketPath = "/var/run/docker.sock"
|
|
}
|
|
|
|
// Create HTTP client that connects to Docker socket with short timeout
|
|
client := &http.Client{
|
|
Transport: &http.Transport{
|
|
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
|
|
return net.Dial("unix", socketPath)
|
|
},
|
|
},
|
|
Timeout: 2 * time.Second, // Reduced from 5s
|
|
}
|
|
|
|
c := &DockerCollector{client: client, socketPath: socketPath}
|
|
c.checkAvailable()
|
|
return c
|
|
}
|
|
|
|
func (c *DockerCollector) checkAvailable() {
|
|
resp, err := c.client.Get("http://localhost/version")
|
|
if err != nil {
|
|
c.available = false
|
|
return
|
|
}
|
|
defer resp.Body.Close()
|
|
c.available = resp.StatusCode == http.StatusOK
|
|
}
|
|
|
|
func (c *DockerCollector) Collect() (models.DockerStats, error) {
|
|
stats := models.DockerStats{
|
|
Available: c.available,
|
|
Containers: []models.ContainerStats{},
|
|
}
|
|
|
|
if !c.available {
|
|
return stats, nil
|
|
}
|
|
|
|
// Get container list
|
|
resp, err := c.client.Get("http://localhost/containers/json")
|
|
if err != nil {
|
|
c.available = false
|
|
return stats, nil
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
var containers []struct {
|
|
ID string `json:"Id"`
|
|
Names []string `json:"Names"`
|
|
Image string `json:"Image"`
|
|
State string `json:"State"`
|
|
Status string `json:"Status"`
|
|
}
|
|
|
|
if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
|
|
return stats, nil
|
|
}
|
|
|
|
// Build container list first (fast)
|
|
containerStats := make([]models.ContainerStats, len(containers))
|
|
for i, container := range containers {
|
|
name := container.ID[:12]
|
|
if len(container.Names) > 0 {
|
|
name = container.Names[0]
|
|
if len(name) > 0 && name[0] == '/' {
|
|
name = name[1:]
|
|
}
|
|
}
|
|
|
|
containerStats[i] = models.ContainerStats{
|
|
ID: container.ID[:12],
|
|
Name: name,
|
|
Image: container.Image,
|
|
State: container.State,
|
|
Status: container.Status,
|
|
}
|
|
|
|
if container.State == "running" {
|
|
stats.Running++
|
|
}
|
|
}
|
|
|
|
// Fetch detailed stats in parallel for running containers only
|
|
type statsResult struct {
|
|
index int
|
|
stats *detailedStats
|
|
}
|
|
resultChan := make(chan statsResult, len(containers))
|
|
|
|
for i, container := range containers {
|
|
if container.State != "running" {
|
|
continue
|
|
}
|
|
go func(idx int, containerID string) {
|
|
ds := c.fetchContainerStats(containerID)
|
|
resultChan <- statsResult{index: idx, stats: ds}
|
|
}(i, container.ID)
|
|
}
|
|
|
|
// Collect results with timeout
|
|
timeout := time.After(1500 * time.Millisecond)
|
|
runningCount := stats.Running
|
|
collected := 0
|
|
for collected < runningCount {
|
|
select {
|
|
case result := <-resultChan:
|
|
if result.stats != nil {
|
|
containerStats[result.index].CPUPercent = result.stats.cpuPercent
|
|
containerStats[result.index].MemoryUsage = result.stats.memUsage
|
|
containerStats[result.index].MemoryLimit = result.stats.memLimit
|
|
containerStats[result.index].MemoryPercent = result.stats.memPercent
|
|
}
|
|
collected++
|
|
case <-timeout:
|
|
// Stop waiting, use whatever we have
|
|
goto done
|
|
}
|
|
}
|
|
done:
|
|
|
|
stats.Containers = containerStats
|
|
stats.Total = len(containers)
|
|
return stats, nil
|
|
}
|
|
|
|
// detailedStats holds the per-container CPU/memory numbers parsed from a
// single /containers/{id}/stats sample.
type detailedStats struct {
	cpuPercent float64 // CPU usage as a percentage scaled by online CPUs
	memUsage   uint64  // memory usage in bytes
	memLimit   uint64  // memory limit in bytes (0 when unknown)
	memPercent float64 // memUsage/memLimit*100, 0 when memLimit is 0
}
|
|
|
|
func (c *DockerCollector) fetchContainerStats(containerID string) *detailedStats {
|
|
statsResp, err := c.client.Get("http://localhost/containers/" + containerID + "/stats?stream=false")
|
|
if err != nil {
|
|
return nil
|
|
}
|
|
defer statsResp.Body.Close()
|
|
|
|
var containerStats struct {
|
|
CPUStats struct {
|
|
CPUUsage struct {
|
|
TotalUsage uint64 `json:"total_usage"`
|
|
} `json:"cpu_usage"`
|
|
SystemCPUUsage uint64 `json:"system_cpu_usage"`
|
|
OnlineCPUs int `json:"online_cpus"`
|
|
} `json:"cpu_stats"`
|
|
PreCPUStats struct {
|
|
CPUUsage struct {
|
|
TotalUsage uint64 `json:"total_usage"`
|
|
} `json:"cpu_usage"`
|
|
SystemCPUUsage uint64 `json:"system_cpu_usage"`
|
|
} `json:"precpu_stats"`
|
|
MemoryStats struct {
|
|
Usage uint64 `json:"usage"`
|
|
Limit uint64 `json:"limit"`
|
|
} `json:"memory_stats"`
|
|
}
|
|
|
|
if err := json.NewDecoder(statsResp.Body).Decode(&containerStats); err != nil {
|
|
return nil
|
|
}
|
|
|
|
ds := &detailedStats{
|
|
memUsage: containerStats.MemoryStats.Usage,
|
|
memLimit: containerStats.MemoryStats.Limit,
|
|
}
|
|
|
|
// Calculate CPU percentage
|
|
cpuDelta := float64(containerStats.CPUStats.CPUUsage.TotalUsage - containerStats.PreCPUStats.CPUUsage.TotalUsage)
|
|
systemDelta := float64(containerStats.CPUStats.SystemCPUUsage - containerStats.PreCPUStats.SystemCPUUsage)
|
|
if systemDelta > 0 && cpuDelta > 0 {
|
|
ds.cpuPercent = (cpuDelta / systemDelta) * float64(containerStats.CPUStats.OnlineCPUs) * 100
|
|
}
|
|
|
|
if ds.memLimit > 0 {
|
|
ds.memPercent = float64(ds.memUsage) / float64(ds.memLimit) * 100
|
|
}
|
|
|
|
return ds
|
|
}
|