feat: add log collection and viewing system
Log Collectors (backend/internal/collectors/logs/): - LogEntry model with level, source, message, fields - Manager for coordinating multiple collectors - JournalCollector: systemd journal via journalctl CLI - FileCollector: tail log files with format parsing (plain, json, nginx) - DockerCollector: docker container logs via docker CLI - All collectors are pure Go (no CGO dependencies) Database Storage: - Add logs table with indexes for efficient querying - StoreLogs: batch insert log entries - QueryLogs: filter by agent, source, level, time, full-text search - DeleteOldLogs: retention cleanup - Implementations for both SQLite and PostgreSQL Frontend Log Viewer: - Log types and level color definitions - Logs API client with streaming support - /logs route with search, level filters, source filters - Live streaming mode for real-time log tailing - Paginated loading with load more 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -60,6 +60,11 @@ type Database interface {
|
||||
|
||||
// Retention
|
||||
RunRetention(ctx context.Context) error
|
||||
|
||||
// Logs
|
||||
StoreLogs(ctx context.Context, entries []LogEntry) error
|
||||
QueryLogs(ctx context.Context, filter LogFilter) ([]LogEntry, int, error)
|
||||
DeleteOldLogs(ctx context.Context, before time.Time) (int, error)
|
||||
}
|
||||
|
||||
// MetricPoint represents a single metric data point.
|
||||
@@ -182,6 +187,42 @@ type AlertFilter struct {
|
||||
Offset int
|
||||
}
|
||||
|
||||
// LogLevel represents log severity.
type LogLevel string

// Log severity levels, ordered from least to most severe.
// The string values are stored verbatim in the logs table's level column.
const (
	LogLevelDebug   LogLevel = "debug"
	LogLevelInfo    LogLevel = "info"
	LogLevelWarning LogLevel = "warning"
	LogLevelError   LogLevel = "error"
	LogLevelFatal   LogLevel = "fatal"
)
|
||||
|
||||
// LogEntry represents a stored log entry.
//
// Fields carries optional structured key/value pairs attached to the entry;
// it is persisted as JSON (JSONB on PostgreSQL, TEXT on SQLite) and omitted
// from the JSON encoding when empty.
type LogEntry struct {
	ID         int64             `json:"id"`         // Database row ID (assigned on insert)
	AgentID    string            `json:"agentId"`    // Owning agent; rows cascade-delete with the agent
	Timestamp  time.Time         `json:"timestamp"`  // When the log line was produced
	Source     string            `json:"source"`     // "journal", "file", "docker"
	SourceName string            `json:"sourceName"` // Unit name, filename, container
	Level      LogLevel          `json:"level"`
	Message    string            `json:"message"`
	Fields     map[string]string `json:"fields,omitempty"`
}
|
||||
|
||||
// LogFilter specifies criteria for querying logs.
// Zero-valued fields are ignored, i.e. no filtering is applied on that
// dimension; all non-zero criteria are combined with AND.
type LogFilter struct {
	AgentID    string     // Filter by agent
	Source     string     // Filter by source type (journal, file, docker)
	SourceName string     // Filter by source name
	Level      []LogLevel // Filter by levels (matches any of the listed levels)
	Query      string     // Full-text search query (substring match on message)
	From       time.Time  // Inclusive lower bound on timestamp; zero means unbounded
	To         time.Time  // Inclusive upper bound on timestamp; zero means unbounded
	Limit      int        // Max entries per page; implementations default to 100 when <= 0
	Offset     int        // Pagination offset into the result set
}
|
||||
|
||||
// RetentionConfig defines data retention policies.
|
||||
type RetentionConfig struct {
|
||||
// Raw metrics retention (default: 24 hours)
|
||||
@@ -195,6 +236,9 @@ type RetentionConfig struct {
|
||||
|
||||
// Hourly aggregation retention (default: 1 year)
|
||||
HourlyRetention time.Duration
|
||||
|
||||
// Log retention (default: 7 days)
|
||||
LogRetention time.Duration
|
||||
}
|
||||
|
||||
// DefaultRetentionConfig returns default retention settings.
|
||||
@@ -204,5 +248,6 @@ func DefaultRetentionConfig() RetentionConfig {
|
||||
OneMinuteRetention: 7 * 24 * time.Hour,
|
||||
FiveMinuteRetention: 30 * 24 * time.Hour,
|
||||
HourlyRetention: 365 * 24 * time.Hour,
|
||||
LogRetention: 7 * 24 * time.Hour,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,6 +59,7 @@ func (p *PostgresDB) Migrate() error {
|
||||
pgMigrationSessions,
|
||||
pgMigrationMetrics,
|
||||
pgMigrationAlerts,
|
||||
pgMigrationLogs,
|
||||
}
|
||||
|
||||
for i, m := range migrations {
|
||||
@@ -204,6 +205,23 @@ CREATE INDEX IF NOT EXISTS idx_alerts_triggered ON alerts(triggered_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_alerts_severity ON alerts(severity);
|
||||
`
|
||||
|
||||
// pgMigrationLogs creates the logs table and the indexes that back
// QueryLogs (agent+time, level, source, and time-only lookups).
// fields holds optional structured key/value pairs as JSONB; log rows are
// removed automatically when their agent is deleted (ON DELETE CASCADE).
const pgMigrationLogs = `
CREATE TABLE IF NOT EXISTS logs (
	id BIGSERIAL PRIMARY KEY,
	agent_id TEXT NOT NULL REFERENCES agents(id) ON DELETE CASCADE,
	timestamp TIMESTAMPTZ NOT NULL,
	source TEXT NOT NULL,
	source_name TEXT,
	level TEXT NOT NULL,
	message TEXT NOT NULL,
	fields JSONB
);
CREATE INDEX IF NOT EXISTS idx_logs_agent_time ON logs(agent_id, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_logs_level ON logs(level);
CREATE INDEX IF NOT EXISTS idx_logs_source ON logs(source, source_name);
CREATE INDEX IF NOT EXISTS idx_logs_timestamp ON logs(timestamp DESC);
`
|
||||
|
||||
func (p *PostgresDB) insertDefaultRoles() error {
|
||||
defaultRoles := []struct {
|
||||
id, name, desc string
|
||||
@@ -1050,3 +1068,185 @@ func (p *PostgresDB) aggregate5MinToHourly(ctx context.Context, before time.Time
|
||||
`, before)
|
||||
return err
|
||||
}
|
||||
|
||||
// =====================
|
||||
// Log Storage Methods
|
||||
// =====================
|
||||
|
||||
// StoreLogs stores multiple log entries in a batch.
|
||||
func (p *PostgresDB) StoreLogs(ctx context.Context, entries []LogEntry) error {
|
||||
if len(entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := p.db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("begin tx: %w", err)
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
stmt, err := tx.PrepareContext(ctx, `
|
||||
INSERT INTO logs (agent_id, timestamp, source, source_name, level, message, fields)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("prepare stmt: %w", err)
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
for _, e := range entries {
|
||||
var fieldsJSON []byte
|
||||
if len(e.Fields) > 0 {
|
||||
fieldsJSON, _ = json.Marshal(e.Fields)
|
||||
}
|
||||
|
||||
_, err := stmt.ExecContext(ctx,
|
||||
e.AgentID,
|
||||
e.Timestamp,
|
||||
e.Source,
|
||||
e.SourceName,
|
||||
string(e.Level),
|
||||
e.Message,
|
||||
fieldsJSON,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("insert log: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// QueryLogs queries logs with filtering and pagination.
|
||||
// Returns entries, total count, and error.
|
||||
func (p *PostgresDB) QueryLogs(ctx context.Context, filter LogFilter) ([]LogEntry, int, error) {
|
||||
var conditions []string
|
||||
var args []interface{}
|
||||
argNum := 1
|
||||
|
||||
if filter.AgentID != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("agent_id = $%d", argNum))
|
||||
args = append(args, filter.AgentID)
|
||||
argNum++
|
||||
}
|
||||
|
||||
if filter.Source != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("source = $%d", argNum))
|
||||
args = append(args, filter.Source)
|
||||
argNum++
|
||||
}
|
||||
|
||||
if filter.SourceName != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("source_name = $%d", argNum))
|
||||
args = append(args, filter.SourceName)
|
||||
argNum++
|
||||
}
|
||||
|
||||
if len(filter.Level) > 0 {
|
||||
placeholders := make([]string, len(filter.Level))
|
||||
for i, l := range filter.Level {
|
||||
placeholders[i] = fmt.Sprintf("$%d", argNum)
|
||||
args = append(args, string(l))
|
||||
argNum++
|
||||
}
|
||||
conditions = append(conditions, fmt.Sprintf("level IN (%s)", strings.Join(placeholders, ",")))
|
||||
}
|
||||
|
||||
if filter.Query != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("message ILIKE $%d", argNum))
|
||||
args = append(args, "%"+filter.Query+"%")
|
||||
argNum++
|
||||
}
|
||||
|
||||
if !filter.From.IsZero() {
|
||||
conditions = append(conditions, fmt.Sprintf("timestamp >= $%d", argNum))
|
||||
args = append(args, filter.From)
|
||||
argNum++
|
||||
}
|
||||
|
||||
if !filter.To.IsZero() {
|
||||
conditions = append(conditions, fmt.Sprintf("timestamp <= $%d", argNum))
|
||||
args = append(args, filter.To)
|
||||
argNum++
|
||||
}
|
||||
|
||||
whereClause := ""
|
||||
if len(conditions) > 0 {
|
||||
whereClause = "WHERE " + strings.Join(conditions, " AND ")
|
||||
}
|
||||
|
||||
// Get total count
|
||||
countQuery := fmt.Sprintf("SELECT COUNT(*) FROM logs %s", whereClause)
|
||||
var total int
|
||||
if err := p.db.QueryRowContext(ctx, countQuery, args...).Scan(&total); err != nil {
|
||||
return nil, 0, fmt.Errorf("count logs: %w", err)
|
||||
}
|
||||
|
||||
// Get entries with pagination
|
||||
limit := filter.Limit
|
||||
if limit <= 0 {
|
||||
limit = 100
|
||||
}
|
||||
|
||||
query := fmt.Sprintf(`
|
||||
SELECT id, agent_id, timestamp, source, source_name, level, message, fields
|
||||
FROM logs %s
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT $%d OFFSET $%d
|
||||
`, whereClause, argNum, argNum+1)
|
||||
|
||||
args = append(args, limit, filter.Offset)
|
||||
|
||||
rows, err := p.db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("query logs: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var entries []LogEntry
|
||||
for rows.Next() {
|
||||
var e LogEntry
|
||||
var level string
|
||||
var fieldsJSON sql.NullString
|
||||
|
||||
err := rows.Scan(
|
||||
&e.ID,
|
||||
&e.AgentID,
|
||||
&e.Timestamp,
|
||||
&e.Source,
|
||||
&e.SourceName,
|
||||
&level,
|
||||
&e.Message,
|
||||
&fieldsJSON,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("scan log: %w", err)
|
||||
}
|
||||
|
||||
e.Level = LogLevel(level)
|
||||
|
||||
if fieldsJSON.Valid && fieldsJSON.String != "" {
|
||||
json.Unmarshal([]byte(fieldsJSON.String), &e.Fields)
|
||||
}
|
||||
|
||||
entries = append(entries, e)
|
||||
}
|
||||
|
||||
return entries, total, nil
|
||||
}
|
||||
|
||||
// DeleteOldLogs deletes logs older than the specified time.
|
||||
// Returns the number of deleted entries.
|
||||
func (p *PostgresDB) DeleteOldLogs(ctx context.Context, before time.Time) (int, error) {
|
||||
result, err := p.db.ExecContext(ctx, "DELETE FROM logs WHERE timestamp < $1", before)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete old logs: %w", err)
|
||||
}
|
||||
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int(affected), nil
|
||||
}
|
||||
|
||||
@@ -62,6 +62,7 @@ func (s *SQLiteDB) Migrate() error {
|
||||
migrationSessions,
|
||||
migrationMetrics,
|
||||
migrationAlerts,
|
||||
migrationLogs,
|
||||
}
|
||||
|
||||
for i, m := range migrations {
|
||||
@@ -213,6 +214,24 @@ CREATE INDEX IF NOT EXISTS idx_alerts_triggered ON alerts(triggered_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_alerts_severity ON alerts(severity);
|
||||
`
|
||||
|
||||
// migrationLogs creates the logs table and the indexes that back QueryLogs
// (agent+time, level, source, and time-only lookups) for SQLite.
// fields holds optional structured key/value pairs as a JSON TEXT column.
// NOTE(review): the ON DELETE CASCADE foreign key only takes effect when the
// connection runs with PRAGMA foreign_keys=ON — confirm the driver/DSN
// enables it.
const migrationLogs = `
CREATE TABLE IF NOT EXISTS logs (
	id INTEGER PRIMARY KEY AUTOINCREMENT,
	agent_id TEXT NOT NULL,
	timestamp TIMESTAMP NOT NULL,
	source TEXT NOT NULL,
	source_name TEXT,
	level TEXT NOT NULL,
	message TEXT NOT NULL,
	fields TEXT, -- JSON object
	FOREIGN KEY (agent_id) REFERENCES agents(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_logs_agent_time ON logs(agent_id, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_logs_level ON logs(level);
CREATE INDEX IF NOT EXISTS idx_logs_source ON logs(source, source_name);
CREATE INDEX IF NOT EXISTS idx_logs_timestamp ON logs(timestamp DESC);
`
|
||||
|
||||
func (s *SQLiteDB) insertDefaultRoles() error {
|
||||
defaultRoles := []struct {
|
||||
id, name, desc string
|
||||
@@ -1132,3 +1151,177 @@ func aggregatePoints(points []MetricPoint) MetricPoint {
|
||||
|
||||
return agg
|
||||
}
|
||||
|
||||
// =====================
|
||||
// Log Storage Methods
|
||||
// =====================
|
||||
|
||||
// StoreLogs stores multiple log entries in a batch.
|
||||
func (s *SQLiteDB) StoreLogs(ctx context.Context, entries []LogEntry) error {
|
||||
if len(entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := s.db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("begin tx: %w", err)
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
stmt, err := tx.PrepareContext(ctx, `
|
||||
INSERT INTO logs (agent_id, timestamp, source, source_name, level, message, fields)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("prepare stmt: %w", err)
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
for _, e := range entries {
|
||||
var fieldsJSON []byte
|
||||
if len(e.Fields) > 0 {
|
||||
fieldsJSON, _ = json.Marshal(e.Fields)
|
||||
}
|
||||
|
||||
_, err := stmt.ExecContext(ctx,
|
||||
e.AgentID,
|
||||
e.Timestamp,
|
||||
e.Source,
|
||||
e.SourceName,
|
||||
string(e.Level),
|
||||
e.Message,
|
||||
fieldsJSON,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("insert log: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// QueryLogs queries logs with filtering and pagination.
|
||||
// Returns entries, total count, and error.
|
||||
func (s *SQLiteDB) QueryLogs(ctx context.Context, filter LogFilter) ([]LogEntry, int, error) {
|
||||
var conditions []string
|
||||
var args []interface{}
|
||||
|
||||
if filter.AgentID != "" {
|
||||
conditions = append(conditions, "agent_id = ?")
|
||||
args = append(args, filter.AgentID)
|
||||
}
|
||||
|
||||
if filter.Source != "" {
|
||||
conditions = append(conditions, "source = ?")
|
||||
args = append(args, filter.Source)
|
||||
}
|
||||
|
||||
if filter.SourceName != "" {
|
||||
conditions = append(conditions, "source_name = ?")
|
||||
args = append(args, filter.SourceName)
|
||||
}
|
||||
|
||||
if len(filter.Level) > 0 {
|
||||
placeholders := make([]string, len(filter.Level))
|
||||
for i, l := range filter.Level {
|
||||
placeholders[i] = "?"
|
||||
args = append(args, string(l))
|
||||
}
|
||||
conditions = append(conditions, fmt.Sprintf("level IN (%s)", strings.Join(placeholders, ",")))
|
||||
}
|
||||
|
||||
if filter.Query != "" {
|
||||
conditions = append(conditions, "message LIKE ?")
|
||||
args = append(args, "%"+filter.Query+"%")
|
||||
}
|
||||
|
||||
if !filter.From.IsZero() {
|
||||
conditions = append(conditions, "timestamp >= ?")
|
||||
args = append(args, filter.From)
|
||||
}
|
||||
|
||||
if !filter.To.IsZero() {
|
||||
conditions = append(conditions, "timestamp <= ?")
|
||||
args = append(args, filter.To)
|
||||
}
|
||||
|
||||
whereClause := ""
|
||||
if len(conditions) > 0 {
|
||||
whereClause = "WHERE " + strings.Join(conditions, " AND ")
|
||||
}
|
||||
|
||||
// Get total count
|
||||
countQuery := fmt.Sprintf("SELECT COUNT(*) FROM logs %s", whereClause)
|
||||
var total int
|
||||
if err := s.db.QueryRowContext(ctx, countQuery, args...).Scan(&total); err != nil {
|
||||
return nil, 0, fmt.Errorf("count logs: %w", err)
|
||||
}
|
||||
|
||||
// Get entries with pagination
|
||||
limit := filter.Limit
|
||||
if limit <= 0 {
|
||||
limit = 100
|
||||
}
|
||||
|
||||
query := fmt.Sprintf(`
|
||||
SELECT id, agent_id, timestamp, source, source_name, level, message, fields
|
||||
FROM logs %s
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT ? OFFSET ?
|
||||
`, whereClause)
|
||||
|
||||
args = append(args, limit, filter.Offset)
|
||||
|
||||
rows, err := s.db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("query logs: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var entries []LogEntry
|
||||
for rows.Next() {
|
||||
var e LogEntry
|
||||
var level string
|
||||
var fieldsJSON sql.NullString
|
||||
|
||||
err := rows.Scan(
|
||||
&e.ID,
|
||||
&e.AgentID,
|
||||
&e.Timestamp,
|
||||
&e.Source,
|
||||
&e.SourceName,
|
||||
&level,
|
||||
&e.Message,
|
||||
&fieldsJSON,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("scan log: %w", err)
|
||||
}
|
||||
|
||||
e.Level = LogLevel(level)
|
||||
|
||||
if fieldsJSON.Valid && fieldsJSON.String != "" {
|
||||
json.Unmarshal([]byte(fieldsJSON.String), &e.Fields)
|
||||
}
|
||||
|
||||
entries = append(entries, e)
|
||||
}
|
||||
|
||||
return entries, total, nil
|
||||
}
|
||||
|
||||
// DeleteOldLogs deletes logs older than the specified time.
|
||||
// Returns the number of deleted entries.
|
||||
func (s *SQLiteDB) DeleteOldLogs(ctx context.Context, before time.Time) (int, error) {
|
||||
result, err := s.db.ExecContext(ctx, "DELETE FROM logs WHERE timestamp < ?", before)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete old logs: %w", err)
|
||||
}
|
||||
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int(affected), nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user