feat: add log collection and viewing system
Log Collectors (backend/internal/collectors/logs/):
- LogEntry model with level, source, message, fields
- Manager for coordinating multiple collectors
- JournalCollector: systemd journal via journalctl CLI
- FileCollector: tail log files with format parsing (plain, json, nginx)
- DockerCollector: docker container logs via docker CLI
- All collectors are pure Go (no CGO dependencies)

Database Storage:
- Add logs table with indexes for efficient querying
- StoreLogs: batch insert log entries
- QueryLogs: filter by agent, source, level, time, full-text search
- DeleteOldLogs: retention cleanup
- Implementations for both SQLite and PostgreSQL

Frontend Log Viewer:
- Log types and level color definitions
- Logs API client with streaming support
- /logs route with search, level filters, source filters
- Live streaming mode for real-time log tailing
- Paginated loading with load more

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -59,6 +59,7 @@ func (p *PostgresDB) Migrate() error {
|
||||
pgMigrationSessions,
|
||||
pgMigrationMetrics,
|
||||
pgMigrationAlerts,
|
||||
pgMigrationLogs,
|
||||
}
|
||||
|
||||
for i, m := range migrations {
|
||||
@@ -204,6 +205,23 @@ CREATE INDEX IF NOT EXISTS idx_alerts_triggered ON alerts(triggered_at);
|
||||
CREATE INDEX IF NOT EXISTS idx_alerts_severity ON alerts(severity);
|
||||
`
|
||||
|
||||
// pgMigrationLogs creates the logs table plus the indexes the query paths
// rely on: (agent_id, timestamp DESC) for per-agent tailing, level and
// (source, source_name) for filter dropdowns, and timestamp DESC for
// global newest-first listing and retention cleanup.
// Note: agent deletion cascades to its logs (ON DELETE CASCADE);
// fields is stored as JSONB and may be NULL.
const pgMigrationLogs = `
CREATE TABLE IF NOT EXISTS logs (
id BIGSERIAL PRIMARY KEY,
agent_id TEXT NOT NULL REFERENCES agents(id) ON DELETE CASCADE,
timestamp TIMESTAMPTZ NOT NULL,
source TEXT NOT NULL,
source_name TEXT,
level TEXT NOT NULL,
message TEXT NOT NULL,
fields JSONB
);
CREATE INDEX IF NOT EXISTS idx_logs_agent_time ON logs(agent_id, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_logs_level ON logs(level);
CREATE INDEX IF NOT EXISTS idx_logs_source ON logs(source, source_name);
CREATE INDEX IF NOT EXISTS idx_logs_timestamp ON logs(timestamp DESC);
`
|
||||
|
||||
func (p *PostgresDB) insertDefaultRoles() error {
|
||||
defaultRoles := []struct {
|
||||
id, name, desc string
|
||||
@@ -1050,3 +1068,185 @@ func (p *PostgresDB) aggregate5MinToHourly(ctx context.Context, before time.Time
|
||||
`, before)
|
||||
return err
|
||||
}
|
||||
|
||||
// =====================
|
||||
// Log Storage Methods
|
||||
// =====================
|
||||
|
||||
// StoreLogs stores multiple log entries in a batch.
|
||||
func (p *PostgresDB) StoreLogs(ctx context.Context, entries []LogEntry) error {
|
||||
if len(entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := p.db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("begin tx: %w", err)
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
stmt, err := tx.PrepareContext(ctx, `
|
||||
INSERT INTO logs (agent_id, timestamp, source, source_name, level, message, fields)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
||||
`)
|
||||
if err != nil {
|
||||
return fmt.Errorf("prepare stmt: %w", err)
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
for _, e := range entries {
|
||||
var fieldsJSON []byte
|
||||
if len(e.Fields) > 0 {
|
||||
fieldsJSON, _ = json.Marshal(e.Fields)
|
||||
}
|
||||
|
||||
_, err := stmt.ExecContext(ctx,
|
||||
e.AgentID,
|
||||
e.Timestamp,
|
||||
e.Source,
|
||||
e.SourceName,
|
||||
string(e.Level),
|
||||
e.Message,
|
||||
fieldsJSON,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("insert log: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// QueryLogs queries logs with filtering and pagination.
|
||||
// Returns entries, total count, and error.
|
||||
func (p *PostgresDB) QueryLogs(ctx context.Context, filter LogFilter) ([]LogEntry, int, error) {
|
||||
var conditions []string
|
||||
var args []interface{}
|
||||
argNum := 1
|
||||
|
||||
if filter.AgentID != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("agent_id = $%d", argNum))
|
||||
args = append(args, filter.AgentID)
|
||||
argNum++
|
||||
}
|
||||
|
||||
if filter.Source != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("source = $%d", argNum))
|
||||
args = append(args, filter.Source)
|
||||
argNum++
|
||||
}
|
||||
|
||||
if filter.SourceName != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("source_name = $%d", argNum))
|
||||
args = append(args, filter.SourceName)
|
||||
argNum++
|
||||
}
|
||||
|
||||
if len(filter.Level) > 0 {
|
||||
placeholders := make([]string, len(filter.Level))
|
||||
for i, l := range filter.Level {
|
||||
placeholders[i] = fmt.Sprintf("$%d", argNum)
|
||||
args = append(args, string(l))
|
||||
argNum++
|
||||
}
|
||||
conditions = append(conditions, fmt.Sprintf("level IN (%s)", strings.Join(placeholders, ",")))
|
||||
}
|
||||
|
||||
if filter.Query != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("message ILIKE $%d", argNum))
|
||||
args = append(args, "%"+filter.Query+"%")
|
||||
argNum++
|
||||
}
|
||||
|
||||
if !filter.From.IsZero() {
|
||||
conditions = append(conditions, fmt.Sprintf("timestamp >= $%d", argNum))
|
||||
args = append(args, filter.From)
|
||||
argNum++
|
||||
}
|
||||
|
||||
if !filter.To.IsZero() {
|
||||
conditions = append(conditions, fmt.Sprintf("timestamp <= $%d", argNum))
|
||||
args = append(args, filter.To)
|
||||
argNum++
|
||||
}
|
||||
|
||||
whereClause := ""
|
||||
if len(conditions) > 0 {
|
||||
whereClause = "WHERE " + strings.Join(conditions, " AND ")
|
||||
}
|
||||
|
||||
// Get total count
|
||||
countQuery := fmt.Sprintf("SELECT COUNT(*) FROM logs %s", whereClause)
|
||||
var total int
|
||||
if err := p.db.QueryRowContext(ctx, countQuery, args...).Scan(&total); err != nil {
|
||||
return nil, 0, fmt.Errorf("count logs: %w", err)
|
||||
}
|
||||
|
||||
// Get entries with pagination
|
||||
limit := filter.Limit
|
||||
if limit <= 0 {
|
||||
limit = 100
|
||||
}
|
||||
|
||||
query := fmt.Sprintf(`
|
||||
SELECT id, agent_id, timestamp, source, source_name, level, message, fields
|
||||
FROM logs %s
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT $%d OFFSET $%d
|
||||
`, whereClause, argNum, argNum+1)
|
||||
|
||||
args = append(args, limit, filter.Offset)
|
||||
|
||||
rows, err := p.db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("query logs: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var entries []LogEntry
|
||||
for rows.Next() {
|
||||
var e LogEntry
|
||||
var level string
|
||||
var fieldsJSON sql.NullString
|
||||
|
||||
err := rows.Scan(
|
||||
&e.ID,
|
||||
&e.AgentID,
|
||||
&e.Timestamp,
|
||||
&e.Source,
|
||||
&e.SourceName,
|
||||
&level,
|
||||
&e.Message,
|
||||
&fieldsJSON,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("scan log: %w", err)
|
||||
}
|
||||
|
||||
e.Level = LogLevel(level)
|
||||
|
||||
if fieldsJSON.Valid && fieldsJSON.String != "" {
|
||||
json.Unmarshal([]byte(fieldsJSON.String), &e.Fields)
|
||||
}
|
||||
|
||||
entries = append(entries, e)
|
||||
}
|
||||
|
||||
return entries, total, nil
|
||||
}
|
||||
|
||||
// DeleteOldLogs deletes logs older than the specified time.
|
||||
// Returns the number of deleted entries.
|
||||
func (p *PostgresDB) DeleteOldLogs(ctx context.Context, before time.Time) (int, error) {
|
||||
result, err := p.db.ExecContext(ctx, "DELETE FROM logs WHERE timestamp < $1", before)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("delete old logs: %w", err)
|
||||
}
|
||||
|
||||
affected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int(affected), nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user