Files
HeatGuard/internal/llm/ollama.go
vikingowl 5e6696aa42 feat: add AI-powered actions endpoint and timeline annotations
Add LLM actions endpoint that generates hour-specific heat
management recommendations. Replace static action engine with
AI-driven approach. Add cool mode logic (ventilate/ac/overloaded),
indoor temperature tracking, and timeline legend with annotations.
2026-02-10 03:54:09 +01:00

104 lines
2.7 KiB
Go

package llm
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
)
// Ollama implements Provider using a local Ollama instance.
type Ollama struct {
	model    string       // model name passed in each request (defaults to "llama3.2" in NewOllama)
	endpoint string       // base URL of the Ollama server; "/api/chat" is appended per call
	client   *http.Client // HTTP client used for all requests (defaulted in NewOllama)
}
// NewOllama creates a new Ollama provider. Empty or nil arguments are
// replaced with defaults: model "llama3.2", endpoint
// "http://localhost:11434", and an HTTP client with a 120s timeout
// (LLM completions can be slow).
func NewOllama(model, endpoint string, client *http.Client) *Ollama {
	o := &Ollama{model: model, endpoint: endpoint, client: client}
	if o.client == nil {
		o.client = &http.Client{Timeout: 120 * time.Second}
	}
	if o.model == "" {
		o.model = "llama3.2"
	}
	if o.endpoint == "" {
		o.endpoint = "http://localhost:11434"
	}
	return o
}
// Name returns the provider's identifier.
func (o *Ollama) Name() string {
	return "ollama"
}
// ollamaRequest is the JSON body sent to Ollama's /api/chat endpoint.
type ollamaRequest struct {
	Model    string          `json:"model"`
	Messages []ollamaMessage `json:"messages"`
	Stream   bool            `json:"stream"` // always false here: a single complete response is expected
}
// ollamaMessage is one chat message in an ollamaRequest
// ("system" or "user" role in this package).
type ollamaMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}
// ollamaResponse is the JSON body returned by Ollama's /api/chat endpoint.
type ollamaResponse struct {
	Message struct {
		Content string `json:"content"` // assistant's reply text
	} `json:"message"`
	Error string `json:"error"` // non-empty when Ollama reports an error in the body
}
// call sends a non-streaming chat request (system + user message) to the
// Ollama /api/chat endpoint and returns the assistant message content.
// Errors are wrapped with the step that produced them; an error reported
// in the response body takes precedence over a non-200 status code.
func (o *Ollama) call(ctx context.Context, systemPrompt, userMessage string) (string, error) {
	reqBody := ollamaRequest{
		Model: o.model,
		Messages: []ollamaMessage{
			{Role: "system", Content: systemPrompt},
			{Role: "user", Content: userMessage},
		},
		Stream: false, // request a single complete response, not a chunk stream
	}
	body, err := json.Marshal(reqBody)
	if err != nil {
		return "", fmt.Errorf("marshal request: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, o.endpoint+"/api/chat", strings.NewReader(string(body)))
	if err != nil {
		return "", fmt.Errorf("build request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := o.client.Do(req)
	if err != nil {
		return "", fmt.Errorf("ollama call: %w", err)
	}
	defer resp.Body.Close()
	var result ollamaResponse
	// Decode before checking the status: Ollama puts its error message in
	// the JSON body even on failure, and that message is more useful than
	// a bare status code. The decode error is deferred so a non-JSON error
	// body still yields a status-based message instead of a decode error.
	decodeErr := json.NewDecoder(resp.Body).Decode(&result)
	if result.Error != "" {
		return "", fmt.Errorf("ollama error: %s", result.Error)
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("ollama status: %s", resp.Status)
	}
	if decodeErr != nil {
		return "", fmt.Errorf("decode response: %w", decodeErr)
	}
	return result.Message.Content, nil
}
// Summarize asks the LLM for a summary of the given input, using the
// package's summary prompt builders.
func (o *Ollama) Summarize(ctx context.Context, input SummaryInput) (string, error) {
	system := SummarizeSystemPrompt()
	user := BuildSummaryPrompt(input)
	return o.call(ctx, system, user)
}
// RewriteAction asks the LLM to rewrite the given action, using the
// package's rewrite-action prompt builders.
func (o *Ollama) RewriteAction(ctx context.Context, input ActionInput) (string, error) {
	system := RewriteActionSystemPrompt()
	user := BuildRewriteActionPrompt(input)
	return o.call(ctx, system, user)
}
// GenerateHeatPlan asks the LLM for a heat plan built from the given
// input, using the package's heat-plan prompt builders.
func (o *Ollama) GenerateHeatPlan(ctx context.Context, input HeatPlanInput) (string, error) {
	system := HeatPlanSystemPrompt()
	user := BuildHeatPlanPrompt(input)
	return o.call(ctx, system, user)
}
// GenerateActions asks the LLM for recommended actions built from the
// given input, using the package's actions prompt builders.
func (o *Ollama) GenerateActions(ctx context.Context, input ActionsInput) (string, error) {
	system := GenerateActionsSystemPrompt()
	user := BuildActionsPrompt(input)
	return o.call(ctx, system, user)
}