Add LLM actions endpoint that generates hour-specific heat management recommendations. Replace static action engine with AI-driven approach. Add cool mode logic (ventilate/ac/overloaded), indoor temperature tracking, and timeline legend with annotations.
108 lines · 2.8 KiB · Go
package llm
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
"net/http"
|
|
"strings"
|
|
"time"
|
|
)
|
|
|
|
// OpenAI implements Provider using the OpenAI Chat Completions API.
|
|
type OpenAI struct {
|
|
apiKey string
|
|
model string
|
|
client *http.Client
|
|
baseURL string
|
|
}
|
|
|
|
// NewOpenAI creates a new OpenAI provider.
|
|
func NewOpenAI(apiKey, model string, client *http.Client) *OpenAI {
|
|
if client == nil {
|
|
client = &http.Client{Timeout: 60 * time.Second}
|
|
}
|
|
if model == "" {
|
|
model = "gpt-4o"
|
|
}
|
|
return &OpenAI{apiKey: apiKey, model: model, client: client, baseURL: "https://api.openai.com"}
|
|
}
|
|
|
|
// Name reports this provider's identifier, "openai".
func (o *OpenAI) Name() string { return "openai" }
|
|
|
|
type openAIRequest struct {
|
|
Model string `json:"model"`
|
|
Messages []openAIMessage `json:"messages"`
|
|
}
|
|
|
|
type openAIMessage struct {
|
|
Role string `json:"role"`
|
|
Content string `json:"content"`
|
|
}
|
|
|
|
type openAIResponse struct {
|
|
Choices []struct {
|
|
Message struct {
|
|
Content string `json:"content"`
|
|
} `json:"message"`
|
|
} `json:"choices"`
|
|
Error *struct {
|
|
Message string `json:"message"`
|
|
} `json:"error"`
|
|
}
|
|
|
|
func (o *OpenAI) call(ctx context.Context, systemPrompt, userMessage string) (string, error) {
|
|
reqBody := openAIRequest{
|
|
Model: o.model,
|
|
Messages: []openAIMessage{
|
|
{Role: "system", Content: systemPrompt},
|
|
{Role: "user", Content: userMessage},
|
|
},
|
|
}
|
|
body, err := json.Marshal(reqBody)
|
|
if err != nil {
|
|
return "", fmt.Errorf("marshal request: %w", err)
|
|
}
|
|
|
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, o.baseURL+"/v1/chat/completions", strings.NewReader(string(body)))
|
|
if err != nil {
|
|
return "", fmt.Errorf("build request: %w", err)
|
|
}
|
|
req.Header.Set("Content-Type", "application/json")
|
|
req.Header.Set("Authorization", "Bearer "+o.apiKey)
|
|
|
|
resp, err := o.client.Do(req)
|
|
if err != nil {
|
|
return "", fmt.Errorf("openai call: %w", err)
|
|
}
|
|
defer resp.Body.Close()
|
|
|
|
var result openAIResponse
|
|
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
|
return "", fmt.Errorf("decode response: %w", err)
|
|
}
|
|
if result.Error != nil {
|
|
return "", fmt.Errorf("openai error: %s", result.Error.Message)
|
|
}
|
|
if len(result.Choices) == 0 {
|
|
return "", fmt.Errorf("empty response from openai")
|
|
}
|
|
return result.Choices[0].Message.Content, nil
|
|
}
|
|
|
|
func (o *OpenAI) Summarize(ctx context.Context, input SummaryInput) (string, error) {
|
|
return o.call(ctx, SummarizeSystemPrompt(), BuildSummaryPrompt(input))
|
|
}
|
|
|
|
func (o *OpenAI) RewriteAction(ctx context.Context, input ActionInput) (string, error) {
|
|
return o.call(ctx, RewriteActionSystemPrompt(), BuildRewriteActionPrompt(input))
|
|
}
|
|
|
|
func (o *OpenAI) GenerateHeatPlan(ctx context.Context, input HeatPlanInput) (string, error) {
|
|
return o.call(ctx, HeatPlanSystemPrompt(), BuildHeatPlanPrompt(input))
|
|
}
|
|
|
|
func (o *OpenAI) GenerateActions(ctx context.Context, input ActionsInput) (string, error) {
|
|
return o.call(ctx, GenerateActionsSystemPrompt(), BuildActionsPrompt(input))
|
|
}
|