Files
HeatGuard/internal/llm/openai.go
vikingowl 1c9db02334 feat: add web UI with full CRUD setup page
Add server-side rendered setup UI accessible via `heatwave web`.
The dashboard is now re-rendered per request and includes a nav bar
linking to the new /setup page. Setup provides full CRUD for profiles,
rooms, devices, occupants, AC units (with room assignment), scenario
toggles, and forecast fetching — all via POST/redirect/GET forms.

- Add ShowNav field to DashboardData for conditional nav bar
- Extract fetchForecastForProfile() for reuse by web handler
- Create setup.html.tmpl with Tailwind-styled entity sections
- Create web_handlers.go with 15 route handlers and flash cookies
- Switch web.go from pre-rendered to per-request dashboard rendering
- Graceful dashboard fallback when no forecast data exists
2026-02-09 10:39:00 +01:00

104 lines
2.7 KiB
Go

package llm
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
)
// OpenAI implements Provider using the OpenAI Chat Completions API.
type OpenAI struct {
	apiKey  string       // bearer token sent in the Authorization header
	model   string       // chat model name; NewOpenAI defaults it to "gpt-4o"
	client  *http.Client // HTTP client used for all API calls
	baseURL string       // API root; NewOpenAI sets "https://api.openai.com"
}
// NewOpenAI creates a new OpenAI provider. A nil client is replaced by a
// default client with a 60-second timeout, and an empty model defaults to
// "gpt-4o".
func NewOpenAI(apiKey, model string, client *http.Client) *OpenAI {
	httpClient := client
	if httpClient == nil {
		httpClient = &http.Client{Timeout: 60 * time.Second}
	}
	chosenModel := model
	if chosenModel == "" {
		chosenModel = "gpt-4o"
	}
	return &OpenAI{
		apiKey:  apiKey,
		model:   chosenModel,
		client:  httpClient,
		baseURL: "https://api.openai.com",
	}
}
// Name returns the provider identifier used to select this backend.
func (o *OpenAI) Name() string {
	return "openai"
}
// openAIRequest is the JSON request payload for POST /v1/chat/completions.
type openAIRequest struct {
	Model    string          `json:"model"`    // chat model name
	Messages []openAIMessage `json:"messages"` // conversation, in order
}
// openAIMessage is one chat message: a role ("system" or "user" in this
// package) and its text content.
type openAIMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}
// openAIResponse mirrors the subset of the Chat Completions response body
// this package reads: each choice's message content, plus the error object
// the API returns on failure (nil on success).
type openAIResponse struct {
	Choices []struct {
		Message struct {
			Content string `json:"content"`
		} `json:"message"`
	} `json:"choices"`
	Error *struct {
		Message string `json:"message"`
	} `json:"error"`
}
// call sends a two-message (system + user) chat completion request and
// returns the content of the first choice.
//
// Errors are wrapped with the failing step: marshalling, request
// construction, transport, response decoding, an API-level error object,
// a non-200 HTTP status, or an empty choice list.
func (o *OpenAI) call(ctx context.Context, systemPrompt, userMessage string) (string, error) {
	reqBody := openAIRequest{
		Model: o.model,
		Messages: []openAIMessage{
			{Role: "system", Content: systemPrompt},
			{Role: "user", Content: userMessage},
		},
	}
	body, err := json.Marshal(reqBody)
	if err != nil {
		return "", fmt.Errorf("marshal request: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, o.baseURL+"/v1/chat/completions", strings.NewReader(string(body)))
	if err != nil {
		return "", fmt.Errorf("build request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+o.apiKey)
	resp, err := o.client.Do(req)
	if err != nil {
		return "", fmt.Errorf("openai call: %w", err)
	}
	defer resp.Body.Close()
	var result openAIResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		// Include the HTTP status: proxies/gateways can return non-JSON
		// bodies (e.g. HTML error pages) that would otherwise surface as an
		// opaque decode failure.
		return "", fmt.Errorf("decode response (status %s): %w", resp.Status, err)
	}
	if result.Error != nil {
		return "", fmt.Errorf("openai error: %s", result.Error.Message)
	}
	// A non-2xx status without an error object would previously fall through
	// to the misleading "empty response from openai" message below.
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("openai returned status %s", resp.Status)
	}
	if len(result.Choices) == 0 {
		return "", fmt.Errorf("empty response from openai")
	}
	return result.Choices[0].Message.Content, nil
}
// Summarize requests a summary for the given input via the chat API.
func (o *OpenAI) Summarize(ctx context.Context, input SummaryInput) (string, error) {
	system := SummarizeSystemPrompt()
	user := BuildSummaryPrompt(input)
	return o.call(ctx, system, user)
}
// RewriteAction requests a rewritten action text for the given input via the
// chat API.
func (o *OpenAI) RewriteAction(ctx context.Context, input ActionInput) (string, error) {
	system := RewriteActionSystemPrompt()
	user := BuildRewriteActionPrompt(input)
	return o.call(ctx, system, user)
}
// GenerateHeatPlan requests a heat plan for the given input via the chat API.
func (o *OpenAI) GenerateHeatPlan(ctx context.Context, input HeatPlanInput) (string, error) {
	system := HeatPlanSystemPrompt()
	user := BuildHeatPlanPrompt(input)
	return o.call(ctx, system, user)
}