diff --git a/backend/internal/domain/settings/handler.go b/backend/internal/domain/settings/handler.go new file mode 100644 index 0000000..e3aefe3 --- /dev/null +++ b/backend/internal/domain/settings/handler.go @@ -0,0 +1,75 @@ +package settings + +import ( + "net/http" + + "github.com/gin-gonic/gin" + + "marktvogt.de/backend/internal/pkg/ai" +) + +// AIStatus is the response payload for GET /admin/settings/ai. +type AIStatus struct { + Provider string `json:"provider"` + Connected bool `json:"connected"` + BaseURL string `json:"base_url,omitempty"` + Model string `json:"model"` + Models []string `json:"models"` +} + +// Handler serves AI settings endpoints. ollama is nil when the active +// provider is not Ollama. +type Handler struct { + ollama *ai.OllamaProvider + provider string +} + +func NewHandler(provider ai.Provider) *Handler { + ollama, _ := provider.(*ai.OllamaProvider) + return &Handler{ollama: ollama, provider: provider.Name()} +} + +func (h *Handler) GetAI(c *gin.Context) { + if h.ollama == nil { + c.JSON(http.StatusOK, gin.H{"data": AIStatus{ + Provider: h.provider, + Connected: true, + Model: "", + Models: []string{}, + }}) + return + } + + models, err := h.ollama.ListModels(c.Request.Context()) + status := AIStatus{ + Provider: "ollama", + BaseURL: h.ollama.BaseURL(), + Model: h.ollama.Model(), + Models: []string{}, + } + if err != nil { + status.Connected = false + } else { + status.Connected = true + for _, m := range models { + status.Models = append(status.Models, m.Name) + } + } + c.JSON(http.StatusOK, gin.H{"data": status}) +} + +func (h *Handler) SetModel(c *gin.Context) { + if h.ollama == nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "model switching only supported for Ollama provider"}) + return + } + var req struct { + Model string `json:"model" binding:"required"` + } + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "model is required"}) + return + } + h.ollama.SetModel(req.Model) + 
c.JSON(http.StatusOK, gin.H{"data": gin.H{"model": req.Model}}) +} diff --git a/backend/internal/domain/settings/routes.go b/backend/internal/domain/settings/routes.go new file mode 100644 index 0000000..160c56e --- /dev/null +++ b/backend/internal/domain/settings/routes.go @@ -0,0 +1,9 @@ +package settings + +import "github.com/gin-gonic/gin" + +func RegisterRoutes(rg *gin.RouterGroup, h *Handler, requireAuth, requireAdmin gin.HandlerFunc) { + admin := rg.Group("/admin", requireAuth, requireAdmin) + admin.GET("/settings/ai", h.GetAI) + admin.POST("/settings/ai/model", h.SetModel) +} diff --git a/backend/internal/pkg/ai/ollama.go b/backend/internal/pkg/ai/ollama.go index 95ee20d..dcc7781 100644 --- a/backend/internal/pkg/ai/ollama.go +++ b/backend/internal/pkg/ai/ollama.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "net/http" + "sync" "time" ) @@ -17,8 +18,10 @@ type OllamaConfig struct { } type OllamaProvider struct { - cfg OllamaConfig - client *http.Client + cfg OllamaConfig + client *http.Client + mu sync.RWMutex + activeModel string } func NewOllamaProvider(cfg OllamaConfig) *OllamaProvider { @@ -26,8 +29,9 @@ func NewOllamaProvider(cfg OllamaConfig) *OllamaProvider { cfg.Timeout = 300 * time.Second } return &OllamaProvider{ - cfg: cfg, - client: &http.Client{Timeout: cfg.Timeout}, + cfg: cfg, + client: &http.Client{Timeout: cfg.Timeout}, + activeModel: cfg.Model, } } @@ -35,6 +39,59 @@ func (p *OllamaProvider) Name() string { return "ollama" } func (p *OllamaProvider) SupportsJSONMode() bool { return true } func (p *OllamaProvider) SupportsJSONSchema() bool { return true } +func (p *OllamaProvider) BaseURL() string { + return p.cfg.BaseURL +} + +func (p *OllamaProvider) Model() string { + p.mu.RLock() + defer p.mu.RUnlock() + return p.activeModel +} + +func (p *OllamaProvider) SetModel(model string) { + p.mu.Lock() + defer p.mu.Unlock() + p.activeModel = model +} + +// OllamaModelInfo is a model entry from Ollama's /api/tags response. 
+type OllamaModelInfo struct { + Name string `json:"name"` + Size int64 `json:"size"` +} + +// ListModels calls Ollama's /api/tags endpoint and returns available models. +func (p *OllamaProvider) ListModels(ctx context.Context) ([]OllamaModelInfo, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, p.cfg.BaseURL+"/api/tags", nil) + if err != nil { + return nil, err + } + resp, err := p.client.Do(req) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + if resp.StatusCode != http.StatusOK { + b, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("ollama /api/tags: status %d: %s", resp.StatusCode, b) + } + var body struct { + Models []struct { + Name string `json:"name"` + Size int64 `json:"size"` + } `json:"models"` + } + if err := json.NewDecoder(resp.Body).Decode(&body); err != nil { + return nil, err + } + out := make([]OllamaModelInfo, len(body.Models)) + for i, m := range body.Models { + out[i] = OllamaModelInfo{Name: m.Name, Size: m.Size} + } + return out, nil +} + type ollamaChatReq struct { Model string `json:"model"` Messages []ollamaMessage `json:"messages"` @@ -64,7 +121,7 @@ type ollamaChatResp struct { func (p *OllamaProvider) Chat(ctx context.Context, req *ChatRequest) (*ChatResponse, error) { model := req.Model if model == "" { - model = p.cfg.Model + model = p.Model() } body := ollamaChatReq{ Model: model, diff --git a/backend/internal/server/routes.go b/backend/internal/server/routes.go index a8e34f0..b4a49e5 100644 --- a/backend/internal/server/routes.go +++ b/backend/internal/server/routes.go @@ -11,6 +11,7 @@ import ( "marktvogt.de/backend/internal/domain/discovery/crawler" "marktvogt.de/backend/internal/domain/discovery/enrich" "marktvogt.de/backend/internal/domain/market" + "marktvogt.de/backend/internal/domain/settings" "marktvogt.de/backend/internal/domain/user" "marktvogt.de/backend/internal/middleware" "marktvogt.de/backend/internal/pkg/ai" @@ -89,6 +90,10 @@ func (s *Server) 
registerRoutes() { discoveryHandler := discovery.NewHandler(discoveryService, s.cfg.Discovery.CrawlerManualRateLimitPerHour) requireTickToken := middleware.RequireBearerToken(s.cfg.Discovery.Token) discovery.RegisterRoutes(v1, discoveryHandler, requireAuth, requireAdmin, requireTickToken) + + // AI settings routes + settingsHandler := settings.NewHandler(aiProvider) + settings.RegisterRoutes(v1, settingsHandler, requireAuth, requireAdmin) } func (s *Server) healthz(c *gin.Context) { diff --git a/web/src/lib/api/types.ts b/web/src/lib/api/types.ts index 98dd3cd..460edf0 100644 --- a/web/src/lib/api/types.ts +++ b/web/src/lib/api/types.ts @@ -183,6 +183,15 @@ export interface SubmitMarketRequest { turnstile_token: string; } +// AI settings +export interface AIStatus { + provider: string; + connected: boolean; + base_url?: string; + model: string; + models: string[]; +} + // AI Research types export interface ResearchResult { suggestions: FieldSuggestion[]; diff --git a/web/src/routes/admin/+layout.svelte b/web/src/routes/admin/+layout.svelte index b7c6998..b07a3e4 100644 --- a/web/src/routes/admin/+layout.svelte +++ b/web/src/routes/admin/+layout.svelte @@ -10,7 +10,8 @@ const navItems = [ { href: '/admin/maerkte', label: 'Märkte' }, - { href: '/admin/discovery', label: 'Discovery' } + { href: '/admin/discovery', label: 'Discovery' }, + { href: '/admin/einstellungen', label: 'Einstellungen' } ]; function isActive(href: string): boolean { diff --git a/web/src/routes/admin/einstellungen/+page.server.ts b/web/src/routes/admin/einstellungen/+page.server.ts new file mode 100644 index 0000000..01b4f77 --- /dev/null +++ b/web/src/routes/admin/einstellungen/+page.server.ts @@ -0,0 +1,30 @@ +import { fail } from '@sveltejs/kit'; +import { serverFetch } from '$lib/api/client.server.js'; +import type { AIStatus } from '$lib/api/types.js'; +import type { Actions, PageServerLoad } from './$types.js'; + +export const load: PageServerLoad = async ({ cookies, fetch }) => { + const 
res = await serverFetch('/admin/settings/ai', cookies, { fetch }); + return { ai: res.data as AIStatus }; +}; + +export const actions: Actions = { + setModel: async ({ cookies, fetch, request }) => { + const data = await request.formData(); + const model = data.get('model'); + if (!model || typeof model !== 'string') { + return fail(400, { error: 'Kein Modell ausgewählt.' }); + } + try { + await serverFetch('/admin/settings/ai/model', cookies, { + method: 'POST', + body: JSON.stringify({ model }), + fetch + }); + return { success: true, model }; + } catch (err) { + const message = err instanceof Error ? err.message : 'Modell konnte nicht gesetzt werden.'; + return fail(500, { error: message }); + } + } +}; diff --git a/web/src/routes/admin/einstellungen/+page.svelte b/web/src/routes/admin/einstellungen/+page.svelte new file mode 100644 index 0000000..46e8aad --- /dev/null +++ b/web/src/routes/admin/einstellungen/+page.svelte @@ -0,0 +1,136 @@ + + +
+
+

Einstellungen

+

+ Systemkonfiguration und KI-Provider +

+
+ +
+
+

KI-Provider

+
+ +
+ +
+
+ Provider +

+ {data.ai.provider} +

+
+ + {#if data.ai.base_url} +
+ URL +

+ {data.ai.base_url} +

+
+ {/if} + +
+ {#if data.ai.connected} + + + Verbunden + + {:else} + + + Nicht erreichbar + + {/if} +
+
+ + + {#if data.ai.provider === 'ollama'} +
+ Aktives Modell + + {#if data.ai.connected && data.ai.models.length > 0} +
{ + saving = true; + return async ({ update }) => { + await update(); + saving = false; + }; + }} + class="mt-2 flex items-center gap-3" + > + + + +
+ + {#if form?.success} +

+ Modell auf {form.model} gesetzt. +

+ {/if} + {#if form?.error} +

{form.error}

+ {/if} + {:else if data.ai.connected} +

 Keine Modelle gefunden. Installiere ein Modell mit + + ollama pull &lt;model&gt; + +

+ {:else} +

+ {data.ai.model || '—'} +

+

+ Ollama nicht erreichbar — prüfe ob der Dienst läuft. +

+ {/if} +
+ {/if} +
+
+