feat: add .env.example and configuration documentation #1
.env.example (new file, +28 lines)
@@ -0,0 +1,28 @@
+# ===========================================
+# Vessel Configuration
+# ===========================================
+# Copy this file to .env and adjust values as needed.
+# All variables have sensible defaults - only set what you need to change.
+
+# ----- Backend -----
+# Server port (default: 8080, but 9090 recommended for local dev)
+PORT=9090
+
+# SQLite database path (relative to backend working directory)
+DB_PATH=./data/vessel.db
+
+# Ollama API endpoint
+OLLAMA_URL=http://localhost:11434
+
+# GitHub repo for version checking (format: owner/repo)
+GITHUB_REPO=VikingOwl91/vessel
+
+# ----- Frontend -----
+# Ollama API endpoint (for frontend proxy)
+OLLAMA_API_URL=http://localhost:11434
+
+# Backend API endpoint
+BACKEND_URL=http://localhost:9090
+
+# Development server port
+DEV_PORT=7842
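For context, this is how a Node/TypeScript backend could consume the backend variables above, falling back to the documented defaults. It is a sketch only; the `config` object and its field names are illustrative, not code from this PR.

```ts
// Sketch: read the backend variables from the environment, with the
// defaults documented in .env.example. Names here are hypothetical.
const config = {
  port: Number(process.env.PORT ?? 8080),
  dbPath: process.env.DB_PATH ?? './data/vessel.db',
  ollamaUrl: process.env.OLLAMA_URL ?? 'http://localhost:11434',
  githubRepo: process.env.GITHUB_REPO ?? 'VikingOwl91/vessel',
};
```

The hunks below (from the summary-generation module; the file path is not shown in this excerpt) switch the frontend code from the direct Ollama URL to the proxy path.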
@@ -15,7 +15,7 @@ import { indexConversationMessages } from './chat-indexer.js';
 export interface SummaryGenerationOptions {
   /** Model to use for summary generation */
   model: string;
-  /** Base URL for Ollama API */
+  /** Base URL for Ollama API (default: /api/v1/ollama, uses proxy) */
   baseUrl?: string;
   /** Maximum messages to include in summary context */
   maxMessages?: number;
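A hypothetical call site, to illustrate the optional `baseUrl`; none of this appears in the diff, and the model name is made up:

```ts
// Omit baseUrl to get the new '/api/v1/ollama' proxy default; pass it
// explicitly only when talking to Ollama directly (e.g. outside the proxy).
const summary = await generateConversationSummary(messages, {
  model: 'llama3',                    // assumed model name
  baseUrl: 'http://localhost:11434', // optional override
  maxMessages: 20,
});
```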
@@ -37,7 +37,7 @@ export async function generateConversationSummary(
   messages: Message[],
   options: SummaryGenerationOptions
 ): Promise<string> {
-  const { model, baseUrl = 'http://localhost:11434', maxMessages = 20 } = options;
+  const { model, baseUrl = '/api/v1/ollama', maxMessages = 20 } = options;
 
   // Filter to user and assistant messages only
   const relevantMessages = messages
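The new default only works if something serves /api/v1/ollama and forwards it to Ollama. That proxy is not part of this diff, but if the frontend dev server is Vite, the rule could look like the following sketch (Vite itself, the path rewrite, and the env handling are all assumptions, not part of this PR):

```ts
// vite.config.ts (sketch): map the assumed /api/v1/ollama route to the
// OLLAMA_API_URL endpoint from .env.example. Adjust to the real dev server.
import { defineConfig, loadEnv } from 'vite';

export default defineConfig(({ mode }) => {
  const env = loadEnv(mode, process.cwd(), '');
  return {
    server: {
      port: Number(env.DEV_PORT ?? 7842),
      proxy: {
        '/api/v1/ollama': {
          target: env.OLLAMA_API_URL ?? 'http://localhost:11434',
          changeOrigin: true,
          // Strip the proxy prefix so Ollama sees its native /api/* paths.
          rewrite: (path) => path.replace(/^\/api\/v1\/ollama/, ''),
        },
      },
    },
  };
});
```

A relative default also avoids baking http://localhost:11434 into built frontend assets, which appears to be the intent of the comment change in the first hunk.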