# ===========================================
# Vessel Configuration
# ===========================================
# Copy this file to .env and adjust values as needed.
# All variables have sensible defaults; only set what you need to change.

# ----- Backend -----

# Server port (default: 9090 for local dev, matches the Vite proxy)
PORT=9090

# SQLite database path (relative to backend working directory)
DB_PATH=./data/vessel.db

# Ollama API endpoint
OLLAMA_URL=http://localhost:11434

# GitHub repo for version checking (format: owner/repo)
GITHUB_REPO=VikingOwl91/vessel

# ----- Frontend -----

# Ollama API endpoint (for frontend proxy)
OLLAMA_API_URL=http://localhost:11434

# Backend API endpoint
BACKEND_URL=http://localhost:9090

# Development server port
DEV_PORT=7842

# ----- llama.cpp -----

# llama.cpp server port (used by `just llama-server`)
LLAMA_PORT=8081

# ----- Additional Ports (for health checks) -----

# Ollama port (extracted from OLLAMA_URL for health checks)
OLLAMA_PORT=11434

# ----- Models -----

# Directory for GGUF model files
VESSEL_MODELS_DIR=~/.vessel/models
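
# ----- Usage (sketch) -----
# A minimal getting-started example; this assumes the file lives at the repo
# root as .env.example and that commands are run from that directory:
#
#   cp .env.example .env
#   $EDITOR .env    # override only the values you need to change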