- Remove depends_on from docker-compose.yml (services handle reconnection)
- This allows the override to disable ollama without errors
- Fix prompt display when running via curl | bash (print to stderr)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
60 lines
1.1 KiB
YAML
# Docker Compose stack for Vessel: a web frontend, a backend API, and an
# Ollama LLM server, all attached to a shared bridge network.
# Note: no depends_on — services handle reconnection themselves, so an
# override file can disable ollama without breaking startup.
name: vessel

services:
  # Vessel Frontend — built from ./frontend, published on host port 7842.
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    ports:
      - "7842:3000"
    environment:
      # Service-name URLs resolve over the shared vessel-network.
      - OLLAMA_API_URL=http://ollama:11434
      - BACKEND_URL=http://backend:9090
    networks:
      - vessel-network
    restart: unless-stopped

  # Vessel Backend API — built from ./backend, persists data in a named volume.
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    ports:
      - "9090:9090"
    environment:
      - OLLAMA_URL=http://ollama:11434
      - PORT=9090
    volumes:
      - backend-data:/app/data
    networks:
      - vessel-network
    restart: unless-stopped

  # Ollama LLM Server — model store persisted in the ollama-data volume.
  ollama:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      - ollama-data:/root/.ollama
    networks:
      - vessel-network
    restart: unless-stopped
    # Uncomment for GPU support (NVIDIA)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]

networks:
  vessel-network:
    driver: bridge

volumes:
  ollama-data:
  backend-data: