name: vessel

services:
  # Vessel Frontend
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    ports:
      - "7842:3000"
    environment:
      - OLLAMA_API_URL=http://ollama:11434
      - BACKEND_URL=http://backend:9090
    networks:
      - vessel-network
    restart: unless-stopped

  # Vessel Backend API
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    ports:
      - "9090:9090"
    environment:
      - OLLAMA_URL=http://ollama:11434
      - PORT=9090
    volumes:
      - backend-data:/app/data
    networks:
      - vessel-network
    restart: unless-stopped

  # Ollama LLM Server
  ollama:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      - ollama-data:/root/.ollama
    networks:
      - vessel-network
    restart: unless-stopped
    # Uncomment for GPU support (NVIDIA)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]

networks:
  vessel-network:
    driver: bridge

volumes:
  ollama-data:
  backend-data:
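
# A minimal usage sketch (assumes the Docker Compose v2 CLI and that ./frontend
# and ./backend contain the Dockerfiles referenced above):
#
#   docker compose up -d --build                      # build images and start all three services
#   docker compose exec ollama ollama pull <model>    # pull a model of your choice into the ollama-data volume
#
# With the port mappings above, the frontend is reachable at http://localhost:7842,
# the backend API at http://localhost:9090, and Ollama at http://localhost:11434.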