services:
  # Ollama WebUI Frontend
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    ports:
      - "7842:3000"
    environment:
      - OLLAMA_API_URL=http://ollama:11434
      - BACKEND_URL=http://backend:9090
    depends_on:
      - ollama
      - backend
    networks:
      - ollama-network
    restart: unless-stopped

  # Go Backend API
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    ports:
      - "9090:9090"
    environment:
      - OLLAMA_URL=http://ollama:11434
      - PORT=9090
    volumes:
      - backend-data:/app/data
    depends_on:
      - ollama
    networks:
      - ollama-network
    restart: unless-stopped

  # Ollama LLM Server
  ollama:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      - ollama-data:/root/.ollama
    networks:
      - ollama-network
    restart: unless-stopped
    # Uncomment for GPU support (NVIDIA)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: all
    #           capabilities: [gpu]

networks:
  ollama-network:
    driver: bridge

volumes:
  ollama-data:
  backend-data:
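
# A possible first-run workflow (a sketch; the model name "llama3" is an
# assumption -- substitute whichever model your setup uses):
#
#   docker compose up -d
#   docker compose exec ollama ollama pull llama3
#
# Once up, the frontend is published on http://localhost:7842 and the
# backend API on http://localhost:9090; services reach each other over
# ollama-network using their service names (ollama, backend, frontend).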