feat: require local Ollama, remove Docker Ollama option
Simplify setup by requiring a local Ollama installation:

- docker-compose.yml now connects to host Ollama via host.docker.internal
- Remove the ollama service (and its ollama-data volume) from compose
- install.sh now requires Ollama to be installed
- Update README with clear prerequisites
- Add Docker Ollama support to the roadmap as future work

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
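The key mechanism behind this change is Docker's `host-gateway` mapping: adding `host.docker.internal:host-gateway` to `extra_hosts` lets containers reach services on the host, which Docker Desktop provides automatically but Linux needs spelled out. A quick sanity check of that assumption, using the port from this repo's compose file (the `curlimages/curl` image is just a convenient throwaway, not part of the project):

```bash
# Ollama must be reachable on the host first
curl -s http://localhost:11434/api/tags >/dev/null && echo "host Ollama: OK"

# Simulate what the frontend/backend containers will do after this change:
# resolve host.docker.internal via the host-gateway mapping and hit Ollama
docker run --rm --add-host=host.docker.internal:host-gateway \
  curlimages/curl -s http://host.docker.internal:11434/api/tags >/dev/null \
  && echo "container -> host Ollama: OK"
```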
README.md (46 lines changed)
@@ -109,6 +109,11 @@ Vessel includes five powerful tools that models can invoke automatically:
 
 ## Quick Start
 
+### Prerequisites
+
+- [Docker](https://docs.docker.com/get-docker/) and Docker Compose
+- [Ollama](https://ollama.com/download) installed and running locally
+
 ### One-Line Install
 
 ```bash
@@ -124,9 +129,8 @@ cd vessel
 ```
 
 The installer will:
-- Check for Docker and Docker Compose
-- Detect if you have Ollama installed locally (and let you choose)
-- Start all services
+- Check for Docker, Docker Compose, and Ollama
+- Start the frontend and backend services
 - Optionally pull a starter model (llama3.2)
 
 Once running, open **http://localhost:7842** in your browser.
@@ -145,39 +149,22 @@ The install script handles everything automatically:
 ./install.sh --uninstall # Remove installation
 ```
 
-**Features:**
-- Detects local Ollama installation
-- Configures Docker networking automatically
-- Works on Linux and macOS
+**Requirements:**
+- Ollama must be installed and running locally
+- Docker and Docker Compose
+- Linux or macOS
 
 ### Option 2: Docker Compose (Manual)
 
 ```bash
+# Make sure Ollama is running first
+ollama serve
+
+# Start Vessel
 docker compose up -d
 ```
 
-#### With GPU Support (NVIDIA)
-
-Uncomment the GPU section in `docker-compose.yml`:
-
-```yaml
-ollama:
-  image: ollama/ollama:latest
-  deploy:
-    resources:
-      reservations:
-        devices:
-          - driver: nvidia
-            count: all
-            capabilities: [gpu]
-```
-
-Then run:
-```bash
-docker compose up -d
-```
-
-### Option 2: Manual Setup
+### Option 3: Manual Setup (Development)
 
 #### Prerequisites
 - [Node.js](https://nodejs.org/) 20+
@@ -344,6 +331,7 @@ All requests to `/ollama/*` are proxied to the Ollama API, enabling CORS.
 
 ## Roadmap
 
+- [ ] Docker Ollama support (for systems without local Ollama)
 - [ ] Image generation (Stable Diffusion, Hugging Face models)
 - [ ] Hugging Face integration
 - [ ] Voice input/output
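One caveat the new prerequisites imply but the diff does not spell out: on Linux, Ollama's stock systemd service binds to 127.0.0.1, so a container coming in through the host-gateway address can be refused even though `ollama serve` answers locally. A hedged fix, assuming the systemd unit created by Ollama's Linux installer:

```bash
# Make Ollama listen on all interfaces so containers can reach it
# (assumes the systemd unit from Ollama's Linux installer)
sudo systemctl edit ollama
# add in the drop-in that opens:
#   [Service]
#   Environment="OLLAMA_HOST=0.0.0.0"
sudo systemctl restart ollama
curl -s http://localhost:11434/api/tags >/dev/null && echo "OK"
```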
docker-compose.yml

@@ -9,8 +9,10 @@ services:
     ports:
       - "7842:3000"
     environment:
-      - OLLAMA_API_URL=http://ollama:11434
+      - OLLAMA_API_URL=http://host.docker.internal:11434
       - BACKEND_URL=http://backend:9090
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
     networks:
       - vessel-network
     restart: unless-stopped
@@ -23,37 +25,19 @@ services:
     ports:
       - "9090:9090"
     environment:
-      - OLLAMA_URL=http://ollama:11434
+      - OLLAMA_URL=http://host.docker.internal:11434
       - PORT=9090
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
     volumes:
       - backend-data:/app/data
     networks:
       - vessel-network
     restart: unless-stopped
 
-  # Ollama LLM Server
-  ollama:
-    image: ollama/ollama:latest
-    ports:
-      - "11434:11434"
-    volumes:
-      - ollama-data:/root/.ollama
-    networks:
-      - vessel-network
-    restart: unless-stopped
-    # Uncomment for GPU support (NVIDIA)
-    # deploy:
-    #   resources:
-    #     reservations:
-    #       devices:
-    #         - driver: nvidia
-    #           count: all
-    #           capabilities: [gpu]
-
 networks:
   vessel-network:
     driver: bridge
 
 volumes:
-  ollama-data:
   backend-data:
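For anyone upgrading an existing install that ran Ollama in Docker, a plausible cleanup sequence (the volume name depends on your compose project name, so treat `vessel_ollama-data` as an assumption):

```bash
docker compose down                  # stops and removes the old ollama container
rm -f docker-compose.override.yml    # stale override written by older install.sh runs
docker volume rm vessel_ollama-data  # optional: reclaim the old model storage
docker compose up -d                 # restart against host Ollama
```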
install.sh (114 lines changed)
@@ -156,38 +156,22 @@ detect_os() {
 # Ollama Detection
 # =============================================================================
 
-detect_ollama() {
+check_ollama() {
     info "Checking for local Ollama installation..."
 
-    OLLAMA_LOCAL=false
-
-    # Check if ollama command exists
-    if check_command ollama; then
-        # Check if Ollama is responding on default port
-        if curl -s --connect-timeout 2 "http://localhost:${OLLAMA_PORT}/api/tags" &> /dev/null; then
-            OLLAMA_LOCAL=true
-            success "Local Ollama detected and running on port ${OLLAMA_PORT}"
-        else
-            warn "Ollama is installed but not running"
-        fi
-    else
-        info "No local Ollama installation found"
+    if ! check_command ollama; then
+        fatal "Ollama is not installed. Please install Ollama first: https://ollama.com/download"
     fi
-}
 
-prompt_ollama_mode() {
-    if [[ "$OLLAMA_LOCAL" == true ]]; then
-        echo ""
-        if prompt_yes_no "Use your local Ollama installation?" "y"; then
-            USE_SYSTEM_OLLAMA=true
-            info "Will use system Ollama on localhost:${OLLAMA_PORT}"
-        else
-            USE_SYSTEM_OLLAMA=false
-            info "Will run Ollama in Docker"
-        fi
+    # Check if Ollama is responding on default port
+    if curl -s --connect-timeout 2 "http://localhost:${OLLAMA_PORT}/api/tags" &> /dev/null; then
+        success "Ollama is running on port ${OLLAMA_PORT}"
     else
-        USE_SYSTEM_OLLAMA=false
-        info "Will run Ollama in Docker container"
+        warn "Ollama is installed but not running. Please start it with: ollama serve"
+        if ! prompt_yes_no "Continue anyway?" "n"; then
+            exit 1
+        fi
     fi
 }
 
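`check_command`, `prompt_yes_no`, and `fatal` are helpers defined elsewhere in install.sh and not shown in this diff; minimal sketches of the shapes this hunk assumes:

```bash
# Assumed helper shapes -- the real definitions live elsewhere in install.sh
check_command() {
    command -v "$1" &> /dev/null
}

fatal() {
    echo "ERROR: $*" >&2
    exit 1
}

prompt_yes_no() {
    local prompt=$1 default=${2:-y} reply
    read -r -p "$prompt [y/n] ($default): " reply
    [[ "${reply:-$default}" =~ ^[Yy] ]]
}
```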
@@ -217,52 +201,12 @@ clone_repository() {
     success "Repository cloned"
 }
 
-setup_compose_override() {
-    local override_file="docker-compose.override.yml"
-
-    if [[ "$USE_SYSTEM_OLLAMA" == true ]]; then
-        info "Configuring for system Ollama..."
-
-        cat > "$override_file" << 'EOF'
-# Auto-generated by install.sh - System Ollama mode
-# Delete this file to use Docker Ollama instead
-
-services:
-  ollama:
-    # Disable the ollama service when using system Ollama
-    profiles: ["disabled"]
-
-  frontend:
-    environment:
-      - OLLAMA_API_URL=http://host.docker.internal:11434
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-
-  backend:
-    environment:
-      - OLLAMA_URL=http://host.docker.internal:11434
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-EOF
-        success "Created $override_file for system Ollama"
-    else
-        # Remove override if exists (use Docker Ollama)
-        if [[ -f "$override_file" ]]; then
-            rm "$override_file"
-            info "Removed existing $override_file"
-        fi
-    fi
-}
 
 check_port_available() {
     local port=$1
     local name=$2
 
     if lsof -i :"$port" &> /dev/null || ss -tuln 2>/dev/null | grep -q ":$port "; then
-        if [[ "$port" == "$OLLAMA_PORT" && "$USE_SYSTEM_OLLAMA" == true ]]; then
-            # Expected - system Ollama is using this port
-            return 0
-        fi
         warn "Port $port ($name) is already in use"
         return 1
     fi
@@ -281,12 +225,6 @@ check_ports() {
         has_conflict=true
     fi
 
-    if [[ "$USE_SYSTEM_OLLAMA" != true ]]; then
-        if ! check_port_available $OLLAMA_PORT "ollama"; then
-            has_conflict=true
-        fi
-    fi
-
     if [[ "$has_conflict" == true ]]; then
         if ! prompt_yes_no "Continue anyway?" "n"; then
             fatal "Aborted due to port conflicts"
@@ -347,14 +285,8 @@ prompt_pull_model() {
 
     # Check if any models are available
     local has_models=false
-    if [[ "$USE_SYSTEM_OLLAMA" == true ]]; then
-        if ollama list 2>/dev/null | grep -q "NAME"; then
-            has_models=true
-        fi
-    else
-        if $COMPOSE_CMD exec -T ollama ollama list 2>/dev/null | grep -q "NAME"; then
-            has_models=true
-        fi
+    if ollama list 2>/dev/null | grep -q "NAME"; then
+        has_models=true
     fi
 
     if [[ "$has_models" == true ]]; then
@@ -365,21 +297,13 @@ prompt_pull_model() {
     else
         if ! prompt_yes_no "Pull starter model ($DEFAULT_MODEL)?" "y"; then
             warn "No models available. Pull a model manually:"
-            if [[ "$USE_SYSTEM_OLLAMA" == true ]]; then
-                echo "  ollama pull $DEFAULT_MODEL"
-            else
-                echo "  $COMPOSE_CMD exec ollama ollama pull $DEFAULT_MODEL"
-            fi
+            echo "  ollama pull $DEFAULT_MODEL"
             return
         fi
     fi
 
     info "Pulling $DEFAULT_MODEL (this may take a while)..."
-    if [[ "$USE_SYSTEM_OLLAMA" == true ]]; then
-        ollama pull "$DEFAULT_MODEL"
-    else
-        $COMPOSE_CMD exec -T ollama ollama pull "$DEFAULT_MODEL"
-    fi
+    ollama pull "$DEFAULT_MODEL"
     success "Model $DEFAULT_MODEL is ready"
 }
 
@@ -401,12 +325,6 @@ print_success() {
     echo -e " Update: ${CYAN}cd $VESSEL_DIR && ./install.sh --update${NC}"
     echo -e " Pull model: ${CYAN}ollama pull <model>${NC}"
     echo ""
-    if [[ "$USE_SYSTEM_OLLAMA" == true ]]; then
-        echo -e " ${BOLD}Ollama:${NC} Using system installation"
-    else
-        echo -e " ${BOLD}Ollama:${NC} Running in Docker"
-    fi
-    echo ""
 }
 
 # =============================================================================
@@ -490,10 +408,8 @@ main() {
     print_banner
     check_prerequisites
     detect_os
+    check_ollama
     clone_repository
-    detect_ollama
-    prompt_ollama_mode
-    setup_compose_override
     check_ports
     start_services
     wait_for_health
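Taken together, the new flow is: install Ollama yourself, then run the installer, which now fails fast instead of offering a Docker fallback. Roughly (the Ollama one-liner is from Ollama's own docs, not this diff):

```bash
curl -fsSL https://ollama.com/install.sh | sh   # install Ollama (Linux)
ollama serve &                                  # or let the systemd service run it
./install.sh                                    # check_ollama aborts if ollama is missing
```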