Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
| | a14219c6bb | |
| | 61bf8038d0 | |
@@ -21,7 +21,7 @@ import (
 )
 
 // Version is set at build time via -ldflags, or defaults to dev
-var Version = "0.7.1"
+var Version = "0.7.0"
 
 func getEnvOrDefault(key, defaultValue string) string {
 	if value := os.Getenv(key); value != "" {
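The comment in this hunk notes that Version is normally injected at build time via -ldflags. As a minimal, self-contained sketch of that mechanism (assuming the variable lives in package main; the actual file and package in this repo may differ, in which case the -X path changes to that package's import path plus .Version):

```go
// Build-time injection (assumed package main):
//
//	go build -ldflags "-X main.Version=0.7.1" .
//
// Without the override, the binary reports the default baked into the source.
package main

import "fmt"

// Version is replaced at link time by -ldflags -X; "dev" is the fallback.
var Version = "dev"

func main() {
	fmt.Println("vessel version:", Version)
}
```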
@@ -372,44 +372,9 @@ func (a *Adapter) convertChatRequest(req *backends.ChatRequest) map[string]inter
 	messages := make([]map[string]interface{}, len(req.Messages))
 	for i, msg := range req.Messages {
 		m := map[string]interface{}{
 			"role": msg.Role,
+			"content": msg.Content,
 		}
-
-		// Handle messages with images (vision support)
-		if len(msg.Images) > 0 {
-			// Build content as array of parts for multimodal messages
-			contentParts := make([]map[string]interface{}, 0, len(msg.Images)+1)
-
-			// Add text part if content is not empty
-			if msg.Content != "" {
-				contentParts = append(contentParts, map[string]interface{}{
-					"type": "text",
-					"text": msg.Content,
-				})
-			}
-
-			// Add image parts
-			for _, img := range msg.Images {
-				// Images are expected as base64 data URLs or URLs
-				imageURL := img
-				if !strings.HasPrefix(img, "http://") && !strings.HasPrefix(img, "https://") && !strings.HasPrefix(img, "data:") {
-					// Assume base64 encoded image, default to JPEG
-					imageURL = "data:image/jpeg;base64," + img
-				}
-				contentParts = append(contentParts, map[string]interface{}{
-					"type": "image_url",
-					"image_url": map[string]interface{}{
-						"url": imageURL,
-					},
-				})
-			}
-
-			m["content"] = contentParts
-		} else {
-			// Plain text message
-			m["content"] = msg.Content
-		}
-
 		if msg.Name != "" {
 			m["name"] = msg.Name
 		}
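For orientation, the left-hand (0.7.1) side of this hunk builds OpenAI-style multimodal content parts. A standalone sketch of the message shape it produces, with a made-up base64 placeholder and no dependency on the repo's types, looks roughly like this:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Hypothetical input: one user message with text plus a raw base64 image.
	content := "What is in this picture?"
	images := []string{"iVBORw0KGgoAAAANSUhEUg..."} // placeholder, not a real image

	parts := []map[string]interface{}{}
	if content != "" {
		parts = append(parts, map[string]interface{}{"type": "text", "text": content})
	}
	for _, img := range images {
		url := img
		// Bare base64 strings get wrapped as a data URL, defaulting to JPEG,
		// matching the behaviour shown in the diff above.
		if !strings.HasPrefix(img, "http://") && !strings.HasPrefix(img, "https://") && !strings.HasPrefix(img, "data:") {
			url = "data:image/jpeg;base64," + img
		}
		parts = append(parts, map[string]interface{}{
			"type":      "image_url",
			"image_url": map[string]interface{}{"url": url},
		})
	}

	msg := map[string]interface{}{"role": "user", "content": parts}
	out, _ := json.MarshalIndent(msg, "", "  ")
	fmt.Println(string(out))
}
```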
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"net/http"
-	"os"
 	"sync"
 	"time"
 )
@@ -198,25 +197,13 @@ func probeEndpoint(ctx context.Context, endpoint DiscoveryEndpoint) DiscoveryRes
 	return result
 }
 
-// getEnvOrDefault returns the environment variable value or a default
-func getEnvOrDefault(key, defaultValue string) string {
-	if value := os.Getenv(key); value != "" {
-		return value
-	}
-	return defaultValue
-}
-
-// DefaultDiscoveryEndpoints returns the default endpoints to probe.
-// URLs can be overridden via environment variables (useful for Docker).
+// DefaultDiscoveryEndpoints returns the default endpoints to probe
 func DefaultDiscoveryEndpoints() []DiscoveryEndpoint {
-	ollamaURL := getEnvOrDefault("OLLAMA_URL", "http://localhost:11434")
-	llamacppURL := getEnvOrDefault("LLAMACPP_URL", "http://localhost:8081")
-	lmstudioURL := getEnvOrDefault("LMSTUDIO_URL", "http://localhost:1234")
-
 	return []DiscoveryEndpoint{
-		{Type: BackendTypeOllama, BaseURL: ollamaURL},
-		{Type: BackendTypeLlamaCpp, BaseURL: llamacppURL},
-		{Type: BackendTypeLMStudio, BaseURL: lmstudioURL},
+		{Type: BackendTypeOllama, BaseURL: "http://localhost:11434"},
+		{Type: BackendTypeLlamaCpp, BaseURL: "http://localhost:8081"},
+		{Type: BackendTypeLlamaCpp, BaseURL: "http://localhost:8080"},
+		{Type: BackendTypeLMStudio, BaseURL: "http://localhost:1234"},
 	}
 }
 
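The left-hand side of these hunks routes the default discovery URLs through getEnvOrDefault so they can be overridden per environment. A small standalone sketch of that lookup behaviour, reimplementing the helper locally so it runs outside the repo:

```go
package main

import (
	"fmt"
	"os"
)

// Local copy of the helper from the diff: return the env value if set,
// otherwise the supplied default.
func getEnvOrDefault(key, defaultValue string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return defaultValue
}

func main() {
	// With the variables unset this prints the localhost defaults;
	// e.g. OLLAMA_URL=http://host.docker.internal:11434 overrides the first line.
	fmt.Println("ollama:  ", getEnvOrDefault("OLLAMA_URL", "http://localhost:11434"))
	fmt.Println("llamacpp:", getEnvOrDefault("LLAMACPP_URL", "http://localhost:8081"))
	fmt.Println("lmstudio:", getEnvOrDefault("LMSTUDIO_URL", "http://localhost:1234"))
}
```

With none of the variables set, the output matches the hardcoded localhost defaults on the right-hand side of the hunk.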
@@ -26,8 +26,6 @@ services:
       - "9090:9090"
     environment:
       - OLLAMA_URL=http://host.docker.internal:11434
-      - LLAMACPP_URL=http://host.docker.internal:8081
-      - LMSTUDIO_URL=http://host.docker.internal:1234
       - PORT=9090
     extra_hosts:
      - "host.docker.internal:host-gateway"
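The compose file points OLLAMA_URL at host.docker.internal so a containerized vessel can reach an Ollama instance running on the host. A hedged sanity-check sketch, not part of the repo, that reads the variable the way the compose file sets it and probes Ollama's standard /api/tags model-list endpoint:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

func main() {
	// Inside the container the compose file sets OLLAMA_URL to
	// http://host.docker.internal:11434; outside Docker it falls back to localhost.
	base := os.Getenv("OLLAMA_URL")
	if base == "" {
		base = "http://localhost:11434"
	}

	client := &http.Client{Timeout: 2 * time.Second}
	resp, err := client.Get(base + "/api/tags") // Ollama's model-list endpoint
	if err != nil {
		fmt.Println("ollama not reachable:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("ollama reachable, status:", resp.Status)
}
```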
@@ -1,6 +1,6 @@
 {
   "name": "vessel",
-  "version": "0.7.1",
+  "version": "0.7.0",
   "private": true,
   "type": "module",
   "scripts": {
justfile (6 lines changed)
@@ -64,13 +64,13 @@ dev-logs:
 models:
     @ls -lh {{models_dir}}/*.gguf 2>/dev/null || echo "No models found in {{models_dir}}"
 
-# Start llama.cpp server with a model (--host 0.0.0.0 for Docker access)
+# Start llama.cpp server with a model
 llama-server model:
-    llama-server -m {{models_dir}}/{{model}} --host 0.0.0.0 --port {{llama_port}} -c 8192 -ngl 99
+    llama-server -m {{models_dir}}/{{model}} --port {{llama_port}} -c 8192 -ngl 99
 
 # Start llama.cpp server with custom settings
 llama-server-custom model port ctx gpu:
-    llama-server -m {{models_dir}}/{{model}} --host 0.0.0.0 --port {{port}} -c {{ctx}} -ngl {{gpu}}
+    llama-server -m {{models_dir}}/{{model}} --port {{port}} -c {{ctx}} -ngl {{gpu}}
 
 # Start Docker dev + llama.cpp server
 all model: dev-detach