Release v0.7.1 #10
@@ -21,7 +21,7 @@ import (
 )
 
 // Version is set at build time via -ldflags, or defaults to dev
-var Version = "0.7.0"
+var Version = "0.7.1"
 
 func getEnvOrDefault(key, defaultValue string) string {
     if value := os.Getenv(key); value != "" {
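
Note: "set at build time via -ldflags" refers to the Go linker's -X flag, e.g. go build -ldflags "-X main.Version=0.7.1". The main.Version path here is an illustrative assumption; the actual flag must name whichever package declares Version.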
@@ -372,9 +372,44 @@ func (a *Adapter) convertChatRequest(req *backends.ChatRequest) map[string]interface{}
     messages := make([]map[string]interface{}, len(req.Messages))
     for i, msg := range req.Messages {
         m := map[string]interface{}{
-            "role":    msg.Role,
-            "content": msg.Content,
+            "role": msg.Role,
         }
+
+        // Handle messages with images (vision support)
+        if len(msg.Images) > 0 {
+            // Build content as array of parts for multimodal messages
+            contentParts := make([]map[string]interface{}, 0, len(msg.Images)+1)
+
+            // Add text part if content is not empty
+            if msg.Content != "" {
+                contentParts = append(contentParts, map[string]interface{}{
+                    "type": "text",
+                    "text": msg.Content,
+                })
+            }
+
+            // Add image parts
+            for _, img := range msg.Images {
+                // Images are expected as base64 data URLs or URLs
+                imageURL := img
+                if !strings.HasPrefix(img, "http://") && !strings.HasPrefix(img, "https://") && !strings.HasPrefix(img, "data:") {
+                    // Assume base64 encoded image, default to JPEG
+                    imageURL = "data:image/jpeg;base64," + img
+                }
+                contentParts = append(contentParts, map[string]interface{}{
+                    "type": "image_url",
+                    "image_url": map[string]interface{}{
+                        "url": imageURL,
+                    },
+                })
+            }
+
+            m["content"] = contentParts
+        } else {
+            // Plain text message
+            m["content"] = msg.Content
+        }
+
         if msg.Name != "" {
             m["name"] = msg.Name
         }
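
Reviewer note: a self-contained sketch of what the new vision branch emits for one message. Message is a hypothetical stand-in for backends.Message, trimmed to the fields the loop above touches, and the base64 payload is dummy data:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    // Message mimics the fields of backends.Message used above;
    // the real type may carry more.
    type Message struct {
        Role    string
        Content string
        Images  []string
    }

    func main() {
        msg := Message{
            Role:    "user",
            Content: "What is in this picture?",
            Images:  []string{"/9j/4AAQSkZJRg"}, // dummy raw base64, no data: prefix
        }

        // Mirrors the PR's branch for messages with images.
        parts := make([]map[string]interface{}, 0, len(msg.Images)+1)
        if msg.Content != "" {
            parts = append(parts, map[string]interface{}{"type": "text", "text": msg.Content})
        }
        for _, img := range msg.Images {
            url := img
            if !strings.HasPrefix(img, "http://") && !strings.HasPrefix(img, "https://") && !strings.HasPrefix(img, "data:") {
                url = "data:image/jpeg;base64," + img // bare base64 is assumed to be JPEG
            }
            parts = append(parts, map[string]interface{}{
                "type":      "image_url",
                "image_url": map[string]interface{}{"url": url},
            })
        }

        out, _ := json.MarshalIndent(map[string]interface{}{"role": msg.Role, "content": parts}, "", "  ")
        fmt.Println(string(out))
    }

The printed message is an OpenAI-style content-parts array (type "text" / "image_url"), which is the shape vision-capable OpenAI-compatible backends accept.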
@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "net/http"
+    "os"
     "sync"
     "time"
 )
@@ -197,13 +198,25 @@ func probeEndpoint(ctx context.Context, endpoint DiscoveryEndpoint) DiscoveryRes
     return result
 }
 
-// DefaultDiscoveryEndpoints returns the default endpoints to probe
+// getEnvOrDefault returns the environment variable value or a default
+func getEnvOrDefault(key, defaultValue string) string {
+    if value := os.Getenv(key); value != "" {
+        return value
+    }
+    return defaultValue
+}
+
+// DefaultDiscoveryEndpoints returns the default endpoints to probe.
+// URLs can be overridden via environment variables (useful for Docker).
 func DefaultDiscoveryEndpoints() []DiscoveryEndpoint {
+    ollamaURL := getEnvOrDefault("OLLAMA_URL", "http://localhost:11434")
+    llamacppURL := getEnvOrDefault("LLAMACPP_URL", "http://localhost:8081")
+    lmstudioURL := getEnvOrDefault("LMSTUDIO_URL", "http://localhost:1234")
+
     return []DiscoveryEndpoint{
-        {Type: BackendTypeOllama, BaseURL: "http://localhost:11434"},
-        {Type: BackendTypeLlamaCpp, BaseURL: "http://localhost:8081"},
-        {Type: BackendTypeLlamaCpp, BaseURL: "http://localhost:8080"},
-        {Type: BackendTypeLMStudio, BaseURL: "http://localhost:1234"},
+        {Type: BackendTypeOllama, BaseURL: ollamaURL},
+        {Type: BackendTypeLlamaCpp, BaseURL: llamacppURL},
+        {Type: BackendTypeLMStudio, BaseURL: lmstudioURL},
     }
 }
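
Reviewer note: the override is easy to sanity-check in isolation. getEnvOrDefault below is copied verbatim from the hunk above; the main function is throwaway scaffolding:

    package main

    import (
        "fmt"
        "os"
    )

    // getEnvOrDefault returns the environment variable value or a default
    // (verbatim from the discovery change above).
    func getEnvOrDefault(key, defaultValue string) string {
        if value := os.Getenv(key); value != "" {
            return value
        }
        return defaultValue
    }

    func main() {
        // Unset: falls back to the default.
        os.Unsetenv("OLLAMA_URL")
        fmt.Println(getEnvOrDefault("OLLAMA_URL", "http://localhost:11434"))

        // Set, as docker-compose does below: the env value wins.
        os.Setenv("OLLAMA_URL", "http://host.docker.internal:11434")
        fmt.Println(getEnvOrDefault("OLLAMA_URL", "http://localhost:11434"))
    }

One behavioral change in the same hunk worth flagging: the old list also probed llama.cpp on :8080 as a fallback; after this change only the single configured llamacppURL is tried.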
@@ -26,6 +26,8 @@ services:
       - "9090:9090"
     environment:
       - OLLAMA_URL=http://host.docker.internal:11434
+      - LLAMACPP_URL=http://host.docker.internal:8081
+      - LMSTUDIO_URL=http://host.docker.internal:1234
       - PORT=9090
     extra_hosts:
       - "host.docker.internal:host-gateway"
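
Note: the extra_hosts entry maps host.docker.internal to the Docker host's gateway IP (the host-gateway keyword, available since Docker 20.10), which is what lets the containerized proxy reach Ollama, llama.cpp, and LM Studio running on the host via the three URLs above.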
@@ -1,6 +1,6 @@
 {
   "name": "vessel",
-  "version": "0.7.0",
+  "version": "0.7.1",
   "private": true,
   "type": "module",
   "scripts": {
justfile
@@ -64,13 +64,13 @@ dev-logs:
 models:
     @ls -lh {{models_dir}}/*.gguf 2>/dev/null || echo "No models found in {{models_dir}}"
 
-# Start llama.cpp server with a model
+# Start llama.cpp server with a model (--host 0.0.0.0 for Docker access)
 llama-server model:
-    llama-server -m {{models_dir}}/{{model}} --port {{llama_port}} -c 8192 -ngl 99
+    llama-server -m {{models_dir}}/{{model}} --host 0.0.0.0 --port {{llama_port}} -c 8192 -ngl 99
 
 # Start llama.cpp server with custom settings
 llama-server-custom model port ctx gpu:
-    llama-server -m {{models_dir}}/{{model}} --port {{port}} -c {{ctx}} -ngl {{gpu}}
+    llama-server -m {{models_dir}}/{{model}} --host 0.0.0.0 --port {{port}} -c {{ctx}} -ngl {{gpu}}
 
 # Start Docker dev + llama.cpp server
 all model: dev-detach
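
Reviewer note: the --host 0.0.0.0 change matters because llama-server binds 127.0.0.1 by default, which is unreachable from inside the compose container. A throwaway Go probe to confirm the bind from the container side; /health is llama.cpp's built-in health route, and the URL assumes the compose mapping above:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Timeout: 2 * time.Second}

        // Succeeds only when llama-server was started with --host 0.0.0.0;
        // with the default 127.0.0.1 bind the connection is refused.
        resp, err := client.Get("http://host.docker.internal:8081/health")
        if err != nil {
            fmt.Println("llama.cpp unreachable:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("llama.cpp health:", resp.Status)
    }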