refactor(core): remove provider module, migrate to llm::LlmProvider, add client mode handling, improve serialization error handling, update workspace edition, and clean up conditionals and imports

2025-10-12 12:38:55 +02:00
parent c2f5ccea3b
commit 7851af14a9
63 changed files with 2221 additions and 1236 deletions
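Note: the core of this refactor is replacing the old `provider::LLMProvider` trait with `llm::LlmProvider` and renaming `chat`/`chat_stream` to `send_prompt`/`stream_prompt`. The trait definition itself is not part of the file shown below; the sketch here is inferred from the `OllamaProvider` impl in this diff, so the exact method set, the `Send` bounds, and the `list_models` name are assumptions rather than the actual contents of the `llm` module.

// Approximate shape of the renamed trait, reconstructed from the impl below.
use std::future::Future;

use futures::Stream;

use crate::{
    Result,
    types::{ChatRequest, ChatResponse, ModelInfo},
};

pub trait LlmProvider {
    /// Chunk stream returned by `stream_prompt`.
    type Stream: Stream<Item = Result<ChatResponse>> + Send;

    type ListModelsFuture<'a>: Future<Output = Result<Vec<ModelInfo>>> + Send
    where
        Self: 'a;
    type SendPromptFuture<'a>: Future<Output = Result<ChatResponse>> + Send
    where
        Self: 'a;
    type StreamPromptFuture<'a>: Future<Output = Result<Self::Stream>> + Send
    where
        Self: 'a;

    /// Enumerate the models the provider exposes (name assumed from ListModelsFuture).
    fn list_models(&self) -> Self::ListModelsFuture<'_>;

    /// One-shot request/response; replaces the old `chat` method.
    fn send_prompt(&self, request: ChatRequest) -> Self::SendPromptFuture<'_>;

    /// Streaming variant; replaces the old `chat_stream` method.
    fn stream_prompt(&self, request: ChatRequest) -> Self::StreamPromptFuture<'_>;
}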


@@ -7,32 +7,32 @@ use std::{
 };
 use anyhow::anyhow;
-use futures::{future::join_all, future::BoxFuture, Stream, StreamExt};
+use futures::{Stream, StreamExt, future::BoxFuture, future::join_all};
 use log::{debug, warn};
 use ollama_rs::{
+    Ollama,
     error::OllamaError,
     generation::chat::{
-        request::ChatMessageRequest as OllamaChatRequest, ChatMessage as OllamaMessage,
-        ChatMessageResponse as OllamaChatResponse, MessageRole as OllamaRole,
+        ChatMessage as OllamaMessage, ChatMessageResponse as OllamaChatResponse,
+        MessageRole as OllamaRole, request::ChatMessageRequest as OllamaChatRequest,
     },
     generation::tools::{ToolCall as OllamaToolCall, ToolCallFunction as OllamaToolCallFunction},
-    headers::{HeaderMap, HeaderValue, AUTHORIZATION},
+    headers::{AUTHORIZATION, HeaderMap, HeaderValue},
     models::{LocalModel, ModelInfo as OllamaModelInfo, ModelOptions},
-    Ollama,
 };
 use reqwest::{Client, StatusCode, Url};
-use serde_json::{json, Map as JsonMap, Value};
+use serde_json::{Map as JsonMap, Value, json};
 use uuid::Uuid;
 use crate::{
+    Error, Result,
     config::GeneralSettings,
+    llm::{LlmProvider, ProviderConfig},
     mcp::McpToolDescriptor,
     model::{DetailedModelInfo, ModelDetailsCache, ModelManager},
-    provider::{LLMProvider, ProviderConfig},
     types::{
         ChatParameters, ChatRequest, ChatResponse, Message, ModelInfo, Role, TokenUsage, ToolCall,
     },
-    Error, Result,
 };
 const DEFAULT_TIMEOUT_SECS: u64 = 120;
@@ -292,13 +292,13 @@ impl OllamaProvider {
             );
         }
-        if let Some(descriptors) = &tools {
-            if !descriptors.is_empty() {
-                debug!(
-                    "Ignoring {} MCP tool descriptors for Ollama request (tool calling unsupported)",
-                    descriptors.len()
-                );
-            }
+        if let Some(descriptors) = &tools
+            && !descriptors.is_empty()
+        {
+            debug!(
+                "Ignoring {} MCP tool descriptors for Ollama request (tool calling unsupported)",
+                descriptors.len()
+            );
         }
         let converted_messages = messages.into_iter().map(convert_message).collect();
@@ -378,10 +378,10 @@ impl OllamaProvider {
         let family = pick_first_string(map, &["family", "model_family"]);
         let mut families = pick_string_list(map, &["families", "model_families"]);
-        if families.is_empty() {
-            if let Some(single) = family.clone() {
-                families.push(single);
-            }
+        if families.is_empty()
+            && let Some(single) = family.clone()
+        {
+            families.push(single);
         }
         let system = pick_first_string(map, &["system"]);
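Both conditional rewrites above collapse a nested `if let`/`if` pair into a single let chain, which is presumably why the commit also updates the workspace edition: `&& let` inside an `if` condition is only accepted on the Rust 2024 edition. A minimal, self-contained illustration of the pattern (names below are made up, not from this codebase):

// Requires `edition = "2024"` in Cargo.toml; let chains are rejected on older editions.
fn first_even(values: Option<Vec<u64>>) -> Option<u64> {
    // The nested `if let ... { if ... { ... } }` shape collapses into one condition chain,
    // mirroring the cleanup in this diff.
    if let Some(values) = values
        && !values.is_empty()
        && let Some(first) = values.iter().copied().find(|v| *v % 2 == 0)
    {
        return Some(first);
    }
    None
}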
@@ -529,32 +529,28 @@ impl OllamaProvider {
             StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => Error::Auth(format!(
                 "Ollama rejected the request ({status}): {detail}. Check your API key and account permissions."
             )),
-            StatusCode::BAD_REQUEST => Error::InvalidInput(format!(
-                "{action} rejected by Ollama ({status}): {detail}"
-            )),
+            StatusCode::BAD_REQUEST => {
+                Error::InvalidInput(format!("{action} rejected by Ollama ({status}): {detail}"))
+            }
             StatusCode::SERVICE_UNAVAILABLE | StatusCode::GATEWAY_TIMEOUT => Error::Timeout(
-                format!(
-                    "Ollama {action} timed out ({status}). The model may still be loading."
-                ),
+                format!("Ollama {action} timed out ({status}). The model may still be loading."),
             ),
-            _ => Error::Network(format!(
-                "Ollama {action} failed ({status}): {detail}"
-            )),
+            _ => Error::Network(format!("Ollama {action} failed ({status}): {detail}")),
         }
     }
 }
-impl LLMProvider for OllamaProvider {
+impl LlmProvider for OllamaProvider {
     type Stream = Pin<Box<dyn Stream<Item = Result<ChatResponse>> + Send>>;
     type ListModelsFuture<'a>
         = BoxFuture<'a, Result<Vec<ModelInfo>>>
     where
         Self: 'a;
-    type ChatFuture<'a>
+    type SendPromptFuture<'a>
         = BoxFuture<'a, Result<ChatResponse>>
     where
         Self: 'a;
-    type ChatStreamFuture<'a>
+    type StreamPromptFuture<'a>
         = BoxFuture<'a, Result<Self::Stream>>
     where
         Self: 'a;
@@ -575,7 +571,7 @@ impl LLMProvider for OllamaProvider {
         })
     }
-    fn chat(&self, request: ChatRequest) -> Self::ChatFuture<'_> {
+    fn send_prompt(&self, request: ChatRequest) -> Self::SendPromptFuture<'_> {
         Box::pin(async move {
             let ChatRequest {
                 model,
@@ -597,7 +593,7 @@ impl LLMProvider for OllamaProvider {
         })
     }
-    fn chat_stream(&self, request: ChatRequest) -> Self::ChatStreamFuture<'_> {
+    fn stream_prompt(&self, request: ChatRequest) -> Self::StreamPromptFuture<'_> {
         Box::pin(async move {
             let ChatRequest {
                 model,
@@ -926,11 +922,7 @@ fn value_to_u64(value: &Value) -> Option<u64> {
     } else if let Some(v) = num.as_i64() {
         v.try_into().ok()
    } else if let Some(v) = num.as_f64() {
-        if v >= 0.0 {
-            Some(v as u64)
-        } else {
-            None
-        }
+        if v >= 0.0 { Some(v as u64) } else { None }
     } else {
         None
     }
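Call sites elsewhere in the workspace move from `chat`/`chat_stream` to the new names. A minimal, hypothetical migration sketch, written generically over the trait so no concrete module paths have to be guessed (the helper function names and the `pin_mut!` usage are illustrative, not taken from this commit; the trait bounds assumed are those in the sketch near the top):

use futures::{StreamExt, pin_mut};

use crate::{
    Result,
    llm::LlmProvider,
    types::{ChatRequest, ChatResponse},
};

/// Was `provider.chat(request)` before this commit.
async fn one_shot<P: LlmProvider>(provider: &P, request: ChatRequest) -> Result<ChatResponse> {
    provider.send_prompt(request).await
}

/// Was `provider.chat_stream(request)` before this commit.
async fn streamed<P: LlmProvider>(provider: &P, request: ChatRequest) -> Result<()> {
    let stream = provider.stream_prompt(request).await?;
    // Pin on the stack so `next()` works without assuming `P::Stream: Unpin`.
    pin_mut!(stream);
    while let Some(chunk) = stream.next().await {
        let _partial: ChatResponse = chunk?; // each item is a Result<ChatResponse>
        // ...forward or accumulate the partial response here...
    }
    Ok(())
}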