refactor(core)!: rename Provider to LLMProvider and update implementations

- Export `LLMProvider` from `owlen-core` and replace public `Provider` re-exports.
- Convert `OllamaProvider` to implement the new `LLMProvider` trait with associated future types (a sketch of the trait shape follows this list).
- Adjust imports and trait bounds in `remote_client.rs` to use the updated types.
- Add comprehensive provider interface tests (`provider_interface.rs`) verifying request routing through the router and model listing via the provider registry, using a `MockProvider`.
- Align dependency versions across the workspace by switching crates to workspace-managed versions.
- Extend CI (`.woodpecker.yml`) with a dedicated test step and generate coverage reports.
- Update architecture documentation to reflect the new provider abstraction.
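The trait definition itself is not part of this diff, but the `RemoteMcpClient` impl below pins down its shape. A minimal sketch of what `LLMProvider` presumably looks like in `owlen-core` (the `Send`/`Sync` and lifetime bounds are assumptions; the associated types and method signatures are read off the impl):

```rust
use std::future::Future;

// Sketch only: Result, ChatRequest, ChatResponse, and ModelInfo are the
// owlen-core types (crate::Result, crate::types::*), not redefined here.
pub trait LLMProvider: Send + Sync {
    /// Stream of incremental chat responses.
    type Stream: futures::Stream<Item = Result<ChatResponse>> + Send;

    // One generic associated type per method, so an implementer can either
    // name a concrete future type or fall back to BoxFuture.
    type ListModelsFuture<'a>: Future<Output = Result<Vec<ModelInfo>>> + Send + 'a
    where
        Self: 'a;
    type ChatFuture<'a>: Future<Output = Result<ChatResponse>> + Send + 'a
    where
        Self: 'a;
    type ChatStreamFuture<'a>: Future<Output = Result<Self::Stream>> + Send + 'a
    where
        Self: 'a;
    type HealthCheckFuture<'a>: Future<Output = Result<()>> + Send + 'a
    where
        Self: 'a;

    fn name(&self) -> &str;
    fn list_models(&self) -> Self::ListModelsFuture<'_>;
    fn chat(&self, request: ChatRequest) -> Self::ChatFuture<'_>;
    fn chat_stream(&self, request: ChatRequest) -> Self::ChatStreamFuture<'_>;
    fn health_check(&self) -> Self::HealthCheckFuture<'_>;
}
```

Relative to the old `#[async_trait]` definition, generic associated types let an implementer name a concrete future type and skip the per-call `Box<dyn Future>` allocation; boxing remains available through `BoxFuture`, which is what `RemoteMcpClient` opts for below.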
Date:   2025-10-12 01:54:25 +02:00
Parent: 5ac0d152cb
Commit: 952e4819fe
16 changed files with 664 additions and 459 deletions

remote_client.rs

@@ -6,8 +6,9 @@ use super::{McpClient, McpToolCall, McpToolDescriptor, McpToolResponse};
 use crate::consent::{ConsentManager, ConsentScope};
 use crate::tools::{Tool, WebScrapeTool, WebSearchTool};
 use crate::types::ModelInfo;
-use crate::{Error, Provider, Result};
-use async_trait::async_trait;
+use crate::types::{ChatResponse, Message, Role};
+use crate::{provider::chat_via_stream, Error, LLMProvider, Result};
+use futures::{future::BoxFuture, stream, StreamExt};
 use reqwest::Client as HttpClient;
 use serde_json::json;
 use std::path::Path;
@@ -19,10 +20,6 @@ use tokio::process::{Child, Command};
 use tokio::sync::Mutex;
 use tokio_tungstenite::{connect_async, MaybeTlsStream, WebSocketStream};
 use tungstenite::protocol::Message as WsMessage;
-// Provider trait is already imported via the earlier use statement.
-use crate::types::{ChatResponse, Message, Role};
-use futures::stream;
-use futures::StreamExt;

 /// Client that talks to the external `owlen-mcp-server` over STDIO, HTTP, or WebSocket.
 pub struct RemoteMcpClient {
@@ -468,67 +465,66 @@ impl McpClient for RemoteMcpClient {
 // Provider implementation forwards chat requests to the generate_text tool.
 // ---------------------------------------------------------------------------
-#[async_trait]
-impl Provider for RemoteMcpClient {
+impl LLMProvider for RemoteMcpClient {
+    type Stream = stream::Iter<std::vec::IntoIter<Result<ChatResponse>>>;
+    type ListModelsFuture<'a> = BoxFuture<'a, Result<Vec<ModelInfo>>>;
+    type ChatFuture<'a> = BoxFuture<'a, Result<ChatResponse>>;
+    type ChatStreamFuture<'a> = BoxFuture<'a, Result<Self::Stream>>;
+    type HealthCheckFuture<'a> = BoxFuture<'a, Result<()>>;

     fn name(&self) -> &str {
         "mcp-llm-server"
     }

-    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
-        let result = self.send_rpc(methods::MODELS_LIST, json!(null)).await?;
-        let models: Vec<ModelInfo> = serde_json::from_value(result)?;
-        Ok(models)
+    fn list_models(&self) -> Self::ListModelsFuture<'_> {
+        Box::pin(async move {
+            let result = self.send_rpc(methods::MODELS_LIST, json!(null)).await?;
+            let models: Vec<ModelInfo> = serde_json::from_value(result)?;
+            Ok(models)
+        })
     }

-    async fn chat(&self, request: crate::types::ChatRequest) -> Result<ChatResponse> {
-        // Use the streaming implementation and take the first response.
-        let mut stream = self.chat_stream(request).await?;
-        match stream.next().await {
-            Some(Ok(resp)) => Ok(resp),
-            Some(Err(e)) => Err(e),
-            None => Err(Error::Provider(anyhow::anyhow!("Empty chat stream"))),
-        }
+    fn chat(&self, request: crate::types::ChatRequest) -> Self::ChatFuture<'_> {
+        Box::pin(chat_via_stream(self, request))
     }

-    async fn chat_stream(
-        &self,
-        request: crate::types::ChatRequest,
-    ) -> Result<crate::provider::ChatStream> {
-        // Build arguments matching the generate_text schema.
-        let args = serde_json::json!({
-            "messages": request.messages,
-            "temperature": request.parameters.temperature,
-            "max_tokens": request.parameters.max_tokens,
-            "model": request.model,
-            "stream": request.parameters.stream,
-        });
-        let call = McpToolCall {
-            name: "generate_text".to_string(),
-            arguments: args,
-        };
-        let resp = self.call_tool(call).await?;
-        // Build a ChatResponse from the tool output (assumed to be a string).
-        let content = resp.output.as_str().unwrap_or("").to_string();
-        let message = Message::new(Role::Assistant, content);
-        let chat_resp = ChatResponse {
-            message,
-            usage: None,
-            is_streaming: false,
-            is_final: true,
-        };
-        let stream = stream::once(async move { Ok(chat_resp) });
-        Ok(Box::pin(stream))
+    fn chat_stream(&self, request: crate::types::ChatRequest) -> Self::ChatStreamFuture<'_> {
+        Box::pin(async move {
+            let args = serde_json::json!({
+                "messages": request.messages,
+                "temperature": request.parameters.temperature,
+                "max_tokens": request.parameters.max_tokens,
+                "model": request.model,
+                "stream": request.parameters.stream,
+            });
+            let call = McpToolCall {
+                name: "generate_text".to_string(),
+                arguments: args,
+            };
+            let resp = self.call_tool(call).await?;
+            let content = resp.output.as_str().unwrap_or("").to_string();
+            let message = Message::new(Role::Assistant, content);
+            let chat_resp = ChatResponse {
+                message,
+                usage: None,
+                is_streaming: false,
+                is_final: true,
+            };
+            Ok(stream::iter(vec![Ok(chat_resp)]))
+        })
     }

-    async fn health_check(&self) -> Result<()> {
-        let params = serde_json::json!({
-            "protocol_version": PROTOCOL_VERSION,
-            "client_info": {
-                "name": "owlen",
-                "version": env!("CARGO_PKG_VERSION"),
-            },
-            "capabilities": {}
-        });
-        self.send_rpc(methods::INITIALIZE, params).await.map(|_| ())
+    fn health_check(&self) -> Self::HealthCheckFuture<'_> {
+        Box::pin(async move {
+            let params = serde_json::json!({
+                "protocol_version": PROTOCOL_VERSION,
+                "client_info": {
+                    "name": "owlen",
+                    "version": env!("CARGO_PKG_VERSION"),
+                },
+                "capabilities": {}
+            });
+            self.send_rpc(methods::INITIALIZE, params).await.map(|_| ())
+        })
     }
 }
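The new `chat` forwards to `provider::chat_via_stream` from `owlen-core`. Its behavior can be read off the inline body this commit deletes: drive the streaming implementation and surface the first response. A sketch under that assumption (the exact signature in `owlen-core` may differ):

```rust
use futures::StreamExt;

/// Sketch of the shared helper `chat` now forwards to, reconstructed from the
/// removed inline logic: run chat_stream and take the first item, mapping an
/// empty stream to a provider error.
pub async fn chat_via_stream<P: LLMProvider>(
    provider: &P,
    request: ChatRequest,
) -> Result<ChatResponse> {
    let stream = provider.chat_stream(request).await?;
    futures::pin_mut!(stream); // P::Stream is not necessarily Unpin
    match stream.next().await {
        Some(Ok(resp)) => Ok(resp),
        Some(Err(e)) => Err(e),
        None => Err(Error::Provider(anyhow::anyhow!("Empty chat stream"))),
    }
}
```

Since `chat_via_stream` is a plain `async fn`, `Box::pin(chat_via_stream(self, request))` in the impl above produces exactly the `BoxFuture<'_, Result<ChatResponse>>` required by the `ChatFuture` associated type.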