refactor(core)!: rename Provider to LLMProvider and update implementations
- Export `LLMProvider` from `owlen-core` and update the public `Provider` re-exports.
- Convert `OllamaProvider` to implement the new `LLMProvider` trait with associated future types.
- Adjust imports and trait bounds in `remote_client.rs` to use the updated types.
- Add comprehensive provider interface tests (`provider_interface.rs`) verifying router routing and provider-registry model listing with `MockProvider`.
- Align dependency versions across workspace crates by switching to workspace-managed versions.
- Extend CI (`.woodpecker.yml`) with a dedicated test step and generate coverage reports.
- Update architecture documentation to reflect the new provider abstraction.
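Migration sketch for downstream provider implementations. This is illustrative only: `EchoProvider` and its echo behaviour are hypothetical, while the `LLMProvider` trait, its associated future types, and the `owlen_core` items are the ones introduced in the diff below.

```rust
use futures::{future::BoxFuture, stream};
use owlen_core::provider::LLMProvider;
use owlen_core::types::{ChatRequest, ChatResponse, Message, ModelInfo, Role};
use owlen_core::Result;

// Hypothetical provider used only to illustrate the migration.
struct EchoProvider;

impl LLMProvider for EchoProvider {
    // The trait now names every future explicitly (GATs) instead of relying
    // on #[async_trait]'s hidden Box<dyn Future> allocations.
    type Stream = stream::Iter<std::vec::IntoIter<Result<ChatResponse>>>;
    type ListModelsFuture<'a> = BoxFuture<'a, Result<Vec<ModelInfo>>>;
    type ChatFuture<'a> = BoxFuture<'a, Result<ChatResponse>>;
    type ChatStreamFuture<'a> = BoxFuture<'a, Result<Self::Stream>>;
    type HealthCheckFuture<'a> = BoxFuture<'a, Result<()>>;

    fn name(&self) -> &str {
        "echo"
    }

    fn list_models(&self) -> Self::ListModelsFuture<'_> {
        Box::pin(async move { Ok(vec![]) })
    }

    fn chat(&self, request: ChatRequest) -> Self::ChatFuture<'_> {
        Box::pin(async move {
            // Echo the last message back as the assistant reply.
            let text = request
                .messages
                .last()
                .map(|m| m.content.clone())
                .unwrap_or_default();
            Ok(ChatResponse {
                message: Message::new(Role::Assistant, text),
                usage: None,
                is_streaming: false,
                is_final: true,
            })
        })
    }

    fn chat_stream(&self, request: ChatRequest) -> Self::ChatStreamFuture<'_> {
        // A one-item stream built from the non-streaming chat response.
        let chat = self.chat(request);
        Box::pin(async move { Ok(stream::iter(vec![chat.await])) })
    }

    fn health_check(&self) -> Self::HealthCheckFuture<'_> {
        Box::pin(async move { Ok(()) })
    }
}
```

Because of the blanket `impl<T: LLMProvider> Provider for T` added in `provider.rs`, such a type still registers into `ProviderRegistry` and erases to `Arc<dyn Provider>` exactly as before.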
@@ -39,6 +39,14 @@ matrix:
       EXT: ".exe"

 steps:
+  - name: tests
+    image: *rust_image
+    commands:
+      - rustup component add llvm-tools-preview
+      - cargo install cargo-llvm-cov --locked
+      - cargo llvm-cov --workspace --all-features --summary-only
+      - cargo llvm-cov --workspace --all-features --lcov --output-path coverage.lcov --no-run
+
   - name: build
     image: *rust_image
     commands:
@@ -57,6 +57,10 @@ urlencoding = "2.1"
 regex = "1.10"
 rpassword = "7.3"
 sqlx = { version = "0.7", default-features = false, features = ["runtime-tokio-rustls", "sqlite", "macros", "uuid", "chrono", "migrate"] }
+log = "0.4"
+dirs = "5.0"
+serde_yaml = "0.9"
+handlebars = "6.0"

 # Configuration
 toml = "0.8"
@@ -27,10 +27,10 @@ owlen-core = { path = "../owlen-core" }
 # Optional TUI dependency, enabled by the "chat-client" feature.
 owlen-tui = { path = "../owlen-tui", optional = true }
 owlen-ollama = { path = "../owlen-ollama" }
-log = "0.4"
+log = { workspace = true }

 # CLI framework
-clap = { version = "4.0", features = ["derive"] }
+clap = { workspace = true, features = ["derive"] }

 # Async runtime
 tokio = { workspace = true }
@@ -44,9 +44,9 @@ crossterm = { workspace = true }
 anyhow = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
-regex = "1"
-thiserror = "1"
-dirs = "5"
+regex = { workspace = true }
+thiserror = { workspace = true }
+dirs = { workspace = true }

 [dev-dependencies]
 tokio = { workspace = true }
@@ -143,28 +143,27 @@ fn run_config_command(command: ConfigCommand) -> Result<()> {
 fn run_config_doctor() -> Result<()> {
     let config_path = core_config::default_config_path();
     let existed = config_path.exists();
-    let mut config = config::try_load_config().unwrap_or_else(|| Config::default());
+    let mut config = config::try_load_config().unwrap_or_default();
     let mut changes = Vec::new();

     if !existed {
         changes.push("created configuration file from defaults".to_string());
     }

-    if config
+    if !config
         .providers
-        .get(&config.general.default_provider)
-        .is_none()
+        .contains_key(&config.general.default_provider)
     {
         config.general.default_provider = "ollama".to_string();
         changes.push("default provider missing; reset to 'ollama'".to_string());
     }

-    if config.providers.get("ollama").is_none() {
+    if !config.providers.contains_key("ollama") {
         core_config::ensure_provider_config(&mut config, "ollama");
         changes.push("added default ollama provider configuration".to_string());
     }

-    if config.providers.get("ollama-cloud").is_none() {
+    if !config.providers.contains_key("ollama-cloud") {
         core_config::ensure_provider_config(&mut config, "ollama-cloud");
         changes.push("added default ollama-cloud provider configuration".to_string());
     }
@@ -10,7 +10,7 @@ description = "Core traits and types for OWLEN LLM client"

 [dependencies]
 anyhow = { workspace = true }
-log = "0.4.20"
+log = { workspace = true }
 regex = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true }
@@ -24,7 +24,7 @@ futures = { workspace = true }
 async-trait = { workspace = true }
 toml = { workspace = true }
 shellexpand = { workspace = true }
-dirs = "5.0"
+dirs = { workspace = true }
 ratatui = { workspace = true }
 tempfile = { workspace = true }
 jsonschema = { workspace = true }
@@ -42,7 +42,7 @@ duckduckgo = "0.2.0"
 reqwest = { workspace = true, features = ["default"] }
 reqwest_011 = { version = "0.11", package = "reqwest" }
 path-clean = "1.0"
-tokio-stream = "0.1"
+tokio-stream = { workspace = true }
 tokio-tungstenite = "0.21"
 tungstenite = "0.21"
@@ -42,7 +42,7 @@ pub use mcp::{
 pub use mode::*;
 pub use model::*;
 // Export provider types but exclude test_utils to avoid ambiguity
-pub use provider::{ChatStream, Provider, ProviderConfig, ProviderRegistry};
+pub use provider::{ChatStream, LLMProvider, Provider, ProviderConfig, ProviderRegistry};
 pub use router::*;
 pub use sandbox::*;
 pub use session::*;
@@ -6,8 +6,9 @@ use super::{McpClient, McpToolCall, McpToolDescriptor, McpToolResponse};
 use crate::consent::{ConsentManager, ConsentScope};
 use crate::tools::{Tool, WebScrapeTool, WebSearchTool};
 use crate::types::ModelInfo;
-use crate::{Error, Provider, Result};
-use async_trait::async_trait;
+use crate::types::{ChatResponse, Message, Role};
+use crate::{provider::chat_via_stream, Error, LLMProvider, Result};
+use futures::{future::BoxFuture, stream, StreamExt};
 use reqwest::Client as HttpClient;
 use serde_json::json;
 use std::path::Path;
@@ -19,10 +20,6 @@ use tokio::process::{Child, Command};
 use tokio::sync::Mutex;
 use tokio_tungstenite::{connect_async, MaybeTlsStream, WebSocketStream};
 use tungstenite::protocol::Message as WsMessage;
-// Provider trait is already imported via the earlier use statement.
-use crate::types::{ChatResponse, Message, Role};
-use futures::stream;
-use futures::StreamExt;

 /// Client that talks to the external `owlen-mcp-server` over STDIO, HTTP, or WebSocket.
 pub struct RemoteMcpClient {
@@ -468,33 +465,31 @@ impl McpClient for RemoteMcpClient {
 // Provider implementation – forwards chat requests to the generate_text tool.
 // ---------------------------------------------------------------------------

-#[async_trait]
-impl Provider for RemoteMcpClient {
+impl LLMProvider for RemoteMcpClient {
+    type Stream = stream::Iter<std::vec::IntoIter<Result<ChatResponse>>>;
+    type ListModelsFuture<'a> = BoxFuture<'a, Result<Vec<ModelInfo>>>;
+    type ChatFuture<'a> = BoxFuture<'a, Result<ChatResponse>>;
+    type ChatStreamFuture<'a> = BoxFuture<'a, Result<Self::Stream>>;
+    type HealthCheckFuture<'a> = BoxFuture<'a, Result<()>>;
+
     fn name(&self) -> &str {
         "mcp-llm-server"
     }

-    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
+    fn list_models(&self) -> Self::ListModelsFuture<'_> {
+        Box::pin(async move {
             let result = self.send_rpc(methods::MODELS_LIST, json!(null)).await?;
             let models: Vec<ModelInfo> = serde_json::from_value(result)?;
             Ok(models)
+        })
     }

-    async fn chat(&self, request: crate::types::ChatRequest) -> Result<ChatResponse> {
-        // Use the streaming implementation and take the first response.
-        let mut stream = self.chat_stream(request).await?;
-        match stream.next().await {
-            Some(Ok(resp)) => Ok(resp),
-            Some(Err(e)) => Err(e),
-            None => Err(Error::Provider(anyhow::anyhow!("Empty chat stream"))),
-        }
+    fn chat(&self, request: crate::types::ChatRequest) -> Self::ChatFuture<'_> {
+        Box::pin(chat_via_stream(self, request))
     }

-    async fn chat_stream(
-        &self,
-        request: crate::types::ChatRequest,
-    ) -> Result<crate::provider::ChatStream> {
-        // Build arguments matching the generate_text schema.
+    fn chat_stream(&self, request: crate::types::ChatRequest) -> Self::ChatStreamFuture<'_> {
+        Box::pin(async move {
             let args = serde_json::json!({
                 "messages": request.messages,
                 "temperature": request.parameters.temperature,
@@ -507,7 +502,6 @@ impl Provider for RemoteMcpClient {
                 arguments: args,
             };
             let resp = self.call_tool(call).await?;
-            // Build a ChatResponse from the tool output (assumed to be a string).
             let content = resp.output.as_str().unwrap_or("").to_string();
             let message = Message::new(Role::Assistant, content);
             let chat_resp = ChatResponse {
@@ -516,11 +510,12 @@ impl Provider for RemoteMcpClient {
                 is_streaming: false,
                 is_final: true,
             };
-            let stream = stream::once(async move { Ok(chat_resp) });
-            Ok(Box::pin(stream))
+            Ok(stream::iter(vec![Ok(chat_resp)]))
+        })
     }

-    async fn health_check(&self) -> Result<()> {
+    fn health_check(&self) -> Self::HealthCheckFuture<'_> {
+        Box::pin(async move {
             let params = serde_json::json!({
                 "protocol_version": PROTOCOL_VERSION,
                 "client_info": {
@@ -530,5 +525,6 @@ impl Provider for RemoteMcpClient {
             "capabilities": {}
         });
         self.send_rpc(methods::INITIALIZE, params).await.map(|_| ())
+        })
     }
 }
@@ -1,109 +1,119 @@
-//! Provider trait and related types
+//! Provider traits and registries.

-use crate::{types::*, Result};
-use futures::Stream;
+use crate::{types::*, Error, Result};
+use anyhow::anyhow;
+use futures::{Stream, StreamExt};
+use std::future::Future;
 use std::pin::Pin;
 use std::sync::Arc;

 /// A stream of chat responses
 pub type ChatStream = Pin<Box<dyn Stream<Item = Result<ChatResponse>> + Send>>;

-/// Trait for LLM providers (Ollama, OpenAI, Anthropic, etc.)
-///
-/// # Example
-///
-/// ```
-/// use std::pin::Pin;
-/// use std::sync::Arc;
-/// use futures::Stream;
-/// use owlen_core::provider::{Provider, ProviderRegistry, ChatStream};
-/// use owlen_core::types::{ChatRequest, ChatResponse, ModelInfo, Message, Role, ChatParameters};
-/// use owlen_core::Result;
-///
-/// // 1. Create a mock provider
-/// struct MockProvider;
-///
-/// #[async_trait::async_trait]
-/// impl Provider for MockProvider {
-///     fn name(&self) -> &str {
-///         "mock"
-///     }
-///
-///     async fn list_models(&self) -> Result<Vec<ModelInfo>> {
-///         Ok(vec![ModelInfo {
-///             id: "mock-model".to_string(),
-///             provider: "mock".to_string(),
-///             name: "mock-model".to_string(),
-///             description: None,
-///             context_window: None,
-///             capabilities: vec![],
-///             supports_tools: false,
-///         }])
-///     }
-///
-///     async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
-///         let content = format!("Response to: {}", request.messages.last().unwrap().content);
-///         Ok(ChatResponse {
-///             message: Message::new(Role::Assistant, content),
-///             usage: None,
-///             is_streaming: false,
-///             is_final: true,
-///         })
-///     }
-///
-///     async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
-///         unimplemented!();
-///     }
-///
-///     async fn health_check(&self) -> Result<()> {
-///         Ok(())
-///     }
-/// }
-///
-/// // 2. Use the provider with a registry
-/// #[tokio::main]
-/// async fn main() {
-///     let mut registry = ProviderRegistry::new();
-///     registry.register(MockProvider);
-///
-///     let provider = registry.get("mock").unwrap();
-///     let models = provider.list_models().await.unwrap();
-///     assert_eq!(models[0].name, "mock-model");
-///
-///     let request = ChatRequest {
-///         model: "mock-model".to_string(),
-///         messages: vec![Message::new(Role::User, "Hello".to_string())],
-///         parameters: ChatParameters::default(),
-///         tools: None,
-///     };
-///
-///     let response = provider.chat(request).await.unwrap();
-///     assert_eq!(response.message.content, "Response to: Hello");
-/// }
-/// ```
-#[async_trait::async_trait]
-pub trait Provider: Send + Sync {
-    /// Get the name of this provider
+/// Trait for LLM providers (Ollama, OpenAI, Anthropic, etc.) with zero-cost static dispatch.
+pub trait LLMProvider: Send + Sync + 'static {
+    type Stream: Stream<Item = Result<ChatResponse>> + Send + 'static;
+
+    type ListModelsFuture<'a>: Future<Output = Result<Vec<ModelInfo>>> + Send
+    where
+        Self: 'a;
+
+    type ChatFuture<'a>: Future<Output = Result<ChatResponse>> + Send
+    where
+        Self: 'a;
+
+    type ChatStreamFuture<'a>: Future<Output = Result<Self::Stream>> + Send
+    where
+        Self: 'a;
+
+    type HealthCheckFuture<'a>: Future<Output = Result<()>> + Send
+    where
+        Self: 'a;
+
     fn name(&self) -> &str;

     /// List available models from this provider
-    async fn list_models(&self) -> Result<Vec<ModelInfo>>;
+    fn list_models(&self) -> Self::ListModelsFuture<'_>;
+    fn chat(&self, request: ChatRequest) -> Self::ChatFuture<'_>;
+    fn chat_stream(&self, request: ChatRequest) -> Self::ChatStreamFuture<'_>;
+    fn health_check(&self) -> Self::HealthCheckFuture<'_>;

-    /// Send a chat completion request
-    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse>;
-
-    /// Send a streaming chat completion request
-    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream>;
-
-    /// Check if the provider is available/healthy
-    async fn health_check(&self) -> Result<()>;
-
     /// Get provider-specific configuration schema
     fn config_schema(&self) -> serde_json::Value {
         serde_json::json!({})
     }
 }

+/// Helper that implements [`LLMProvider::chat`] in terms of [`LLMProvider::chat_stream`].
+pub async fn chat_via_stream<'a, P>(provider: &'a P, request: ChatRequest) -> Result<ChatResponse>
+where
+    P: LLMProvider + 'a,
+{
+    let stream = provider.chat_stream(request).await?;
+    let mut boxed: ChatStream = Box::pin(stream);
+    match boxed.next().await {
+        Some(Ok(response)) => Ok(response),
+        Some(Err(err)) => Err(err),
+        None => Err(Error::Provider(anyhow!(
+            "Empty chat stream from provider {}",
+            provider.name()
+        ))),
+    }
+}
+
+/// Object-safe wrapper trait for runtime-configurable provider usage.
+#[async_trait::async_trait]
+pub trait Provider: Send + Sync {
+    /// Get the name of this provider.
+    fn name(&self) -> &str;
+
+    /// List available models from this provider.
+    async fn list_models(&self) -> Result<Vec<ModelInfo>>;
+
+    /// Send a chat completion request.
+    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse>;
+
+    /// Send a streaming chat completion request.
+    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream>;
+
+    /// Check if the provider is available/healthy.
+    async fn health_check(&self) -> Result<()>;
+
+    /// Get provider-specific configuration schema.
+    fn config_schema(&self) -> serde_json::Value {
+        serde_json::json!({})
+    }
+}
+
+#[async_trait::async_trait]
+impl<T> Provider for T
+where
+    T: LLMProvider,
+{
+    fn name(&self) -> &str {
+        LLMProvider::name(self)
+    }
+
+    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
+        LLMProvider::list_models(self).await
+    }
+
+    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
+        LLMProvider::chat(self, request).await
+    }
+
+    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
+        let stream = LLMProvider::chat_stream(self, request).await?;
+        Ok(Box::pin(stream))
+    }
+
+    async fn health_check(&self) -> Result<()> {
+        LLMProvider::health_check(self).await
+    }
+
+    fn config_schema(&self) -> serde_json::Value {
+        LLMProvider::config_schema(self)
+    }
+}
+
 /// Configuration for a provider
 #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 pub struct ProviderConfig {
@@ -131,8 +141,8 @@ impl ProviderRegistry {
         }
     }

-    /// Register a provider
-    pub fn register<P: Provider + 'static>(&mut self, provider: P) {
+    /// Register a provider using static dispatch.
+    pub fn register<P: LLMProvider + 'static>(&mut self, provider: P) {
         self.register_arc(Arc::new(provider));
     }
@@ -179,19 +189,26 @@ impl Default for ProviderRegistry {
 pub mod test_utils {
     use super::*;
     use crate::types::{ChatRequest, ChatResponse, Message, ModelInfo, Role};
+    use futures::stream;
+    use std::future::{ready, Ready};

     /// Mock provider for testing
     #[derive(Default)]
     pub struct MockProvider;

-    #[async_trait::async_trait]
-    impl Provider for MockProvider {
+    impl LLMProvider for MockProvider {
+        type Stream = stream::Iter<std::vec::IntoIter<Result<ChatResponse>>>;
+        type ListModelsFuture<'a> = Ready<Result<Vec<ModelInfo>>>;
+        type ChatFuture<'a> = Ready<Result<ChatResponse>>;
+        type ChatStreamFuture<'a> = Ready<Result<Self::Stream>>;
+        type HealthCheckFuture<'a> = Ready<Result<()>>;
+
         fn name(&self) -> &str {
             "mock"
         }

-        async fn list_models(&self) -> Result<Vec<ModelInfo>> {
-            Ok(vec![ModelInfo {
+        fn list_models(&self) -> Self::ListModelsFuture<'_> {
+            ready(Ok(vec![ModelInfo {
                 id: "mock-model".to_string(),
                 provider: "mock".to_string(),
                 name: "mock-model".to_string(),
@@ -199,24 +216,154 @@ pub mod test_utils {
                 context_window: None,
                 capabilities: vec![],
                 supports_tools: false,
-            }])
+            }]))
         }

-        async fn chat(&self, _request: ChatRequest) -> Result<ChatResponse> {
-            Ok(ChatResponse {
-                message: Message::new(Role::Assistant, "Mock response".to_string()),
+        fn chat(&self, request: ChatRequest) -> Self::ChatFuture<'_> {
+            ready(Ok(self.build_response(&request)))
+        }
+
+        fn chat_stream(&self, request: ChatRequest) -> Self::ChatStreamFuture<'_> {
+            let response = self.build_response(&request);
+            ready(Ok(stream::iter(vec![Ok(response)])))
+        }
+
+        fn health_check(&self) -> Self::HealthCheckFuture<'_> {
+            ready(Ok(()))
+        }
+    }
+
+    impl MockProvider {
+        fn build_response(&self, request: &ChatRequest) -> ChatResponse {
+            let content = format!(
+                "Mock response to: {}",
+                request
+                    .messages
+                    .last()
+                    .map(|m| m.content.clone())
+                    .unwrap_or_default()
+            );
+
+            ChatResponse {
+                message: Message::new(Role::Assistant, content),
                 usage: None,
                 is_streaming: false,
                 is_final: true,
-            })
-        }
-
-        async fn chat_stream(&self, _request: ChatRequest) -> Result<ChatStream> {
-            unimplemented!("MockProvider does not support streaming")
-        }
-
-        async fn health_check(&self) -> Result<()> {
-            Ok(())
+            }
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::test_utils::MockProvider;
+    use super::*;
+    use crate::types::{ChatParameters, ChatRequest, ChatResponse, Message, ModelInfo, Role};
+    use futures::stream;
+    use std::future::{ready, Ready};
+    use std::sync::Arc;
+
+    struct StreamingProvider;
+
+    impl LLMProvider for StreamingProvider {
+        type Stream = stream::Iter<std::vec::IntoIter<Result<ChatResponse>>>;
+        type ListModelsFuture<'a> = Ready<Result<Vec<ModelInfo>>>;
+        type ChatFuture<'a> = Ready<Result<ChatResponse>>;
+        type ChatStreamFuture<'a> = Ready<Result<Self::Stream>>;
+        type HealthCheckFuture<'a> = Ready<Result<()>>;
+
+        fn name(&self) -> &str {
+            "streaming"
+        }
+
+        fn list_models(&self) -> Self::ListModelsFuture<'_> {
+            ready(Ok(vec![ModelInfo {
+                id: "stream-model".to_string(),
+                provider: "streaming".to_string(),
+                name: "stream-model".to_string(),
+                description: None,
+                context_window: None,
+                capabilities: vec!["chat".to_string()],
+                supports_tools: false,
+            }]))
+        }
+
+        fn chat(&self, request: ChatRequest) -> Self::ChatFuture<'_> {
+            ready(Ok(self.response(&request)))
+        }
+
+        fn chat_stream(&self, request: ChatRequest) -> Self::ChatStreamFuture<'_> {
+            let response = self.response(&request);
+            ready(Ok(stream::iter(vec![Ok(response)])))
+        }
+
+        fn health_check(&self) -> Self::HealthCheckFuture<'_> {
+            ready(Ok(()))
+        }
+    }
+
+    impl StreamingProvider {
+        fn response(&self, request: &ChatRequest) -> ChatResponse {
+            let reply = format!(
+                "echo:{}",
+                request
+                    .messages
+                    .last()
+                    .map(|m| m.content.clone())
+                    .unwrap_or_default()
+            );
+            ChatResponse {
+                message: Message::new(Role::Assistant, reply),
+                usage: None,
+                is_streaming: true,
+                is_final: true,
+            }
+        }
+    }
+
+    #[tokio::test]
+    async fn default_chat_reads_from_stream() {
+        let provider = StreamingProvider;
+        let request = ChatRequest {
+            model: "stream-model".to_string(),
+            messages: vec![Message::new(Role::User, "ping".to_string())],
+            parameters: ChatParameters::default(),
+            tools: None,
+        };
+
+        let response = LLMProvider::chat(&provider, request)
+            .await
+            .expect("chat succeeded");
+        assert_eq!(response.message.content, "echo:ping");
+        assert!(response.is_final);
+    }
+
+    #[tokio::test]
+    async fn registry_registers_static_provider() {
+        let mut registry = ProviderRegistry::new();
+        registry.register(StreamingProvider);
+
+        let provider = registry.get("streaming").expect("provider registered");
+        let models = provider.list_models().await.expect("models listed");
+        assert_eq!(models[0].id, "stream-model");
+    }
+
+    #[tokio::test]
+    async fn registry_accepts_dynamic_provider() {
+        let mut registry = ProviderRegistry::new();
+        let provider: Arc<dyn Provider> = Arc::new(MockProvider::default());
+        registry.register_arc(provider.clone());
+
+        let fetched = registry.get("mock").expect("mock provider present");
+        let request = ChatRequest {
+            model: "mock-model".to_string(),
+            messages: vec![Message::new(Role::User, "hi".to_string())],
+            parameters: ChatParameters::default(),
+            tools: None,
+        };
+        let response = Provider::chat(fetched.as_ref(), request)
+            .await
+            .expect("chat succeeded");
+        assert_eq!(response.message.content, "Mock response to: hi");
+    }
+}
@@ -32,7 +32,7 @@ impl Router {
     }

     /// Register a provider with the router
-    pub fn register_provider<P: Provider + 'static>(&mut self, provider: P) {
+    pub fn register_provider<P: LLMProvider + 'static>(&mut self, provider: P) {
        self.registry.register(provider);
    }
crates/owlen-core/tests/provider_interface.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
+use futures::StreamExt;
+use owlen_core::provider::test_utils::MockProvider;
+use owlen_core::{provider::ProviderRegistry, types::*, Router};
+use std::sync::Arc;
+
+fn request(message: &str) -> ChatRequest {
+    ChatRequest {
+        model: "mock-model".to_string(),
+        messages: vec![Message::new(Role::User, message.to_string())],
+        parameters: ChatParameters::default(),
+        tools: None,
+    }
+}
+
+#[tokio::test]
+async fn router_routes_to_registered_provider() {
+    let mut router = Router::new();
+    router.register_provider(MockProvider::default());
+    router.set_default_provider("mock".to_string());
+
+    let resp = router.chat(request("ping")).await.expect("chat succeeded");
+    assert_eq!(resp.message.content, "Mock response to: ping");
+
+    let mut stream = router
+        .chat_stream(request("pong"))
+        .await
+        .expect("stream returned");
+    let first = stream.next().await.expect("stream item").expect("ok item");
+    assert_eq!(first.message.content, "Mock response to: pong");
+}
+
+#[tokio::test]
+async fn registry_lists_models_from_all_providers() {
+    let mut registry = ProviderRegistry::new();
+    registry.register(MockProvider::default());
+    registry.register_arc(Arc::new(MockProvider::default()));
+
+    let models = registry.list_all_models().await.expect("listed");
+    assert!(
+        models.iter().any(|m| m.name == "mock-model"),
+        "expected mock-model in model list"
+    );
+}
@@ -7,15 +7,15 @@ license = "AGPL-3.0"

 [dependencies]
 owlen-core = { path = "../owlen-core" }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
-tokio = { version = "1.0", features = ["full"] }
-anyhow = "1.0"
-async-trait = "0.1"
+serde = { workspace = true }
+serde_json = { workspace = true }
+tokio = { workspace = true }
+anyhow = { workspace = true }
+async-trait = { workspace = true }
 bollard = "0.17"
-tempfile = "3.0"
-uuid = { version = "1.0", features = ["v4"] }
-futures = "0.3"
+tempfile = { workspace = true }
+uuid = { workspace = true }
+futures = { workspace = true }

 [lib]
 name = "owlen_mcp_code_server"
@@ -6,11 +6,11 @@ edition = "2021"
 [dependencies]
 owlen-core = { path = "../owlen-core" }
 owlen-ollama = { path = "../owlen-ollama" }
-tokio = { version = "1.0", features = ["full"] }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
-anyhow = "1.0"
-tokio-stream = "0.1"
+tokio = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+anyhow = { workspace = true }
+tokio-stream = { workspace = true }

 [[bin]]
 name = "owlen-mcp-llm-server"
@@ -7,14 +7,14 @@ license = "AGPL-3.0"

 [dependencies]
 owlen-core = { path = "../owlen-core" }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
-serde_yaml = "0.9"
-tokio = { version = "1.0", features = ["full"] }
-anyhow = "1.0"
-handlebars = "6.0"
-dirs = "5.0"
-futures = "0.3"
+serde = { workspace = true }
+serde_json = { workspace = true }
+serde_yaml = { workspace = true }
+tokio = { workspace = true }
+anyhow = { workspace = true }
+handlebars = { workspace = true }
+dirs = { workspace = true }
+futures = { workspace = true }

 [lib]
 name = "owlen_mcp_prompt_server"
@@ -4,9 +4,9 @@ version = "0.1.0"
 edition = "2021"

 [dependencies]
-tokio = { version = "1.0", features = ["full"] }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
-anyhow = "1.0"
+tokio = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+anyhow = { workspace = true }
 path-clean = "1.0"
 owlen-core = { path = "../owlen-core" }
@@ -1,10 +1,10 @@
 //! Ollama provider for OWLEN LLM client

-use futures_util::StreamExt;
+use futures_util::{future::BoxFuture, StreamExt};
 use owlen_core::{
     config::GeneralSettings,
     model::ModelManager,
-    provider::{ChatStream, Provider, ProviderConfig},
+    provider::{LLMProvider, ProviderConfig},
     types::{
         ChatParameters, ChatRequest, ChatResponse, Message, ModelInfo, Role, TokenUsage, ToolCall,
     },
@@ -639,19 +639,27 @@ impl OllamaProvider {
     }
 }

-#[async_trait::async_trait]
-impl Provider for OllamaProvider {
+impl LLMProvider for OllamaProvider {
+    type Stream = UnboundedReceiverStream<Result<ChatResponse>>;
+    type ListModelsFuture<'a> = BoxFuture<'a, Result<Vec<ModelInfo>>>;
+    type ChatFuture<'a> = BoxFuture<'a, Result<ChatResponse>>;
+    type ChatStreamFuture<'a> = BoxFuture<'a, Result<Self::Stream>>;
+    type HealthCheckFuture<'a> = BoxFuture<'a, Result<()>>;
+
     fn name(&self) -> &str {
         "ollama"
     }

-    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
+    fn list_models(&self) -> Self::ListModelsFuture<'_> {
+        Box::pin(async move {
             self.model_manager
                 .get_or_refresh(false, || async { self.fetch_models().await })
                 .await
+        })
     }

-    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
+    fn chat(&self, request: ChatRequest) -> Self::ChatFuture<'_> {
+        Box::pin(async move {
             let ChatRequest {
                 model,
                 messages,
@@ -660,23 +668,14 @@ impl Provider for OllamaProvider {
             } = request;

             let model_id = model.clone();

             let messages: Vec<OllamaMessage> = messages.iter().map(Self::convert_message).collect();

             let options = Self::build_options(parameters);

-            // Only send the `tools` field if there is at least one tool.
-            // An empty array makes Ollama validate tool support and can cause a
-            // 400 Bad Request for models that do not support tools.
+            // Currently the `tools` field is omitted for compatibility; the variable is retained
+            // for potential future use.
             let _ollama_tools = tools
                 .as_ref()
                 .filter(|t| !t.is_empty())
                 .map(|t| Self::convert_tools_to_ollama(t));

-            // Ollama currently rejects any presence of the `tools` field for models that
-            // do not support function calling. To be safe, we omit the field entirely.
             let ollama_request = OllamaChatRequest {
                 model,
                 messages,
@@ -753,9 +752,11 @@ impl Provider for OllamaProvider {
                 is_streaming: false,
                 is_final: true,
             })
+        })
     }

-    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
+    fn chat_stream(&self, request: ChatRequest) -> Self::ChatStreamFuture<'_> {
+        Box::pin(async move {
             let ChatRequest {
                 model,
                 messages,
@@ -764,21 +765,14 @@ impl Provider for OllamaProvider {
             } = request;

             let model_id = model.clone();

             let messages: Vec<OllamaMessage> = messages.iter().map(Self::convert_message).collect();

             let options = Self::build_options(parameters);

-            // Only include the `tools` field if there is at least one tool.
-            // Sending an empty tools array causes Ollama to reject the request for
-            // models without tool support (400 Bad Request).
+            // Retain tools conversion for possible future extensions, but silence unused warnings.
             let _ollama_tools = tools
                 .as_ref()
                 .filter(|t| !t.is_empty())
                 .map(|t| Self::convert_tools_to_ollama(t));

+            // Omit the `tools` field for compatibility with models lacking tool support.
             let ollama_request = OllamaChatRequest {
                 model,
                 messages,
@@ -856,14 +850,18 @@ impl Provider for OllamaProvider {
                         is_final: ollama_response.done,
                     };

-                    if let (Some(prompt_tokens), Some(completion_tokens)) = (
+                    if let (
+                        Some(prompt_tokens),
+                        Some(completion_tokens),
+                    ) = (
                         ollama_response.prompt_eval_count,
                         ollama_response.eval_count,
                     ) {
                         chat_response.usage = Some(TokenUsage {
                             prompt_tokens,
                             completion_tokens,
-                            total_tokens: prompt_tokens + completion_tokens,
+                            total_tokens: prompt_tokens
+                                + completion_tokens,
                         });
                     }
@@ -877,7 +875,8 @@ impl Provider for OllamaProvider {
                         }
                     }
                     Err(e) => {
-                        let _ = tx.send(Err(owlen_core::Error::Serialization(e)));
+                        let _ =
+                            tx.send(Err(owlen_core::Error::Serialization(e)));
                         break;
                     }
                 }
@@ -903,10 +902,12 @@ impl Provider for OllamaProvider {
             });

             let stream = UnboundedReceiverStream::new(rx);
-            Ok(Box::pin(stream))
+            Ok(stream)
+        })
     }

-    async fn health_check(&self) -> Result<()> {
+    fn health_check(&self) -> Self::HealthCheckFuture<'_> {
+        Box::pin(async move {
             let url = self.api_url("version");

             let response = self
@@ -922,6 +923,7 @@ impl Provider for OllamaProvider {
             let detail = parse_error_body(response).await;
             Err(self.map_http_failure("health check", status, detail, None))
         }
+        })
     }

     fn config_schema(&self) -> serde_json::Value {
@@ -31,13 +31,19 @@ A simplified diagram of how components interact:

 ## Crate Breakdown

-- `owlen-core`: Defines the core traits and data structures, like `Provider` and `Session`. Also contains the MCP client implementation.
-- `owlen-tui`: Contains all the logic for the terminal user interface, including event handling and rendering.
-- `owlen-cli`: The command-line entry point, responsible for parsing arguments and starting the TUI.
-- `owlen-mcp-llm-server`: MCP server that wraps Ollama providers and exposes them via the Model Context Protocol.
+- `owlen-core`: Defines the `LLMProvider` abstraction, routing, configuration, session state, encryption, and the MCP client layer. This crate is UI-agnostic and must not depend on concrete providers, terminals, or blocking I/O.
+- `owlen-tui`: Hosts all terminal UI behaviour (event loop, rendering, input modes) while delegating business logic and provider access back to `owlen-core`.
+- `owlen-cli`: Small entry point that parses command-line options, resolves configuration, selects providers, and launches either the TUI or headless agent flows by calling into `owlen-core`.
+- `owlen-mcp-llm-server`: Runs concrete providers (e.g., Ollama) behind an MCP boundary, exposing them as `generate_text` tools. This crate owns provider-specific wiring and process sandboxing.
 - `owlen-mcp-server`: Generic MCP server for file operations and resource management.
 - `owlen-ollama`: Direct Ollama provider implementation (legacy, used only by MCP servers).

+### Boundary Guidelines
+
+- **owlen-core**: The dependency ceiling for most crates. Keep it free of terminal logic, CLIs, or provider-specific HTTP clients. New features should expose traits or data types here and let other crates supply concrete implementations.
+- **owlen-cli**: Only orchestrates startup/shutdown. Avoid adding business logic; when a new command needs behaviour, implement it in `owlen-core` or another library crate and invoke it from the CLI.
+- **owlen-mcp-llm-server**: The only crate that should directly talk to Ollama (or other provider processes). TUI/CLI code communicates with providers exclusively through MCP clients in `owlen-core`.
+
 ## MCP Architecture (Phase 10)

 As of Phase 10, OWLEN uses a **MCP-only architecture** where all LLM interactions go through the Model Context Protocol: