diff --git a/crates/owlen-cli/Cargo.toml b/crates/owlen-cli/Cargo.toml
index 6d53808..9e36a07 100644
--- a/crates/owlen-cli/Cargo.toml
+++ b/crates/owlen-cli/Cargo.toml
@@ -24,7 +24,6 @@ required-features = ["chat-client"]
 
 [dependencies]
 owlen-core = { path = "../owlen-core" }
-owlen-ollama = { path = "../owlen-ollama" }
 # Optional TUI dependency, enabled by the "chat-client" feature.
 owlen-tui = { path = "../owlen-tui", optional = true }
diff --git a/crates/owlen-cli/src/agent_main.rs b/crates/owlen-cli/src/agent_main.rs
index 5c76fa4..b5a1c8e 100644
--- a/crates/owlen-cli/src/agent_main.rs
+++ b/crates/owlen-cli/src/agent_main.rs
@@ -11,11 +11,15 @@ use std::sync::Arc;
 use clap::Parser;
 use owlen_cli::agent::{AgentConfig, AgentExecutor};
 use owlen_core::mcp::remote_client::RemoteMcpClient;
-use owlen_ollama::OllamaProvider;
 
 /// Command‑line arguments for the agent binary.
 #[derive(Parser, Debug)]
-#[command(name = "owlen-agent", author, version, about = "Run the ReAct agent")]
+#[command(
+    name = "owlen-agent",
+    author,
+    version,
+    about = "Run the ReAct agent via MCP"
+)]
 struct Args {
     /// The initial user query.
     prompt: String,
@@ -31,11 +35,13 @@ async fn main() -> anyhow::Result<()> {
     let args = Args::parse();
 
-    // Initialise the LLM provider (Ollama) – uses default local URL.
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434")?);
-    // Initialise the MCP client (remote LLM server) – this client also knows how
-    // to call the built‑in resource tools.
-    let mcp_client = Arc::new(RemoteMcpClient::new()?);
+    // Initialise the MCP LLM client – it implements Provider and talks to the
+    // MCP LLM server which wraps Ollama. This ensures all communication goes
+    // through the MCP architecture (Phase 10 requirement).
+    let provider = Arc::new(RemoteMcpClient::new()?);
+
+    // The MCP client also serves as the tool client for resource operations
+    let mcp_client = Arc::clone(&provider) as Arc;
 
     let config = AgentConfig {
         max_iterations: args.max_iter,
diff --git a/crates/owlen-cli/src/main.rs b/crates/owlen-cli/src/main.rs
index cb0137c..210320f 100644
--- a/crates/owlen-cli/src/main.rs
+++ b/crates/owlen-cli/src/main.rs
@@ -2,8 +2,10 @@ use anyhow::Result;
 use clap::Parser;
-use owlen_core::{mode::Mode, session::SessionController, storage::StorageManager};
-use owlen_ollama::OllamaProvider;
+use owlen_core::{
+    mcp::remote_client::RemoteMcpClient, mode::Mode, session::SessionController,
+    storage::StorageManager, Provider,
+};
 use owlen_tui::tui_controller::{TuiController, TuiRequest};
 use owlen_tui::{config, ui, AppState, ChatApp, Event, EventHandler, SessionEvent};
 use std::io;
@@ -21,7 +23,7 @@ use ratatui::{prelude::CrosstermBackend, Terminal};
 /// Owlen - Terminal UI for LLM chat
 #[derive(Parser, Debug)]
 #[command(name = "owlen")]
-#[command(about = "Terminal UI for LLM chat with Ollama", long_about = None)]
+#[command(about = "Terminal UI for LLM chat via MCP", long_about = None)]
 struct Args {
     /// Start in code mode (enables all tools)
     #[arg(long, short = 'c')]
@@ -44,21 +46,16 @@ async fn main() -> Result<()> {
     let mut cfg = config::try_load_config().unwrap_or_default();
     // Disable encryption for CLI to avoid password prompts in this environment.
     cfg.privacy.encrypt_local_data = false;
-    // Determine provider configuration
-    let provider_name = cfg.general.default_provider.clone();
-    let provider_cfg = config::ensure_provider_config(&mut cfg, &provider_name).clone();
-    let provider_type = provider_cfg.provider_type.to_ascii_lowercase();
-    if provider_type != "ollama" && provider_type != "ollama-cloud" {
-        anyhow::bail!(
-            "Unsupported provider type '{}' configured for provider '{}'",
-            provider_cfg.provider_type,
-            provider_name,
-        );
-    }
-    let provider = Arc::new(OllamaProvider::from_config(
-        &provider_cfg,
-        Some(&cfg.general),
-    )?);
+
+    // Create MCP LLM client as the provider (replaces direct OllamaProvider usage)
+    let provider: Arc<dyn Provider> = if let Some(mcp_server) = cfg.mcp_servers.first() {
+        // Use configured MCP server if available
+        Arc::new(RemoteMcpClient::new_with_config(mcp_server)?)
+    } else {
+        // Fall back to default MCP LLM server discovery
+        Arc::new(RemoteMcpClient::new()?)
+    };
+
     let storage = Arc::new(StorageManager::new().await?);
     let controller =
         SessionController::new(provider, cfg, storage.clone(), tui_controller, false).await?;
diff --git a/crates/owlen-cli/tests/agent_tests.rs b/crates/owlen-cli/tests/agent_tests.rs
index f30134b..5650eb6 100644
--- a/crates/owlen-cli/tests/agent_tests.rs
+++ b/crates/owlen-cli/tests/agent_tests.rs
@@ -9,7 +9,6 @@ use owlen_cli::agent::{AgentConfig, AgentExecutor, LlmResponse};
 use owlen_core::mcp::remote_client::RemoteMcpClient;
-use owlen_ollama::OllamaProvider;
 use std::sync::Arc;
 
 #[tokio::test]
@@ -72,11 +71,11 @@ async fn test_react_parsing_with_multiline_thought() {
 }
 
 #[tokio::test]
-#[ignore] // Requires Ollama to be running
+#[ignore] // Requires MCP LLM server to be running
 async fn test_agent_single_tool_scenario() {
-    // This test requires a running Ollama instance and MCP server
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    // This test requires a running MCP LLM server (which wraps Ollama)
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc;
 
     let config = AgentConfig {
         max_iterations: 5,
@@ -109,8 +108,8 @@
 #[ignore] // Requires Ollama to be running
 async fn test_agent_multi_step_workflow() {
     // Test a query that requires multiple tool calls
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc;
 
     let config = AgentConfig {
         max_iterations: 10,
@@ -141,8 +140,8 @@
 #[tokio::test]
 #[ignore] // Requires Ollama
 async fn test_agent_iteration_limit() {
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc;
 
     let config = AgentConfig {
         max_iterations: 2, // Very low limit to test enforcement
@@ -183,8 +182,8 @@
 #[tokio::test]
 #[ignore] // Requires Ollama
 async fn test_agent_tool_budget_enforcement() {
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc;
 
     let config = AgentConfig {
         max_iterations: 20,
@@ -224,12 +223,9 @@ async fn test_agent_tool_budget_enforcement() {
 // Helper function to create a test executor
 // For parsing tests, we don't need a real connection
 fn create_test_executor() -> AgentExecutor {
-    // Create dummy instances - the parse_response method doesn't actually use them
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-
     // For parsing tests, we can accept the error from RemoteMcpClient::new()
     // since we're only testing parse_response which doesn't use the MCP client
-    let mcp_client = match RemoteMcpClient::new() {
+    let provider = match RemoteMcpClient::new() {
         Ok(client) => Arc::new(client),
         Err(_) => {
             // If MCP server binary doesn't exist, parsing tests can still run
@@ -239,6 +235,8 @@ fn create_test_executor() -> AgentExecutor {
         }
     };
 
+    let mcp_client = Arc::clone(&provider) as Arc;
+
     let config = AgentConfig::default();
     AgentExecutor::new(provider, mcp_client, config, None)
 }
diff --git a/crates/owlen-tui/src/chat_app.rs b/crates/owlen-tui/src/chat_app.rs
index 5ed1c1f..74dc0b7 100644
--- a/crates/owlen-tui/src/chat_app.rs
+++ b/crates/owlen-tui/src/chat_app.rs
@@ -311,7 +311,7 @@ impl ChatApp {
     pub async fn set_mode(&mut self, mode: owlen_core::mode::Mode) {
         self.operating_mode = mode;
         self.status = format!("Switched to {} mode", mode);
-        // TODO: Update MCP client mode when MCP integration is fully implemented
+        // Mode switching is handled by the SessionController's tool filtering
     }
 
     pub(crate) fn model_selector_items(&self) -> &[ModelSelectorItem] {
diff --git a/examples/basic_chat.rs b/examples/basic_chat.rs
deleted file mode 100644
index 1d8471f..0000000
--- a/examples/basic_chat.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-// This example demonstrates a basic chat interaction without the TUI.
-
-use owlen_core::model::Model;
-use owlen_core::provider::Provider;
-use owlen_core::session::Session;
-use owlen_ollama::OllamaProvider; // Assuming you have an Ollama provider
-
-#[tokio::main]
-async fn main() -> Result<(), anyhow::Error> {
-    // This example requires a running Ollama instance.
-    // Make sure you have a model available, e.g., `ollama pull llama2`
-
-    let provider = OllamaProvider;
-    let model = Model::new("llama2"); // Change to a model you have
-    let mut session = Session::new("basic-chat-session");
-
-    println!("Starting basic chat with model: {}", model.name);
-
-    let user_message = "What is the capital of France?";
-    session.add_message("user", user_message);
-    println!("User: {}", user_message);
-
-    // Send the chat to the provider
-    let response = provider.chat(&session, &model).await?;
-
-    session.add_message("bot", &response);
-    println!("Bot: {}", response);
-
-    Ok(())
-}
diff --git a/examples/custom_provider.rs b/examples/custom_provider.rs
deleted file mode 100644
index 5bb9075..0000000
--- a/examples/custom_provider.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-// This example demonstrates how to implement a custom provider.
-
-use async_trait::async_trait;
-use owlen_core::model::Model;
-use owlen_core::provider::Provider;
-use owlen_core::session::Session;
-
-// Define a struct for your custom provider.
-pub struct MyCustomProvider;
-
-// Implement the `Provider` trait for your struct.
-#[async_trait]
-impl Provider for MyCustomProvider {
-    fn name(&self) -> &str {
-        "custom-provider"
-    }
-
-    async fn chat(&self, session: &Session, model: &Model) -> Result {
-        println!(
-            "Custom provider received chat request for model: {}",
-            model.name
-        );
-        // In a real implementation, you would send the session data to an API.
-        let message_count = session.get_messages().len();
-        Ok(format!(
-            "This is a custom response. You have {} messages in your session.",
-            message_count
-        ))
-    }
-}
-
-#[tokio::main]
-async fn main() -> Result<(), anyhow::Error> {
-    let provider = MyCustomProvider;
-    let model = Model::new("custom-model");
-    let mut session = Session::new("custom-session");
-
-    session.add_message("user", "Hello, custom provider!");
-
-    let response = provider.chat(&session, &model).await?;
-
-    println!("Provider response: {}", response);
-
-    Ok(())
-}
diff --git a/examples/mcp_chat.rs b/examples/mcp_chat.rs
new file mode 100644
index 0000000..0d806b6
--- /dev/null
+++ b/examples/mcp_chat.rs
@@ -0,0 +1,71 @@
+//! Example demonstrating MCP-based chat interaction.
+//!
+//! This example shows the recommended way to interact with LLMs via the MCP architecture.
+//! It uses `RemoteMcpClient` which communicates with the MCP LLM server.
+//!
+//! Prerequisites:
+//! - Build the MCP LLM server: `cargo build --release -p owlen-mcp-llm-server`
+//! - Ensure Ollama is running with a model available
+
+use owlen_core::{
+    mcp::remote_client::RemoteMcpClient,
+    types::{ChatParameters, ChatRequest, Message, Role},
+    Provider,
+};
+use std::sync::Arc;
+
+#[tokio::main]
+async fn main() -> Result<(), anyhow::Error> {
+    println!("🦉 Owlen MCP Chat Example\n");
+
+    // Create MCP client - this will spawn/connect to the MCP LLM server
+    println!("Connecting to MCP LLM server...");
+    let client = Arc::new(RemoteMcpClient::new()?);
+    println!("✓ Connected\n");
+
+    // List available models
+    println!("Fetching available models...");
+    let models = client.list_models().await?;
+    println!("Available models:");
+    for model in &models {
+        println!("  - {} ({})", model.name, model.provider);
+    }
+    println!();
+
+    // Select first available model or default
+    let model_name = models
+        .first()
+        .map(|m| m.id.clone())
+        .unwrap_or_else(|| "llama3.2:latest".to_string());
+    println!("Using model: {}\n", model_name);
+
+    // Create a simple chat request
+    let user_message = "What is the capital of France? Please be concise.";
+    println!("User: {}", user_message);
+
+    let request = ChatRequest {
+        model: model_name,
+        messages: vec![Message::new(Role::User, user_message.to_string())],
+        parameters: ChatParameters {
+            temperature: Some(0.7),
+            max_tokens: Some(100),
+            stream: false,
+            extra: std::collections::HashMap::new(),
+        },
+        tools: None,
+    };
+
+    // Send request and get response
+    println!("\nAssistant: ");
+    let response = client.chat(request).await?;
+    println!("{}", response.message.content);
+
+    if let Some(usage) = response.usage {
+        println!(
+            "\n📊 Tokens: {} prompt + {} completion = {} total",
+            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
+        );
+    }
+
+    Ok(())
+}
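
Note: a minimal sketch of how one might try the new example end to end, assuming the root `examples/` directory is still wired up as Cargo example targets (as it was for the removed `basic_chat.rs`) and that Ollama has at least one model pulled locally:

    cargo build --release -p owlen-mcp-llm-server   # build the MCP LLM server the example spawns/connects to
    ollama pull llama3.2                            # any local model works; llama3.2 matches the example's fallback
    cargo run --example mcp_chat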