feat(phase10): complete MCP-only architecture migration

This commit completes Phase 10 of the MCP migration by removing all
direct provider usage from CLI/TUI and enforcing MCP-first architecture.

## Changes

### Core Architecture
- **main.rs**: Replaced OllamaProvider with RemoteMcpClient
  - Uses MCP server configuration from config.toml if available
  - Falls back to auto-discovery of MCP LLM server binary
- **agent_main.rs**: Unified provider and MCP client to single RemoteMcpClient
  - Simplifies initialization with the Arc::clone pattern (see the sketch after this list)
  - All LLM communication now goes through MCP protocol
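
The unified initialization pattern, condensed from the agent_main.rs diff below. Type and constructor names are taken from this commit; the anyhow error type is assumed from the surrounding code, and a plain `Arc::clone` stands in for the commit's `as Arc<RemoteMcpClient>` cast:

```rust
use std::sync::Arc;

use owlen_core::mcp::remote_client::RemoteMcpClient;

// One client, two roles: the same RemoteMcpClient acts as the LLM
// Provider and as the tool client for resource operations.
fn init_clients() -> anyhow::Result<(Arc<RemoteMcpClient>, Arc<RemoteMcpClient>)> {
    let provider = Arc::new(RemoteMcpClient::new()?);
    // Second handle to the same instance; no extra MCP connection is made.
    let mcp_client = Arc::clone(&provider);
    Ok((provider, mcp_client))
}
```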

### Dependencies
- **Cargo.toml**: Removed owlen-ollama dependency from owlen-cli
  - CLI no longer knows about Ollama implementation details
  - Clean separation: only MCP servers use provider crates internally

### Tests
- **agent_tests.rs**: Updated all tests to use RemoteMcpClient
  - Replaced OllamaProvider::new() with RemoteMcpClient::new()
  - Updated test documentation to reflect MCP requirements
  - All tests compile and run successfully

### Examples
- **Removed**: custom_provider.rs, basic_chat.rs (deprecated)
- **Added**: mcp_chat.rs - demonstrates recommended MCP-based usage
  - Shows how to use RemoteMcpClient for LLM interactions (sketched below)
  - Includes model listing and chat request examples
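
A sketch of the general shape of mcp_chat.rs. Only `RemoteMcpClient::new()` appears in this commit's diff; `list_models` and `chat` are hypothetical method names standing in for whatever owlen-core's Provider trait actually exposes:

```rust
use std::sync::Arc;

use owlen_core::mcp::remote_client::RemoteMcpClient;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Discovers and connects to the MCP LLM server binary, as in main.rs.
    let client = Arc::new(RemoteMcpClient::new()?);

    // Hypothetical methods; the actual Provider trait surface differs.
    for model in client.list_models().await? {
        println!("model: {model}");
    }
    let reply = client.chat("some-model", "Hello over MCP!").await?;
    println!("{reply}");
    Ok(())
}
```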

### Cleanup
- Removed outdated TODO about MCP integration (now complete)
- Updated comments to reflect current MCP architecture

## Architecture

```
CLI/TUI → RemoteMcpClient (impl Provider)
          ↓ MCP Protocol (STDIO/HTTP/WS)
          MCP LLM Server → OllamaProvider → Ollama
```
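
In code, the top arrow of this diagram is just a trait bound: CLI/TUI code holds an `Arc<dyn Provider>` (as in the main.rs diff below) and never names a concrete backend. A self-contained sketch with a simplified stand-in trait, since the real Provider trait in owlen-core has a different surface:

```rust
use std::sync::Arc;

// Simplified stand-in for owlen-core's Provider trait (illustration only).
#[async_trait::async_trait]
trait Provider: Send + Sync {
    async fn complete(&self, prompt: &str) -> anyhow::Result<String>;
}

// CLI/TUI code depends on the trait object alone; RemoteMcpClient, or any
// other MCP-speaking client, can be the concrete type behind it.
async fn run(provider: Arc<dyn Provider>, prompt: &str) -> anyhow::Result<()> {
    println!("{}", provider.complete(prompt).await?);
    Ok(())
}
```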

## Benefits
- Clean separation of concerns
- CLI is protocol-agnostic (only knows MCP)
- Easier to add new LLM backends (just implement an MCP server)
- All tests passing
- Full workspace builds successfully

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 9545a4b3ad (parent e94df2c48a)
2025-10-10 22:29:20 +02:00
8 changed files with 113 additions and 117 deletions

**Cargo.toml** (owlen-cli)

@@ -24,7 +24,6 @@ required-features = ["chat-client"]
 [dependencies]
 owlen-core = { path = "../owlen-core" }
-owlen-ollama = { path = "../owlen-ollama" }
 # Optional TUI dependency, enabled by the "chat-client" feature.
 owlen-tui = { path = "../owlen-tui", optional = true }

**agent_main.rs**

@@ -11,11 +11,15 @@ use std::sync::Arc;
 use clap::Parser;
 use owlen_cli::agent::{AgentConfig, AgentExecutor};
 use owlen_core::mcp::remote_client::RemoteMcpClient;
-use owlen_ollama::OllamaProvider;

 /// Command-line arguments for the agent binary.
 #[derive(Parser, Debug)]
-#[command(name = "owlen-agent", author, version, about = "Run the ReAct agent")]
+#[command(
+    name = "owlen-agent",
+    author,
+    version,
+    about = "Run the ReAct agent via MCP"
+)]
 struct Args {
     /// The initial user query.
     prompt: String,
@@ -31,11 +35,13 @@ struct Args {
 async fn main() -> anyhow::Result<()> {
     let args = Args::parse();

-    // Initialise the LLM provider (Ollama); uses the default local URL.
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434")?);
-    // Initialise the MCP client (remote LLM server); this client also knows how
-    // to call the built-in resource tools.
-    let mcp_client = Arc::new(RemoteMcpClient::new()?);
+    // Initialise the MCP LLM client; it implements Provider and talks to the
+    // MCP LLM server, which wraps Ollama. This ensures all communication goes
+    // through the MCP architecture (Phase 10 requirement).
+    let provider = Arc::new(RemoteMcpClient::new()?);
+    // The MCP client also serves as the tool client for resource operations.
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

     let config = AgentConfig {
         max_iterations: args.max_iter,

**main.rs**

@@ -2,8 +2,10 @@
 use anyhow::Result;
 use clap::Parser;
-use owlen_core::{mode::Mode, session::SessionController, storage::StorageManager};
-use owlen_ollama::OllamaProvider;
+use owlen_core::{
+    mcp::remote_client::RemoteMcpClient, mode::Mode, session::SessionController,
+    storage::StorageManager, Provider,
+};
 use owlen_tui::tui_controller::{TuiController, TuiRequest};
 use owlen_tui::{config, ui, AppState, ChatApp, Event, EventHandler, SessionEvent};
 use std::io;
@@ -21,7 +23,7 @@ use ratatui::{prelude::CrosstermBackend, Terminal};
 /// Owlen - Terminal UI for LLM chat
 #[derive(Parser, Debug)]
 #[command(name = "owlen")]
-#[command(about = "Terminal UI for LLM chat with Ollama", long_about = None)]
+#[command(about = "Terminal UI for LLM chat via MCP", long_about = None)]
 struct Args {
     /// Start in code mode (enables all tools)
     #[arg(long, short = 'c')]
@@ -44,21 +46,16 @@ async fn main() -> Result<()> {
     let mut cfg = config::try_load_config().unwrap_or_default();
     // Disable encryption for CLI to avoid password prompts in this environment.
     cfg.privacy.encrypt_local_data = false;
-    // Determine provider configuration
-    let provider_name = cfg.general.default_provider.clone();
-    let provider_cfg = config::ensure_provider_config(&mut cfg, &provider_name).clone();
-    let provider_type = provider_cfg.provider_type.to_ascii_lowercase();
-    if provider_type != "ollama" && provider_type != "ollama-cloud" {
-        anyhow::bail!(
-            "Unsupported provider type '{}' configured for provider '{}'",
-            provider_cfg.provider_type,
-            provider_name,
-        );
-    }
-    let provider = Arc::new(OllamaProvider::from_config(
-        &provider_cfg,
-        Some(&cfg.general),
-    )?);
+    // Create MCP LLM client as the provider (replaces direct OllamaProvider usage)
+    let provider: Arc<dyn Provider> = if let Some(mcp_server) = cfg.mcp_servers.first() {
+        // Use configured MCP server if available
+        Arc::new(RemoteMcpClient::new_with_config(mcp_server)?)
+    } else {
+        // Fall back to default MCP LLM server discovery
+        Arc::new(RemoteMcpClient::new()?)
+    };
     let storage = Arc::new(StorageManager::new().await?);
     let controller =
         SessionController::new(provider, cfg, storage.clone(), tui_controller, false).await?;

**agent_tests.rs**

@@ -9,7 +9,6 @@
 use owlen_cli::agent::{AgentConfig, AgentExecutor, LlmResponse};
 use owlen_core::mcp::remote_client::RemoteMcpClient;
-use owlen_ollama::OllamaProvider;
 use std::sync::Arc;

 #[tokio::test]
@@ -72,11 +71,11 @@ async fn test_react_parsing_with_multiline_thought() {
 }

 #[tokio::test]
-#[ignore] // Requires Ollama to be running
+#[ignore] // Requires MCP LLM server to be running
 async fn test_agent_single_tool_scenario() {
-    // This test requires a running Ollama instance and MCP server
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    // This test requires a running MCP LLM server (which wraps Ollama)
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
     let config = AgentConfig {
         max_iterations: 5,
@@ -109,8 +108,8 @@ async fn test_agent_single_tool_scenario() {
 #[ignore] // Requires Ollama to be running
 async fn test_agent_multi_step_workflow() {
     // Test a query that requires multiple tool calls
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
     let config = AgentConfig {
         max_iterations: 10,
@@ -141,8 +140,8 @@ async fn test_agent_multi_step_workflow() {
 #[tokio::test]
 #[ignore] // Requires Ollama
 async fn test_agent_iteration_limit() {
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
     let config = AgentConfig {
         max_iterations: 2, // Very low limit to test enforcement
@@ -183,8 +182,8 @@ async fn test_agent_iteration_limit() {
 #[tokio::test]
 #[ignore] // Requires Ollama
 async fn test_agent_tool_budget_enforcement() {
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
     let config = AgentConfig {
         max_iterations: 20,
@@ -224,12 +223,9 @@ async fn test_agent_tool_budget_enforcement() {
 // Helper function to create a test executor
 // For parsing tests, we don't need a real connection
 fn create_test_executor() -> AgentExecutor {
-    // Create dummy instances - the parse_response method doesn't actually use them
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = match RemoteMcpClient::new() {
+    // For parsing tests, we can accept the error from RemoteMcpClient::new()
+    // since we're only testing parse_response which doesn't use the MCP client
+    let provider = match RemoteMcpClient::new() {
         Ok(client) => Arc::new(client),
         Err(_) => {
             // If MCP server binary doesn't exist, parsing tests can still run
@@ -239,6 +235,8 @@ fn create_test_executor() -> AgentExecutor {
         }
     };
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
+
     let config = AgentConfig::default();
     AgentExecutor::new(provider, mcp_client, config, None)
 }

**ChatApp** (owlen-tui)

@@ -311,7 +311,7 @@ impl ChatApp {
     pub async fn set_mode(&mut self, mode: owlen_core::mode::Mode) {
         self.operating_mode = mode;
         self.status = format!("Switched to {} mode", mode);
-        // TODO: Update MCP client mode when MCP integration is fully implemented
+        // Mode switching is handled by the SessionController's tool filtering
     }

     pub(crate) fn model_selector_items(&self) -> &[ModelSelectorItem] {