feat(phase10): complete MCP-only architecture migration
This commit completes Phase 10 of the MCP migration by removing all
direct provider usage from the CLI/TUI and enforcing an MCP-first architecture.
## Changes
### Core Architecture
- **main.rs**: Replaced OllamaProvider with RemoteMcpClient
  - Uses the MCP server configuration from config.toml if available
  - Falls back to auto-discovery of the MCP LLM server binary
- **agent_main.rs**: Unified the provider and MCP client into a single RemoteMcpClient
  - Simplifies initialization with the Arc::clone pattern (see the sketch after this list)
  - All LLM communication now goes through the MCP protocol
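The main.rs/agent_main.rs bodies are not reproduced in this message, but the pattern matches the test wiring in the diff further below; a minimal sketch, assuming `RemoteMcpClient::new()` performs the binary auto-discovery and that `AgentExecutor::new(provider, mcp_client, config, None)` keeps the signature used in the tests (`build_executor` is a hypothetical helper name):
```rust
use std::sync::Arc;

use owlen_cli::agent::{AgentConfig, AgentExecutor};
use owlen_core::mcp::remote_client::RemoteMcpClient;

// Hypothetical helper illustrating the unified wiring: one RemoteMcpClient is
// shared as both the Provider implementation and the MCP client.
fn build_executor() -> AgentExecutor {
    // Auto-discovery of the MCP LLM server binary; config.toml-driven setup
    // (when present) is assumed to happen before this point.
    let provider = Arc::new(RemoteMcpClient::new().expect("MCP LLM server not found"));
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    AgentExecutor::new(provider, mcp_client, AgentConfig::default(), None)
}
```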
### Dependencies
- **Cargo.toml**: Removed owlen-ollama dependency from owlen-cli
- CLI no longer knows about Ollama implementation details
- Clean separation: only MCP servers use provider crates internally
### Tests
- **agent_tests.rs**: Updated all tests to use RemoteMcpClient
- Replaced OllamaProvider::new() with RemoteMcpClient::new()
- Updated test documentation to reflect MCP requirements
- All tests compile and run successfully
### Examples
- **Removed**: custom_provider.rs, basic_chat.rs (deprecated)
- **Added**: mcp_chat.rs, which demonstrates the recommended MCP-based usage (see the sketch after this list)
  - Shows how to use RemoteMcpClient for LLM interactions
  - Includes model listing and chat request examples
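The example file itself is not included in this message; a minimal sketch of what mcp_chat.rs-style usage could look like, assuming the provider interface exposes async `list_models()` and `chat()` methods (those method names, the model name, and the return types are assumptions, not the actual owlen-core API):
```rust
use owlen_core::mcp::remote_client::RemoteMcpClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // All LLM traffic goes over MCP; no Ollama types appear in the example.
    let client = RemoteMcpClient::new().expect("MCP LLM server not found");

    // Hypothetical provider calls: the real method names and request/response
    // types live in owlen-core and may differ.
    let models = client.list_models().await?;
    println!("available models: {models:?}");

    let reply = client.chat("some-model", "Say hello over MCP.").await?;
    println!("{reply}");

    Ok(())
}
```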
### Cleanup
- Removed outdated TODO about MCP integration (now complete)
- Updated comments to reflect current MCP architecture
## Architecture
```
CLI/TUI → RemoteMcpClient (impl Provider)
        ↓ MCP Protocol (STDIO/HTTP/WS)
MCP LLM Server → OllamaProvider → Ollama
```
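Because RemoteMcpClient implements the Provider trait (per the diagram), CLI code can depend on the trait alone; a minimal sketch of that separation, assuming Provider is object-safe and exported from owlen-core (the module path `owlen_core::provider::Provider` is a guess):
```rust
use std::sync::Arc;

use owlen_core::mcp::remote_client::RemoteMcpClient;
// Assumed path; the commit message only states that RemoteMcpClient impls Provider.
use owlen_core::provider::Provider;

// The CLI layer never names OllamaProvider: whichever backend sits behind the
// MCP LLM server is invisible here, which is what keeps the CLI protocol-agnostic.
fn connect() -> Arc<dyn Provider> {
    Arc::new(RemoteMcpClient::new().expect("failed to start MCP LLM client"))
}
```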
## Benefits
- ✅ Clean separation of concerns
- ✅ CLI is protocol-agnostic (only knows MCP)
- ✅ Easier to add new LLM backends (just implement an MCP server)
- ✅ All tests passing
- ✅ Full workspace builds successfully
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
agent_tests.rs:
```diff
@@ -9,7 +9,6 @@
 use owlen_cli::agent::{AgentConfig, AgentExecutor, LlmResponse};
 use owlen_core::mcp::remote_client::RemoteMcpClient;
-use owlen_ollama::OllamaProvider;
 use std::sync::Arc;
 
 #[tokio::test]
@@ -72,11 +71,11 @@ async fn test_react_parsing_with_multiline_thought() {
 }
 
 #[tokio::test]
-#[ignore] // Requires Ollama to be running
+#[ignore] // Requires MCP LLM server to be running
 async fn test_agent_single_tool_scenario() {
-    // This test requires a running Ollama instance and MCP server
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    // This test requires a running MCP LLM server (which wraps Ollama)
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
 
     let config = AgentConfig {
         max_iterations: 5,
@@ -109,8 +108,8 @@ async fn test_agent_single_tool_scenario() {
 #[ignore] // Requires Ollama to be running
 async fn test_agent_multi_step_workflow() {
     // Test a query that requires multiple tool calls
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
 
     let config = AgentConfig {
         max_iterations: 10,
@@ -141,8 +140,8 @@ async fn test_agent_multi_step_workflow() {
 #[tokio::test]
 #[ignore] // Requires Ollama
 async fn test_agent_iteration_limit() {
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
 
     let config = AgentConfig {
         max_iterations: 2, // Very low limit to test enforcement
@@ -183,8 +182,8 @@ async fn test_agent_iteration_limit() {
 #[tokio::test]
 #[ignore] // Requires Ollama
 async fn test_agent_tool_budget_enforcement() {
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-    let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
+    let provider = Arc::new(RemoteMcpClient::new().unwrap());
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
 
     let config = AgentConfig {
         max_iterations: 20,
@@ -224,12 +223,9 @@ async fn test_agent_tool_budget_enforcement() {
 // Helper function to create a test executor
 // For parsing tests, we don't need a real connection
 fn create_test_executor() -> AgentExecutor {
-    // Create dummy instances - the parse_response method doesn't actually use them
-    let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
-
     // For parsing tests, we can accept the error from RemoteMcpClient::new()
     // since we're only testing parse_response which doesn't use the MCP client
-    let mcp_client = match RemoteMcpClient::new() {
+    let provider = match RemoteMcpClient::new() {
         Ok(client) => Arc::new(client),
         Err(_) => {
             // If MCP server binary doesn't exist, parsing tests can still run
@@ -239,6 +235,8 @@ fn create_test_executor() -> AgentExecutor {
         }
     };
 
+    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;
+
     let config = AgentConfig::default();
     AgentExecutor::new(provider, mcp_client, config, None)
 }
```