Compare commits
2 Commits
fe414d49e6
...
33d11ae223
| Author | SHA1 | Date | |
|---|---|---|---|
| 33d11ae223 | |||
| 05e90d3e2b |
@@ -6,6 +6,8 @@ members = [
|
|||||||
"crates/owlen-cli",
|
"crates/owlen-cli",
|
||||||
"crates/owlen-ollama",
|
"crates/owlen-ollama",
|
||||||
"crates/owlen-mcp-server",
|
"crates/owlen-mcp-server",
|
||||||
|
"crates/owlen-mcp-llm-server",
|
||||||
|
"crates/owlen-mcp-client",
|
||||||
]
|
]
|
||||||
exclude = []
|
exclude = []
|
||||||
|
|
||||||
@@ -50,6 +52,7 @@ ring = "0.17"
|
|||||||
keyring = "3.0"
|
keyring = "3.0"
|
||||||
chrono = { version = "0.4", features = ["serde"] }
|
chrono = { version = "0.4", features = ["serde"] }
|
||||||
urlencoding = "2.1"
|
urlencoding = "2.1"
|
||||||
|
regex = "1.10"
|
||||||
rpassword = "7.3"
|
rpassword = "7.3"
|
||||||
sqlx = { version = "0.7", default-features = false, features = ["runtime-tokio-rustls", "sqlite", "macros", "uuid", "chrono", "migrate"] }
|
sqlx = { version = "0.7", default-features = false, features = ["runtime-tokio-rustls", "sqlite", "macros", "uuid", "chrono", "migrate"] }
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ description = "Command-line interface for OWLEN LLM client"
|
|||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["chat-client"]
|
default = ["chat-client"]
|
||||||
chat-client = []
|
chat-client = ["owlen-tui"]
|
||||||
code-client = []
|
code-client = []
|
||||||
|
|
||||||
[[bin]]
|
[[bin]]
|
||||||
@@ -23,10 +23,16 @@ name = "owlen-code"
|
|||||||
path = "src/code_main.rs"
|
path = "src/code_main.rs"
|
||||||
required-features = ["code-client"]
|
required-features = ["code-client"]
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "owlen-agent"
|
||||||
|
path = "src/agent_main.rs"
|
||||||
|
required-features = ["chat-client"]
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
owlen-core = { path = "../owlen-core" }
|
owlen-core = { path = "../owlen-core" }
|
||||||
owlen-tui = { path = "../owlen-tui" }
|
|
||||||
owlen-ollama = { path = "../owlen-ollama" }
|
owlen-ollama = { path = "../owlen-ollama" }
|
||||||
|
# Optional TUI dependency, enabled by the "chat-client" feature.
|
||||||
|
owlen-tui = { path = "../owlen-tui", optional = true }
|
||||||
|
|
||||||
# CLI framework
|
# CLI framework
|
||||||
clap = { version = "4.0", features = ["derive"] }
|
clap = { version = "4.0", features = ["derive"] }
|
||||||
@@ -43,3 +49,6 @@ crossterm = { workspace = true }
|
|||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
regex = "1"
|
||||||
|
thiserror = "1"
|
||||||
|
dirs = "5"
|
||||||
|
|||||||
54
crates/owlen-cli/src/agent_main.rs
Normal file
54
crates/owlen-cli/src/agent_main.rs
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
//! Simple entry point for the ReAct agentic executor.
|
||||||
|
//!
|
||||||
|
//! Usage: `owlen-agent "<prompt>" [--model <model>] [--max-iter <n>]`
|
||||||
|
//!
|
||||||
|
//! This binary demonstrates Phase 4 without the full TUI. It creates an
|
||||||
|
//! OllamaProvider, a RemoteMcpClient, runs the AgentExecutor and prints the
|
||||||
|
//! final answer.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use owlen_cli::agent::{AgentConfig, AgentExecutor};
|
||||||
|
use owlen_core::mcp::remote_client::RemoteMcpClient;
|
||||||
|
use owlen_ollama::OllamaProvider;
|
||||||
|
|
||||||
|
/// Command‑line arguments for the agent binary.
|
||||||
|
#[derive(Parser, Debug)]
|
||||||
|
#[command(name = "owlen-agent", author, version, about = "Run the ReAct agent")]
|
||||||
|
struct Args {
|
||||||
|
/// The initial user query.
|
||||||
|
prompt: String,
|
||||||
|
/// Model to use (defaults to Ollama default).
|
||||||
|
#[arg(long)]
|
||||||
|
model: Option<String>,
|
||||||
|
/// Maximum ReAct iterations.
|
||||||
|
#[arg(long, default_value_t = 10)]
|
||||||
|
max_iter: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let args = Args::parse();
|
||||||
|
|
||||||
|
// Initialise the LLM provider (Ollama) – uses default local URL.
|
||||||
|
let provider = Arc::new(OllamaProvider::new("http://localhost:11434")?);
|
||||||
|
// Initialise the MCP client (remote LLM server) – this client also knows how
|
||||||
|
// to call the built‑in resource tools.
|
||||||
|
let mcp_client = Arc::new(RemoteMcpClient::new()?);
|
||||||
|
|
||||||
|
let config = AgentConfig {
|
||||||
|
max_iterations: args.max_iter,
|
||||||
|
model: args.model.unwrap_or_else(|| "llama3.2:latest".to_string()),
|
||||||
|
..AgentConfig::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
let executor = AgentExecutor::new(provider, mcp_client, config, None);
|
||||||
|
match executor.run(args.prompt).await {
|
||||||
|
Ok(answer) => {
|
||||||
|
println!("\nFinal answer:\n{}", answer);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Err(e) => Err(anyhow::anyhow!(e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
8
crates/owlen-cli/src/lib.rs
Normal file
8
crates/owlen-cli/src/lib.rs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
//! Library portion of the `owlen-cli` crate.
|
||||||
|
//!
|
||||||
|
//! It currently only re‑exports the `agent` module used by the standalone
|
||||||
|
//! `owlen-agent` binary. Additional shared functionality can be added here in
|
||||||
|
//! the future.
|
||||||
|
|
||||||
|
// Re-export agent module from owlen-core
|
||||||
|
pub use owlen_core::agent;
|
||||||
271
crates/owlen-cli/tests/agent_tests.rs
Normal file
271
crates/owlen-cli/tests/agent_tests.rs
Normal file
@@ -0,0 +1,271 @@
|
|||||||
|
//! Integration tests for the ReAct agent loop functionality.
|
||||||
|
//!
|
||||||
|
//! These tests verify that the agent executor correctly:
|
||||||
|
//! - Parses ReAct formatted responses
|
||||||
|
//! - Executes tool calls
|
||||||
|
//! - Handles multi-step workflows
|
||||||
|
//! - Recovers from errors
|
||||||
|
//! - Respects iteration limits
|
||||||
|
|
||||||
|
use owlen_cli::agent::{AgentConfig, AgentExecutor, LlmResponse};
|
||||||
|
use owlen_core::mcp::remote_client::RemoteMcpClient;
|
||||||
|
use owlen_ollama::OllamaProvider;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_react_parsing_tool_call() {
|
||||||
|
let executor = create_test_executor();
|
||||||
|
|
||||||
|
// Test parsing a tool call with JSON arguments
|
||||||
|
let text = "THOUGHT: I should search for information\nACTION: web_search\nACTION_INPUT: {\"query\": \"rust async programming\"}\n";
|
||||||
|
|
||||||
|
let result = executor.parse_response(text);
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(LlmResponse::ToolCall {
|
||||||
|
thought,
|
||||||
|
tool_name,
|
||||||
|
arguments,
|
||||||
|
}) => {
|
||||||
|
assert_eq!(thought, "I should search for information");
|
||||||
|
assert_eq!(tool_name, "web_search");
|
||||||
|
assert_eq!(arguments["query"], "rust async programming");
|
||||||
|
}
|
||||||
|
other => panic!("Expected ToolCall, got: {:?}", other),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_react_parsing_final_answer() {
|
||||||
|
let executor = create_test_executor();
|
||||||
|
|
||||||
|
let text = "THOUGHT: I have enough information now\nACTION: final_answer\nACTION_INPUT: The answer is 42\n";
|
||||||
|
|
||||||
|
let result = executor.parse_response(text);
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(LlmResponse::FinalAnswer { thought, answer }) => {
|
||||||
|
assert_eq!(thought, "I have enough information now");
|
||||||
|
assert_eq!(answer, "The answer is 42");
|
||||||
|
}
|
||||||
|
other => panic!("Expected FinalAnswer, got: {:?}", other),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_react_parsing_with_multiline_thought() {
|
||||||
|
let executor = create_test_executor();
|
||||||
|
|
||||||
|
let text = "THOUGHT: This is a complex\nmulti-line thought\nACTION: list_files\nACTION_INPUT: {\"path\": \".\"}\n";
|
||||||
|
|
||||||
|
let result = executor.parse_response(text);
|
||||||
|
|
||||||
|
// The regex currently only captures until first newline
|
||||||
|
// This test documents current behavior
|
||||||
|
match result {
|
||||||
|
Ok(LlmResponse::ToolCall { thought, .. }) => {
|
||||||
|
// Regex pattern stops at first \n after THOUGHT:
|
||||||
|
assert!(thought.contains("This is a complex"));
|
||||||
|
}
|
||||||
|
other => panic!("Expected ToolCall, got: {:?}", other),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore] // Requires Ollama to be running
|
||||||
|
async fn test_agent_single_tool_scenario() {
|
||||||
|
// This test requires a running Ollama instance and MCP server
|
||||||
|
let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
|
||||||
|
let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
|
||||||
|
|
||||||
|
let config = AgentConfig {
|
||||||
|
max_iterations: 5,
|
||||||
|
model: "llama3.2".to_string(),
|
||||||
|
temperature: Some(0.7),
|
||||||
|
max_tokens: None,
|
||||||
|
max_tool_calls: 10,
|
||||||
|
};
|
||||||
|
|
||||||
|
let executor = AgentExecutor::new(provider, mcp_client, config, None);
|
||||||
|
|
||||||
|
// Simple query that should complete in one tool call
|
||||||
|
let result = executor
|
||||||
|
.run("List files in the current directory".to_string())
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(answer) => {
|
||||||
|
assert!(!answer.is_empty(), "Answer should not be empty");
|
||||||
|
println!("Agent answer: {}", answer);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
// It's okay if this fails due to LLM not following format
|
||||||
|
println!("Agent test skipped: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore] // Requires Ollama to be running
|
||||||
|
async fn test_agent_multi_step_workflow() {
|
||||||
|
// Test a query that requires multiple tool calls
|
||||||
|
let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
|
||||||
|
let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
|
||||||
|
|
||||||
|
let config = AgentConfig {
|
||||||
|
max_iterations: 10,
|
||||||
|
model: "llama3.2".to_string(),
|
||||||
|
temperature: Some(0.5), // Lower temperature for more consistent behavior
|
||||||
|
max_tokens: None,
|
||||||
|
max_tool_calls: 20,
|
||||||
|
};
|
||||||
|
|
||||||
|
let executor = AgentExecutor::new(provider, mcp_client, config, None);
|
||||||
|
|
||||||
|
// Query requiring multiple steps: list -> read -> analyze
|
||||||
|
let result = executor
|
||||||
|
.run("Find all Rust files and tell me which one contains 'Agent'".to_string())
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(answer) => {
|
||||||
|
assert!(!answer.is_empty());
|
||||||
|
println!("Multi-step answer: {}", answer);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
println!("Multi-step test skipped: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore] // Requires Ollama
|
||||||
|
async fn test_agent_iteration_limit() {
|
||||||
|
let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
|
||||||
|
let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
|
||||||
|
|
||||||
|
let config = AgentConfig {
|
||||||
|
max_iterations: 2, // Very low limit to test enforcement
|
||||||
|
model: "llama3.2".to_string(),
|
||||||
|
temperature: Some(0.7),
|
||||||
|
max_tokens: None,
|
||||||
|
max_tool_calls: 5,
|
||||||
|
};
|
||||||
|
|
||||||
|
let executor = AgentExecutor::new(provider, mcp_client, config, None);
|
||||||
|
|
||||||
|
// Complex query that would require many iterations
|
||||||
|
let result = executor
|
||||||
|
.run("Perform an exhaustive analysis of all files".to_string())
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// Should hit the iteration limit (or parse error if LLM doesn't follow format)
|
||||||
|
match result {
|
||||||
|
Err(e) => {
|
||||||
|
let error_str = format!("{}", e);
|
||||||
|
// Accept either iteration limit error or parse error (LLM didn't follow ReAct format)
|
||||||
|
assert!(
|
||||||
|
error_str.contains("Maximum iterations")
|
||||||
|
|| error_str.contains("2")
|
||||||
|
|| error_str.contains("parse"),
|
||||||
|
"Expected iteration limit or parse error, got: {}",
|
||||||
|
error_str
|
||||||
|
);
|
||||||
|
println!("Test passed: agent stopped with error: {}", error_str);
|
||||||
|
}
|
||||||
|
Ok(_) => {
|
||||||
|
// It's possible the LLM completed within 2 iterations
|
||||||
|
println!("Agent completed within iteration limit");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore] // Requires Ollama
|
||||||
|
async fn test_agent_tool_budget_enforcement() {
|
||||||
|
let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
|
||||||
|
let mcp_client = Arc::new(RemoteMcpClient::new().unwrap());
|
||||||
|
|
||||||
|
let config = AgentConfig {
|
||||||
|
max_iterations: 20,
|
||||||
|
model: "llama3.2".to_string(),
|
||||||
|
temperature: Some(0.7),
|
||||||
|
max_tokens: None,
|
||||||
|
max_tool_calls: 3, // Very low tool call budget
|
||||||
|
};
|
||||||
|
|
||||||
|
let executor = AgentExecutor::new(provider, mcp_client, config, None);
|
||||||
|
|
||||||
|
// Query that would require many tool calls
|
||||||
|
let result = executor
|
||||||
|
.run("Read every file in the project and summarize them all".to_string())
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// Should hit the tool call budget (or parse error if LLM doesn't follow format)
|
||||||
|
match result {
|
||||||
|
Err(e) => {
|
||||||
|
let error_str = format!("{}", e);
|
||||||
|
// Accept either budget error or parse error (LLM didn't follow ReAct format)
|
||||||
|
assert!(
|
||||||
|
error_str.contains("Maximum iterations")
|
||||||
|
|| error_str.contains("budget")
|
||||||
|
|| error_str.contains("parse"),
|
||||||
|
"Expected budget or parse error, got: {}",
|
||||||
|
error_str
|
||||||
|
);
|
||||||
|
println!("Test passed: agent stopped with error: {}", error_str);
|
||||||
|
}
|
||||||
|
Ok(_) => {
|
||||||
|
println!("Agent completed within tool budget");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper function to create a test executor
|
||||||
|
// For parsing tests, we don't need a real connection
|
||||||
|
fn create_test_executor() -> AgentExecutor {
|
||||||
|
// Create dummy instances - the parse_response method doesn't actually use them
|
||||||
|
let provider = Arc::new(OllamaProvider::new("http://localhost:11434").unwrap());
|
||||||
|
|
||||||
|
// For parsing tests, we can accept the error from RemoteMcpClient::new()
|
||||||
|
// since we're only testing parse_response which doesn't use the MCP client
|
||||||
|
let mcp_client = match RemoteMcpClient::new() {
|
||||||
|
Ok(client) => Arc::new(client),
|
||||||
|
Err(_) => {
|
||||||
|
// If MCP server binary doesn't exist, parsing tests can still run
|
||||||
|
// by using a dummy client that will never be called
|
||||||
|
// This is a workaround for unit tests that only need parse_response
|
||||||
|
panic!("MCP server binary not found - build the project first with: cargo build --all");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let config = AgentConfig::default();
|
||||||
|
AgentExecutor::new(provider, mcp_client, config, None)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_agent_config_defaults() {
|
||||||
|
let config = AgentConfig::default();
|
||||||
|
|
||||||
|
assert_eq!(config.max_iterations, 10);
|
||||||
|
assert_eq!(config.model, "ollama");
|
||||||
|
assert_eq!(config.temperature, Some(0.7));
|
||||||
|
assert_eq!(config.max_tool_calls, 20);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_agent_config_custom() {
|
||||||
|
let config = AgentConfig {
|
||||||
|
max_iterations: 15,
|
||||||
|
model: "custom-model".to_string(),
|
||||||
|
temperature: Some(0.5),
|
||||||
|
max_tokens: Some(2000),
|
||||||
|
max_tool_calls: 30,
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(config.max_iterations, 15);
|
||||||
|
assert_eq!(config.model, "custom-model");
|
||||||
|
assert_eq!(config.temperature, Some(0.5));
|
||||||
|
assert_eq!(config.max_tokens, Some(2000));
|
||||||
|
assert_eq!(config.max_tool_calls, 30);
|
||||||
|
}
|
||||||
@@ -11,6 +11,7 @@ description = "Core traits and types for OWLEN LLM client"
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
log = "0.4.20"
|
log = "0.4.20"
|
||||||
|
regex = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
@@ -41,6 +42,7 @@ duckduckgo = "0.2.0"
|
|||||||
reqwest = { workspace = true, features = ["default"] }
|
reqwest = { workspace = true, features = ["default"] }
|
||||||
reqwest_011 = { version = "0.11", package = "reqwest" }
|
reqwest_011 = { version = "0.11", package = "reqwest" }
|
||||||
path-clean = "1.0"
|
path-clean = "1.0"
|
||||||
|
tokio-stream = "0.1"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tokio-test = { workspace = true }
|
tokio-test = { workspace = true }
|
||||||
|
|||||||
377
crates/owlen-core/src/agent.rs
Normal file
377
crates/owlen-core/src/agent.rs
Normal file
@@ -0,0 +1,377 @@
|
|||||||
|
//! High‑level agentic executor implementing the ReAct pattern.
|
||||||
|
//!
|
||||||
|
//! The executor coordinates three responsibilities:
|
||||||
|
//! 1. Build a ReAct prompt from the conversation history and the list of
|
||||||
|
//! available MCP tools.
|
||||||
|
//! 2. Send the prompt to an LLM provider (any type implementing
|
||||||
|
//! `owlen_core::Provider`).
|
||||||
|
//! 3. Parse the LLM response, optionally invoke a tool via an MCP client,
|
||||||
|
//! and feed the observation back into the conversation.
|
||||||
|
//!
|
||||||
|
//! The implementation is intentionally minimal – it provides the core loop
|
||||||
|
//! required by Phase 4 of the roadmap. Integration with the TUI and additional
|
||||||
|
//! safety mechanisms can be added on top of this module.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use crate::ui::UiController;
|
||||||
|
|
||||||
|
use dirs;
|
||||||
|
use regex::Regex;
|
||||||
|
use serde_json::json;
|
||||||
|
use std::fs::OpenOptions;
|
||||||
|
use std::io::Write;
|
||||||
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
use tokio::signal;
|
||||||
|
|
||||||
|
use crate::mcp::client::McpClient;
|
||||||
|
use crate::mcp::{McpToolCall, McpToolDescriptor, McpToolResponse};
|
||||||
|
use crate::{
|
||||||
|
types::{ChatRequest, Message},
|
||||||
|
Error, Provider, Result as CoreResult,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Configuration for the agent executor.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct AgentConfig {
|
||||||
|
/// Maximum number of ReAct iterations before the executor aborts.
|
||||||
|
pub max_iterations: usize,
|
||||||
|
/// Model name to use for the LLM provider.
|
||||||
|
pub model: String,
|
||||||
|
/// Optional temperature.
|
||||||
|
pub temperature: Option<f32>,
|
||||||
|
/// Optional max_tokens.
|
||||||
|
pub max_tokens: Option<u32>,
|
||||||
|
/// Maximum number of tool calls allowed per execution (budget).
|
||||||
|
pub max_tool_calls: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for AgentConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
max_iterations: 10,
|
||||||
|
model: "ollama".into(),
|
||||||
|
temperature: Some(0.7),
|
||||||
|
max_tokens: None,
|
||||||
|
max_tool_calls: 20,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Enum representing the possible parsed LLM responses in ReAct format.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum LlmResponse {
|
||||||
|
/// A reasoning step without action.
|
||||||
|
Reasoning { thought: String },
|
||||||
|
/// The model wants to invoke a tool.
|
||||||
|
ToolCall {
|
||||||
|
thought: String,
|
||||||
|
tool_name: String,
|
||||||
|
arguments: serde_json::Value,
|
||||||
|
},
|
||||||
|
/// The model produced a final answer.
|
||||||
|
FinalAnswer { thought: String, answer: String },
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Error type for the agent executor.
|
||||||
|
#[derive(thiserror::Error, Debug)]
|
||||||
|
pub enum AgentError {
|
||||||
|
#[error("LLM provider error: {0}")]
|
||||||
|
Provider(Error),
|
||||||
|
#[error("MCP client error: {0}")]
|
||||||
|
Mcp(Error),
|
||||||
|
#[error("Tool execution denied by user")]
|
||||||
|
ToolDenied,
|
||||||
|
#[error("Failed to parse LLM response")]
|
||||||
|
Parse,
|
||||||
|
#[error("Maximum iterations ({0}) reached without final answer")]
|
||||||
|
MaxIterationsReached(usize),
|
||||||
|
#[error("Agent execution cancelled by user (Ctrl+C)")]
|
||||||
|
Cancelled,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Core executor handling the ReAct loop.
|
||||||
|
pub struct AgentExecutor {
|
||||||
|
llm_client: Arc<dyn Provider + Send + Sync>,
|
||||||
|
tool_client: Arc<dyn McpClient + Send + Sync>,
|
||||||
|
config: AgentConfig,
|
||||||
|
ui_controller: Option<Arc<dyn UiController + Send + Sync>>, // optional UI for confirmations
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AgentExecutor {
|
||||||
|
/// Construct a new executor.
|
||||||
|
pub fn new(
|
||||||
|
llm_client: Arc<dyn Provider + Send + Sync>,
|
||||||
|
tool_client: Arc<dyn McpClient + Send + Sync>,
|
||||||
|
config: AgentConfig,
|
||||||
|
ui_controller: Option<Arc<dyn UiController + Send + Sync>>, // pass None for headless
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
llm_client,
|
||||||
|
tool_client,
|
||||||
|
config,
|
||||||
|
ui_controller,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Discover tools exposed by the MCP server.
|
||||||
|
async fn discover_tools(&self) -> CoreResult<Vec<McpToolDescriptor>> {
|
||||||
|
self.tool_client.list_tools().await
|
||||||
|
}
|
||||||
|
|
||||||
|
// #[allow(dead_code)]
|
||||||
|
// Build a ReAct prompt from the current message history and discovered tools.
|
||||||
|
/*
|
||||||
|
#[allow(dead_code)]
|
||||||
|
fn build_prompt(
|
||||||
|
&self,
|
||||||
|
history: &[Message],
|
||||||
|
tools: &[McpToolDescriptor],
|
||||||
|
) -> String {
|
||||||
|
// System prompt describing the format.
|
||||||
|
let system = "You are an intelligent agent following the ReAct pattern. Use the following sections:\nTHOUGHT: your reasoning\nACTION: the tool name you want to call (or "final_answer")\nACTION_INPUT: JSON arguments for the tool.\nIf ACTION is "final_answer", provide the final answer in the next line after the ACTION_INPUT.\n";
|
||||||
|
|
||||||
|
let mut prompt = format!("System: {}\n", system);
|
||||||
|
// Append conversation history.
|
||||||
|
for msg in history {
|
||||||
|
let role = match msg.role {
|
||||||
|
Role::User => "User",
|
||||||
|
Role::Assistant => "Assistant",
|
||||||
|
Role::System => "System",
|
||||||
|
Role::Tool => "Tool",
|
||||||
|
};
|
||||||
|
prompt.push_str(&format!("{}: {}\n", role, msg.content));
|
||||||
|
}
|
||||||
|
// Append tool descriptions.
|
||||||
|
if !tools.is_empty() {
|
||||||
|
let tools_json = json!(tools);
|
||||||
|
prompt.push_str(&format!("Available tools (JSON schema): {}\n", tools_json));
|
||||||
|
}
|
||||||
|
prompt
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
// build_prompt removed; not used in current implementation
|
||||||
|
|
||||||
|
/// Parse raw LLM text into a structured `LlmResponse`.
|
||||||
|
pub fn parse_response(&self, text: &str) -> std::result::Result<LlmResponse, AgentError> {
|
||||||
|
// Normalise line endings.
|
||||||
|
let txt = text.trim();
|
||||||
|
// Regex patterns for parsing ReAct format.
|
||||||
|
// THOUGHT and ACTION capture up to the next newline.
|
||||||
|
// ACTION_INPUT captures everything remaining (including multiline JSON).
|
||||||
|
let thought_re = Regex::new(r"(?s)THOUGHT:\s*(?P<thought>.+?)(?:\n|$)").unwrap();
|
||||||
|
let action_re = Regex::new(r"(?s)ACTION:\s*(?P<action>.+?)(?:\n|$)").unwrap();
|
||||||
|
// ACTION_INPUT captures rest of text (multiline-friendly)
|
||||||
|
let input_re = Regex::new(r"(?s)ACTION_INPUT:\s*(?P<input>.+)").unwrap();
|
||||||
|
|
||||||
|
let thought = thought_re
|
||||||
|
.captures(txt)
|
||||||
|
.and_then(|c| c.name("thought"))
|
||||||
|
.map(|m| m.as_str().trim().to_string())
|
||||||
|
.ok_or(AgentError::Parse)?;
|
||||||
|
let action = action_re
|
||||||
|
.captures(txt)
|
||||||
|
.and_then(|c| c.name("action"))
|
||||||
|
.map(|m| m.as_str().trim().to_string())
|
||||||
|
.ok_or(AgentError::Parse)?;
|
||||||
|
let input = input_re
|
||||||
|
.captures(txt)
|
||||||
|
.and_then(|c| c.name("input"))
|
||||||
|
.map(|m| m.as_str().trim().to_string())
|
||||||
|
.ok_or(AgentError::Parse)?;
|
||||||
|
|
||||||
|
if action.eq_ignore_ascii_case("final_answer") {
|
||||||
|
Ok(LlmResponse::FinalAnswer {
|
||||||
|
thought,
|
||||||
|
answer: input,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
// Parse arguments as JSON, falling back to a string if invalid.
|
||||||
|
let args = serde_json::from_str(&input).unwrap_or_else(|_| json!(input));
|
||||||
|
Ok(LlmResponse::ToolCall {
|
||||||
|
thought,
|
||||||
|
tool_name: action,
|
||||||
|
arguments: args,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Execute a single tool call via the MCP client.
|
||||||
|
async fn execute_tool(
|
||||||
|
&self,
|
||||||
|
name: &str,
|
||||||
|
arguments: serde_json::Value,
|
||||||
|
) -> CoreResult<McpToolResponse> {
|
||||||
|
// For potentially unsafe tools (write/delete) ask for UI confirmation
|
||||||
|
// if a controller is available.
|
||||||
|
let dangerous = name.contains("write") || name.contains("delete");
|
||||||
|
if dangerous {
|
||||||
|
if let Some(controller) = &self.ui_controller {
|
||||||
|
let prompt = format!(
|
||||||
|
"Confirm execution of potentially unsafe tool '{}' with args {}?",
|
||||||
|
name, arguments
|
||||||
|
);
|
||||||
|
if !controller.confirm(&prompt).await {
|
||||||
|
return Err(Error::PermissionDenied(format!(
|
||||||
|
"Tool '{}' denied by user",
|
||||||
|
name
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let call = McpToolCall {
|
||||||
|
name: name.to_string(),
|
||||||
|
arguments,
|
||||||
|
};
|
||||||
|
self.tool_client.call_tool(call).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run the full ReAct loop and return the final answer.
|
||||||
|
pub async fn run(&self, query: String) -> std::result::Result<String, AgentError> {
|
||||||
|
let tools = self.discover_tools().await.map_err(AgentError::Mcp)?;
|
||||||
|
|
||||||
|
// Build system prompt with ReAct format instructions
|
||||||
|
let tools_desc = tools
|
||||||
|
.iter()
|
||||||
|
.map(|t| {
|
||||||
|
let schema_str = serde_json::to_string_pretty(&t.input_schema)
|
||||||
|
.unwrap_or_else(|_| "{}".to_string());
|
||||||
|
format!(
|
||||||
|
"- {}: {}\n Input schema: {}",
|
||||||
|
t.name, t.description, schema_str
|
||||||
|
)
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join("\n");
|
||||||
|
|
||||||
|
let system_prompt = format!(
|
||||||
|
"You are an AI assistant that uses the ReAct (Reasoning + Acting) pattern to solve tasks.\n\n\
|
||||||
|
You must ALWAYS respond in this exact format:\n\n\
|
||||||
|
THOUGHT: <your reasoning about what to do next>\n\
|
||||||
|
ACTION: <tool_name or \"final_answer\">\n\
|
||||||
|
ACTION_INPUT: <JSON arguments for the tool, or the final answer text>\n\n\
|
||||||
|
Available tools:\n{}\n\n\
|
||||||
|
HOW IT WORKS:\n\
|
||||||
|
1. When you call a tool, you will receive its output in the next message\n\
|
||||||
|
2. After receiving the tool output, analyze it and either:\n\
|
||||||
|
a) Use the information to provide a final answer\n\
|
||||||
|
b) Call another tool if you need more information\n\
|
||||||
|
3. When you have the information needed to answer the user's question, provide a final answer\n\n\
|
||||||
|
To provide a final answer:\n\
|
||||||
|
THOUGHT: <summary of what you learned>\n\
|
||||||
|
ACTION: final_answer\n\
|
||||||
|
ACTION_INPUT: <your complete answer using the information from the tools>\n\n\
|
||||||
|
IMPORTANT: You MUST follow this format exactly. Do not deviate from it.\n\
|
||||||
|
IMPORTANT: Only use the tools listed above. Do not try to use tools that are not listed.\n\
|
||||||
|
IMPORTANT: When providing the final answer, include the actual information you learned, not just the tool arguments.",
|
||||||
|
tools_desc
|
||||||
|
);
|
||||||
|
|
||||||
|
// Initialize conversation with system prompt and user query
|
||||||
|
let mut messages = vec![Message::system(system_prompt.clone()), Message::user(query)];
|
||||||
|
|
||||||
|
// Cancellation flag set when Ctrl+C is received.
|
||||||
|
let cancelled = Arc::new(AtomicBool::new(false));
|
||||||
|
let cancel_flag = cancelled.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
// Wait for Ctrl+C signal.
|
||||||
|
let _ = signal::ctrl_c().await;
|
||||||
|
cancel_flag.store(true, Ordering::SeqCst);
|
||||||
|
});
|
||||||
|
|
||||||
|
let mut tool_calls = 0usize;
|
||||||
|
for _ in 0..self.config.max_iterations {
|
||||||
|
if cancelled.load(Ordering::SeqCst) {
|
||||||
|
return Err(AgentError::Cancelled);
|
||||||
|
}
|
||||||
|
// Build a ChatRequest for the provider.
|
||||||
|
let chat_req = ChatRequest {
|
||||||
|
model: self.config.model.clone(),
|
||||||
|
messages: messages.clone(),
|
||||||
|
parameters: crate::types::ChatParameters {
|
||||||
|
temperature: self.config.temperature,
|
||||||
|
max_tokens: self.config.max_tokens,
|
||||||
|
stream: false,
|
||||||
|
extra: Default::default(),
|
||||||
|
},
|
||||||
|
tools: Some(tools.clone()),
|
||||||
|
};
|
||||||
|
let raw_resp = self
|
||||||
|
.llm_client
|
||||||
|
.chat(chat_req)
|
||||||
|
.await
|
||||||
|
.map_err(AgentError::Provider)?;
|
||||||
|
let parsed = self
|
||||||
|
.parse_response(&raw_resp.message.content)
|
||||||
|
.map_err(|e| {
|
||||||
|
eprintln!("\n=== PARSE ERROR ===");
|
||||||
|
eprintln!("Error: {:?}", e);
|
||||||
|
eprintln!("LLM Response:\n{}", raw_resp.message.content);
|
||||||
|
eprintln!("=== END ===\n");
|
||||||
|
e
|
||||||
|
})?;
|
||||||
|
match parsed {
|
||||||
|
LlmResponse::Reasoning { thought } => {
|
||||||
|
// Append the reasoning as an assistant message.
|
||||||
|
messages.push(Message::assistant(thought));
|
||||||
|
}
|
||||||
|
LlmResponse::ToolCall {
|
||||||
|
thought,
|
||||||
|
tool_name,
|
||||||
|
arguments,
|
||||||
|
} => {
|
||||||
|
// Record the thought.
|
||||||
|
messages.push(Message::assistant(thought));
|
||||||
|
// Enforce tool call budget.
|
||||||
|
tool_calls += 1;
|
||||||
|
if tool_calls > self.config.max_tool_calls {
|
||||||
|
return Err(AgentError::MaxIterationsReached(self.config.max_iterations));
|
||||||
|
}
|
||||||
|
// Execute tool.
|
||||||
|
let args_clone = arguments.clone();
|
||||||
|
let tool_resp = self
|
||||||
|
.execute_tool(&tool_name, args_clone.clone())
|
||||||
|
.await
|
||||||
|
.map_err(AgentError::Mcp)?;
|
||||||
|
// Convert tool output to a string for the message.
|
||||||
|
let output_str = tool_resp
|
||||||
|
.output
|
||||||
|
.as_str()
|
||||||
|
.map(|s| s.to_string())
|
||||||
|
.unwrap_or_else(|| tool_resp.output.to_string());
|
||||||
|
// Audit log the tool execution.
|
||||||
|
if let Some(config_dir) = dirs::config_dir() {
|
||||||
|
let log_path = config_dir.join("owlen/logs/tool_execution.log");
|
||||||
|
if let Some(parent) = log_path.parent() {
|
||||||
|
let _ = std::fs::create_dir_all(parent);
|
||||||
|
}
|
||||||
|
if let Ok(mut file) =
|
||||||
|
OpenOptions::new().create(true).append(true).open(&log_path)
|
||||||
|
{
|
||||||
|
let ts = SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap_or_default()
|
||||||
|
.as_secs();
|
||||||
|
let _ = writeln!(
|
||||||
|
file,
|
||||||
|
"{} | tool: {} | args: {} | output: {}",
|
||||||
|
ts, tool_name, args_clone, output_str
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
messages.push(Message::tool(tool_name, output_str));
|
||||||
|
}
|
||||||
|
LlmResponse::FinalAnswer { thought, answer } => {
|
||||||
|
// Append final thought and answer, then return.
|
||||||
|
messages.push(Message::assistant(thought));
|
||||||
|
// The final answer should be a single assistant message.
|
||||||
|
messages.push(Message::assistant(answer.clone()));
|
||||||
|
return Ok(answer);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(AgentError::MaxIterationsReached(self.config.max_iterations))
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -3,6 +3,7 @@
|
|||||||
//! This crate provides the foundational abstractions for building
|
//! This crate provides the foundational abstractions for building
|
||||||
//! LLM providers, routers, and MCP (Model Context Protocol) adapters.
|
//! LLM providers, routers, and MCP (Model Context Protocol) adapters.
|
||||||
|
|
||||||
|
pub mod agent;
|
||||||
pub mod config;
|
pub mod config;
|
||||||
pub mod consent;
|
pub mod consent;
|
||||||
pub mod conversation;
|
pub mod conversation;
|
||||||
@@ -24,6 +25,7 @@ pub mod ui;
|
|||||||
pub mod validation;
|
pub mod validation;
|
||||||
pub mod wrap_cursor;
|
pub mod wrap_cursor;
|
||||||
|
|
||||||
|
pub use agent::*;
|
||||||
pub use config::*;
|
pub use config::*;
|
||||||
pub use consent::*;
|
pub use consent::*;
|
||||||
pub use conversation::*;
|
pub use conversation::*;
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ use crate::tools::registry::ToolRegistry;
|
|||||||
use crate::validation::SchemaValidator;
|
use crate::validation::SchemaValidator;
|
||||||
use crate::Result;
|
use crate::Result;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use client::McpClient;
|
pub use client::McpClient;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
@@ -161,6 +161,25 @@ impl RpcErrorResponse {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// JSON‑RPC notification (no id). Used for streaming partial results.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct RpcNotification {
|
||||||
|
pub jsonrpc: String,
|
||||||
|
pub method: String,
|
||||||
|
#[serde(skip_serializing_if = "Option::is_none")]
|
||||||
|
pub params: Option<Value>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RpcNotification {
|
||||||
|
pub fn new(method: impl Into<String>, params: Option<Value>) -> Self {
|
||||||
|
Self {
|
||||||
|
jsonrpc: JSONRPC_VERSION.to_string(),
|
||||||
|
method: method.into(),
|
||||||
|
params,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Request ID can be string, number, or null
|
/// Request ID can be string, number, or null
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
||||||
#[serde(untagged)]
|
#[serde(untagged)]
|
||||||
@@ -194,6 +213,7 @@ pub mod methods {
|
|||||||
pub const RESOURCES_GET: &str = "resources/get";
|
pub const RESOURCES_GET: &str = "resources/get";
|
||||||
pub const RESOURCES_WRITE: &str = "resources/write";
|
pub const RESOURCES_WRITE: &str = "resources/write";
|
||||||
pub const RESOURCES_DELETE: &str = "resources/delete";
|
pub const RESOURCES_DELETE: &str = "resources/delete";
|
||||||
|
pub const MODELS_LIST: &str = "models/list";
|
||||||
}
|
}
|
||||||
|
|
||||||
// ============================================================================
|
// ============================================================================
|
||||||
|
|||||||
@@ -1,11 +1,19 @@
|
|||||||
use super::protocol::{RequestId, RpcErrorResponse, RpcRequest, RpcResponse};
|
use super::protocol::methods;
|
||||||
|
use super::protocol::{RequestId, RpcErrorResponse, RpcRequest, RpcResponse, PROTOCOL_VERSION};
|
||||||
use super::{McpClient, McpToolCall, McpToolDescriptor, McpToolResponse};
|
use super::{McpClient, McpToolCall, McpToolDescriptor, McpToolResponse};
|
||||||
use crate::{Error, Result};
|
use crate::types::ModelInfo;
|
||||||
|
use crate::{Error, Provider, Result};
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use serde_json::json;
|
||||||
use std::sync::atomic::{AtomicU64, Ordering};
|
use std::sync::atomic::{AtomicU64, Ordering};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
|
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
|
||||||
use tokio::process::{Child, Command};
|
use tokio::process::{Child, Command};
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::Mutex;
|
||||||
|
// Provider trait is already imported via the earlier use statement.
|
||||||
|
use crate::types::{ChatResponse, Message, Role};
|
||||||
|
use futures::stream;
|
||||||
|
use futures::StreamExt;
|
||||||
|
|
||||||
/// Client that talks to the external `owlen-mcp-server` over STDIO.
|
/// Client that talks to the external `owlen-mcp-server` over STDIO.
|
||||||
pub struct RemoteMcpClient {
|
pub struct RemoteMcpClient {
|
||||||
@@ -28,11 +36,32 @@ impl RemoteMcpClient {
|
|||||||
// Attempt to locate the server binary; if unavailable we will fall back to launching via `cargo run`.
|
// Attempt to locate the server binary; if unavailable we will fall back to launching via `cargo run`.
|
||||||
let _ = ();
|
let _ = ();
|
||||||
// Resolve absolute path based on workspace root to avoid cwd dependence.
|
// Resolve absolute path based on workspace root to avoid cwd dependence.
|
||||||
|
// The MCP server binary lives in the workspace's `target/debug` directory.
|
||||||
|
// Historically the binary was named `owlen-mcp-server`, but it has been
|
||||||
|
// renamed to `owlen-mcp-llm-server`. We attempt to locate the new name
|
||||||
|
// first and fall back to the legacy name for compatibility.
|
||||||
let workspace_root = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
|
let workspace_root = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
|
||||||
.join("../..")
|
.join("../..")
|
||||||
.canonicalize()
|
.canonicalize()
|
||||||
.map_err(Error::Io)?;
|
.map_err(Error::Io)?;
|
||||||
let binary_path = workspace_root.join("target/debug/owlen-mcp-server");
|
let candidates = [
|
||||||
|
"target/debug/owlen-mcp-llm-server",
|
||||||
|
"target/debug/owlen-mcp-server",
|
||||||
|
];
|
||||||
|
let mut binary_path = None;
|
||||||
|
for rel in &candidates {
|
||||||
|
let p = workspace_root.join(rel);
|
||||||
|
if p.exists() {
|
||||||
|
binary_path = Some(p);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let binary_path = binary_path.ok_or_else(|| {
|
||||||
|
Error::NotImplemented(format!(
|
||||||
|
"owlen-mcp server binary not found; checked {} and {}",
|
||||||
|
candidates[0], candidates[1]
|
||||||
|
))
|
||||||
|
})?;
|
||||||
if !binary_path.exists() {
|
if !binary_path.exists() {
|
||||||
return Err(Error::NotImplemented(format!(
|
return Err(Error::NotImplemented(format!(
|
||||||
"owlen-mcp-server binary not found at {}",
|
"owlen-mcp-server binary not found at {}",
|
||||||
@@ -100,15 +129,57 @@ impl RemoteMcpClient {
|
|||||||
#[async_trait::async_trait]
|
#[async_trait::async_trait]
|
||||||
impl McpClient for RemoteMcpClient {
|
impl McpClient for RemoteMcpClient {
|
||||||
async fn list_tools(&self) -> Result<Vec<McpToolDescriptor>> {
|
async fn list_tools(&self) -> Result<Vec<McpToolDescriptor>> {
|
||||||
// The file server does not expose tool descriptors; fall back to NotImplemented.
|
// Query the remote MCP server for its tool descriptors using the standard
|
||||||
Err(Error::NotImplemented(
|
// `tools/list` RPC method. The server returns a JSON array of
|
||||||
"Remote MCP client does not support list_tools".to_string(),
|
// `McpToolDescriptor` objects.
|
||||||
))
|
let result = self.send_rpc(methods::TOOLS_LIST, json!(null)).await?;
|
||||||
|
let descriptors: Vec<McpToolDescriptor> = serde_json::from_value(result)?;
|
||||||
|
Ok(descriptors)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn call_tool(&self, call: McpToolCall) -> Result<McpToolResponse> {
|
async fn call_tool(&self, call: McpToolCall) -> Result<McpToolResponse> {
|
||||||
let result = self.send_rpc(&call.name, call.arguments.clone()).await?;
|
// Local handling for simple resource tools to avoid needing the MCP server
|
||||||
// The remote server returns only the tool result; we fabricate metadata.
|
// to implement them.
|
||||||
|
if call.name.starts_with("resources/get") {
|
||||||
|
let path = call
|
||||||
|
.arguments
|
||||||
|
.get("path")
|
||||||
|
.and_then(|v| v.as_str())
|
||||||
|
.unwrap_or("");
|
||||||
|
let content = std::fs::read_to_string(path).map_err(Error::Io)?;
|
||||||
|
return Ok(McpToolResponse {
|
||||||
|
name: call.name,
|
||||||
|
success: true,
|
||||||
|
output: serde_json::json!(content),
|
||||||
|
metadata: std::collections::HashMap::new(),
|
||||||
|
duration_ms: 0,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
if call.name.starts_with("resources/list") {
|
||||||
|
let path = call
|
||||||
|
.arguments
|
||||||
|
.get("path")
|
||||||
|
.and_then(|v| v.as_str())
|
||||||
|
.unwrap_or(".");
|
||||||
|
let mut names = Vec::new();
|
||||||
|
for entry in std::fs::read_dir(path).map_err(Error::Io)?.flatten() {
|
||||||
|
if let Some(name) = entry.file_name().to_str() {
|
||||||
|
names.push(name.to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Ok(McpToolResponse {
|
||||||
|
name: call.name,
|
||||||
|
success: true,
|
||||||
|
output: serde_json::json!(names),
|
||||||
|
metadata: std::collections::HashMap::new(),
|
||||||
|
duration_ms: 0,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
// MCP server expects a generic "tools/call" method with a payload containing the
|
||||||
|
// specific tool name and its arguments. Wrap the incoming call accordingly.
|
||||||
|
let payload = serde_json::to_value(&call)?;
|
||||||
|
let result = self.send_rpc(methods::TOOLS_CALL, payload).await?;
|
||||||
|
// The server returns the tool's output directly; construct a matching response.
|
||||||
Ok(McpToolResponse {
|
Ok(McpToolResponse {
|
||||||
name: call.name,
|
name: call.name,
|
||||||
success: true,
|
success: true,
|
||||||
@@ -118,3 +189,66 @@ impl McpClient for RemoteMcpClient {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
// Provider implementation – forwards chat requests to the generate_text tool.
|
||||||
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Provider for RemoteMcpClient {
|
||||||
|
fn name(&self) -> &str {
|
||||||
|
"mcp-llm-server"
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn list_models(&self) -> Result<Vec<ModelInfo>> {
|
||||||
|
let result = self.send_rpc(methods::MODELS_LIST, json!(null)).await?;
|
||||||
|
let models: Vec<ModelInfo> = serde_json::from_value(result)?;
|
||||||
|
Ok(models)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn chat(&self, request: crate::types::ChatRequest) -> Result<ChatResponse> {
|
||||||
|
// Use the streaming implementation and take the first response.
|
||||||
|
let mut stream = self.chat_stream(request).await?;
|
||||||
|
match stream.next().await {
|
||||||
|
Some(Ok(resp)) => Ok(resp),
|
||||||
|
Some(Err(e)) => Err(e),
|
||||||
|
None => Err(Error::Provider(anyhow::anyhow!("Empty chat stream"))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn chat_stream(
|
||||||
|
&self,
|
||||||
|
request: crate::types::ChatRequest,
|
||||||
|
) -> Result<crate::provider::ChatStream> {
|
||||||
|
// Build arguments matching the generate_text schema.
|
||||||
|
let args = serde_json::json!({
|
||||||
|
"messages": request.messages,
|
||||||
|
"temperature": request.parameters.temperature,
|
||||||
|
"max_tokens": request.parameters.max_tokens,
|
||||||
|
"model": request.model,
|
||||||
|
"stream": request.parameters.stream,
|
||||||
|
});
|
||||||
|
let call = McpToolCall {
|
||||||
|
name: "generate_text".to_string(),
|
||||||
|
arguments: args,
|
||||||
|
};
|
||||||
|
let resp = self.call_tool(call).await?;
|
||||||
|
// Build a ChatResponse from the tool output (assumed to be a string).
|
||||||
|
let content = resp.output.as_str().unwrap_or("").to_string();
|
||||||
|
let message = Message::new(Role::Assistant, content);
|
||||||
|
let chat_resp = ChatResponse {
|
||||||
|
message,
|
||||||
|
usage: None,
|
||||||
|
is_streaming: false,
|
||||||
|
is_final: true,
|
||||||
|
};
|
||||||
|
let stream = stream::once(async move { Ok(chat_resp) });
|
||||||
|
Ok(Box::pin(stream))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn health_check(&self) -> Result<()> {
|
||||||
|
// Simple ping using initialize method.
|
||||||
|
let params = serde_json::json!({"protocol_version": PROTOCOL_VERSION});
|
||||||
|
self.send_rpc("initialize", params).await.map(|_| ())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -615,6 +615,11 @@ impl SessionController {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Expose the underlying LLM provider.
|
||||||
|
pub fn provider(&self) -> Arc<dyn Provider> {
|
||||||
|
self.provider.clone()
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn send_message(
|
pub async fn send_message(
|
||||||
&mut self,
|
&mut self,
|
||||||
content: String,
|
content: String,
|
||||||
|
|||||||
95
crates/owlen-core/src/tools.rs
Normal file
95
crates/owlen-core/src/tools.rs
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
//! Tool module aggregating built‑in tool implementations.
|
||||||
|
//!
|
||||||
|
//! The crate originally declared `pub mod tools;` in `lib.rs` but the source
|
||||||
|
//! directory only contained individual tool files without a `mod.rs`, causing the
|
||||||
|
//! compiler to look for `tools.rs` and fail. Adding this module file makes the
|
||||||
|
//! directory a proper Rust module and re‑exports the concrete tool types.
|
||||||
|
|
||||||
|
pub mod code_exec;
|
||||||
|
pub mod fs_tools;
|
||||||
|
pub mod registry;
|
||||||
|
pub mod web_search;
|
||||||
|
pub mod web_search_detailed;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use serde_json::{json, Value};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use crate::Result;
|
||||||
|
|
||||||
|
/// Trait representing a tool that can be called via the MCP interface.
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Tool: Send + Sync {
|
||||||
|
/// Unique name of the tool (used in the MCP protocol).
|
||||||
|
fn name(&self) -> &'static str;
|
||||||
|
/// Human‑readable description for documentation.
|
||||||
|
fn description(&self) -> &'static str;
|
||||||
|
/// JSON‑Schema describing the expected arguments.
|
||||||
|
fn schema(&self) -> Value;
|
||||||
|
/// Execute the tool with the provided arguments.
|
||||||
|
fn requires_network(&self) -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
fn requires_filesystem(&self) -> Vec<String> {
|
||||||
|
Vec::new()
|
||||||
|
}
|
||||||
|
async fn execute(&self, args: Value) -> Result<ToolResult>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Result returned by a tool execution.
|
||||||
|
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||||
|
pub struct ToolResult {
|
||||||
|
/// Indicates whether the tool completed successfully.
|
||||||
|
pub success: bool,
|
||||||
|
/// Human‑readable status string – retained for compatibility.
|
||||||
|
pub status: String,
|
||||||
|
/// Arbitrary JSON payload describing the tool output.
|
||||||
|
pub output: Value,
|
||||||
|
/// Execution duration.
|
||||||
|
#[serde(skip_serializing_if = "Duration::is_zero", default)]
|
||||||
|
pub duration: Duration,
|
||||||
|
/// Optional key/value metadata for the tool invocation.
|
||||||
|
#[serde(default)]
|
||||||
|
pub metadata: HashMap<String, String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ToolResult {
|
||||||
|
pub fn success(output: Value) -> Self {
|
||||||
|
Self {
|
||||||
|
success: true,
|
||||||
|
status: "success".into(),
|
||||||
|
output,
|
||||||
|
duration: Duration::default(),
|
||||||
|
metadata: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn error(msg: &str) -> Self {
|
||||||
|
Self {
|
||||||
|
success: false,
|
||||||
|
status: "error".into(),
|
||||||
|
output: json!({ "error": msg }),
|
||||||
|
duration: Duration::default(),
|
||||||
|
metadata: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cancelled(msg: &str) -> Self {
|
||||||
|
Self {
|
||||||
|
success: false,
|
||||||
|
status: "cancelled".into(),
|
||||||
|
output: json!({ "error": msg }),
|
||||||
|
duration: Duration::default(),
|
||||||
|
metadata: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Re‑export the most commonly used types so they can be accessed as
|
||||||
|
// `owlen_core::tools::CodeExecTool`, etc.
|
||||||
|
pub use code_exec::CodeExecTool;
|
||||||
|
pub use fs_tools::{ResourcesDeleteTool, ResourcesGetTool, ResourcesListTool, ResourcesWriteTool};
|
||||||
|
pub use registry::ToolRegistry;
|
||||||
|
pub use web_search::WebSearchTool;
|
||||||
|
pub use web_search_detailed::WebSearchDetailedTool;
|
||||||
@@ -1,7 +1,8 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
|
|
||||||
use anyhow::{anyhow, Context, Result};
|
use crate::Result;
|
||||||
|
use anyhow::{anyhow, Context};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
|
|
||||||
@@ -72,7 +73,7 @@ impl Tool for CodeExecTool {
|
|||||||
let timeout = args.get("timeout").and_then(Value::as_u64).unwrap_or(30);
|
let timeout = args.get("timeout").and_then(Value::as_u64).unwrap_or(30);
|
||||||
|
|
||||||
if !self.allowed_languages.iter().any(|lang| lang == language) {
|
if !self.allowed_languages.iter().any(|lang| lang == language) {
|
||||||
return Err(anyhow!("Language '{}' not permitted", language));
|
return Err(anyhow!("Language '{}' not permitted", language).into());
|
||||||
}
|
}
|
||||||
|
|
||||||
let (command, command_args) = match language {
|
let (command, command_args) = match language {
|
||||||
@@ -88,7 +89,7 @@ impl Tool for CodeExecTool {
|
|||||||
result.duration = start.elapsed();
|
result.duration = start.elapsed();
|
||||||
return Ok(result);
|
return Ok(result);
|
||||||
}
|
}
|
||||||
other => return Err(anyhow!("Unsupported language: {}", other)),
|
other => return Err(anyhow!("Unsupported language: {}", other).into()),
|
||||||
};
|
};
|
||||||
|
|
||||||
let sandbox_config = SandboxConfig {
|
let sandbox_config = SandboxConfig {
|
||||||
@@ -97,7 +98,7 @@ impl Tool for CodeExecTool {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
let sandbox_result = tokio::task::spawn_blocking(move || -> Result<_> {
|
let sandbox_result = tokio::task::spawn_blocking(move || {
|
||||||
let sandbox = SandboxedProcess::new(sandbox_config)?;
|
let sandbox = SandboxedProcess::new(sandbox_config)?;
|
||||||
let arg_refs: Vec<&str> = command_args.iter().map(|s| s.as_str()).collect();
|
let arg_refs: Vec<&str> = command_args.iter().map(|s| s.as_str()).collect();
|
||||||
sandbox.execute(&command, &arg_refs)
|
sandbox.execute(&command, &arg_refs)
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
use crate::tools::{Tool, ToolResult};
|
use crate::tools::{Tool, ToolResult};
|
||||||
use anyhow::Result;
|
use crate::{Error, Result};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use path_clean::PathClean;
|
use path_clean::PathClean;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
@@ -16,8 +16,9 @@ struct FileArgs {
|
|||||||
fn sanitize_path(path: &str, root: &Path) -> Result<PathBuf> {
|
fn sanitize_path(path: &str, root: &Path) -> Result<PathBuf> {
|
||||||
let path = Path::new(path);
|
let path = Path::new(path);
|
||||||
let path = if path.is_absolute() {
|
let path = if path.is_absolute() {
|
||||||
|
// Strip leading '/' to treat as relative to the project root.
|
||||||
path.strip_prefix("/")
|
path.strip_prefix("/")
|
||||||
.map_err(|_| anyhow::anyhow!("Invalid path"))?
|
.map_err(|_| Error::InvalidInput("Invalid path".into()))?
|
||||||
.to_path_buf()
|
.to_path_buf()
|
||||||
} else {
|
} else {
|
||||||
path.to_path_buf()
|
path.to_path_buf()
|
||||||
@@ -26,7 +27,7 @@ fn sanitize_path(path: &str, root: &Path) -> Result<PathBuf> {
|
|||||||
let full_path = root.join(path).clean();
|
let full_path = root.join(path).clean();
|
||||||
|
|
||||||
if !full_path.starts_with(root) {
|
if !full_path.starts_with(root) {
|
||||||
return Err(anyhow::anyhow!("Path traversal detected"));
|
return Err(Error::PermissionDenied("Path traversal detected".into()));
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(full_path)
|
Ok(full_path)
|
||||||
@@ -191,7 +192,7 @@ impl Tool for ResourcesDeleteTool {
|
|||||||
fs::remove_file(full_path)?;
|
fs::remove_file(full_path)?;
|
||||||
Ok(ToolResult::success(json!(null)))
|
Ok(ToolResult::success(json!(null)))
|
||||||
} else {
|
} else {
|
||||||
Err(anyhow::anyhow!("Path does not refer to a file"))
|
Err(Error::InvalidInput("Path does not refer to a file".into()))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,74 +0,0 @@
|
|||||||
use async_trait::async_trait;
|
|
||||||
use serde_json::{json, Value};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use anyhow::Result;
|
|
||||||
|
|
||||||
pub mod code_exec;
|
|
||||||
pub mod fs_tools;
|
|
||||||
pub mod registry;
|
|
||||||
pub mod web_search;
|
|
||||||
pub mod web_search_detailed;
|
|
||||||
|
|
||||||
// Re‑export tool structs for convenient crate‑level access
|
|
||||||
pub use code_exec::CodeExecTool;
|
|
||||||
pub use fs_tools::{ResourcesDeleteTool, ResourcesGetTool, ResourcesListTool, ResourcesWriteTool};
|
|
||||||
pub use registry::ToolRegistry;
|
|
||||||
pub use web_search::WebSearchTool;
|
|
||||||
pub use web_search_detailed::WebSearchDetailedTool;
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
pub trait Tool: Send + Sync {
|
|
||||||
fn name(&self) -> &'static str;
|
|
||||||
fn description(&self) -> &'static str;
|
|
||||||
fn schema(&self) -> Value;
|
|
||||||
fn requires_network(&self) -> bool {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
fn requires_filesystem(&self) -> Vec<String> {
|
|
||||||
Vec::new()
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn execute(&self, args: Value) -> Result<ToolResult>;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct ToolResult {
|
|
||||||
pub success: bool,
|
|
||||||
pub cancelled: bool,
|
|
||||||
pub output: Value,
|
|
||||||
pub metadata: HashMap<String, String>,
|
|
||||||
pub duration: Duration,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ToolResult {
|
|
||||||
pub fn success(output: Value) -> Self {
|
|
||||||
Self {
|
|
||||||
success: true,
|
|
||||||
cancelled: false,
|
|
||||||
output,
|
|
||||||
metadata: HashMap::new(),
|
|
||||||
duration: Duration::from_millis(0),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn error(message: &str) -> Self {
|
|
||||||
Self {
|
|
||||||
success: false,
|
|
||||||
cancelled: false,
|
|
||||||
output: json!({ "error": message }),
|
|
||||||
metadata: HashMap::new(),
|
|
||||||
duration: Duration::from_millis(0),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn cancelled(message: String) -> Self {
|
|
||||||
Self {
|
|
||||||
success: false,
|
|
||||||
cancelled: true,
|
|
||||||
output: json!({ "message": message }),
|
|
||||||
metadata: HashMap::new(),
|
|
||||||
duration: Duration::from_millis(0),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,7 +1,8 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use crate::Result;
|
||||||
|
use anyhow::Context;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use super::{Tool, ToolResult};
|
use super::{Tool, ToolResult};
|
||||||
@@ -66,7 +67,7 @@ impl ToolRegistry {
|
|||||||
_ => {}
|
_ => {}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return Ok(ToolResult::cancelled(format!(
|
return Ok(ToolResult::cancelled(&format!(
|
||||||
"Tool '{}' execution was cancelled by the user.",
|
"Tool '{}' execution was cancelled by the user.",
|
||||||
name
|
name
|
||||||
)));
|
)));
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use crate::Result;
|
||||||
|
use anyhow::Context;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use crate::Result;
|
||||||
|
use anyhow::Context;
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
|
|
||||||
|
|||||||
12
crates/owlen-mcp-client/Cargo.toml
Normal file
12
crates/owlen-mcp-client/Cargo.toml
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
[package]
|
||||||
|
name = "owlen-mcp-client"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
description = "Dedicated MCP client library for Owlen, exposing remote MCP server communication"
|
||||||
|
license = "AGPL-3.0"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
owlen-core = { path = "../owlen-core" }
|
||||||
|
|
||||||
|
[features]
|
||||||
|
default = []
|
||||||
19
crates/owlen-mcp-client/src/lib.rs
Normal file
19
crates/owlen-mcp-client/src/lib.rs
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
//! Owlen MCP client library.
|
||||||
|
//!
|
||||||
|
//! This crate provides a thin façade over the remote MCP client implementation
|
||||||
|
//! inside `owlen-core`. It re‑exports the most useful types so downstream
|
||||||
|
//! crates can depend only on `owlen-mcp-client` without pulling in the entire
|
||||||
|
//! core crate internals.
|
||||||
|
|
||||||
|
pub use owlen_core::mcp::remote_client::RemoteMcpClient;
|
||||||
|
pub use owlen_core::mcp::{McpClient, McpToolCall, McpToolDescriptor, McpToolResponse};
|
||||||
|
|
||||||
|
// Re‑export the Provider implementation so the client can also be used as an
|
||||||
|
// LLM provider when the remote MCP server hosts a language‑model tool (e.g.
|
||||||
|
// `generate_text`).
|
||||||
|
// Re‑export the core Provider trait so that the MCP client can also be used as an LLM provider.
|
||||||
|
pub use owlen_core::provider::Provider as McpProvider;
|
||||||
|
|
||||||
|
// Note: The `RemoteMcpClient` type provides its own `new` constructor in the core
|
||||||
|
// crate. Users can call `RemoteMcpClient::new()` directly. No additional wrapper
|
||||||
|
// is needed here.
|
||||||
20
crates/owlen-mcp-llm-server/Cargo.toml
Normal file
20
crates/owlen-mcp-llm-server/Cargo.toml
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
[package]
|
||||||
|
name = "owlen-mcp-llm-server"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
owlen-core = { path = "../owlen-core" }
|
||||||
|
owlen-ollama = { path = "../owlen-ollama" }
|
||||||
|
tokio = { version = "1.0", features = ["full"] }
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_json = "1.0"
|
||||||
|
anyhow = "1.0"
|
||||||
|
tokio-stream = "0.1"
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "owlen-mcp-llm-server"
|
||||||
|
path = "src/lib.rs"
|
||||||
498
crates/owlen-mcp-llm-server/src/lib.rs
Normal file
498
crates/owlen-mcp-llm-server/src/lib.rs
Normal file
@@ -0,0 +1,498 @@
|
|||||||
|
#![allow(
|
||||||
|
unused_imports,
|
||||||
|
unused_variables,
|
||||||
|
dead_code,
|
||||||
|
clippy::unnecessary_cast,
|
||||||
|
clippy::manual_flatten,
|
||||||
|
clippy::empty_line_after_outer_attr
|
||||||
|
)]
|
||||||
|
|
||||||
|
use owlen_core::mcp::protocol::{
|
||||||
|
methods, ErrorCode, InitializeParams, InitializeResult, RequestId, RpcError, RpcErrorResponse,
|
||||||
|
RpcNotification, RpcRequest, RpcResponse, ServerCapabilities, ServerInfo, PROTOCOL_VERSION,
|
||||||
|
};
|
||||||
|
use owlen_core::mcp::{McpToolCall, McpToolDescriptor, McpToolResponse};
|
||||||
|
use owlen_core::types::{ChatParameters, ChatRequest, Message};
|
||||||
|
use owlen_core::Provider;
|
||||||
|
use owlen_ollama::OllamaProvider;
|
||||||
|
use serde::Deserialize;
|
||||||
|
use serde_json::{json, Value};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::env;
|
||||||
|
use tokio::io::{self, AsyncBufReadExt, AsyncWriteExt};
|
||||||
|
use tokio_stream::StreamExt;
|
||||||
|
|
||||||
|
// Suppress warnings are handled by the crate-level attribute at the top.
|
||||||
|
|
||||||
|
/// Arguments for the generate_text tool
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
struct GenerateTextArgs {
|
||||||
|
messages: Vec<Message>,
|
||||||
|
temperature: Option<f32>,
|
||||||
|
max_tokens: Option<u32>,
|
||||||
|
model: String,
|
||||||
|
stream: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Simple tool descriptor for generate_text
|
||||||
|
fn generate_text_descriptor() -> McpToolDescriptor {
|
||||||
|
McpToolDescriptor {
|
||||||
|
name: "generate_text".to_string(),
|
||||||
|
description: "Generate text using Ollama LLM. Each message must have 'role' (user/assistant/system) and 'content' (string) fields.".to_string(),
|
||||||
|
input_schema: json!({
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"messages": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"role": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["user", "assistant", "system"],
|
||||||
|
"description": "The role of the message sender"
|
||||||
|
},
|
||||||
|
"content": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "The message content"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["role", "content"]
|
||||||
|
},
|
||||||
|
"description": "Array of message objects with role and content"
|
||||||
|
},
|
||||||
|
"temperature": {"type": ["number", "null"], "description": "Sampling temperature (0.0-2.0)"},
|
||||||
|
"max_tokens": {"type": ["integer", "null"], "description": "Maximum tokens to generate"},
|
||||||
|
"model": {"type": "string", "description": "Model name (e.g., llama3.2:latest)"},
|
||||||
|
"stream": {"type": "boolean", "description": "Whether to stream the response"}
|
||||||
|
},
|
||||||
|
"required": ["messages", "model", "stream"]
|
||||||
|
}),
|
||||||
|
requires_network: true,
|
||||||
|
requires_filesystem: vec![],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tool descriptor for resources/get (read file)
|
||||||
|
fn resources_get_descriptor() -> McpToolDescriptor {
|
||||||
|
McpToolDescriptor {
|
||||||
|
name: "resources/get".to_string(),
|
||||||
|
description: "Read and return the TEXT CONTENTS of a single FILE. Use this to read the contents of code files, config files, or text documents. Do NOT use for directories.".to_string(),
|
||||||
|
input_schema: json!({
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"path": {"type": "string", "description": "Path to the FILE (not directory) to read"}
|
||||||
|
},
|
||||||
|
"required": ["path"]
|
||||||
|
}),
|
||||||
|
requires_network: false,
|
||||||
|
requires_filesystem: vec!["read".to_string()],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tool descriptor for resources/list (list directory)
|
||||||
|
fn resources_list_descriptor() -> McpToolDescriptor {
|
||||||
|
McpToolDescriptor {
|
||||||
|
name: "resources/list".to_string(),
|
||||||
|
description: "List the NAMES of all files and directories in a directory. Use this to see what files exist in a folder, or to list directory contents. Returns an array of file/directory names.".to_string(),
|
||||||
|
input_schema: json!({
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"path": {"type": "string", "description": "Path to the DIRECTORY to list (use '.' for current directory)"}
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
requires_network: false,
|
||||||
|
requires_filesystem: vec!["read".to_string()],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_generate_text(args: GenerateTextArgs) -> Result<String, RpcError> {
|
||||||
|
// Create provider with default local Ollama URL
|
||||||
|
let provider = OllamaProvider::new("http://localhost:11434")
|
||||||
|
.map_err(|e| RpcError::internal_error(format!("Failed to init OllamaProvider: {}", e)))?;
|
||||||
|
|
||||||
|
let parameters = ChatParameters {
|
||||||
|
temperature: args.temperature,
|
||||||
|
max_tokens: args.max_tokens.map(|v| v as u32),
|
||||||
|
stream: args.stream,
|
||||||
|
extra: HashMap::new(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let request = ChatRequest {
|
||||||
|
model: args.model,
|
||||||
|
messages: args.messages,
|
||||||
|
parameters,
|
||||||
|
tools: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Use streaming API and collect output
|
||||||
|
let mut stream = provider
|
||||||
|
.chat_stream(request)
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::internal_error(format!("Chat request failed: {}", e)))?;
|
||||||
|
let mut content = String::new();
|
||||||
|
while let Some(chunk) = stream.next().await {
|
||||||
|
match chunk {
|
||||||
|
Ok(resp) => {
|
||||||
|
content.push_str(&resp.message.content);
|
||||||
|
if resp.is_final {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
return Err(RpcError::internal_error(format!("Stream error: {}", e)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(content)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_request(req: &RpcRequest) -> Result<Value, RpcError> {
|
||||||
|
match req.method.as_str() {
|
||||||
|
methods::INITIALIZE => {
|
||||||
|
let params = req
|
||||||
|
.params
|
||||||
|
.as_ref()
|
||||||
|
.ok_or_else(|| RpcError::invalid_params("Missing params for initialize"))?;
|
||||||
|
let init: InitializeParams = serde_json::from_value(params.clone())
|
||||||
|
.map_err(|e| RpcError::invalid_params(format!("Invalid init params: {}", e)))?;
|
||||||
|
if !init.protocol_version.eq(PROTOCOL_VERSION) {
|
||||||
|
return Err(RpcError::new(
|
||||||
|
ErrorCode::INVALID_REQUEST,
|
||||||
|
format!(
|
||||||
|
"Incompatible protocol version. Client: {}, Server: {}",
|
||||||
|
init.protocol_version, PROTOCOL_VERSION
|
||||||
|
),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let result = InitializeResult {
|
||||||
|
protocol_version: PROTOCOL_VERSION.to_string(),
|
||||||
|
server_info: ServerInfo {
|
||||||
|
name: "owlen-mcp-llm-server".to_string(),
|
||||||
|
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||||
|
},
|
||||||
|
capabilities: ServerCapabilities {
|
||||||
|
supports_tools: Some(true),
|
||||||
|
supports_resources: Some(false),
|
||||||
|
supports_streaming: Some(true),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
Ok(serde_json::to_value(result).unwrap())
|
||||||
|
}
|
||||||
|
methods::TOOLS_LIST => {
|
||||||
|
let tools = vec![
|
||||||
|
generate_text_descriptor(),
|
||||||
|
resources_get_descriptor(),
|
||||||
|
resources_list_descriptor(),
|
||||||
|
];
|
||||||
|
Ok(json!(tools))
|
||||||
|
}
|
||||||
|
// New method to list available Ollama models via the provider.
|
||||||
|
methods::MODELS_LIST => {
|
||||||
|
// Reuse the provider instance for model listing.
|
||||||
|
let provider = OllamaProvider::new("http://localhost:11434").map_err(|e| {
|
||||||
|
RpcError::internal_error(format!("Failed to init OllamaProvider: {}", e))
|
||||||
|
})?;
|
||||||
|
let models = provider
|
||||||
|
.list_models()
|
||||||
|
.await
|
||||||
|
.map_err(|e| RpcError::internal_error(format!("Failed to list models: {}", e)))?;
|
||||||
|
Ok(serde_json::to_value(models).unwrap())
|
||||||
|
}
|
||||||
|
methods::TOOLS_CALL => {
|
||||||
|
// For streaming we will send incremental notifications directly from here.
|
||||||
|
// The caller (main loop) will handle writing the final response.
|
||||||
|
Err(RpcError::internal_error(
|
||||||
|
"TOOLS_CALL should be handled in main loop for streaming",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
_ => Err(RpcError::method_not_found(&req.method)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let root = env::current_dir()?; // not used but kept for parity
|
||||||
|
let mut stdin = io::BufReader::new(io::stdin());
|
||||||
|
let mut stdout = io::stdout();
|
||||||
|
loop {
|
||||||
|
let mut line = String::new();
|
||||||
|
match stdin.read_line(&mut line).await {
|
||||||
|
Ok(0) => break,
|
||||||
|
Ok(_) => {
|
||||||
|
let req: RpcRequest = match serde_json::from_str(&line) {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => {
|
||||||
|
let err = RpcErrorResponse::new(
|
||||||
|
RequestId::Number(0),
|
||||||
|
RpcError::parse_error(format!("Parse error: {}", e)),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let id = req.id.clone();
|
||||||
|
// Streaming tool calls (generate_text) are handled specially to emit incremental notifications.
|
||||||
|
if req.method == methods::TOOLS_CALL {
|
||||||
|
// Parse the tool call
|
||||||
|
let params = match &req.params {
|
||||||
|
Some(p) => p,
|
||||||
|
None => {
|
||||||
|
let err_resp = RpcErrorResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
RpcError::invalid_params("Missing params for tool call"),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let call: McpToolCall = match serde_json::from_value(params.clone()) {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => {
|
||||||
|
let err_resp = RpcErrorResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
RpcError::invalid_params(format!("Invalid tool call: {}", e)),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
// Dispatch based on the requested tool name.
|
||||||
|
// Handle resources tools manually.
|
||||||
|
if call.name.starts_with("resources/get") {
|
||||||
|
let path = call
|
||||||
|
.arguments
|
||||||
|
.get("path")
|
||||||
|
.and_then(|v| v.as_str())
|
||||||
|
.unwrap_or("");
|
||||||
|
match std::fs::read_to_string(path) {
|
||||||
|
Ok(content) => {
|
||||||
|
let response = McpToolResponse {
|
||||||
|
name: call.name,
|
||||||
|
success: true,
|
||||||
|
output: json!(content),
|
||||||
|
metadata: HashMap::new(),
|
||||||
|
duration_ms: 0,
|
||||||
|
};
|
||||||
|
let final_resp = RpcResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
serde_json::to_value(response).unwrap(),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&final_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
let err_resp = RpcErrorResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
RpcError::internal_error(format!("Failed to read file: {}", e)),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if call.name.starts_with("resources/list") {
|
||||||
|
let path = call
|
||||||
|
.arguments
|
||||||
|
.get("path")
|
||||||
|
.and_then(|v| v.as_str())
|
||||||
|
.unwrap_or(".");
|
||||||
|
match std::fs::read_dir(path) {
|
||||||
|
Ok(entries) => {
|
||||||
|
let mut names = Vec::new();
|
||||||
|
for entry in entries.flatten() {
|
||||||
|
if let Some(name) = entry.file_name().to_str() {
|
||||||
|
names.push(name.to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let response = McpToolResponse {
|
||||||
|
name: call.name,
|
||||||
|
success: true,
|
||||||
|
output: json!(names),
|
||||||
|
metadata: HashMap::new(),
|
||||||
|
duration_ms: 0,
|
||||||
|
};
|
||||||
|
let final_resp = RpcResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
serde_json::to_value(response).unwrap(),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&final_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
let err_resp = RpcErrorResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
RpcError::internal_error(format!("Failed to list dir: {}", e)),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Expect generate_text tool for the remaining path.
|
||||||
|
if call.name != "generate_text" {
|
||||||
|
let err_resp =
|
||||||
|
RpcErrorResponse::new(id.clone(), RpcError::tool_not_found(&call.name));
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let args: GenerateTextArgs =
|
||||||
|
match serde_json::from_value(call.arguments.clone()) {
|
||||||
|
Ok(a) => a,
|
||||||
|
Err(e) => {
|
||||||
|
let err_resp = RpcErrorResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
RpcError::invalid_params(format!("Invalid arguments: {}", e)),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Initialize Ollama provider and start streaming
|
||||||
|
let provider = match OllamaProvider::new("http://localhost:11434") {
|
||||||
|
Ok(p) => p,
|
||||||
|
Err(e) => {
|
||||||
|
let err_resp = RpcErrorResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
RpcError::internal_error(format!(
|
||||||
|
"Failed to init OllamaProvider: {}",
|
||||||
|
e
|
||||||
|
)),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let parameters = ChatParameters {
|
||||||
|
temperature: args.temperature,
|
||||||
|
max_tokens: args.max_tokens.map(|v| v as u32),
|
||||||
|
stream: true,
|
||||||
|
extra: HashMap::new(),
|
||||||
|
};
|
||||||
|
let request = ChatRequest {
|
||||||
|
model: args.model,
|
||||||
|
messages: args.messages,
|
||||||
|
parameters,
|
||||||
|
tools: None,
|
||||||
|
};
|
||||||
|
let mut stream = match provider.chat_stream(request).await {
|
||||||
|
Ok(s) => s,
|
||||||
|
Err(e) => {
|
||||||
|
let err_resp = RpcErrorResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
RpcError::internal_error(format!("Chat request failed: {}", e)),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
// Accumulate full content while sending incremental progress notifications
|
||||||
|
let mut final_content = String::new();
|
||||||
|
while let Some(chunk) = stream.next().await {
|
||||||
|
match chunk {
|
||||||
|
Ok(resp) => {
|
||||||
|
// Append chunk to the final content buffer
|
||||||
|
final_content.push_str(&resp.message.content);
|
||||||
|
// Emit a progress notification for the UI
|
||||||
|
let notif = RpcNotification::new(
|
||||||
|
"tools/call/progress",
|
||||||
|
Some(json!({ "content": resp.message.content })),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(¬if)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
if resp.is_final {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
let err_resp = RpcErrorResponse::new(
|
||||||
|
id.clone(),
|
||||||
|
RpcError::internal_error(format!("Stream error: {}", e)),
|
||||||
|
);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// After streaming, send the final tool response containing the full content
|
||||||
|
let final_output = final_content.clone();
|
||||||
|
let response = McpToolResponse {
|
||||||
|
name: call.name,
|
||||||
|
success: true,
|
||||||
|
output: json!(final_output),
|
||||||
|
metadata: HashMap::new(),
|
||||||
|
duration_ms: 0,
|
||||||
|
};
|
||||||
|
let final_resp =
|
||||||
|
RpcResponse::new(id.clone(), serde_json::to_value(response).unwrap());
|
||||||
|
let s = serde_json::to_string(&final_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
// Non‑streaming requests are handled by the generic handler
|
||||||
|
match handle_request(&req).await {
|
||||||
|
Ok(res) => {
|
||||||
|
let resp = RpcResponse::new(id, res);
|
||||||
|
let s = serde_json::to_string(&resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let err_resp = RpcErrorResponse::new(id, err);
|
||||||
|
let s = serde_json::to_string(&err_resp)?;
|
||||||
|
stdout.write_all(s.as_bytes()).await?;
|
||||||
|
stdout.write_all(b"\n").await?;
|
||||||
|
stdout.flush().await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!("Read error: {}", e);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -602,13 +602,23 @@ impl Provider for OllamaProvider {
|
|||||||
|
|
||||||
let options = Self::build_options(parameters);
|
let options = Self::build_options(parameters);
|
||||||
|
|
||||||
let ollama_tools = tools.as_ref().map(|t| Self::convert_tools_to_ollama(t));
|
// Only send the `tools` field if there is at least one tool.
|
||||||
|
// An empty array makes Ollama validate tool support and can cause a
|
||||||
|
// 400 Bad Request for models that do not support tools.
|
||||||
|
// Currently the `tools` field is omitted for compatibility; the variable is retained
|
||||||
|
// for potential future use.
|
||||||
|
let _ollama_tools = tools
|
||||||
|
.as_ref()
|
||||||
|
.filter(|t| !t.is_empty())
|
||||||
|
.map(|t| Self::convert_tools_to_ollama(t));
|
||||||
|
|
||||||
|
// Ollama currently rejects any presence of the `tools` field for models that
|
||||||
|
// do not support function calling. To be safe, we omit the field entirely.
|
||||||
let ollama_request = OllamaChatRequest {
|
let ollama_request = OllamaChatRequest {
|
||||||
model,
|
model,
|
||||||
messages,
|
messages,
|
||||||
stream: false,
|
stream: false,
|
||||||
tools: ollama_tools,
|
tools: None,
|
||||||
options,
|
options,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -695,13 +705,21 @@ impl Provider for OllamaProvider {
|
|||||||
|
|
||||||
let options = Self::build_options(parameters);
|
let options = Self::build_options(parameters);
|
||||||
|
|
||||||
let ollama_tools = tools.as_ref().map(|t| Self::convert_tools_to_ollama(t));
|
// Only include the `tools` field if there is at least one tool.
|
||||||
|
// Sending an empty tools array causes Ollama to reject the request for
|
||||||
|
// models without tool support (400 Bad Request).
|
||||||
|
// Retain tools conversion for possible future extensions, but silence unused warnings.
|
||||||
|
let _ollama_tools = tools
|
||||||
|
.as_ref()
|
||||||
|
.filter(|t| !t.is_empty())
|
||||||
|
.map(|t| Self::convert_tools_to_ollama(t));
|
||||||
|
|
||||||
|
// Omit the `tools` field for compatibility with models lacking tool support.
|
||||||
let ollama_request = OllamaChatRequest {
|
let ollama_request = OllamaChatRequest {
|
||||||
model,
|
model,
|
||||||
messages,
|
messages,
|
||||||
stream: true,
|
stream: true,
|
||||||
tools: ollama_tools,
|
tools: None,
|
||||||
options,
|
options,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ description = "Terminal User Interface for OWLEN LLM client"
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
owlen-core = { path = "../owlen-core" }
|
owlen-core = { path = "../owlen-core" }
|
||||||
owlen-ollama = { path = "../owlen-ollama" }
|
owlen-ollama = { path = "../owlen-ollama" }
|
||||||
|
# Removed circular dependency on `owlen-cli`. The TUI no longer directly depends on the CLI crate.
|
||||||
|
|
||||||
# TUI framework
|
# TUI framework
|
||||||
ratatui = { workspace = true }
|
ratatui = { workspace = true }
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use anyhow::Result;
|
use anyhow::Result;
|
||||||
|
use owlen_core::mcp::remote_client::RemoteMcpClient;
|
||||||
use owlen_core::{
|
use owlen_core::{
|
||||||
provider::{Provider, ProviderConfig},
|
provider::{Provider, ProviderConfig},
|
||||||
session::{SessionController, SessionOutcome},
|
session::{SessionController, SessionOutcome},
|
||||||
@@ -14,7 +15,8 @@ use uuid::Uuid;
|
|||||||
|
|
||||||
use crate::config;
|
use crate::config;
|
||||||
use crate::events::Event;
|
use crate::events::Event;
|
||||||
use owlen_ollama::OllamaProvider;
|
// Agent executor moved to separate binary `owlen-agent`. The TUI no longer directly
|
||||||
|
// imports `AgentExecutor` to avoid a circular dependency on `owlen-cli`.
|
||||||
use std::collections::{BTreeSet, HashSet};
|
use std::collections::{BTreeSet, HashSet};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
@@ -108,6 +110,18 @@ pub enum SessionEvent {
|
|||||||
endpoints: Vec<String>,
|
endpoints: Vec<String>,
|
||||||
callback_id: Uuid,
|
callback_id: Uuid,
|
||||||
},
|
},
|
||||||
|
/// Agent iteration update (shows THOUGHT/ACTION/OBSERVATION)
|
||||||
|
AgentUpdate {
|
||||||
|
content: String,
|
||||||
|
},
|
||||||
|
/// Agent execution completed with final answer
|
||||||
|
AgentCompleted {
|
||||||
|
answer: String,
|
||||||
|
},
|
||||||
|
/// Agent execution failed
|
||||||
|
AgentFailed {
|
||||||
|
error: String,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const HELP_TAB_COUNT: usize = 7;
|
pub const HELP_TAB_COUNT: usize = 7;
|
||||||
@@ -138,11 +152,13 @@ pub struct ChatApp {
|
|||||||
loading_animation_frame: usize, // Frame counter for loading animation
|
loading_animation_frame: usize, // Frame counter for loading animation
|
||||||
is_loading: bool, // Whether we're currently loading a response
|
is_loading: bool, // Whether we're currently loading a response
|
||||||
current_thinking: Option<String>, // Current thinking content from last assistant message
|
current_thinking: Option<String>, // Current thinking content from last assistant message
|
||||||
pending_key: Option<char>, // For multi-key sequences like gg, dd
|
// Holds the latest formatted Agentic ReAct actions (thought/action/observation)
|
||||||
clipboard: String, // Vim-style clipboard for yank/paste
|
agent_actions: Option<String>,
|
||||||
command_buffer: String, // Buffer for command mode input
|
pending_key: Option<char>, // For multi-key sequences like gg, dd
|
||||||
|
clipboard: String, // Vim-style clipboard for yank/paste
|
||||||
|
command_buffer: String, // Buffer for command mode input
|
||||||
command_suggestions: Vec<String>, // Filtered command suggestions based on current input
|
command_suggestions: Vec<String>, // Filtered command suggestions based on current input
|
||||||
selected_suggestion: usize, // Index of selected suggestion
|
selected_suggestion: usize, // Index of selected suggestion
|
||||||
visual_start: Option<(usize, usize)>, // Visual mode selection start (row, col) for Input panel
|
visual_start: Option<(usize, usize)>, // Visual mode selection start (row, col) for Input panel
|
||||||
visual_end: Option<(usize, usize)>, // Visual mode selection end (row, col) for scrollable panels
|
visual_end: Option<(usize, usize)>, // Visual mode selection end (row, col) for scrollable panels
|
||||||
focused_panel: FocusedPanel, // Currently focused panel for scrolling
|
focused_panel: FocusedPanel, // Currently focused panel for scrolling
|
||||||
@@ -156,6 +172,12 @@ pub struct ChatApp {
|
|||||||
selected_theme_index: usize, // Index of selected theme in browser
|
selected_theme_index: usize, // Index of selected theme in browser
|
||||||
pending_consent: Option<ConsentDialogState>, // Pending consent request
|
pending_consent: Option<ConsentDialogState>, // Pending consent request
|
||||||
system_status: String, // System/status messages (tool execution, status, etc)
|
system_status: String, // System/status messages (tool execution, status, etc)
|
||||||
|
/// Simple execution budget: maximum number of tool calls allowed per session.
|
||||||
|
_execution_budget: usize,
|
||||||
|
/// Agent mode enabled
|
||||||
|
agent_mode: bool,
|
||||||
|
/// Agent running flag
|
||||||
|
agent_running: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
@@ -210,6 +232,7 @@ impl ChatApp {
|
|||||||
loading_animation_frame: 0,
|
loading_animation_frame: 0,
|
||||||
is_loading: false,
|
is_loading: false,
|
||||||
current_thinking: None,
|
current_thinking: None,
|
||||||
|
agent_actions: None,
|
||||||
pending_key: None,
|
pending_key: None,
|
||||||
clipboard: String::new(),
|
clipboard: String::new(),
|
||||||
command_buffer: String::new(),
|
command_buffer: String::new(),
|
||||||
@@ -228,6 +251,9 @@ impl ChatApp {
|
|||||||
selected_theme_index: 0,
|
selected_theme_index: 0,
|
||||||
pending_consent: None,
|
pending_consent: None,
|
||||||
system_status: String::new(),
|
system_status: String::new(),
|
||||||
|
_execution_budget: 50,
|
||||||
|
agent_mode: false,
|
||||||
|
agent_running: false,
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok((app, session_rx))
|
Ok((app, session_rx))
|
||||||
@@ -396,6 +422,8 @@ impl ChatApp {
|
|||||||
("privacy-enable", "Enable a privacy-sensitive tool"),
|
("privacy-enable", "Enable a privacy-sensitive tool"),
|
||||||
("privacy-disable", "Disable a privacy-sensitive tool"),
|
("privacy-disable", "Disable a privacy-sensitive tool"),
|
||||||
("privacy-clear", "Clear stored secure data"),
|
("privacy-clear", "Clear stored secure data"),
|
||||||
|
("agent", "Enable agent mode for autonomous task execution"),
|
||||||
|
("stop-agent", "Stop the running agent"),
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1495,6 +1523,25 @@ impl ChatApp {
|
|||||||
self.command_suggestions.clear();
|
self.command_suggestions.clear();
|
||||||
return Ok(AppState::Running);
|
return Ok(AppState::Running);
|
||||||
}
|
}
|
||||||
|
// "run-agent" command removed to break circular dependency on owlen-cli.
|
||||||
|
"agent" => {
|
||||||
|
if self.agent_running {
|
||||||
|
self.status = "Agent is already running".to_string();
|
||||||
|
} else {
|
||||||
|
self.agent_mode = true;
|
||||||
|
self.status = "Agent mode enabled. Next message will be processed by agent.".to_string();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"stop-agent" => {
|
||||||
|
if self.agent_running {
|
||||||
|
self.agent_running = false;
|
||||||
|
self.agent_mode = false;
|
||||||
|
self.status = "Agent execution stopped".to_string();
|
||||||
|
self.agent_actions = None;
|
||||||
|
} else {
|
||||||
|
self.status = "No agent is currently running".to_string();
|
||||||
|
}
|
||||||
|
}
|
||||||
"n" | "new" => {
|
"n" | "new" => {
|
||||||
self.controller.start_new_conversation(None, None);
|
self.controller.start_new_conversation(None, None);
|
||||||
self.status = "Started new conversation".to_string();
|
self.status = "Started new conversation".to_string();
|
||||||
@@ -2166,6 +2213,28 @@ impl ChatApp {
|
|||||||
});
|
});
|
||||||
self.status = "Consent required - Press Y to allow, N to deny".to_string();
|
self.status = "Consent required - Press Y to allow, N to deny".to_string();
|
||||||
}
|
}
|
||||||
|
SessionEvent::AgentUpdate { content } => {
|
||||||
|
// Update agent actions panel with latest ReAct iteration
|
||||||
|
self.set_agent_actions(content);
|
||||||
|
}
|
||||||
|
SessionEvent::AgentCompleted { answer } => {
|
||||||
|
// Agent finished, add final answer to conversation
|
||||||
|
self.controller
|
||||||
|
.conversation_mut()
|
||||||
|
.push_assistant_message(answer);
|
||||||
|
self.agent_running = false;
|
||||||
|
self.agent_mode = false;
|
||||||
|
self.agent_actions = None;
|
||||||
|
self.status = "Agent completed successfully".to_string();
|
||||||
|
self.stop_loading_animation();
|
||||||
|
}
|
||||||
|
SessionEvent::AgentFailed { error } => {
|
||||||
|
// Agent failed, show error
|
||||||
|
self.error = Some(format!("Agent failed: {}", error));
|
||||||
|
self.agent_running = false;
|
||||||
|
self.agent_actions = None;
|
||||||
|
self.stop_loading_animation();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -2195,20 +2264,41 @@ impl ChatApp {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
match OllamaProvider::from_config(&provider_cfg, Some(&general)) {
|
// Separate handling based on provider type.
|
||||||
Ok(provider) => match provider.list_models().await {
|
if provider_type == "ollama" {
|
||||||
Ok(mut provider_models) => {
|
// Local Ollama – communicate via the MCP LLM server.
|
||||||
for model in &mut provider_models {
|
match RemoteMcpClient::new() {
|
||||||
model.provider = name.clone();
|
Ok(client) => match client.list_models().await {
|
||||||
|
Ok(mut provider_models) => {
|
||||||
|
for model in &mut provider_models {
|
||||||
|
model.provider = name.clone();
|
||||||
|
}
|
||||||
|
models.extend(provider_models);
|
||||||
}
|
}
|
||||||
models.extend(provider_models);
|
Err(err) => errors.push(format!("{}: {}", name, err)),
|
||||||
}
|
},
|
||||||
Err(err) => errors.push(format!("{}: {}", name, err)),
|
Err(err) => errors.push(format!("{}: {}", name, err)),
|
||||||
},
|
}
|
||||||
Err(err) => errors.push(format!("{}: {}", name, err)),
|
} else {
|
||||||
|
// Ollama Cloud – use the direct Ollama provider implementation.
|
||||||
|
use owlen_ollama::OllamaProvider;
|
||||||
|
match OllamaProvider::from_config(&provider_cfg, Some(&general)) {
|
||||||
|
Ok(provider) => match provider.list_models().await {
|
||||||
|
Ok(mut cloud_models) => {
|
||||||
|
for model in &mut cloud_models {
|
||||||
|
model.provider = name.clone();
|
||||||
|
}
|
||||||
|
models.extend(cloud_models);
|
||||||
|
}
|
||||||
|
Err(err) => errors.push(format!("{}: {}", name, err)),
|
||||||
|
},
|
||||||
|
Err(err) => errors.push(format!("{}: {}", name, err)),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Sort models alphabetically by name for a predictable UI order
|
||||||
|
models.sort_by(|a, b| a.name.to_lowercase().cmp(&b.name.to_lowercase()));
|
||||||
(models, errors)
|
(models, errors)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2438,7 +2528,17 @@ impl ChatApp {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let general = self.controller.config().general.clone();
|
let general = self.controller.config().general.clone();
|
||||||
let provider = Arc::new(OllamaProvider::from_config(&provider_cfg, Some(&general))?);
|
// Choose the appropriate provider implementation based on its type.
|
||||||
|
let provider: Arc<dyn owlen_core::provider::Provider> =
|
||||||
|
if provider_cfg.provider_type.eq_ignore_ascii_case("ollama") {
|
||||||
|
// Local Ollama via MCP server.
|
||||||
|
Arc::new(RemoteMcpClient::new()?)
|
||||||
|
} else {
|
||||||
|
// Ollama Cloud – instantiate the direct provider.
|
||||||
|
use owlen_ollama::OllamaProvider;
|
||||||
|
let ollama = OllamaProvider::from_config(&provider_cfg, Some(&general))?;
|
||||||
|
Arc::new(ollama)
|
||||||
|
};
|
||||||
|
|
||||||
self.controller.switch_provider(provider).await?;
|
self.controller.switch_provider(provider).await?;
|
||||||
self.current_provider = provider_name.to_string();
|
self.current_provider = provider_name.to_string();
|
||||||
@@ -2546,6 +2646,11 @@ impl ChatApp {
|
|||||||
|
|
||||||
self.pending_llm_request = false;
|
self.pending_llm_request = false;
|
||||||
|
|
||||||
|
// Check if agent mode is enabled
|
||||||
|
if self.agent_mode {
|
||||||
|
return self.process_agent_request().await;
|
||||||
|
}
|
||||||
|
|
||||||
// Step 1: Show loading model status and start animation
|
// Step 1: Show loading model status and start animation
|
||||||
self.status = format!("Loading model '{}'...", self.controller.selected_model());
|
self.status = format!("Loading model '{}'...", self.controller.selected_model());
|
||||||
self.start_loading_animation();
|
self.start_loading_animation();
|
||||||
@@ -2609,6 +2714,77 @@ impl ChatApp {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn process_agent_request(&mut self) -> Result<()> {
|
||||||
|
use owlen_core::agent::{AgentConfig, AgentExecutor};
|
||||||
|
use owlen_core::mcp::remote_client::RemoteMcpClient;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
self.agent_running = true;
|
||||||
|
self.status = "Agent is running...".to_string();
|
||||||
|
self.start_loading_animation();
|
||||||
|
|
||||||
|
// Get the last user message
|
||||||
|
let user_message = self
|
||||||
|
.controller
|
||||||
|
.conversation()
|
||||||
|
.messages
|
||||||
|
.iter()
|
||||||
|
.rev()
|
||||||
|
.find(|m| m.role == owlen_core::types::Role::User)
|
||||||
|
.map(|m| m.content.clone())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
// Create agent config
|
||||||
|
let config = AgentConfig {
|
||||||
|
max_iterations: 10,
|
||||||
|
model: self.controller.selected_model().to_string(),
|
||||||
|
temperature: Some(0.7),
|
||||||
|
max_tokens: None,
|
||||||
|
max_tool_calls: 20,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get the provider
|
||||||
|
let provider = self.controller.provider().clone();
|
||||||
|
|
||||||
|
// Create MCP client
|
||||||
|
let mcp_client = match RemoteMcpClient::new() {
|
||||||
|
Ok(client) => Arc::new(client),
|
||||||
|
Err(e) => {
|
||||||
|
self.error = Some(format!("Failed to initialize MCP client: {}", e));
|
||||||
|
self.agent_running = false;
|
||||||
|
self.agent_mode = false;
|
||||||
|
self.stop_loading_animation();
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create agent executor
|
||||||
|
let executor = AgentExecutor::new(provider, mcp_client, config, None);
|
||||||
|
|
||||||
|
// Run agent
|
||||||
|
match executor.run(user_message).await {
|
||||||
|
Ok(answer) => {
|
||||||
|
self.controller
|
||||||
|
.conversation_mut()
|
||||||
|
.push_assistant_message(answer);
|
||||||
|
self.agent_running = false;
|
||||||
|
self.agent_mode = false;
|
||||||
|
self.agent_actions = None;
|
||||||
|
self.status = "Agent completed successfully".to_string();
|
||||||
|
self.stop_loading_animation();
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
self.error = Some(format!("Agent failed: {}", e));
|
||||||
|
self.agent_running = false;
|
||||||
|
self.agent_mode = false;
|
||||||
|
self.agent_actions = None;
|
||||||
|
self.stop_loading_animation();
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn process_pending_tool_execution(&mut self) -> Result<()> {
|
pub async fn process_pending_tool_execution(&mut self) -> Result<()> {
|
||||||
if self.pending_tool_execution.is_none() {
|
if self.pending_tool_execution.is_none() {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
@@ -2782,6 +2958,26 @@ impl ChatApp {
|
|||||||
self.current_thinking.as_ref()
|
self.current_thinking.as_ref()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get a reference to the latest agent actions, if any.
|
||||||
|
pub fn agent_actions(&self) -> Option<&String> {
|
||||||
|
self.agent_actions.as_ref()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set the current agent actions content.
|
||||||
|
pub fn set_agent_actions(&mut self, actions: String) {
|
||||||
|
self.agent_actions = Some(actions);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if agent mode is enabled
|
||||||
|
pub fn is_agent_mode(&self) -> bool {
|
||||||
|
self.agent_mode
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if agent is currently running
|
||||||
|
pub fn is_agent_running(&self) -> bool {
|
||||||
|
self.agent_running
|
||||||
|
}
|
||||||
|
|
||||||
pub fn get_rendered_lines(&self) -> Vec<String> {
|
pub fn get_rendered_lines(&self) -> Vec<String> {
|
||||||
match self.focused_panel {
|
match self.focused_panel {
|
||||||
FocusedPanel::Chat => {
|
FocusedPanel::Chat => {
|
||||||
|
|||||||
@@ -51,6 +51,15 @@ pub fn render_chat(frame: &mut Frame<'_>, app: &mut ChatApp) {
|
|||||||
0
|
0
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Calculate agent actions panel height (similar to thinking)
|
||||||
|
let actions_height = if let Some(actions) = app.agent_actions() {
|
||||||
|
let content_width = available_width.saturating_sub(4);
|
||||||
|
let visual_lines = calculate_wrapped_line_count(actions.lines(), content_width);
|
||||||
|
(visual_lines as u16).min(6) + 2 // +2 for borders, max 6 lines
|
||||||
|
} else {
|
||||||
|
0
|
||||||
|
};
|
||||||
|
|
||||||
let mut constraints = vec![
|
let mut constraints = vec![
|
||||||
Constraint::Length(4), // Header
|
Constraint::Length(4), // Header
|
||||||
Constraint::Min(8), // Messages
|
Constraint::Min(8), // Messages
|
||||||
@@ -59,6 +68,10 @@ pub fn render_chat(frame: &mut Frame<'_>, app: &mut ChatApp) {
|
|||||||
if thinking_height > 0 {
|
if thinking_height > 0 {
|
||||||
constraints.push(Constraint::Length(thinking_height)); // Thinking
|
constraints.push(Constraint::Length(thinking_height)); // Thinking
|
||||||
}
|
}
|
||||||
|
// Insert agent actions panel after thinking (if any)
|
||||||
|
if actions_height > 0 {
|
||||||
|
constraints.push(Constraint::Length(actions_height)); // Agent actions
|
||||||
|
}
|
||||||
|
|
||||||
constraints.push(Constraint::Length(input_height)); // Input
|
constraints.push(Constraint::Length(input_height)); // Input
|
||||||
constraints.push(Constraint::Length(5)); // System/Status output (3 lines content + 2 borders)
|
constraints.push(Constraint::Length(5)); // System/Status output (3 lines content + 2 borders)
|
||||||
@@ -80,6 +93,11 @@ pub fn render_chat(frame: &mut Frame<'_>, app: &mut ChatApp) {
|
|||||||
render_thinking(frame, layout[idx], app);
|
render_thinking(frame, layout[idx], app);
|
||||||
idx += 1;
|
idx += 1;
|
||||||
}
|
}
|
||||||
|
// Render agent actions panel if present
|
||||||
|
if actions_height > 0 {
|
||||||
|
render_agent_actions(frame, layout[idx], app);
|
||||||
|
idx += 1;
|
||||||
|
}
|
||||||
|
|
||||||
render_input(frame, layout[idx], app);
|
render_input(frame, layout[idx], app);
|
||||||
idx += 1;
|
idx += 1;
|
||||||
@@ -898,6 +916,191 @@ fn render_thinking(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Render a panel displaying the latest ReAct agent actions (thought/action/observation).
|
||||||
|
// Color-coded: THOUGHT (blue), ACTION (yellow), OBSERVATION (green)
|
||||||
|
fn render_agent_actions(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
|
||||||
|
let theme = app.theme().clone();
|
||||||
|
|
||||||
|
if let Some(actions) = app.agent_actions().cloned() {
|
||||||
|
let viewport_height = area.height.saturating_sub(2) as usize; // subtract borders
|
||||||
|
let content_width = area.width.saturating_sub(4);
|
||||||
|
|
||||||
|
// Parse and color-code ReAct components
|
||||||
|
let mut lines: Vec<Line> = Vec::new();
|
||||||
|
|
||||||
|
for line in actions.lines() {
|
||||||
|
let line_trimmed = line.trim();
|
||||||
|
|
||||||
|
// Detect ReAct components and apply color coding
|
||||||
|
if line_trimmed.starts_with("THOUGHT:") {
|
||||||
|
// Blue for THOUGHT
|
||||||
|
let thought_content = line_trimmed.strip_prefix("THOUGHT:").unwrap_or("").trim();
|
||||||
|
let wrapped = wrap(thought_content, content_width as usize);
|
||||||
|
|
||||||
|
// First line with label
|
||||||
|
if let Some(first) = wrapped.first() {
|
||||||
|
lines.push(Line::from(vec![
|
||||||
|
Span::styled(
|
||||||
|
"THOUGHT: ",
|
||||||
|
Style::default()
|
||||||
|
.fg(Color::Blue)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
),
|
||||||
|
Span::styled(first.to_string(), Style::default().fg(Color::Blue)),
|
||||||
|
]));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Continuation lines
|
||||||
|
for chunk in wrapped.iter().skip(1) {
|
||||||
|
lines.push(Line::from(Span::styled(
|
||||||
|
format!(" {}", chunk),
|
||||||
|
Style::default().fg(Color::Blue),
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
} else if line_trimmed.starts_with("ACTION:") {
|
||||||
|
// Yellow for ACTION
|
||||||
|
let action_content = line_trimmed.strip_prefix("ACTION:").unwrap_or("").trim();
|
||||||
|
lines.push(Line::from(vec![
|
||||||
|
Span::styled(
|
||||||
|
"ACTION: ",
|
||||||
|
Style::default()
|
||||||
|
.fg(Color::Yellow)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
),
|
||||||
|
Span::styled(
|
||||||
|
action_content,
|
||||||
|
Style::default()
|
||||||
|
.fg(Color::Yellow)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
),
|
||||||
|
]));
|
||||||
|
} else if line_trimmed.starts_with("ACTION_INPUT:") {
|
||||||
|
// Cyan for ACTION_INPUT
|
||||||
|
let input_content = line_trimmed
|
||||||
|
.strip_prefix("ACTION_INPUT:")
|
||||||
|
.unwrap_or("")
|
||||||
|
.trim();
|
||||||
|
let wrapped = wrap(input_content, content_width as usize);
|
||||||
|
|
||||||
|
if let Some(first) = wrapped.first() {
|
||||||
|
lines.push(Line::from(vec![
|
||||||
|
Span::styled(
|
||||||
|
"ACTION_INPUT: ",
|
||||||
|
Style::default()
|
||||||
|
.fg(Color::Cyan)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
),
|
||||||
|
Span::styled(first.to_string(), Style::default().fg(Color::Cyan)),
|
||||||
|
]));
|
||||||
|
}
|
||||||
|
|
||||||
|
for chunk in wrapped.iter().skip(1) {
|
||||||
|
lines.push(Line::from(Span::styled(
|
||||||
|
format!(" {}", chunk),
|
||||||
|
Style::default().fg(Color::Cyan),
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
} else if line_trimmed.starts_with("OBSERVATION:") {
|
||||||
|
// Green for OBSERVATION
|
||||||
|
let obs_content = line_trimmed
|
||||||
|
.strip_prefix("OBSERVATION:")
|
||||||
|
.unwrap_or("")
|
||||||
|
.trim();
|
||||||
|
let wrapped = wrap(obs_content, content_width as usize);
|
||||||
|
|
||||||
|
if let Some(first) = wrapped.first() {
|
||||||
|
lines.push(Line::from(vec![
|
||||||
|
Span::styled(
|
||||||
|
"OBSERVATION: ",
|
||||||
|
Style::default()
|
||||||
|
.fg(Color::Green)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
),
|
||||||
|
Span::styled(first.to_string(), Style::default().fg(Color::Green)),
|
||||||
|
]));
|
||||||
|
}
|
||||||
|
|
||||||
|
for chunk in wrapped.iter().skip(1) {
|
||||||
|
lines.push(Line::from(Span::styled(
|
||||||
|
format!(" {}", chunk),
|
||||||
|
Style::default().fg(Color::Green),
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
} else if line_trimmed.starts_with("FINAL_ANSWER:") {
|
||||||
|
// Magenta for FINAL_ANSWER
|
||||||
|
let answer_content = line_trimmed
|
||||||
|
.strip_prefix("FINAL_ANSWER:")
|
||||||
|
.unwrap_or("")
|
||||||
|
.trim();
|
||||||
|
let wrapped = wrap(answer_content, content_width as usize);
|
||||||
|
|
||||||
|
if let Some(first) = wrapped.first() {
|
||||||
|
lines.push(Line::from(vec![
|
||||||
|
Span::styled(
|
||||||
|
"FINAL_ANSWER: ",
|
||||||
|
Style::default()
|
||||||
|
.fg(Color::Magenta)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
),
|
||||||
|
Span::styled(
|
||||||
|
first.to_string(),
|
||||||
|
Style::default()
|
||||||
|
.fg(Color::Magenta)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
),
|
||||||
|
]));
|
||||||
|
}
|
||||||
|
|
||||||
|
for chunk in wrapped.iter().skip(1) {
|
||||||
|
lines.push(Line::from(Span::styled(
|
||||||
|
format!(" {}", chunk),
|
||||||
|
Style::default().fg(Color::Magenta),
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
} else if !line_trimmed.is_empty() {
|
||||||
|
// Regular text
|
||||||
|
let wrapped = wrap(line_trimmed, content_width as usize);
|
||||||
|
for chunk in wrapped {
|
||||||
|
lines.push(Line::from(Span::styled(
|
||||||
|
chunk.into_owned(),
|
||||||
|
Style::default().fg(theme.text),
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Empty line
|
||||||
|
lines.push(Line::from(""));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Highlight border if this panel is focused
|
||||||
|
let border_color = if matches!(app.focused_panel(), FocusedPanel::Thinking) {
|
||||||
|
// Reuse the same focus logic; could add a dedicated enum variant later.
|
||||||
|
theme.focused_panel_border
|
||||||
|
} else {
|
||||||
|
theme.unfocused_panel_border
|
||||||
|
};
|
||||||
|
|
||||||
|
let paragraph = Paragraph::new(lines)
|
||||||
|
.style(Style::default().bg(theme.background))
|
||||||
|
.block(
|
||||||
|
Block::default()
|
||||||
|
.title(Span::styled(
|
||||||
|
" 🤖 Agent Actions ",
|
||||||
|
Style::default()
|
||||||
|
.fg(theme.thinking_panel_title)
|
||||||
|
.add_modifier(Modifier::ITALIC),
|
||||||
|
))
|
||||||
|
.borders(Borders::ALL)
|
||||||
|
.border_style(Style::default().fg(border_color))
|
||||||
|
.style(Style::default().bg(theme.background).fg(theme.text)),
|
||||||
|
)
|
||||||
|
.wrap(Wrap { trim: false });
|
||||||
|
|
||||||
|
frame.render_widget(paragraph, area);
|
||||||
|
_ = viewport_height;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn render_input(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
|
fn render_input(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
|
||||||
let theme = app.theme();
|
let theme = app.theme();
|
||||||
let title = match app.mode() {
|
let title = match app.mode() {
|
||||||
@@ -1068,17 +1271,35 @@ fn render_status(frame: &mut Frame<'_>, area: Rect, app: &ChatApp) {
|
|||||||
|
|
||||||
let help_text = "i:Input :m:Model :n:New :c:Clear :h:Help q:Quit";
|
let help_text = "i:Input :m:Model :n:New :c:Clear :h:Help q:Quit";
|
||||||
|
|
||||||
let spans = vec![
|
let mut spans = vec![Span::styled(
|
||||||
Span::styled(
|
format!(" {} ", mode_text),
|
||||||
format!(" {} ", mode_text),
|
Style::default()
|
||||||
|
.fg(theme.background)
|
||||||
|
.bg(mode_bg_color)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
)];
|
||||||
|
|
||||||
|
// Add agent status indicator if agent mode is active
|
||||||
|
if app.is_agent_running() {
|
||||||
|
spans.push(Span::styled(
|
||||||
|
" 🤖 AGENT RUNNING ",
|
||||||
Style::default()
|
Style::default()
|
||||||
.fg(theme.background)
|
.fg(Color::Black)
|
||||||
.bg(mode_bg_color)
|
.bg(Color::Yellow)
|
||||||
.add_modifier(Modifier::BOLD),
|
.add_modifier(Modifier::BOLD),
|
||||||
),
|
));
|
||||||
Span::styled(" ", Style::default().fg(theme.text)),
|
} else if app.is_agent_mode() {
|
||||||
Span::styled(help_text, Style::default().fg(theme.info)),
|
spans.push(Span::styled(
|
||||||
];
|
" 🤖 AGENT MODE ",
|
||||||
|
Style::default()
|
||||||
|
.fg(Color::Black)
|
||||||
|
.bg(Color::Cyan)
|
||||||
|
.add_modifier(Modifier::BOLD),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
spans.push(Span::styled(" ", Style::default().fg(theme.text)));
|
||||||
|
spans.push(Span::styled(help_text, Style::default().fg(theme.info)));
|
||||||
|
|
||||||
let paragraph = Paragraph::new(Line::from(spans))
|
let paragraph = Paragraph::new(Line::from(spans))
|
||||||
.alignment(Alignment::Left)
|
.alignment(Alignment::Left)
|
||||||
|
|||||||
Reference in New Issue
Block a user