fix(agent): improve ReAct parser and tool schemas for better LLM compatibility
- Fix ACTION_INPUT regex to properly capture multiline JSON responses
  - Changed from stopping at the first newline to capturing all remaining text
  - Resolves parsing errors when the LLM generates formatted JSON with line breaks
- Enhance tool schemas with detailed descriptions and parameter specifications
  - Add comprehensive Message schema for the generate_text tool
  - Clarify the distinction between resources/get (file read) and resources/list (directory listing)
  - Include clear usage guidance in tool descriptions
- Set the default model to llama3.2:latest instead of the invalid "ollama"
- Add parse-error debugging to help troubleshoot LLM response issues

The agent infrastructure now correctly handles multiline tool arguments and provides better guidance to LLMs through improved tool schemas. Remaining errors are due to LLM quality (the model making poor tool choices or generating malformed responses), not infrastructure bugs.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
54
crates/owlen-cli/src/agent_main.rs
Normal file
54
crates/owlen-cli/src/agent_main.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
//! Simple entry point for the ReAct agentic executor.
|
||||
//!
|
||||
//! Usage: `owlen-agent "<prompt>" [--model <model>] [--max-iter <n>]`
|
||||
//!
|
||||
//! This binary demonstrates Phase 4 without the full TUI. It creates an
|
||||
//! OllamaProvider, a RemoteMcpClient, runs the AgentExecutor and prints the
|
||||
//! final answer.
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
use owlen_cli::agent::{AgentConfig, AgentExecutor};
|
||||
use owlen_core::mcp::remote_client::RemoteMcpClient;
|
||||
use owlen_ollama::OllamaProvider;
|
||||
|
||||
/// Command‑line arguments for the agent binary.
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = "owlen-agent", author, version, about = "Run the ReAct agent")]
|
||||
struct Args {
|
||||
/// The initial user query.
|
||||
prompt: String,
|
||||
/// Model to use (defaults to Ollama default).
|
||||
#[arg(long)]
|
||||
model: Option<String>,
|
||||
/// Maximum ReAct iterations.
|
||||
#[arg(long, default_value_t = 10)]
|
||||
max_iter: usize,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let args = Args::parse();
|
||||
|
||||
// Initialise the LLM provider (Ollama) – uses default local URL.
|
||||
let provider = Arc::new(OllamaProvider::new("http://localhost:11434")?);
|
||||
// Initialise the MCP client (remote LLM server) – this client also knows how
|
||||
// to call the built‑in resource tools.
|
||||
let mcp_client = Arc::new(RemoteMcpClient::new()?);
|
||||
|
||||
let config = AgentConfig {
|
||||
max_iterations: args.max_iter,
|
||||
model: args.model.unwrap_or_else(|| "llama3.2:latest".to_string()),
|
||||
..AgentConfig::default()
|
||||
};
|
||||
|
||||
let executor = AgentExecutor::new(provider, mcp_client, config, None);
|
||||
match executor.run(args.prompt).await {
|
||||
Ok(answer) => {
|
||||
println!("\nFinal answer:\n{}", answer);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => Err(anyhow::anyhow!(e)),
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user