This commit fixes 12 categories of errors across the codebase:

- Fix owlen-mcp-llm-server build target conflict by renaming lib.rs to main.rs
- Resolve ambiguous glob re-exports in owlen-core by using explicit exports
- Add Default derive to MockMcpClient and MockProvider test utilities
- Remove unused imports from owlen-core test files
- Fix needless borrows in test file arguments
- Improve Config initialization style in mode_tool_filter tests
- Make AgentExecutor::parse_response public for testing
- Remove non-existent max_tool_calls field from AgentConfig usage
- Fix AgentExecutor::new calls to use correct 3-argument signature
- Fix AgentResult field access in agent tests
- Use Debug formatting instead of Display for AgentResult
- Remove unnecessary default() calls on unit structs

All changes ensure the project compiles cleanly with:

- cargo check --all-targets ✓
- cargo clippy --all-targets -- -D warnings ✓
- cargo test --no-run ✓

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
//! Integration tests for the ReAct agent loop functionality.
//!
//! These tests verify that the agent executor correctly:
//! - Parses ReAct formatted responses
//! - Executes tool calls
//! - Handles multi-step workflows
//! - Recovers from errors
//! - Respects iteration limits
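//!
//! For reference, the parsing tests below feed the executor responses in this
//! general shape (copied from the test inputs; the exact prompt wording is an
//! implementation detail of the executor):
//!
//! ```text
//! THOUGHT: I should search for information
//! ACTION: web_search
//! ACTION_INPUT: {"query": "rust async programming"}
//! ```
//!
//! A final answer uses `ACTION: final_answer` with the answer text as its
//! `ACTION_INPUT`.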
use owlen_cli::agent::{AgentConfig, AgentExecutor, LlmResponse};
use owlen_core::mcp::remote_client::RemoteMcpClient;
use std::sync::Arc;

#[tokio::test]
async fn test_react_parsing_tool_call() {
    let executor = create_test_executor();

    // Test parsing a tool call with JSON arguments
    let text = "THOUGHT: I should search for information\nACTION: web_search\nACTION_INPUT: {\"query\": \"rust async programming\"}\n";

    let result = executor.parse_response(text);

    match result {
        Ok(LlmResponse::ToolCall {
            thought,
            tool_name,
            arguments,
        }) => {
            assert_eq!(thought, "I should search for information");
            assert_eq!(tool_name, "web_search");
            assert_eq!(arguments["query"], "rust async programming");
        }
        other => panic!("Expected ToolCall, got: {:?}", other),
    }
}

#[tokio::test]
async fn test_react_parsing_final_answer() {
    let executor = create_test_executor();

    let text = "THOUGHT: I have enough information now\nACTION: final_answer\nACTION_INPUT: The answer is 42\n";

    let result = executor.parse_response(text);

    match result {
        Ok(LlmResponse::FinalAnswer { thought, answer }) => {
            assert_eq!(thought, "I have enough information now");
            assert_eq!(answer, "The answer is 42");
        }
        other => panic!("Expected FinalAnswer, got: {:?}", other),
    }
}

#[tokio::test]
async fn test_react_parsing_with_multiline_thought() {
    let executor = create_test_executor();

    let text = "THOUGHT: This is a complex\nmulti-line thought\nACTION: list_files\nACTION_INPUT: {\"path\": \".\"}\n";

    let result = executor.parse_response(text);

    // The regex currently only captures until the first newline.
    // This test documents current behavior.
    match result {
        Ok(LlmResponse::ToolCall { thought, .. }) => {
            // Regex pattern stops at the first \n after THOUGHT:
            assert!(thought.contains("This is a complex"));
        }
        other => panic!("Expected ToolCall, got: {:?}", other),
    }
}

#[tokio::test]
#[ignore] // Requires MCP LLM server to be running
async fn test_agent_single_tool_scenario() {
    // This test requires a running MCP LLM server (which wraps Ollama)
    let provider = Arc::new(RemoteMcpClient::new().unwrap());
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig {
        max_iterations: 5,
        model: "llama3.2".to_string(),
        temperature: Some(0.7),
        max_tokens: None,
    };

    let executor = AgentExecutor::new(provider, mcp_client, config);

    // Simple query that should complete in one tool call
    let result = executor
        .run("List files in the current directory".to_string())
        .await;

    match result {
        Ok(agent_result) => {
            assert!(
                !agent_result.answer.is_empty(),
                "Answer should not be empty"
            );
            println!("Agent answer: {}", agent_result.answer);
        }
        Err(e) => {
            // It's okay if this fails due to the LLM not following the format
            println!("Agent test skipped: {}", e);
        }
    }
}

#[tokio::test]
#[ignore] // Requires Ollama to be running
async fn test_agent_multi_step_workflow() {
    // Test a query that requires multiple tool calls
    let provider = Arc::new(RemoteMcpClient::new().unwrap());
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig {
        max_iterations: 10,
        model: "llama3.2".to_string(),
        temperature: Some(0.5), // Lower temperature for more consistent behavior
        max_tokens: None,
    };

    let executor = AgentExecutor::new(provider, mcp_client, config);

    // Query requiring multiple steps: list -> read -> analyze
    let result = executor
        .run("Find all Rust files and tell me which one contains 'Agent'".to_string())
        .await;

    match result {
        Ok(agent_result) => {
            assert!(!agent_result.answer.is_empty());
            println!("Multi-step answer: {:?}", agent_result);
        }
        Err(e) => {
            println!("Multi-step test skipped: {}", e);
        }
    }
}

#[tokio::test]
#[ignore] // Requires Ollama
async fn test_agent_iteration_limit() {
    let provider = Arc::new(RemoteMcpClient::new().unwrap());
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig {
        max_iterations: 2, // Very low limit to test enforcement
        model: "llama3.2".to_string(),
        temperature: Some(0.7),
        max_tokens: None,
    };

    let executor = AgentExecutor::new(provider, mcp_client, config);

    // Complex query that would require many iterations
    let result = executor
        .run("Perform an exhaustive analysis of all files".to_string())
        .await;

    // Should hit the iteration limit (or a parse error if the LLM doesn't follow the format)
    match result {
        Err(e) => {
            let error_str = format!("{}", e);
            // Accept either an iteration limit error or a parse error (LLM didn't follow the ReAct format)
            assert!(
                error_str.contains("Maximum iterations")
                    || error_str.contains("2")
                    || error_str.contains("parse"),
                "Expected iteration limit or parse error, got: {}",
                error_str
            );
            println!("Test passed: agent stopped with error: {}", error_str);
        }
        Ok(_) => {
            // It's possible the LLM completed within 2 iterations
            println!("Agent completed within iteration limit");
        }
    }
}

#[tokio::test]
#[ignore] // Requires Ollama
async fn test_agent_tool_budget_enforcement() {
    let provider = Arc::new(RemoteMcpClient::new().unwrap());
    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig {
        max_iterations: 3, // Very low iteration limit to enforce the budget
        model: "llama3.2".to_string(),
        temperature: Some(0.7),
        max_tokens: None,
    };

    let executor = AgentExecutor::new(provider, mcp_client, config);

    // Query that would require many tool calls
    let result = executor
        .run("Read every file in the project and summarize them all".to_string())
        .await;

    // Should hit the tool call budget (or a parse error if the LLM doesn't follow the format)
    match result {
        Err(e) => {
            let error_str = format!("{}", e);
            // Accept either a budget error or a parse error (LLM didn't follow the ReAct format)
            assert!(
                error_str.contains("Maximum iterations")
                    || error_str.contains("budget")
                    || error_str.contains("parse"),
                "Expected budget or parse error, got: {}",
                error_str
            );
            println!("Test passed: agent stopped with error: {}", error_str);
        }
        Ok(_) => {
            println!("Agent completed within tool budget");
        }
    }
}

// Helper function to create a test executor.
// The parsing tests only exercise parse_response and never actually call the
// MCP client, but constructing an AgentExecutor still requires a real
// RemoteMcpClient instance.
fn create_test_executor() -> AgentExecutor {
    let provider = match RemoteMcpClient::new() {
        Ok(client) => Arc::new(client),
        Err(_) => {
            // RemoteMcpClient::new() fails if the MCP server binary is missing,
            // so fail loudly with a build hint rather than continuing.
            panic!("MCP server binary not found - build the project first with: cargo build --all");
        }
    };

    let mcp_client = Arc::clone(&provider) as Arc<RemoteMcpClient>;

    let config = AgentConfig::default();
    AgentExecutor::new(provider, mcp_client, config)
}

#[test]
fn test_agent_config_defaults() {
    let config = AgentConfig::default();

    assert_eq!(config.max_iterations, 10);
    assert_eq!(config.model, "ollama");
    assert_eq!(config.temperature, Some(0.7));
    // max_tool_calls field removed - the agent now tracks iterations instead
}

#[test]
fn test_agent_config_custom() {
    let config = AgentConfig {
        max_iterations: 15,
        model: "custom-model".to_string(),
        temperature: Some(0.5),
        max_tokens: Some(2000),
    };

    assert_eq!(config.max_iterations, 15);
    assert_eq!(config.model, "custom-model");
    assert_eq!(config.temperature, Some(0.5));
    assert_eq!(config.max_tokens, Some(2000));
}