feat(agent): implement Agent Orchestrator with LLM tool calling

Add a complete agent orchestration system that enables the LLM to call tools:

**Core Agent System** (`crates/core/agent`):
- Agent execution loop with tool call/result cycle (sketched after this list)
- Tool definitions in Ollama-compatible format (6 tools)
- Tool execution with permission checking
- Multi-iteration support with max iteration safety
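
A minimal sketch of the loop's shape follows. The `run_agent_loop` signature matches the CLI diff below; the helpers (`chat`, `tool_definitions`, `execute_tool`, the `ChatMessage` constructors) and the `MAX_ITERATIONS` value are illustrative names, not the crate's exact API:

```rust
use color_eyre::eyre::{eyre, Result};

const MAX_ITERATIONS: usize = 10; // safety cap against runaway tool loops

pub async fn run_agent_loop(
    client: &OllamaClient,
    prompt: &str,
    opts: &ChatOptions,
    perms: &Permissions,
) -> Result<String> {
    let mut messages = vec![ChatMessage::user(prompt)];
    for _ in 0..MAX_ITERATIONS {
        // Ask the model, advertising the six tool definitions.
        let reply = client.chat(&messages, &tool_definitions(), opts).await?;
        let Some(calls) = reply.tool_calls.clone() else {
            // No tool calls: the model has produced its final answer.
            return Ok(reply.content);
        };
        messages.push(reply); // keep the assistant turn in the context
        for call in calls {
            // Permission-checked execution; denials come back as error text.
            let result = execute_tool(&call, perms).await;
            messages.push(ChatMessage::tool(result));
        }
    }
    Err(eyre!("agent exceeded MAX_ITERATIONS without a final answer"))
}
```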

**Tool Definitions** (format sketched after the list):
- read: Read file contents
- glob: Find files by pattern
- grep: Search for patterns in files
- write: Write content to files
- edit: Edit files with find/replace
- bash: Execute bash commands
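
All six definitions share Ollama's OpenAI-style function format, with `parameters` expressed as a JSON Schema object. For example, the `read` tool's definition looks roughly like this (the description strings are illustrative, not the crate's exact wording):

```rust
use serde_json::{json, Value};

// Ollama advertises tools to the model as OpenAI-style function
// definitions; "parameters" is a JSON Schema object.
fn read_tool_definition() -> Value {
    json!({
        "type": "function",
        "function": {
            "name": "read",
            "description": "Read the contents of a file",
            "parameters": {
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "Path of the file to read"
                    }
                },
                "required": ["path"]
            }
        }
    })
}
```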

**Ollama Integration Updates**:
- Extended ChatMessage to support tool_calls
- Added Tool, ToolCall, ToolFunction types (sketched below)
- Updated chat_stream to accept tools parameter
- Made tool call fields optional for Ollama compatibility
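
A plausible shape for the extended types, modeled on Ollama's wire format; the optional `tool_calls` field is what keeps ordinary (non-tool) messages serializing cleanly. The exact field layout and derives in the crate may differ:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Clone)]
pub struct ChatMessage {
    pub role: String,
    pub content: String,
    // Absent on ordinary messages, so it must be optional; serde
    // deserializes a missing field as None.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ToolCall>>,
}

#[derive(Serialize, Deserialize, Clone)]
pub struct ToolCall {
    pub function: ToolFunction,
}

#[derive(Serialize, Deserialize, Clone)]
pub struct ToolFunction {
    pub name: String,
    // Ollama returns arguments as a JSON object, not a string.
    pub arguments: serde_json::Value,
}
```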

**CLI Integration**:
- Wired agent loop into all output formats (Text, JSON, StreamJSON)
- Tool calls and their results displayed with distinguishing icons (🔧 for calls)
- Replaced simple chat with agent orchestrator

**Permission Integration**:
- All tool executions check permissions before running (see the sketch after this list)
- Respects plan/acceptEdits/code modes
- Returns clear error messages for denied operations
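
Roughly, every execution passes through a gate like the following before any tool code runs. The mode names come from the commit message; `perms.allows` and `dispatch` are assumed names, not owlen's actual API:

```rust
// Illustrative permission gate around tool execution.
async fn execute_tool(call: &ToolCall, perms: &Permissions) -> String {
    let name = call.function.name.as_str();
    // plan mode denies mutating tools; acceptEdits and code modes are
    // progressively more permissive.
    if !perms.allows(name, &call.function.arguments) {
        // Denials are returned to the model as the tool result, so the
        // agent loop keeps running and the model can explain the refusal.
        return format!("Error: permission denied for tool '{name}'");
    }
    dispatch(name, &call.function.arguments).await
}
```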

**Example**:
User: "Find all Cargo.toml files in the workspace"
LLM: Calls glob("**/Cargo.toml")
Agent: Executes and returns 14 files
LLM: Formats human-readable response

This transforms owlen from a passive chatbot into an active agent that
can autonomously use tools to accomplish user goals.

Tested with: qwen3:8b successfully calling the glob tool

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit e77e33ce2f (parent f87e5d2796)
2025-11-01 20:56:56 +01:00
8 changed files with 460 additions and 62 deletions


@@ -11,6 +11,7 @@ tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
 color-eyre = "0.6"
+agent-core = { path = "../../core/agent" }
 llm-ollama = { path = "../../llm/ollama" }
 tools-fs = { path = "../../tools/fs" }
 tools-bash = { path = "../../tools/bash" }


@@ -461,50 +461,20 @@ async fn main() -> Result<()> {
         stream: true,
     };
-    let msgs = vec![ChatMessage {
-        role: "user".into(),
-        content: prompt.clone(),
-    }];
     let start_time = SystemTime::now();
     // Handle different output formats
     match output_format {
         OutputFormat::Text => {
-            // Text format: stream to stdout as before
-            let mut stream = client.chat_stream(&msgs, &opts).await?;
-            while let Some(chunk) = stream.try_next().await? {
-                if let Some(m) = chunk.message {
-                    if let Some(c) = m.content {
-                        print!("{c}");
-                        io::stdout().flush()?;
-                    }
-                }
-                if matches!(chunk.done, Some(true)) {
-                    break;
-                }
-            }
-            println!(); // Newline after response
+            // Text format: Use agent orchestrator with tool calling
+            let response = agent_core::run_agent_loop(&client, &prompt, &opts, &perms).await?;
+            println!("{}", response);
         }
         OutputFormat::Json => {
-            // JSON format: collect all chunks, then output final JSON
-            let mut stream = client.chat_stream(&msgs, &opts).await?;
-            let mut response = String::new();
-            while let Some(chunk) = stream.try_next().await? {
-                if let Some(m) = chunk.message {
-                    if let Some(c) = m.content {
-                        response.push_str(&c);
-                    }
-                }
-                if matches!(chunk.done, Some(true)) {
-                    break;
-                }
-            }
+            // JSON format: Use agent loop and output as JSON
+            let response = agent_core::run_agent_loop(&client, &prompt, &opts, &perms).await?;
             let duration_ms = start_time.elapsed().unwrap().as_millis() as u64;
             // Rough token estimate (tokens ~= chars / 4)
             let estimated_tokens = ((prompt.len() + response.len()) / 4) as u64;
             let output = SessionOutput {
@@ -526,7 +496,7 @@ async fn main() -> Result<()> {
             println!("{}", serde_json::to_string(&output)?);
         }
         OutputFormat::StreamJson => {
-            // Stream-JSON format: emit session_start, chunks, and session_end
+            // Stream-JSON format: emit session_start, response, and session_end
             let session_start = StreamEvent {
                 event_type: "session_start".to_string(),
                 session_id: Some(session_id.clone()),
@@ -535,30 +505,17 @@
             };
             println!("{}", serde_json::to_string(&session_start)?);
-            let mut stream = client.chat_stream(&msgs, &opts).await?;
-            let mut response = String::new();
+            let response = agent_core::run_agent_loop(&client, &prompt, &opts, &perms).await?;
-            while let Some(chunk) = stream.try_next().await? {
-                if let Some(m) = chunk.message {
-                    if let Some(c) = m.content {
-                        response.push_str(&c);
-                        let chunk_event = StreamEvent {
-                            event_type: "chunk".to_string(),
-                            session_id: None,
-                            content: Some(c),
-                            stats: None,
-                        };
-                        println!("{}", serde_json::to_string(&chunk_event)?);
-                    }
-                }
-                if matches!(chunk.done, Some(true)) {
-                    break;
-                }
-            }
+            let chunk_event = StreamEvent {
+                event_type: "chunk".to_string(),
+                session_id: None,
+                content: Some(response.clone()),
+                stats: None,
+            };
+            println!("{}", serde_json::to_string(&chunk_event)?);
             let duration_ms = start_time.elapsed().unwrap().as_millis() as u64;
             // Rough token estimate
             let estimated_tokens = ((prompt.len() + response.len()) / 4) as u64;
             let session_end = StreamEvent {