Files
owlen/crates/core/agent/examples/streaming_agent.rs
vikingowl 10c8e2baae feat(v2): complete multi-LLM providers, TUI redesign, and advanced agent features
Multi-LLM Provider Support:
- Add llm-core crate with LlmProvider trait abstraction
- Implement Anthropic Claude API client with streaming
- Implement OpenAI API client with streaming
- Add token counting with SimpleTokenCounter and ClaudeTokenCounter
- Add retry logic with exponential backoff and jitter

Borderless TUI Redesign:
- Rewrite theme system with terminal capability detection (Full/Unicode256/Basic)
- Add provider tabs component with keybind switching [1]/[2]/[3]
- Implement vim-modal input (Normal/Insert/Visual/Command modes)
- Redesign chat panel with timestamps and streaming indicators
- Add multi-provider status bar with cost tracking
- Add Nerd Font icons with graceful ASCII fallbacks
- Add syntax highlighting (syntect) and markdown rendering (pulldown-cmark)

Advanced Agent Features:
- Add system prompt builder with configurable components
- Enhance subagent orchestration with parallel execution
- Add git integration module for safe command detection
- Add streaming tool results via channels
- Expand tool set: AskUserQuestion, TodoWrite, LS, MultiEdit, BashOutput, KillShell
- Add WebSearch with provider abstraction

Plugin System Enhancement:
- Add full agent definition parsing from YAML frontmatter
- Add skill system with progressive disclosure
- Wire plugin hooks into HookManager

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-02 17:24:14 +01:00

93 lines
3.8 KiB
Rust

//! Example demonstrating the streaming agent loop API
//!
//! This example shows how to use `run_agent_loop_streaming` to receive
//! real-time events during agent execution, including:
//! - Text deltas as the LLM generates text
//! - Tool execution start/end events
//! - Tool output events
//! - Final completion events
//!
//! Run with: cargo run --example streaming_agent -p agent-core

// NOTE: these imports mirror the snippet printed by this example; they are not
// otherwise used by the code below.
use agent_core::{create_event_channel, run_agent_loop_streaming, AgentEvent, ToolContext};
use llm_core::ChatOptions;
use permissions::{Mode, PermissionManager};
/// Entry point for the streaming-agent walkthrough.
///
/// Nothing here talks to a real LLM: the function simply prints a guided,
/// copy-pastable code snippet showing how `run_agent_loop_streaming` and the
/// event channel are meant to be wired together.
#[tokio::main]
async fn main() -> color_eyre::Result<()> {
    color_eyre::install()?;

    // Note: This is a minimal example. In a real application, you would:
    // 1. Initialize a real LLM provider (e.g., OllamaClient)
    // 2. Configure the ChatOptions with your preferred model
    // 3. Set up appropriate permissions and tool context

    // The entire walkthrough as a flat table of output lines. Driving one
    // loop over this table keeps the text in a single place instead of a long
    // run of individual println! calls. Entries are printed as data, so the
    // braces and escapes below appear verbatim in the output.
    const WALKTHROUGH: &[&str] = &[
        "=== Streaming Agent Example ===\n",
        "This example demonstrates how to use the streaming agent loop API.",
        "To run with a real LLM provider, modify this example to:",
        " 1. Create an LLM provider instance",
        " 2. Set up permissions and tool context",
        " 3. Call run_agent_loop_streaming with your prompt\n",
        "Example code:",
        "```rust",
        "// Create LLM provider",
        "let provider = OllamaClient::new(\"http://localhost:11434\");",
        "",
        "// Set up permissions and context",
        "let perms = PermissionManager::new(Mode::Plan);",
        "let ctx = ToolContext::default();",
        "",
        "// Create event channel",
        "let (tx, mut rx) = create_event_channel();",
        "",
        "// Spawn agent loop",
        "let handle = tokio::spawn(async move {",
        " run_agent_loop_streaming(",
        " &provider,",
        " \"Your prompt here\",",
        " &ChatOptions::default(),",
        " &perms,",
        " &ctx,",
        " tx,",
        " ).await",
        "});",
        "",
        "// Process events",
        "while let Some(event) = rx.recv().await {",
        " match event {",
        " AgentEvent::TextDelta(text) => {",
        " print!(\"{text}\");",
        " }",
        " AgentEvent::ToolStart { tool_name, .. } => {",
        " println!(\"\\n[Executing tool: {tool_name}]\");",
        " }",
        " AgentEvent::ToolOutput { content, is_error, .. } => {",
        " if is_error {",
        " eprintln!(\"Error: {content}\");",
        " } else {",
        " println!(\"Output: {content}\");",
        " }",
        " }",
        " AgentEvent::ToolEnd { success, .. } => {",
        " println!(\"[Tool finished: {}]\", if success { \"success\" } else { \"failed\" });",
        " }",
        " AgentEvent::Done { final_response } => {",
        " println!(\"\\n\\nFinal response: {final_response}\");",
        " break;",
        " }",
        " AgentEvent::Error(e) => {",
        " eprintln!(\"Error: {e}\");",
        " break;",
        " }",
        " }",
        "}",
        "",
        "// Wait for completion",
        "let result = handle.await??;",
        "```",
    ];

    for line in WALKTHROUGH {
        println!("{line}");
    }
    Ok(())
}