owlen/examples/mcp_chat.rs

//! Example demonstrating MCP-based chat interaction.
//!
//! This example shows the recommended way to interact with LLMs via the MCP architecture.
//! It uses `RemoteMcpClient`, which communicates with the MCP LLM server.
//!
//! Prerequisites:
//! - Build the MCP LLM server: `cargo build --release -p owlen-mcp-llm-server`
//! - Ensure Ollama is running with a model available
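//!
//! Run it (assuming the standard Cargo examples layout this path implies) with
//! `cargo run --example mcp_chat` from the `owlen` crate directory.

// `Provider` is imported for its trait methods (`list_models`, `send_prompt`),
// which `RemoteMcpClient` is presumed to implement.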
use owlen_core::{
    Provider,
    mcp::remote_client::RemoteMcpClient,
    types::{ChatParameters, ChatRequest, Message, Role},
};
use std::sync::Arc;

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
println!("🦉 Owlen MCP Chat Example\n");
// Create MCP client - this will spawn/connect to the MCP LLM server
println!("Connecting to MCP LLM server...");
    let client = Arc::new(RemoteMcpClient::new()?);
    println!("✓ Connected\n");

    // List available models
    println!("Fetching available models...");
    let models = client.list_models().await?;
    println!("Available models:");
    for model in &models {
        println!(" - {} ({})", model.name, model.provider);
    }
    println!();
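
    // The default below, "llama3.2:latest", is an Ollama-style model tag
    // assumed by this example; swap in any model your server actually serves.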
    // Select first available model or default
    let model_name = models
        .first()
        .map(|m| m.id.clone())
        .unwrap_or_else(|| "llama3.2:latest".to_string());
    println!("Using model: {}\n", model_name);

    // Create a simple chat request
    let user_message = "What is the capital of France? Please be concise.";
    println!("User: {}", user_message);

    let request = ChatRequest {
        model: model_name,
        messages: vec![Message::new(Role::User, user_message.to_string())],
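        // Parameter meanings are assumed from common LLM chat APIs:
        // temperature trades determinism for variety, max_tokens caps the
        // reply length, and stream: false requests one complete response
        // rather than incremental chunks.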
        parameters: ChatParameters {
            temperature: Some(0.7),
            max_tokens: Some(100),
            stream: false,
            extra: std::collections::HashMap::new(),
        },
        tools: None,
    };

    // Send request and print the response
    println!("\nAssistant:");
    let response = client.send_prompt(request).await?;
    println!("{}", response.message.content);
    if let Some(usage) = response.usage {
        println!(
            "\n📊 Tokens: {} prompt + {} completion = {} total",
            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
        );
    }

    Ok(())
}