feat(core): Implement basic reasoning loop in AgentManager
This commit is contained in:
@@ -1,7 +1,9 @@
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use crate::state::AppState;
|
||||
use llm_core::LlmProvider;
|
||||
use llm_core::{LlmProvider, ChatMessage, ChatOptions};
|
||||
use color_eyre::eyre::Result;
|
||||
use futures::StreamExt;
|
||||
|
||||
/// Manages the lifecycle and state of the agent
|
||||
pub struct AgentManager {
|
||||
@@ -24,12 +26,43 @@ impl AgentManager {
|
||||
/// Returns a shared handle to the application state.
///
/// Callers must `.lock().await` the returned mutex before reading or
/// mutating the state; the `Arc` may be cloned to share ownership.
pub fn state(&self) -> &Arc<Mutex<AppState>> {
    &self.state
}
|
||||
|
||||
/// Execute the reasoning loop: User Input -> LLM -> Thought/Action -> Result -> LLM
|
||||
pub async fn step(&self, input: &str) -> Result<String> {
|
||||
// 1. Add user message to history
|
||||
{
|
||||
let mut guard = self.state.lock().await;
|
||||
guard.add_message(ChatMessage::user(input.to_string()));
|
||||
}
|
||||
|
||||
// 2. Prepare context (Sliding Window logic would go here)
|
||||
let messages = {
|
||||
let guard = self.state.lock().await;
|
||||
guard.messages.clone()
|
||||
};
|
||||
|
||||
// 3. Call LLM (Non-streaming for now to simplify step logic, or use stream internally)
|
||||
// For the Reasoning Loop, we need to handle potential tool calls.
|
||||
// This initial implementation just chats.
|
||||
|
||||
let options = ChatOptions::default();
|
||||
let response = self.client.chat(&messages, &options, None).await?;
|
||||
|
||||
// 4. Process response
|
||||
if let Some(content) = response.content {
|
||||
let mut guard = self.state.lock().await;
|
||||
guard.add_message(ChatMessage::assistant(content.clone()));
|
||||
return Ok(content);
|
||||
}
|
||||
|
||||
Ok(String::new())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use llm_core::{LlmProvider, ChatMessage, ChatOptions, Tool, ChunkStream};
|
||||
use llm_core::{LlmProvider, ChatMessage, ChatOptions, Tool, ChunkStream, ChatResponse};
|
||||
use async_trait::async_trait;
|
||||
|
||||
struct MockProvider;
|
||||
@@ -45,14 +78,27 @@ mod tests {
|
||||
) -> Result<ChunkStream, llm_core::LlmError> {
|
||||
unimplemented!()
|
||||
}
|
||||
async fn chat(
|
||||
&self,
|
||||
_messages: &[ChatMessage],
|
||||
_options: &ChatOptions,
|
||||
_tools: Option<&[Tool]>,
|
||||
) -> Result<ChatResponse, llm_core::LlmError> {
|
||||
Ok(ChatResponse {
|
||||
content: Some("Mock Response".to_string()),
|
||||
tool_calls: None,
|
||||
usage: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_agent_manager_creation() {
|
||||
async fn test_reasoning_loop_basic() {
|
||||
let client = Arc::new(MockProvider);
|
||||
let state = Arc::new(Mutex::new(AppState::new()));
|
||||
let manager = AgentManager::new(client.clone(), state.clone());
|
||||
let manager = AgentManager::new(client, state);
|
||||
|
||||
assert_eq!(manager.client().name(), "mock");
|
||||
let response = manager.step("Hello").await.unwrap();
|
||||
assert_eq!(response, "Mock Response");
|
||||
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user