feat(engine): Implement dynamic provider/model switching

Add a shared ProviderManager architecture for runtime provider/model
switching between the TUI and the Engine:

Core Architecture:
- Add SwitchProvider and SwitchModel messages to the UserAction enum
- Create run_engine_loop_dynamic() built around a shared ProviderManager
- Add a ClientSource enum to AgentManager (Fixed vs Dynamic)
- Implement get_client(), which resolves the provider at call time
  (see the sketch below)
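
The engine no longer holds a client directly; it holds a ClientSource and
resolves it on every LLM call, so a switch made in the TUI takes effect on
the engine's next turn. A condensed sketch of the resolution path (it mirrors
the diff below; imports and surrounding code are simplified):

```rust
enum ClientSource {
    /// Legacy: one client for the whole session
    Fixed(Arc<dyn LlmProvider>),
    /// Shared: re-resolved on every call
    Dynamic(Arc<Mutex<ProviderManager>>),
}

impl AgentManager {
    async fn get_client(&self) -> Result<Arc<dyn LlmProvider>> {
        match &self.client_source {
            ClientSource::Fixed(client) => Ok(Arc::clone(client)),
            ClientSource::Dynamic(manager) => {
                // Lock only long enough to resolve; the returned Arc
                // outlives the guard, so the TUI can mutate the manager again.
                let mut guard = manager.lock().await;
                guard.get_provider()
                    .map_err(|e| color_eyre::eyre::eyre!("Failed to get provider: {}", e))
            }
        }
    }
}
```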

TUI Integration:
- Add a ProviderMode::Shared variant backed by the shared manager
- Add a with_shared_provider_manager() constructor
- Update switch_provider/set_current_model to handle shared mode
  (see the sketch below)
- Fix /model command to update the shared ProviderManager (it previously
  updated only local TUI state and never propagated to the engine)
- Fix /provider command to go through switch_provider()
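
For the Shared variant, every TUI-side mutation goes through the async
mutex, so the engine observes it on its next get_client() call. Condensed
from the diff (block_on is used because the command handler is synchronous):

```rust
fn set_current_model(&mut self, model: String) {
    if let ProviderMode::Shared(manager) = &mut self.provider_mode {
        // Update the manager the engine resolves its provider from.
        let mut guard = futures::executor::block_on(manager.lock());
        guard.set_current_model(model.clone());
        drop(guard); // release the lock before touching UI state
        self.opts.model = model;
    }
}
```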

Infrastructure:
- Wire main.rs to create one shared ProviderManager for both TUI and engine
- Add HTTP status code validation to the Ollama client (see the sketch below)
- Consolidate messages.rs and state.rs into agent-core
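
The Ollama fix is the standard reqwest pattern: inspect resp.status()
before consuming the body as a stream. Condensed from the hunk at the end
of this commit:

```rust
let status = resp.status();
if !status.is_success() {
    // The body is an error message now, not a valid NDJSON stream.
    let error_body = resp.text().await.unwrap_or_else(|_| "unknown error".to_string());
    return Err(LlmError::Api {
        message: format!("Ollama API error: {} - {}", status, error_body),
        code: Some(status.as_str().to_string()),
    });
}
```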

The TUI and Engine now share a single ProviderManager behind
Arc<Mutex<ProviderManager>>. Provider/model changes made via the
[1]/[2]/[3] keys, the model picker, or the /model command now propagate
to the engine; the wiring is sketched below.
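
main.rs builds one manager and hands a clone of the Arc to each side
(condensed from the diff; channel setup elided):

```rust
let provider_manager = Arc::new(tokio::sync::Mutex::new(
    ui::ProviderManager::new(auth_manager.clone(), settings.clone()),
));

// The engine resolves its client from the manager on every call...
let pm_clone = provider_manager.clone();
tokio::spawn(async move {
    engine::run_engine_loop_dynamic(rx_engine, tx_ui_engine, pm_clone, state_clone).await;
});

// ...while the TUI mutates the same manager on [1]/[2]/[3], the model
// picker, or /model.
return ui::run_with_providers(provider_manager, perms, settings, tx_engine, state, rx_ui).await;
```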

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit f97bd44f05 (parent 9bc865b8fa)
Date: 2025-12-26 22:13:00 +01:00
11 changed files with 955 additions and 341 deletions


@@ -1,21 +1,48 @@
use std::sync::Arc;
use tokio::sync::{mpsc, Mutex};
use crate::state::{AppState, AppMode};
use agent_core::state::{AppState, AppMode};
use agent_core::{PlanStep, AccumulatedPlanStatus};
use llm_core::{LlmProvider, ChatMessage, ChatOptions};
use color_eyre::eyre::Result;
use crate::messages::{Message, AgentResponse};
use agent_core::messages::{Message, AgentResponse};
use futures::StreamExt;
use ui::ProviderManager;
/// Client source - either a fixed client or dynamic via ProviderManager
enum ClientSource {
/// Fixed client (legacy mode)
Fixed(Arc<dyn LlmProvider>),
/// Dynamic via ProviderManager (supports provider/model switching)
Dynamic(Arc<Mutex<ProviderManager>>),
}
/// Manages the lifecycle and state of the agent
pub struct AgentManager {
client: Arc<dyn LlmProvider>,
client_source: ClientSource,
state: Arc<Mutex<AppState>>,
tx_ui: Option<mpsc::Sender<Message>>,
}
impl AgentManager {
/// Create a new AgentManager
/// Create a new AgentManager with a fixed client (legacy mode)
pub fn new(client: Arc<dyn LlmProvider>, state: Arc<Mutex<AppState>>) -> Self {
Self { client, state, tx_ui: None }
Self {
client_source: ClientSource::Fixed(client),
state,
tx_ui: None,
}
}
/// Create a new AgentManager with a dynamic ProviderManager
pub fn with_provider_manager(
provider_manager: Arc<Mutex<ProviderManager>>,
state: Arc<Mutex<AppState>>,
) -> Self {
Self {
client_source: ClientSource::Dynamic(provider_manager),
state,
tx_ui: None,
}
}
/// Set the UI message sender
@@ -24,12 +51,31 @@ impl AgentManager {
self
}
/// Get a reference to the LLM client
pub fn client(&self) -> &Arc<dyn LlmProvider> {
&self.client
/// Get the current LLM client (resolves dynamic provider if needed)
async fn get_client(&self) -> Result<Arc<dyn LlmProvider>> {
match &self.client_source {
ClientSource::Fixed(client) => Ok(Arc::clone(client)),
ClientSource::Dynamic(manager) => {
let mut guard = manager.lock().await;
guard.get_provider()
.map_err(|e| color_eyre::eyre::eyre!("Failed to get provider: {}", e))
}
}
}
/// Get the current model name
async fn get_model(&self) -> String {
match &self.client_source {
ClientSource::Fixed(client) => client.model().to_string(),
ClientSource::Dynamic(manager) => {
let guard = manager.lock().await;
guard.current_model().to_string()
}
}
}
/// Get a reference to the shared state
#[allow(dead_code)]
pub fn state(&self) -> &Arc<Mutex<AppState>> {
&self.state
}
@@ -79,6 +125,340 @@ impl AgentManager {
Ok("Sub-agent task completed (mock)".to_string())
}
/// Execute approved steps from the accumulated plan
async fn execute_approved_plan_steps(&self) -> Result<()> {
// Take the approval and apply it to the plan
let approval = {
let mut guard = self.state.lock().await;
guard.take_plan_approval()
};
if let Some(approval) = approval {
// Apply approval to plan
{
let mut guard = self.state.lock().await;
if let Some(plan) = guard.current_plan_mut() {
approval.apply_to(plan);
plan.start_execution();
}
}
}
// Get the approved steps
let approved_steps: Vec<_> = {
let guard = self.state.lock().await;
if let Some(plan) = guard.current_plan() {
plan.approved_steps()
.into_iter()
.map(|s| (s.id.clone(), s.tool.clone(), s.args.clone()))
.collect()
} else {
Vec::new()
}
};
let total_steps = approved_steps.len();
let mut executed = 0;
let mut skipped = 0;
// Execute each approved step
for (index, (id, tool_name, arguments)) in approved_steps.into_iter().enumerate() {
// Notify UI of execution progress
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::PlanExecuting {
step_id: id.clone(),
step_index: index,
total_steps,
})).await;
}
// Execute the tool
let dummy_perms = permissions::PermissionManager::new(permissions::Mode::Code);
let ctx = agent_core::ToolContext::new();
match agent_core::execute_tool(&tool_name, &arguments, &dummy_perms, &ctx).await {
Ok(result) => {
let mut guard = self.state.lock().await;
guard.add_message(ChatMessage::tool_result(&id, result));
executed += 1;
}
Err(e) => {
let mut guard = self.state.lock().await;
guard.add_message(ChatMessage::tool_result(&id, format!("Error: {}", e)));
skipped += 1;
}
}
}
// Mark plan as complete and notify UI
{
let mut guard = self.state.lock().await;
if let Some(plan) = guard.current_plan_mut() {
plan.complete();
}
}
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::PlanExecutionComplete {
executed,
skipped,
})).await;
}
// Clear the plan
{
let mut guard = self.state.lock().await;
guard.clear_plan();
}
Ok(())
}
/// Execute the full reasoning loop until a final response is reached
pub async fn run(&self, input: &str) -> Result<()> {
let tools = agent_core::get_tool_definitions();
// 1. Add user message to history
{
let mut guard = self.state.lock().await;
guard.add_message(ChatMessage::user(input.to_string()));
}
let max_iterations = 10;
let mut iteration = 0;
loop {
iteration += 1;
if iteration > max_iterations {
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::Error("Max iterations reached".to_string()))).await;
}
break;
}
// 2. Prepare context
let messages = {
let guard = self.state.lock().await;
guard.messages.clone()
};
// 3. Get current client (supports dynamic provider switching)
let client = match self.get_client().await {
Ok(c) => c,
Err(e) => {
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::Error(e.to_string()))).await;
}
return Err(e);
}
};
let options = ChatOptions::new(client.model());
// 4. Call LLM with streaming
let stream_result = client.chat_stream(&messages, &options, Some(&tools)).await;
let mut stream = match stream_result {
Ok(s) => s,
Err(e) => {
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::Error(e.to_string()))).await;
}
return Err(e.into());
}
};
let mut response_content = String::new();
let mut tool_calls_builder = agent_core::ToolCallsBuilder::new();
while let Some(chunk_result) = stream.next().await {
let chunk = match chunk_result {
Ok(c) => c,
Err(e) => {
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::Error(e.to_string()))).await;
}
return Err(e.into());
}
};
if let Some(content) = &chunk.content {
response_content.push_str(content);
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::Token(content.clone()))).await;
}
}
if let Some(deltas) = &chunk.tool_calls {
tool_calls_builder.add_deltas(deltas);
}
}
drop(stream);
let tool_calls = tool_calls_builder.build();
// Add assistant message to history
{
let mut guard = self.state.lock().await;
guard.add_message(ChatMessage {
role: llm_core::Role::Assistant,
content: if response_content.is_empty() { None } else { Some(response_content.clone()) },
tool_calls: if tool_calls.is_empty() { None } else { Some(tool_calls.clone()) },
tool_call_id: None,
name: None,
});
}
let mode = {
let guard = self.state.lock().await;
guard.mode
};
// Check if LLM finished (no tool calls)
if tool_calls.is_empty() {
// Check if we have an accumulated plan with steps
let has_plan_steps = {
let guard = self.state.lock().await;
guard.accumulated_plan.as_ref().map(|p| !p.steps.is_empty()).unwrap_or(false)
};
if mode == AppMode::Plan && has_plan_steps {
// In Plan mode WITH steps: finalize and wait for user approval
let (total_steps, status) = {
let mut guard = self.state.lock().await;
if let Some(plan) = guard.current_plan_mut() {
plan.finalize();
(plan.steps.len(), plan.status)
} else {
(0, AccumulatedPlanStatus::Completed)
}
};
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::PlanComplete {
total_steps,
status,
})).await;
}
// Wait for user approval
let notify = {
let guard = self.state.lock().await;
guard.plan_notify.clone()
};
notify.notified().await;
// Execute approved steps
self.execute_approved_plan_steps().await?;
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::Complete)).await;
}
} else {
// Normal mode OR Plan mode with no steps: just complete
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::Complete)).await;
}
}
break;
}
// Handle Plan mode: accumulate steps instead of executing
if mode == AppMode::Plan {
// Ensure we have an active plan
{
let mut guard = self.state.lock().await;
if guard.accumulated_plan.is_none() {
guard.start_plan();
}
if let Some(plan) = guard.current_plan_mut() {
plan.next_turn();
}
}
// Add each tool call as a plan step
for call in &tool_calls {
let step = PlanStep::new(
call.id.clone(),
iteration,
call.function.name.clone(),
call.function.arguments.clone(),
).with_rationale(response_content.clone());
// Add to accumulated plan
{
let mut guard = self.state.lock().await;
if let Some(plan) = guard.current_plan_mut() {
plan.add_step(step.clone());
}
}
// Notify UI of new step
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::PlanStepAdded(step))).await;
}
}
// Feed mock results back to LLM so it can continue reasoning
for call in &tool_calls {
let mock_result = format!(
"[Plan Mode] Step '{}' recorded for approval. Continue proposing steps or stop to finalize.",
call.function.name
);
let mut guard = self.state.lock().await;
guard.add_message(ChatMessage::tool_result(&call.id, mock_result));
}
// Continue loop - agent will propose more steps or complete
continue;
}
// Normal/AcceptAll mode: Execute tools immediately
for call in tool_calls {
let tool_name = call.function.name.clone();
let arguments = call.function.arguments.clone();
if let Some(tx) = &self.tx_ui {
let _ = tx.send(Message::AgentResponse(AgentResponse::ToolCall {
name: tool_name.clone(),
args: arguments.to_string(),
})).await;
}
// Check permission
let context = match tool_name.as_str() {
"read" | "write" | "edit" => arguments.get("path").and_then(|v| v.as_str()),
"bash" => arguments.get("command").and_then(|v| v.as_str()),
_ => None,
};
let allowed = self.check_permission(&tool_name, context).await?;
if allowed {
// Execute tool
// Use a dummy PermissionManager that always allows; the permission was already checked above
let dummy_perms = permissions::PermissionManager::new(permissions::Mode::Code);
let ctx = agent_core::ToolContext::new(); // TODO: Use real context
match agent_core::execute_tool(&tool_name, &arguments, &dummy_perms, &ctx).await {
Ok(result) => {
let mut guard = self.state.lock().await;
guard.add_message(ChatMessage::tool_result(&call.id, result));
}
Err(e) => {
let mut guard = self.state.lock().await;
guard.add_message(ChatMessage::tool_result(&call.id, format!("Error: {}", e)));
}
}
} else {
let mut guard = self.state.lock().await;
guard.add_message(ChatMessage::tool_result(&call.id, "Permission denied by user".to_string()));
}
}
}
Ok(())
}
/// Execute the reasoning loop: User Input -> LLM -> Thought/Action -> Result -> LLM
pub async fn step(&self, input: &str) -> Result<String> {
// 1. Add user message to history
@@ -93,8 +473,10 @@ impl AgentManager {
guard.messages.clone()
};
let options = ChatOptions::default();
let response = self.client.chat(&messages, &options, None).await?;
// 3. Get current client
let client = self.get_client().await?;
let options = ChatOptions::new(client.model());
let response = client.chat(&messages, &options, None).await?;
// 4. Process response
if let Some(content) = response.content {
@@ -112,6 +494,7 @@ mod tests {
use super::*;
use llm_core::{Tool, ChunkStream, ChatResponse};
use async_trait::async_trait;
use agent_core::messages::UserAction;
struct MockProvider;
#[async_trait]
@@ -180,4 +563,4 @@ mod tests {
let allowed = manager.check_permission("bash", Some("ls")).await.unwrap();
assert!(allowed);
}
}
}


@@ -1,12 +1,51 @@
use crate::messages::{Message, UserAction, AgentResponse};
use crate::state::AppState;
use agent_core::messages::{Message, UserAction, AgentResponse};
use agent_core::state::AppState;
use tokio::sync::{mpsc, Mutex};
use std::sync::Arc;
use llm_core::{LlmProvider, ChatMessage, ChatOptions};
use futures::StreamExt;
use llm_core::LlmProvider;
use ui::ProviderManager;
use crate::agent_manager::AgentManager;
/// The main background task that handles logic, API calls, and state updates.
/// Uses a shared ProviderManager for dynamic provider/model switching.
pub async fn run_engine_loop_dynamic(
mut rx: mpsc::Receiver<Message>,
tx_ui: mpsc::Sender<Message>,
provider_manager: Arc<Mutex<ProviderManager>>,
state: Arc<Mutex<AppState>>,
) {
let agent_manager = Arc::new(
AgentManager::with_provider_manager(provider_manager, state.clone())
.with_ui_sender(tx_ui.clone())
);
while let Some(msg) = rx.recv().await {
match msg {
Message::UserAction(UserAction::Input(text)) => {
let agent_manager_clone = agent_manager.clone();
let tx_clone = tx_ui.clone();
tokio::spawn(async move {
if let Err(e) = agent_manager_clone.run(&text).await {
let _ = tx_clone.send(Message::AgentResponse(AgentResponse::Error(e.to_string()))).await;
}
});
}
Message::UserAction(UserAction::PermissionResult(res)) => {
let mut guard = state.lock().await;
guard.set_permission_result(res);
}
Message::UserAction(UserAction::Exit) => {
let mut guard = state.lock().await;
guard.running = false;
break;
}
_ => {}
}
}
}
/// Legacy engine loop with fixed client (for backward compatibility)
pub async fn run_engine_loop(
mut rx: mpsc::Receiver<Message>,
tx_ui: mpsc::Sender<Message>,
@@ -18,48 +57,12 @@ pub async fn run_engine_loop(
while let Some(msg) = rx.recv().await {
match msg {
Message::UserAction(UserAction::Input(text)) => {
let tx_ui_clone = tx_ui.clone();
let agent_manager_clone = agent_manager.clone();
// Spawn a task for the agent interaction so the engine loop can
// continue receiving messages (like PermissionResult).
let tx_clone = tx_ui.clone();
tokio::spawn(async move {
let messages = {
let mut guard = agent_manager_clone.state().lock().await;
guard.add_message(ChatMessage::user(text.clone()));
guard.messages.clone()
};
let options = ChatOptions::default();
match agent_manager_clone.client().chat_stream(&messages, &options, None).await {
Ok(mut stream) => {
let mut full_response = String::new();
while let Some(result) = stream.next().await {
match result {
Ok(chunk) => {
if let Some(content) = chunk.content {
full_response.push_str(&content);
let _ = tx_ui_clone.send(Message::AgentResponse(AgentResponse::Token(content))).await;
}
}
Err(e) => {
let _ = tx_ui_clone.send(Message::AgentResponse(AgentResponse::Error(e.to_string()))).await;
}
}
}
{
let mut guard = agent_manager_clone.state().lock().await;
guard.add_message(ChatMessage::assistant(full_response));
}
let _ = tx_ui_clone.send(Message::AgentResponse(AgentResponse::Complete)).await;
}
Err(e) => {
let _ = tx_ui_clone.send(Message::AgentResponse(AgentResponse::Error(e.to_string()))).await;
}
if let Err(e) = agent_manager_clone.run(&text).await {
let _ = tx_clone.send(Message::AgentResponse(AgentResponse::Error(e.to_string()))).await;
}
});
}
@@ -80,7 +83,7 @@ pub async fn run_engine_loop(
#[cfg(test)]
mod tests {
use super::*;
use llm_core::{LlmError, Tool, ChunkStream, StreamChunk};
use llm_core::{LlmError, Tool, ChunkStream, StreamChunk, ChatMessage, ChatOptions};
use async_trait::async_trait;
use futures::stream;
@@ -137,7 +140,7 @@ mod tests {
async fn test_engine_permission_result() {
let (tx_in, rx_in) = mpsc::channel(1);
let (tx_out, _rx_out) = mpsc::channel(10);
let client = Arc::new(MockProvider);
let state = Arc::new(Mutex::new(AppState::new()));
let state_clone = state.clone();
@@ -156,4 +159,51 @@ mod tests {
let guard = state.lock().await;
assert_eq!(guard.last_permission_result, Some(true));
}
#[tokio::test]
async fn test_engine_plan_mode() {
use agent_core::state::AppMode;
let (tx_in, rx_in) = mpsc::channel(1);
let (tx_out, mut rx_out) = mpsc::channel(10);
let client = Arc::new(MockProvider);
let state = Arc::new(Mutex::new(AppState::new()));
// Set Plan mode
{
let mut guard = state.lock().await;
guard.mode = AppMode::Plan;
}
let state_clone = state.clone();
// Spawn the engine loop
tokio::spawn(async move {
run_engine_loop(rx_in, tx_out, client, state_clone).await;
});
// Send a message
tx_in.send(Message::UserAction(UserAction::Input("Hi".to_string()))).await.unwrap();
// Verify we get responses (tokens and complete)
let mut received_tokens = false;
let mut received_complete = false;
let timeout = tokio::time::timeout(tokio::time::Duration::from_secs(2), async {
while let Some(msg) = rx_out.recv().await {
match msg {
Message::AgentResponse(AgentResponse::Token(_)) => received_tokens = true,
Message::AgentResponse(AgentResponse::Complete) => {
received_complete = true;
break;
}
_ => {}
}
}
}).await;
assert!(timeout.is_ok(), "Should receive responses within timeout");
assert!(received_tokens, "Should receive tokens");
assert!(received_complete, "Should receive complete signal");
}
}


@@ -146,7 +146,9 @@ fn create_provider(
let use_cloud = model.ends_with("-cloud") && api_key.is_some();
let client = if use_cloud {
OllamaClient::with_cloud().with_api_key(api_key.unwrap())
OllamaClient::with_cloud()
.with_api_key(api_key.unwrap())
.with_model(&model)
} else {
let base_url = ollama_url_override
.map(|s| s.to_string())
@@ -155,7 +157,7 @@ fn create_provider(
if let Some(key) = api_key {
client = client.with_api_key(key);
}
client
client.with_model(&model)
};
Ok((Arc::new(client) as Arc<dyn LlmProvider>, model))
@@ -764,16 +766,18 @@ async fn main() -> Result<()> {
let (tx_engine, rx_engine) = tokio::sync::mpsc::channel::<Message>(100);
let (tx_ui, rx_ui) = tokio::sync::mpsc::channel::<Message>(100);
// Create shared state
// Create shared state with mode from settings
let state = Arc::new(tokio::sync::Mutex::new(AppState::new()));
// Spawn the Engine Loop
let client_clone = client.clone();
let state_clone = state.clone();
let tx_ui_engine = tx_ui.clone();
tokio::spawn(async move {
engine::run_engine_loop(rx_engine, tx_ui_engine, client_clone, state_clone).await;
});
{
let mut guard = state.lock().await;
// Map settings mode string to AppMode
guard.mode = match settings.mode.as_str() {
"plan" => AppMode::Plan,
"acceptEdits" => AppMode::Normal, // AcceptEdits still needs some permission checks
"code" => AppMode::AcceptAll,
_ => AppMode::Normal,
};
}
// Check if interactive mode (no prompt provided)
if args.prompt.is_empty() {
@@ -786,10 +790,31 @@ async fn main() -> Result<()> {
);
let _token_refresher = auth_manager.clone().start_background_refresh();
// Launch TUI with multi-provider support
return ui::run_with_providers(auth_manager, perms, settings, tx_engine, state, rx_ui).await;
// Create shared ProviderManager for both TUI and engine
let provider_manager = Arc::new(tokio::sync::Mutex::new(
ui::ProviderManager::new(auth_manager.clone(), settings.clone())
));
// Spawn the Engine Loop with dynamic provider support
let pm_clone = provider_manager.clone();
let state_clone = state.clone();
let tx_ui_engine = tx_ui.clone();
tokio::spawn(async move {
engine::run_engine_loop_dynamic(rx_engine, tx_ui_engine, pm_clone, state_clone).await;
});
// Launch TUI with shared provider manager
return ui::run_with_providers(provider_manager, perms, settings, tx_engine, state, rx_ui).await;
}
// Legacy headless mode - use fixed client engine
let client_clone = client.clone();
let state_clone = state.clone();
let tx_ui_engine = tx_ui.clone();
tokio::spawn(async move {
engine::run_engine_loop(rx_engine, tx_ui_engine, client_clone, state_clone).await;
});
// Legacy text-based REPL
println!("🤖 Owlen Interactive Mode");
println!("Model: {}", opts.model);


@@ -1,51 +0,0 @@
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Message {
UserAction(UserAction),
AgentResponse(AgentResponse),
System(SystemNotification),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UserAction {
Input(String),
Command(String),
PermissionResult(bool),
Exit,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AgentResponse {
Token(String),
ToolCall { name: String, args: String },
PermissionRequest { tool: String, context: Option<String> },
Complete,
Error(String),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SystemNotification {
StateUpdate(String),
Warning(String),
}
#[cfg(test)]
mod tests {
use super::*;
use tokio::sync::mpsc;
#[tokio::test]
async fn test_message_channel() {
let (tx, mut rx) = mpsc::channel(32);
let msg = Message::UserAction(UserAction::Input("Hello".to_string()));
tx.send(msg).await.unwrap();
let received = rx.recv().await.unwrap();
match received {
Message::UserAction(UserAction::Input(s)) => assert_eq!(s, "Hello"),
_ => panic!("Wrong message type"),
}
}
}


@@ -1,67 +0,0 @@
use std::sync::Arc;
use tokio::sync::{Mutex, Notify};
use llm_core::ChatMessage;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AppMode {
Normal,
Plan,
AcceptAll,
}
impl Default for AppMode {
fn default() -> Self {
Self::Normal
}
}
/// Shared application state
#[derive(Debug, Default)]
pub struct AppState {
pub messages: Vec<ChatMessage>,
pub running: bool,
pub mode: AppMode,
pub last_permission_result: Option<bool>,
pub permission_notify: Arc<Notify>,
}
impl AppState {
pub fn new() -> Self {
Self {
messages: Vec::new(),
running: true,
mode: AppMode::Normal,
last_permission_result: None,
permission_notify: Arc::new(Notify::new()),
}
}
pub fn add_message(&mut self, message: ChatMessage) {
self.messages.push(message);
}
pub fn set_permission_result(&mut self, result: bool) {
self.last_permission_result = Some(result);
self.permission_notify.notify_one();
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_app_state_sharing() {
let state = Arc::new(Mutex::new(AppState::new()));
let state_clone = state.clone();
tokio::spawn(async move {
let mut guard = state_clone.lock().await;
guard.add_message(ChatMessage::user("Test"));
}).await.unwrap();
let guard = state.lock().await;
assert_eq!(guard.messages.len(), 1);
}
}


@@ -44,8 +44,10 @@ struct PendingToolCall {
enum ProviderMode {
/// Legacy single-provider mode
Single(Arc<dyn LlmProvider>),
/// Multi-provider with switching support
/// Multi-provider with switching support (owned)
Multi(ProviderManager),
/// Multi-provider with shared manager (for engine integration)
Shared(Arc<tokio::sync::Mutex<ProviderManager>>),
}
use agent_core::messages::{Message, UserAction, AgentResponse};
@@ -173,10 +175,17 @@ impl TuiApp {
let opts = ChatOptions::new(&current_model);
// Check if we're in plan mode from settings
let is_plan_mode = settings.mode == "plan";
let mut status_bar = StatusBar::new(current_model, mode, theme.clone());
if is_plan_mode {
status_bar.set_planning_mode(true);
}
Ok(Self {
chat_panel: ChatPanel::new(theme.clone()),
input_box: InputBox::new(theme.clone()),
status_bar: StatusBar::new(current_model, mode, theme.clone()),
status_bar,
todo_panel: TodoPanel::new(theme.clone()),
permission_popup: None,
autocomplete: Autocomplete::new(theme.clone()),
@@ -204,6 +213,67 @@ impl TuiApp {
})
}
/// Create a new TUI app with a shared ProviderManager (for engine integration)
pub fn with_shared_provider_manager(
provider_manager: Arc<tokio::sync::Mutex<ProviderManager>>,
perms: PermissionManager,
settings: config_agent::Settings,
) -> Result<Self> {
let theme = Theme::default();
let mode = perms.mode();
// Get the initial provider and model (block_on is needed to access the shared manager from this sync constructor)
let (current_provider, current_model) = {
let guard = futures::executor::block_on(provider_manager.lock());
(guard.current_provider_type(), guard.current_model().to_string())
};
let provider = match current_provider {
ProviderType::Anthropic => Provider::Claude,
ProviderType::OpenAI => Provider::OpenAI,
ProviderType::Ollama => Provider::Ollama,
};
let opts = ChatOptions::new(&current_model);
// Check if we're in plan mode from settings
let is_plan_mode = settings.mode == "plan";
let mut status_bar = StatusBar::new(current_model, mode, theme.clone());
if is_plan_mode {
status_bar.set_planning_mode(true);
}
Ok(Self {
chat_panel: ChatPanel::new(theme.clone()),
input_box: InputBox::new(theme.clone()),
status_bar,
todo_panel: TodoPanel::new(theme.clone()),
permission_popup: None,
autocomplete: Autocomplete::new(theme.clone()),
command_help: CommandHelp::new(theme.clone()),
provider_tabs: ProviderTabs::with_provider(provider, theme.clone()),
model_picker: ModelPicker::new(theme.clone()),
theme,
stats: SessionStats::new(),
history: SessionHistory::new(),
checkpoint_mgr: CheckpointManager::new(PathBuf::from(".owlen/checkpoints")),
todo_list: TodoList::new(),
provider_mode: ProviderMode::Shared(provider_manager),
opts,
perms,
ctx: ToolContext::new(),
settings,
engine_tx: None,
shared_state: None,
engine_rx: None,
running: true,
waiting_for_llm: false,
pending_tool: None,
permission_tx: None,
vim_mode: VimMode::Insert,
})
}
/// Get the current LLM provider client
fn get_client(&mut self) -> Result<Arc<dyn LlmProvider>> {
match &mut self.provider_mode {
@@ -211,45 +281,70 @@ impl TuiApp {
ProviderMode::Multi(manager) => manager
.get_provider()
.map_err(|e| color_eyre::eyre::eyre!("{}", e)),
ProviderMode::Shared(manager) => {
let mut guard = futures::executor::block_on(manager.lock());
guard.get_provider()
.map_err(|e| color_eyre::eyre::eyre!("{}", e))
}
}
}
/// Switch to a different provider (only works in multi-provider mode)
/// Switch to a different provider (works in multi-provider and shared modes)
fn switch_provider(&mut self, provider_type: ProviderType) -> Result<()> {
if let ProviderMode::Multi(manager) = &mut self.provider_mode {
match manager.switch_provider(provider_type) {
Ok(_) => {
// Update UI state
let provider = match provider_type {
ProviderType::Anthropic => Provider::Claude,
ProviderType::OpenAI => Provider::OpenAI,
ProviderType::Ollama => Provider::Ollama,
};
self.provider_tabs.set_active(provider);
// Helper to update UI after successful switch
let update_ui = |s: &mut Self, model: String| {
let provider = match provider_type {
ProviderType::Anthropic => Provider::Claude,
ProviderType::OpenAI => Provider::OpenAI,
ProviderType::Ollama => Provider::Ollama,
};
s.provider_tabs.set_active(provider);
s.opts.model = model.clone();
s.status_bar = StatusBar::new(model.clone(), s.perms.mode(), s.theme.clone());
s.chat_panel.add_message(ChatMessage::System(
format!("Switched to {} (model: {})", provider_type, model)
));
};
// Update model and status bar
let model = manager.current_model().to_string();
self.opts.model = model.clone();
self.status_bar = StatusBar::new(model.clone(), self.perms.mode(), self.theme.clone());
self.chat_panel.add_message(ChatMessage::System(
format!("Switched to {} (model: {})", provider_type, model)
));
Ok(())
}
Err(e) => {
self.chat_panel.add_message(ChatMessage::System(
format!("Failed to switch provider: {}", e)
));
Err(color_eyre::eyre::eyre!("{}", e))
match &mut self.provider_mode {
ProviderMode::Multi(manager) => {
match manager.switch_provider(provider_type) {
Ok(_) => {
let model = manager.current_model().to_string();
update_ui(self, model);
Ok(())
}
Err(e) => {
self.chat_panel.add_message(ChatMessage::System(
format!("Failed to switch provider: {}", e)
));
Err(color_eyre::eyre::eyre!("{}", e))
}
}
}
} else {
self.chat_panel.add_message(ChatMessage::System(
"Provider switching requires multi-provider mode. Restart with 'owlen' to enable.".to_string()
));
Ok(())
ProviderMode::Shared(manager) => {
let mut guard = futures::executor::block_on(manager.lock());
match guard.switch_provider(provider_type) {
Ok(_) => {
let model = guard.current_model().to_string();
drop(guard); // Release lock before updating UI
update_ui(self, model);
Ok(())
}
Err(e) => {
self.chat_panel.add_message(ChatMessage::System(
format!("Failed to switch provider: {}", e)
));
Err(color_eyre::eyre::eyre!("{}", e))
}
}
}
ProviderMode::Single(_) => {
self.chat_panel.add_message(ChatMessage::System(
"Provider switching requires multi-provider mode. Restart with 'owlen' to enable.".to_string()
));
Ok(())
}
}
}
@@ -267,43 +362,81 @@ impl TuiApp {
/// Open the model picker for the current provider
async fn open_model_picker(&mut self) {
if let ProviderMode::Multi(manager) = &self.provider_mode {
let provider_type = manager.current_provider_type();
let current_model = manager.current_model().to_string();
match &self.provider_mode {
ProviderMode::Multi(manager) => {
let provider_type = manager.current_provider_type();
let current_model = manager.current_model().to_string();
// Show loading state immediately
self.model_picker.show_loading(provider_type);
// Show loading state immediately
self.model_picker.show_loading(provider_type);
// Fetch models from provider
match manager.list_models_for_provider(provider_type).await {
Ok(models) => {
if models.is_empty() {
self.model_picker.show_error("No models available".to_string());
} else {
self.model_picker.show(models, &provider_type.to_string(), &current_model);
// Fetch models from provider
match manager.list_models_for_provider(provider_type).await {
Ok(models) => {
if models.is_empty() {
self.model_picker.show_error("No models available".to_string());
} else {
self.model_picker.show(models, &provider_type.to_string(), &current_model);
}
}
Err(e) => {
self.model_picker.show_error(e.to_string());
}
}
Err(e) => {
// Show error state with option to use fallback models
self.model_picker.show_error(e.to_string());
}
ProviderMode::Shared(manager) => {
let guard = manager.lock().await;
let provider_type = guard.current_provider_type();
let current_model = guard.current_model().to_string();
// Show loading state immediately
self.model_picker.show_loading(provider_type);
// Fetch models from provider
match guard.list_models_for_provider(provider_type).await {
Ok(models) => {
drop(guard); // Release lock before updating UI
if models.is_empty() {
self.model_picker.show_error("No models available".to_string());
} else {
self.model_picker.show(models, &provider_type.to_string(), &current_model);
}
}
Err(e) => {
self.model_picker.show_error(e.to_string());
}
}
}
} else {
self.chat_panel.add_message(ChatMessage::System(
"Model picker requires multi-provider mode. Use [1][2][3] to switch providers first.".to_string()
));
ProviderMode::Single(_) => {
self.chat_panel.add_message(ChatMessage::System(
"Model picker requires multi-provider mode. Use [1][2][3] to switch providers first.".to_string()
));
}
}
}
/// Set the model for the current provider
fn set_current_model(&mut self, model: String) {
if let ProviderMode::Multi(manager) = &mut self.provider_mode {
manager.set_current_model(model.clone());
self.opts.model = model.clone();
self.status_bar = StatusBar::new(model.clone(), self.perms.mode(), self.theme.clone());
self.chat_panel.add_message(ChatMessage::System(
format!("Model changed to: {}", model)
));
match &mut self.provider_mode {
ProviderMode::Multi(manager) => {
manager.set_current_model(model.clone());
self.opts.model = model.clone();
self.status_bar = StatusBar::new(model.clone(), self.perms.mode(), self.theme.clone());
self.chat_panel.add_message(ChatMessage::System(
format!("Model changed to: {}", model)
));
}
ProviderMode::Shared(manager) => {
let mut guard = futures::executor::block_on(manager.lock());
guard.set_current_model(model.clone());
drop(guard);
self.opts.model = model.clone();
self.status_bar = StatusBar::new(model.clone(), self.perms.mode(), self.theme.clone());
self.chat_panel.add_message(ChatMessage::System(
format!("Model changed to: {}", model)
));
}
ProviderMode::Single(_) => {}
}
}
@@ -724,6 +857,7 @@ Commands: /help, /model <name>, /clear, /theme <name>
// Streaming finished
self.waiting_for_llm = false;
self.chat_panel.set_streaming(false);
self.status_bar.set_state(crate::components::AppState::Idle);
self.history.add_assistant_message(response.clone());
// Update stats (rough estimate)
let tokens = response.len() / 4;
@@ -733,6 +867,7 @@ Commands: /help, /model <name>, /clear, /theme <name>
// Streaming error
self.waiting_for_llm = false;
self.chat_panel.set_streaming(false);
self.status_bar.set_state(crate::components::AppState::Idle);
self.chat_panel.add_message(ChatMessage::System(
format!("Error: {}", error)
));
@@ -778,11 +913,12 @@ Commands: /help, /model <name>, /clear, /theme <name>
AgentResponse::Complete => {
self.waiting_for_llm = false;
self.chat_panel.set_streaming(false);
// TODO: Get full response from state or accumulate here
self.status_bar.set_state(crate::components::AppState::Idle);
}
AgentResponse::Error(e) => {
self.waiting_for_llm = false;
self.chat_panel.set_streaming(false);
self.status_bar.set_state(crate::components::AppState::Idle);
self.chat_panel.add_message(ChatMessage::System(format!("Error: {}", e)));
}
AgentResponse::PermissionRequest { tool, context } => {
@@ -790,9 +926,52 @@ Commands: /help, /model <name>, /clear, /theme <name>
self.status_bar.set_state(crate::components::AppState::WaitingPermission);
self.status_bar.set_pending_permission(Some(tool));
}
AgentResponse::PlanStaging(staging) => {
self.chat_panel.add_message(ChatMessage::System("--- PENDING PLAN ---".to_string()));
for tc in &staging {
self.chat_panel.add_message(ChatMessage::ToolCall { name: tc.name.clone(), args: tc.args.clone() });
}
self.chat_panel.add_message(ChatMessage::System("Approve plan in status bar? (y/n)".to_string()));
self.status_bar.set_state(crate::components::AppState::WaitingPermission);
self.status_bar.set_pending_permission(Some("PLAN".to_string()));
}
AgentResponse::ToolCall { name, args } => {
self.chat_panel.add_message(ChatMessage::ToolCall { name, args });
}
// Plan mode responses
AgentResponse::PlanStepAdded(step) => {
let msg = format!(
"[PLAN] Step {}: {} ({})",
step.turn,
step.tool,
if step.args.is_object() {
step.args.to_string().chars().take(50).collect::<String>()
} else {
step.args.to_string()
}
);
self.chat_panel.add_message(ChatMessage::System(msg));
}
AgentResponse::PlanComplete { total_steps, status: _ } => {
self.chat_panel.add_message(ChatMessage::System(
format!("--- PLAN COMPLETE ({} steps) ---", total_steps)
));
self.chat_panel.add_message(ChatMessage::System(
"Review and approve plan (y/n in status bar)".to_string()
));
self.status_bar.set_state(crate::components::AppState::WaitingPermission);
self.status_bar.set_pending_permission(Some("PLAN".to_string()));
}
AgentResponse::PlanExecuting { step_id: _, step_index, total_steps } => {
self.chat_panel.add_message(ChatMessage::System(
format!("Executing step {}/{}", step_index + 1, total_steps)
));
}
AgentResponse::PlanExecutionComplete { executed, skipped } => {
self.chat_panel.add_message(ChatMessage::System(
format!("Plan execution complete: {} executed, {} skipped", executed, skipped)
));
}
}
}
Message::System(sys) => {
@@ -803,6 +982,17 @@ Commands: /help, /model <name>, /clear, /theme <name>
agent_core::messages::SystemNotification::StateUpdate(_s) => {
// Handle state updates if needed
}
agent_core::messages::SystemNotification::PlanSaved { id, path } => {
self.chat_panel.add_message(ChatMessage::System(
format!("Plan saved: {} -> {}", id, path)
));
}
agent_core::messages::SystemNotification::PlanLoaded { id, name, steps } => {
let name_str = name.unwrap_or_else(|| "(unnamed)".to_string());
self.chat_panel.add_message(ChatMessage::System(
format!("Plan loaded: {} '{}' ({} steps)", id, name_str, steps)
));
}
}
}
_ => {}
@@ -988,17 +1178,6 @@ Commands: /help, /model <name>, /clear, /theme <name>
return Ok(());
}
// Get the current provider client
let client = match self.get_client() {
Ok(c) => c,
Err(e) => {
self.chat_panel.add_message(ChatMessage::System(
format!("Failed to get provider: {}", e)
));
return Ok(());
}
};
// Add user message to chat IMMEDIATELY so it shows before AI response
self.chat_panel
.add_message(ChatMessage::User(message.clone()));
@@ -1007,13 +1186,28 @@ Commands: /help, /model <name>, /clear, /theme <name>
// Start streaming indicator
self.waiting_for_llm = true;
self.chat_panel.set_streaming(true);
self.status_bar.set_state(crate::components::AppState::Streaming);
let _ = event_tx.send(AppEvent::StreamStart);
// Send to engine
// If we have an engine connection, use it so the full agent loop runs
if let Some(tx) = &self.engine_tx {
let _ = tx.send(Message::UserAction(UserAction::Input(message.clone()))).await;
} else {
// Fallback to legacy background stream if no engine
// Only get client when needed for legacy path
let client = match self.get_client() {
Ok(c) => c,
Err(e) => {
self.waiting_for_llm = false;
self.chat_panel.set_streaming(false);
self.status_bar.set_state(crate::components::AppState::Idle);
self.chat_panel.add_message(ChatMessage::System(
format!("Failed to get provider: {}", e)
));
return Ok(());
}
};
// Spawn streaming in background task
let opts = self.opts.clone();
let tx = event_tx.clone();
@@ -1432,17 +1626,22 @@ Commands: /help, /model <name>, /clear, /theme <name>
}
cmd if cmd.starts_with("/provider ") => {
let provider_name = cmd.strip_prefix("/provider ").unwrap().trim();
match provider_name {
"ollama" | "anthropic" | "openai" => {
let provider_type = match provider_name {
"ollama" => Some(ProviderType::Ollama),
"anthropic" | "claude" => Some(ProviderType::Anthropic),
"openai" | "gpt" => Some(ProviderType::OpenAI),
_ => None,
};
if let Some(pt) = provider_type {
if let Err(e) = self.switch_provider(pt) {
self.chat_panel.add_message(ChatMessage::System(format!(
"Provider switching requires restart. Set OWLEN_PROVIDER={}", provider_name
)));
}
_ => {
self.chat_panel.add_message(ChatMessage::System(format!(
"Unknown provider: {}. Available: ollama, anthropic, openai", provider_name
"Failed to switch provider: {}", e
)));
}
} else {
self.chat_panel.add_message(ChatMessage::System(format!(
"Unknown provider: {}. Available: ollama, anthropic, openai", provider_name
)));
}
}
"/model" => {
@@ -1461,15 +1660,8 @@ Commands: /help, /model <name>, /clear, /theme <name>
"Current model: {}", self.opts.model
)));
} else {
self.opts.model = model_name.to_string();
self.status_bar = StatusBar::new(
self.opts.model.clone(),
self.perms.mode(),
self.theme.clone(),
);
self.chat_panel.add_message(ChatMessage::System(format!(
"Model switched to: {}", model_name
)));
// Use set_current_model to update both TUI and shared ProviderManager
self.set_current_model(model_name.to_string());
}
}
"/themes" => {


@@ -34,16 +34,16 @@ pub async fn run(
}
/// Run the TUI application with multi-provider support and engine integration
/// Uses a shared ProviderManager for dynamic provider/model switching
pub async fn run_with_providers(
auth_manager: Arc<AuthManager>,
provider_manager: Arc<tokio::sync::Mutex<ProviderManager>>,
perms: permissions::PermissionManager,
settings: config_agent::Settings,
engine_tx: tokio::sync::mpsc::Sender<agent_core::messages::Message>,
shared_state: Arc<tokio::sync::Mutex<agent_core::state::AppState>>,
engine_rx: tokio::sync::mpsc::Receiver<agent_core::messages::Message>,
) -> Result<()> {
let provider_manager = ProviderManager::new(auth_manager, settings.clone());
let mut app = TuiApp::with_provider_manager(provider_manager, perms, settings)?;
let mut app = TuiApp::with_shared_provider_manager(provider_manager, perms, settings)?;
app.set_engine(engine_tx, shared_state, engine_rx);
app.run().await
}


@@ -24,7 +24,7 @@ use tools_todo::TodoList;
pub use session::{
SessionStats, SessionHistory, ToolCallRecord,
Checkpoint, CheckpointManager, FileDiff,
Checkpoint, CheckpointManager, FileDiff, ToolCallsBuilder,
};
pub use system_prompt::{
SystemPromptBuilder, default_base_prompt, generate_tool_instructions,
@@ -36,7 +36,14 @@ pub use git::{
};
// Re-export planning mode types
pub use tools_plan::{AgentMode, PlanManager, PlanStatus};
pub use tools_plan::{
// Agent mode (document-based planning)
AgentMode, PlanManager, PlanStatus, PlanMetadata,
// Plan execution types (tool call accumulation)
PlanStep, AccumulatedPlan, AccumulatedPlanStatus, PlanApproval,
// Helper functions
enter_plan_mode, exit_plan_mode, is_tool_allowed_in_plan_mode,
};
// Re-export compaction types
pub use compact::{Compactor, TokenCounter};
@@ -423,62 +430,6 @@ pub fn get_tool_definitions() -> Vec<Tool> {
]
}
/// Helper to accumulate streaming tool call deltas
#[derive(Default)]
struct ToolCallsBuilder {
calls: Vec<PartialToolCall>,
}
#[derive(Default)]
struct PartialToolCall {
id: Option<String>,
name: Option<String>,
arguments: String,
}
impl ToolCallsBuilder {
fn new() -> Self {
Self::default()
}
fn add_deltas(&mut self, deltas: &[llm_core::ToolCallDelta]) {
for delta in deltas {
while self.calls.len() <= delta.index {
self.calls.push(PartialToolCall::default());
}
let call = &mut self.calls[delta.index];
if let Some(id) = &delta.id {
call.id = Some(id.clone());
}
if let Some(name) = &delta.function_name {
call.name = Some(name.clone());
}
if let Some(args) = &delta.arguments_delta {
call.arguments.push_str(args);
}
}
}
fn build(self) -> Vec<llm_core::ToolCall> {
self.calls
.into_iter()
.filter_map(|p| {
let id = p.id?;
let name = p.name?;
let args: Value = serde_json::from_str(&p.arguments).ok()?;
Some(llm_core::ToolCall {
id,
call_type: "function".to_string(),
function: llm_core::FunctionCall {
name,
arguments: args,
},
})
})
.collect()
}
}
/// Executes a single tool call and returns its result as a string.
///
/// This function handles permission checking and interacts with various tool-specific


@@ -1,4 +1,5 @@
use serde::{Deserialize, Serialize};
use tools_plan::{PlanStep, AccumulatedPlanStatus, PlanApproval};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Message {
@@ -9,25 +10,83 @@ pub enum Message {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UserAction {
/// User typed a message
Input(String),
/// User issued a command
Command(String),
/// User responded to a permission request
PermissionResult(bool),
/// Exit the application
Exit,
// Plan mode actions
/// Stop accumulating steps, enter review mode
FinalizePlan,
/// User's approval/rejection of plan steps
PlanApproval(PlanApproval),
/// Save the current plan with a name
SavePlan(String),
/// Load a saved plan by ID
LoadPlan(String),
/// Cancel the current plan
CancelPlan,
// Provider switching
/// Switch to a different provider (ollama, anthropic, openai)
SwitchProvider(String),
/// Switch to a different model for the current provider
SwitchModel(String),
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AgentResponse {
/// Streaming text token from LLM
Token(String),
/// Tool call being executed
ToolCall { name: String, args: String },
/// Permission needed for a tool
PermissionRequest { tool: String, context: Option<String> },
/// Legacy: batch staging (deprecated, use PlanStepAdded)
PlanStaging(Vec<ToolCallStaging>),
/// Agent done responding
Complete,
/// Error occurred
Error(String),
// Plan mode responses
/// A new step was added to the accumulated plan
PlanStepAdded(PlanStep),
/// Agent finished proposing steps (in plan mode)
PlanComplete {
total_steps: usize,
status: AccumulatedPlanStatus,
},
/// A step is currently being executed
PlanExecuting {
step_id: String,
step_index: usize,
total_steps: usize,
},
/// Plan execution finished
PlanExecutionComplete {
executed: usize,
skipped: usize,
},
}
/// Legacy tool call staging (for backward compatibility)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCallStaging {
pub id: String,
pub name: String,
pub args: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SystemNotification {
StateUpdate(String),
Warning(String),
/// Plan was saved successfully
PlanSaved { id: String, path: String },
/// Plan was loaded successfully
PlanLoaded { id: String, name: Option<String>, steps: usize },
}
#[cfg(test)]


@@ -2,6 +2,7 @@ use std::sync::Arc;
use tokio::sync::{Mutex, Notify};
use llm_core::ChatMessage;
use serde::{Deserialize, Serialize};
use tools_plan::{AccumulatedPlan, PlanApproval};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AppMode {
@@ -17,13 +18,32 @@ impl Default for AppMode {
}
/// Shared application state
#[derive(Debug, Default)]
#[derive(Debug)]
pub struct AppState {
/// Conversation history
pub messages: Vec<ChatMessage>,
/// Whether the application is running
pub running: bool,
/// Current permission mode
pub mode: AppMode,
/// Result of last permission request
pub last_permission_result: Option<bool>,
/// Notify channel for permission responses
pub permission_notify: Arc<Notify>,
/// Legacy: pending actions (deprecated)
pub pending_actions: Vec<String>,
/// Current accumulated plan (for Plan mode)
pub accumulated_plan: Option<AccumulatedPlan>,
/// Pending plan approval from user
pub plan_approval: Option<PlanApproval>,
/// Notify channel for plan approval responses
pub plan_notify: Arc<Notify>,
}
impl Default for AppState {
fn default() -> Self {
Self::new()
}
}
impl AppState {
@@ -34,6 +54,10 @@ impl AppState {
mode: AppMode::Normal,
last_permission_result: None,
permission_notify: Arc::new(Notify::new()),
pending_actions: Vec::new(),
accumulated_plan: None,
plan_approval: None,
plan_notify: Arc::new(Notify::new()),
}
}
@@ -45,6 +69,43 @@ impl AppState {
self.last_permission_result = Some(result);
self.permission_notify.notify_one();
}
/// Start a new accumulated plan
pub fn start_plan(&mut self) {
self.accumulated_plan = Some(AccumulatedPlan::new());
}
/// Start a new accumulated plan with a name
pub fn start_plan_with_name(&mut self, name: String) {
self.accumulated_plan = Some(AccumulatedPlan::with_name(name));
}
/// Get the current plan (if any)
pub fn current_plan(&self) -> Option<&AccumulatedPlan> {
self.accumulated_plan.as_ref()
}
/// Get the current plan mutably (if any)
pub fn current_plan_mut(&mut self) -> Option<&mut AccumulatedPlan> {
self.accumulated_plan.as_mut()
}
/// Clear the current plan
pub fn clear_plan(&mut self) {
self.accumulated_plan = None;
self.plan_approval = None;
}
/// Set plan approval and notify waiting tasks
pub fn set_plan_approval(&mut self, approval: PlanApproval) {
self.plan_approval = Some(approval);
self.plan_notify.notify_one();
}
/// Take the plan approval (consuming it)
pub fn take_plan_approval(&mut self) -> Option<PlanApproval> {
self.plan_approval.take()
}
}
#[cfg(test)]


@@ -194,6 +194,17 @@ impl LlmProvider for OllamaClient {
let resp = req.send().await
.map_err(|e| LlmError::Http(e.to_string()))?;
// Check response status
let status = resp.status();
if !status.is_success() {
let error_body = resp.text().await.unwrap_or_else(|_| "unknown error".to_string());
return Err(LlmError::Api {
message: format!("Ollama API error: {} - {}", status, error_body),
code: Some(status.as_str().to_string()),
});
}
let bytes_stream = resp.bytes_stream();
// NDJSON parser with buffering for partial lines across chunks
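
The parser itself is outside this hunk. A minimal sketch of the buffering
it describes — an assumed shape only, not the actual implementation — since
a network chunk can end mid-line:

```rust
let mut buf = String::new();
while let Some(next) = bytes_stream.next().await {
    let bytes = next.map_err(|e| LlmError::Http(e.to_string()))?;
    buf.push_str(&String::from_utf8_lossy(&bytes));
    // Drain every complete line; a trailing partial line stays buffered
    // until the next chunk completes it.
    while let Some(pos) = buf.find('\n') {
        let line: String = buf.drain(..=pos).collect();
        let line = line.trim();
        if line.is_empty() {
            continue;
        }
        // Each complete line is one JSON object (NDJSON).
        let value: serde_json::Value = serde_json::from_str(line)
            .map_err(|e| LlmError::Http(e.to_string()))?;
        // ...map `value` into a StreamChunk and yield it downstream...
    }
}
```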