From 5f78761bef0c20e1d176b9ea920f0a26578e64e7 Mon Sep 17 00:00:00 2001
From: vikingowl
Date: Sun, 28 Sep 2025 17:15:18 +0200
Subject: [PATCH] Enhance loading feedback: add loading animation for
 Assistant replies, improve TUI message updates, and refine response
 handling logic. Update README to reflect roadmap progress.

---
 README.md                        |  4 +-
 crates/owlen-cli/src/main.rs     | 10 ++++
 crates/owlen-core/src/session.rs | 11 ++++
 crates/owlen-tui/src/chat_app.rs | 88 ++++++++++++++++++++++++++++----
 crates/owlen-tui/src/ui.rs       | 37 +++++++++++++-
 5 files changed, 135 insertions(+), 15 deletions(-)

diff --git a/README.md b/README.md
index 293eb0e..7298513 100644
--- a/README.md
+++ b/README.md
@@ -96,8 +96,8 @@ client persists its latest selections back to this file on exit.
 - Configuration and chat history are cached locally; wipe `~/.config/owlen` to reset.
 
 ## Roadmap
-- [ ] Add autoscroll.
-- [ ] Push user message before loading the LLM response.
+- [x] Add autoscroll.
+- [x] Push user message before loading the LLM response.
 - [ ] Add support for "thinking" models.
 - [ ] Add theming options.
 - [ ] Provide proper configuration UX.
diff --git a/crates/owlen-cli/src/main.rs b/crates/owlen-cli/src/main.rs
index 4f21a86..28d2d91 100644
--- a/crates/owlen-cli/src/main.rs
+++ b/crates/owlen-cli/src/main.rs
@@ -92,8 +92,14 @@ async fn run_app(
     session_rx: &mut mpsc::UnboundedReceiver,
 ) -> Result<()> {
     loop {
+        // Advance loading animation frame
+        app.advance_loading_animation();
+
         terminal.draw(|f| ui::render_chat(f, app))?;
 
+        // Process any pending LLM requests AFTER UI has been drawn
+        app.process_pending_llm_request().await?;
+
         tokio::select! {
             Some(event) = event_rx.recv() => {
                 if let AppState::Quit = app.handle_event(event).await? {
@@ -103,6 +109,10 @@ async fn run_app(
             Some(session_event) = session_rx.recv() => {
                 app.handle_session_event(session_event)?;
             }
+            // Add a timeout to keep the animation going even when there are no events
+            _ = tokio::time::sleep(tokio::time::Duration::from_millis(100)) => {
+                // This will cause the loop to continue and advance the animation
+            }
         }
     }
 }
diff --git a/crates/owlen-core/src/session.rs b/crates/owlen-core/src/session.rs
index c7bdb47..7ad9942 100644
--- a/crates/owlen-core/src/session.rs
+++ b/crates/owlen-core/src/session.rs
@@ -144,6 +144,17 @@ impl SessionController {
 
         self.conversation.push_user_message(content);
 
+        self.send_request_with_current_conversation(parameters).await
+    }
+
+    /// Send a request using the current conversation without adding a new user message
+    pub async fn send_request_with_current_conversation(
+        &mut self,
+        mut parameters: ChatParameters,
+    ) -> Result {
+        let streaming = parameters.stream || self.config.general.enable_streaming;
+        parameters.stream = streaming;
+
         let request = ChatRequest {
             model: self.conversation.active().model.clone(),
             messages: self.conversation.active().messages.clone(),
diff --git a/crates/owlen-tui/src/chat_app.rs b/crates/owlen-tui/src/chat_app.rs
index e5caf2a..75aa2fc 100644
--- a/crates/owlen-tui/src/chat_app.rs
+++ b/crates/owlen-tui/src/chat_app.rs
@@ -69,6 +69,9 @@ pub struct ChatApp {
     session_tx: mpsc::UnboundedSender,
     streaming: HashSet,
     textarea: TextArea<'static>, // Advanced text input widget
+    pending_llm_request: bool, // Flag to indicate LLM request needs to be processed
+    loading_animation_frame: usize, // Frame counter for loading animation
+    is_loading: bool, // Whether we're currently loading a response
 }
 
 impl ChatApp {
@@ -93,6 +96,9 @@ impl ChatApp {
             session_tx,
             streaming: std::collections::HashSet::new(),
             textarea,
+            pending_llm_request: false,
+            loading_animation_frame: 0,
+            is_loading: false,
         };
         (app, session_rx)
     }
@@ -286,7 +292,7 @@ impl ChatApp {
             KeyCode::Enter if key.modifiers.is_empty() => {
                 // Send message and return to normal mode
                 self.sync_textarea_to_buffer();
-                self.try_send_message().await?;
+                self.send_user_message_and_request_response();
                 // Clear the textarea by setting it to empty
                 self.textarea = TextArea::default();
                 configure_textarea_defaults(&mut self.textarea);
@@ -407,10 +413,12 @@ impl ChatApp {
             }
             if response.is_final {
                 self.streaming.remove(&message_id);
-                self.status = "Response complete".to_string();
+                self.stop_loading_animation();
+                self.status = "Ready".to_string();
             }
         }
         SessionEvent::StreamError { message } => {
+            self.stop_loading_animation();
             self.error = Some(message);
         }
     }
@@ -474,22 +482,43 @@ impl ChatApp {
         Ok(())
     }
 
-    async fn try_send_message(&mut self) -> Result<()> {
+    fn send_user_message_and_request_response(&mut self) {
         let content = self.controller.input_buffer().text().trim().to_string();
         if content.is_empty() {
             self.error = Some("Cannot send empty message".to_string());
+            return;
+        }
+
+        // Step 1: Add user message to conversation immediately (synchronous)
+        let message = self.controller.input_buffer_mut().commit_to_history();
+        self.controller.conversation_mut().push_user_message(message.clone());
+        self.scroll_to_bottom();
+
+        // Step 2: Set flag to process LLM request on next event loop iteration
+        self.pending_llm_request = true;
+        self.status = "Message sent".to_string();
+        self.error = None;
+    }
+
+    pub async fn process_pending_llm_request(&mut self) -> Result<()> {
+        if !self.pending_llm_request {
             return Ok(());
         }
 
-        self.scroll_to_bottom();
+        self.pending_llm_request = false;
+
+        // Step 1: Show loading model status and start animation
+        self.status = format!("Loading model '{}'...", self.controller.selected_model());
+        self.start_loading_animation();
 
-        let message = self.controller.input_buffer_mut().commit_to_history();
         let mut parameters = ChatParameters::default();
         parameters.stream = self.controller.config().general.enable_streaming;
 
-        match self.controller.send_message(message, parameters).await {
+        // Step 2: Start the actual request
+        match self.controller.send_request_with_current_conversation(parameters).await {
             Ok(SessionOutcome::Complete(_response)) => {
-                self.status = "Response received".to_string();
+                self.stop_loading_animation();
+                self.status = "Ready".to_string();
                 self.error = None;
                 Ok(())
             }
@@ -497,17 +526,19 @@ impl ChatApp {
                 response_id,
                 stream,
             }) => {
+                // Step 3: Model loaded, now generating response
+                self.status = "Generating response...".to_string();
+
                 self.spawn_stream(response_id, stream);
                 match self
                     .controller
-                    .mark_stream_placeholder(response_id, "Loading...")
+                    .mark_stream_placeholder(response_id, "▌")
                 {
                     Ok(_) => self.error = None,
                     Err(err) => {
-                        self.error = Some(format!("Could not set loading placeholder: {}", err));
+                        self.error = Some(format!("Could not set response placeholder: {}", err));
                     }
                 }
-                self.status = "Waiting for response...".to_string();
                 Ok(())
             }
             Err(err) => {
@@ -522,13 +553,15 @@ impl ChatApp {
                     self.mode = InputMode::ProviderSelection;
                 } else {
                     self.error = Some(message);
-                    self.status = "Send failed".to_string();
+                    self.status = "Request failed".to_string();
                 }
+                self.stop_loading_animation();
                 Ok(())
             }
         }
     }
 
+
     fn sync_selected_model_index(&mut self) {
         let current_model_id = self.controller.selected_model().to_string();
         let filtered_models: Vec<&ModelInfo> = self
@@ -568,6 +601,39 @@ impl ChatApp {
         self.content_width = content_width;
     }
 
+    pub fn start_loading_animation(&mut self) {
+        self.is_loading = true;
+        self.loading_animation_frame = 0;
+    }
+
+    pub fn stop_loading_animation(&mut self) {
+        self.is_loading = false;
+    }
+
+    pub fn advance_loading_animation(&mut self) {
+        if self.is_loading {
+            self.loading_animation_frame = (self.loading_animation_frame + 1) % 8; // 8-frame animation
+        }
+    }
+
+    pub fn get_loading_indicator(&self) -> &'static str {
+        if !self.is_loading {
+            return "";
+        }
+
+        match self.loading_animation_frame {
+            0 => "⠋",
+            1 => "⠙",
+            2 => "⠹",
+            3 => "⠸",
+            4 => "⠼",
+            5 => "⠴",
+            6 => "⠦",
+            7 => "⠧",
+            _ => "⠋",
+        }
+    }
+
     fn is_at_bottom(&self) -> bool {
         let total_lines = self.calculate_total_content_lines();
         let max_scroll = total_lines.saturating_sub(self.viewport_height);
diff --git a/crates/owlen-tui/src/ui.rs b/crates/owlen-tui/src/ui.rs
index 94a139b..13cd7b9 100644
--- a/crates/owlen-tui/src/ui.rs
+++ b/crates/owlen-tui/src/ui.rs
@@ -377,10 +377,23 @@ fn render_messages(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
         let indent = if show_role_labels { " " } else { "" };
 
         if show_role_labels {
-            lines.push(Line::from(Span::styled(
+            let mut role_spans = vec![Span::styled(
                 prefix,
                 role_color(role).add_modifier(Modifier::BOLD),
-            )));
+            )];
+
+            // Add loading animation for Assistant if currently loading and this is the last message
+            if matches!(role, Role::Assistant) &&
+                app.get_loading_indicator() != "" &&
+                message_index == conversation.messages.len() - 1 &&
+                is_streaming {
+                role_spans.push(Span::styled(
+                    format!(" {}", app.get_loading_indicator()),
+                    Style::default().fg(Color::Yellow),
+                ));
+            }
+
+            lines.push(Line::from(role_spans));
         }
 
         for (i, line) in formatted.iter().enumerate() {
@@ -397,6 +410,26 @@ fn render_messages(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
         }
     }
 
+    // Add loading indicator ONLY if we're loading and there are no messages at all,
+    // or if the last message is from the user (no Assistant response started yet)
+    let last_message_is_user = conversation.messages.last()
+        .map(|msg| matches!(msg.role, Role::User))
+        .unwrap_or(true);
+
+    if app.get_loading_indicator() != "" && last_message_is_user {
+        let loading_spans = vec![
+            Span::styled(
+                "🤖 Assistant:",
+                Style::default().fg(Color::LightMagenta).add_modifier(Modifier::BOLD),
+            ),
+            Span::styled(
+                format!(" {}", app.get_loading_indicator()),
+                Style::default().fg(Color::Yellow),
+            ),
+        ];
+        lines.push(Line::from(loading_spans));
+    }
+
     if lines.is_empty() {
         lines.push(Line::from("No messages yet. Press 'i' to start typing."));
    }
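
Note: the pattern this patch introduces — push the user's message, set a flag, draw one
frame, and only then start the slow request, with a sleep arm in tokio::select! so the
spinner keeps ticking between events — can be studied in isolation. Below is a minimal,
self-contained sketch of that loop against plain stdout. This is not owlen code: the App
struct, the 300 ms / 2 s timings, and the fake "send"/"done" events are invented for the
demo, and it assumes a Cargo dependency of tokio = { version = "1", features = ["full"] }.

use std::io::{self, Write};
use std::time::Duration;
use tokio::sync::mpsc;

// Mirrors the 8-frame Braille spinner added in chat_app.rs.
const FRAMES: [&str; 8] = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧"];

struct App {
    frame: usize,
    is_loading: bool,
    pending_request: bool,
}

impl App {
    // Counterpart of advance_loading_animation().
    fn advance(&mut self) {
        if self.is_loading {
            self.frame = (self.frame + 1) % FRAMES.len();
        }
    }

    // Stand-in for terminal.draw(): repaint one status line in place.
    fn draw(&self) -> io::Result<()> {
        let glyph = if self.is_loading { FRAMES[self.frame] } else { " " };
        print!("\r{glyph} waiting for response...");
        io::stdout().flush()
    }

    // Counterpart of process_pending_llm_request(): runs only after a draw,
    // so the message that set the flag is already visible on screen.
    async fn process_pending_request(&mut self) {
        if self.pending_request {
            self.pending_request = false;
            self.is_loading = true; // the real code starts the LLM request here
        }
    }
}

#[tokio::main]
async fn main() -> io::Result<()> {
    let (tx, mut rx) = mpsc::unbounded_channel::<&str>();

    // Fake session: the user "sends" after 300 ms, the reply lands 2 s later.
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(300)).await;
        let _ = tx.send("send");
        tokio::time::sleep(Duration::from_secs(2)).await;
        let _ = tx.send("done");
    });

    let mut app = App { frame: 0, is_loading: false, pending_request: false };
    loop {
        app.advance();
        app.draw()?;
        app.process_pending_request().await;

        tokio::select! {
            event = rx.recv() => match event {
                Some("send") => app.pending_request = true, // deferred one iteration
                Some(_) | None => break, // "done", or the channel closed
            },
            // Timeout arm: keeps the loop (and the spinner) ticking between events.
            _ = tokio::time::sleep(Duration::from_millis(100)) => {}
        }
    }
    println!("\r✔ response complete            ");
    Ok(())
}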
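
One trade-off worth flagging in review: the 100 ms sleep arm wakes the loop and forces a
redraw roughly ten times per second even while nothing is loading. An alternative (not
what this patch does) would be a tokio::time::interval arm polled only while is_loading
is true, or skipping the draw when no event arrived and no frame advanced. With ratatui
the cost is mostly idle CPU rather than visible flicker, since the terminal buffer diff
suppresses unchanged cells.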