Enhance loading feedback: add loading animation for Assistant replies, improve TUI message updates, and refine response handling logic. Update README to reflect roadmap progress.

2025-09-28 17:15:18 +02:00
parent fcdbd2bf98
commit 5f78761bef
5 changed files with 135 additions and 15 deletions

View File

@@ -96,8 +96,8 @@ client persists its latest selections back to this file on exit.
 - Configuration and chat history are cached locally; wipe `~/.config/owlen` to reset.
 
 ## Roadmap
-- [ ] Add autoscroll.
-- [ ] Push user message before loading the LLM response.
+- [x] Add autoscroll.
+- [x] Push user message before loading the LLM response.
 - [ ] Add support for "thinking" models.
 - [ ] Add theming options.
 - [ ] Provide proper configuration UX.

View File

@@ -92,8 +92,14 @@ async fn run_app(
     session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
 ) -> Result<()> {
     loop {
+        // Advance loading animation frame
+        app.advance_loading_animation();
+
         terminal.draw(|f| ui::render_chat(f, app))?;
 
+        // Process any pending LLM requests AFTER UI has been drawn
+        app.process_pending_llm_request().await?;
+
         tokio::select! {
             Some(event) = event_rx.recv() => {
                 if let AppState::Quit = app.handle_event(event).await? {
@@ -103,6 +109,10 @@ async fn run_app(
             Some(session_event) = session_rx.recv() => {
                 app.handle_session_event(session_event)?;
             }
+            // Add a timeout to keep the animation going even when there are no events
+            _ = tokio::time::sleep(tokio::time::Duration::from_millis(100)) => {
+                // This will cause the loop to continue and advance the animation
+            }
         }
     }
 }
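
The new `sleep` arm is what keeps the spinner alive: `tokio::select!` would otherwise park until an event or session message arrived, freezing the animation between chunks. A minimal, self-contained sketch of the same loop shape (the channel and names here are illustrative, not the real owlen items):

use tokio::sync::mpsc;
use tokio::time::{sleep, Duration};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<&'static str>();
    tokio::spawn(async move {
        sleep(Duration::from_millis(350)).await;
        let _ = tx.send("llm chunk");
    });

    let mut frame = 0usize;
    loop {
        frame = (frame + 1) % 8; // stand-in for app.advance_loading_animation()
        println!("draw, spinner frame {frame}"); // stand-in for terminal.draw(...)
        tokio::select! {
            Some(msg) = rx.recv() => {
                println!("event: {msg}");
                break;
            }
            // Without this arm the select! would block until an event
            // arrives and the animation would freeze in between.
            _ = sleep(Duration::from_millis(100)) => {}
        }
    }
}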

View File

@@ -144,6 +144,17 @@ impl SessionController {
         self.conversation.push_user_message(content);
+        self.send_request_with_current_conversation(parameters).await
+    }
+
+    /// Send a request using the current conversation without adding a new user message
+    pub async fn send_request_with_current_conversation(
+        &mut self,
+        mut parameters: ChatParameters,
+    ) -> Result<SessionOutcome> {
+        let streaming = parameters.stream || self.config.general.enable_streaming;
+        parameters.stream = streaming;
+
         let request = ChatRequest {
             model: self.conversation.active().model.clone(),
             messages: self.conversation.active().messages.clone(),
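
The refactor leaves two entry points: `send_message` still pushes the user turn itself before delegating, while the new `send_request_with_current_conversation` sends whatever the conversation already holds, for callers (here, the TUI) that have already pushed the message. A stripped-down sketch of that relationship, with hypothetical types in place of the real controller:

struct Controller {
    conversation: Vec<String>,
}

impl Controller {
    fn send_message(&mut self, content: String) -> usize {
        self.conversation.push(content); // legacy path: push the turn, then delegate
        self.send_request_with_current_conversation()
    }

    // Sends the current conversation without adding a new user message.
    fn send_request_with_current_conversation(&mut self) -> usize {
        self.conversation.len() // stand-in for building and sending the ChatRequest
    }
}

fn main() {
    let mut c = Controller { conversation: Vec::new() };
    c.conversation.push("hi".into()); // the TUI already pushed and rendered this turn
    assert_eq!(c.send_request_with_current_conversation(), 1);
    assert_eq!(c.send_message("more".into()), 2);
}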

View File

@@ -69,6 +69,9 @@ pub struct ChatApp {
     session_tx: mpsc::UnboundedSender<SessionEvent>,
     streaming: HashSet<Uuid>,
     textarea: TextArea<'static>, // Advanced text input widget
+    pending_llm_request: bool, // Flag to indicate LLM request needs to be processed
+    loading_animation_frame: usize, // Frame counter for loading animation
+    is_loading: bool, // Whether we're currently loading a response
 }
 
 impl ChatApp {
@@ -93,6 +96,9 @@ impl ChatApp {
             session_tx,
             streaming: std::collections::HashSet::new(),
             textarea,
+            pending_llm_request: false,
+            loading_animation_frame: 0,
+            is_loading: false,
         };
 
         (app, session_rx)
@@ -286,7 +292,7 @@ impl ChatApp {
             KeyCode::Enter if key.modifiers.is_empty() => {
                 // Send message and return to normal mode
                 self.sync_textarea_to_buffer();
-                self.try_send_message().await?;
+                self.send_user_message_and_request_response();
                 // Clear the textarea by setting it to empty
                 self.textarea = TextArea::default();
                 configure_textarea_defaults(&mut self.textarea);
@@ -407,10 +413,12 @@ impl ChatApp {
             }
             if response.is_final {
                 self.streaming.remove(&message_id);
-                self.status = "Response complete".to_string();
+                self.stop_loading_animation();
+                self.status = "Ready".to_string();
             }
         }
         SessionEvent::StreamError { message } => {
+            self.stop_loading_animation();
             self.error = Some(message);
         }
     }
@@ -474,22 +482,43 @@ impl ChatApp {
         Ok(())
     }
 
-    async fn try_send_message(&mut self) -> Result<()> {
+    fn send_user_message_and_request_response(&mut self) {
         let content = self.controller.input_buffer().text().trim().to_string();
         if content.is_empty() {
             self.error = Some("Cannot send empty message".to_string());
+            return;
+        }
+
+        // Step 1: Add user message to conversation immediately (synchronous)
+        let message = self.controller.input_buffer_mut().commit_to_history();
+        self.controller.conversation_mut().push_user_message(message.clone());
+        self.scroll_to_bottom();
+
+        // Step 2: Set flag to process LLM request on next event loop iteration
+        self.pending_llm_request = true;
+        self.status = "Message sent".to_string();
+        self.error = None;
+    }
+
+    pub async fn process_pending_llm_request(&mut self) -> Result<()> {
+        if !self.pending_llm_request {
             return Ok(());
         }
-        self.scroll_to_bottom();
+        self.pending_llm_request = false;
+
+        // Step 1: Show loading model status and start animation
+        self.status = format!("Loading model '{}'...", self.controller.selected_model());
+        self.start_loading_animation();
 
-        let message = self.controller.input_buffer_mut().commit_to_history();
         let mut parameters = ChatParameters::default();
         parameters.stream = self.controller.config().general.enable_streaming;
 
-        match self.controller.send_message(message, parameters).await {
+        // Step 2: Start the actual request
+        match self.controller.send_request_with_current_conversation(parameters).await {
             Ok(SessionOutcome::Complete(_response)) => {
-                self.status = "Response received".to_string();
+                self.stop_loading_animation();
+                self.status = "Ready".to_string();
                 self.error = None;
                 Ok(())
             }
@@ -497,17 +526,19 @@ impl ChatApp {
                 response_id,
                 stream,
             }) => {
+                // Step 3: Model loaded, now generating response
+                self.status = "Generating response...".to_string();
                 self.spawn_stream(response_id, stream);
                 match self
                     .controller
-                    .mark_stream_placeholder(response_id, "Loading...")
+                    .mark_stream_placeholder(response_id, "")
                 {
                     Ok(_) => self.error = None,
                     Err(err) => {
-                        self.error = Some(format!("Could not set loading placeholder: {}", err));
+                        self.error = Some(format!("Could not set response placeholder: {}", err));
                     }
                 }
-                self.status = "Waiting for response...".to_string();
                 Ok(())
             }
             Err(err) => {
@@ -522,13 +553,15 @@ impl ChatApp {
                     self.mode = InputMode::ProviderSelection;
                 } else {
                     self.error = Some(message);
-                    self.status = "Send failed".to_string();
+                    self.status = "Request failed".to_string();
                 }
+                self.stop_loading_animation();
                 Ok(())
             }
         }
     }
 
     fn sync_selected_model_index(&mut self) {
         let current_model_id = self.controller.selected_model().to_string();
         let filtered_models: Vec<&ModelInfo> = self
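
Taken together, the hunks above implement a two-phase send: the Enter handler runs synchronously, pushes the user message, and raises `pending_llm_request`; the request itself starts only after the next draw, so the message is on screen before any network latency. A simplified sketch of the pattern (hypothetical types, not the real `ChatApp`):

struct App {
    pending_llm_request: bool,
    status: String,
}

impl App {
    fn send_user_message_and_request_response(&mut self) {
        // Phase 1 (synchronous): state is updated before the next draw,
        // so the user sees their message immediately.
        self.status = "Message sent".into();
        self.pending_llm_request = true;
    }

    async fn process_pending_llm_request(&mut self) {
        if !self.pending_llm_request {
            return;
        }
        self.pending_llm_request = false;
        // Phase 2 (after the draw): kick off the actual request.
        self.status = "Generating response...".into();
    }
}

#[tokio::main]
async fn main() {
    let mut app = App { pending_llm_request: false, status: String::new() };
    app.send_user_message_and_request_response();
    println!("draw sees: {}", app.status);   // "Message sent"
    app.process_pending_llm_request().await; // runs after that draw
    println!("draw sees: {}", app.status);   // "Generating response..."
}
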
@@ -568,6 +601,39 @@ impl ChatApp {
         self.content_width = content_width;
     }
 
+    pub fn start_loading_animation(&mut self) {
+        self.is_loading = true;
+        self.loading_animation_frame = 0;
+    }
+
+    pub fn stop_loading_animation(&mut self) {
+        self.is_loading = false;
+    }
+
+    pub fn advance_loading_animation(&mut self) {
+        if self.is_loading {
+            self.loading_animation_frame = (self.loading_animation_frame + 1) % 8; // 8-frame animation
+        }
+    }
+
+    pub fn get_loading_indicator(&self) -> &'static str {
+        if !self.is_loading {
+            return "";
+        }
+        // Braille spinner frames, one per animation step
+        match self.loading_animation_frame {
+            0 => "⠋",
+            1 => "⠙",
+            2 => "⠹",
+            3 => "⠸",
+            4 => "⠼",
+            5 => "⠴",
+            6 => "⠦",
+            7 => "⠧",
+            _ => "",
+        }
+    }
+
     fn is_at_bottom(&self) -> bool {
         let total_lines = self.calculate_total_content_lines();
         let max_scroll = total_lines.saturating_sub(self.viewport_height);
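
The spinner is just a modulo-8 frame counter over a fixed glyph table. A table-driven equivalent is sketched below; the braille "dots" glyphs are an assumption (any eight frames cycled the same way behave identically):

// Hypothetical, standalone restatement of the spinner methods above.
const FRAMES: [&str; 8] = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧"];

struct Spinner {
    frame: usize,
    running: bool,
}

impl Spinner {
    fn advance(&mut self) {
        if self.running {
            self.frame = (self.frame + 1) % FRAMES.len();
        }
    }

    fn indicator(&self) -> &'static str {
        if self.running { FRAMES[self.frame] } else { "" }
    }
}

fn main() {
    let mut s = Spinner { frame: 0, running: true };
    for _ in 0..4 {
        print!("{} ", s.indicator());
        s.advance();
    }
    println!(); // prints: ⠋ ⠙ ⠹ ⠸
}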

View File

@@ -377,10 +377,23 @@ fn render_messages(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
     let indent = if show_role_labels { " " } else { "" };
 
     if show_role_labels {
-        lines.push(Line::from(Span::styled(
-            prefix,
-            role_color(role).add_modifier(Modifier::BOLD),
-        )));
+        let mut role_spans = vec![Span::styled(
+            prefix,
+            role_color(role).add_modifier(Modifier::BOLD),
+        )];
+
+        // Add loading animation for Assistant if currently loading and this is the last message
+        if matches!(role, Role::Assistant) &&
+            app.get_loading_indicator() != "" &&
+            message_index == conversation.messages.len() - 1 &&
+            is_streaming {
+            role_spans.push(Span::styled(
+                format!(" {}", app.get_loading_indicator()),
+                Style::default().fg(Color::Yellow),
+            ));
+        }
+
+        lines.push(Line::from(role_spans));
     }
 
     for (i, line) in formatted.iter().enumerate() {
@@ -397,6 +410,26 @@ fn render_messages(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
         }
     }
 
+    // Add loading indicator ONLY if we're loading and there are no messages at all,
+    // or if the last message is from the user (no Assistant response started yet)
+    let last_message_is_user = conversation.messages.last()
+        .map(|msg| matches!(msg.role, Role::User))
+        .unwrap_or(true);
+
+    if app.get_loading_indicator() != "" && last_message_is_user {
+        let loading_spans = vec![
+            Span::styled(
+                "🤖 Assistant:",
+                Style::default().fg(Color::LightMagenta).add_modifier(Modifier::BOLD),
+            ),
+            Span::styled(
+                format!(" {}", app.get_loading_indicator()),
+                Style::default().fg(Color::Yellow),
+            ),
+        ];
+        lines.push(Line::from(loading_spans));
+    }
+
     if lines.is_empty() {
         lines.push(Line::from("No messages yet. Press 'i' to start typing."));
     }
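
The fallback indicator covers the window between the user pressing Enter and the first assistant token arriving: it shows only while loading, and only when the last message is still the user's (or the conversation is empty). The rule in isolation, with simplified stand-in types:

#[derive(PartialEq)]
enum Role {
    User,
    Assistant,
}

fn show_standalone_spinner(is_loading: bool, messages: &[Role]) -> bool {
    let last_message_is_user = messages
        .last()
        .map(|role| *role == Role::User)
        .unwrap_or(true); // an empty conversation also qualifies
    is_loading && last_message_is_user
}

fn main() {
    assert!(show_standalone_spinner(true, &[]));
    assert!(show_standalone_spinner(true, &[Role::User]));
    assert!(!show_standalone_spinner(true, &[Role::User, Role::Assistant]));
    assert!(!show_standalone_spinner(false, &[Role::User]));
    println!("fallback rule holds");
}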