feat(tui): cache rendered message lines and throttle streaming redraws to improve TUI responsiveness

- Introduce `MessageRenderContext` and `MessageCacheEntry` for caching wrapped lines per message.
- Implement `render_message_lines_cached` using cache, invalidating on updates.
- Add role/style helpers and content hashing for cache validation.
- Throttle UI redraws in the main loop during active streaming (50 ms interval) and adjust idle tick timing.
- Update drawing logic to use cached rendering and manage draw intervals.
- Remove unused `role_color` function and adjust imports accordingly.
This commit is contained in:
2025-10-12 15:02:33 +02:00
parent acbfe47a4b
commit d2a193e5c1
4 changed files with 276 additions and 125 deletions

View File

@@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Secure credential vault integration for Ollama Cloud API keys when `privacy.encrypt_local_data = true`.
- Input panel respects a new `ui.input_max_rows` setting so long prompts expand predictably before scrolling kicks in.
- Command palette offers fuzzy `:model` filtering and `:provider` completions for fast switching.
- Message rendering caches wrapped lines and throttles streaming redraws to keep the TUI responsive on long sessions.
- Chat history honors `ui.scrollback_lines`, trimming older rows to keep the TUI responsive and surfacing a "↓ New messages" badge whenever updates land off-screen.
### Changed

View File

@@ -478,11 +478,25 @@ async fn run_app(
mut event_rx: mpsc::UnboundedReceiver<Event>,
session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
) -> Result<()> {
let stream_draw_interval = tokio::time::Duration::from_millis(50);
let idle_tick = tokio::time::Duration::from_millis(100);
let mut last_draw = tokio::time::Instant::now() - stream_draw_interval;
loop {
// Advance loading animation frame
app.advance_loading_animation();
let streaming_active = app.streaming_count() > 0;
let draw_due = if streaming_active {
last_draw.elapsed() >= stream_draw_interval
} else {
true
};
if draw_due {
terminal.draw(|f| ui::render_chat(f, app))?;
last_draw = tokio::time::Instant::now();
}
// Process any pending LLM requests AFTER UI has been drawn
if let Err(e) = app.process_pending_llm_request().await {
@@ -494,6 +508,14 @@ async fn run_app(
eprintln!("Error processing tool execution: {}", e);
}
let sleep_duration = if streaming_active {
stream_draw_interval
.checked_sub(last_draw.elapsed())
.unwrap_or_else(|| tokio::time::Duration::from_millis(0))
} else {
idle_tick
};
tokio::select! {
Some(event) = event_rx.recv() => {
if let AppState::Quit = app.handle_event(event).await? {
@@ -503,10 +525,7 @@ async fn run_app(
Some(session_event) = session_rx.recv() => {
app.handle_session_event(session_event)?;
}
// Add a timeout to keep the animation going even when there are no events
_ = tokio::time::sleep(tokio::time::Duration::from_millis(100)) => {
// This will cause the loop to continue and advance the animation
}
_ = tokio::time::sleep(sleep_duration) => {}
}
}
}

View File

@@ -10,6 +10,7 @@ use owlen_core::{
ui::{AppState, AutoScroll, FocusedPanel, InputMode},
};
use ratatui::style::{Color, Modifier, Style};
use ratatui::text::{Line, Span};
use textwrap::wrap;
use tokio::{sync::mpsc, task::JoinHandle};
use tui_textarea::{Input, TextArea};
@@ -23,7 +24,10 @@ use crate::state::{CommandPalette, ModelPaletteEntry};
use crate::ui::format_tool_output;
// Agent executor moved to separate binary `owlen-agent`. The TUI no longer directly
// imports `AgentExecutor` to avoid a circular dependency on `owlen-cli`.
use std::borrow::Cow;
use std::collections::hash_map::DefaultHasher;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::hash::{Hash, Hasher};
use std::sync::Arc;
const ONBOARDING_STATUS_LINE: &str =
@@ -151,6 +155,7 @@ pub struct ChatApp {
model_info_viewport_height: usize, // Cached viewport height for the info panel
expanded_provider: Option<String>, // Which provider group is currently expanded
current_provider: String, // Provider backing the active session
message_line_cache: HashMap<Uuid, MessageCacheEntry>, // Cached rendered lines per message
auto_scroll: AutoScroll, // Auto-scroll state for message rendering
thinking_scroll: AutoScroll, // Auto-scroll state for thinking panel
viewport_height: usize, // Track the height of the messages viewport
@@ -208,6 +213,47 @@ pub struct ConsentDialogState {
pub callback_id: Uuid, // ID to match callback with the request
}
/// Snapshot of one message's rendered output plus the render parameters it
/// was produced under. Stored per message id in `ChatApp::message_line_cache`;
/// an entry is reused only when every parameter field matches the current
/// render request (see `render_message_lines_cached`).
#[derive(Clone)]
struct MessageCacheEntry {
    theme_name: String,        // theme active when the lines were rendered
    wrap_width: usize,         // content width the lines were wrapped to
    show_role_labels: bool,    // whether role-label header formatting was applied
    content_hash: u64,         // hash of (role, normalized content) for staleness checks
    lines: Vec<Line<'static>>, // fully rendered, owned lines ready to draw
}
/// Borrowed bundle of everything `render_message_lines_cached` needs to
/// render a single message, so call sites pass one struct instead of seven
/// loose arguments.
pub(crate) struct MessageRenderContext<'a> {
    formatter: &'a mut owlen_core::formatting::MessageFormatter, // strips thinking blocks from assistant text
    show_role_labels: bool, // prepend "You:"/"Assistant:"-style header lines
    content_width: usize,   // columns available for wrapped content
    is_last_message: bool,  // only the last message may show the loading indicator
    is_streaming: bool,     // streaming messages bypass and never populate the cache
    loading_indicator: &'a str, // spinner frame appended to a streaming assistant label
    theme: &'a Theme,       // colors for role labels, tool output, cursor, info
}
impl<'a> MessageRenderContext<'a> {
    /// Builds a render context. Pure field assignment — no validation or
    /// defaulting is performed here.
    pub(crate) fn new(
        formatter: &'a mut owlen_core::formatting::MessageFormatter,
        show_role_labels: bool,
        content_width: usize,
        is_last_message: bool,
        is_streaming: bool,
        loading_indicator: &'a str,
        theme: &'a Theme,
    ) -> Self {
        Self {
            formatter,
            show_role_labels,
            content_width,
            is_last_message,
            is_streaming,
            loading_indicator,
            theme,
        }
    }
}
impl ChatApp {
pub async fn new(
controller: SessionController,
@@ -248,6 +294,7 @@ impl ChatApp {
model_info_viewport_height: 0,
expanded_provider: None,
current_provider,
message_line_cache: HashMap::new(),
auto_scroll: AutoScroll::default(),
thinking_scroll: AutoScroll::default(),
viewport_height: 10, // Default viewport height, will be updated during rendering
@@ -759,6 +806,163 @@ impl ChatApp {
}
}
/// Themed foreground style for a message's role label.
fn role_style(theme: &Theme, role: &Role) -> Style {
    // Pick the theme color for the role, then build the style once.
    let color = match role {
        Role::User => theme.user_message_role,
        Role::Assistant => theme.assistant_message_role,
        Role::System => theme.unfocused_panel_border,
        Role::Tool => theme.info,
    };
    Style::default().fg(color)
}
/// Style applied to a message body: tool output gets its dedicated theme
/// color, every other role renders with the default terminal style.
fn content_style(theme: &Theme, role: &Role) -> Style {
    match role {
        Role::Tool => Style::default().fg(theme.tool_output),
        _ => Style::default(),
    }
}
/// Hash of a message's role and normalized display content, used to detect
/// stale cache entries in `render_message_lines_cached`.
///
/// The role's enum discriminant is hashed directly instead of its
/// `to_string()` form, which avoids allocating a `String` for every message
/// on every render pass. The hash is only ever compared against values
/// produced by this same function, so the change in hash values is
/// invisible outside the cache.
fn message_content_hash(role: &Role, content: &str) -> u64 {
    let mut hasher = DefaultHasher::new();
    std::mem::discriminant(role).hash(&mut hasher);
    content.hash(&mut hasher);
    hasher.finish()
}
/// Drops the cached rendered lines for one message so it is re-rendered on
/// the next draw (called after stream chunks, stream errors, and
/// cancellation).
fn invalidate_message_cache(&mut self, id: &Uuid) {
    self.message_line_cache.remove(id);
}
pub(crate) fn render_message_lines_cached(
&mut self,
message_index: usize,
ctx: MessageRenderContext<'_>,
) -> Vec<Line<'static>> {
let MessageRenderContext {
formatter,
show_role_labels,
content_width,
is_last_message,
is_streaming,
loading_indicator,
theme,
} = ctx;
let (message_id, role, raw_content) = {
let conversation = self.conversation();
let message = &conversation.messages[message_index];
(message.id, message.role.clone(), message.content.clone())
};
let display_content = if matches!(role, Role::Assistant) {
formatter.extract_thinking(&raw_content).0
} else if matches!(role, Role::Tool) {
format_tool_output(&raw_content)
} else {
raw_content
};
let formatted_lines: Vec<String> = display_content
.trim()
.lines()
.map(|s| s.to_string())
.collect();
let content = formatted_lines.join("\n");
let content_hash = Self::message_content_hash(&role, &content);
if !is_streaming
&& let Some(entry) = self.message_line_cache.get(&message_id)
&& entry.wrap_width == content_width
&& entry.show_role_labels == show_role_labels
&& entry.theme_name == theme.name
&& entry.content_hash == content_hash
{
return entry.lines.clone();
}
let mut rendered: Vec<Line<'static>> = Vec::new();
let content_style = Self::content_style(theme, &role);
if show_role_labels {
let (emoji, name) = match role {
Role::User => ("👤 ", "You: "),
Role::Assistant => ("🤖 ", "Assistant: "),
Role::System => ("⚙️ ", "System: "),
Role::Tool => ("🔧 ", "Tool: "),
};
let mut role_line_spans = vec![
Span::raw(emoji),
Span::styled(
name.to_string(),
Self::role_style(theme, &role).add_modifier(Modifier::BOLD),
),
];
if matches!(role, Role::Assistant)
&& is_streaming
&& is_last_message
&& !loading_indicator.is_empty()
{
role_line_spans.push(Span::styled(
format!(" {}", loading_indicator),
Style::default().fg(theme.info),
));
}
rendered.push(Line::from(role_line_spans));
let indent = " ";
let available_width = content_width.saturating_sub(2);
let chunks: Vec<Cow<'_, str>> = if available_width > 0 {
wrap(content.as_str(), available_width)
} else {
Vec::new()
};
let last_index = chunks.len().saturating_sub(1);
for (chunk_idx, seg) in chunks.into_iter().enumerate() {
let mut spans = vec![Span::styled(
format!("{indent}{}", seg.into_owned()),
content_style,
)];
if chunk_idx == last_index && is_streaming {
spans.push(Span::styled("", Style::default().fg(theme.cursor)));
}
rendered.push(Line::from(spans));
}
} else {
let chunks: Vec<Cow<'_, str>> = if content_width > 0 {
wrap(content.as_str(), content_width)
} else {
Vec::new()
};
let last_index = chunks.len().saturating_sub(1);
for (chunk_idx, seg) in chunks.into_iter().enumerate() {
let mut spans = vec![Span::styled(seg.into_owned(), content_style)];
if chunk_idx == last_index && is_streaming {
spans.push(Span::styled("", Style::default().fg(theme.cursor)));
}
rendered.push(Line::from(spans));
}
}
if !is_streaming {
self.message_line_cache.insert(
message_id,
MessageCacheEntry {
theme_name: theme.name.clone(),
wrap_width: content_width,
show_role_labels,
content_hash,
lines: rendered.clone(),
},
);
}
rendered
}
pub fn apply_chat_scrollback_trim(&mut self, removed: usize, remaining: usize) {
if removed == 0 {
self.chat_line_offset = 0;
@@ -807,11 +1011,13 @@ impl ChatApp {
/// Replaces the active theme and clears the rendered-line cache, since
/// cached lines bake in the previous theme's colors.
pub fn set_theme(&mut self, theme: Theme) {
    self.theme = theme;
    self.message_line_cache.clear();
}
pub fn switch_theme(&mut self, theme_name: &str) -> Result<()> {
if let Some(theme) = owlen_core::theme::get_theme(theme_name) {
self.theme = theme;
self.message_line_cache.clear();
// Save theme to config
self.controller.config_mut().ui.theme = theme_name.to_string();
if let Err(err) = config::save_config(&self.controller.config()) {
@@ -2700,6 +2906,8 @@ impl ChatApp {
);
self.error = None;
self.update_thinking_from_last_message();
self.message_line_cache.clear();
self.chat_line_offset = 0;
}
Err(e) => {
self.error = Some(format!("Failed to load session: {}", e));
@@ -2958,6 +3166,7 @@ impl ChatApp {
response,
} => {
self.controller.apply_stream_chunk(message_id, &response)?;
self.invalidate_message_cache(&message_id);
// Update thinking content in real-time during streaming
self.update_thinking_from_last_message();
@@ -2991,9 +3200,11 @@ impl ChatApp {
if let Some(id) = message_id {
self.streaming.remove(&id);
self.stream_tasks.remove(&id);
self.invalidate_message_cache(&id);
} else {
self.streaming.clear();
self.stream_tasks.clear();
self.message_line_cache.clear();
}
self.error = Some(message);
}
@@ -3744,6 +3955,7 @@ impl ChatApp {
cancel_error = Some(err.to_string());
}
self.streaming.remove(&message_id);
self.invalidate_message_cache(&message_id);
cancelled = true;
}
}
@@ -3801,6 +4013,7 @@ impl ChatApp {
self.agent_mode = false;
self.agent_running = false;
self.is_loading = false;
self.message_line_cache.clear();
// Ensure no orphaned stream tasks remain
for (_, handle) in self.stream_tasks.drain() {

View File

@@ -9,7 +9,7 @@ use tui_textarea::TextArea;
use unicode_segmentation::UnicodeSegmentation;
use unicode_width::UnicodeWidthStr;
use crate::chat_app::{ChatApp, HELP_TAB_COUNT, ModelSelectorItemKind};
use crate::chat_app::{ChatApp, HELP_TAB_COUNT, MessageRenderContext, ModelSelectorItemKind};
use owlen_core::model::DetailedModelInfo;
use owlen_core::types::{ModelInfo, Role};
use owlen_core::ui::{FocusedPanel, InputMode};
@@ -677,127 +677,54 @@ fn render_messages(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
let content_width = area.width.saturating_sub(4).max(20);
app.set_viewport_dimensions(viewport_height, usize::from(content_width));
let conversation = app.conversation();
let total_messages = app.message_count();
let mut formatter = app.formatter().clone();
// Reserve space for borders and the message indent so text fits within the block
formatter.set_wrap_width(usize::from(content_width));
// Build the lines for messages
let mut lines: Vec<Line> = Vec::new();
for (message_index, message) in conversation.messages.iter().enumerate() {
let role = &message.role;
let (emoji, name) = match role {
Role::User => ("👤 ", "You: "),
Role::Assistant => ("🤖 ", "Assistant: "),
Role::System => ("⚙️ ", "System: "),
Role::Tool => ("🔧 ", "Tool: "),
};
// Extract content without thinking tags for assistant messages
let content_to_display = if matches!(role, Role::Assistant) {
let (content_without_think, _) = formatter.extract_thinking(&message.content);
content_without_think
} else if matches!(role, Role::Tool) {
// Format tool results nicely
format_tool_output(&message.content)
} else {
message.content.clone()
};
let formatted: Vec<String> = content_to_display
.trim()
.lines()
.map(|s| s.to_string())
.collect();
let is_streaming = message
// Build the lines for messages using cached rendering
let mut lines: Vec<Line<'static>> = Vec::new();
let show_role_labels = formatter.show_role_labels();
for message_index in 0..total_messages {
let is_streaming = {
let conversation = app.conversation();
conversation.messages[message_index]
.metadata
.get("streaming")
.and_then(|v| v.as_bool())
.unwrap_or(false);
let show_role_labels = formatter.show_role_labels();
if show_role_labels {
// Role name line
let mut role_line_spans = vec![
Span::raw(emoji),
Span::styled(name, role_color(role, &theme).add_modifier(Modifier::BOLD)),
];
// Add loading indicator if applicable
if matches!(role, Role::Assistant)
&& app.get_loading_indicator() != ""
&& message_index == conversation.messages.len() - 1
&& is_streaming
{
role_line_spans.push(Span::styled(
format!(" {}", app.get_loading_indicator()),
Style::default().fg(theme.info),
));
}
lines.push(Line::from(role_line_spans));
// Join all formatted lines into single content string
let content = formatted.join("\n");
// Wrap content with available width minus indent (2 spaces)
let indent = " ";
let available_width = (content_width as usize).saturating_sub(2);
let chunks = if available_width > 0 {
wrap(&content, available_width)
} else {
vec![]
.unwrap_or(false)
};
let chunks_len = chunks.len();
for (i, seg) in chunks.into_iter().enumerate() {
let style = if matches!(role, Role::Tool) {
Style::default().fg(theme.tool_output)
} else {
Style::default()
};
let mut spans = vec![Span::styled(format!("{indent}{}", seg), style)];
if i == chunks_len - 1 && is_streaming {
spans.push(Span::styled("", Style::default().fg(theme.cursor)));
}
lines.push(Line::from(spans));
}
} else {
// No role labels - just show content
let content = formatted.join("\n");
let chunks = wrap(&content, content_width as usize);
let chunks_len = chunks.len();
for (i, seg) in chunks.into_iter().enumerate() {
let style = if matches!(role, Role::Tool) {
Style::default().fg(theme.tool_output)
} else {
Style::default()
};
let mut spans = vec![Span::styled(seg.into_owned(), style)];
if i == chunks_len - 1 && is_streaming {
spans.push(Span::styled("", Style::default().fg(theme.cursor)));
}
lines.push(Line::from(spans));
}
}
// Add an empty line after each message, except the last one
if message_index < conversation.messages.len() - 1 {
lines.push(Line::from(""));
let message_lines = app.render_message_lines_cached(
message_index,
MessageRenderContext::new(
&mut formatter,
show_role_labels,
content_width as usize,
message_index + 1 == total_messages,
is_streaming,
app.get_loading_indicator(),
&theme,
),
);
lines.extend(message_lines);
if message_index + 1 < total_messages {
lines.push(Line::from(String::new()));
}
}
// Add loading indicator ONLY if we're loading and there are no messages at all,
// or if the last message is from the user (no Assistant response started yet)
let last_message_is_user = conversation
let last_message_is_user = if total_messages == 0 {
true
} else {
let conversation = app.conversation();
conversation
.messages
.last()
.map(|msg| matches!(msg.role, Role::User))
.unwrap_or(true);
.unwrap_or(true)
};
if app.get_loading_indicator() != "" && last_message_is_user {
let loading_spans = vec![
@@ -2704,15 +2631,6 @@ fn centered_rect(percent_x: u16, percent_y: u16, area: Rect) -> Rect {
.split(vertical[1])[1]
}
/// Themed style for a message role label.
/// NOTE(review): duplicates `ChatApp::role_style`; the commit header states
/// this helper is removed in favor of the cached renderer's version.
fn role_color(role: &Role, theme: &owlen_core::theme::Theme) -> Style {
    match role {
        Role::User => Style::default().fg(theme.user_message_role),
        Role::Assistant => Style::default().fg(theme.assistant_message_role),
        Role::System => Style::default().fg(theme.unfocused_panel_border),
        Role::Tool => Style::default().fg(theme.info),
    }
}
/// Format tool output JSON into a nice human-readable format
pub(crate) fn format_tool_output(content: &str) -> String {
// Try to parse as JSON