feat(tui): cache rendered message lines and throttle streaming redraws to improve TUI responsiveness

- Introduce `MessageRenderContext` and `MessageCacheEntry` for caching wrapped lines per message.
- Implement `render_message_lines_cached` using cache, invalidating on updates.
- Add role/style helpers and content hashing for cache validation.
- Throttle UI redraws in the main loop during active streaming (50 ms interval) and adjust idle tick timing.
- Update drawing logic to use cached rendering and manage draw intervals.
- Remove unused `role_color` function and adjust imports accordingly.
This commit is contained in:
2025-10-12 15:02:33 +02:00
parent acbfe47a4b
commit d2a193e5c1
4 changed files with 276 additions and 125 deletions

View File

@@ -10,6 +10,7 @@ use owlen_core::{
ui::{AppState, AutoScroll, FocusedPanel, InputMode},
};
use ratatui::style::{Color, Modifier, Style};
use ratatui::text::{Line, Span};
use textwrap::wrap;
use tokio::{sync::mpsc, task::JoinHandle};
use tui_textarea::{Input, TextArea};
@@ -23,7 +24,10 @@ use crate::state::{CommandPalette, ModelPaletteEntry};
use crate::ui::format_tool_output;
// Agent executor moved to separate binary `owlen-agent`. The TUI no longer directly
// imports `AgentExecutor` to avoid a circular dependency on `owlen-cli`.
use std::borrow::Cow;
use std::collections::hash_map::DefaultHasher;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::hash::{Hash, Hasher};
use std::sync::Arc;
const ONBOARDING_STATUS_LINE: &str =
@@ -151,6 +155,7 @@ pub struct ChatApp {
model_info_viewport_height: usize, // Cached viewport height for the info panel
expanded_provider: Option<String>, // Which provider group is currently expanded
current_provider: String, // Provider backing the active session
message_line_cache: HashMap<Uuid, MessageCacheEntry>, // Cached rendered lines per message
auto_scroll: AutoScroll, // Auto-scroll state for message rendering
thinking_scroll: AutoScroll, // Auto-scroll state for thinking panel
viewport_height: usize, // Track the height of the messages viewport
@@ -208,6 +213,47 @@ pub struct ConsentDialogState {
pub callback_id: Uuid, // ID to match callback with the request
}
/// Cached, fully-wrapped render of a single chat message.
///
/// An entry is only valid while every input that affects rendering is
/// unchanged; the validation fields below are compared against the current
/// render parameters before the cached `lines` are reused.
#[derive(Clone)]
struct MessageCacheEntry {
    // Name of the theme the lines were styled with (colors are baked in).
    theme_name: String,
    // Wrap width the lines were produced at.
    wrap_width: usize,
    // Whether role labels ("You:", "Assistant:") were rendered.
    show_role_labels: bool,
    // Hash of role + processed content; detects message edits.
    content_hash: u64,
    // The pre-wrapped, owned lines ready to hand to ratatui.
    lines: Vec<Line<'static>>,
}
/// Per-call parameters for rendering one message, bundled to keep
/// `render_message_lines_cached`'s signature manageable.
pub(crate) struct MessageRenderContext<'a> {
    // Formatter used to strip <think> sections from assistant messages.
    formatter: &'a mut owlen_core::formatting::MessageFormatter,
    // Render "You:" / "Assistant:" header lines when true.
    show_role_labels: bool,
    // Width available for wrapped content, in terminal columns.
    content_width: usize,
    // True when this is the final message of the conversation.
    is_last_message: bool,
    // True while this message is still receiving stream chunks.
    is_streaming: bool,
    // Spinner/indicator text shown next to a streaming assistant label.
    loading_indicator: &'a str,
    // Active theme providing all colors.
    theme: &'a Theme,
}
impl<'a> MessageRenderContext<'a> {
    /// Bundle the rendering parameters for a single message.
    ///
    /// Pure field-for-field constructor; see the struct's field comments
    /// for the meaning of each argument.
    pub(crate) fn new(
        formatter: &'a mut owlen_core::formatting::MessageFormatter,
        show_role_labels: bool,
        content_width: usize,
        is_last_message: bool,
        is_streaming: bool,
        loading_indicator: &'a str,
        theme: &'a Theme,
    ) -> Self {
        Self {
            formatter,
            show_role_labels,
            content_width,
            is_last_message,
            is_streaming,
            loading_indicator,
            theme,
        }
    }
}
impl ChatApp {
pub async fn new(
controller: SessionController,
@@ -248,6 +294,7 @@ impl ChatApp {
model_info_viewport_height: 0,
expanded_provider: None,
current_provider,
message_line_cache: HashMap::new(),
auto_scroll: AutoScroll::default(),
thinking_scroll: AutoScroll::default(),
viewport_height: 10, // Default viewport height, will be updated during rendering
@@ -759,6 +806,163 @@ impl ChatApp {
}
}
/// Style for a message's role label, colored per the active theme.
fn role_style(theme: &Theme, role: &Role) -> Style {
    // Select the theme color first, then build the style once.
    let color = match role {
        Role::User => theme.user_message_role,
        Role::Assistant => theme.assistant_message_role,
        Role::System => theme.unfocused_panel_border,
        Role::Tool => theme.info,
    };
    Style::default().fg(color)
}
/// Style for a message's body text.
///
/// Tool output gets the theme's dedicated tool color; every other role
/// renders with the default (unstyled) text style.
fn content_style(theme: &Theme, role: &Role) -> Style {
    match role {
        Role::Tool => Style::default().fg(theme.tool_output),
        _ => Style::default(),
    }
}
/// Hash a message's role + processed content for cache validation.
///
/// The digest is only ever compared against hashes produced by this same
/// function within one process (it is never persisted), so the exact
/// algorithm is an internal detail. Hashing the enum discriminant instead
/// of `role.to_string()` avoids allocating a `String` on every call —
/// this runs once per message on each redraw.
fn message_content_hash(role: &Role, content: &str) -> u64 {
    let mut hasher = DefaultHasher::new();
    std::mem::discriminant(role).hash(&mut hasher);
    content.hash(&mut hasher);
    hasher.finish()
}
/// Drop the cached rendered lines for one message so the next draw
/// re-renders it (called whenever a stream chunk mutates that message).
fn invalidate_message_cache(&mut self, id: &Uuid) {
    self.message_line_cache.remove(id);
}
pub(crate) fn render_message_lines_cached(
&mut self,
message_index: usize,
ctx: MessageRenderContext<'_>,
) -> Vec<Line<'static>> {
let MessageRenderContext {
formatter,
show_role_labels,
content_width,
is_last_message,
is_streaming,
loading_indicator,
theme,
} = ctx;
let (message_id, role, raw_content) = {
let conversation = self.conversation();
let message = &conversation.messages[message_index];
(message.id, message.role.clone(), message.content.clone())
};
let display_content = if matches!(role, Role::Assistant) {
formatter.extract_thinking(&raw_content).0
} else if matches!(role, Role::Tool) {
format_tool_output(&raw_content)
} else {
raw_content
};
let formatted_lines: Vec<String> = display_content
.trim()
.lines()
.map(|s| s.to_string())
.collect();
let content = formatted_lines.join("\n");
let content_hash = Self::message_content_hash(&role, &content);
if !is_streaming
&& let Some(entry) = self.message_line_cache.get(&message_id)
&& entry.wrap_width == content_width
&& entry.show_role_labels == show_role_labels
&& entry.theme_name == theme.name
&& entry.content_hash == content_hash
{
return entry.lines.clone();
}
let mut rendered: Vec<Line<'static>> = Vec::new();
let content_style = Self::content_style(theme, &role);
if show_role_labels {
let (emoji, name) = match role {
Role::User => ("👤 ", "You: "),
Role::Assistant => ("🤖 ", "Assistant: "),
Role::System => ("⚙️ ", "System: "),
Role::Tool => ("🔧 ", "Tool: "),
};
let mut role_line_spans = vec![
Span::raw(emoji),
Span::styled(
name.to_string(),
Self::role_style(theme, &role).add_modifier(Modifier::BOLD),
),
];
if matches!(role, Role::Assistant)
&& is_streaming
&& is_last_message
&& !loading_indicator.is_empty()
{
role_line_spans.push(Span::styled(
format!(" {}", loading_indicator),
Style::default().fg(theme.info),
));
}
rendered.push(Line::from(role_line_spans));
let indent = " ";
let available_width = content_width.saturating_sub(2);
let chunks: Vec<Cow<'_, str>> = if available_width > 0 {
wrap(content.as_str(), available_width)
} else {
Vec::new()
};
let last_index = chunks.len().saturating_sub(1);
for (chunk_idx, seg) in chunks.into_iter().enumerate() {
let mut spans = vec![Span::styled(
format!("{indent}{}", seg.into_owned()),
content_style,
)];
if chunk_idx == last_index && is_streaming {
spans.push(Span::styled("", Style::default().fg(theme.cursor)));
}
rendered.push(Line::from(spans));
}
} else {
let chunks: Vec<Cow<'_, str>> = if content_width > 0 {
wrap(content.as_str(), content_width)
} else {
Vec::new()
};
let last_index = chunks.len().saturating_sub(1);
for (chunk_idx, seg) in chunks.into_iter().enumerate() {
let mut spans = vec![Span::styled(seg.into_owned(), content_style)];
if chunk_idx == last_index && is_streaming {
spans.push(Span::styled("", Style::default().fg(theme.cursor)));
}
rendered.push(Line::from(spans));
}
}
if !is_streaming {
self.message_line_cache.insert(
message_id,
MessageCacheEntry {
theme_name: theme.name.clone(),
wrap_width: content_width,
show_role_labels,
content_hash,
lines: rendered.clone(),
},
);
}
rendered
}
pub fn apply_chat_scrollback_trim(&mut self, removed: usize, remaining: usize) {
if removed == 0 {
self.chat_line_offset = 0;
@@ -807,11 +1011,13 @@ impl ChatApp {
/// Replace the active theme.
///
/// All cached message lines are dropped because cached spans bake in the
/// previous theme's colors and would render stale styling.
pub fn set_theme(&mut self, theme: Theme) {
    self.theme = theme;
    self.message_line_cache.clear();
}
pub fn switch_theme(&mut self, theme_name: &str) -> Result<()> {
if let Some(theme) = owlen_core::theme::get_theme(theme_name) {
self.theme = theme;
self.message_line_cache.clear();
// Save theme to config
self.controller.config_mut().ui.theme = theme_name.to_string();
if let Err(err) = config::save_config(&self.controller.config()) {
@@ -2700,6 +2906,8 @@ impl ChatApp {
);
self.error = None;
self.update_thinking_from_last_message();
self.message_line_cache.clear();
self.chat_line_offset = 0;
}
Err(e) => {
self.error = Some(format!("Failed to load session: {}", e));
@@ -2958,6 +3166,7 @@ impl ChatApp {
response,
} => {
self.controller.apply_stream_chunk(message_id, &response)?;
self.invalidate_message_cache(&message_id);
// Update thinking content in real-time during streaming
self.update_thinking_from_last_message();
@@ -2991,9 +3200,11 @@ impl ChatApp {
if let Some(id) = message_id {
self.streaming.remove(&id);
self.stream_tasks.remove(&id);
self.invalidate_message_cache(&id);
} else {
self.streaming.clear();
self.stream_tasks.clear();
self.message_line_cache.clear();
}
self.error = Some(message);
}
@@ -3744,6 +3955,7 @@ impl ChatApp {
cancel_error = Some(err.to_string());
}
self.streaming.remove(&message_id);
self.invalidate_message_cache(&message_id);
cancelled = true;
}
}
@@ -3801,6 +4013,7 @@ impl ChatApp {
self.agent_mode = false;
self.agent_running = false;
self.is_loading = false;
self.message_line_cache.clear();
// Ensure no orphaned stream tasks remain
for (_, handle) in self.stream_tasks.drain() {