Enhance TUI thinking panel: add dynamic height calculation, implement real-time updates from assistant messages, and refine thinking content rendering logic.

This commit is contained in:
2025-09-30 01:07:00 +02:00
parent 004fc0ba5e
commit 8409bf646a
3 changed files with 194 additions and 13 deletions

View File

@@ -1,5 +1,4 @@
use crate::types::Message;
use textwrap::{wrap, Options};
/// Formats messages for display across different clients.
#[derive(Debug, Clone)]
@@ -38,4 +37,55 @@ impl MessageFormatter {
/// Splits a message's trimmed content into one owned `String` per line.
pub fn format_message(&self, message: &Message) -> Vec<String> {
    let trimmed = message.content.trim();
    trimmed.lines().map(String::from).collect()
}
/// Extract thinking content from <think> tags, returning (content_without_think, thinking_content)
/// This handles both complete and incomplete (streaming) think tags.
/// Splits `content` into `(visible_text, thinking_text)` by extracting the
/// bodies of `<think>…</think>` tags.
///
/// Multiple closed think blocks are trimmed and joined with a blank line.
/// An unclosed `<think>` (streaming output still in progress) consumes the
/// remainder of the input verbatim as thinking content. The second element
/// is `None` when no thinking text was found.
pub fn extract_thinking(&self, content: &str) -> (String, Option<String>) {
    // Named tags instead of magic offsets (7 / 8) so the lengths can never
    // drift out of sync with the tag strings.
    const OPEN_TAG: &str = "<think>";
    const CLOSE_TAG: &str = "</think>";

    let mut visible = String::new();
    let mut thinking = String::new();
    let mut cursor = 0;

    while let Some(rel_start) = content[cursor..].find(OPEN_TAG) {
        let tag_start = cursor + rel_start;
        // Everything before the opening tag is ordinary visible content.
        visible.push_str(&content[cursor..tag_start]);
        let body_start = tag_start + OPEN_TAG.len();

        // `</think>` cannot overlap `<think>`, so searching from the body
        // start is equivalent to searching from the tag start.
        if let Some(rel_end) = content[body_start..].find(CLOSE_TAG) {
            let body_end = body_start + rel_end;
            if !thinking.is_empty() {
                thinking.push_str("\n\n");
            }
            thinking.push_str(content[body_start..body_end].trim());
            cursor = body_end + CLOSE_TAG.len();
        } else {
            // Unclosed tag: streaming content. Keep the partial body
            // untrimmed so successive chunks append smoothly.
            if !thinking.is_empty() {
                thinking.push_str("\n\n");
            }
            thinking.push_str(&content[body_start..]);
            cursor = content.len();
            break;
        }
    }

    // Any remaining text after the last closed tag stays visible.
    visible.push_str(&content[cursor..]);
    let thinking = (!thinking.is_empty()).then_some(thinking);
    (visible, thinking)
}
}

View File

@@ -1,7 +1,7 @@
use anyhow::Result;
use owlen_core::{
session::{SessionController, SessionOutcome},
types::{ChatParameters, ChatResponse, Conversation, ModelInfo},
types::{ChatParameters, ChatResponse, Conversation, ModelInfo, Role},
};
use ratatui::style::{Modifier, Style};
use tokio::sync::mpsc;
@@ -98,7 +98,9 @@ pub struct ChatApp {
pub selected_provider_index: usize, // Index into the available_providers list
pub selected_model: Option<usize>, // Index into the *filtered* models list
auto_scroll: AutoScroll, // Auto-scroll state for message rendering
thinking_scroll: AutoScroll, // Auto-scroll state for thinking panel
viewport_height: usize, // Track the height of the messages viewport
thinking_viewport_height: usize, // Track the height of the thinking viewport
content_width: usize, // Track the content width for line wrapping calculations
session_tx: mpsc::UnboundedSender<SessionEvent>,
streaming: HashSet<Uuid>,
@@ -106,6 +108,7 @@ pub struct ChatApp {
pending_llm_request: bool, // Flag to indicate LLM request needs to be processed
loading_animation_frame: usize, // Frame counter for loading animation
is_loading: bool, // Whether we're currently loading a response
current_thinking: Option<String>, // Current thinking content from last assistant message
}
impl ChatApp {
@@ -125,7 +128,9 @@ impl ChatApp {
selected_provider_index: 0,
selected_model: None,
auto_scroll: AutoScroll::default(),
thinking_scroll: AutoScroll::default(),
viewport_height: 10, // Default viewport height, will be updated during rendering
thinking_viewport_height: 4, // Default thinking viewport height
content_width: 80, // Default content width, will be updated during rendering
session_tx,
streaming: std::collections::HashSet::new(),
@@ -133,6 +138,7 @@ impl ChatApp {
pending_llm_request: false,
loading_animation_frame: 0,
is_loading: false,
current_thinking: None,
};
(app, session_rx)
@@ -185,6 +191,18 @@ impl ChatApp {
self.auto_scroll.scroll
}
/// Read-only access to the thinking panel's auto-scroll state.
pub fn thinking_scroll(&self) -> &AutoScroll {
&self.thinking_scroll
}
/// Mutable access to the thinking panel's auto-scroll state, used by the
/// renderer to record content length and viewport size each frame.
pub fn thinking_scroll_mut(&mut self) -> &mut AutoScroll {
&mut self.thinking_scroll
}
/// Current scroll offset (in wrapped lines) of the thinking panel.
pub fn thinking_scroll_position(&self) -> usize {
self.thinking_scroll.scroll
}
/// Number of messages in the active conversation.
pub fn message_count(&self) -> usize {
self.controller.conversation().messages.len()
}
@@ -454,6 +472,10 @@ impl ChatApp {
response,
} => {
self.controller.apply_stream_chunk(message_id, &response)?;
// Update thinking content in real-time during streaming
self.update_thinking_from_last_message();
// Auto-scroll will handle this in the render loop
if response.is_final {
self.streaming.remove(&message_id);
@@ -647,6 +669,10 @@ impl ChatApp {
self.content_width = content_width;
}
/// Records the thinking panel's inner height (borders excluded), set by the
/// renderer each frame so scroll calculations match the actual layout.
pub fn set_thinking_viewport_height(&mut self, height: usize) {
self.thinking_viewport_height = height;
}
pub fn start_loading_animation(&mut self) {
self.is_loading = true;
self.loading_animation_frame = 0;
@@ -680,6 +706,22 @@ impl ChatApp {
}
}
/// Thinking content extracted from the latest assistant message, if any.
pub fn current_thinking(&self) -> Option<&String> {
self.current_thinking.as_ref()
}
/// Refreshes `current_thinking` from the most recent assistant message.
///
/// This runs on every stream chunk and on every render pass, so it must not
/// clobber user scroll state when nothing changed: the previous version set
/// `stick_to_bottom = true` unconditionally, which yanked the thinking panel
/// back to the bottom every frame and made it impossible to scroll up.
pub fn update_thinking_from_last_message(&mut self) {
    // Extract thinking from the newest assistant message, if one exists.
    let new_thinking = self
        .conversation()
        .messages
        .iter()
        .rev()
        .find(|m| matches!(m.role, Role::Assistant))
        .and_then(|msg| self.formatter().extract_thinking(&msg.content).1);

    // Only touch state (and re-enable auto-scroll) when the content
    // actually changed, so user-initiated scrolling survives redraws.
    if new_thinking != self.current_thinking {
        self.current_thinking = new_thinking;
        if self.current_thinking.is_some() {
            self.thinking_scroll.stick_to_bottom = true;
        }
    }
}
fn spawn_stream(&mut self, message_id: Uuid, mut stream: owlen_core::provider::ChatStream) {

View File

@@ -11,6 +11,9 @@ use crate::chat_app::{ChatApp, InputMode};
use owlen_core::types::Role;
pub fn render_chat(frame: &mut Frame<'_>, app: &mut ChatApp) {
// Update thinking content from last message
app.update_thinking_from_last_message();
// Calculate dynamic input height based on textarea content
let available_width = frame.area().width;
let input_height = if matches!(app.mode(), InputMode::Editing) {
@@ -30,20 +33,51 @@ pub fn render_chat(frame: &mut Frame<'_>, app: &mut ChatApp) {
(visual_lines as u16).min(10) + 2 // +2 for borders
};
// Calculate thinking section height
let thinking_height = if let Some(thinking) = app.current_thinking() {
let content_width = available_width.saturating_sub(4);
let visual_lines = calculate_wrapped_line_count(
thinking.lines(),
content_width,
);
(visual_lines as u16).min(6) + 2 // +2 for borders, max 6 lines
} else {
0
};
let mut constraints = vec![
Constraint::Length(4), // Header
Constraint::Min(8), // Messages
];
if thinking_height > 0 {
constraints.push(Constraint::Length(thinking_height)); // Thinking
}
constraints.push(Constraint::Length(input_height)); // Input
constraints.push(Constraint::Length(3)); // Status
let layout = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Length(4), // Header
Constraint::Min(8), // Messages
Constraint::Length(input_height), // Input
Constraint::Length(3), // Status
])
.constraints(constraints)
.split(frame.area());
render_header(frame, layout[0], app);
render_messages(frame, layout[1], app);
render_input(frame, layout[2], app);
render_status(frame, layout[3], app);
let mut idx = 0;
render_header(frame, layout[idx], app);
idx += 1;
render_messages(frame, layout[idx], app);
idx += 1;
if thinking_height > 0 {
render_thinking(frame, layout[idx], app);
idx += 1;
}
render_input(frame, layout[idx], app);
idx += 1;
render_status(frame, layout[idx], app);
match app.mode() {
InputMode::ProviderSelection => render_provider_selector(frame, app),
@@ -420,7 +454,15 @@ fn render_messages(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
Role::System => ("⚙️ ", "System: "),
};
let formatted = formatter.format_message(message);
// Extract content without thinking tags for assistant messages
let content_to_display = if matches!(role, Role::Assistant) {
let (content_without_think, _) = formatter.extract_thinking(&message.content);
content_without_think
} else {
message.content.clone()
};
let formatted: Vec<String> = content_to_display.trim().lines().map(|s| s.to_string()).collect();
let is_streaming = message
.metadata
.get("streaming")
@@ -532,6 +574,53 @@ fn render_messages(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
frame.render_widget(paragraph, area);
}
/// Renders the collapsible "💭 Thinking" panel showing the assistant's
/// extracted `<think>` content, dimmed and italicized, with auto-scroll.
fn render_thinking(frame: &mut Frame<'_>, area: Rect, app: &mut ChatApp) {
    // Nothing to draw when the last assistant message had no thinking text.
    let Some(thinking) = app.current_thinking() else {
        return;
    };

    let viewport_height = area.height.saturating_sub(2) as usize; // inside the borders
    let content_width = area.width.saturating_sub(4) as usize; // borders + padding

    // Build owned lines first so the shared borrow of `app` (through
    // `thinking`) ends before we take mutable borrows below. This replaces
    // the previous full `.cloned()` of the thinking string on every frame.
    let lines: Vec<Line> = wrap(thinking, content_width)
        .into_iter()
        .map(|seg| {
            Line::from(Span::styled(
                seg.into_owned(),
                Style::default()
                    .fg(Color::DarkGray)
                    .add_modifier(Modifier::ITALIC),
            ))
        })
        .collect();

    app.set_thinking_viewport_height(viewport_height);

    // Feed accurate content/viewport sizes to the auto-scroll state.
    let thinking_scroll = app.thinking_scroll_mut();
    thinking_scroll.content_len = lines.len();
    thinking_scroll.on_viewport(viewport_height);
    let scroll_position = app.thinking_scroll_position().min(u16::MAX as usize) as u16;

    let paragraph = Paragraph::new(lines)
        .block(
            Block::default()
                .title(Span::styled(
                    " 💭 Thinking ",
                    Style::default()
                        .fg(Color::DarkGray)
                        .add_modifier(Modifier::ITALIC),
                ))
                .borders(Borders::ALL)
                .border_style(Style::default().fg(Color::DarkGray)),
        )
        .scroll((scroll_position, 0))
        .wrap(Wrap { trim: false });
    frame.render_widget(paragraph, area);
}
fn render_input(frame: &mut Frame<'_>, area: Rect, app: &ChatApp) {
let title = match app.mode() {
InputMode::Editing => " Input (Enter=send · Ctrl+J=newline · Esc=exit input mode) ",