From f8fb5ce172b1f173ea60936ec1ed7280d5f213b0 Mon Sep 17 00:00:00 2001
From: vikingowl
Date: Sun, 4 Jan 2026 00:41:42 +0100
Subject: [PATCH] fix: keep processing indicator visible until LLM starts
 streaming

Clear 'Processing...' text only when first token arrives, not before the
LLM request. This keeps the indicator visible during prompt resolution,
RAG retrieval, and LLM initialization.
---
 .../src/lib/components/chat/ChatWindow.svelte | 16 ++++++++++++----
 frontend/src/routes/+page.svelte              | 16 ++++++++++++----
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/frontend/src/lib/components/chat/ChatWindow.svelte b/frontend/src/lib/components/chat/ChatWindow.svelte
index 160f350..3df5299 100644
--- a/frontend/src/lib/components/chat/ChatWindow.svelte
+++ b/frontend/src/lib/components/chat/ChatWindow.svelte
@@ -577,10 +577,8 @@
 		const assistantMessageId = existingMessageId || chatState.startStreaming();
 		abortController = new AbortController();
 
-		// Clear any existing content (e.g., "Processing..." text) before LLM starts streaming
-		if (existingMessageId) {
-			chatState.setStreamContent('');
-		}
+		// Track if we need to clear the "Processing..." text on first token
+		let needsClearOnFirstToken = !!existingMessageId;
 
 		// Start streaming metrics tracking
 		streamingMetricsState.startStream();
@@ -669,6 +667,11 @@
 			},
 			{
 				onThinkingToken: (token) => {
+					// Clear "Processing..." on first token
+					if (needsClearOnFirstToken) {
+						chatState.setStreamContent('');
+						needsClearOnFirstToken = false;
+					}
 					// Accumulate thinking and update the message
 					if (!streamingThinking) {
 						// Start the thinking block
@@ -680,6 +683,11 @@
 					streamingMetricsState.incrementTokens();
 				},
 				onToken: (token) => {
+					// Clear "Processing..." on first token
+					if (needsClearOnFirstToken) {
+						chatState.setStreamContent('');
+						needsClearOnFirstToken = false;
+					}
 					// Close thinking block when content starts
 					if (streamingThinking && !thinkingClosed) {
 						chatState.appendToStreaming('\n\n');
diff --git a/frontend/src/routes/+page.svelte b/frontend/src/routes/+page.svelte
index 212d5ac..2d53211 100644
--- a/frontend/src/routes/+page.svelte
+++ b/frontend/src/routes/+page.svelte
@@ -234,10 +234,8 @@
 			assistantMessageId = chatState.startStreaming();
 		}
 
-		// Clear any existing content (e.g., "Processing..." text) before LLM starts streaming
-		if (hadProcessingMessage) {
-			chatState.setStreamContent('');
-		}
+		// Track if we need to clear the "Processing..." text on first token
+		let needsClearOnFirstToken = hadProcessingMessage;
 
 		// Start streaming metrics tracking
 		streamingMetricsState.startStream();
@@ -297,6 +295,11 @@
 			{ model: chatModel, messages, tools, think: useNativeThinking, options: settingsState.apiParameters },
 			{
 				onThinkingToken: (token) => {
+					// Clear "Processing..." on first token
+					if (needsClearOnFirstToken) {
+						chatState.setStreamContent('');
+						needsClearOnFirstToken = false;
+					}
 					// Accumulate thinking and update the message
 					if (!streamingThinking) {
 						// Start the thinking block
@@ -307,6 +310,11 @@
 					streamingMetricsState.incrementTokens();
 				},
 				onToken: (token) => {
+					// Clear "Processing..." on first token
+					if (needsClearOnFirstToken) {
+						chatState.setStreamContent('');
+						needsClearOnFirstToken = false;
+					}
 					// Close thinking block when content starts
 					if (streamingThinking && !thinkingClosed) {
 						chatState.appendToStreaming('\n\n');