fix: keep processing indicator visible until LLM starts streaming
Clear 'Processing...' text only when first token arrives, not before the LLM request. This keeps the indicator visible during prompt resolution, RAG retrieval, and LLM initialization.
This commit is contained in:
@@ -577,10 +577,8 @@
     const assistantMessageId = existingMessageId || chatState.startStreaming();
     abortController = new AbortController();

-    // Clear any existing content (e.g., "Processing..." text) before LLM starts streaming
-    if (existingMessageId) {
-      chatState.setStreamContent('');
-    }
+    // Track if we need to clear the "Processing..." text on first token
+    let needsClearOnFirstToken = !!existingMessageId;

     // Start streaming metrics tracking
     streamingMetricsState.startStream();
@@ -669,6 +667,11 @@
       },
       {
         onThinkingToken: (token) => {
+          // Clear "Processing..." on first token
+          if (needsClearOnFirstToken) {
+            chatState.setStreamContent('');
+            needsClearOnFirstToken = false;
+          }
           // Accumulate thinking and update the message
           if (!streamingThinking) {
             // Start the thinking block
@@ -680,6 +683,11 @@
           streamingMetricsState.incrementTokens();
         },
         onToken: (token) => {
+          // Clear "Processing..." on first token
+          if (needsClearOnFirstToken) {
+            chatState.setStreamContent('');
+            needsClearOnFirstToken = false;
+          }
           // Close thinking block when content starts
           if (streamingThinking && !thinkingClosed) {
             chatState.appendToStreaming('</think>\n\n');
@@ -234,10 +234,8 @@
       assistantMessageId = chatState.startStreaming();
     }

-    // Clear any existing content (e.g., "Processing..." text) before LLM starts streaming
-    if (hadProcessingMessage) {
-      chatState.setStreamContent('');
-    }
+    // Track if we need to clear the "Processing..." text on first token
+    let needsClearOnFirstToken = hadProcessingMessage;

     // Start streaming metrics tracking
     streamingMetricsState.startStream();
@@ -297,6 +295,11 @@
       { model: chatModel, messages, tools, think: useNativeThinking, options: settingsState.apiParameters },
       {
         onThinkingToken: (token) => {
+          // Clear "Processing..." on first token
+          if (needsClearOnFirstToken) {
+            chatState.setStreamContent('');
+            needsClearOnFirstToken = false;
+          }
           // Accumulate thinking and update the message
           if (!streamingThinking) {
             // Start the thinking block
@@ -307,6 +310,11 @@
           streamingMetricsState.incrementTokens();
         },
         onToken: (token) => {
+          // Clear "Processing..." on first token
+          if (needsClearOnFirstToken) {
+            chatState.setStreamContent('');
+            needsClearOnFirstToken = false;
+          }
           // Close thinking block when content starts
          if (streamingThinking && !thinkingClosed) {
             chatState.appendToStreaming('</think>\n\n');
Reference in New Issue
Block a user