**feat:** update default model to qwen3:8b and simplify chat streaming loop with proper error handling and trailing newline.

2025-11-01 16:37:35 +01:00
parent 64fd3206a2
commit b63d26f0cd
2 changed files with 3 additions and 2 deletions


@@ -52,7 +52,7 @@ async fn main() -> Result<()> {
     }];
     let mut stream = client.chat_stream(&msgs, &opts).await?;
-    while let Ok(Some(chunk)) = stream.try_next().await {
+    while let Some(chunk) = stream.try_next().await? {
         if let Some(m) = chunk.message {
             if let Some(c) = m.content {
                 print!("{c}");
@@ -63,5 +63,6 @@ async fn main() -> Result<()> {
             break;
         }
     }
+    println!(); // Newline after response
     Ok(())
 }

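For context, a minimal sketch of how the streaming loop reads after this change. The struct shapes, the `done` flag guarding the `break`, and the helper name `print_chat_stream` are assumptions (only `chunk.message`, `m.content`, and the `break` appear in the hunks); the point is that `try_next().await?` now surfaces stream errors through the function's `Result`, whereas the old `while let Ok(Some(chunk))` pattern swallowed them and ended the loop as if the stream had finished cleanly.

```rust
use anyhow::Result;
use futures::TryStreamExt; // brings `try_next()` into scope for TryStream types

// Hypothetical chunk shapes mirroring only the fields used in the diff
// (`message`, `content`) plus an assumed `done` flag guarding the `break`.
struct ChatMessage {
    content: Option<String>,
}

struct ChatChunk {
    message: Option<ChatMessage>,
    done: bool,
}

async fn print_chat_stream<S>(mut stream: S) -> Result<()>
where
    S: futures::TryStream<Ok = ChatChunk, Error = anyhow::Error> + Unpin,
{
    // `try_next().await?` propagates transport/decode errors to the caller;
    // the old `while let Ok(Some(chunk))` form silently stopped looping on error.
    while let Some(chunk) = stream.try_next().await? {
        if let Some(m) = chunk.message {
            if let Some(c) = m.content {
                print!("{c}");
            }
        }
        if chunk.done {
            break;
        }
    }
    println!(); // trailing newline so the shell prompt starts on a fresh line
    Ok(())
}
```

The added `println!()` is what the commit message calls the trailing newline: without it, the shell prompt would appear on the same line as the last streamed token.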

@@ -20,7 +20,7 @@ fn default_ollama_url() -> String {
     "http://localhost:11434".into()
 }
 fn default_model() -> String {
-    "qwen2.5".into()
+    "qwen3:8b".into()
 }
 fn default_mode() -> String {
     "plan".into()