**feat:** update default model to qwen3:8b, simplify the chat streaming loop with proper error handling, and add a trailing newline after the response.

2025-11-01 16:37:35 +01:00
parent 64fd3206a2
commit b63d26f0cd
2 changed files with 3 additions and 2 deletions

@@ -52,7 +52,7 @@ async fn main() -> Result<()> {
     }];
     let mut stream = client.chat_stream(&msgs, &opts).await?;
-    while let Ok(Some(chunk)) = stream.try_next().await {
+    while let Some(chunk) = stream.try_next().await? {
         if let Some(m) = chunk.message {
             if let Some(c) = m.content {
                 print!("{c}");
@@ -63,5 +63,6 @@ async fn main() -> Result<()> {
             break;
         }
     }
+    println!(); // Newline after response
     Ok(())
 }
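
The loop rewrite is the substantive part: `TryStreamExt::try_next` returns `Result<Option<_>>`, so the old `while let Ok(Some(chunk))` pattern silently ended the loop on the first stream error, whereas `try_next().await?` propagates it to the caller. A minimal, self-contained sketch of the difference (not the repository's code; it uses `futures_util`, `anyhow`, and `tokio` over a synthetic chunk stream):

```rust
use anyhow::Result;
use futures_util::{stream, TryStreamExt};

#[tokio::main]
async fn main() -> Result<()> {
    // Synthetic fallible stream: two good chunks, then a transport error.
    let chunks: Vec<Result<&str>> = vec![
        Ok("hello "),
        Ok("world"),
        Err(anyhow::anyhow!("connection reset")),
        Ok("never reached"),
    ];
    let mut chunk_stream = stream::iter(chunks);

    // Pattern from the commit: `?` surfaces the stream error to the caller.
    // The old `while let Ok(Some(chunk)) = chunk_stream.try_next().await { .. }`
    // would swallow the Err, end the loop, and report success.
    while let Some(chunk) = chunk_stream.try_next().await? {
        print!("{chunk}");
    }

    // Only reached if the stream finished cleanly.
    println!(); // Newline after response
    Ok(())
}
```

With the sample error in place, this prints `hello world` and then exits with `Error: connection reset` instead of silently succeeding.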

@@ -20,7 +20,7 @@ fn default_ollama_url() -> String {
     "http://localhost:11434".into()
 }
 fn default_model() -> String {
-    "qwen2.5".into()
+    "qwen3:8b".into()
 }
 fn default_mode() -> String {
     "plan".into()