feat(workspace): initialize Rust workspace structure for v2

Set up Cargo workspace with initial crates:
- cli: main application entry point with chat streaming tests
- config: configuration management
- llm/ollama: Ollama client integration with NDJSON support

Includes .gitignore for Rust and JetBrains IDEs.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 2a651ebd7b (parent 491fd049b0)
Date: 2025-11-01 16:30:09 +01:00
15 changed files with 513 additions and 0 deletions
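Note: the workspace root Cargo.toml is among the 15 changed files but is not included in this view. Since the member crates below inherit edition, license and rust-version via `*.workspace = true`, it presumably looks roughly like the sketch below (the member list follows the crates shown here; the concrete field values and resolver are assumptions, not taken from the commit):

[workspace]
resolver = "2"                                              # assumed
members = ["crates/cli", "crates/config", "crates/llm/ollama"]

[workspace.package]
edition = "2021"       # assumed
license = "MIT"        # assumed; the actual license is not shown in this view
rust-version = "1.80"  # assumed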

crates/cli/.gitignore

@@ -0,0 +1,22 @@
/target
### Rust template
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
### rust-analyzer template
# Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules)
rust-project.json

crates/cli/Cargo.toml

@@ -0,0 +1,22 @@
[package]
name = "code"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true

[dependencies]
clap = { version = "4.5", features = ["derive"] }
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
color-eyre = "0.6"
llm-ollama = { path = "../llm/ollama" }
config-agent = { package = "config-agent", path = "../config" }
futures-util = "0.3.31"

[dev-dependencies]
assert_cmd = "2.0"
predicates = "3.1"
httpmock = "0.7"
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }

crates/cli/src/main.rs

@@ -0,0 +1,67 @@
use clap::Parser;
use color_eyre::eyre::Result;
use config_agent::load_settings;
use futures_util::TryStreamExt;
use llm_ollama::{OllamaClient, OllamaOptions, types::ChatMessage};
use std::io::{self, Write};

#[derive(Parser, Debug)]
#[command(name = "code", version, about = "Rust code-agent (Ollama)")]
struct Args {
/// Override Ollama base URL (local or cloud)
#[arg(long)]
ollama_url: Option<String>,
/// Model name
#[arg(long)]
model: Option<String>,
/// Print response only (headless-like). Parsed but not yet acted on by main.
#[arg(long)]
print: bool,
/// Prompt to send
#[arg()]
prompt: Vec<String>,
}

#[tokio::main]
async fn main() -> Result<()> {
color_eyre::install()?;
let args = Args::parse();
let prompt = if args.prompt.is_empty() {
"Say hello".to_string()
} else {
args.prompt.join(" ")
};
let settings = load_settings(None).unwrap_or_default();
let base_url = args.ollama_url.unwrap_or(settings.ollama_url);
let model = args.model.unwrap_or(settings.model);
let client = OllamaClient::new(base_url);
let opts = OllamaOptions {
model,
stream: true,
};
let msgs = vec![ChatMessage {
role: "user".into(),
content: prompt,
}];
let mut stream = client.chat_stream(&msgs, &opts).await?;
while let Ok(Some(chunk)) = stream.try_next().await {
if let Some(m) = chunk.message {
if let Some(c) = m.content {
print!("{c}");
io::stdout().flush()?;
}
}
if matches!(chunk.done, Some(true)) {
break;
}
}
Ok(())
}
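One caveat in the read loop above: `while let Ok(Some(chunk))` swallows any stream error and simply stops printing. A minimal variant that propagates the error instead (a sketch against the same `chat_stream` stream type, not what this commit ships):

// `?` converts the OllamaError into a color-eyre report instead of ending the loop silently.
while let Some(chunk) = stream.try_next().await? {
    if let Some(c) = chunk.message.and_then(|m| m.content) {
        print!("{c}");
        io::stdout().flush()?;
    }
    if chunk.done == Some(true) {
        break;
    }
}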

crates/cli/tests/ (CLI integration test; filename not shown in this view)

@@ -0,0 +1,39 @@
use assert_cmd::Command;
use httpmock::prelude::*;
use predicates::prelude::PredicateBooleanExt;

#[tokio::test]
async fn headless_streams_ndjson() {
let server = MockServer::start_async().await;
// Mock /api/chat with NDJSON lines
let body = serde_json::json!({
"model": "qwen2.5",
"messages": [{"role": "user", "content": "hello"}],
"stream": true
});
let response = concat!(
r#"{"message":{"role":"assistant","content":"Hel"}}"#,"\n",
r#"{"message":{"role":"assistant","content":"lo"}}"#,"\n",
r#"{"done":true}"#,"\n",
);
let _m = server.mock(|when, then| {
when.method(POST)
.path("/api/chat")
.json_body(body.clone());
then.status(200)
.header("content-type", "application/x-ndjson")
.body(response);
});
let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("code"));
cmd.arg("--ollama-url").arg(server.base_url())
.arg("--model").arg("qwen2.5")
.arg("--print")
.arg("hello");
cmd.assert()
.success()
.stdout(predicates::str::contains("Hello").count(1).or(predicates::str::contains("Hel").and(predicates::str::contains("lo"))));
}

crates/config/.gitignore

@@ -0,0 +1,22 @@
/target
### Rust template
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
### rust-analyzer template
# Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules)
rust-project.json

crates/config/Cargo.toml

@@ -0,0 +1,13 @@
[package]
name = "config-agent"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true

[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
directories = "5"
figment = { version = "0.10", features = ["toml", "env"] }
toml = "0.8"

crates/config/src/lib.rs

@@ -0,0 +1,55 @@
use directories::ProjectDirs;
use figment::{
Figment,
providers::{Env, Format, Serialized, Toml},
};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Settings {
#[serde(default = "default_ollama_url")]
pub ollama_url: String,
#[serde(default = "default_model")]
pub model: String,
#[serde(default = "default_mode")]
pub mode: String, // "plan" (read-only) for now
}

fn default_ollama_url() -> String {
"http://localhost:11434".into()
}

fn default_model() -> String {
"qwen2.5".into()
}

fn default_mode() -> String {
"plan".into()
}

impl Default for Settings {
fn default() -> Self {
Self {
ollama_url: default_ollama_url(),
model: default_model(),
mode: default_mode(),
}
}
}

pub fn load_settings(project_root: Option<&str>) -> Result<Settings, figment::Error> {
let mut fig = Figment::from(Serialized::defaults(Settings::default()))
.merge(Env::prefixed("CODE_").split("__"));
// User file: ~/.config/owlen/config.toml
if let Some(pd) = ProjectDirs::from("dev", "owlibou", "owlen") {
let user = pd.config_dir().join("config.toml");
fig = fig.merge(Toml::file(user));
}
// Project file: <root>/.owlen.toml
if let Some(root) = project_root {
fig = fig.merge(Toml::file(PathBuf::from(root).join(".owlen.toml")));
}
fig.extract()
}
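Resolution order here is defaults, then `CODE_*` environment variables, then `~/.config/owlen/config.toml`, then `<root>/.owlen.toml`, with later sources overriding earlier ones because each is `merge`d on top of the previous (Figment's `join` would keep the earlier values instead). A usage sketch, not part of this commit; the project path is a placeholder:

fn example_usage() -> Result<(), figment::Error> {
    // Merged settings: defaults < CODE_* env < user config.toml < <root>/.owlen.toml
    let settings = load_settings(Some("/path/to/project"))?;
    println!("model={} url={} mode={}", settings.model, settings.ollama_url, settings.mode);

    // Hard-coded defaults, with no file or environment lookup at all:
    let d = Settings::default();
    assert_eq!((d.model.as_str(), d.mode.as_str()), ("qwen2.5", "plan"));
    Ok(())
}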

crates/llm/ollama/.gitignore

@@ -0,0 +1,22 @@
/target
### Rust template
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
### rust-analyzer template
# Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules)
rust-project.json

crates/llm/ollama/Cargo.toml

@@ -0,0 +1,16 @@
[package]
name = "llm-ollama"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true

[dependencies]
reqwest = { version = "0.12", features = ["json", "stream"] }
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] } # "macros" is required for the #[tokio::test] attribute used in the smoke test
futures = "0.3"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "1"
bytes = "1"
tokio-stream = "0.1.17"

crates/llm/ollama/src/client.rs

@@ -0,0 +1,84 @@
use crate::types::{ChatMessage, ChatResponseChunk};
use futures::{Stream, TryStreamExt};
use reqwest::Client;
use serde::Serialize;
use thiserror::Error;

#[derive(Debug, Clone)]
pub struct OllamaClient {
http: Client,
base_url: String, // e.g. "http://localhost:11434"
}

#[derive(Debug, Clone, Default)]
pub struct OllamaOptions {
pub model: String,
pub stream: bool,
}

#[derive(Error, Debug)]
pub enum OllamaError {
#[error("http: {0}")]
Http(#[from] reqwest::Error),
#[error("json: {0}")]
Json(#[from] serde_json::Error),
#[error("protocol: {0}")]
Protocol(String),
}

impl OllamaClient {
pub fn new(base_url: impl Into<String>) -> Self {
Self {
http: Client::new(),
base_url: base_url.into().trim_end_matches('/').to_string(),
}
}
pub fn with_cloud() -> Self {
// Same API, different base
Self::new("https://ollama.com")
}
pub async fn chat_stream(
&self,
messages: &[ChatMessage],
opts: &OllamaOptions,
) -> Result<impl Stream<Item = Result<ChatResponseChunk, OllamaError>>, OllamaError> {
#[derive(Serialize)]
struct Body<'a> {
model: &'a str,
messages: &'a [ChatMessage],
stream: bool,
}
let url = format!("{}/api/chat", self.base_url);
let body = Body { model: &opts.model, messages, stream: true }; // always sent with stream: true here; opts.stream is not consulted for this endpoint
let resp = self.http.post(url).json(&body).send().await?;
let bytes_stream = resp.bytes_stream();
// NDJSON parser: split each chunk on '\n' and parse every nonempty line as JSON.
// Note: this assumes each HTTP chunk carries whole lines; a line split across two
// chunks would fail to parse (see the buffered variant sketched after this file).
let out = bytes_stream
.map_err(OllamaError::Http)
.map_ok(|bytes| {
// Convert the chunk to a UTF8 string and own it
let txt = String::from_utf8_lossy(&bytes).into_owned();
// Parse each nonempty line into a ChatResponseChunk
let results: Vec<Result<ChatResponseChunk, OllamaError>> = txt
.lines()
.filter_map(|line| {
let trimmed = line.trim();
if trimmed.is_empty() {
None
} else {
Some(
serde_json::from_str::<ChatResponseChunk>(trimmed)
.map_err(OllamaError::Json),
)
}
})
.collect();
futures::stream::iter(results)
})
.try_flatten(); // Stream<Item = Result<ChatResponseChunk, OllamaError>>
Ok(out)
}
}
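The per-chunk split above assumes every HTTP chunk from Ollama carries whole NDJSON lines; a line split across two chunks would surface as a `Json` error. Ollama typically flushes one JSON object per chunk, so this is fine for M0, but a buffered variant along these lines (a sketch, not part of this commit) would carry a partial trailing line over to the next chunk:

use futures::stream::{Stream, StreamExt, TryStreamExt};

// Sketch: buffer incomplete trailing lines between chunks. `bytes_stream` is the
// same reqwest byte stream used in chat_stream above.
fn ndjson_chunks<S>(bytes_stream: S) -> impl Stream<Item = Result<ChatResponseChunk, OllamaError>>
where
    S: Stream<Item = Result<bytes::Bytes, reqwest::Error>>,
{
    bytes_stream
        .map_err(OllamaError::Http)
        .scan(String::new(), |buf, chunk| {
            let out = match chunk {
                Err(e) => vec![Err(e)],
                Ok(bytes) => {
                    buf.push_str(&String::from_utf8_lossy(&bytes));
                    let mut parsed = Vec::new();
                    // Drain complete lines; whatever follows the last '\n' stays in `buf`.
                    while let Some(pos) = buf.find('\n') {
                        let line: String = buf.drain(..=pos).collect();
                        let line = line.trim();
                        if !line.is_empty() {
                            parsed.push(
                                serde_json::from_str::<ChatResponseChunk>(line)
                                    .map_err(OllamaError::Json),
                            );
                        }
                    }
                    parsed
                }
            };
            futures::future::ready(Some(futures::stream::iter(out)))
        })
        .flatten()
}

(Ollama terminates every NDJSON line with '\n', so nothing is left in the buffer when the response ends.)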

crates/llm/ollama/src/lib.rs

@@ -0,0 +1,5 @@
pub mod client;
pub mod types;

pub use client::{OllamaClient, OllamaOptions};
pub use types::{ChatMessage, ChatResponseChunk};

crates/llm/ollama/src/types.rs

@@ -0,0 +1,22 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
pub role: String, // "user" | "assistant" | "system"
pub content: String,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChatResponseChunk {
pub model: Option<String>,
pub created_at: Option<String>,
pub message: Option<ChunkMessage>,
pub done: Option<bool>,
pub total_duration: Option<u64>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChunkMessage {
pub role: Option<String>,
pub content: Option<String>,
}
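Every field is optional because the NDJSON lines are sparse: streaming lines may carry only `message`, and the mocked terminator in the CLI test above is a bare `{"done":true}`. A small shape check (a sketch, not part of this commit):

#[cfg(test)]
mod shape_tests {
    use super::*;

    #[test]
    fn sparse_lines_deserialize() {
        let mid: ChatResponseChunk =
            serde_json::from_str(r#"{"message":{"role":"assistant","content":"Hel"}}"#).unwrap();
        assert_eq!(mid.message.unwrap().content.as_deref(), Some("Hel"));
        assert_eq!(mid.done, None);

        let last: ChatResponseChunk =
            serde_json::from_str(r#"{"done":true,"total_duration":123}"#).unwrap();
        assert_eq!(last.done, Some(true));
        assert!(last.message.is_none());
    }
}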

crates/llm/ollama/tests/ (smoke test; filename not shown in this view)

@@ -0,0 +1,12 @@
use llm_ollama::{OllamaClient, OllamaOptions};

// Spinning up a local server to stub NDJSON here would be overkill for M0, and
// mocking reqwest to exercise the line parser directly is complex. For now we
// smoke-test that the client types construct and leave end-to-end coverage to
// the cli integration test.
#[tokio::test]
async fn client_compiles_smoke() {
let _ = OllamaClient::new("http://localhost:11434");
let _ = OllamaClient::with_cloud();
let _ = OllamaOptions { model: "qwen2.5".into(), stream: true };
}
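An end-to-end test of `chat_stream` itself could reuse the httpmock approach from the cli test. The sketch below assumes httpmock is added as a dev-dependency of this crate, which this commit does not do:

use futures::TryStreamExt;
use httpmock::prelude::*;
use llm_ollama::{types::ChatMessage, OllamaClient, OllamaOptions};

#[tokio::test]
async fn chat_stream_yields_parsed_chunks() {
    let server = MockServer::start_async().await;
    let _mock = server.mock(|when, then| {
        when.method(POST).path("/api/chat");
        then.status(200)
            .header("content-type", "application/x-ndjson")
            .body(concat!(
                r#"{"message":{"role":"assistant","content":"Hi"}}"#, "\n",
                r#"{"done":true}"#, "\n",
            ));
    });

    let client = OllamaClient::new(server.base_url());
    let opts = OllamaOptions { model: "qwen2.5".into(), stream: true };
    let msgs = vec![ChatMessage { role: "user".into(), content: "hello".into() }];

    let chunks: Vec<_> = client
        .chat_stream(&msgs, &opts)
        .await
        .unwrap()
        .try_collect()
        .await
        .unwrap();

    assert_eq!(chunks[0].message.as_ref().unwrap().content.as_deref(), Some("Hi"));
    assert_eq!(chunks.last().unwrap().done, Some(true));
}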