22 Commits

Author SHA1 Message Date
d86888704f chore(release): bump version to 0.1.11
Some checks failed
ci/someci/push/woodpecker Pipeline is pending approval
macos-check / cargo check (macOS) (push) Has been cancelled
Update pkgver in PKGBUILD, version badge in README, and workspace package version in Cargo.toml. Add changelog entry for 0.1.11 reflecting the metadata bump.
2025-10-18 03:34:57 +02:00
de6b6e20a5 docs(readme): quick start matrices + platform notes 2025-10-18 03:25:10 +02:00
1e8a5e08ed docs(tui): MVU migration guide + module map 2025-10-18 03:20:32 +02:00
218ebbf32f feat(tui): debug log panel toggle 2025-10-18 03:18:34 +02:00
c49e7f4b22 test(core+tui): end-to-end agent tool scenarios
Some checks failed
ci/someci/push/woodpecker Pipeline is pending approval
macos-check / cargo check (macOS) (push) Has been cancelled
2025-10-17 05:24:01 +02:00
9588c8c562 feat(tui): model picker UX polish (filters, sizing, search) 2025-10-17 04:52:38 +02:00
1948ac1284 fix(providers/ollama): strengthen model cache + scope status UI 2025-10-17 03:58:25 +02:00
3f92b7d963 feat(agent): event-driven tool consent handshake (explicit UI prompts) 2025-10-17 03:42:13 +02:00
5553e61dbf feat(tui): declarative keymap + command registry 2025-10-17 02:47:09 +02:00
7f987737f9 refactor(core): add LLMClient facade trait; decouple TUI from Provider/MCP details 2025-10-17 01:52:10 +02:00
5182f86133 feat(tui): introduce MVU core (AppModel, AppEvent, update()) 2025-10-17 01:40:50 +02:00
a50099ad74 ci(mac): add compile-only macOS build (no artifacts) 2025-10-17 01:13:36 +02:00
20ba5523ee ci(build): split tests from matrix builds to avoid repetition 2025-10-17 01:12:39 +02:00
0b2b3701dc ci(security): add cargo-audit job (weekly + on push) 2025-10-17 01:10:24 +02:00
438b05b8a3 ci: derive release notes from CHANGELOG.md 2025-10-17 01:08:57 +02:00
e2a31b192f build(cli)!: add owlen-code binary and wire code mode 2025-10-17 01:02:40 +02:00
b827d3d047 ci: add PR pipeline (push) with fmt+clippy+test (linux only) 2025-10-17 00:51:25 +02:00
9c0cf274a3 chore(workspace): add cargo xtask crate for common ops 2025-10-17 00:47:54 +02:00
85ae319690 docs(architecture): clarify provider boundaries and MCP topology 2025-10-17 00:44:07 +02:00
449f133a1f docs: add repo map (tree) and generating script 2025-10-17 00:41:47 +02:00
2f6b03ef65 chore(repo): move placeholder provider crates to crates/providers/experimental/ 2025-10-17 00:37:02 +02:00
d4030dc598 refactor(workspace)!: move MCP crates under crates/mcp/ and update paths 2025-10-17 00:31:35 +02:00
60 changed files with 4374 additions and 777 deletions

34
.github/workflows/macos-check.yml vendored Normal file
View File

@@ -0,0 +1,34 @@
# Compile-only macOS gate: runs `cargo check` on pushes and PRs targeting dev.
# Produces no artifacts; it only verifies the workspace type-checks on macOS.
name: macos-check

on:
  push:
    branches:
      - dev
  pull_request:
    branches:
      - dev

jobs:
  build:
    name: cargo check (macOS)
    runs-on: macos-latest
    steps:
      - name: Checkout sources
        uses: actions/checkout@v4

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable

      # Cache cargo's registry/git caches plus the target dir, keyed on the
      # lockfile; restore-keys allows partial reuse when Cargo.lock changes.
      - name: Cache Cargo registry
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      # Type-check every crate with all features; no binaries are built.
      - name: Cargo check
        run: cargo check --workspace --all-features

View File

@@ -9,6 +9,7 @@ repos:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
args: ['--allow-multiple-documents']
- id: check-toml
- id: check-merge-conflict
- id: check-added-large-files

View File

@@ -1,3 +1,61 @@
---
kind: pipeline
name: pr-checks
when:
event:
- push
- pull_request
steps:
- name: fmt-clippy-test
image: rust:1.83
commands:
- rustup component add rustfmt clippy
- cargo fmt --all -- --check
- cargo clippy --workspace --all-features -- -D warnings
- cargo test --workspace --all-features
---
kind: pipeline
name: security-audit
when:
event:
- push
- cron
branch:
- dev
cron: weekly-security
steps:
- name: cargo-audit
image: rust:1.83
commands:
- cargo install cargo-audit --locked
- cargo audit
---
kind: pipeline
name: release-tests
when:
event: tag
tag: v*
steps:
- name: workspace-tests
image: rust:1.83
commands:
- rustup component add llvm-tools-preview
- cargo install cargo-llvm-cov --locked
- cargo llvm-cov --workspace --all-features --summary-only
- cargo llvm-cov --workspace --all-features --lcov --output-path coverage.lcov --no-run
---
kind: pipeline
name: release
when:
event: tag
tag: v*
@@ -5,6 +63,9 @@ when:
variables:
- &rust_image 'rust:1.83'
depends_on:
- release-tests
matrix:
include:
# Linux
@@ -39,14 +100,6 @@ matrix:
EXT: ".exe"
steps:
- name: tests
image: *rust_image
commands:
- rustup component add llvm-tools-preview
- cargo install cargo-llvm-cov --locked
- cargo llvm-cov --workspace --all-features --summary-only
- cargo llvm-cov --workspace --all-features --lcov --output-path coverage.lcov --no-run
- name: build
image: *rust_image
commands:
@@ -124,6 +177,11 @@ steps:
sha256sum ${ARTIFACT}.tar.gz > ${ARTIFACT}.tar.gz.sha256
fi
- name: release-notes
image: *rust_image
commands:
- scripts/release-notes.sh "${CI_COMMIT_TAG}" release-notes.md
- name: release
image: plugins/gitea-release
settings:
@@ -136,4 +194,4 @@ steps:
- ${ARTIFACT}.zip
- ${ARTIFACT}.zip.sha256
title: Release ${CI_COMMIT_TAG}
note: "Release ${CI_COMMIT_TAG}"
note_file: release-notes.md

View File

@@ -45,6 +45,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
---
## [0.1.11] - 2025-10-18
### Changed
- Bump workspace packages and distribution metadata to version `0.1.11`.
## [0.1.10] - 2025-10-03
### Added

View File

@@ -10,6 +10,10 @@ This project and everyone participating in it is governed by the [Owlen Code of
## How Can I Contribute?
### Repository map
Need a quick orientation before diving in? Start with the curated [repo map](docs/repo-map.md) for a two-level directory overview. If you move folders around, regenerate it with `scripts/gen-repo-map.sh`.
### Reporting Bugs
This is one of the most helpful ways you can contribute. Before creating a bug report, please check a few things:

View File

@@ -5,17 +5,18 @@ members = [
"crates/owlen-tui",
"crates/owlen-cli",
"crates/owlen-providers",
"crates/owlen-mcp-server",
"crates/owlen-mcp-llm-server",
"crates/owlen-mcp-client",
"crates/owlen-mcp-code-server",
"crates/owlen-mcp-prompt-server",
"crates/mcp/server",
"crates/mcp/llm-server",
"crates/mcp/client",
"crates/mcp/code-server",
"crates/mcp/prompt-server",
"crates/owlen-markdown",
"xtask",
]
exclude = []
[workspace.package]
version = "0.1.9"
version = "0.1.11"
edition = "2024"
authors = ["Owlibou"]
license = "AGPL-3.0"

View File

@@ -1,6 +1,6 @@
# Maintainer: vikingowl <christian@nachtigall.dev>
pkgname=owlen
pkgver=0.1.9
pkgver=0.1.11
pkgrel=1
pkgdesc="Terminal User Interface LLM client for Ollama with chat and code assistance features"
arch=('x86_64')

View File

@@ -3,7 +3,7 @@
> Terminal-native assistant for running local language models with a comfortable TUI.
![Status](https://img.shields.io/badge/status-alpha-yellow)
![Version](https://img.shields.io/badge/version-0.1.9-blue)
![Version](https://img.shields.io/badge/version-0.1.11-blue)
![Rust](https://img.shields.io/badge/made_with-Rust-ffc832?logo=rust&logoColor=white)
![License](https://img.shields.io/badge/license-AGPL--3.0-blue)
@@ -57,20 +57,28 @@ Owlen is designed to keep data local by default while still allowing controlled
### Installation
#### Linux & macOS
The recommended way to install on Linux and macOS is to clone the repository and install using `cargo`.
Pick the option that matches your platform and appetite for source builds:
| Platform | Package / Command | Notes |
| --- | --- | --- |
| Arch Linux | `yay -S owlen-git` | Builds from the latest `dev` branch via AUR. |
| Other Linux | `cargo install --path crates/owlen-cli --locked --force` | Requires Rust 1.75+ and a running Ollama daemon. |
| macOS | `cargo install --path crates/owlen-cli --locked --force` | macOS 12+ tested. Install Ollama separately (`brew install ollama`). The binary links against the system OpenSSL; ensure the Xcode Command Line Tools are installed. |
| Windows (experimental) | `cargo install --path crates/owlen-cli --locked --force` | Enable the GNU toolchain (`rustup target add x86_64-pc-windows-gnu`) and install Ollama for Windows preview builds. Some optional tools (e.g., Docker-based code execution) are currently disabled. |
If you prefer containerised builds, use the provided `Dockerfile` as a base image and copy out `target/release/owlen`.
Run the helper scripts to sanity-check platform coverage:
```bash
git clone https://github.com/Owlibou/owlen.git
cd owlen
cargo install --path crates/owlen-cli
# Windows compatibility smoke test (GNU toolchain)
scripts/check-windows.sh
# Reproduce CI packaging locally (choose a target from .woodpecker.yml)
dev/local_build.sh x86_64-unknown-linux-gnu
```
**Note for macOS**: While this method works, official binary releases for macOS are planned for the future.
#### Windows
The Windows build has not been thoroughly tested yet. Installation is possible via the same `cargo install` method, but it is considered experimental at this time.
From Unix hosts you can run `scripts/check-windows.sh` to ensure the code base still compiles for Windows (`rustup` will install the required target automatically).
> **Tip (macOS):** On the first launch macOS Gatekeeper may quarantine the binary. Clear the attribute (`xattr -d com.apple.quarantine $(which owlen)`) or build from source locally to avoid notarisation prompts.
### Running OWLEN
@@ -112,8 +120,10 @@ For more detailed information, please refer to the following documents:
- **[CHANGELOG.md](CHANGELOG.md)**: A log of changes for each version.
- **[docs/architecture.md](docs/architecture.md)**: An overview of the project's architecture.
- **[docs/troubleshooting.md](docs/troubleshooting.md)**: Help with common issues.
- **[docs/repo-map.md](docs/repo-map.md)**: Snapshot of the workspace layout and key crates.
- **[docs/provider-implementation.md](docs/provider-implementation.md)**: Trait-level details for implementing providers.
- **[docs/adding-providers.md](docs/adding-providers.md)**: Step-by-step checklist for wiring a provider into the multi-provider architecture and test suite.
- **Experimental providers staging area**: [crates/providers/experimental/README.md](crates/providers/experimental/README.md) records the placeholder crates (OpenAI, Anthropic, Gemini) and their current status.
- **[docs/platform-support.md](docs/platform-support.md)**: Current OS support matrix and cross-check instructions.
## Configuration

View File

@@ -6,7 +6,7 @@ description = "Dedicated MCP client library for Owlen, exposing remote MCP serve
license = "AGPL-3.0"
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }
[features]
default = []

View File

@@ -6,7 +6,7 @@ description = "MCP server exposing safe code execution tools for Owlen"
license = "AGPL-3.0"
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }

View File

@@ -4,7 +4,7 @@ version = "0.1.0"
edition.workspace = true
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }
tokio = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

View File

@@ -6,7 +6,7 @@ description = "MCP server that renders prompt templates (YAML) for Owlen"
license = "AGPL-3.0"
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }

View File

@@ -9,4 +9,4 @@ serde = { workspace = true }
serde_json = { workspace = true }
anyhow = { workspace = true }
path-clean = "1.0"
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }

View File

@@ -17,6 +17,11 @@ name = "owlen"
path = "src/main.rs"
required-features = ["chat-client"]
[[bin]]
name = "owlen-code"
path = "src/code_main.rs"
required-features = ["chat-client"]
[[bin]]
name = "owlen-agent"
path = "src/agent_main.rs"

View File

@@ -0,0 +1,326 @@
use std::borrow::Cow;
use std::io;
use std::sync::Arc;
use anyhow::{Result, anyhow};
use async_trait::async_trait;
use crossterm::{
event::{DisableBracketedPaste, DisableMouseCapture, EnableBracketedPaste, EnableMouseCapture},
execute,
terminal::{EnterAlternateScreen, LeaveAlternateScreen, disable_raw_mode, enable_raw_mode},
};
use futures::stream;
use owlen_core::{
ChatStream, Error, Provider,
config::{Config, McpMode},
mcp::remote_client::RemoteMcpClient,
mode::Mode,
provider::ProviderManager,
providers::OllamaProvider,
session::{ControllerEvent, SessionController},
storage::StorageManager,
types::{ChatRequest, ChatResponse, Message, ModelInfo},
};
use owlen_tui::{
ChatApp, SessionEvent,
app::App as RuntimeApp,
config,
tui_controller::{TuiController, TuiRequest},
ui,
};
use ratatui::{Terminal, prelude::CrosstermBackend};
use tokio::sync::mpsc;
use crate::commands::cloud::{load_runtime_credentials, set_env_var};
/// Boot the Owlen TUI in `initial_mode`: load configuration, construct a
/// provider (falling back to an [`OfflineProvider`] when the health check
/// fails), set up the terminal, run the event loop, then restore the terminal
/// and persist configuration changes.
pub async fn launch(initial_mode: Mode) -> Result<()> {
    // The TUI owns stdin, so tool-consent prompts must not block on stdin reads.
    set_env_var("OWLEN_AUTO_CONSENT", "1");
    let color_support = detect_terminal_color_support();
    // Load config (defaults on failure) and refresh the MCP server list.
    // Refresh errors are deliberately ignored — this is best effort.
    let mut cfg = config::try_load_config().unwrap_or_default();
    let _ = cfg.refresh_mcp_servers(None);
    // Downgrade the theme on terminals without 256-color support and tell the
    // user which theme was swapped out; otherwise just warn about the terminal.
    if let Some(previous_theme) = apply_terminal_theme(&mut cfg, &color_support) {
        let term_label = match &color_support {
            TerminalColorSupport::Limited { term } => Cow::from(term.as_str()),
            TerminalColorSupport::Full => Cow::from("current terminal"),
        };
        eprintln!(
            "Terminal '{}' lacks full 256-color support. Using '{}' theme instead of '{}'.",
            term_label, BASIC_THEME_NAME, previous_theme
        );
    } else if let TerminalColorSupport::Limited { term } = &color_support {
        eprintln!(
            "Warning: terminal '{}' may not fully support 256-color themes.",
            term
        );
    }
    cfg.validate()?;
    let storage = Arc::new(StorageManager::new().await?);
    load_runtime_credentials(&mut cfg, storage.clone()).await?;
    let (tui_tx, _tui_rx) = mpsc::unbounded_channel::<TuiRequest>();
    let tui_controller = Arc::new(TuiController::new(tui_tx));
    let provider = build_provider(&cfg)?;
    // If the provider is unreachable, keep the UI usable by swapping in an
    // OfflineProvider that explains the failure instead of aborting startup.
    let mut offline_notice: Option<String> = None;
    let provider = match provider.health_check().await {
        Ok(_) => provider,
        Err(err) => {
            // Tailor the hint to whether a remote MCP server was expected.
            let hint = if matches!(cfg.mcp.mode, McpMode::RemotePreferred | McpMode::RemoteOnly)
                && !cfg.effective_mcp_servers().is_empty()
            {
                "Ensure the configured MCP server is running and reachable."
            } else {
                "Ensure Ollama is running (`ollama serve`) and reachable at the configured base_url."
            };
            let notice =
                format!("Provider health check failed: {err}. {hint} Continuing in offline mode.");
            eprintln!("{notice}");
            offline_notice = Some(notice.clone());
            let fallback_model = cfg
                .general
                .default_model
                .clone()
                .unwrap_or_else(|| "offline".to_string());
            Arc::new(OfflineProvider::new(notice, fallback_model)) as Arc<dyn Provider>
        }
    };
    let (controller_event_tx, controller_event_rx) = mpsc::unbounded_channel::<ControllerEvent>();
    let controller = SessionController::new(
        provider,
        cfg,
        storage.clone(),
        tui_controller,
        false,
        Some(controller_event_tx),
    )
    .await?;
    let provider_manager = Arc::new(ProviderManager::default());
    let mut runtime = RuntimeApp::new(provider_manager);
    let (mut app, mut session_rx) = ChatApp::new(controller, controller_event_rx).await?;
    app.initialize_models().await?;
    // Surface the offline notice in both the status line and system banner.
    if let Some(notice) = offline_notice.clone() {
        app.set_status_message(&notice);
        app.set_system_status(notice);
    }
    app.set_mode(initial_mode).await;
    // Terminal setup: raw mode, alternate screen, mouse + bracketed paste.
    enable_raw_mode()?;
    let mut stdout = io::stdout();
    execute!(
        stdout,
        EnterAlternateScreen,
        EnableMouseCapture,
        EnableBracketedPaste
    )?;
    let backend = CrosstermBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    let result = run_app(&mut terminal, &mut runtime, &mut app, &mut session_rx).await;
    // Persist configuration updates (e.g. selected model) before teardown.
    config::save_config(&app.config())?;
    // Always restore the terminal before reporting any run error.
    disable_raw_mode()?;
    execute!(
        terminal.backend_mut(),
        LeaveAlternateScreen,
        DisableMouseCapture,
        DisableBracketedPaste
    )?;
    terminal.show_cursor()?;
    if let Err(err) = result {
        println!("{err:?}");
    }
    Ok(())
}
/// Choose a chat provider based on `[mcp].mode`:
/// - `remote_preferred`: try a remote MCP client, falling back to the local
///   provider when `allow_fallback` is set;
/// - `remote_only`: require a configured `[[mcp_servers]]` entry;
/// - `local_only` / `legacy`: always build the local provider;
/// - `disabled`: rejected — the TUI needs some provider.
fn build_provider(cfg: &Config) -> Result<Arc<dyn Provider>> {
    match cfg.mcp.mode {
        McpMode::RemotePreferred => {
            // Prefer an explicitly configured server; otherwise use defaults.
            let attempt = match cfg.effective_mcp_servers().first() {
                Some(server_cfg) => RemoteMcpClient::new_with_config(server_cfg),
                None => RemoteMcpClient::new(),
            };
            match attempt {
                Ok(client) => Ok(Arc::new(client) as Arc<dyn Provider>),
                Err(err) if cfg.mcp.allow_fallback => {
                    log::warn!(
                        "Remote MCP client unavailable ({}); falling back to local provider.",
                        err
                    );
                    build_local_provider(cfg)
                }
                Err(err) => Err(anyhow!(err)),
            }
        }
        McpMode::RemoteOnly => {
            let server_cfg = cfg.effective_mcp_servers().first().ok_or_else(|| {
                anyhow!("[[mcp_servers]] must be configured when [mcp].mode = \"remote_only\"")
            })?;
            let client = RemoteMcpClient::new_with_config(server_cfg)?;
            Ok(Arc::new(client) as Arc<dyn Provider>)
        }
        McpMode::LocalOnly | McpMode::Legacy => build_local_provider(cfg),
        McpMode::Disabled => Err(anyhow!(
            "MCP mode 'disabled' is not supported by the owlen TUI"
        )),
    }
}
/// Build the in-process provider named by `[general].default_provider`.
/// Only Ollama-backed provider types are supported in legacy/local MCP mode.
fn build_local_provider(cfg: &Config) -> Result<Arc<dyn Provider>> {
    let provider_name = cfg.general.default_provider.clone();
    // Guard clause: the configured provider must exist under [providers].
    let Some(provider_cfg) = cfg.provider(&provider_name) else {
        return Err(anyhow!(format!(
            "No provider configuration found for '{provider_name}' in [providers]"
        )));
    };
    match provider_cfg.provider_type.as_str() {
        "ollama" | "ollama_cloud" => {
            let provider = OllamaProvider::from_config(provider_cfg, Some(&cfg.general))?;
            Ok(Arc::new(provider) as Arc<dyn Provider>)
        }
        other => Err(anyhow!(format!(
            "Provider type '{other}' is not supported in legacy/local MCP mode"
        ))),
    }
}
/// Theme forced onto terminals that cannot render 256-color themes.
const BASIC_THEME_NAME: &str = "ansi_basic";

/// Result of probing `TERM`/`COLORTERM` for extended color support.
#[derive(Debug, Clone)]
enum TerminalColorSupport {
    Full,
    Limited { term: String },
}

/// Inspect `TERM` and `COLORTERM` to decide whether the terminal advertises
/// 256-color or truecolor output; anything else is reported as `Limited`
/// carrying the raw `TERM` value for diagnostics.
fn detect_terminal_color_support() -> TerminalColorSupport {
    let term = std::env::var("TERM").unwrap_or_else(|_| "unknown".to_string());
    let colorterm = std::env::var("COLORTERM")
        .unwrap_or_default()
        .to_lowercase();
    // Either variable may advertise extended color depth.
    let extended = term.to_lowercase().contains("256color")
        || ["truecolor", "24bit", "fullcolor"]
            .iter()
            .any(|marker| colorterm.contains(marker));
    if extended {
        TerminalColorSupport::Full
    } else {
        TerminalColorSupport::Limited { term }
    }
}
/// Force the fallback theme on limited terminals. Returns the previous theme
/// name when a swap happened, `None` when no change was needed.
fn apply_terminal_theme(cfg: &mut Config, support: &TerminalColorSupport) -> Option<String> {
    if matches!(support, TerminalColorSupport::Full) {
        // Full color support: keep whatever theme is configured.
        return None;
    }
    if cfg.ui.theme == BASIC_THEME_NAME {
        // Already on the fallback theme; nothing to report.
        return None;
    }
    // Swap in the basic theme and hand back the displaced name.
    Some(std::mem::replace(&mut cfg.ui.theme, BASIC_THEME_NAME.to_string()))
}
/// Stand-in provider used when no real backend passes its health check; it
/// answers every prompt with an explanation of why Owlen is offline.
struct OfflineProvider {
    reason: String,
    placeholder_model: String,
}

impl OfflineProvider {
    /// `reason` is the health-check failure notice shown to the user;
    /// `placeholder_model` is the model id advertised while offline.
    fn new(reason: String, placeholder_model: String) -> Self {
        Self {
            placeholder_model,
            reason,
        }
    }

    /// Build the canned offline reply returned for any chat request.
    fn friendly_response(&self, requested_model: &str) -> ChatResponse {
        let mut message = String::from("⚠️ Owlen is running in offline mode.\n\n");
        message += &self.reason;
        // Mention the requested model only when it differs from the placeholder.
        if requested_model != self.placeholder_model && !requested_model.is_empty() {
            message += &format!(
                "\n\nYou requested model '{}', but no providers are reachable.",
                requested_model
            );
        }
        message += "\n\nStart your preferred provider (e.g. `ollama serve`) or switch providers with `:provider` once connectivity is restored.";
        ChatResponse {
            message: Message::assistant(message),
            usage: None,
            is_streaming: false,
            is_final: true,
        }
    }
}
#[async_trait]
impl Provider for OfflineProvider {
    fn name(&self) -> &str {
        "offline"
    }

    /// Advertise a single placeholder model so model pickers stay functional
    /// while no real provider is reachable.
    async fn list_models(&self) -> Result<Vec<ModelInfo>, Error> {
        Ok(vec![ModelInfo {
            id: self.placeholder_model.clone(),
            provider: "offline".to_string(),
            name: format!("Offline (fallback: {})", self.placeholder_model),
            description: Some("Placeholder model used while no providers are reachable".into()),
            context_window: None,
            capabilities: vec![],
            supports_tools: false,
        }])
    }

    /// Every prompt gets the canned offline explanation instead of an error.
    async fn send_prompt(&self, request: ChatRequest) -> Result<ChatResponse, Error> {
        Ok(self.friendly_response(&request.model))
    }

    /// Streaming variant: a one-item stream carrying the same canned reply.
    async fn stream_prompt(&self, request: ChatRequest) -> Result<ChatStream, Error> {
        let response = self.friendly_response(&request.model);
        Ok(Box::pin(stream::iter(vec![Ok(response)])))
    }

    /// Always fails: this provider exists precisely because no backend is up.
    async fn health_check(&self) -> Result<(), Error> {
        Err(Error::Provider(anyhow!(
            "offline provider cannot reach any backing models"
        )))
    }

    fn as_any(&self) -> &(dyn std::any::Any + Send + Sync) {
        self
    }
}
/// Drive the TUI event loop: hand the runtime a render callback that draws the
/// chat UI, then block until the runtime finishes.
async fn run_app(
    terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
    runtime: &mut RuntimeApp,
    app: &mut ChatApp,
    session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
) -> Result<()> {
    // Render callback invoked by the runtime whenever a redraw is needed.
    let mut draw = |term: &mut Terminal<CrosstermBackend<io::Stdout>>,
                    state: &mut ChatApp|
     -> Result<()> {
        term.draw(|frame| ui::render_chat(frame, state))?;
        Ok(())
    };
    runtime.run(terminal, app, session_rx, &mut draw).await?;
    Ok(())
}

View File

@@ -0,0 +1,16 @@
//! Owlen CLI entrypoint optimised for code-first workflows.
#![allow(dead_code, unused_imports)]
mod bootstrap;
mod commands;
mod mcp;
use anyhow::Result;
use owlen_core::config as core_config;
use owlen_core::mode::Mode;
use owlen_tui::config;
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<()> {
    // Thin entrypoint: boot the shared TUI bootstrap directly in Code mode.
    bootstrap::launch(Mode::Code).await
}

View File

@@ -195,13 +195,13 @@ async fn list_models(filter: Option<&str>) -> Result<()> {
}
fn verify_provider_filter(config: &Config, filter: Option<&str>) -> Result<()> {
if let Some(filter) = filter {
if !config.providers.contains_key(filter) {
return Err(anyhow!(
"Provider '{}' is not defined in configuration.",
filter
));
}
if let Some(filter) = filter
&& !config.providers.contains_key(filter)
{
return Err(anyhow!(
"Provider '{}' is not defined in configuration.",
filter
));
}
Ok(())
}
@@ -254,10 +254,10 @@ fn toggle_provider(provider: &str, enable: bool) -> Result<()> {
entry.enabled = previous_enabled;
}
config.general.default_provider = previous_default;
if let Some(enabled) = previous_fallback_enabled {
if let Some(entry) = config.providers.get_mut("ollama_local") {
entry.enabled = enabled;
}
if let Some(enabled) = previous_fallback_enabled
&& let Some(entry) = config.providers.get_mut("ollama_local")
{
entry.enabled = enabled;
}
return Err(anyhow!(err));
}
@@ -273,12 +273,11 @@ fn toggle_provider(provider: &str, enable: bool) -> Result<()> {
}
fn choose_fallback_provider(config: &Config, exclude: &str) -> Option<String> {
if exclude != "ollama_local" {
if let Some(cfg) = config.providers.get("ollama_local") {
if cfg.enabled {
return Some("ollama_local".to_string());
}
}
if exclude != "ollama_local"
&& let Some(cfg) = config.providers.get("ollama_local")
&& cfg.enabled
{
return Some("ollama_local".to_string());
}
let mut candidates: Vec<String> = config
@@ -300,10 +299,10 @@ async fn register_enabled_providers(
let mut records = Vec::new();
for (id, cfg) in &config.providers {
if let Some(filter) = filter {
if id != filter {
continue;
}
if let Some(filter) = filter
&& id != filter
{
continue;
}
let mut record = ProviderRecord::from_config(id, cfg, id == &default_provider);
@@ -537,10 +536,10 @@ fn print_models(
} else {
for entry in entries {
let mut line = format!(" - {}", entry.model.name);
if let Some(description) = &entry.model.description {
if !description.trim().is_empty() {
line.push_str(&format!("{}", description.trim()));
}
if let Some(description) = &entry.model.description
&& !description.trim().is_empty()
{
line.push_str(&format!("{}", description.trim()));
}
println!("{}", line);
}
@@ -549,10 +548,10 @@ fn print_models(
println!(" (no models reported)");
}
if let Some(ProviderStatus::RequiresSetup) = status_value {
if record.requires_auth {
println!(" configure provider credentials or API key");
}
if let Some(ProviderStatus::RequiresSetup) = status_value
&& record.requires_auth
{
println!(" configure provider credentials or API key");
}
println!();
}

View File

@@ -2,44 +2,21 @@
//! OWLEN CLI - Chat TUI client
mod bootstrap;
mod commands;
mod mcp;
use anyhow::{Result, anyhow};
use async_trait::async_trait;
use anyhow::Result;
use clap::{Parser, Subcommand};
use commands::{
cloud::{CloudCommand, load_runtime_credentials, run_cloud_command, set_env_var},
cloud::{CloudCommand, run_cloud_command},
providers::{ModelsArgs, ProvidersCommand, run_models_command, run_providers_command},
};
use mcp::{McpCommand, run_mcp_command};
use owlen_core::config as core_config;
use owlen_core::{
ChatStream, Error, Provider,
config::{Config, McpMode},
mcp::remote_client::RemoteMcpClient,
mode::Mode,
provider::ProviderManager,
providers::OllamaProvider,
session::SessionController,
storage::StorageManager,
types::{ChatRequest, ChatResponse, Message, ModelInfo},
};
use owlen_tui::tui_controller::{TuiController, TuiRequest};
use owlen_tui::{ChatApp, SessionEvent, app::App as RuntimeApp, config, ui};
use std::any::Any;
use std::borrow::Cow;
use std::io;
use std::sync::Arc;
use tokio::sync::mpsc;
use crossterm::{
event::{DisableBracketedPaste, DisableMouseCapture, EnableBracketedPaste, EnableMouseCapture},
execute,
terminal::{EnterAlternateScreen, LeaveAlternateScreen, disable_raw_mode, enable_raw_mode},
};
use futures::stream;
use ratatui::{Terminal, prelude::CrosstermBackend};
use owlen_core::config::McpMode;
use owlen_core::mode::Mode;
use owlen_tui::config;
/// Owlen - Terminal UI for LLM chat
#[derive(Parser, Debug)]
@@ -81,66 +58,6 @@ enum ConfigCommand {
Path,
}
fn build_provider(cfg: &Config) -> anyhow::Result<Arc<dyn Provider>> {
match cfg.mcp.mode {
McpMode::RemotePreferred => {
let remote_result = if let Some(mcp_server) = cfg.effective_mcp_servers().first() {
RemoteMcpClient::new_with_config(mcp_server)
} else {
RemoteMcpClient::new()
};
match remote_result {
Ok(client) => {
let provider: Arc<dyn Provider> = Arc::new(client);
Ok(provider)
}
Err(err) if cfg.mcp.allow_fallback => {
log::warn!(
"Remote MCP client unavailable ({}); falling back to local provider.",
err
);
build_local_provider(cfg)
}
Err(err) => Err(anyhow::Error::from(err)),
}
}
McpMode::RemoteOnly => {
let mcp_server = cfg.effective_mcp_servers().first().ok_or_else(|| {
anyhow::anyhow!(
"[[mcp_servers]] must be configured when [mcp].mode = \"remote_only\""
)
})?;
let client = RemoteMcpClient::new_with_config(mcp_server)?;
let provider: Arc<dyn Provider> = Arc::new(client);
Ok(provider)
}
McpMode::LocalOnly | McpMode::Legacy => build_local_provider(cfg),
McpMode::Disabled => Err(anyhow::anyhow!(
"MCP mode 'disabled' is not supported by the owlen TUI"
)),
}
}
fn build_local_provider(cfg: &Config) -> anyhow::Result<Arc<dyn Provider>> {
let provider_name = cfg.general.default_provider.clone();
let provider_cfg = cfg.provider(&provider_name).ok_or_else(|| {
anyhow::anyhow!(format!(
"No provider configuration found for '{provider_name}' in [providers]"
))
})?;
match provider_cfg.provider_type.as_str() {
"ollama" | "ollama_cloud" => {
let provider = OllamaProvider::from_config(provider_cfg, Some(&cfg.general))?;
Ok(Arc::new(provider) as Arc<dyn Provider>)
}
other => Err(anyhow::anyhow!(format!(
"Provider type '{other}' is not supported in legacy/local MCP mode"
))),
}
}
async fn run_command(command: OwlenCommand) -> Result<()> {
match command {
OwlenCommand::Config(config_cmd) => run_config_command(config_cmd),
@@ -299,120 +216,6 @@ fn run_config_doctor() -> Result<()> {
Ok(())
}
const BASIC_THEME_NAME: &str = "ansi_basic";
#[derive(Debug, Clone)]
enum TerminalColorSupport {
Full,
Limited { term: String },
}
fn detect_terminal_color_support() -> TerminalColorSupport {
let term = std::env::var("TERM").unwrap_or_else(|_| "unknown".to_string());
let colorterm = std::env::var("COLORTERM").unwrap_or_default();
let term_lower = term.to_lowercase();
let color_lower = colorterm.to_lowercase();
let supports_extended = term_lower.contains("256color")
|| color_lower.contains("truecolor")
|| color_lower.contains("24bit")
|| color_lower.contains("fullcolor");
if supports_extended {
TerminalColorSupport::Full
} else {
TerminalColorSupport::Limited { term }
}
}
fn apply_terminal_theme(cfg: &mut Config, support: &TerminalColorSupport) -> Option<String> {
match support {
TerminalColorSupport::Full => None,
TerminalColorSupport::Limited { .. } => {
if cfg.ui.theme != BASIC_THEME_NAME {
let previous = std::mem::replace(&mut cfg.ui.theme, BASIC_THEME_NAME.to_string());
Some(previous)
} else {
None
}
}
}
}
struct OfflineProvider {
reason: String,
placeholder_model: String,
}
impl OfflineProvider {
fn new(reason: String, placeholder_model: String) -> Self {
Self {
reason,
placeholder_model,
}
}
fn friendly_response(&self, requested_model: &str) -> ChatResponse {
let mut message = String::new();
message.push_str("⚠️ Owlen is running in offline mode.\n\n");
message.push_str(&self.reason);
if !requested_model.is_empty() && requested_model != self.placeholder_model {
message.push_str(&format!(
"\n\nYou requested model '{}', but no providers are reachable.",
requested_model
));
}
message.push_str(
"\n\nStart your preferred provider (e.g. `ollama serve`) or switch providers with `:provider` once connectivity is restored.",
);
ChatResponse {
message: Message::assistant(message),
usage: None,
is_streaming: false,
is_final: true,
}
}
}
#[async_trait]
impl Provider for OfflineProvider {
fn name(&self) -> &str {
"offline"
}
async fn list_models(&self) -> Result<Vec<ModelInfo>, Error> {
Ok(vec![ModelInfo {
id: self.placeholder_model.clone(),
provider: "offline".to_string(),
name: format!("Offline (fallback: {})", self.placeholder_model),
description: Some("Placeholder model used while no providers are reachable".into()),
context_window: None,
capabilities: vec![],
supports_tools: false,
}])
}
async fn send_prompt(&self, request: ChatRequest) -> Result<ChatResponse, Error> {
Ok(self.friendly_response(&request.model))
}
async fn stream_prompt(&self, request: ChatRequest) -> Result<ChatStream, Error> {
let response = self.friendly_response(&request.model);
Ok(Box::pin(stream::iter(vec![Ok(response)])))
}
async fn health_check(&self) -> Result<(), Error> {
Err(Error::Provider(anyhow!(
"offline provider cannot reach any backing models"
)))
}
fn as_any(&self) -> &(dyn Any + Send + Sync) {
self
}
}
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<()> {
// Parse command-line arguments
@@ -421,122 +224,5 @@ async fn main() -> Result<()> {
return run_command(command).await;
}
let initial_mode = if code { Mode::Code } else { Mode::Chat };
// Set auto-consent for TUI mode to prevent blocking stdin reads
set_env_var("OWLEN_AUTO_CONSENT", "1");
let color_support = detect_terminal_color_support();
// Load configuration (or fall back to defaults) for the session controller.
let mut cfg = config::try_load_config().unwrap_or_default();
let _ = cfg.refresh_mcp_servers(None);
if let Some(previous_theme) = apply_terminal_theme(&mut cfg, &color_support) {
let term_label = match &color_support {
TerminalColorSupport::Limited { term } => Cow::from(term.as_str()),
TerminalColorSupport::Full => Cow::from("current terminal"),
};
eprintln!(
"Terminal '{}' lacks full 256-color support. Using '{}' theme instead of '{}'.",
term_label, BASIC_THEME_NAME, previous_theme
);
} else if let TerminalColorSupport::Limited { term } = &color_support {
eprintln!(
"Warning: terminal '{}' may not fully support 256-color themes.",
term
);
}
cfg.validate()?;
let storage = Arc::new(StorageManager::new().await?);
load_runtime_credentials(&mut cfg, storage.clone()).await?;
let (tui_tx, _tui_rx) = mpsc::unbounded_channel::<TuiRequest>();
let tui_controller = Arc::new(TuiController::new(tui_tx));
// Create provider according to MCP configuration (supports legacy/local fallback)
let provider = build_provider(&cfg)?;
let mut offline_notice: Option<String> = None;
let provider = match provider.health_check().await {
Ok(_) => provider,
Err(err) => {
let hint = if matches!(cfg.mcp.mode, McpMode::RemotePreferred | McpMode::RemoteOnly)
&& !cfg.effective_mcp_servers().is_empty()
{
"Ensure the configured MCP server is running and reachable."
} else {
"Ensure Ollama is running (`ollama serve`) and reachable at the configured base_url."
};
let notice =
format!("Provider health check failed: {err}. {hint} Continuing in offline mode.");
eprintln!("{notice}");
offline_notice = Some(notice.clone());
let fallback_model = cfg
.general
.default_model
.clone()
.unwrap_or_else(|| "offline".to_string());
Arc::new(OfflineProvider::new(notice, fallback_model)) as Arc<dyn Provider>
}
};
let controller =
SessionController::new(provider, cfg, storage.clone(), tui_controller, false).await?;
let provider_manager = Arc::new(ProviderManager::default());
let mut runtime = RuntimeApp::new(provider_manager);
let (mut app, mut session_rx) = ChatApp::new(controller).await?;
app.initialize_models().await?;
if let Some(notice) = offline_notice {
app.set_status_message(&notice);
app.set_system_status(notice);
}
// Set the initial mode
app.set_mode(initial_mode).await;
// Terminal setup
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(
stdout,
EnterAlternateScreen,
EnableMouseCapture,
EnableBracketedPaste
)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
let result = run_app(&mut terminal, &mut runtime, &mut app, &mut session_rx).await;
// Persist configuration updates (e.g., selected model)
config::save_config(&app.config())?;
disable_raw_mode()?;
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture,
DisableBracketedPaste
)?;
terminal.show_cursor()?;
if let Err(err) = result {
println!("{err:?}");
}
Ok(())
}
async fn run_app(
terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
runtime: &mut RuntimeApp,
app: &mut ChatApp,
session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
) -> Result<()> {
let mut render = |terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
state: &mut ChatApp|
-> Result<()> {
terminal.draw(|f| ui::render_chat(f, state))?;
Ok(())
};
runtime.run(terminal, app, session_rx, &mut render).await?;
Ok(())
bootstrap::launch(initial_mode).await
}

View File

@@ -1584,6 +1584,8 @@ pub struct UiSettings {
pub show_timestamps: bool,
#[serde(default = "UiSettings::default_icon_mode")]
pub icon_mode: IconMode,
#[serde(default)]
pub keymap_path: Option<String>,
}
/// Preference for which symbol set to render in the terminal UI.
@@ -1721,6 +1723,7 @@ impl Default for UiSettings {
render_markdown: Self::default_render_markdown(),
show_timestamps: Self::default_show_timestamps(),
icon_mode: Self::default_icon_mode(),
keymap_path: None,
}
}
}

View File

@@ -0,0 +1,32 @@
use std::sync::Arc;
use async_trait::async_trait;
use crate::{
Result,
llm::ChatStream,
mcp::{McpToolCall, McpToolDescriptor, McpToolResponse},
types::{ChatRequest, ChatResponse, ModelInfo},
};
/// Object-safe facade for interacting with LLM backends.
///
/// Decouples UI code from the concrete provider / MCP client types:
/// consumers hold a [`DynLlmClient`] and never name the implementation.
#[async_trait]
pub trait LlmClient: Send + Sync {
    /// List the models exposed by this client.
    async fn list_models(&self) -> Result<Vec<ModelInfo>>;
    /// Issue a one-shot chat request and wait for the complete response.
    async fn send_chat(&self, request: ChatRequest) -> Result<ChatResponse>;
    /// Stream chat responses incrementally.
    async fn stream_chat(&self, request: ChatRequest) -> Result<ChatStream>;
    /// Enumerate tools exposed by the backing provider.
    async fn list_tools(&self) -> Result<Vec<McpToolDescriptor>>;
    /// Invoke a tool exposed by the provider.
    async fn call_tool(&self, call: McpToolCall) -> Result<McpToolResponse>;
}
/// Convenience alias for trait-object clients.
pub type DynLlmClient = Arc<dyn LlmClient>;

View File

@@ -0,0 +1 @@
pub mod llm_client;

View File

@@ -11,6 +11,7 @@ pub mod consent;
pub mod conversation;
pub mod credentials;
pub mod encryption;
pub mod facade;
pub mod formatting;
pub mod input;
pub mod llm;
@@ -42,6 +43,7 @@ pub use formatting::*;
pub use input::*;
pub use oauth::*;
// Export MCP types but exclude test_utils to avoid ambiguity
pub use facade::llm_client::*;
pub use llm::{
ChatStream, LlmProvider, Provider, ProviderConfig, ProviderRegistry, send_via_stream,
};

View File

@@ -7,7 +7,10 @@ use crate::consent::{ConsentManager, ConsentScope};
use crate::tools::{Tool, WebScrapeTool, WebSearchTool};
use crate::types::ModelInfo;
use crate::types::{ChatResponse, Message, Role};
use crate::{Error, LlmProvider, Result, mode::Mode, send_via_stream};
use crate::{
ChatStream, Error, LlmProvider, Result, facade::llm_client::LlmClient, mode::Mode,
send_via_stream,
};
use anyhow::anyhow;
use futures::{StreamExt, future::BoxFuture, stream};
use reqwest::Client as HttpClient;
@@ -564,3 +567,27 @@ impl LlmProvider for RemoteMcpClient {
})
}
}
// Bridge `RemoteMcpClient` onto the object-safe `LlmClient` facade.
// Every method delegates to the matching `LlmProvider` / `McpClient` impl;
// fully-qualified syntax disambiguates the same-named trait methods.
#[async_trait::async_trait]
impl LlmClient for RemoteMcpClient {
    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
        <Self as LlmProvider>::list_models(self).await
    }
    async fn send_chat(&self, request: crate::types::ChatRequest) -> Result<ChatResponse> {
        <Self as LlmProvider>::send_prompt(self, request).await
    }
    async fn stream_chat(&self, request: crate::types::ChatRequest) -> Result<ChatStream> {
        // Re-pin the provider's stream so it satisfies the facade's `ChatStream` type.
        let stream = <Self as LlmProvider>::stream_prompt(self, request).await?;
        Ok(Box::pin(stream))
    }
    async fn list_tools(&self) -> Result<Vec<McpToolDescriptor>> {
        <Self as McpClient>::list_tools(self).await
    }
    async fn call_tool(&self, call: McpToolCall) -> Result<McpToolResponse> {
        <Self as McpClient>::call_tool(self, call).await
    }
}

View File

@@ -88,6 +88,7 @@ struct ScopeSnapshot {
availability: ScopeAvailability,
last_error: Option<String>,
last_checked: Option<Instant>,
last_success_at: Option<Instant>,
}
impl Default for ScopeSnapshot {
@@ -98,10 +99,29 @@ impl Default for ScopeSnapshot {
availability: ScopeAvailability::Unknown,
last_error: None,
last_checked: None,
last_success_at: None,
}
}
}
impl ScopeSnapshot {
    /// A snapshot is stale once its fetch timestamp has outlived `ttl`.
    /// With no timestamp at all it counts as stale only when it still
    /// carries model entries (i.e. data of unknown age).
    fn is_stale(&self, ttl: Duration) -> bool {
        self.fetched_at
            .map_or(!self.models.is_empty(), |ts| ts.elapsed() >= ttl)
    }
    /// Seconds since this scope was last probed, if it ever was.
    fn last_checked_age_secs(&self) -> Option<u64> {
        Some(self.last_checked?.elapsed().as_secs())
    }
    /// Seconds since the last successful refresh, if any.
    fn last_success_age_secs(&self) -> Option<u64> {
        Some(self.last_success_at?.elapsed().as_secs())
    }
}
#[derive(Debug)]
struct OllamaOptions {
mode: OllamaMode,
@@ -410,22 +430,29 @@ impl OllamaProvider {
return None;
}
entry.fetched_at.and_then(|ts| {
if entry.models.is_empty() {
return None;
}
if let Some(ts) = entry.fetched_at {
if ts.elapsed() < self.model_cache_ttl {
Some(entry.models.clone())
} else {
None
return Some(entry.models.clone());
}
})
}
// Fallback to last good models even if stale; UI will mark as degraded
Some(entry.models.clone())
})
}
/// Record a successful model fetch for `scope`: store the models and stamp
/// every freshness marker with one shared `Instant`.
async fn update_scope_success(&self, scope: OllamaMode, models: &[ModelInfo]) {
    let mut cache = self.scope_cache.write().await;
    let entry = cache.entry(scope).or_default();
    let now = Instant::now();
    entry.models = models.to_vec();
    // A single timestamp keeps the three markers mutually consistent; the
    // previous version assigned `fetched_at`/`last_checked` twice (dead
    // writes from separate `Instant::now()` calls).
    entry.fetched_at = Some(now);
    entry.last_checked = Some(now);
    entry.last_success_at = Some(now);
    entry.availability = ScopeAvailability::Available;
    entry.last_error = None;
}
@@ -461,6 +488,45 @@ impl OllamaProvider {
}
}
let stale = snapshot.is_stale(self.model_cache_ttl);
let stale_capability = format!(
"scope-status-stale:{}:{}",
scope_key,
if stale { "1" } else { "0" }
);
for model in models.iter_mut() {
if !model
.capabilities
.iter()
.any(|cap| cap == &stale_capability)
{
model.capabilities.push(stale_capability.clone());
}
}
if let Some(age) = snapshot.last_checked_age_secs() {
let age_capability = format!("scope-status-age:{}:{}", scope_key, age);
for model in models.iter_mut() {
if !model.capabilities.iter().any(|cap| cap == &age_capability) {
model.capabilities.push(age_capability.clone());
}
}
}
if let Some(success_age) = snapshot.last_success_age_secs() {
let success_capability =
format!("scope-status-success-age:{}:{}", scope_key, success_age);
for model in models.iter_mut() {
if !model
.capabilities
.iter()
.any(|cap| cap == &success_capability)
{
model.capabilities.push(success_capability.clone());
}
}
}
if let Some(raw_reason) = snapshot.last_error.as_ref() {
let cleaned = raw_reason.replace('\n', " ").trim().to_string();
if !cleaned.is_empty() {
@@ -1658,6 +1724,7 @@ fn annotate_scope_status_adds_capabilities_for_unavailable_scopes() {
let entry = cache.entry(OllamaMode::Cloud).or_default();
entry.availability = ScopeAvailability::Unavailable;
entry.last_error = Some("Cloud endpoint unreachable".to_string());
entry.last_checked = Some(Instant::now());
}
provider.annotate_scope_status(&mut models).await;
@@ -1674,4 +1741,14 @@ fn annotate_scope_status_adds_capabilities_for_unavailable_scopes() {
.iter()
.any(|cap| cap.starts_with("scope-status-message:cloud:"))
);
assert!(
capabilities
.iter()
.any(|cap| cap.starts_with("scope-status-age:cloud:"))
);
assert!(
capabilities
.iter()
.any(|cap| cap == "scope-status-stale:cloud:0")
);
}

View File

@@ -1,5 +1,5 @@
use crate::config::{Config, McpResourceConfig, McpServerConfig};
use crate::consent::ConsentManager;
use crate::consent::{ConsentManager, ConsentScope};
use crate::conversation::ConversationManager;
use crate::credentials::CredentialManager;
use crate::encryption::{self, VaultHandle};
@@ -34,6 +34,7 @@ use std::env;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use tokio::sync::Mutex as TokioMutex;
use tokio::sync::mpsc::UnboundedSender;
use uuid::Uuid;
pub enum SessionOutcome {
@@ -44,6 +45,36 @@ pub enum SessionOutcome {
},
}
#[derive(Debug, Clone)]
pub enum ControllerEvent {
ToolRequested {
request_id: Uuid,
message_id: Uuid,
tool_name: String,
data_types: Vec<String>,
endpoints: Vec<String>,
tool_calls: Vec<ToolCall>,
},
}
#[derive(Clone, Debug)]
struct PendingToolRequest {
message_id: Uuid,
tool_name: String,
data_types: Vec<String>,
endpoints: Vec<String>,
tool_calls: Vec<ToolCall>,
}
#[derive(Debug, Clone)]
pub struct ToolConsentResolution {
pub request_id: Uuid,
pub message_id: Uuid,
pub tool_name: String,
pub scope: ConsentScope,
pub tool_calls: Vec<ToolCall>,
}
fn extract_resource_content(value: &Value) -> Option<String> {
match value {
Value::Null => Some(String::new()),
@@ -111,6 +142,8 @@ pub struct SessionController {
enable_code_tools: bool,
current_mode: Mode,
missing_oauth_servers: Vec<String>,
event_tx: Option<UnboundedSender<ControllerEvent>>,
pending_tool_requests: HashMap<Uuid, PendingToolRequest>,
}
async fn build_tools(
@@ -331,6 +364,7 @@ impl SessionController {
storage: Arc<StorageManager>,
ui: Arc<dyn UiController>,
enable_code_tools: bool,
event_tx: Option<UnboundedSender<ControllerEvent>>,
) -> Result<Self> {
let config_arc = Arc::new(TokioMutex::new(config));
// Acquire the config asynchronously to avoid blocking the runtime.
@@ -435,6 +469,8 @@ impl SessionController {
enable_code_tools,
current_mode: initial_mode,
missing_oauth_servers,
event_tx,
pending_tool_requests: HashMap::new(),
})
}
@@ -1222,14 +1258,84 @@ impl SessionController {
.append_stream_chunk(message_id, &chunk.message.content, chunk.is_final)
}
pub fn check_streaming_tool_calls(&self, message_id: Uuid) -> Option<Vec<ToolCall>> {
self.conversation
pub fn check_streaming_tool_calls(&mut self, message_id: Uuid) -> Option<Vec<ToolCall>> {
let maybe_calls = self
.conversation
.active()
.messages
.iter()
.find(|m| m.id == message_id)
.and_then(|m| m.tool_calls.clone())
.filter(|calls| !calls.is_empty())
.filter(|calls| !calls.is_empty());
let calls = maybe_calls?;
if !self
.pending_tool_requests
.values()
.any(|pending| pending.message_id == message_id)
{
if let Some((tool_name, data_types, endpoints)) =
self.check_tools_consent_needed(&calls).into_iter().next()
{
let request_id = Uuid::new_v4();
let pending = PendingToolRequest {
message_id,
tool_name: tool_name.clone(),
data_types: data_types.clone(),
endpoints: endpoints.clone(),
tool_calls: calls.clone(),
};
self.pending_tool_requests.insert(request_id, pending);
if let Some(tx) = &self.event_tx {
let _ = tx.send(ControllerEvent::ToolRequested {
request_id,
message_id,
tool_name,
data_types,
endpoints,
tool_calls: calls.clone(),
});
}
}
}
Some(calls)
}
pub fn resolve_tool_consent(
&mut self,
request_id: Uuid,
scope: ConsentScope,
) -> Result<ToolConsentResolution> {
let pending = self
.pending_tool_requests
.remove(&request_id)
.ok_or_else(|| {
Error::InvalidInput(format!("Unknown tool consent request: {}", request_id))
})?;
let PendingToolRequest {
message_id,
tool_name,
data_types,
endpoints,
tool_calls,
..
} = pending;
if !matches!(scope, ConsentScope::Denied) {
self.grant_consent_with_scope(&tool_name, data_types, endpoints, scope.clone());
}
Ok(ToolConsentResolution {
request_id,
message_id,
tool_name,
scope,
tool_calls,
})
}
pub fn cancel_stream(&mut self, message_id: Uuid, notice: &str) -> Result<()> {
@@ -1352,7 +1458,7 @@ mod tests {
let provider: Arc<dyn Provider> = Arc::new(MockProvider::default()) as Arc<dyn Provider>;
let ui = Arc::new(NoOpUiController);
let session = SessionController::new(provider, config, storage, ui, false)
let session = SessionController::new(provider, config, storage, ui, false, None)
.await
.expect("session");

View File

@@ -3,14 +3,14 @@
use std::fmt;
/// High-level application state reported by the UI loop.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AppState {
Running,
Quit,
}
/// Vim-style input modes supported by the TUI.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum InputMode {
Normal,
Editing,
@@ -45,7 +45,7 @@ impl fmt::Display for InputMode {
}
/// Represents which panel is currently focused in the TUI layout.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FocusedPanel {
Files,
Chat,

View File

@@ -0,0 +1,310 @@
use std::{any::Any, collections::HashMap, sync::Arc};
use async_trait::async_trait;
use futures::StreamExt;
use owlen_core::{
Config, Error, Mode, Provider,
config::McpMode,
consent::ConsentScope,
mcp::{
McpClient, McpToolCall, McpToolDescriptor, McpToolResponse,
failover::{FailoverMcpClient, ServerEntry},
},
session::{ControllerEvent, SessionController, SessionOutcome},
storage::StorageManager,
types::{ChatParameters, ChatRequest, ChatResponse, Message, ModelInfo, Role, ToolCall},
ui::NoOpUiController,
};
use tempfile::tempdir;
use tokio::sync::mpsc;
// Mock provider for the end-to-end tests: every prompt produces a single
// `resources/write` tool call so the consent handshake can be exercised.
struct StreamingToolProvider;
#[async_trait]
impl Provider for StreamingToolProvider {
    fn name(&self) -> &str {
        "mock-streaming-provider"
    }
    /// Advertise one tool-capable model so the session controller accepts it.
    async fn list_models(&self) -> owlen_core::Result<Vec<ModelInfo>> {
        Ok(vec![ModelInfo {
            id: "mock-model".into(),
            name: "Mock Model".into(),
            description: Some("A mock model that emits tool calls".into()),
            provider: self.name().into(),
            context_window: Some(4096),
            capabilities: vec!["chat".into(), "tools".into()],
            supports_tools: true,
        }])
    }
    /// Non-streaming path: return a final response carrying one tool call.
    async fn send_prompt(&self, _request: ChatRequest) -> owlen_core::Result<ChatResponse> {
        let mut message = Message::assistant("tool-call".to_string());
        message.tool_calls = Some(vec![ToolCall {
            id: "call-1".to_string(),
            name: "resources/write".to_string(),
            arguments: serde_json::json!({"path": "README.md", "content": "hello"}),
        }]);
        Ok(ChatResponse {
            message,
            usage: None,
            is_streaming: false,
            is_final: true,
        })
    }
    /// Streaming path: emit a single non-final chunk whose message already
    /// carries the tool call, mimicking a model that calls tools mid-stream.
    async fn stream_prompt(
        &self,
        _request: ChatRequest,
    ) -> owlen_core::Result<owlen_core::ChatStream> {
        let mut first_chunk = Message::assistant(
            "Thought: need to update README.\nAction: resources/write".to_string(),
        );
        first_chunk.tool_calls = Some(vec![ToolCall {
            id: "call-1".to_string(),
            name: "resources/write".to_string(),
            arguments: serde_json::json!({"path": "README.md", "content": "hello"}),
        }]);
        let chunk = ChatResponse {
            message: first_chunk,
            usage: None,
            is_streaming: true,
            is_final: false,
        };
        Ok(Box::pin(futures::stream::iter(vec![Ok(chunk)])))
    }
    /// Always healthy so session setup never falls back to offline mode.
    async fn health_check(&self) -> owlen_core::Result<()> {
        Ok(())
    }
    fn as_any(&self) -> &(dyn Any + Send + Sync) {
        self
    }
}
/// Descriptor for the fake network-only `web_search` tool shared by the
/// failover tests below.
fn tool_descriptor() -> McpToolDescriptor {
    McpToolDescriptor {
        name: "web_search".to_string(),
        description: "search".to_string(),
        input_schema: serde_json::json!({"type": "object"}),
        requires_network: true,
        requires_filesystem: vec![],
    }
}
/// MCP client stub whose tool calls always fail with a network timeout,
/// standing in for an unreachable primary server.
struct TimeoutClient;
#[async_trait]
impl McpClient for TimeoutClient {
    async fn list_tools(&self) -> owlen_core::Result<Vec<McpToolDescriptor>> {
        Ok(vec![tool_descriptor()])
    }
    /// Always fails, so the failover client must try the next server.
    async fn call_tool(&self, _call: McpToolCall) -> owlen_core::Result<McpToolResponse> {
        Err(Error::Network(
            "timeout while contacting remote web search endpoint".into(),
        ))
    }
}
/// MCP client stub that always answers from a pre-built shared response,
/// standing in for a cache-backed fallback server.
#[derive(Clone)]
struct CachedResponseClient {
    response: Arc<McpToolResponse>,
}
impl CachedResponseClient {
    fn new() -> Self {
        // Metadata marks the payload as served from cache.
        let metadata: HashMap<String, String> = [
            ("source".to_string(), "cache".to_string()),
            ("cached".to_string(), "true".to_string()),
        ]
        .into_iter()
        .collect();
        let response = McpToolResponse {
            name: "web_search".to_string(),
            success: true,
            output: serde_json::json!({
                "query": "rust",
                "results": [
                    {"title": "Rust Programming Language", "url": "https://www.rust-lang.org"}
                ],
                "note": "cached result"
            }),
            metadata,
            duration_ms: 0,
        };
        Self {
            response: Arc::new(response),
        }
    }
}
#[async_trait]
impl McpClient for CachedResponseClient {
    async fn list_tools(&self) -> owlen_core::Result<Vec<McpToolDescriptor>> {
        Ok(vec![tool_descriptor()])
    }
    /// Return a clone of the canned cached response; never fails.
    async fn call_tool(&self, _call: McpToolCall) -> owlen_core::Result<McpToolResponse> {
        Ok((*self.response).clone())
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn streaming_file_write_consent_denied_returns_resolution() {
let temp_dir = tempdir().expect("temp dir");
let storage = StorageManager::with_database_path(temp_dir.path().join("owlen-tests.db"))
.await
.expect("storage");
let mut config = Config::default();
config.general.enable_streaming = true;
config.privacy.encrypt_local_data = false;
config.privacy.require_consent_per_session = true;
config.general.default_model = Some("mock-model".into());
config.mcp.mode = McpMode::LocalOnly;
config
.refresh_mcp_servers(None)
.expect("refresh MCP servers");
let provider: Arc<dyn Provider> = Arc::new(StreamingToolProvider);
let ui = Arc::new(NoOpUiController);
let (event_tx, mut event_rx) = mpsc::unbounded_channel::<ControllerEvent>();
let mut session = SessionController::new(
provider,
config,
Arc::new(storage),
ui,
true,
Some(event_tx),
)
.await
.expect("session controller");
session
.set_operating_mode(Mode::Code)
.await
.expect("code mode");
let outcome = session
.send_message(
"Please write to README".to_string(),
ChatParameters {
stream: true,
..Default::default()
},
)
.await
.expect("send message");
let (response_id, mut stream) = if let SessionOutcome::Streaming {
response_id,
stream,
} = outcome
{
(response_id, stream)
} else {
panic!("expected streaming outcome");
};
session
.mark_stream_placeholder(response_id, "")
.expect("placeholder");
let chunk = stream
.next()
.await
.expect("stream chunk")
.expect("chunk result");
session
.apply_stream_chunk(response_id, &chunk)
.expect("apply chunk");
let tool_calls = session
.check_streaming_tool_calls(response_id)
.expect("tool calls");
assert_eq!(tool_calls.len(), 1);
assert_eq!(tool_calls[0].name, "resources/write");
let event = event_rx.recv().await.expect("controller event");
let request_id = match event {
ControllerEvent::ToolRequested {
request_id,
tool_name,
data_types,
endpoints,
..
} => {
assert_eq!(tool_name, "resources/write");
assert!(data_types.iter().any(|t| t.contains("file")));
assert!(endpoints.iter().any(|e| e.contains("filesystem")));
request_id
}
};
let resolution = session
.resolve_tool_consent(request_id, ConsentScope::Denied)
.expect("resolution");
assert_eq!(resolution.scope, ConsentScope::Denied);
assert_eq!(resolution.tool_name, "resources/write");
assert_eq!(resolution.tool_calls.len(), tool_calls.len());
let err = session
.resolve_tool_consent(request_id, ConsentScope::Denied)
.expect_err("second resolution should fail");
matches!(err, Error::InvalidInput(_));
let conversation = session.conversation().clone();
let assistant = conversation
.messages
.iter()
.find(|message| message.role == Role::Assistant)
.expect("assistant message present");
assert!(
assistant
.tool_calls
.as_ref()
.and_then(|calls| calls.first())
.is_some_and(|call| call.name == "resources/write"),
"stream chunk should capture the tool call on the assistant message"
);
}
/// Failover: when the primary MCP server times out, the client must serve the
/// cached fallback response and report the primary as unhealthy.
#[tokio::test]
async fn web_tool_timeout_fails_over_to_cached_result() {
    let primary: Arc<dyn McpClient> = Arc::new(TimeoutClient);
    let cached = CachedResponseClient::new();
    let backup: Arc<dyn McpClient> = Arc::new(cached.clone());
    // Priorities 1 and 2 — presumably lower is tried first; confirm against
    // `FailoverMcpClient::with_servers` ordering semantics.
    let client = FailoverMcpClient::with_servers(vec![
        ServerEntry::new("primary".into(), primary, 1),
        ServerEntry::new("cache".into(), backup, 2),
    ]);
    let call = McpToolCall {
        name: "web_search".to_string(),
        arguments: serde_json::json!({ "query": "rust", "max_results": 3 }),
    };
    let response = client.call_tool(call.clone()).await.expect("fallback");
    assert_eq!(response.name, "web_search");
    // The metadata stamped by CachedResponseClient proves the cache answered.
    assert_eq!(
        response.metadata.get("source").map(String::as_str),
        Some("cache")
    );
    assert_eq!(
        response.output.get("note").and_then(|value| value.as_str()),
        Some("cached result")
    );
    let statuses = client.get_server_status().await;
    assert!(statuses.iter().any(|(name, health)| name == "primary"
        && !matches!(health, owlen_core::mcp::failover::ServerHealth::Healthy)));
    assert!(statuses.iter().any(|(name, health)| name == "cache"
        && matches!(health, owlen_core::mcp::failover::ServerHealth::Healthy)));
}

View File

@@ -30,6 +30,8 @@ toml = { workspace = true }
syntect = "5.3"
once_cell = "1.19"
owlen-markdown = { path = "../owlen-markdown" }
shellexpand = { workspace = true }
regex = { workspace = true }
# Async runtime
tokio = { workspace = true }

View File

@@ -0,0 +1,99 @@
# Default key bindings. Each [[binding]] maps a key chord, active in one input
# mode, to a command id resolved by the TUI command registry.

# --- Model picker (normal mode) ---
[[binding]]
mode = "normal"
keys = ["m"]
command = "model.open_all"
[[binding]]
mode = "normal"
keys = ["Ctrl+Shift+L"]
command = "model.open_local"
[[binding]]
mode = "normal"
keys = ["Ctrl+Shift+C"]
command = "model.open_cloud"
[[binding]]
mode = "normal"
keys = ["Ctrl+Shift+P"]
command = "model.open_available"

# --- Command palette (normal and editing modes) ---
[[binding]]
mode = "normal"
keys = ["Ctrl+P"]
command = "palette.open"
[[binding]]
mode = "editing"
keys = ["Ctrl+P"]
command = "palette.open"

# --- Panel focus cycling and direct focus ---
[[binding]]
mode = "normal"
keys = ["Tab"]
command = "focus.next"
[[binding]]
mode = "normal"
keys = ["Shift+Tab"]
command = "focus.prev"
[[binding]]
mode = "normal"
keys = ["Ctrl+1"]
command = "focus.files"
[[binding]]
mode = "normal"
keys = ["Ctrl+2"]
command = "focus.chat"
[[binding]]
mode = "normal"
keys = ["Ctrl+3"]
command = "focus.code"
[[binding]]
mode = "normal"
keys = ["Ctrl+4"]
command = "focus.thinking"
[[binding]]
mode = "normal"
keys = ["Ctrl+5"]
command = "focus.input"

# --- Composer and command mode ---
[[binding]]
mode = "editing"
keys = ["Enter"]
command = "composer.submit"
[[binding]]
mode = "normal"
keys = ["Ctrl+;"]
command = "mode.command"

# --- Debug log toggle, available from every mode ---
[[binding]]
mode = "normal"
keys = ["F12"]
command = "debug.toggle"
[[binding]]
mode = "editing"
keys = ["F12"]
command = "debug.toggle"
[[binding]]
mode = "visual"
keys = ["F12"]
command = "debug.toggle"
[[binding]]
mode = "command"
keys = ["F12"]
command = "debug.toggle"
[[binding]]
mode = "help"
keys = ["F12"]
command = "debug.toggle"

View File

@@ -1,5 +1,6 @@
mod generation;
mod handler;
pub mod mvu;
mod worker;
pub mod messages;
@@ -33,6 +34,7 @@ pub trait UiRuntime: MessageState {
async fn handle_session_event(&mut self, event: SessionEvent) -> Result<()>;
async fn process_pending_llm_request(&mut self) -> Result<()>;
async fn process_pending_tool_execution(&mut self) -> Result<()>;
fn poll_controller_events(&mut self) -> Result<()>;
fn advance_loading_animation(&mut self);
fn streaming_count(&self) -> usize;
}
@@ -115,6 +117,7 @@ impl App {
state.process_pending_llm_request().await?;
state.process_pending_tool_execution().await?;
state.poll_controller_events()?;
loop {
match session_rx.try_recv() {

View File

@@ -0,0 +1,165 @@
use owlen_core::{consent::ConsentScope, ui::InputMode};
use uuid::Uuid;
/// Root MVU model: the single state value that `update` mutates.
#[derive(Debug, Clone, Default)]
pub struct AppModel {
    pub composer: ComposerModel,
}
/// State of the message composer (the input box).
#[derive(Debug, Clone)]
pub struct ComposerModel {
    // Text typed but not yet submitted.
    pub draft: String,
    // True between a `Submit` event and the matching `SubmissionHandled`.
    pub pending_submit: bool,
    // Vim-style input mode the composer is currently in.
    pub mode: InputMode,
}
impl Default for ComposerModel {
    fn default() -> Self {
        Self {
            draft: String::new(),
            pending_submit: false,
            mode: InputMode::Normal,
        }
    }
}
/// Top-level events fed into [`update`].
#[derive(Debug, Clone)]
pub enum AppEvent {
    Composer(ComposerEvent),
    /// User resolved a pending tool-consent prompt.
    ToolPermission {
        request_id: Uuid,
        scope: ConsentScope,
    },
}
/// Events originating from the composer widget.
#[derive(Debug, Clone)]
pub enum ComposerEvent {
    DraftChanged { content: String },
    ModeChanged { mode: InputMode },
    /// User pressed the submit binding.
    Submit,
    /// The runtime finished processing a previously requested submit.
    SubmissionHandled { result: SubmissionOutcome },
}
/// Outcome the runtime reports back after processing a submit.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SubmissionOutcome {
    MessageSent,
    CommandExecuted,
    Failed,
}
/// Side effects requested by [`update`]; executed by the runtime, never here.
#[derive(Debug, Clone)]
pub enum AppEffect {
    SetStatus(String),
    RequestSubmit,
    ResolveToolConsent {
        request_id: Uuid,
        scope: ConsentScope,
    },
}
/// Pure MVU reducer: applies `event` to `model` and returns the side effects
/// the runtime should perform. Performs no I/O itself.
pub fn update(model: &mut AppModel, event: AppEvent) -> Vec<AppEffect> {
    match event {
        AppEvent::Composer(event) => update_composer(&mut model.composer, event),
        AppEvent::ToolPermission { request_id, scope } => {
            vec![AppEffect::ResolveToolConsent { request_id, scope }]
        }
    }
}
/// Composer-specific reducer: blank drafts are rejected with a status message,
/// non-empty drafts mark a pending submit, and a handled submission clears the
/// pending flag (plus the draft and editing mode on success).
fn update_composer(model: &mut ComposerModel, event: ComposerEvent) -> Vec<AppEffect> {
    match event {
        ComposerEvent::DraftChanged { content } => {
            model.draft = content;
            vec![]
        }
        ComposerEvent::ModeChanged { mode } => {
            model.mode = mode;
            vec![]
        }
        // Guard: refuse to submit a blank draft.
        ComposerEvent::Submit if model.draft.trim().is_empty() => {
            vec![AppEffect::SetStatus(
                "Cannot send empty message".to_string(),
            )]
        }
        ComposerEvent::Submit => {
            model.pending_submit = true;
            vec![AppEffect::RequestSubmit]
        }
        ComposerEvent::SubmissionHandled { result } => {
            model.pending_submit = false;
            let succeeded = matches!(
                result,
                SubmissionOutcome::MessageSent | SubmissionOutcome::CommandExecuted
            );
            if succeeded {
                model.draft.clear();
                // Drop back to normal mode only if we were mid-edit.
                if model.mode == InputMode::Editing {
                    model.mode = InputMode::Normal;
                }
            }
            vec![]
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Submitting an empty draft must not set the pending flag and must
    /// surface a status-message effect.
    #[test]
    fn submit_with_empty_draft_sets_error() {
        let mut model = AppModel::default();
        let effects = update(&mut model, AppEvent::Composer(ComposerEvent::Submit));
        assert!(!model.composer.pending_submit);
        assert_eq!(effects.len(), 1);
        match &effects[0] {
            AppEffect::SetStatus(message) => {
                assert!(message.contains("Cannot send empty message"));
            }
            other => panic!("unexpected effect: {:?}", other),
        }
    }
    /// A non-empty draft marks a pending submit and requests processing.
    #[test]
    fn submit_with_content_requests_processing() {
        let mut model = AppModel::default();
        let _ = update(
            &mut model,
            AppEvent::Composer(ComposerEvent::DraftChanged {
                content: "hello world".into(),
            }),
        );
        let effects = update(&mut model, AppEvent::Composer(ComposerEvent::Submit));
        assert!(model.composer.pending_submit);
        assert_eq!(effects.len(), 1);
        // Fix: a bare `matches!` statement discards its boolean result and
        // verifies nothing; wrap it in `assert!` so the effect kind is checked.
        assert!(matches!(effects[0], AppEffect::RequestSubmit));
    }
    /// Successful submission clears the draft, the pending flag, and exits
    /// editing mode.
    #[test]
    fn submission_success_clears_draft_and_mode() {
        let mut model = AppModel::default();
        let _ = update(
            &mut model,
            AppEvent::Composer(ComposerEvent::DraftChanged {
                content: "hello world".into(),
            }),
        );
        let _ = update(&mut model, AppEvent::Composer(ComposerEvent::Submit));
        assert!(model.composer.pending_submit);
        let effects = update(
            &mut model,
            AppEvent::Composer(ComposerEvent::SubmissionHandled {
                result: SubmissionOutcome::MessageSent,
            }),
        );
        assert!(effects.is_empty());
        assert!(!model.composer.pending_submit);
        assert!(model.composer.draft.is_empty());
        assert_eq!(model.composer.mode, InputMode::Normal);
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
use anyhow::Result;
use owlen_core::session::SessionController;
use owlen_core::session::{ControllerEvent, SessionController};
use owlen_core::ui::{AppState, InputMode};
use tokio::sync::mpsc;
@@ -16,11 +16,12 @@ pub struct CodeApp {
impl CodeApp {
/// Build a `CodeApp`: seed the conversation with the code-mode system prompt,
/// then wrap a `ChatApp` constructed from the controller and its event stream.
pub async fn new(
    mut controller: SessionController,
    controller_event_rx: mpsc::UnboundedReceiver<ControllerEvent>,
) -> Result<(Self, mpsc::UnboundedReceiver<SessionEvent>)> {
    controller
        .conversation_mut()
        .push_system_message(DEFAULT_SYSTEM_PROMPT.to_string());
    // The duplicated construction line (diff leftover) moved `controller`
    // twice; keep only the variant that threads controller events through.
    let (inner, rx) = ChatApp::new(controller, controller_event_rx).await?;
    Ok((Self { inner }, rx))
}

View File

@@ -1,4 +1,7 @@
//! Command catalog and lookup utilities for the command palette.
pub mod registry;
pub use registry::{AppCommand, CommandRegistry};
// Command catalog and lookup utilities for the command palette.
/// Metadata describing a single command keyword.
#[derive(Debug, Clone, Copy)]
@@ -240,6 +243,10 @@ const COMMANDS: &[CommandSpec] = &[
keyword: "explorer",
description: "Alias for files",
},
CommandSpec {
keyword: "debug log",
description: "Toggle the debug log panel",
},
];
/// Return the static catalog of commands.

View File

@@ -0,0 +1,107 @@
use std::collections::HashMap;
use owlen_core::ui::FocusedPanel;
use crate::widgets::model_picker::FilterMode;
/// Commands the keymap and palette can invoke, decoupled from key chords.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AppCommand {
    /// Open the model picker, optionally pre-filtered.
    OpenModelPicker(Option<FilterMode>),
    OpenCommandPalette,
    CycleFocusForward,
    CycleFocusBackward,
    FocusPanel(FocusedPanel),
    ComposerSubmit,
    EnterCommandMode,
    ToggleDebugLog,
}
/// Maps stable command-id strings (as used in keymap files) to `AppCommand`s.
#[derive(Debug)]
pub struct CommandRegistry {
    commands: HashMap<String, AppCommand>,
}
impl CommandRegistry {
pub fn new() -> Self {
let mut commands = HashMap::new();
commands.insert(
"model.open_all".to_string(),
AppCommand::OpenModelPicker(None),
);
commands.insert(
"model.open_local".to_string(),
AppCommand::OpenModelPicker(Some(FilterMode::LocalOnly)),
);
commands.insert(
"model.open_cloud".to_string(),
AppCommand::OpenModelPicker(Some(FilterMode::CloudOnly)),
);
commands.insert(
"model.open_available".to_string(),
AppCommand::OpenModelPicker(Some(FilterMode::Available)),
);
commands.insert("palette.open".to_string(), AppCommand::OpenCommandPalette);
commands.insert("focus.next".to_string(), AppCommand::CycleFocusForward);
commands.insert("focus.prev".to_string(), AppCommand::CycleFocusBackward);
commands.insert(
"focus.files".to_string(),
AppCommand::FocusPanel(FocusedPanel::Files),
);
commands.insert(
"focus.chat".to_string(),
AppCommand::FocusPanel(FocusedPanel::Chat),
);
commands.insert(
"focus.thinking".to_string(),
AppCommand::FocusPanel(FocusedPanel::Thinking),
);
commands.insert(
"focus.input".to_string(),
AppCommand::FocusPanel(FocusedPanel::Input),
);
commands.insert(
"focus.code".to_string(),
AppCommand::FocusPanel(FocusedPanel::Code),
);
commands.insert("composer.submit".to_string(), AppCommand::ComposerSubmit);
commands.insert("mode.command".to_string(), AppCommand::EnterCommandMode);
commands.insert("debug.toggle".to_string(), AppCommand::ToggleDebugLog);
Self { commands }
}
pub fn resolve(&self, command: &str) -> Option<AppCommand> {
self.commands.get(command).copied()
}
}
impl Default for CommandRegistry {
    // Delegate to `new` so `CommandRegistry::default()` is fully populated.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Known ids resolve to the expected commands, including parameterised ones.
    #[test]
    fn resolve_known_command() {
        let registry = CommandRegistry::new();
        assert_eq!(
            registry.resolve("focus.next"),
            Some(AppCommand::CycleFocusForward)
        );
        assert_eq!(
            registry.resolve("model.open_cloud"),
            Some(AppCommand::OpenModelPicker(Some(FilterMode::CloudOnly)))
        );
    }
    // Unknown ids resolve to `None` rather than panicking.
    #[test]
    fn resolve_unknown_command() {
        let registry = CommandRegistry::new();
        assert_eq!(registry.resolve("does.not.exist"), None);
    }
}

View File

@@ -0,0 +1,235 @@
use chrono::{DateTime, Local};
use log::{Level, LevelFilter, Metadata, Record};
use once_cell::sync::{Lazy, OnceCell};
use regex::Regex;
use std::collections::VecDeque;
use std::sync::Mutex;
/// Maximum number of entries to retain in the in-memory ring buffer.
const MAX_ENTRIES: usize = 256;
/// Global access handle for the debug log store.
static STORE: Lazy<DebugLogStore> = Lazy::new(DebugLogStore::default);
// One-shot guard ensuring logger installation is attempted at most once.
static LOGGER: OnceCell<()> = OnceCell::new();
// Logger instance handed to `log::set_logger` in `install_global_logger`.
static DEBUG_LOGGER: DebugLogger = DebugLogger;
/// Install the in-process logger that feeds the debug log ring buffer.
///
/// Idempotent: the `OnceCell` guard makes repeated calls no-ops. If another
/// logger was already registered with the `log` crate, `set_logger` fails and
/// this silently does nothing — NOTE(review): confirm that best-effort
/// behaviour is intended rather than surfacing the conflict.
pub fn install_global_logger() {
    LOGGER.get_or_init(|| {
        if log::set_logger(&DEBUG_LOGGER).is_ok() {
            // Let every level through `log`'s fast path; per-record filtering
            // happens inside `DebugLogger::log`.
            log::set_max_level(LevelFilter::Trace);
        }
    });
}
/// Per-application state for presenting and acknowledging debug log entries.
#[derive(Debug)]
pub struct DebugLogState {
    // Whether the debug log panel is currently shown in the UI.
    visible: bool,
    // Id of the newest entry the user has already acknowledged.
    last_seen_id: u64,
}
impl DebugLogState {
    /// Create a state that treats everything already in the global store as seen.
    pub fn new() -> Self {
        Self {
            visible: false,
            last_seen_id: STORE.latest_id(),
        }
    }

    /// Flip panel visibility (opening acknowledges all pending entries) and
    /// return the new visibility.
    pub fn toggle_visible(&mut self) -> bool {
        let now_visible = !self.visible;
        self.set_visible(now_visible);
        now_visible
    }

    /// Explicitly show or hide the panel; showing acknowledges all entries.
    pub fn set_visible(&mut self, visible: bool) {
        self.visible = visible;
        if visible {
            self.mark_seen();
        }
    }

    /// Whether the panel is currently shown.
    pub fn is_visible(&self) -> bool {
        self.visible
    }

    /// Copy of everything currently retained in the global ring buffer.
    pub fn entries(&self) -> Vec<DebugLogEntry> {
        STORE.snapshot()
    }

    /// Return the entries added since the last acknowledgement and mark them
    /// as seen.
    pub fn take_unseen(&mut self) -> Vec<DebugLogEntry> {
        let unseen = STORE.entries_since(self.last_seen_id);
        if let Some(newest) = unseen.last() {
            self.last_seen_id = newest.id;
        }
        unseen
    }

    /// True when the store holds entries newer than the last acknowledgement.
    pub fn has_unseen(&self) -> bool {
        STORE.latest_id() > self.last_seen_id
    }

    // Acknowledge everything currently in the store.
    fn mark_seen(&mut self) {
        self.last_seen_id = STORE.latest_id();
    }
}
impl Default for DebugLogState {
fn default() -> Self {
Self::new()
}
}
/// Metadata describing a single debug log entry.
#[derive(Clone, Debug)]
pub struct DebugLogEntry {
    /// Monotonically increasing id assigned by the store (first entry gets 1).
    pub id: u64,
    /// Local wall-clock time at which the record was captured.
    pub timestamp: DateTime<Local>,
    /// Severity of the original `log` record.
    pub level: Level,
    /// The `log` target (typically the emitting module path).
    pub target: String,
    /// Record text after secret redaction (see `sanitize_message`).
    pub message: String,
}
// Thread-safe ring buffer holding the most recent log entries.
#[derive(Default)]
struct DebugLogStore {
    inner: Mutex<Inner>,
}
// Mutex-protected interior: the entries plus the id counter.
#[derive(Default)]
struct Inner {
    // Oldest entry at the front; length capped at `MAX_ENTRIES`.
    entries: VecDeque<DebugLogEntry>,
    // Id handed to the most recently pushed entry (0 = nothing pushed yet).
    next_id: u64,
}
impl DebugLogStore {
    /// Clone out every retained entry, oldest first.
    fn snapshot(&self) -> Vec<DebugLogEntry> {
        let guard = self.inner.lock().unwrap();
        guard.entries.iter().cloned().collect()
    }

    /// Id of the most recently pushed entry (0 when nothing was pushed yet).
    fn latest_id(&self) -> u64 {
        self.inner.lock().unwrap().next_id
    }

    /// Entries whose id is strictly greater than `last_seen_id`, oldest first.
    fn entries_since(&self, last_seen_id: u64) -> Vec<DebugLogEntry> {
        let guard = self.inner.lock().unwrap();
        guard
            .entries
            .iter()
            .filter(|entry| entry.id > last_seen_id)
            .cloned()
            .collect()
    }

    /// Redact and append a record, evicting the oldest entries so at most
    /// `MAX_ENTRIES` remain. Returns a copy of the stored entry.
    fn push(&self, level: Level, target: &str, message: &str) -> DebugLogEntry {
        // Redact before taking the lock to keep the critical section short.
        let sanitized = sanitize_message(message);
        let mut guard = self.inner.lock().unwrap();
        guard.next_id = guard.next_id.saturating_add(1);
        let entry = DebugLogEntry {
            id: guard.next_id,
            timestamp: Local::now(),
            level,
            target: target.to_string(),
            message: sanitized,
        };
        guard.entries.push_back(entry.clone());
        while guard.entries.len() > MAX_ENTRIES {
            guard.entries.pop_front();
        }
        entry
    }
}
/// Zero-sized `log::Log` implementor backing `install_global_logger`.
struct DebugLogger;
impl log::Log for DebugLogger {
    /// Accept every record; per-record filtering happens in `log` below.
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= LevelFilter::Trace
    }

    /// Capture warnings and errors into the ring buffer and mirror them to
    /// stderr; info/debug/trace records are dropped.
    fn log(&self, record: &Record) {
        if !self.enabled(record.metadata()) {
            return;
        }
        // Only persist warnings and errors in the in-memory buffer.
        //
        // `log::Level` orders by severity with `Error` as the *smallest*
        // variant (Error < Warn < Info < Debug < Trace), so anything strictly
        // greater than `Warn` is info-or-quieter. The previous check
        // (`record.level() < Level::Warn`) inverted this: it silently dropped
        // every Error record (leaving the Error branch below unreachable)
        // while persisting Info/Debug/Trace.
        if record.level() > Level::Warn {
            return;
        }
        let message = record.args().to_string();
        let entry = STORE.push(record.level(), record.target(), &message);
        if record.level() == Level::Error {
            eprintln!(
                "[owlen:error][{}] {}",
                entry.timestamp.format("%Y-%m-%d %H:%M:%S"),
                entry.message
            );
        } else if record.level() == Level::Warn {
            eprintln!(
                "[owlen:warn][{}] {}",
                entry.timestamp.format("%Y-%m-%d %H:%M:%S"),
                entry.message
            );
        }
    }

    // Entries live in memory only; there is nothing to flush.
    fn flush(&self) {}
}
/// Redact credentials from a log line before it is stored or printed.
///
/// Three passes run in order: `Authorization` headers (whose value pattern
/// runs to end of line, so everything after the separator is masked),
/// generic `api_key`/`token` assignments, and bare `Bearer <token>` fragments.
fn sanitize_message(message: &str) -> String {
    // e.g. `Authorization: Bearer xyz` — group 3 spans the rest of the line.
    static AUTH_HEADER: Lazy<Regex> =
        Lazy::new(|| Regex::new(r"(?i)\b(authorization)(\s*[:=]\s*)([^\r\n]+)").unwrap());
    // e.g. `api-key=...`, `token: ...` — value stops at whitespace, ',' or ';'.
    static GENERIC_SECRET: Lazy<Regex> =
        Lazy::new(|| Regex::new(r"(?i)\b(api[_-]?key|token)(\s*[:=]\s*)([^,\s;]+)").unwrap());
    // Bearer tokens appearing without an Authorization prefix.
    static BEARER_TOKEN: Lazy<Regex> =
        Lazy::new(|| Regex::new(r"(?i)\bBearer\s+[A-Za-z0-9._\-+/=]+").unwrap());
    let step = AUTH_HEADER.replace_all(message, |caps: &regex::Captures<'_>| {
        format!("{}{}<redacted>", &caps[1], &caps[2])
    });
    let step = GENERIC_SECRET.replace_all(&step, |caps: &regex::Captures<'_>| {
        format!("{}{}<redacted>", &caps[1], &caps[2])
    });
    BEARER_TOKEN
        .replace_all(&step, "Bearer <redacted>")
        .into_owned()
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn sanitize_masks_common_tokens() {
        // The Authorization pattern consumes the remainder of the line, so a
        // single `<redacted>` replaces everything after the separator.
        let input =
            "Authorization: Bearer abc123 token=xyz456 KEY=value Authorization=Token secretStuff";
        let sanitized = sanitize_message(input);
        assert!(!sanitized.contains("abc123"));
        assert!(!sanitized.contains("xyz456"));
        assert!(!sanitized.contains("secretStuff"));
        assert_eq!(sanitized, "Authorization: <redacted>");
    }
    #[test]
    fn ring_buffer_discards_old_entries() {
        // NOTE: `STORE` is process-global and shared across tests, so only
        // relative assertions (against `initial_latest`) are safe here.
        install_global_logger();
        let initial_latest = STORE.latest_id();
        for idx in 0..(MAX_ENTRIES as u64 + 10) {
            let message = format!("warn #{idx}");
            STORE.push(Level::Warn, "test", &message);
        }
        let entries = STORE.snapshot();
        assert_eq!(entries.len(), MAX_ENTRIES);
        // Overflow evicted the oldest entries, so the remaining head must be
        // newer than anything present before this test started pushing.
        assert!(entries.first().unwrap().id > initial_latest);
    }
}

View File

@@ -0,0 +1,307 @@
use std::{
collections::HashMap,
fs,
path::{Path, PathBuf},
};
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers};
use log::warn;
use owlen_core::{config::default_config_path, ui::InputMode};
use serde::Deserialize;
use crate::commands::registry::{AppCommand, CommandRegistry};
const DEFAULT_KEYMAP: &str = include_str!("../../keymap.toml");
/// Resolved key bindings: `(input mode, key chord) -> command`.
#[derive(Debug, Clone)]
pub struct Keymap {
    // Populated once at load time; lookups are read-only afterwards.
    bindings: HashMap<(InputMode, KeyPattern), AppCommand>,
}
impl Keymap {
pub fn load(custom_path: Option<&str>, registry: &CommandRegistry) -> Self {
let mut content = None;
if let Some(path) = custom_path.and_then(expand_path) {
if let Ok(text) = fs::read_to_string(&path) {
content = Some(text);
} else {
warn!(
"Failed to read keymap from {}. Falling back to defaults.",
path.display()
);
}
}
if content.is_none() {
let default_path = default_config_keymap_path();
if let Some(path) = default_path {
if let Ok(text) = fs::read_to_string(&path) {
content = Some(text);
}
}
}
let data = content.unwrap_or_else(|| DEFAULT_KEYMAP.to_string());
let parsed: KeymapConfig = toml::from_str(&data).unwrap_or_else(|err| {
warn!("Failed to parse keymap: {err}. Using built-in defaults.");
toml::from_str(DEFAULT_KEYMAP).expect("embedded keymap should parse successfully")
});
let mut bindings = HashMap::new();
for entry in parsed.bindings {
let mode = match parse_mode(&entry.mode) {
Some(mode) => mode,
None => {
warn!("Unknown input mode '{}' in keymap binding", entry.mode);
continue;
}
};
let command = match registry.resolve(&entry.command) {
Some(cmd) => cmd,
None => {
warn!("Unknown command '{}' in keymap binding", entry.command);
continue;
}
};
for key in entry.keys.into_iter() {
match KeyPattern::from_str(&key) {
Some(pattern) => {
bindings.insert((mode, pattern), command);
}
None => warn!(
"Unrecognised key specification '{}' for mode {}",
key, entry.mode
),
}
}
}
Self { bindings }
}
pub fn resolve(&self, mode: InputMode, event: &KeyEvent) -> Option<AppCommand> {
let pattern = KeyPattern::from_event(event)?;
self.bindings.get(&(mode, pattern)).copied()
}
}
// Serde mirror of keymap.toml's top level: a list of `[[binding]]` tables.
#[derive(Debug, Deserialize)]
struct KeymapConfig {
    #[serde(default, rename = "binding")]
    bindings: Vec<KeyBindingConfig>,
}
// One `[[binding]]` table: mode + command names plus one or more key specs.
#[derive(Debug, Deserialize)]
struct KeyBindingConfig {
    // Input-mode name; validated by `parse_mode`.
    mode: String,
    // Command identifier; validated against the `CommandRegistry`.
    command: String,
    keys: KeyList,
}
// Accepts either `keys = "x"` or `keys = ["x", "y"]` in TOML.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum KeyList {
    Single(String),
    Multiple(Vec<String>),
}
impl KeyList {
    // Normalise either shape to a vector of key specifications.
    fn into_iter(self) -> Vec<String> {
        match self {
            KeyList::Single(key) => vec![key],
            KeyList::Multiple(keys) => keys,
        }
    }
}
// A normalised key chord: base key plus modifier set.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct KeyPattern {
    code: KeyCodeKind,
    modifiers: KeyModifiers,
}
// Subset of `crossterm::event::KeyCode` the keymap supports; other codes
// (media keys, etc.) are rejected by `KeyPattern::from_event`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum KeyCodeKind {
    Char(char),
    Enter,
    Tab,
    BackTab,
    Backspace,
    Esc,
    Up,
    Down,
    Left,
    Right,
    PageUp,
    PageDown,
    Home,
    End,
    // Function keys, e.g. `F(12)` for F12.
    F(u8),
}
impl KeyPattern {
    /// Convert a live terminal event into a pattern; returns `None` for key
    /// codes the keymap does not model (media keys, etc.).
    fn from_event(event: &KeyEvent) -> Option<Self> {
        let code = match event.code {
            KeyCode::Char(c) => KeyCodeKind::Char(c),
            KeyCode::Enter => KeyCodeKind::Enter,
            KeyCode::Tab => KeyCodeKind::Tab,
            KeyCode::BackTab => KeyCodeKind::BackTab,
            KeyCode::Backspace => KeyCodeKind::Backspace,
            KeyCode::Esc => KeyCodeKind::Esc,
            KeyCode::Up => KeyCodeKind::Up,
            KeyCode::Down => KeyCodeKind::Down,
            KeyCode::Left => KeyCodeKind::Left,
            KeyCode::Right => KeyCodeKind::Right,
            KeyCode::PageUp => KeyCodeKind::PageUp,
            KeyCode::PageDown => KeyCodeKind::PageDown,
            KeyCode::Home => KeyCodeKind::Home,
            KeyCode::End => KeyCodeKind::End,
            KeyCode::F(n) => KeyCodeKind::F(n),
            // Unsupported key codes cannot be bound.
            _ => return None,
        };
        Some(Self {
            code,
            modifiers: normalize_modifiers(event.modifiers),
        })
    }
    /// Parse a spec such as `"ctrl+shift+p"` or `"f12"`: all tokens but the
    /// last are modifiers; the last token is the key itself. Unknown
    /// modifiers are warned about and ignored; an unparseable key token
    /// yields `None`.
    ///
    /// NOTE(review): `shift+<letter>` keeps the letter as typed in the
    /// config (lowercase), while terminals generally report the *shifted*
    /// character — confirm such bindings actually match at runtime.
    fn from_str(spec: &str) -> Option<Self> {
        let tokens: Vec<&str> = spec
            .split('+')
            .map(|token| token.trim())
            .filter(|token| !token.is_empty())
            .collect();
        if tokens.is_empty() {
            return None;
        }
        let mut modifiers = KeyModifiers::empty();
        // Safe: `tokens` is non-empty, checked above.
        let key_token = tokens.last().copied().unwrap();
        for token in tokens[..tokens.len().saturating_sub(1)].iter() {
            match token.to_ascii_lowercase().as_str() {
                "ctrl" | "control" => modifiers.insert(KeyModifiers::CONTROL),
                "alt" | "option" => modifiers.insert(KeyModifiers::ALT),
                "shift" => modifiers.insert(KeyModifiers::SHIFT),
                other => warn!("Unknown modifier '{other}' in key binding '{spec}'"),
            }
        }
        // `parse_key_token` may rewrite `modifiers` (shift+tab -> BackTab).
        let code = parse_key_token(key_token, &mut modifiers)?;
        Some(Self {
            code,
            modifiers: normalize_modifiers(modifiers),
        })
    }
}
/// Translate a single key token (the last `+`-separated piece of a spec)
/// into a [`KeyCodeKind`].
///
/// `modifiers` may be rewritten: `shift+tab` is folded into `BackTab` with
/// the shift bit cleared. Returns `None` for tokens that are neither a known
/// name, an `f<N>` function key, nor a single character.
fn parse_key_token(token: &str, modifiers: &mut KeyModifiers) -> Option<KeyCodeKind> {
    let lower = token.to_ascii_lowercase();
    // Named keys first.
    let named = match lower.as_str() {
        "enter" | "return" => Some(KeyCodeKind::Enter),
        "tab" => {
            if modifiers.contains(KeyModifiers::SHIFT) {
                modifiers.remove(KeyModifiers::SHIFT);
                Some(KeyCodeKind::BackTab)
            } else {
                Some(KeyCodeKind::Tab)
            }
        }
        "backtab" => Some(KeyCodeKind::BackTab),
        "backspace" | "bs" => Some(KeyCodeKind::Backspace),
        "esc" | "escape" => Some(KeyCodeKind::Esc),
        "up" => Some(KeyCodeKind::Up),
        "down" => Some(KeyCodeKind::Down),
        "left" => Some(KeyCodeKind::Left),
        "right" => Some(KeyCodeKind::Right),
        "pageup" | "page_up" | "pgup" => Some(KeyCodeKind::PageUp),
        "pagedown" | "page_down" | "pgdn" => Some(KeyCodeKind::PageDown),
        "home" => Some(KeyCodeKind::Home),
        "end" => Some(KeyCodeKind::End),
        "space" => Some(KeyCodeKind::Char(' ')),
        "semicolon" => Some(KeyCodeKind::Char(';')),
        "slash" => Some(KeyCodeKind::Char('/')),
        _ => None,
    };
    if named.is_some() {
        return named;
    }
    // Function keys: `f1`..`f255`. A malformed number (e.g. `fx`) is an
    // error, not a fallthrough to the single-character case below.
    if let Some(digits) = lower.strip_prefix('f') {
        if !digits.is_empty() {
            return digits.parse::<u8>().ok().map(KeyCodeKind::F);
        }
    }
    // Anything that is exactly one character binds as that character.
    let mut chars = lower.chars();
    match (chars.next(), chars.next()) {
        (Some(only), None) => Some(KeyCodeKind::Char(only)),
        _ => None,
    }
}
/// Map a mode name from keymap.toml onto an [`InputMode`], accepting the
/// short aliases used by the default configuration. Unknown names yield
/// `None` so the caller can skip the binding with a warning.
fn parse_mode(mode: &str) -> Option<InputMode> {
    let normalized = mode.to_ascii_lowercase();
    let parsed = match normalized.as_str() {
        "normal" => InputMode::Normal,
        "editing" => InputMode::Editing,
        "command" => InputMode::Command,
        "visual" => InputMode::Visual,
        "provider_selection" | "provider" => InputMode::ProviderSelection,
        "model_selection" | "model" => InputMode::ModelSelection,
        "help" => InputMode::Help,
        "session_browser" | "sessions" => InputMode::SessionBrowser,
        "theme_browser" | "themes" => InputMode::ThemeBrowser,
        "repo_search" | "search" => InputMode::RepoSearch,
        "symbol_search" | "symbols" => InputMode::SymbolSearch,
        _ => return None,
    };
    Some(parsed)
}
/// Location of the user's keymap file: `keymap.toml` next to the main
/// config file. `None` when the config path has no parent directory.
fn default_config_keymap_path() -> Option<PathBuf> {
    let config_path = default_config_path();
    config_path.parent().map(|dir| dir.join("keymap.toml"))
}
fn expand_path(path: &str) -> Option<PathBuf> {
if path.trim().is_empty() {
return None;
}
let expanded = shellexpand::tilde(path);
let candidate = Path::new(expanded.as_ref()).to_path_buf();
Some(candidate)
}
// Hook for canonicalising modifier sets (e.g. platform-specific quirks).
// Currently the identity function; both `from_event` and `from_str` route
// through it so any future normalisation applies to both sides of a lookup.
fn normalize_modifiers(modifiers: KeyModifiers) -> KeyModifiers {
    modifiers
}
#[cfg(test)]
mod tests {
    use super::*;
    use crossterm::event::{KeyCode, KeyModifiers};
    #[test]
    fn resolve_binding_from_default_keymap() {
        let registry = CommandRegistry::new();
        assert!(registry.resolve("model.open_all").is_some());
        // The embedded default keymap must itself be valid TOML with bindings.
        let parsed: KeymapConfig = toml::from_str(DEFAULT_KEYMAP).unwrap();
        assert!(!parsed.bindings.is_empty());
        // NOTE: `load(None, ..)` may pick up a user keymap from the config
        // directory; the binding asserted below is expected either way.
        let keymap = Keymap::load(None, &registry);
        let event = KeyEvent::new(KeyCode::Char('m'), KeyModifiers::NONE);
        assert!(
            !keymap.bindings.is_empty(),
            "expected default keymap to provide bindings"
        );
        assert_eq!(
            keymap.resolve(InputMode::Normal, &event),
            Some(AppCommand::OpenModelPicker(None))
        );
    }
}

View File

@@ -6,16 +6,20 @@
//! to test in isolation.
mod command_palette;
mod debug_log;
mod file_icons;
mod file_tree;
mod keymap;
mod search;
mod workspace;
pub use command_palette::{CommandPalette, ModelPaletteEntry, PaletteGroup, PaletteSuggestion};
pub use debug_log::{DebugLogEntry, DebugLogState, install_global_logger};
pub use file_icons::{FileIconResolver, FileIconSet, IconDetection};
pub use file_tree::{
FileNode, FileTreeState, FilterMode as FileFilterMode, GitDecoration, VisibleFileEntry,
};
pub use keymap::Keymap;
pub use search::{
RepoSearchFile, RepoSearchMatch, RepoSearchMessage, RepoSearchRow, RepoSearchRowKind,
RepoSearchState, SymbolEntry, SymbolKind, SymbolSearchMessage, SymbolSearchState,

View File

@@ -1,3 +1,4 @@
use log::Level;
use pathdiff::diff_paths;
use ratatui::Frame;
use ratatui::layout::{Alignment, Constraint, Direction, Layout, Rect};
@@ -366,6 +367,20 @@ pub fn render_chat(frame: &mut Frame<'_>, app: &mut ChatApp) {
render_code_workspace(frame, area, app);
}
if app.is_debug_log_visible() {
let min_height = 6;
let computed_height = content_area.height.saturating_div(3).max(min_height);
let panel_height = computed_height.min(content_area.height);
if panel_height >= 4 {
let y = content_area
.y
.saturating_add(content_area.height.saturating_sub(panel_height));
let log_area = Rect::new(content_area.x, y, content_area.width, panel_height);
render_debug_log_panel(frame, log_area, app);
}
}
render_toasts(frame, app, content_area);
}
@@ -1964,6 +1979,134 @@ fn render_system_output(frame: &mut Frame<'_>, area: Rect, app: &ChatApp, messag
frame.render_widget(paragraph, area);
}
/// Render the warnings/errors overlay into `area`: a bordered panel showing
/// the newest captured entries, with an "N older entries not shown" banner
/// when the buffer exceeds the visible rows.
fn render_debug_log_panel(frame: &mut Frame<'_>, area: Rect, app: &ChatApp) {
    let theme = app.theme();
    // Clear whatever was rendered beneath the overlay first.
    frame.render_widget(Clear, area);
    let title = Line::from(vec![
        Span::styled(
            " Debug log ",
            Style::default()
                .fg(theme.pane_header_active)
                .add_modifier(Modifier::BOLD),
        ),
        Span::styled(
            "warnings & errors",
            Style::default()
                .fg(theme.pane_hint_text)
                .add_modifier(Modifier::DIM),
        ),
    ]);
    let block = Block::default()
        .borders(Borders::ALL)
        .border_style(Style::default().fg(theme.focused_panel_border))
        .style(Style::default().bg(theme.background).fg(theme.text))
        .title(title);
    let inner = block.inner(area);
    frame.render_widget(block, area);
    // Nothing fits inside the border; bail before building content.
    if inner.width == 0 || inner.height == 0 {
        return;
    }
    let entries = app.debug_log_entries();
    let available_rows = inner.height as usize;
    let mut lines: Vec<Line> = Vec::new();
    if entries.is_empty() {
        lines.push(Line::styled(
            "No warnings captured this session.",
            Style::default()
                .fg(theme.pane_hint_text)
                .add_modifier(Modifier::DIM),
        ));
    } else {
        let total_entries = entries.len();
        // Keep only the newest `available_rows` entries, in chronological order.
        let mut subset: Vec<_> = entries.into_iter().rev().take(available_rows).collect();
        subset.reverse();
        // If the panel is exactly full and entries were cut off, drop one more
        // so the overflow banner below has a row to occupy.
        if total_entries > subset.len() && subset.len() == available_rows && !subset.is_empty() {
            subset.remove(0);
        }
        let overflow = total_entries.saturating_sub(subset.len());
        if overflow > 0 {
            lines.push(Line::styled(
                format!("{overflow} older entries not shown"),
                Style::default()
                    .fg(theme.pane_hint_text)
                    .add_modifier(Modifier::DIM),
            ));
        }
        // One line per entry: level badge, timestamp, optional target, message.
        for entry in subset {
            let (label, badge_style, message_style) = debug_level_styles(entry.level, theme);
            let timestamp = entry.timestamp.format("%H:%M:%S");
            let mut spans = vec![
                Span::styled(format!(" {label} "), badge_style),
                Span::raw(" "),
                Span::styled(
                    timestamp.to_string(),
                    Style::default()
                        .fg(theme.pane_hint_text)
                        .add_modifier(Modifier::DIM),
                ),
            ];
            if !entry.target.is_empty() {
                spans.push(Span::raw(" "));
                spans.push(Span::styled(
                    entry.target,
                    Style::default().fg(theme.pane_header_active),
                ));
            }
            spans.push(Span::raw(" "));
            spans.push(Span::styled(entry.message, message_style));
            lines.push(Line::from(spans));
        }
    }
    let paragraph = Paragraph::new(lines)
        .wrap(Wrap { trim: true })
        .alignment(Alignment::Left)
        .style(Style::default().bg(theme.background));
    frame.render_widget(paragraph, inner);
}
fn debug_level_styles(level: Level, theme: &Theme) -> (&'static str, Style, Style) {
match level {
Level::Error => (
"ERR",
Style::default()
.fg(theme.background)
.bg(theme.error)
.add_modifier(Modifier::BOLD),
Style::default().fg(theme.error),
),
Level::Warn => (
"WARN",
Style::default()
.fg(theme.background)
.bg(theme.agent_action)
.add_modifier(Modifier::BOLD),
Style::default().fg(theme.agent_action),
),
_ => (
"INFO",
Style::default()
.fg(theme.background)
.bg(theme.info)
.add_modifier(Modifier::BOLD),
Style::default().fg(theme.text),
),
}
}
fn calculate_wrapped_line_count<'a, I>(lines: I, available_width: u16) -> usize
where
I: IntoIterator<Item = &'a str>,
@@ -2944,6 +3087,7 @@ fn render_help(frame: &mut Frame<'_>, app: &ChatApp) {
Line::from(" Ctrl+↑/↓ → adjust chat ↔ thinking split"),
Line::from(" Alt+←/→/↑/↓ → resize focused code pane"),
Line::from(" g then t → expand files panel and focus it"),
Line::from(" F12 → toggle debug log panel"),
Line::from(" F1 or ? → toggle this help overlay"),
Line::from(""),
Line::from(vec![Span::styled(
@@ -3086,6 +3230,7 @@ fn render_help(frame: &mut Frame<'_>, app: &ChatApp) {
)]),
Line::from(" :h, :help → show this help"),
Line::from(" F1 or ? → toggle help overlay"),
Line::from(" F12 → toggle debug log panel"),
Line::from(" :files, :explorer → toggle files panel"),
Line::from(" :markdown [on|off] → toggle markdown rendering"),
Line::from(" Ctrl+←/→ → resize files panel"),

View File

@@ -12,10 +12,13 @@ use ratatui::{
use unicode_segmentation::UnicodeSegmentation;
use unicode_width::UnicodeWidthStr;
use crate::chat_app::{ChatApp, ModelAvailabilityState, ModelScope, ModelSelectorItemKind};
use crate::chat_app::{
ChatApp, HighlightMask, ModelAvailabilityState, ModelScope, ModelSearchInfo,
ModelSelectorItemKind,
};
/// Filtering modes for the model picker popup.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FilterMode {
#[default]
All,
@@ -36,16 +39,21 @@ pub fn render_model_picker(frame: &mut Frame<'_>, app: &ChatApp) {
return;
}
let max_width: u16 = 80;
let min_width: u16 = 50;
let mut width = area.width.min(max_width);
if area.width >= min_width {
width = width.max(min_width);
}
width = width.max(1);
let search_query = app.model_search_query().trim().to_string();
let search_active = !search_query.is_empty();
let mut height = (selector_items.len().clamp(1, 10) as u16) * 3 + 6;
height = height.clamp(6, area.height);
let max_width = area.width.min(90);
let min_width = area.width.min(56);
let width = area.width.min(max_width).max(min_width).max(1);
let visible_models = app.visible_model_count();
let min_rows: usize = if search_active { 5 } else { 4 };
let max_rows: usize = 12;
let row_estimate = visible_models.max(min_rows).min(max_rows);
let mut height = (row_estimate as u16) * 3 + 8;
let min_height = area.height.clamp(8, 12);
let max_height = area.height.min(32);
height = height.clamp(min_height, max_height);
let x = area.x + (area.width.saturating_sub(width)) / 2;
let mut y = area.y + (area.height.saturating_sub(height)) / 3;
@@ -84,15 +92,110 @@ pub fn render_model_picker(frame: &mut Frame<'_>, app: &ChatApp) {
if inner.width == 0 || inner.height == 0 {
return;
}
let highlight_symbol = " ";
let highlight_width = UnicodeWidthStr::width(highlight_symbol);
let max_line_width = inner.width.saturating_sub(highlight_width as u16).max(1) as usize;
let layout = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Min(4), Constraint::Length(2)])
.constraints([
Constraint::Length(3),
Constraint::Min(4),
Constraint::Length(2),
])
.split(inner);
let matches = app.visible_model_count();
let search_prefix = Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM);
let bracket_style = Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM);
let caret_style = if search_active {
Style::default()
.fg(theme.selection_fg)
.add_modifier(Modifier::BOLD)
} else {
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM)
};
let mut search_spans = Vec::new();
search_spans.push(Span::styled("Search ▸ ", search_prefix));
search_spans.push(Span::styled("[", bracket_style));
search_spans.push(Span::styled(" ", bracket_style));
if search_active {
search_spans.push(Span::styled(
search_query.clone(),
Style::default()
.fg(theme.selection_fg)
.add_modifier(Modifier::BOLD),
));
} else {
search_spans.push(Span::styled(
"Type to search…",
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM),
));
}
search_spans.push(Span::styled(" ", bracket_style));
search_spans.push(Span::styled("", caret_style));
search_spans.push(Span::styled(" ", bracket_style));
search_spans.push(Span::styled("]", bracket_style));
search_spans.push(Span::raw(" "));
let suffix_label = if search_active { "match" } else { "model" };
search_spans.push(Span::styled(
format!(
"({} {}{})",
matches,
suffix_label,
if matches == 1 { "" } else { "s" }
),
Style::default().fg(theme.placeholder),
));
let search_line = Line::from(search_spans);
let instruction_line = if search_active {
Line::from(vec![
Span::styled("Backspace", Style::default().fg(theme.placeholder)),
Span::raw(": delete "),
Span::styled("Ctrl+U", Style::default().fg(theme.placeholder)),
Span::raw(": clear "),
Span::styled("Enter", Style::default().fg(theme.placeholder)),
Span::raw(": select "),
Span::styled("Esc", Style::default().fg(theme.placeholder)),
Span::raw(": close"),
])
} else {
Line::from(vec![
Span::styled("Enter", Style::default().fg(theme.placeholder)),
Span::raw(": select "),
Span::styled("Space", Style::default().fg(theme.placeholder)),
Span::raw(": toggle provider "),
Span::styled("Esc", Style::default().fg(theme.placeholder)),
Span::raw(": close"),
])
};
let search_paragraph = Paragraph::new(vec![search_line, instruction_line])
.style(Style::default().bg(theme.background).fg(theme.text));
frame.render_widget(search_paragraph, layout[0]);
let highlight_style = Style::default()
.fg(theme.selection_fg)
.bg(theme.selection_bg)
.add_modifier(Modifier::BOLD);
let highlight_symbol = " ";
let highlight_width = UnicodeWidthStr::width(highlight_symbol);
let max_line_width = layout[1]
.width
.saturating_sub(highlight_width as u16)
.max(1) as usize;
let active_model_id = app.selected_model();
let annotated = app.annotated_models();
@@ -108,12 +211,19 @@ pub fn render_model_picker(frame: &mut Frame<'_>, app: &ChatApp) {
let mut spans = Vec::new();
spans.push(status_icon(*status, theme));
spans.push(Span::raw(" "));
spans.push(Span::styled(
provider.clone(),
let header_spans = render_highlighted_text(
provider,
if search_active {
app.provider_search_highlight(provider)
} else {
None
},
Style::default()
.fg(theme.mode_command)
.add_modifier(Modifier::BOLD),
));
highlight_style,
);
spans.extend(header_spans);
spans.push(Span::raw(" "));
spans.push(provider_type_badge(*provider_type, theme));
spans.push(Span::raw(" "));
@@ -145,6 +255,11 @@ pub fn render_model_picker(frame: &mut Frame<'_>, app: &ChatApp) {
let badges = model_badge_icons(model);
let detail = app.cached_model_detail(&model.id);
let annotated_model = annotated.get(*model_index);
let search_info = if search_active {
app.model_search_info(*model_index)
} else {
None
};
let (title, metadata) = build_model_selector_lines(
theme,
model,
@@ -152,6 +267,10 @@ pub fn render_model_picker(frame: &mut Frame<'_>, app: &ChatApp) {
&badges,
detail,
model.id == active_model_id,
SearchRenderContext {
info: search_info,
highlight_style,
},
);
lines.push(clip_line_to_width(title, max_line_width));
if let Some(meta) = metadata {
@@ -176,14 +295,9 @@ pub fn render_model_picker(frame: &mut Frame<'_>, app: &ChatApp) {
.as_ref()
.map(|msg| msg.as_str())
.unwrap_or("(no models configured)");
let line = clip_line_to_width(
Line::from(vec![
Span::styled(icon, style),
Span::raw(" "),
Span::styled(format!(" {}", msg), style),
]),
max_line_width,
);
let mut spans = vec![Span::styled(icon, style), Span::raw(" ")];
spans.push(Span::styled(format!(" {}", msg), style));
let line = clip_line_to_width(Line::from(spans), max_line_width);
items.push(ListItem::new(vec![line]).style(Style::default().bg(theme.background)));
}
}
@@ -199,16 +313,22 @@ pub fn render_model_picker(frame: &mut Frame<'_>, app: &ChatApp) {
.highlight_symbol(" ");
let mut state = ListState::default();
state.select(app.selected_model_item);
frame.render_stateful_widget(list, layout[0], &mut state);
state.select(app.selected_model_item());
frame.render_stateful_widget(list, layout[1], &mut state);
let footer_text = if search_active {
"Enter: select · Space: toggle provider · Backspace: delete · Ctrl+U: clear"
} else {
"Enter: select · Space: toggle provider · Type to search · Esc: cancel"
};
let footer = Paragraph::new(Line::from(Span::styled(
"Enter: select · Space: toggle provider · ←/→ collapse/expand · Esc: cancel",
footer_text,
Style::default().fg(theme.placeholder),
)))
.alignment(ratatui::layout::Alignment::Center)
.style(Style::default().bg(theme.background).fg(theme.placeholder));
frame.render_widget(footer, layout[1]);
frame.render_widget(footer, layout[2]);
}
fn status_icon(status: ProviderStatus, theme: &owlen_core::theme::Theme) -> Span<'static> {
@@ -302,13 +422,72 @@ fn filter_badge(mode: FilterMode, theme: &owlen_core::theme::Theme) -> Span<'sta
)
}
fn build_model_selector_lines(
/// Split `text` into styled spans: runs of graphemes flagged in `highlight`
/// get `highlight_style`, the rest `normal_style`. A missing or short mask
/// leaves the remainder unhighlighted.
fn render_highlighted_text(
    text: &str,
    highlight: Option<&HighlightMask>,
    normal_style: Style,
    highlight_style: Style,
) -> Vec<Span<'static>> {
    if text.is_empty() {
        return Vec::new();
    }
    // One bool per grapheme; out-of-range indices count as "not highlighted".
    let mask: &[bool] = highlight.map(|m| m.bits()).unwrap_or(&[]);
    let mut spans: Vec<Span<'static>> = Vec::new();
    let mut run = String::new();
    let mut run_highlighted = false;
    // Group consecutive graphemes sharing a highlight state into runs and
    // emit one span per run.
    for (idx, grapheme) in UnicodeSegmentation::graphemes(text, true).enumerate() {
        let marked = mask.get(idx).copied().unwrap_or(false);
        if run.is_empty() {
            run_highlighted = marked;
        } else if marked != run_highlighted {
            let style = if run_highlighted {
                highlight_style
            } else {
                normal_style
            };
            spans.push(Span::styled(std::mem::take(&mut run), style));
            run_highlighted = marked;
        }
        run.push_str(grapheme);
    }
    if !run.is_empty() {
        let style = if run_highlighted {
            highlight_style
        } else {
            normal_style
        };
        spans.push(Span::styled(run, style));
    }
    // Defensive: never return an empty span list for non-empty text.
    if spans.is_empty() {
        spans.push(Span::styled(text.to_string(), normal_style));
    }
    spans
}
// Search-highlighting inputs threaded into `build_model_selector_lines`.
struct SearchRenderContext<'a> {
    // Per-field highlight masks for the model being rendered; `None` when
    // search is inactive or the model did not match.
    info: Option<&'a ModelSearchInfo>,
    // Style applied to matched grapheme runs.
    highlight_style: Style,
}
fn build_model_selector_lines<'a>(
theme: &owlen_core::theme::Theme,
model: &ModelInfo,
annotated: Option<&AnnotatedModelInfo>,
model: &'a ModelInfo,
annotated: Option<&'a AnnotatedModelInfo>,
badges: &[&'static str],
detail: Option<&owlen_core::model::DetailedModelInfo>,
detail: Option<&'a owlen_core::model::DetailedModelInfo>,
is_current: bool,
search: SearchRenderContext<'a>,
) -> (Line<'static>, Option<Line<'static>>) {
let provider_type = annotated
.map(|info| info.model.provider.provider_type)
@@ -329,19 +508,42 @@ fn build_model_selector_lines(
spans.push(provider_type_badge(provider_type, theme));
spans.push(Span::raw(" "));
let mut display_name = if model.name.trim().is_empty() {
model.id.clone()
} else {
model.name.clone()
};
if !display_name.eq_ignore_ascii_case(&model.id) {
display_name.push_str(&format!(" · {}", model.id));
}
let name_style = Style::default().fg(theme.text).add_modifier(Modifier::BOLD);
let id_style = Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM);
spans.push(Span::styled(
display_name,
Style::default().fg(theme.text).add_modifier(Modifier::BOLD),
));
let name_trimmed = model.name.trim();
if !name_trimmed.is_empty() {
let name_spans = render_highlighted_text(
name_trimmed,
search.info.and_then(|info| info.name.as_ref()),
name_style,
search.highlight_style,
);
spans.extend(name_spans);
if !model.id.eq_ignore_ascii_case(name_trimmed) {
spans.push(Span::raw(" "));
spans.push(Span::styled("·", Style::default().fg(theme.placeholder)));
spans.push(Span::raw(" "));
let id_spans = render_highlighted_text(
model.id.as_str(),
search.info.and_then(|info| info.id.as_ref()),
id_style,
search.highlight_style,
);
spans.extend(id_spans);
}
} else {
let id_spans = render_highlighted_text(
model.id.as_str(),
search.info.and_then(|info| info.id.as_ref()),
name_style,
search.highlight_style,
);
spans.extend(id_spans);
}
if !badges.is_empty() {
spans.push(Span::raw(" "));
@@ -359,7 +561,7 @@ fn build_model_selector_lines(
));
}
let mut meta_parts: Vec<String> = Vec::new();
let mut meta_tags: Vec<String> = Vec::new();
let mut seen_meta: HashSet<String> = HashSet::new();
let mut push_meta = |value: String| {
let trimmed = value.trim();
@@ -368,7 +570,7 @@ fn build_model_selector_lines(
}
let key = trimmed.to_ascii_lowercase();
if seen_meta.insert(key) {
meta_parts.push(trimmed.to_string());
meta_tags.push(trimmed.to_string());
}
};
@@ -437,22 +639,62 @@ fn build_model_selector_lines(
push_meta(format!("max tokens {}", ctx));
}
let mut description_segment: Option<(String, Option<HighlightMask>)> = None;
if let Some(desc) = model.description.as_deref() {
let trimmed = desc.trim();
if !trimmed.is_empty() {
meta_parts.push(ellipsize(trimmed, 80));
let (display, retained, truncated) = ellipsize(trimmed, 80);
let highlight = search
.info
.and_then(|info| info.description.as_ref())
.filter(|mask| mask.is_marked())
.map(|mask| {
if truncated {
mask.truncated(retained)
} else {
mask.clone()
}
});
description_segment = Some((display, highlight));
}
}
let metadata = if meta_parts.is_empty() {
let metadata = if meta_tags.is_empty() && description_segment.is_none() {
None
} else {
Some(Line::from(vec![Span::styled(
format!(" {}", meta_parts.join("")),
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM),
)]))
let meta_style = Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM);
let mut segments: Vec<Span<'static>> = Vec::new();
segments.push(Span::styled(" ", meta_style));
let mut first = true;
for tag in meta_tags {
if !first {
segments.push(Span::styled("", meta_style));
}
segments.push(Span::styled(tag, meta_style));
first = false;
}
if let Some((text, highlight)) = description_segment {
if !first {
segments.push(Span::styled("", meta_style));
}
if let Some(mask) = highlight.as_ref() {
let desc_spans = render_highlighted_text(
text.as_str(),
Some(mask),
meta_style,
search.highlight_style,
);
segments.extend(desc_spans);
} else {
segments.push(Span::styled(text, meta_style));
}
}
Some(Line::from(segments))
};
(Line::from(spans), metadata)
@@ -501,18 +743,19 @@ fn clip_line_to_width(line: Line<'_>, max_width: usize) -> Line<'static> {
Line::from(clipped)
}
fn ellipsize(text: &str, max_chars: usize) -> String {
if text.chars().count() <= max_chars {
return text.to_string();
fn ellipsize(text: &str, max_graphemes: usize) -> (String, usize, bool) {
let graphemes: Vec<&str> = UnicodeSegmentation::graphemes(text, true).collect();
if graphemes.len() <= max_graphemes {
return (text.to_string(), graphemes.len(), false);
}
let target = max_chars.saturating_sub(1).max(1);
let keep = max_graphemes.saturating_sub(1).max(1);
let mut truncated = String::new();
for ch in text.chars().take(target) {
truncated.push(ch);
for grapheme in graphemes.iter().take(keep) {
truncated.push_str(grapheme);
}
truncated.push('…');
truncated
(truncated, keep, true)
}
fn model_badge_icons(model: &ModelInfo) -> Vec<&'static str> {

View File

@@ -0,0 +1,164 @@
use std::{any::Any, sync::Arc};
use async_trait::async_trait;
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers};
use futures_util::stream;
use owlen_core::{
Config, Mode, Provider,
config::McpMode,
session::SessionController,
storage::StorageManager,
types::{ChatResponse, Message, Role, ToolCall},
ui::{NoOpUiController, UiController},
};
use owlen_tui::ChatApp;
use owlen_tui::app::UiRuntime;
use owlen_tui::events::Event;
use tempfile::tempdir;
use tokio::sync::mpsc;
// Minimal in-memory `Provider` used to drive the TUI in tests without a
// real backend.
struct StubProvider;
#[async_trait]
impl Provider for StubProvider {
    fn name(&self) -> &str {
        "stub-provider"
    }
    // Advertises exactly one tool-capable model so agent flows can proceed.
    async fn list_models(&self) -> owlen_core::Result<Vec<owlen_core::types::ModelInfo>> {
        Ok(vec![owlen_core::types::ModelInfo {
            id: "stub-model".into(),
            name: "Stub Model".into(),
            description: Some("Stub model for testing".into()),
            provider: self.name().into(),
            context_window: Some(4096),
            capabilities: vec!["chat".into()],
            supports_tools: true,
        }])
    }
    // Always replies with a fixed, final, non-streaming assistant message.
    async fn send_prompt(
        &self,
        _request: owlen_core::types::ChatRequest,
    ) -> owlen_core::Result<ChatResponse> {
        Ok(ChatResponse {
            message: Message::assistant("stub response".to_string()),
            usage: None,
            is_streaming: false,
            is_final: true,
        })
    }
    // Streaming variant yields an empty stream (no chunks at all).
    async fn stream_prompt(
        &self,
        _request: owlen_core::types::ChatRequest,
    ) -> owlen_core::Result<owlen_core::ChatStream> {
        Ok(Box::pin(stream::empty()))
    }
    // Always healthy.
    async fn health_check(&self) -> owlen_core::Result<()> {
        Ok(())
    }
    fn as_any(&self) -> &(dyn Any + Send + Sync) {
        self
    }
}
/// End-to-end consent flow regression test: when the user denies a pending
/// tool-consent prompt, the dialog closes, the status line reports the
/// denial, and the conversation gains an assistant message acknowledging it.
#[tokio::test(flavor = "multi_thread")]
async fn denied_consent_appends_apology_message() {
    // Isolated on-disk storage so the test never touches real user data.
    let temp_dir = tempdir().expect("temp dir");
    let storage = Arc::new(
        StorageManager::with_database_path(temp_dir.path().join("owlen-tui-tests.db"))
            .await
            .expect("storage"),
    );
    // Plain-text, local-only configuration pinned to the stub model.
    let mut config = Config::default();
    config.privacy.encrypt_local_data = false;
    config.general.default_model = Some("stub-model".into());
    config.mcp.mode = McpMode::LocalOnly;
    config
        .refresh_mcp_servers(None)
        .expect("refresh MCP servers");
    let provider: Arc<dyn Provider> = Arc::new(StubProvider);
    let ui: Arc<dyn UiController> = Arc::new(NoOpUiController);
    // Channel over which the session controller emits events to the TUI.
    let (event_tx, controller_event_rx) = mpsc::unbounded_channel();
    // Pre-populate a pending consent request before handing the controller to the TUI.
    let mut session = SessionController::new(
        Arc::clone(&provider),
        config,
        Arc::clone(&storage),
        Arc::clone(&ui),
        true,
        Some(event_tx.clone()),
    )
    .await
    .expect("session controller");
    // Code mode is required for tool calls to be surfaced at all.
    session
        .set_operating_mode(Mode::Code)
        .await
        .expect("code mode");
    // A destructive-looking tool call so consent is mandatory.
    let tool_call = ToolCall {
        id: "call-1".to_string(),
        name: "resources/delete".to_string(),
        arguments: serde_json::json!({"path": "/tmp/example.txt"}),
    };
    let message_id = session
        .conversation_mut()
        .push_assistant_message("Preparing to modify files.");
    session
        .conversation_mut()
        .set_tool_calls_on_message(message_id, vec![tool_call])
        .expect("tool calls");
    // This queues the consent request and emits a controller event.
    let advertised_calls = session
        .check_streaming_tool_calls(message_id)
        .expect("queued consent");
    assert_eq!(advertised_calls.len(), 1);
    let (mut app, mut session_rx) = ChatApp::new(session, controller_event_rx)
        .await
        .expect("chat app");
    // Session events are not used in this test.
    session_rx.close();
    // Process the controller event emitted by check_streaming_tool_calls.
    UiRuntime::poll_controller_events(&mut app).expect("poll controller events");
    assert!(app.has_pending_consent());
    let consent_state = app
        .consent_dialog()
        .expect("consent dialog should be visible")
        .clone();
    assert_eq!(consent_state.tool_name, "resources/delete");
    // Simulate the user pressing "4" to deny consent.
    let deny_key = KeyEvent::new(KeyCode::Char('4'), KeyModifiers::NONE);
    UiRuntime::handle_ui_event(&mut app, Event::Key(deny_key))
        .await
        .expect("handle deny key");
    // The dialog must be dismissed and the denial surfaced in the status bar.
    assert!(!app.has_pending_consent());
    assert!(
        app.status_message()
            .to_lowercase()
            .contains("consent denied")
    );
    // The transcript should end with an assistant message acknowledging it.
    let conversation = app.conversation();
    let last_message = conversation.messages.last().expect("last message");
    assert_eq!(last_message.role, Role::Assistant);
    assert!(
        last_message
            .content
            .to_lowercase()
            .contains("consent was denied"),
        "assistant should acknowledge the denied consent"
    );
}

View File

@@ -0,0 +1,13 @@
# Experimental Providers
This directory collects non-workspace placeholder crates for potential
third-party providers. The code under the following folders is not yet
implemented and is kept out of the default Cargo workspace to avoid
confusion:
- `openai`
- `anthropic`
- `gemini`
If you want to explore or contribute to these providers, start by reading
the `README.md` inside each crate for the current status and ideas.

View File

@@ -37,9 +37,9 @@ A simplified diagram of how components interact:
- `owlen-core`: Defines the `LlmProvider` abstraction, routing, configuration, session state, encryption, and the MCP client layer. This crate is UI-agnostic and must not depend on concrete providers, terminals, or blocking I/O.
- `owlen-tui`: Hosts all terminal UI behaviour (event loop, rendering, input modes) while delegating business logic and provider access back to `owlen-core`.
- `owlen-cli`: Small entry point that parses command-line options, resolves configuration, selects providers, and launches either the TUI or headless agent flows by calling into `owlen-core`.
- `owlen-mcp-llm-server`: Runs concrete providers (e.g., Ollama) behind an MCP boundary, exposing them as `generate_text` tools. This crate owns provider-specific wiring and process sandboxing.
- `owlen-mcp-server`: Generic MCP server for file operations and resource management.
- `owlen-ollama`: Direct Ollama provider implementation (legacy, used only by MCP servers).
- `owlen-mcp-llm-server`: Runs concrete providers (e.g., Ollama Local, Ollama Cloud) behind an MCP boundary, exposing them as `generate_text` tools. This crate owns provider-specific wiring and process sandboxing.
- `owlen-mcp-server`: Generic MCP server for file operations, resource projection, and other non-LLM tools.
- `owlen-providers`: Houses concrete provider adapters (today: Ollama local + cloud) that the MCP servers embed.
### Boundary Guidelines
@@ -47,6 +47,46 @@ A simplified diagram of how components interact:
- **owlen-cli**: Only orchestrates startup/shutdown. Avoid adding business logic; when a new command needs behaviour, implement it in `owlen-core` or another library crate and invoke it from the CLI.
- **owlen-mcp-llm-server**: The only crate that should directly talk to Ollama (or other provider processes). TUI/CLI code communicates with providers exclusively through MCP clients in `owlen-core`.
## Provider Boundaries & MCP Topology
Owlen's runtime is intentionally layered so that user interfaces never couple to provider-specific code. The flow can be visualised as:
```
[owlen-tui] / [owlen-cli]
│ chat + model requests
[owlen-core::ProviderManager] ──> Arc<dyn ModelProvider>
│ ▲
│ │ implements `ModelProvider`
▼ │
[owlen-core::mcp::RemoteMcpClient] ─────┘
│ (JSON-RPC over stdio)
┌───────────────────────────────────────────────────────────┐
│ MCP Process Boundary (spawned per provider) │
│ │
│ crates/mcp/llm-server ──> owlen-providers::ollama::* │
│ crates/mcp/server ──> filesystem & workspace tools │
│ crates/mcp/prompt-server ─> template rendering helpers │
└───────────────────────────────────────────────────────────┘
```
- **ProviderManager (owlen-core)** keeps the registry of `ModelProvider` implementations, merges model catalogues, and caches health. Local Ollama and Cloud Ollama appear as separate providers whose metadata is merged for the UI.
- **RemoteMcpClient (owlen-core)** is the default `ModelProvider`. It implements both the MCP client traits and the `ModelProvider` interface, allowing it to bridge chat streams back into the ProviderManager without exposing transport details.
- **MCP servers (crates/mcp/\*)** are short-lived binaries with narrowly scoped responsibilities:
- `crates/mcp/llm-server` wraps `owlen-providers::ollama` backends and exposes `generate_text` / `list_models`.
- `crates/mcp/server` offers tool calls (file reads/writes, search).
- `crates/mcp/prompt-server` renders prompt templates.
- **owlen-providers** contains the actual provider adapters (Ollama local & cloud today). MCP servers embed these adapters directly; nothing else should reach into them.
### Health & Model Discovery Flow
1. Frontends call `ProviderManager::list_all_models()`. The manager fans out health checks to each registered provider (including the MCP client) and collates their models into a single list tagged with scope (`Local`, `Cloud`, etc.).
2. The TUI model picker (`owlen-tui/src/widgets/model_picker.rs`) reads those annotated entries to drive filters like **Local**, **Cloud**, and **Available**.
3. When the user kicks off a chat, the TUI emits a request that flows through `Session::send_message`, which delegates to `ProviderManager::generate`. The selected provider (usually `RemoteMcpClient`) streams chunks back across the MCP transport and the manager updates health status based on success or failure.
4. Tool invocations travel the same transport: the MCP client sends tool calls to `crates/mcp/server`, and responses surface as consent prompts or streamed completions in the UI.
## MCP Architecture (Phase 10)
As of Phase 10, OWLEN uses a **MCP-only architecture** where all LLM interactions go through the Model Context Protocol:
@@ -135,4 +175,4 @@ The TUI is rendered on each iteration of the main application loop in `owlen-tui
4. **State-Driven Rendering**: Each rendering function takes the current application state as an argument. It uses this state to decide what and how to render. For example, the border color of a panel might change if it is focused.
5. **Buffer and Diff**: `ratatui` does not draw directly to the terminal. Instead, it renders the widgets to an in-memory buffer. It then compares this buffer to the previous buffer and only sends the necessary changes to the terminal. This is highly efficient and prevents flickering.
The command palette and other modal helpers expose lightweight state structs in `owlen_tui::state`. These components keep business logic (suggestion filtering, selection state, etc.) independent from rendering, which in turn makes them straightforward to unit test.
The command palette and other modal helpers expose lightweight state structs in `owlen_tui::state`. These components keep business logic (suggestion filtering, selection state, etc.) independent from rendering, which in turn makes them straightforward to unit test. The ongoing migration of more features into the `ModelViewUpdate` core is documented in [`docs/tui-mvu-migration.md`](tui-mvu-migration.md).

70
docs/repo-map.md Normal file
View File

@@ -0,0 +1,70 @@
# Repo Map
> Generated by `scripts/gen-repo-map.sh`. Regenerate whenever the workspace layout changes.
```text
.
├── crates
│ ├── mcp
│ │ ├── client
│ │ ├── code-server
│ │ ├── llm-server
│ │ ├── prompt-server
│ │ └── server
│ ├── owlen-cli
│ │ ├── src
│ │ ├── tests
│ │ ├── Cargo.toml
│ │ └── README.md
│ ├── owlen-core
│ │ ├── examples
│ │ ├── migrations
│ │ ├── src
│ │ ├── tests
│ │ ├── Cargo.toml
│ │ └── README.md
│ ├── owlen-markdown
│ │ ├── src
│ │ └── Cargo.toml
│ ├── owlen-providers
│ │ ├── src
│ │ ├── tests
│ │ └── Cargo.toml
│ ├── owlen-tui
│ │ ├── src
│ │ ├── tests
│ │ ├── Cargo.toml
│ │ └── README.md
│ └── providers
│ └── experimental
├── docs
│ ├── migrations
│ ├── CHANGELOG_v1.0.md
│ ├── adding-providers.md
│ ├── architecture.md
│ ├── configuration.md
│ ├── faq.md
│ ├── migration-guide.md
│ ├── phase5-mode-system.md
│ ├── platform-support.md
│ ├── provider-implementation.md
│ ├── testing.md
│ └── troubleshooting.md
├── examples
├── scripts
│ ├── check-windows.sh
│ └── gen-repo-map.sh
├── AGENTS.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── PKGBUILD
├── README.md
├── SECURITY.md
└── config.toml
29 directories, 32 files
```

109
docs/tui-mvu-migration.md Normal file
View File

@@ -0,0 +1,109 @@
# TUI MVU Migration Guide
This guide explains how we are migrating the Owlen terminal UI to a predictable **Model–View–Update (MVU)** architecture. Use it to understand the current layout, decide where new logic belongs, and track which features have already moved to the MVU core.
---
## Goals
- Make UI state transitions pure and testable.
- Reduce duplicated control flow inside `chat_app.rs`.
- Keep rendering functions dumb; they should depend on read-only view models.
- Ensure new features land in MVU-first form so the imperative paths shrink over time.
Adopt the checklist below whenever you touch a feature that still lives in the imperative code path.
---
## Module Map (owlen-tui)
| Area | Path | Responsibility | MVU Status |
| --- | --- | --- | --- |
| Core state | `src/app/mvu.rs` | Shared `AppModel`, `AppEvent`, `AppEffect` definitions | **Ready** — composer + consent events implemented |
| Legacy app | `src/chat_app.rs` | Orchestrates IO, manages pending tasks, renders via ratatui | **Transitioning** — increasingly delegates to MVU |
| Event loop | `src/app/handler.rs` | Converts session messages into app updates | Needs cleanup once message flow is MVU aware |
| Rendering | `src/ui.rs` + `src/widgets/*` | Pure rendering helpers that pull data from `ChatApp` | Already read-only; keep that invariant |
| Commands | `src/commands/*` | Keymap and palette command registry | Candidate for MVU once palette state migrates |
| Shared state | `src/state/*` | Small state helpers (command palette, file tree, etc.) | Each module can become an MVU sub-model |
Use the table to find the right starting point before adding new events.
---
## Event Taxonomy
Current events live in `app/mvu.rs`.
- `AppEvent::Composer` covers draft changes, mode switches, submissions.
- `AppEvent::ToolPermission` bridges consent dialog choices back to the controller.
`AppEffect` represents side effects the imperative shell must execute:
- `SetStatus` — surfaces validation failures.
- `RequestSubmit` — hands control back to the async send pipeline.
- `ResolveToolConsent` — notifies the session controller of user decisions.
### Adding a new feature
1. Extend `AppModel` with the new view state.
2. Create a dedicated event enum (e.g. `PaletteEvent`) and nest it under `AppEvent`.
3. Add pure update logic that mutates the model and returns zero or more effects.
4. Handle emitted effects inside `ChatApp::handle_app_effects`.
Keep the event names UI-centric. Provider-side actions should remain in `owlen-core`.
---
## Feature Migration Checklist
| Feature | Scope | MVU tasks | Status |
| --- | --- | --- | --- |
| Composer (input buffer) | Draft text, submission workflow | ✅ `ComposerModel`, `ComposerEvent`, `SubmissionOutcome` | ✅ Complete |
| Tool consent dialog | Approval / denial flow | ✅ `AppEvent::ToolPermission`, `AppEffect::ResolveToolConsent` | ✅ Complete |
| Chat timeline | Message ordering, cursor, scrollback | Model struct for timeline + events for history updates | ☐ TODO |
| Thinking pane | Agent reasoning text, auto-scroll | Model + event to toggle visibility and append lines | ☐ TODO |
| Model picker | Filters, search, selection | Convert `ModelSelectorItem` list + search metadata into MVU | ☐ TODO |
| Command palette | Suggestions, history, apply actions | Move palette state into `AppModel` and surface events | ☐ TODO |
| File workspace | Pane layout, file tree focus | Represent pane tree in MVU, drive focus + resize events | ☐ TODO |
| Toasts & status bar | Transient notifications | Consider MVU-managed queue with explicit events | ☐ TODO |
When you pick up one of the TODO rows, document the plan in the PR description and link back to this table.
---
## Migration Playbook
1. **Inventory state** — list every field in `ChatApp` that your feature touches.
2. **Define view model** — move the persistent state into `AppModel` (or a new sub-struct).
3. **Write events** — describe all user intents and background updates as `AppEvent` variants.
4. **Translate side effects** — whenever the update logic needs to call into async code, emit an `AppEffect`. Handle it inside `handle_app_effects`.
5. **Refactor call sites** — replace direct mutations with `apply_app_event` calls.
6. **Write tests** — cover the pure update function with table-driven unit tests.
7. **Remove duplicates** — once the MVU path handles everything, delete the legacy branch in `chat_app.rs`.
This flow keeps commits reviewable and avoids breaking the live UI during migration.
---
## Testing Guidance
- **Unit tests** cover the pure update functions inside `app/mvu.rs`.
- **Integration tests** add scenarios to `crates/owlen-tui/tests/agent_flow_ui.rs` when side effects change.
- **Golden behaviour** ensure the ratatui renderers still consume read-only data; add lightweight snapshot tests if needed.
- **Manual verification** run `cargo run -p owlen-cli -- --help` to open the TUI and confirm the migrated feature behaves as expected.
Every new MVU feature should land with unit tests plus a note about manual validation.
---
## Tracking TODOs
- Keep this file up to date when you migrate a feature.
- Add inline `// TODO(mvu)` tags in code with a short description so they are easy to grep.
- Use the `docs/` folder for design notes; avoid long comment blocks inside the code.
Future contributors should be able to glance at this document, see what is done, and understand where to continue the migration.
---
Questions? Reach out in the Owlen discussion board or drop a note in the relevant PR thread. Consistent updates here will keep MVU adoption predictable for everyone.

31
scripts/gen-repo-map.sh Executable file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Regenerate docs/repo-map.md (or a caller-supplied path) from the current
# workspace layout. Usage: scripts/gen-repo-map.sh [output-path]
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
OUTPUT_PATH="${1:-${REPO_ROOT}/docs/repo-map.md}"

if ! command -v tree >/dev/null 2>&1; then
    echo "error: the 'tree' command is required to regenerate the repo map. Install it (e.g., 'sudo pacman -S tree') and re-run this script." >&2
    exit 1
fi

# NOTE(review): `tree -I` takes '|'-separated shell glob patterns, not a
# regex, so dots are literal and need no escaping. The previous value used
# backslash-escaped dots ('\\.git', '\\.venv', ...); a doubled backslash in a
# glob matches a literal backslash, so plain names are correct either way.
EXCLUDES='target|.git|.github|node_modules|dist|images|themes|dev|.venv'

# Capture tree output in a temp file that is cleaned up on any exit path.
TMP_FILE="$(mktemp)"
trap 'rm -f "${TMP_FILE}"' EXIT

pushd "${REPO_ROOT}" >/dev/null
# -a: include dotfiles, -L 2: two levels deep, --prune: drop empty dirs.
tree -a -L 2 --dirsfirst --prune -I "${EXCLUDES}" > "${TMP_FILE}"
popd >/dev/null

{
    printf '# Repo Map\n\n'
    printf '> Generated by `scripts/gen-repo-map.sh`. Regenerate when the layout changes.\n\n'
    printf '```text\n'
    cat "${TMP_FILE}"
    printf '```\n'
} > "${OUTPUT_PATH}"

echo "Repo map written to ${OUTPUT_PATH}"

57
scripts/release-notes.sh Executable file
View File

@@ -0,0 +1,57 @@
#!/usr/bin/env bash
# Extract the CHANGELOG.md section for a given tag and print it (or write it
# to a file) so CI can attach it as release notes.
# Usage: scripts/release-notes.sh <tag> [output-file]
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
CHANGELOG="${REPO_ROOT}/CHANGELOG.md"
TAG="${1:-}"
OUTPUT="${2:-}"

if [[ -z "${TAG}" ]]; then
    echo "usage: $0 <tag> [output-file]" >&2
    exit 1
fi

# Accept tags with or without a leading v/V (e.g. v0.1.11 -> 0.1.11).
TAG="${TAG#v}"
TAG="${TAG#V}"

if [[ ! -f "${CHANGELOG}" ]]; then
    echo "error: CHANGELOG.md not found at ${CHANGELOG}" >&2
    exit 1
fi

# Prefer python3: many modern distros no longer ship a bare `python` binary,
# so hard-coding `python` would make this script fail on CI runners.
PYTHON_BIN="$(command -v python3 || command -v python || true)"
if [[ -z "${PYTHON_BIN}" ]]; then
    echo "error: python3 (or python) is required to extract release notes" >&2
    exit 1
fi

NOTES=$(TAG="${TAG}" CHANGELOG_PATH="${CHANGELOG}" "${PYTHON_BIN}" - <<'PY'
import os
import re
import sys
from pathlib import Path

changelog_path = Path(os.environ['CHANGELOG_PATH'])
tag = os.environ['TAG']
text = changelog_path.read_text(encoding='utf-8')

# Match a "## [X.Y.Z]" heading, optionally followed by "- <date>".
pattern = re.compile(rf'^## \[{re.escape(tag)}\]\s*(?:-.*)?$', re.MULTILINE)
match = pattern.search(text)
if not match:
    sys.stderr.write(f"No changelog section found for tag {tag}.\n")
    sys.exit(1)

# The section runs from the end of the heading to the next "## [" heading.
start = match.end()
rest = text[start:]
next_heading = re.search(r'^## \[', rest, re.MULTILINE)
section = rest[:next_heading.start()] if next_heading else rest
lines = [line.rstrip() for line in section.strip().splitlines()]
print('\n'.join(lines))
PY
)

if [[ -z "${NOTES}" ]]; then
    echo "error: no content generated for tag ${TAG}" >&2
    exit 1
fi

if [[ -n "${OUTPUT}" ]]; then
    printf '%s\n' "${NOTES}" > "${OUTPUT}"
else
    printf '%s\n' "${NOTES}"
fi

9
xtask/Cargo.toml Normal file
View File

@@ -0,0 +1,9 @@
# Internal developer-task runner invoked as `cargo xtask <task>`.
[package]
name = "xtask"
version = "0.1.0"
# Inherit the Rust edition from the workspace root.
edition.workspace = true
# Guard against accidental `cargo publish` of this helper crate.
publish = false

[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }

162
xtask/src/main.rs Normal file
View File

@@ -0,0 +1,162 @@
use std::path::{Path, PathBuf};
use std::process::Command;
use anyhow::{Context, Result, bail};
use clap::{Parser, Subcommand};
/// Command-line entry point for Owlen developer tasks (`cargo xtask <task>`).
#[derive(Parser)]
#[command(author, version, about = "Owlen developer tasks", long_about = None)]
struct Xtask {
    /// Which developer task to execute.
    #[command(subcommand)]
    command: Task,
}
/// All supported developer tasks; each variant dispatches to a handler
/// function of the same name in `main`.
#[derive(Subcommand)]
enum Task {
    /// Format the workspace (use --check to verify without writing).
    Fmt {
        #[arg(long, help = "Run rustfmt in check mode")]
        check: bool,
    },
    /// Run clippy with all warnings elevated to errors.
    Lint,
    /// Execute the full workspace test suite.
    Test,
    /// Run coverage via cargo-llvm-cov (requires the tool to be installed).
    Coverage,
    /// Launch the default Owlen CLI binary (owlen) with optional args.
    DevRun {
        // `last = true` collects everything after `--` verbatim.
        #[arg(last = true, help = "Arguments forwarded to `owlen`")]
        args: Vec<String>,
    },
    /// Composite release validation (fmt --check, clippy, test).
    ReleaseCheck,
    /// Regenerate docs/repo-map.md (accepts optional output path).
    GenRepoMap {
        #[arg(long, value_name = "PATH", help = "Override the repo map output path")]
        output: Option<PathBuf>,
    },
}
fn main() -> Result<()> {
let cli = Xtask::parse();
match cli.command {
Task::Fmt { check } => fmt(check),
Task::Lint => lint(),
Task::Test => test(),
Task::Coverage => coverage(),
Task::DevRun { args } => dev_run(args),
Task::ReleaseCheck => release_check(),
Task::GenRepoMap { output } => gen_repo_map(output),
}
}
/// Run `cargo fmt --all`; with `check` set, verify formatting without
/// rewriting any files.
fn fmt(check: bool) -> Result<()> {
    let mut args: Vec<String> = ["fmt", "--all"].into_iter().map(String::from).collect();
    if check {
        args.extend(["--".to_string(), "--check".to_string()]);
    }
    run_cargo(args)
}
/// Run clippy across the whole workspace with warnings promoted to errors.
fn lint() -> Result<()> {
    let args = ["clippy", "--workspace", "--all-features", "--", "-D", "warnings"]
        .into_iter()
        .map(String::from)
        .collect::<Vec<_>>();
    run_cargo(args)
}
/// Execute the test suite for every workspace crate with all features enabled.
fn test() -> Result<()> {
    let args = ["test", "--workspace", "--all-features"]
        .into_iter()
        .map(String::from)
        .collect::<Vec<_>>();
    run_cargo(args)
}
/// Produce a workspace coverage summary via `cargo llvm-cov`.
/// The context hint points users at the missing tool if the run fails.
fn coverage() -> Result<()> {
    let args = ["llvm-cov", "--workspace", "--all-features", "--summary-only"]
        .into_iter()
        .map(String::from)
        .collect::<Vec<_>>();
    run_cargo(args)
        .with_context(|| "install `cargo llvm-cov` to use the coverage task".to_string())
}
/// Build and launch the `owlen` binary, forwarding any trailing arguments
/// after a `--` separator so cargo does not consume them.
fn dev_run(args: Vec<String>) -> Result<()> {
    let mut forwarded: Vec<String> = ["run", "-p", "owlen-cli", "--bin", "owlen"]
        .into_iter()
        .map(String::from)
        .collect();
    if !args.is_empty() {
        forwarded.push("--".into());
        forwarded.extend(args);
    }
    run_cargo(forwarded)
}
/// Release gate: formatting check, then lint, then the full test suite.
/// Stops at the first step that fails and propagates its error.
fn release_check() -> Result<()> {
    fmt(true).and_then(|_| lint()).and_then(|_| test())
}
/// Invoke `scripts/gen-repo-map.sh` from the workspace root, optionally
/// passing an override for the output path.
fn gen_repo_map(output: Option<PathBuf>) -> Result<()> {
    let root = workspace_root();
    let script = root.join("scripts/gen-repo-map.sh");
    if !script.exists() {
        bail!("repo map script not found at {}", script.display());
    }
    let mut cmd = Command::new(&script);
    cmd.current_dir(&root);
    if let Some(path) = output {
        cmd.arg(path);
    }
    let status = cmd
        .status()
        .with_context(|| format!("failed to run {}", script.display()))?;
    if status.success() {
        Ok(())
    } else {
        bail!(
            "{} exited with status {}",
            script.display(),
            status.code().unwrap_or_default()
        )
    }
}
/// Run `cargo` with the given arguments from the workspace root.
///
/// Returns an error (with the full command line in the message) when the
/// process cannot be spawned or finishes unsuccessfully.
fn run_cargo(args: Vec<String>) -> Result<()> {
    let mut cmd = Command::new("cargo");
    cmd.current_dir(workspace_root());
    cmd.args(&args);
    let status = cmd
        .status()
        .with_context(|| format!("failed to run cargo {}", args.join(" ")))?;
    if !status.success() {
        // `code()` is `None` when the process was killed by a signal; the
        // previous `unwrap_or_default()` misreported that case as status 0.
        match status.code() {
            Some(code) => bail!("`cargo {}` exited with status {}", args.join(" "), code),
            None => bail!("`cargo {}` was terminated by a signal", args.join(" ")),
        }
    }
    Ok(())
}
/// Resolve the workspace root: the parent directory of the `xtask` crate
/// (whose manifest dir is baked in at compile time).
fn workspace_root() -> PathBuf {
    let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
    manifest_dir
        .parent()
        .expect("xtask has a parent directory")
        .to_path_buf()
}