37 Commits

SHA1 Message Date
d86888704f chore(release): bump version to 0.1.11
Update pkgver in PKGBUILD, version badge in README, and workspace package version in Cargo.toml. Add changelog entry for 0.1.11 reflecting the metadata bump.
2025-10-18 03:34:57 +02:00
de6b6e20a5 docs(readme): quick start matrices + platform notes 2025-10-18 03:25:10 +02:00
1e8a5e08ed docs(tui): MVU migration guide + module map 2025-10-18 03:20:32 +02:00
218ebbf32f feat(tui): debug log panel toggle 2025-10-18 03:18:34 +02:00
c49e7f4b22 test(core+tui): end-to-end agent tool scenarios
2025-10-17 05:24:01 +02:00
9588c8c562 feat(tui): model picker UX polish (filters, sizing, search) 2025-10-17 04:52:38 +02:00
1948ac1284 fix(providers/ollama): strengthen model cache + scope status UI 2025-10-17 03:58:25 +02:00
3f92b7d963 feat(agent): event-driven tool consent handshake (explicit UI prompts) 2025-10-17 03:42:13 +02:00
5553e61dbf feat(tui): declarative keymap + command registry 2025-10-17 02:47:09 +02:00
7f987737f9 refactor(core): add LLMClient facade trait; decouple TUI from Provider/MCP details 2025-10-17 01:52:10 +02:00
5182f86133 feat(tui): introduce MVU core (AppModel, AppEvent, update()) 2025-10-17 01:40:50 +02:00
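
The MVU split introduced here is easiest to see as a pure state transition. A minimal sketch; only the names AppModel, AppEvent, and update() come from the commit, the fields and variants are illustrative assumptions:

```rust
// Illustrative MVU core: AppModel, AppEvent, and update() are the commit's
// names; the fields and variants below are assumptions.
#[derive(Default)]
struct AppModel {
    input: String,
    messages: Vec<String>,
    should_quit: bool,
}

enum AppEvent {
    KeyTyped(char),
    Submit,
    Quit,
}

// Pure state transition: consume an event, mutate the model, no I/O here.
fn update(model: &mut AppModel, event: AppEvent) {
    match event {
        AppEvent::KeyTyped(c) => model.input.push(c),
        AppEvent::Submit => {
            let prompt = std::mem::take(&mut model.input);
            model.messages.push(prompt);
        }
        AppEvent::Quit => model.should_quit = true,
    }
}

fn main() {
    let mut model = AppModel::default();
    for event in [AppEvent::KeyTyped('h'), AppEvent::KeyTyped('i'), AppEvent::Submit] {
        update(&mut model, event);
    }
    assert_eq!(model.messages, ["hi".to_string()]);
    assert!(!model.should_quit);
}
```
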
a50099ad74 ci(mac): add compile-only macOS build (no artifacts) 2025-10-17 01:13:36 +02:00
20ba5523ee ci(build): split tests from matrix builds to avoid repetition 2025-10-17 01:12:39 +02:00
0b2b3701dc ci(security): add cargo-audit job (weekly + on push) 2025-10-17 01:10:24 +02:00
438b05b8a3 ci: derive release notes from CHANGELOG.md 2025-10-17 01:08:57 +02:00
e2a31b192f build(cli)!: add owlen-code binary and wire code mode 2025-10-17 01:02:40 +02:00
b827d3d047 ci: add PR pipeline (push) with fmt+clippy+test (linux only) 2025-10-17 00:51:25 +02:00
9c0cf274a3 chore(workspace): add cargo xtask crate for common ops 2025-10-17 00:47:54 +02:00
85ae319690 docs(architecture): clarify provider boundaries and MCP topology 2025-10-17 00:44:07 +02:00
449f133a1f docs: add repo map (tree) and generating script 2025-10-17 00:41:47 +02:00
2f6b03ef65 chore(repo): move placeholder provider crates to crates/providers/experimental/ 2025-10-17 00:37:02 +02:00
d4030dc598 refactor(workspace)!: move MCP crates under crates/mcp/ and update paths 2025-10-17 00:31:35 +02:00
3271697f6b feat(cli): add provider management and model listing commands and integrate them into the CLI 2025-10-16 23:35:38 +02:00
cbfef5a5df docs: add provider onboarding guide and update documentation for ProviderManager, health worker, and multi‑provider architecture 2025-10-16 23:01:57 +02:00
52efd5f341 test(app): add generation and message unit tests
- New test suite in `crates/owlen-tui/tests` covering generation orchestration, message variant round‑trip, and background worker status updates.
- Extend `model_picker` to filter models by matching keywords against capabilities as well as provider names.
- Update `state_tests` to assert that suggestion lists are non‑empty instead of checking prefix matches.
- Re‑export `background_worker` from `app::mod.rs` for external consumption.
2025-10-16 22:56:00 +02:00
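
As a rough illustration of the round-trip coverage described above, a self-contained test sketch; the AppMessage stand-in is invented and far smaller than the real enum:

```rust
use serde::{Deserialize, Serialize};

// Invented stand-in; the real AppMessage in owlen-tui has many more variants.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
enum AppMessage {
    GenerationChunk { request_id: u64, text: String },
    ProviderStatus { provider: String, healthy: bool },
}

#[test]
fn message_variant_round_trip() {
    let original = AppMessage::GenerationChunk { request_id: 7, text: "hi".into() };
    let json = serde_json::to_string(&original).expect("serialize");
    let decoded: AppMessage = serde_json::from_str(&json).expect("deserialize");
    assert_eq!(original, decoded);
}
```
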
200cdbc4bd test(provider): add integration tests for ProviderManager using MockProvider
- Introduce `MockProvider` with configurable models, health status, generation handlers, and error simulation.
- Add common test utilities and integration tests covering provider registration, model aggregation, request routing, error handling, and health refresh.
2025-10-16 22:41:33 +02:00
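
A minimal sketch of what a configurable mock provider can look like; the real MockProvider implements the ModelProvider trait and also simulates generation, which is omitted here:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Toy mock: models plus a toggleable health flag for error simulation.
struct MockProvider {
    models: Vec<String>,
    healthy: AtomicBool,
}

impl MockProvider {
    fn new(models: Vec<String>) -> Self {
        Self { models, healthy: AtomicBool::new(true) }
    }
    fn set_healthy(&self, healthy: bool) {
        self.healthy.store(healthy, Ordering::SeqCst);
    }
    fn list_models(&self) -> &[String] {
        &self.models
    }
    fn health_check(&self) -> Result<(), String> {
        if self.healthy.load(Ordering::SeqCst) { Ok(()) } else { Err("simulated outage".into()) }
    }
}

fn main() {
    let mock = MockProvider::new(vec!["test-model".into()]);
    assert!(mock.health_check().is_ok());
    mock.set_healthy(false); // flip to exercise the error path
    assert!(mock.health_check().is_err());
    assert_eq!(mock.list_models(), ["test-model".to_string()]);
}
```
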
8525819ab4 feat(app): introduce UiRuntime trait and RuntimeApp run loop, add crossterm event conversion, refactor CLI to use RuntimeApp for unified UI handling 2025-10-16 22:21:33 +02:00
bcd52d526c feat(app): introduce MessageState trait and handler for AppMessage dispatch
- Add `MessageState` trait defining UI reaction callbacks for generation lifecycle, model updates, provider status, resize, and tick events.
- Implement `App::handle_message` to route `AppMessage` variants to the provided `MessageState` and determine exit condition.
- Add `handler.rs` module with the trait and dispatch logic; re-export `MessageState` in `app/mod.rs`.
- Extend `ActiveGeneration` with a public `request_id` getter and clean up dead code annotations.
- Implement empty `MessageState` for `ChatApp` to integrate UI handling.
- Add `log` crate dependency for warning messages.
2025-10-16 21:58:26 +02:00
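
The dispatch shape described above, as a hedged sketch; only MessageState and handle_message are named by the commit, the variants and callbacks are assumptions:

```rust
// Illustrative message routing with an exit signal.
enum AppMessage {
    GenerationComplete { request_id: u64 },
    Resize(u16, u16),
    Tick,
    Quit,
}

trait MessageState {
    fn on_generation_complete(&mut self, _request_id: u64) {}
    fn on_resize(&mut self, _cols: u16, _rows: u16) {}
    fn on_tick(&mut self) {}
}

// Route a message to the state's callbacks; `true` means "exit the loop".
fn handle_message<S: MessageState>(state: &mut S, msg: AppMessage) -> bool {
    match msg {
        AppMessage::GenerationComplete { request_id } => state.on_generation_complete(request_id),
        AppMessage::Resize(cols, rows) => state.on_resize(cols, rows),
        AppMessage::Tick => state.on_tick(),
        AppMessage::Quit => return true,
    }
    false
}

// An empty impl is valid, mirroring ChatApp's initial integration.
struct NullState;
impl MessageState for NullState {}

fn main() {
    let mut state = NullState;
    assert!(!handle_message(&mut state, AppMessage::Tick));
    assert!(handle_message(&mut state, AppMessage::Quit));
}
```
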
7effade1d3 refactor(tui): extract model selector UI into dedicated widget module
Added `widgets::model_picker` containing the full model picker rendering logic and moved related helper functions there. Updated `ui.rs` to use `render_model_picker` and removed the now‑duplicate model selector implementation. This cleanly separates UI concerns and improves code reuse.
2025-10-16 21:39:50 +02:00
dc0fee2ee3 feat(app): add background worker for provider health checks
Introduce a `worker` module with `background_worker` that periodically refreshes provider health and emits status updates via the app's message channel. Add `spawn_background_worker` method to `App` for launching the worker as a Tokio task.
2025-10-16 21:01:08 +02:00
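
A compact sketch of the worker loop under assumed message types; the real background_worker calls into the provider manager and reports per-provider status over the app's channel:

```rust
use std::time::Duration;
use tokio::sync::mpsc;

// Assumed update shape for illustration.
#[derive(Debug)]
enum StatusUpdate {
    ProviderHealth { provider: &'static str, healthy: bool },
}

// Periodic health-refresh loop in the spirit of `background_worker`.
async fn background_worker(tx: mpsc::UnboundedSender<StatusUpdate>) {
    let mut ticker = tokio::time::interval(Duration::from_secs(30));
    loop {
        ticker.tick().await;
        // A real implementation would call ProviderManager::refresh_health().
        let healthy = true;
        if tx.send(StatusUpdate::ProviderHealth { provider: "ollama_local", healthy }).is_err() {
            break; // the app dropped the receiver; shut down
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    tokio::spawn(background_worker(tx)); // mirrors spawn_background_worker
    // interval's first tick completes immediately, so one update arrives right away
    if let Some(update) = rx.recv().await {
        println!("{update:?}");
    }
}
```
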
ea04a25ed6 feat(app): add generation orchestration, messaging, and core App struct
Introduce `App` with provider manager, unbounded message channel, and active generation tracking.
Add `AppMessage` enum covering UI events, generation lifecycle (start, chunk, complete, error), model refresh, and provider status updates.
Implement `start_generation` to spawn asynchronous generation tasks, stream results, handle errors, and abort any previous generation.
Expose the new module via `pub mod app` in the crate root.
2025-10-16 20:39:53 +02:00
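
The abort-previous-generation behaviour can be sketched with a Tokio JoinHandle; message and field names below are illustrative, not owlen-tui's exact API:

```rust
use std::time::Duration;
use tokio::{sync::mpsc, task::JoinHandle};

enum AppMessage {
    Chunk(String),
    Complete,
}

struct App {
    tx: mpsc::UnboundedSender<AppMessage>,
    active: Option<JoinHandle<()>>,
}

impl App {
    fn start_generation(&mut self, prompt: String) {
        if let Some(handle) = self.active.take() {
            handle.abort(); // cancel the previous stream before starting a new one
        }
        let tx = self.tx.clone();
        self.active = Some(tokio::spawn(async move {
            for word in prompt.split_whitespace() {
                tokio::time::sleep(Duration::from_millis(10)).await; // stand-in for provider streaming
                let _ = tx.send(AppMessage::Chunk(word.to_string()));
            }
            let _ = tx.send(AppMessage::Complete);
        }));
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    let mut app = App { tx, active: None };
    app.start_generation("hello streaming world".into());
    while let Some(msg) = rx.recv().await {
        match msg {
            AppMessage::Chunk(word) => print!("{word} "),
            AppMessage::Complete => break,
        }
    }
}
```
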
282dcdce88 feat(config): separate Ollama into local/cloud providers, add OpenAI & Anthropic defaults, bump schema version to 1.6.0 2025-10-15 22:13:00 +02:00
b49f58bc16 feat(ollama): add cloud provider with API key handling and auth‑aware health check
Introduce `OllamaCloudProvider` that resolves the API key from configuration or the `OLLAMA_CLOUD_API_KEY` environment variable, constructs provider metadata (including timeout as numeric), and maps auth errors to `ProviderStatus::RequiresSetup`. Export the new provider in the `ollama` module. Add shared HTTP error mapping utilities (`map_http_error`, `truncated_body`) and update local provider metadata to store timeout as a number.
2025-10-15 21:07:41 +02:00
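
The key-resolution order (configuration first, then the environment variable) reduces to a small helper; ProviderConfig here is a simplified stand-in:

```rust
use std::env;

// Simplified stand-in for the crate's provider configuration.
struct ProviderConfig {
    api_key: Option<String>,
}

// Config wins; otherwise fall back to OLLAMA_CLOUD_API_KEY.
fn resolve_api_key(cfg: &ProviderConfig) -> Option<String> {
    cfg.api_key
        .as_deref()
        .map(str::trim)
        .filter(|k| !k.is_empty())
        .map(str::to_owned)
        .or_else(|| env::var("OLLAMA_CLOUD_API_KEY").ok().filter(|k| !k.trim().is_empty()))
}

fn main() {
    let cfg = ProviderConfig { api_key: None };
    match resolve_api_key(&cfg) {
        Some(_) => println!("cloud provider ready"),
        None => println!("RequiresSetup: no API key in config or OLLAMA_CLOUD_API_KEY"),
    }
}
```
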
cdc425ae93 feat(ollama): add local provider implementation and request timeout support
Introduce `OllamaLocalProvider` for communicating with a local Ollama daemon, including health checks, model listing, and stream generation. Export the provider in the Ollama module. Extend `OllamaClient` to accept an optional request timeout and apply it to the underlying HTTP client configuration.
2025-10-15 21:01:18 +02:00
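
Threading an optional timeout into the underlying HTTP client might look like the following reqwest-based sketch; this is not the crate's literal code:

```rust
use std::time::Duration;

// Apply a request timeout only when one is configured.
fn build_http_client(timeout_secs: Option<u64>) -> reqwest::Result<reqwest::Client> {
    let mut builder = reqwest::Client::builder();
    if let Some(secs) = timeout_secs {
        builder = builder.timeout(Duration::from_secs(secs));
    }
    builder.build()
}

fn main() -> reqwest::Result<()> {
    let _client = build_http_client(Some(30))?;
    Ok(())
}
```
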
3525cb3949 feat(provider): add Ollama client implementation in new providers crate
- Introduce `owlen-providers` crate with Cargo.toml and lib entry.
- Expose `OllamaClient` handling HTTP communication, health checks, model listing, and streaming generation.
- Implement request building, endpoint handling, and error mapping.
- Parse Ollama tags response and generation stream lines into core types.
- Add shared module re-exports for easy integration with the provider layer.
2025-10-15 20:54:52 +02:00
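
Parsing one NDJSON line of the generation stream reduces to a small serde shape; the fields below are a minimal subset of what Ollama actually returns:

```rust
use serde::Deserialize;

// Minimal shape for one stream line; real responses carry more fields
// (model name, timings, usage), which serde ignores by default.
#[derive(Deserialize)]
struct StreamLine {
    message: Option<StreamMessage>,
    done: bool,
}

#[derive(Deserialize)]
struct StreamMessage {
    content: String,
}

fn parse_stream_line(line: &str) -> serde_json::Result<(String, bool)> {
    let parsed: StreamLine = serde_json::from_str(line)?;
    let text = parsed.message.map(|m| m.content).unwrap_or_default();
    Ok((text, parsed.done))
}

fn main() {
    let line = r#"{"message":{"role":"assistant","content":"Hello"},"done":false}"#;
    let (text, done) = parse_stream_line(line).expect("valid NDJSON line");
    assert_eq!(text, "Hello");
    assert!(!done);
}
```
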
9d85420bf6 feat(provider): add ProviderManager to coordinate providers and cache health status
- Introduce `ProviderManager` for registering providers, routing generate calls, listing models, and refreshing health in parallel.
- Maintain a status cache to expose the last known health of each provider.
- Update `provider` module to re‑export the new manager alongside existing types.
2025-10-15 20:37:36 +02:00
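
Refreshing health in parallel while maintaining a status cache can be sketched with join_all; provider names and statuses below are toy values:

```rust
use std::collections::HashMap;
use futures::future::join_all;

#[derive(Clone, Copy, Debug)]
enum ProviderStatus {
    Available,
    Unavailable,
}

// A real check would issue an HTTP request to the provider.
async fn check(provider: &'static str) -> (String, ProviderStatus) {
    let status = if provider.contains("local") {
        ProviderStatus::Available
    } else {
        ProviderStatus::Unavailable
    };
    (provider.to_string(), status)
}

#[tokio::main]
async fn main() {
    let providers = ["ollama_local", "ollama_cloud"];
    // Run all checks concurrently, then cache the last known status per provider.
    let results = join_all(providers.into_iter().map(check)).await;
    let cache: HashMap<String, ProviderStatus> = results.into_iter().collect();
    println!("{cache:?}");
}
```
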
641c95131f feat(provider): add unified provider abstraction layer with ModelProvider trait and shared types 2025-10-15 20:27:30 +02:00
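
A hedged sketch of such an abstraction layer; the real ModelProvider trait in owlen-core carries more methods and richer shared types:

```rust
use async_trait::async_trait;

#[derive(Debug)]
struct ModelInfo {
    id: String,
    provider: String,
}

// Object-safe async provider trait, usable behind Box<dyn ModelProvider>.
#[async_trait]
trait ModelProvider: Send + Sync {
    fn name(&self) -> &str;
    async fn list_models(&self) -> Result<Vec<ModelInfo>, String>;
    async fn health_check(&self) -> Result<(), String>;
}

struct DummyProvider;

#[async_trait]
impl ModelProvider for DummyProvider {
    fn name(&self) -> &str { "dummy" }
    async fn list_models(&self) -> Result<Vec<ModelInfo>, String> {
        Ok(vec![ModelInfo { id: "demo".into(), provider: self.name().into() }])
    }
    async fn health_check(&self) -> Result<(), String> { Ok(()) }
}

#[tokio::main]
async fn main() {
    let provider: Box<dyn ModelProvider> = Box::new(DummyProvider);
    println!("{:?}", provider.list_models().await.unwrap());
}
```
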
92 changed files with 8785 additions and 1542 deletions

.github/workflows/macos-check.yml (new file)

@@ -0,0 +1,34 @@
name: macos-check
on:
push:
branches:
- dev
pull_request:
branches:
- dev
jobs:
build:
name: cargo check (macOS)
runs-on: macos-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Cache Cargo registry
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-
- name: Cargo check
run: cargo check --workspace --all-features

.pre-commit-config.yaml

@@ -9,6 +9,7 @@ repos:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
args: ['--allow-multiple-documents']
- id: check-toml
- id: check-merge-conflict
- id: check-added-large-files

.woodpecker.yml

@@ -1,3 +1,61 @@
---
kind: pipeline
name: pr-checks
when:
event:
- push
- pull_request
steps:
- name: fmt-clippy-test
image: rust:1.83
commands:
- rustup component add rustfmt clippy
- cargo fmt --all -- --check
- cargo clippy --workspace --all-features -- -D warnings
- cargo test --workspace --all-features
---
kind: pipeline
name: security-audit
when:
event:
- push
- cron
branch:
- dev
cron: weekly-security
steps:
- name: cargo-audit
image: rust:1.83
commands:
- cargo install cargo-audit --locked
- cargo audit
---
kind: pipeline
name: release-tests
when:
event: tag
tag: v*
steps:
- name: workspace-tests
image: rust:1.83
commands:
- rustup component add llvm-tools-preview
- cargo install cargo-llvm-cov --locked
- cargo llvm-cov --workspace --all-features --summary-only
- cargo llvm-cov --workspace --all-features --lcov --output-path coverage.lcov --no-run
---
kind: pipeline
name: release
when:
event: tag
tag: v*
@@ -5,6 +63,9 @@ when:
variables:
- &rust_image 'rust:1.83'
depends_on:
- release-tests
matrix:
include:
# Linux
@@ -39,14 +100,6 @@ matrix:
EXT: ".exe"
steps:
- name: tests
image: *rust_image
commands:
- rustup component add llvm-tools-preview
- cargo install cargo-llvm-cov --locked
- cargo llvm-cov --workspace --all-features --summary-only
- cargo llvm-cov --workspace --all-features --lcov --output-path coverage.lcov --no-run
- name: build
image: *rust_image
commands:
@@ -124,6 +177,11 @@ steps:
sha256sum ${ARTIFACT}.tar.gz > ${ARTIFACT}.tar.gz.sha256
fi
- name: release-notes
image: *rust_image
commands:
- scripts/release-notes.sh "${CI_COMMIT_TAG}" release-notes.md
- name: release
image: plugins/gitea-release
settings:
@@ -136,4 +194,4 @@ steps:
- ${ARTIFACT}.zip
- ${ARTIFACT}.zip.sha256
title: Release ${CI_COMMIT_TAG}
note: "Release ${CI_COMMIT_TAG}"
note_file: release-notes.md

CHANGELOG.md

@@ -11,6 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Comprehensive documentation suite including guides for architecture, configuration, testing, and more.
- Rustdoc examples for core components like `Provider` and `SessionController`.
- Module-level documentation for `owlen-tui`.
- Provider integration tests (`crates/owlen-providers/tests`) covering registration, routing, and health status handling for the new `ProviderManager`.
- TUI message and generation tests that exercise the non-blocking event loop, background worker, and message dispatch.
- Ollama integration can now talk to Ollama Cloud when an API key is configured.
- Ollama provider will also read `OLLAMA_API_KEY` / `OLLAMA_CLOUD_API_KEY` environment variables when no key is stored in the config.
- `owlen config doctor`, `owlen config path`, and `owlen upgrade` CLI commands to automate migrations and surface manual update steps.
@@ -26,6 +28,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Input panel respects a new `ui.input_max_rows` setting so long prompts expand predictably before scrolling kicks in.
- Command palette offers fuzzy `:model` filtering and `:provider` completions for fast switching.
- Message rendering caches wrapped lines and throttles streaming redraws to keep the TUI responsive on long sessions.
- Model picker badges now inspect provider capabilities so vision/audio/thinking models surface the correct icons even when descriptions are sparse.
- Chat history honors `ui.scrollback_lines`, trimming older rows to keep the TUI responsive and surfacing a "↓ New messages" badge whenever updates land off-screen.
### Changed
@@ -38,9 +41,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- `config.toml` now carries a schema version (`1.2.0`) and is migrated automatically; deprecated keys such as `agent.max_tool_calls` trigger warnings instead of hard failures.
- Model selector navigation (Tab/Shift-Tab) now switches between local and cloud tabs while preserving selection state.
- Header displays the active model together with its provider (e.g., `Model (Provider)`), improving clarity when swapping backends.
- Documentation refreshed to cover the message handler architecture, the background health worker, multi-provider configuration, and the new provider onboarding checklist.
---
## [0.1.11] - 2025-10-18
### Changed
- Bump workspace packages and distribution metadata to version `0.1.11`.
## [0.1.10] - 2025-10-03
### Added

CONTRIBUTING.md

@@ -10,6 +10,10 @@ This project and everyone participating in it is governed by the [Owlen Code of
## How Can I Contribute?
### Repository map
Need a quick orientation before diving in? Start with the curated [repo map](docs/repo-map.md) for a two-level directory overview. If you move folders around, regenerate it with `scripts/gen-repo-map.sh`.
### Reporting Bugs
This is one of the most helpful ways you can contribute. Before creating a bug report, please check a few things:

Cargo.toml

@@ -4,17 +4,19 @@ members = [
"crates/owlen-core",
"crates/owlen-tui",
"crates/owlen-cli",
"crates/owlen-mcp-server",
"crates/owlen-mcp-llm-server",
"crates/owlen-mcp-client",
"crates/owlen-mcp-code-server",
"crates/owlen-mcp-prompt-server",
"crates/owlen-providers",
"crates/mcp/server",
"crates/mcp/llm-server",
"crates/mcp/client",
"crates/mcp/code-server",
"crates/mcp/prompt-server",
"crates/owlen-markdown",
"xtask",
]
exclude = []
[workspace.package]
version = "0.1.9"
version = "0.1.11"
edition = "2024"
authors = ["Owlibou"]
license = "AGPL-3.0"

PKGBUILD

@@ -1,6 +1,6 @@
# Maintainer: vikingowl <christian@nachtigall.dev>
pkgname=owlen
pkgver=0.1.9
pkgver=0.1.11
pkgrel=1
pkgdesc="Terminal User Interface LLM client for Ollama with chat and code assistance features"
arch=('x86_64')

README.md

@@ -3,16 +3,17 @@
> Terminal-native assistant for running local language models with a comfortable TUI.
![Status](https://img.shields.io/badge/status-alpha-yellow)
![Version](https://img.shields.io/badge/version-0.1.9-blue)
![Version](https://img.shields.io/badge/version-0.1.11-blue)
![Rust](https://img.shields.io/badge/made_with-Rust-ffc832?logo=rust&logoColor=white)
![License](https://img.shields.io/badge/license-AGPL--3.0-blue)
## What Is OWLEN?
OWLEN is a Rust-powered, terminal-first interface for interacting with local large
language models. It provides a responsive chat workflow that runs against
[Ollama](https://ollama.com/) with a focus on developer productivity, vim-style navigation,
and seamless session management—all without leaving your terminal.
OWLEN is a Rust-powered, terminal-first interface for interacting with local and cloud
language models. It provides a responsive chat workflow that now routes through a
multi-provider manager—handling local Ollama, Ollama Cloud, and future MCP-backed providers—
with a focus on developer productivity, vim-style navigation, and seamless session
management—all without leaving your terminal.
## Alpha Status
@@ -32,8 +33,9 @@ The OWLEN interface features a clean, multi-panel layout with vim-inspired navig
- **Session Management**: Save, load, and manage conversations.
- **Code Side Panel**: Switch to code mode (`:mode code`) and open files inline with `:open <path>` for LLM-assisted coding.
- **Theming System**: 10 built-in themes and support for custom themes.
- **Modular Architecture**: Extensible provider system (Ollama today, additional providers on the roadmap).
- **Dual-Source Model Picker**: Merge local and cloud Ollama models with live availability indicators so you can see at a glance which catalogues are reachable.
- **Modular Architecture**: Extensible provider system orchestrated by the new `ProviderManager`, ready for additional MCP-backed providers.
- **Dual-Source Model Picker**: Merge local and cloud catalogues with real-time availability badges powered by the background health worker.
- **Non-Blocking UI Loop**: Asynchronous generation tasks and provider health checks run off-thread, keeping the TUI responsive even while streaming long replies.
- **Guided Setup**: `owlen config doctor` upgrades legacy configs and verifies your environment in seconds.
## Security & Privacy
@@ -55,20 +57,28 @@ Owlen is designed to keep data local by default while still allowing controlled
### Installation
#### Linux & macOS
The recommended way to install on Linux and macOS is to clone the repository and install using `cargo`.
Pick the option that matches your platform and appetite for source builds:
| Platform | Package / Command | Notes |
| --- | --- | --- |
| Arch Linux | `yay -S owlen-git` | Builds from the latest `dev` branch via AUR. |
| Other Linux | `cargo install --path crates/owlen-cli --locked --force` | Requires Rust 1.75+ and a running Ollama daemon. |
| macOS | `cargo install --path crates/owlen-cli --locked --force` | macOS 12+ tested. Install Ollama separately (`brew install ollama`). The binary links against the system OpenSSL; ensure Command Line Tools are installed. |
| Windows (experimental) | `cargo install --path crates/owlen-cli --locked --force` | Enable the GNU toolchain (`rustup target add x86_64-pc-windows-gnu`) and install Ollama for Windows preview builds. Some optional tools (e.g., Docker-based code execution) are currently disabled. |
If you prefer containerised builds, use the provided `Dockerfile` as a base image and copy out `target/release/owlen`.
Run the helper scripts to sanity-check platform coverage:
```bash
git clone https://github.com/Owlibou/owlen.git
cd owlen
cargo install --path crates/owlen-cli
# Windows compatibility smoke test (GNU toolchain)
scripts/check-windows.sh
# Reproduce CI packaging locally (choose a target from .woodpecker.yml)
dev/local_build.sh x86_64-unknown-linux-gnu
```
**Note for macOS**: While this method works, official binary releases for macOS are planned for the future.
#### Windows
The Windows build has not been thoroughly tested yet. Installation is possible via the same `cargo install` method, but it is considered experimental at this time.
From Unix hosts you can run `scripts/check-windows.sh` to ensure the code base still compiles for Windows (`rustup` will install the required target automatically).
> **Tip (macOS):** On the first launch macOS Gatekeeper may quarantine the binary. Clear the attribute (`xattr -d com.apple.quarantine $(which owlen)`) or build from source locally to avoid notarisation prompts.
### Running OWLEN
@@ -110,7 +120,10 @@ For more detailed information, please refer to the following documents:
- **[CHANGELOG.md](CHANGELOG.md)**: A log of changes for each version.
- **[docs/architecture.md](docs/architecture.md)**: An overview of the project's architecture.
- **[docs/troubleshooting.md](docs/troubleshooting.md)**: Help with common issues.
- **[docs/provider-implementation.md](docs/provider-implementation.md)**: A guide for adding new providers.
- **[docs/repo-map.md](docs/repo-map.md)**: Snapshot of the workspace layout and key crates.
- **[docs/provider-implementation.md](docs/provider-implementation.md)**: Trait-level details for implementing providers.
- **[docs/adding-providers.md](docs/adding-providers.md)**: Step-by-step checklist for wiring a provider into the multi-provider architecture and test suite.
- **Experimental providers staging area**: [crates/providers/experimental/README.md](crates/providers/experimental/README.md) records the placeholder crates (OpenAI, Anthropic, Gemini) and their current status.
- **[docs/platform-support.md](docs/platform-support.md)**: Current OS support matrix and cross-check instructions.
## Configuration

config.toml (new file)

@@ -0,0 +1,29 @@
[general]
default_provider = "ollama_local"
default_model = "llama3.2:latest"
[privacy]
encrypt_local_data = true
[providers.ollama_local]
enabled = true
provider_type = "ollama"
base_url = "http://localhost:11434"
[providers.ollama_cloud]
enabled = false
provider_type = "ollama_cloud"
base_url = "https://ollama.com"
api_key_env = "OLLAMA_CLOUD_API_KEY"
[providers.openai]
enabled = false
provider_type = "openai"
base_url = "https://api.openai.com/v1"
api_key_env = "OPENAI_API_KEY"
[providers.anthropic]
enabled = false
provider_type = "anthropic"
base_url = "https://api.anthropic.com/v1"
api_key_env = "ANTHROPIC_API_KEY"

crates/mcp/client/Cargo.toml

@@ -6,7 +6,7 @@ description = "Dedicated MCP client library for Owlen, exposing remote MCP serve
license = "AGPL-3.0"
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }
[features]
default = []

crates/mcp/code-server/Cargo.toml

@@ -6,7 +6,7 @@ description = "MCP server exposing safe code execution tools for Owlen"
license = "AGPL-3.0"
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }

crates/mcp/llm-server/Cargo.toml

@@ -4,7 +4,7 @@ version = "0.1.0"
edition.workspace = true
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }
tokio = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }


@@ -126,7 +126,7 @@ fn provider_from_config() -> Result<Arc<dyn Provider>, RpcError> {
})?;
match provider_cfg.provider_type.as_str() {
"ollama" | "ollama-cloud" => {
"ollama" | "ollama_cloud" => {
let provider = OllamaProvider::from_config(&provider_cfg, Some(&config.general))
.map_err(|e| {
RpcError::internal_error(format!(
@@ -153,10 +153,12 @@ fn create_provider() -> Result<Arc<dyn Provider>, RpcError> {
}
fn canonical_provider_name(name: &str) -> String {
if name.eq_ignore_ascii_case("ollama-cloud") {
"ollama".to_string()
} else {
name.to_string()
let normalized = name.trim().to_ascii_lowercase().replace('-', "_");
match normalized.as_str() {
"" => "ollama_local".to_string(),
"ollama" | "ollama_local" => "ollama_local".to_string(),
"ollama_cloud" => "ollama_cloud".to_string(),
other => other.to_string(),
}
}

crates/mcp/prompt-server/Cargo.toml

@@ -6,7 +6,7 @@ description = "MCP server that renders prompt templates (YAML) for Owlen"
license = "AGPL-3.0"
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }

crates/mcp/server/Cargo.toml

@@ -9,4 +9,4 @@ serde = { workspace = true }
serde_json = { workspace = true }
anyhow = { workspace = true }
path-clean = "1.0"
owlen-core = { path = "../owlen-core" }
owlen-core = { path = "../../owlen-core" }

crates/owlen-cli/Cargo.toml

@@ -17,6 +17,11 @@ name = "owlen"
path = "src/main.rs"
required-features = ["chat-client"]
[[bin]]
name = "owlen-code"
path = "src/code_main.rs"
required-features = ["chat-client"]
[[bin]]
name = "owlen-agent"
path = "src/agent_main.rs"
@@ -24,6 +29,7 @@ required-features = ["chat-client"]
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-providers = { path = "../owlen-providers" }
# Optional TUI dependency, enabled by the "chat-client" feature.
owlen-tui = { path = "../owlen-tui", optional = true }
log = { workspace = true }

crates/owlen-cli/src/bootstrap.rs (new file)

@@ -0,0 +1,326 @@
use std::borrow::Cow;
use std::io;
use std::sync::Arc;
use anyhow::{Result, anyhow};
use async_trait::async_trait;
use crossterm::{
event::{DisableBracketedPaste, DisableMouseCapture, EnableBracketedPaste, EnableMouseCapture},
execute,
terminal::{EnterAlternateScreen, LeaveAlternateScreen, disable_raw_mode, enable_raw_mode},
};
use futures::stream;
use owlen_core::{
ChatStream, Error, Provider,
config::{Config, McpMode},
mcp::remote_client::RemoteMcpClient,
mode::Mode,
provider::ProviderManager,
providers::OllamaProvider,
session::{ControllerEvent, SessionController},
storage::StorageManager,
types::{ChatRequest, ChatResponse, Message, ModelInfo},
};
use owlen_tui::{
ChatApp, SessionEvent,
app::App as RuntimeApp,
config,
tui_controller::{TuiController, TuiRequest},
ui,
};
use ratatui::{Terminal, prelude::CrosstermBackend};
use tokio::sync::mpsc;
use crate::commands::cloud::{load_runtime_credentials, set_env_var};
pub async fn launch(initial_mode: Mode) -> Result<()> {
set_env_var("OWLEN_AUTO_CONSENT", "1");
let color_support = detect_terminal_color_support();
let mut cfg = config::try_load_config().unwrap_or_default();
let _ = cfg.refresh_mcp_servers(None);
if let Some(previous_theme) = apply_terminal_theme(&mut cfg, &color_support) {
let term_label = match &color_support {
TerminalColorSupport::Limited { term } => Cow::from(term.as_str()),
TerminalColorSupport::Full => Cow::from("current terminal"),
};
eprintln!(
"Terminal '{}' lacks full 256-color support. Using '{}' theme instead of '{}'.",
term_label, BASIC_THEME_NAME, previous_theme
);
} else if let TerminalColorSupport::Limited { term } = &color_support {
eprintln!(
"Warning: terminal '{}' may not fully support 256-color themes.",
term
);
}
cfg.validate()?;
let storage = Arc::new(StorageManager::new().await?);
load_runtime_credentials(&mut cfg, storage.clone()).await?;
let (tui_tx, _tui_rx) = mpsc::unbounded_channel::<TuiRequest>();
let tui_controller = Arc::new(TuiController::new(tui_tx));
let provider = build_provider(&cfg)?;
let mut offline_notice: Option<String> = None;
let provider = match provider.health_check().await {
Ok(_) => provider,
Err(err) => {
let hint = if matches!(cfg.mcp.mode, McpMode::RemotePreferred | McpMode::RemoteOnly)
&& !cfg.effective_mcp_servers().is_empty()
{
"Ensure the configured MCP server is running and reachable."
} else {
"Ensure Ollama is running (`ollama serve`) and reachable at the configured base_url."
};
let notice =
format!("Provider health check failed: {err}. {hint} Continuing in offline mode.");
eprintln!("{notice}");
offline_notice = Some(notice.clone());
let fallback_model = cfg
.general
.default_model
.clone()
.unwrap_or_else(|| "offline".to_string());
Arc::new(OfflineProvider::new(notice, fallback_model)) as Arc<dyn Provider>
}
};
let (controller_event_tx, controller_event_rx) = mpsc::unbounded_channel::<ControllerEvent>();
let controller = SessionController::new(
provider,
cfg,
storage.clone(),
tui_controller,
false,
Some(controller_event_tx),
)
.await?;
let provider_manager = Arc::new(ProviderManager::default());
let mut runtime = RuntimeApp::new(provider_manager);
let (mut app, mut session_rx) = ChatApp::new(controller, controller_event_rx).await?;
app.initialize_models().await?;
if let Some(notice) = offline_notice.clone() {
app.set_status_message(&notice);
app.set_system_status(notice);
}
app.set_mode(initial_mode).await;
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(
stdout,
EnterAlternateScreen,
EnableMouseCapture,
EnableBracketedPaste
)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
let result = run_app(&mut terminal, &mut runtime, &mut app, &mut session_rx).await;
config::save_config(&app.config())?;
disable_raw_mode()?;
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture,
DisableBracketedPaste
)?;
terminal.show_cursor()?;
if let Err(err) = result {
println!("{err:?}");
}
Ok(())
}
fn build_provider(cfg: &Config) -> Result<Arc<dyn Provider>> {
match cfg.mcp.mode {
McpMode::RemotePreferred => {
let remote_result = if let Some(mcp_server) = cfg.effective_mcp_servers().first() {
RemoteMcpClient::new_with_config(mcp_server)
} else {
RemoteMcpClient::new()
};
match remote_result {
Ok(client) => Ok(Arc::new(client) as Arc<dyn Provider>),
Err(err) if cfg.mcp.allow_fallback => {
log::warn!(
"Remote MCP client unavailable ({}); falling back to local provider.",
err
);
build_local_provider(cfg)
}
Err(err) => Err(anyhow!(err)),
}
}
McpMode::RemoteOnly => {
let mcp_server = cfg.effective_mcp_servers().first().ok_or_else(|| {
anyhow!("[[mcp_servers]] must be configured when [mcp].mode = \"remote_only\"")
})?;
let client = RemoteMcpClient::new_with_config(mcp_server)?;
Ok(Arc::new(client) as Arc<dyn Provider>)
}
McpMode::LocalOnly | McpMode::Legacy => build_local_provider(cfg),
McpMode::Disabled => Err(anyhow!(
"MCP mode 'disabled' is not supported by the owlen TUI"
)),
}
}
fn build_local_provider(cfg: &Config) -> Result<Arc<dyn Provider>> {
let provider_name = cfg.general.default_provider.clone();
let provider_cfg = cfg.provider(&provider_name).ok_or_else(|| {
anyhow!(format!(
"No provider configuration found for '{provider_name}' in [providers]"
))
})?;
match provider_cfg.provider_type.as_str() {
"ollama" | "ollama_cloud" => {
let provider = OllamaProvider::from_config(provider_cfg, Some(&cfg.general))?;
Ok(Arc::new(provider) as Arc<dyn Provider>)
}
other => Err(anyhow!(format!(
"Provider type '{other}' is not supported in legacy/local MCP mode"
))),
}
}
const BASIC_THEME_NAME: &str = "ansi_basic";
#[derive(Debug, Clone)]
enum TerminalColorSupport {
Full,
Limited { term: String },
}
fn detect_terminal_color_support() -> TerminalColorSupport {
let term = std::env::var("TERM").unwrap_or_else(|_| "unknown".to_string());
let colorterm = std::env::var("COLORTERM").unwrap_or_default();
let term_lower = term.to_lowercase();
let color_lower = colorterm.to_lowercase();
let supports_extended = term_lower.contains("256color")
|| color_lower.contains("truecolor")
|| color_lower.contains("24bit")
|| color_lower.contains("fullcolor");
if supports_extended {
TerminalColorSupport::Full
} else {
TerminalColorSupport::Limited { term }
}
}
fn apply_terminal_theme(cfg: &mut Config, support: &TerminalColorSupport) -> Option<String> {
match support {
TerminalColorSupport::Full => None,
TerminalColorSupport::Limited { .. } => {
if cfg.ui.theme != BASIC_THEME_NAME {
let previous = std::mem::replace(&mut cfg.ui.theme, BASIC_THEME_NAME.to_string());
Some(previous)
} else {
None
}
}
}
}
struct OfflineProvider {
reason: String,
placeholder_model: String,
}
impl OfflineProvider {
fn new(reason: String, placeholder_model: String) -> Self {
Self {
reason,
placeholder_model,
}
}
fn friendly_response(&self, requested_model: &str) -> ChatResponse {
let mut message = String::new();
message.push_str("⚠️ Owlen is running in offline mode.\n\n");
message.push_str(&self.reason);
if !requested_model.is_empty() && requested_model != self.placeholder_model {
message.push_str(&format!(
"\n\nYou requested model '{}', but no providers are reachable.",
requested_model
));
}
message.push_str(
"\n\nStart your preferred provider (e.g. `ollama serve`) or switch providers with `:provider` once connectivity is restored.",
);
ChatResponse {
message: Message::assistant(message),
usage: None,
is_streaming: false,
is_final: true,
}
}
}
#[async_trait]
impl Provider for OfflineProvider {
fn name(&self) -> &str {
"offline"
}
async fn list_models(&self) -> Result<Vec<ModelInfo>, Error> {
Ok(vec![ModelInfo {
id: self.placeholder_model.clone(),
provider: "offline".to_string(),
name: format!("Offline (fallback: {})", self.placeholder_model),
description: Some("Placeholder model used while no providers are reachable".into()),
context_window: None,
capabilities: vec![],
supports_tools: false,
}])
}
async fn send_prompt(&self, request: ChatRequest) -> Result<ChatResponse, Error> {
Ok(self.friendly_response(&request.model))
}
async fn stream_prompt(&self, request: ChatRequest) -> Result<ChatStream, Error> {
let response = self.friendly_response(&request.model);
Ok(Box::pin(stream::iter(vec![Ok(response)])))
}
async fn health_check(&self) -> Result<(), Error> {
Err(Error::Provider(anyhow!(
"offline provider cannot reach any backing models"
)))
}
fn as_any(&self) -> &(dyn std::any::Any + Send + Sync) {
self
}
}
async fn run_app(
terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
runtime: &mut RuntimeApp,
app: &mut ChatApp,
session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
) -> Result<()> {
let mut render = |terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
state: &mut ChatApp|
-> Result<()> {
terminal.draw(|f| ui::render_chat(f, state))?;
Ok(())
};
runtime.run(terminal, app, session_rx, &mut render).await?;
Ok(())
}

crates/owlen-cli/src/code_main.rs (new file)

@@ -0,0 +1,16 @@
//! Owlen CLI entrypoint optimised for code-first workflows.
#![allow(dead_code, unused_imports)]
mod bootstrap;
mod commands;
mod mcp;
use anyhow::Result;
use owlen_core::config as core_config;
use owlen_core::mode::Mode;
use owlen_tui::config;
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<()> {
bootstrap::launch(Mode::Code).await
}

crates/owlen-cli/src/commands/cloud.rs

@@ -7,7 +7,8 @@ use clap::Subcommand;
use owlen_core::LlmProvider;
use owlen_core::ProviderConfig;
use owlen_core::config::{
self as core_config, Config, OLLAMA_CLOUD_BASE_URL, OLLAMA_CLOUD_ENDPOINT_KEY, OLLAMA_MODE_KEY,
self as core_config, Config, OLLAMA_CLOUD_API_KEY_ENV, OLLAMA_CLOUD_BASE_URL,
OLLAMA_CLOUD_ENDPOINT_KEY, OLLAMA_MODE_KEY,
};
use owlen_core::credentials::{ApiCredentials, CredentialManager, OLLAMA_CLOUD_CREDENTIAL_ID};
use owlen_core::encryption;
@@ -17,6 +18,7 @@ use serde_json::Value;
const DEFAULT_CLOUD_ENDPOINT: &str = OLLAMA_CLOUD_BASE_URL;
const CLOUD_ENDPOINT_KEY: &str = OLLAMA_CLOUD_ENDPOINT_KEY;
const CLOUD_PROVIDER_KEY: &str = "ollama_cloud";
#[derive(Debug, Subcommand)]
pub enum CloudCommand {
@@ -28,8 +30,8 @@ pub enum CloudCommand {
/// Override the cloud endpoint (default: https://ollama.com)
#[arg(long)]
endpoint: Option<String>,
/// Provider name to configure (default: ollama)
#[arg(long, default_value = "ollama")]
/// Provider name to configure (default: ollama_cloud)
#[arg(long, default_value = "ollama_cloud")]
provider: String,
/// Overwrite the provider base URL with the cloud endpoint
#[arg(long)]
@@ -37,20 +39,20 @@ pub enum CloudCommand {
},
/// Check connectivity to Ollama Cloud
Status {
/// Provider name to check (default: ollama)
#[arg(long, default_value = "ollama")]
/// Provider name to check (default: ollama_cloud)
#[arg(long, default_value = "ollama_cloud")]
provider: String,
},
/// List available cloud-hosted models
Models {
/// Provider name to query (default: ollama)
#[arg(long, default_value = "ollama")]
/// Provider name to query (default: ollama_cloud)
#[arg(long, default_value = "ollama_cloud")]
provider: String,
},
/// Remove stored Ollama Cloud credentials
Logout {
/// Provider name to clear (default: ollama)
#[arg(long, default_value = "ollama")]
/// Provider name to clear (default: ollama_cloud)
#[arg(long, default_value = "ollama_cloud")]
provider: String,
},
}
@@ -82,6 +84,7 @@ async fn setup(
let base_changed = {
let entry = ensure_provider_entry(&mut config, &provider);
entry.enabled = true;
configure_cloud_endpoint(entry, &endpoint, force_cloud_base_url)
};
@@ -140,6 +143,7 @@ async fn status(provider: String) -> Result<()> {
let api_key = hydrate_api_key(&mut config, manager.as_ref()).await?;
{
let entry = ensure_provider_entry(&mut config, &provider);
entry.enabled = true;
configure_cloud_endpoint(entry, DEFAULT_CLOUD_ENDPOINT, false);
}
@@ -190,6 +194,7 @@ async fn models(provider: String) -> Result<()> {
{
let entry = ensure_provider_entry(&mut config, &provider);
entry.enabled = true;
configure_cloud_endpoint(entry, DEFAULT_CLOUD_ENDPOINT, false);
}
@@ -245,8 +250,9 @@ async fn logout(provider: String) -> Result<()> {
.await?;
}
if let Some(entry) = provider_entry_mut(&mut config) {
if let Some(entry) = config.providers.get_mut(&provider) {
entry.api_key = None;
entry.enabled = false;
}
crate::config::save_config(&config)?;
@@ -255,28 +261,7 @@ async fn logout(provider: String) -> Result<()> {
}
fn ensure_provider_entry<'a>(config: &'a mut Config, provider: &str) -> &'a mut ProviderConfig {
if provider == "ollama"
&& config.providers.contains_key("ollama-cloud")
&& !config.providers.contains_key("ollama")
{
if let Some(mut legacy) = config.providers.remove("ollama-cloud") {
legacy.provider_type = "ollama".to_string();
config.providers.insert("ollama".to_string(), legacy);
}
}
core_config::ensure_provider_config(config, provider);
let entry = config
.providers
.get_mut(provider)
.expect("provider entry must exist");
if entry.provider_type != "ollama" {
entry.provider_type = "ollama".to_string();
}
entry
core_config::ensure_provider_config_mut(config, provider)
}
fn configure_cloud_endpoint(entry: &mut ProviderConfig, endpoint: &str, force: bool) -> bool {
@@ -287,6 +272,10 @@ fn configure_cloud_endpoint(entry: &mut ProviderConfig, endpoint: &str, force: b
Value::String(normalized.clone()),
);
if entry.api_key_env.is_none() {
entry.api_key_env = Some(OLLAMA_CLOUD_API_KEY_ENV.to_string());
}
if force
|| entry
.base_url
@@ -298,10 +287,7 @@ fn configure_cloud_endpoint(entry: &mut ProviderConfig, endpoint: &str, force: b
}
if force {
entry.extra.insert(
OLLAMA_MODE_KEY.to_string(),
Value::String("cloud".to_string()),
);
entry.enabled = true;
}
entry.base_url != previous_base
@@ -333,10 +319,11 @@ fn normalize_endpoint(endpoint: &str) -> String {
}
fn canonical_provider_name(provider: &str) -> String {
let normalized = provider.trim().replace('_', "-").to_ascii_lowercase();
let normalized = provider.trim().to_ascii_lowercase().replace('-', "_");
match normalized.as_str() {
"" => "ollama".to_string(),
"ollama-cloud" => "ollama".to_string(),
"" => CLOUD_PROVIDER_KEY.to_string(),
"ollama" => CLOUD_PROVIDER_KEY.to_string(),
"ollama_cloud" => CLOUD_PROVIDER_KEY.to_string(),
value => value.to_string(),
}
}
@@ -362,21 +349,6 @@ fn set_env_if_missing(var: &str, value: &str) {
}
}
fn provider_entry_mut(config: &mut Config) -> Option<&mut ProviderConfig> {
if config.providers.contains_key("ollama") {
config.providers.get_mut("ollama")
} else {
config.providers.get_mut("ollama-cloud")
}
}
fn provider_entry(config: &Config) -> Option<&ProviderConfig> {
if let Some(entry) = config.providers.get("ollama") {
return Some(entry);
}
config.providers.get("ollama-cloud")
}
fn unlock_credential_manager(
config: &Config,
storage: Arc<StorageManager>,
@@ -463,14 +435,13 @@ async fn hydrate_api_key(
set_env_if_missing("OLLAMA_CLOUD_API_KEY", &key);
}
let Some(cfg) = provider_entry_mut(config) else {
return Ok(Some(key));
};
let cfg = core_config::ensure_provider_config_mut(config, CLOUD_PROVIDER_KEY);
configure_cloud_endpoint(cfg, &credentials.endpoint, false);
return Ok(Some(key));
}
if let Some(key) = provider_entry(config)
if let Some(key) = config
.provider(CLOUD_PROVIDER_KEY)
.and_then(|cfg| cfg.api_key.as_ref())
.map(|value| value.trim())
.filter(|value| !value.is_empty())
@@ -501,8 +472,8 @@ mod tests {
#[test]
fn canonicalises_provider_names() {
assert_eq!(canonical_provider_name("OLLAMA_CLOUD"), "ollama");
assert_eq!(canonical_provider_name(" ollama-cloud"), "ollama");
assert_eq!(canonical_provider_name(""), "ollama");
assert_eq!(canonical_provider_name("OLLAMA_CLOUD"), CLOUD_PROVIDER_KEY);
assert_eq!(canonical_provider_name(" ollama-cloud"), CLOUD_PROVIDER_KEY);
assert_eq!(canonical_provider_name(""), CLOUD_PROVIDER_KEY);
}
}

crates/owlen-cli/src/commands/mod.rs (new file)

@@ -0,0 +1,4 @@
//! Command implementations for the `owlen` CLI.
pub mod cloud;
pub mod providers;

crates/owlen-cli/src/commands/providers.rs (new file)

@@ -0,0 +1,651 @@
use std::collections::HashMap;
use std::sync::Arc;
use anyhow::{Result, anyhow};
use clap::{Args, Subcommand};
use owlen_core::ProviderConfig;
use owlen_core::config::{self as core_config, Config};
use owlen_core::provider::{
AnnotatedModelInfo, ModelProvider, ProviderManager, ProviderStatus, ProviderType,
};
use owlen_core::storage::StorageManager;
use owlen_providers::ollama::{OllamaCloudProvider, OllamaLocalProvider};
use owlen_tui::config as tui_config;
use super::cloud;
/// CLI subcommands for provider management.
#[derive(Debug, Subcommand)]
pub enum ProvidersCommand {
/// List configured providers and their metadata.
List,
/// Run health checks against providers.
Status {
/// Optional provider identifier to check.
#[arg(value_name = "PROVIDER")]
provider: Option<String>,
},
/// Enable a provider in the configuration.
Enable {
/// Provider identifier to enable.
provider: String,
},
/// Disable a provider in the configuration.
Disable {
/// Provider identifier to disable.
provider: String,
},
}
/// Arguments for the `owlen models` command.
#[derive(Debug, Default, Args)]
pub struct ModelsArgs {
/// Restrict output to a specific provider.
#[arg(long)]
pub provider: Option<String>,
}
pub async fn run_providers_command(command: ProvidersCommand) -> Result<()> {
match command {
ProvidersCommand::List => list_providers(),
ProvidersCommand::Status { provider } => status_providers(provider.as_deref()).await,
ProvidersCommand::Enable { provider } => toggle_provider(&provider, true),
ProvidersCommand::Disable { provider } => toggle_provider(&provider, false),
}
}
pub async fn run_models_command(args: ModelsArgs) -> Result<()> {
list_models(args.provider.as_deref()).await
}
fn list_providers() -> Result<()> {
let config = tui_config::try_load_config().unwrap_or_default();
let default_provider = canonical_provider_id(&config.general.default_provider);
let mut rows = Vec::new();
for (id, cfg) in &config.providers {
let type_label = describe_provider_type(id, cfg);
let auth_label = describe_auth(cfg, requires_auth(id, cfg));
let enabled = if cfg.enabled { "yes" } else { "no" };
let default = if id == &default_provider { "*" } else { "" };
let base = cfg
.base_url
.as_ref()
.map(|value| value.trim().to_string())
.unwrap_or_else(|| "-".to_string());
rows.push(ProviderListRow {
id: id.to_string(),
type_label,
enabled: enabled.to_string(),
default: default.to_string(),
auth: auth_label,
base_url: base,
});
}
rows.sort_by(|a, b| a.id.cmp(&b.id));
let id_width = rows
.iter()
.map(|row| row.id.len())
.max()
.unwrap_or(8)
.max("Provider".len());
let enabled_width = rows
.iter()
.map(|row| row.enabled.len())
.max()
.unwrap_or(7)
.max("Enabled".len());
let default_width = rows
.iter()
.map(|row| row.default.len())
.max()
.unwrap_or(7)
.max("Default".len());
let type_width = rows
.iter()
.map(|row| row.type_label.len())
.max()
.unwrap_or(4)
.max("Type".len());
let auth_width = rows
.iter()
.map(|row| row.auth.len())
.max()
.unwrap_or(4)
.max("Auth".len());
println!(
"{:<id_width$} {:<enabled_width$} {:<default_width$} {:<type_width$} {:<auth_width$} Base URL",
"Provider",
"Enabled",
"Default",
"Type",
"Auth",
id_width = id_width,
enabled_width = enabled_width,
default_width = default_width,
type_width = type_width,
auth_width = auth_width,
);
for row in rows {
println!(
"{:<id_width$} {:<enabled_width$} {:<default_width$} {:<type_width$} {:<auth_width$} {}",
row.id,
row.enabled,
row.default,
row.type_label,
row.auth,
row.base_url,
id_width = id_width,
enabled_width = enabled_width,
default_width = default_width,
type_width = type_width,
auth_width = auth_width,
);
}
Ok(())
}
async fn status_providers(filter: Option<&str>) -> Result<()> {
let mut config = tui_config::try_load_config().unwrap_or_default();
let filter = filter.map(canonical_provider_id);
verify_provider_filter(&config, filter.as_deref())?;
let storage = Arc::new(StorageManager::new().await?);
cloud::load_runtime_credentials(&mut config, storage.clone()).await?;
let manager = ProviderManager::new(&config);
let records = register_enabled_providers(&manager, &config, filter.as_deref()).await?;
let health = manager.refresh_health().await;
let mut rows = Vec::new();
for record in records {
let status = health.get(&record.id).copied();
rows.push(ProviderStatusRow::from_record(record, status));
}
rows.sort_by(|a, b| a.id.cmp(&b.id));
print_status_rows(&rows);
Ok(())
}
async fn list_models(filter: Option<&str>) -> Result<()> {
let mut config = tui_config::try_load_config().unwrap_or_default();
let filter = filter.map(canonical_provider_id);
verify_provider_filter(&config, filter.as_deref())?;
let storage = Arc::new(StorageManager::new().await?);
cloud::load_runtime_credentials(&mut config, storage.clone()).await?;
let manager = ProviderManager::new(&config);
let records = register_enabled_providers(&manager, &config, filter.as_deref()).await?;
let models = manager
.list_all_models()
.await
.map_err(|err| anyhow!(err))?;
let statuses = manager.provider_statuses().await;
print_models(records, models, statuses);
Ok(())
}
fn verify_provider_filter(config: &Config, filter: Option<&str>) -> Result<()> {
if let Some(filter) = filter
&& !config.providers.contains_key(filter)
{
return Err(anyhow!(
"Provider '{}' is not defined in configuration.",
filter
));
}
Ok(())
}
fn toggle_provider(provider: &str, enable: bool) -> Result<()> {
let mut config = tui_config::try_load_config().unwrap_or_default();
let canonical = canonical_provider_id(provider);
if canonical.is_empty() {
return Err(anyhow!("Provider name cannot be empty."));
}
let previous_default = config.general.default_provider.clone();
let previous_fallback_enabled = config.providers.get("ollama_local").map(|cfg| cfg.enabled);
let previous_enabled;
{
let entry = core_config::ensure_provider_config_mut(&mut config, &canonical);
previous_enabled = entry.enabled;
if previous_enabled == enable {
println!(
"Provider '{}' is already {}.",
canonical,
if enable { "enabled" } else { "disabled" }
);
return Ok(());
}
entry.enabled = enable;
}
if !enable && config.general.default_provider == canonical {
if let Some(candidate) = choose_fallback_provider(&config, &canonical) {
config.general.default_provider = candidate.clone();
println!(
"Default provider set to '{}' because '{}' was disabled.",
candidate, canonical
);
} else {
let entry = core_config::ensure_provider_config_mut(&mut config, "ollama_local");
entry.enabled = true;
config.general.default_provider = "ollama_local".to_string();
println!(
"Enabled 'ollama_local' and made it default because no other providers are active."
);
}
}
if let Err(err) = config.validate() {
{
let entry = core_config::ensure_provider_config_mut(&mut config, &canonical);
entry.enabled = previous_enabled;
}
config.general.default_provider = previous_default;
if let Some(enabled) = previous_fallback_enabled
&& let Some(entry) = config.providers.get_mut("ollama_local")
{
entry.enabled = enabled;
}
return Err(anyhow!(err));
}
tui_config::save_config(&config).map_err(|err| anyhow!(err))?;
println!(
"{} provider '{}'.",
if enable { "Enabled" } else { "Disabled" },
canonical
);
Ok(())
}
fn choose_fallback_provider(config: &Config, exclude: &str) -> Option<String> {
if exclude != "ollama_local"
&& let Some(cfg) = config.providers.get("ollama_local")
&& cfg.enabled
{
return Some("ollama_local".to_string());
}
let mut candidates: Vec<String> = config
.providers
.iter()
.filter(|(id, cfg)| cfg.enabled && id.as_str() != exclude)
.map(|(id, _)| id.clone())
.collect();
candidates.sort();
candidates.into_iter().next()
}
async fn register_enabled_providers(
manager: &ProviderManager,
config: &Config,
filter: Option<&str>,
) -> Result<Vec<ProviderRecord>> {
let default_provider = canonical_provider_id(&config.general.default_provider);
let mut records = Vec::new();
for (id, cfg) in &config.providers {
if let Some(filter) = filter
&& id != filter
{
continue;
}
let mut record = ProviderRecord::from_config(id, cfg, id == &default_provider);
if !cfg.enabled {
records.push(record);
continue;
}
match instantiate_provider(id, cfg) {
Ok(provider) => {
let metadata = provider.metadata().clone();
record.provider_type_label = provider_type_label(metadata.provider_type);
record.requires_auth = metadata.requires_auth;
record.metadata = Some(metadata);
manager.register_provider(provider).await;
}
Err(err) => {
record.registration_error = Some(err.to_string());
}
}
records.push(record);
}
records.sort_by(|a, b| a.id.cmp(&b.id));
Ok(records)
}
fn instantiate_provider(id: &str, cfg: &ProviderConfig) -> Result<Arc<dyn ModelProvider>> {
let kind = cfg.provider_type.trim().to_ascii_lowercase();
if kind == "ollama" || id == "ollama_local" {
let provider = OllamaLocalProvider::new(cfg.base_url.clone(), None, None)
.map_err(|err| anyhow!(err))?;
Ok(Arc::new(provider))
} else if kind == "ollama_cloud" || id == "ollama_cloud" {
let provider = OllamaCloudProvider::new(cfg.base_url.clone(), cfg.api_key.clone(), None)
.map_err(|err| anyhow!(err))?;
Ok(Arc::new(provider))
} else {
Err(anyhow!(
"Provider '{}' uses unsupported type '{}'.",
id,
if kind.is_empty() {
"unknown"
} else {
kind.as_str()
}
))
}
}
fn describe_provider_type(id: &str, cfg: &ProviderConfig) -> String {
if cfg.provider_type.trim().eq_ignore_ascii_case("ollama") || id.ends_with("_local") {
"Local".to_string()
} else if cfg
.provider_type
.trim()
.eq_ignore_ascii_case("ollama_cloud")
|| id.contains("cloud")
{
"Cloud".to_string()
} else {
"Custom".to_string()
}
}
fn requires_auth(id: &str, cfg: &ProviderConfig) -> bool {
cfg.api_key.is_some()
|| cfg.api_key_env.is_some()
|| matches!(id, "ollama_cloud" | "openai" | "anthropic")
}
fn describe_auth(cfg: &ProviderConfig, required: bool) -> String {
if let Some(env) = cfg
.api_key_env
.as_ref()
.map(|value| value.trim())
.filter(|value| !value.is_empty())
{
format!("env:{env}")
} else if cfg
.api_key
.as_ref()
.map(|value| !value.trim().is_empty())
.unwrap_or(false)
{
"config".to_string()
} else if required {
"required".to_string()
} else {
"-".to_string()
}
}
fn canonical_provider_id(raw: &str) -> String {
let trimmed = raw.trim().to_ascii_lowercase();
if trimmed.is_empty() {
return trimmed;
}
match trimmed.as_str() {
"ollama" | "ollama-local" => "ollama_local".to_string(),
"ollama_cloud" | "ollama-cloud" => "ollama_cloud".to_string(),
other => other.replace('-', "_"),
}
}
fn provider_type_label(provider_type: ProviderType) -> String {
match provider_type {
ProviderType::Local => "Local".to_string(),
ProviderType::Cloud => "Cloud".to_string(),
}
}
fn provider_status_strings(status: ProviderStatus) -> (&'static str, &'static str) {
match status {
ProviderStatus::Available => ("OK", "available"),
ProviderStatus::Unavailable => ("ERR", "unavailable"),
ProviderStatus::RequiresSetup => ("SETUP", "requires setup"),
}
}
fn print_status_rows(rows: &[ProviderStatusRow]) {
let id_width = rows
.iter()
.map(|row| row.id.len())
.max()
.unwrap_or(8)
.max("Provider".len());
let type_width = rows
.iter()
.map(|row| row.provider_type.len())
.max()
.unwrap_or(4)
.max("Type".len());
let status_width = rows
.iter()
.map(|row| row.indicator.len() + 1 + row.status_label.len())
.max()
.unwrap_or(6)
.max("State".len());
println!(
"{:<id_width$} {:<4} {:<type_width$} {:<status_width$} Details",
"Provider",
"Def",
"Type",
"State",
id_width = id_width,
type_width = type_width,
status_width = status_width,
);
for row in rows {
let def = if row.default_provider { "*" } else { "-" };
let details = row.detail.as_deref().unwrap_or("-");
println!(
"{:<id_width$} {:<4} {:<type_width$} {:<status_width$} {}",
row.id,
def,
row.provider_type,
format!("{} {}", row.indicator, row.status_label),
details,
id_width = id_width,
type_width = type_width,
status_width = status_width,
);
}
}
fn print_models(
records: Vec<ProviderRecord>,
models: Vec<AnnotatedModelInfo>,
statuses: HashMap<String, ProviderStatus>,
) {
let mut grouped: HashMap<String, Vec<AnnotatedModelInfo>> = HashMap::new();
for info in models {
grouped
.entry(info.provider_id.clone())
.or_default()
.push(info);
}
for record in records {
let status = statuses.get(&record.id).copied().or_else(|| {
if record.metadata.is_some() && record.registration_error.is_none() && record.enabled {
Some(ProviderStatus::Unavailable)
} else {
None
}
});
let (indicator, label, status_value) = if !record.enabled {
("-", "disabled", None)
} else if record.registration_error.is_some() {
("ERR", "error", None)
} else if let Some(status) = status {
let (indicator, label) = provider_status_strings(status);
(indicator, label, Some(status))
} else {
("?", "unknown", None)
};
let title = if record.default_provider {
format!("{} (default)", record.id)
} else {
record.id.clone()
};
println!(
"{} {} [{}] {}",
indicator, title, record.provider_type_label, label
);
if let Some(err) = &record.registration_error {
println!(" error: {}", err);
println!();
continue;
}
if !record.enabled {
println!(" provider disabled");
println!();
continue;
}
if let Some(entries) = grouped.get(&record.id) {
let mut entries = entries.clone();
entries.sort_by(|a, b| a.model.name.cmp(&b.model.name));
if entries.is_empty() {
println!(" (no models reported)");
} else {
for entry in entries {
let mut line = format!(" - {}", entry.model.name);
if let Some(description) = &entry.model.description
&& !description.trim().is_empty()
{
line.push_str(&format!(" - {}", description.trim()));
}
println!("{}", line);
}
}
} else {
println!(" (no models reported)");
}
if let Some(ProviderStatus::RequiresSetup) = status_value
&& record.requires_auth
{
println!(" configure provider credentials or API key");
}
println!();
}
}
struct ProviderListRow {
id: String,
type_label: String,
enabled: String,
default: String,
auth: String,
base_url: String,
}
struct ProviderRecord {
id: String,
enabled: bool,
default_provider: bool,
provider_type_label: String,
requires_auth: bool,
registration_error: Option<String>,
metadata: Option<owlen_core::provider::ProviderMetadata>,
}
impl ProviderRecord {
fn from_config(id: &str, cfg: &ProviderConfig, default_provider: bool) -> Self {
Self {
id: id.to_string(),
enabled: cfg.enabled,
default_provider,
provider_type_label: describe_provider_type(id, cfg),
requires_auth: requires_auth(id, cfg),
registration_error: None,
metadata: None,
}
}
}
struct ProviderStatusRow {
id: String,
provider_type: String,
default_provider: bool,
indicator: String,
status_label: String,
detail: Option<String>,
}
impl ProviderStatusRow {
fn from_record(record: ProviderRecord, status: Option<ProviderStatus>) -> Self {
if !record.enabled {
return Self {
id: record.id,
provider_type: record.provider_type_label,
default_provider: record.default_provider,
indicator: "-".to_string(),
status_label: "disabled".to_string(),
detail: None,
};
}
if let Some(err) = record.registration_error {
return Self {
id: record.id,
provider_type: record.provider_type_label,
default_provider: record.default_provider,
indicator: "ERR".to_string(),
status_label: "error".to_string(),
detail: Some(err),
};
}
if let Some(status) = status {
let (indicator, label) = provider_status_strings(status);
return Self {
id: record.id,
provider_type: record.provider_type_label,
default_provider: record.default_provider,
indicator: indicator.to_string(),
status_label: label.to_string(),
detail: if matches!(status, ProviderStatus::RequiresSetup) && record.requires_auth {
Some("credentials required".to_string())
} else {
None
},
};
}
Self {
id: record.id,
provider_type: record.provider_type_label,
default_provider: record.default_provider,
indicator: "?".to_string(),
status_label: "unknown".to_string(),
detail: None,
}
}
}

crates/owlen-cli/src/main.rs

@@ -2,41 +2,21 @@
//! OWLEN CLI - Chat TUI client
mod cloud;
mod bootstrap;
mod commands;
mod mcp;
use anyhow::{Result, anyhow};
use async_trait::async_trait;
use anyhow::Result;
use clap::{Parser, Subcommand};
use cloud::{CloudCommand, load_runtime_credentials, set_env_var};
use commands::{
cloud::{CloudCommand, run_cloud_command},
providers::{ModelsArgs, ProvidersCommand, run_models_command, run_providers_command},
};
use mcp::{McpCommand, run_mcp_command};
use owlen_core::config as core_config;
use owlen_core::{
ChatStream, Error, Provider,
config::{Config, McpMode},
mcp::remote_client::RemoteMcpClient,
mode::Mode,
providers::OllamaProvider,
session::SessionController,
storage::StorageManager,
types::{ChatRequest, ChatResponse, Message, ModelInfo},
};
use owlen_tui::tui_controller::{TuiController, TuiRequest};
use owlen_tui::{AppState, ChatApp, Event, EventHandler, SessionEvent, config, ui};
use std::any::Any;
use std::borrow::Cow;
use std::io;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use crossterm::{
event::{DisableBracketedPaste, DisableMouseCapture, EnableBracketedPaste, EnableMouseCapture},
execute,
terminal::{EnterAlternateScreen, LeaveAlternateScreen, disable_raw_mode, enable_raw_mode},
};
use futures::stream;
use ratatui::{Terminal, prelude::CrosstermBackend};
use owlen_core::config::McpMode;
use owlen_core::mode::Mode;
use owlen_tui::config;
/// Owlen - Terminal UI for LLM chat
#[derive(Parser, Debug)]
@@ -58,6 +38,11 @@ enum OwlenCommand {
/// Manage Ollama Cloud credentials
#[command(subcommand)]
Cloud(CloudCommand),
/// Manage model providers
#[command(subcommand)]
Providers(ProvidersCommand),
/// List models exposed by configured providers
Models(ModelsArgs),
/// Manage MCP server registrations
#[command(subcommand)]
Mcp(McpCommand),
@@ -73,70 +58,12 @@ enum ConfigCommand {
Path,
}
fn build_provider(cfg: &Config) -> anyhow::Result<Arc<dyn Provider>> {
match cfg.mcp.mode {
McpMode::RemotePreferred => {
let remote_result = if let Some(mcp_server) = cfg.effective_mcp_servers().first() {
RemoteMcpClient::new_with_config(mcp_server)
} else {
RemoteMcpClient::new()
};
match remote_result {
Ok(client) => {
let provider: Arc<dyn Provider> = Arc::new(client);
Ok(provider)
}
Err(err) if cfg.mcp.allow_fallback => {
log::warn!(
"Remote MCP client unavailable ({}); falling back to local provider.",
err
);
build_local_provider(cfg)
}
Err(err) => Err(anyhow::Error::from(err)),
}
}
McpMode::RemoteOnly => {
let mcp_server = cfg.effective_mcp_servers().first().ok_or_else(|| {
anyhow::anyhow!(
"[[mcp_servers]] must be configured when [mcp].mode = \"remote_only\""
)
})?;
let client = RemoteMcpClient::new_with_config(mcp_server)?;
let provider: Arc<dyn Provider> = Arc::new(client);
Ok(provider)
}
McpMode::LocalOnly | McpMode::Legacy => build_local_provider(cfg),
McpMode::Disabled => Err(anyhow::anyhow!(
"MCP mode 'disabled' is not supported by the owlen TUI"
)),
}
}
fn build_local_provider(cfg: &Config) -> anyhow::Result<Arc<dyn Provider>> {
let provider_name = cfg.general.default_provider.clone();
let provider_cfg = cfg.provider(&provider_name).ok_or_else(|| {
anyhow::anyhow!(format!(
"No provider configuration found for '{provider_name}' in [providers]"
))
})?;
match provider_cfg.provider_type.as_str() {
"ollama" | "ollama-cloud" => {
let provider = OllamaProvider::from_config(provider_cfg, Some(&cfg.general))?;
Ok(Arc::new(provider) as Arc<dyn Provider>)
}
other => Err(anyhow::anyhow!(format!(
"Provider type '{other}' is not supported in legacy/local MCP mode"
))),
}
}
async fn run_command(command: OwlenCommand) -> Result<()> {
match command {
OwlenCommand::Config(config_cmd) => run_config_command(config_cmd),
OwlenCommand::Cloud(cloud_cmd) => cloud::run_cloud_command(cloud_cmd).await,
OwlenCommand::Cloud(cloud_cmd) => run_cloud_command(cloud_cmd).await,
OwlenCommand::Providers(provider_cmd) => run_providers_command(provider_cmd).await,
OwlenCommand::Models(args) => run_models_command(args).await,
OwlenCommand::Mcp(mcp_cmd) => run_mcp_command(mcp_cmd),
OwlenCommand::Upgrade => {
println!(
@@ -172,40 +99,78 @@ fn run_config_doctor() -> Result<()> {
changes.push("created configuration file from defaults".to_string());
}
if config.provider(&config.general.default_provider).is_none() {
config.general.default_provider = "ollama_local".to_string();
changes.push("default provider missing; reset to 'ollama_local'".to_string());
}
if let Some(mut legacy) = config.providers.remove("ollama-cloud") {
legacy.provider_type = "ollama".to_string();
use std::collections::hash_map::Entry;
match config.providers.entry("ollama".to_string()) {
Entry::Occupied(mut existing) => {
let entry = existing.get_mut();
if entry.api_key.is_none() {
entry.api_key = legacy.api_key.take();
for key in ["ollama_local", "ollama_cloud", "openai", "anthropic"] {
if !config.providers.contains_key(key) {
core_config::ensure_provider_config_mut(&mut config, key);
changes.push(format!("added default configuration for provider '{key}'"));
}
}
if let Some(entry) = config.providers.get_mut("ollama_local") {
if entry.provider_type.trim().is_empty() || entry.provider_type != "ollama" {
entry.provider_type = "ollama".to_string();
changes.push("normalised providers.ollama_local.provider_type to 'ollama'".to_string());
}
}
let mut ensure_default_enabled = true;
if !config.providers.values().any(|cfg| cfg.enabled) {
let entry = core_config::ensure_provider_config_mut(&mut config, "ollama_local");
if !entry.enabled {
entry.enabled = true;
changes.push("no providers were enabled; enabled 'ollama_local'".to_string());
}
if config.general.default_provider != "ollama_local" {
config.general.default_provider = "ollama_local".to_string();
changes.push(
"default provider reset to 'ollama_local' because no providers were enabled"
.to_string(),
);
}
ensure_default_enabled = false;
}
if ensure_default_enabled {
let default_id = config.general.default_provider.clone();
if let Some(default_cfg) = config.providers.get(&default_id) {
if !default_cfg.enabled {
if let Some(new_default) = config
.providers
.iter()
.filter(|(id, cfg)| cfg.enabled && *id != &default_id)
.map(|(id, _)| id.clone())
.min()
{
config.general.default_provider = new_default.clone();
changes.push(format!(
"default provider '{default_id}' was disabled; switched default to '{new_default}'"
));
} else {
let entry =
core_config::ensure_provider_config_mut(&mut config, "ollama_local");
if !entry.enabled {
entry.enabled = true;
changes.push(
"enabled 'ollama_local' because default provider was disabled"
.to_string(),
);
}
if config.general.default_provider != "ollama_local" {
config.general.default_provider = "ollama_local".to_string();
changes.push(
"default provider reset to 'ollama_local' because previous default was disabled"
.to_string(),
);
}
}
}
}
}
match config.mcp.mode {
@@ -251,120 +216,6 @@ fn run_config_doctor() -> Result<()> {
Ok(())
}
const BASIC_THEME_NAME: &str = "ansi_basic";
#[derive(Debug, Clone)]
enum TerminalColorSupport {
Full,
Limited { term: String },
}
fn detect_terminal_color_support() -> TerminalColorSupport {
let term = std::env::var("TERM").unwrap_or_else(|_| "unknown".to_string());
let colorterm = std::env::var("COLORTERM").unwrap_or_default();
let term_lower = term.to_lowercase();
let color_lower = colorterm.to_lowercase();
let supports_extended = term_lower.contains("256color")
|| color_lower.contains("truecolor")
|| color_lower.contains("24bit")
|| color_lower.contains("fullcolor");
if supports_extended {
TerminalColorSupport::Full
} else {
TerminalColorSupport::Limited { term }
}
}
fn apply_terminal_theme(cfg: &mut Config, support: &TerminalColorSupport) -> Option<String> {
match support {
TerminalColorSupport::Full => None,
TerminalColorSupport::Limited { .. } => {
if cfg.ui.theme != BASIC_THEME_NAME {
let previous = std::mem::replace(&mut cfg.ui.theme, BASIC_THEME_NAME.to_string());
Some(previous)
} else {
None
}
}
}
}
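A hedged sketch of how the fallback behaves (the theme name is illustrative):
// Sketch: a limited terminal swaps the configured theme for the basic
// ANSI palette and returns the theme it replaced.
let mut cfg = Config::default();
cfg.ui.theme = "my_fancy_theme".to_string(); // illustrative theme name
let support = TerminalColorSupport::Limited { term: "vt100".to_string() };
let previous = apply_terminal_theme(&mut cfg, &support);
assert_eq!(previous.as_deref(), Some("my_fancy_theme"));
assert_eq!(cfg.ui.theme, BASIC_THEME_NAME);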
struct OfflineProvider {
reason: String,
placeholder_model: String,
}
impl OfflineProvider {
fn new(reason: String, placeholder_model: String) -> Self {
Self {
reason,
placeholder_model,
}
}
fn friendly_response(&self, requested_model: &str) -> ChatResponse {
let mut message = String::new();
message.push_str("⚠️ Owlen is running in offline mode.\n\n");
message.push_str(&self.reason);
if !requested_model.is_empty() && requested_model != self.placeholder_model {
message.push_str(&format!(
"\n\nYou requested model '{}', but no providers are reachable.",
requested_model
));
}
message.push_str(
"\n\nStart your preferred provider (e.g. `ollama serve`) or switch providers with `:provider` once connectivity is restored.",
);
ChatResponse {
message: Message::assistant(message),
usage: None,
is_streaming: false,
is_final: true,
}
}
}
#[async_trait]
impl Provider for OfflineProvider {
fn name(&self) -> &str {
"offline"
}
async fn list_models(&self) -> Result<Vec<ModelInfo>, Error> {
Ok(vec![ModelInfo {
id: self.placeholder_model.clone(),
provider: "offline".to_string(),
name: format!("Offline (fallback: {})", self.placeholder_model),
description: Some("Placeholder model used while no providers are reachable".into()),
context_window: None,
capabilities: vec![],
supports_tools: false,
}])
}
async fn send_prompt(&self, request: ChatRequest) -> Result<ChatResponse, Error> {
Ok(self.friendly_response(&request.model))
}
async fn stream_prompt(&self, request: ChatRequest) -> Result<ChatStream, Error> {
let response = self.friendly_response(&request.model);
Ok(Box::pin(stream::iter(vec![Ok(response)])))
}
async fn health_check(&self) -> Result<(), Error> {
Err(Error::Provider(anyhow!(
"offline provider cannot reach any backing models"
)))
}
fn as_any(&self) -> &(dyn Any + Send + Sync) {
self
}
}
#[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<()> {
// Parse command-line arguments
@@ -373,171 +224,5 @@ async fn main() -> Result<()> {
return run_command(command).await;
}
let initial_mode = if code { Mode::Code } else { Mode::Chat };
// Set auto-consent for TUI mode to prevent blocking stdin reads
set_env_var("OWLEN_AUTO_CONSENT", "1");
let color_support = detect_terminal_color_support();
// Load configuration (or fall back to defaults) for the session controller.
let mut cfg = config::try_load_config().unwrap_or_default();
let _ = cfg.refresh_mcp_servers(None);
if let Some(previous_theme) = apply_terminal_theme(&mut cfg, &color_support) {
let term_label = match &color_support {
TerminalColorSupport::Limited { term } => Cow::from(term.as_str()),
TerminalColorSupport::Full => Cow::from("current terminal"),
};
eprintln!(
"Terminal '{}' lacks full 256-color support. Using '{}' theme instead of '{}'.",
term_label, BASIC_THEME_NAME, previous_theme
);
} else if let TerminalColorSupport::Limited { term } = &color_support {
eprintln!(
"Warning: terminal '{}' may not fully support 256-color themes.",
term
);
}
cfg.validate()?;
let storage = Arc::new(StorageManager::new().await?);
load_runtime_credentials(&mut cfg, storage.clone()).await?;
let (tui_tx, _tui_rx) = mpsc::unbounded_channel::<TuiRequest>();
let tui_controller = Arc::new(TuiController::new(tui_tx));
// Create provider according to MCP configuration (supports legacy/local fallback)
let provider = build_provider(&cfg)?;
let mut offline_notice: Option<String> = None;
let provider = match provider.health_check().await {
Ok(_) => provider,
Err(err) => {
let hint = if matches!(cfg.mcp.mode, McpMode::RemotePreferred | McpMode::RemoteOnly)
&& !cfg.effective_mcp_servers().is_empty()
{
"Ensure the configured MCP server is running and reachable."
} else {
"Ensure Ollama is running (`ollama serve`) and reachable at the configured base_url."
};
let notice =
format!("Provider health check failed: {err}. {hint} Continuing in offline mode.");
eprintln!("{notice}");
offline_notice = Some(notice.clone());
let fallback_model = cfg
.general
.default_model
.clone()
.unwrap_or_else(|| "offline".to_string());
Arc::new(OfflineProvider::new(notice, fallback_model)) as Arc<dyn Provider>
}
};
let controller =
SessionController::new(provider, cfg, storage.clone(), tui_controller, false).await?;
let (mut app, mut session_rx) = ChatApp::new(controller).await?;
app.initialize_models().await?;
if let Some(notice) = offline_notice {
app.set_status_message(&notice);
app.set_system_status(notice);
}
// Set the initial mode
app.set_mode(initial_mode).await;
// Event infrastructure
let cancellation_token = CancellationToken::new();
let (event_tx, event_rx) = mpsc::unbounded_channel();
let event_handler = EventHandler::new(event_tx, cancellation_token.clone());
let event_handle = tokio::spawn(async move { event_handler.run().await });
// Terminal setup
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(
stdout,
EnterAlternateScreen,
EnableMouseCapture,
EnableBracketedPaste
)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
let result = run_app(&mut terminal, &mut app, event_rx, &mut session_rx).await;
// Shutdown
cancellation_token.cancel();
event_handle.await?;
// Persist configuration updates (e.g., selected model)
config::save_config(&app.config())?;
disable_raw_mode()?;
execute!(
terminal.backend_mut(),
LeaveAlternateScreen,
DisableMouseCapture,
DisableBracketedPaste
)?;
terminal.show_cursor()?;
if let Err(err) = result {
println!("{err:?}");
}
Ok(())
}
async fn run_app(
terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
app: &mut ChatApp,
mut event_rx: mpsc::UnboundedReceiver<Event>,
session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
) -> Result<()> {
let stream_draw_interval = tokio::time::Duration::from_millis(50);
let idle_tick = tokio::time::Duration::from_millis(100);
let mut last_draw = tokio::time::Instant::now() - stream_draw_interval;
loop {
// Advance loading animation frame
app.advance_loading_animation();
let streaming_active = app.streaming_count() > 0;
let draw_due = if streaming_active {
last_draw.elapsed() >= stream_draw_interval
} else {
true
};
if draw_due {
terminal.draw(|f| ui::render_chat(f, app))?;
last_draw = tokio::time::Instant::now();
}
// Process any pending LLM requests AFTER UI has been drawn
if let Err(e) = app.process_pending_llm_request().await {
eprintln!("Error processing LLM request: {}", e);
}
// Process any pending tool executions AFTER UI has been drawn
if let Err(e) = app.process_pending_tool_execution().await {
eprintln!("Error processing tool execution: {}", e);
}
let sleep_duration = if streaming_active {
stream_draw_interval
.checked_sub(last_draw.elapsed())
.unwrap_or_else(|| tokio::time::Duration::from_millis(0))
} else {
idle_tick
};
tokio::select! {
Some(event) = event_rx.recv() => {
if let AppState::Quit = app.handle_event(event).await? {
return Ok(());
}
}
Some(session_event) = session_rx.recv() => {
app.handle_session_event(session_event).await?;
}
_ = tokio::time::sleep(sleep_duration) => {}
}
}
bootstrap::launch(initial_mode).await
}

View File

@@ -16,7 +16,7 @@ use std::time::Duration;
pub const DEFAULT_CONFIG_PATH: &str = "~/.config/owlen/config.toml";
/// Current schema version written to `config.toml`.
pub const CONFIG_SCHEMA_VERSION: &str = "1.5.0";
pub const CONFIG_SCHEMA_VERSION: &str = "1.6.0";
/// Provider config key for forcing Ollama provider mode.
pub const OLLAMA_MODE_KEY: &str = "ollama_mode";
@@ -24,6 +24,18 @@ pub const OLLAMA_MODE_KEY: &str = "ollama_mode";
pub const OLLAMA_CLOUD_ENDPOINT_KEY: &str = "cloud_endpoint";
/// Canonical Ollama Cloud base URL.
pub const OLLAMA_CLOUD_BASE_URL: &str = "https://ollama.com";
/// Environment variable used for Ollama Cloud authentication.
pub const OLLAMA_CLOUD_API_KEY_ENV: &str = "OLLAMA_CLOUD_API_KEY";
/// Default base URL for local Ollama daemons.
pub const OLLAMA_LOCAL_BASE_URL: &str = "http://localhost:11434";
/// Default OpenAI API base URL.
pub const OPENAI_DEFAULT_BASE_URL: &str = "https://api.openai.com/v1";
/// Environment variable name used for OpenAI API keys.
pub const OPENAI_API_KEY_ENV: &str = "OPENAI_API_KEY";
/// Default Anthropic API base URL.
pub const ANTHROPIC_DEFAULT_BASE_URL: &str = "https://api.anthropic.com/v1";
/// Environment variable name used for Anthropic API keys.
pub const ANTHROPIC_API_KEY_ENV: &str = "ANTHROPIC_API_KEY";
/// Core configuration shared by all OWLEN clients
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -82,8 +94,7 @@ pub struct Config {
impl Default for Config {
fn default() -> Self {
let providers = default_provider_configs();
Self {
schema_version: Self::default_schema_version(),
@@ -270,6 +281,8 @@ impl Config {
let content = fs::read_to_string(&path)?;
let parsed: toml::Value =
toml::from_str(&content).map_err(|e| crate::Error::Config(e.to_string()))?;
let mut parsed = parsed;
migrate_legacy_provider_tables(&mut parsed);
let previous_version = parsed
.get("schema_version")
.and_then(|value| value.as_str())
@@ -326,12 +339,19 @@ impl Config {
/// Get provider configuration by provider name
pub fn provider(&self, name: &str) -> Option<&ProviderConfig> {
self.providers.get(name)
let key = normalize_provider_key(name);
self.providers.get(&key)
}
/// Update or insert a provider configuration
pub fn upsert_provider(&mut self, name: impl Into<String>, config: ProviderConfig) {
self.providers.insert(name.into(), config);
let raw = name.into();
let key = normalize_provider_key(&raw);
let mut config = config;
if config.provider_type.is_empty() {
config.provider_type = key.clone();
}
self.providers.insert(key, config);
}
/// Resolve default model in order of priority: explicit default, first cached model, provider fallback
@@ -353,11 +373,15 @@ impl Config {
}
fn ensure_defaults(&mut self) {
if self.general.default_provider.is_empty() {
self.general.default_provider = "ollama".to_string();
if self.general.default_provider.is_empty() || self.general.default_provider == "ollama" {
self.general.default_provider = "ollama_local".to_string();
}
let mut defaults = default_provider_configs();
for (name, default_cfg) in defaults.drain() {
self.providers.entry(name).or_insert(default_cfg);
}
ensure_provider_config(self, "ollama");
if self.schema_version.is_empty() {
self.schema_version = Self::default_schema_version();
}
@@ -561,6 +585,7 @@ impl Config {
self.validate_default_provider()?;
self.validate_mcp_settings()?;
self.validate_mcp_servers()?;
self.validate_providers()?;
Ok(())
}
@@ -573,57 +598,92 @@ impl Config {
);
}
if let Some(legacy_cloud) = self.providers.remove("ollama_cloud") {
self.merge_legacy_ollama_provider(legacy_cloud);
self.migrate_provider_entries();
if self.general.default_provider == "ollama" {
self.general.default_provider = "ollama_local".to_string();
}
if let Some(legacy_cloud) = self.providers.remove("ollama-cloud") {
self.merge_legacy_ollama_provider(legacy_cloud);
}
if let Some(ollama) = self.providers.get_mut("ollama") {
let previous_mode = ollama
.extra
.get(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str())
.map(|value| value.to_ascii_lowercase());
ensure_ollama_mode_extra(ollama);
if previous_mode.as_deref().unwrap_or("auto") == "auto"
&& is_cloud_base_url(ollama.base_url.as_ref())
{
ollama.extra.insert(
OLLAMA_MODE_KEY.to_string(),
serde_json::Value::String("cloud".to_string()),
);
}
}
self.ensure_defaults();
self.schema_version = CONFIG_SCHEMA_VERSION.to_string();
}
fn migrate_provider_entries(&mut self) {
let mut migrated = default_provider_configs();
let legacy_entries = std::mem::take(&mut self.providers);
legacy_cloud.provider_type = "ollama".to_string();
match self.providers.entry("ollama".to_string()) {
Entry::Occupied(mut entry) => {
let target = entry.get_mut();
if target.base_url.is_none() {
target.base_url = legacy_cloud.base_url.take();
}
if target.api_key.is_none() {
target.api_key = legacy_cloud.api_key.take();
}
if target.extra.is_empty() && !legacy_cloud.extra.is_empty() {
target.extra = legacy_cloud.extra;
}
ensure_ollama_mode_extra(target);
for (original_key, mut legacy) in legacy_entries {
if original_key == "ollama" {
Self::merge_legacy_ollama_provider(legacy, &mut migrated);
continue;
}
let normalized = normalize_provider_key(&original_key);
let entry = migrated
.entry(normalized.clone())
.or_insert_with(|| ProviderConfig {
enabled: true,
provider_type: normalized.clone(),
base_url: None,
api_key: None,
api_key_env: None,
extra: HashMap::new(),
});
if legacy.provider_type.is_empty() {
legacy.provider_type = normalized.clone();
}
entry.merge_from(legacy);
if entry.provider_type.is_empty() {
entry.provider_type = normalized;
}
}
self.providers = migrated;
}
fn merge_legacy_ollama_provider(
mut legacy: ProviderConfig,
targets: &mut HashMap<String, ProviderConfig>,
) {
let mode = legacy
.extra
.remove(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str().map(|s| s.trim().to_ascii_lowercase()));
let api_key_present = legacy
.api_key
.as_ref()
.map(|value| !value.trim().is_empty())
.unwrap_or(false);
let cloud_candidate =
matches!(mode.as_deref(), Some("cloud")) || is_cloud_base_url(legacy.base_url.as_ref());
let should_enable_cloud = cloud_candidate || api_key_present;
if matches!(mode.as_deref(), Some("local")) || !should_enable_cloud {
if let Some(local) = targets.get_mut("ollama_local") {
let mut copy = legacy.clone();
copy.api_key = None;
copy.api_key_env = None;
copy.enabled = true;
local.merge_from(copy);
local.enabled = true;
if local.base_url.is_none() {
local.base_url = Some(OLLAMA_LOCAL_BASE_URL.to_string());
}
}
}
if should_enable_cloud || matches!(mode.as_deref(), Some("cloud")) {
if let Some(cloud) = targets.get_mut("ollama_cloud") {
legacy.enabled = true;
cloud.merge_from(legacy);
cloud.enabled = true;
if cloud.base_url.is_none() {
cloud.base_url = Some(OLLAMA_CLOUD_BASE_URL.to_string());
}
if cloud.api_key_env.is_none() {
cloud.api_key_env = Some(OLLAMA_CLOUD_API_KEY_ENV.to_string());
}
}
}
}
@@ -693,40 +753,164 @@ impl Config {
Ok(())
}
fn validate_providers(&self) -> Result<()> {
for (name, provider) in &self.providers {
if !provider.enabled {
continue;
}
match name.as_str() {
"ollama_local" => {
if is_blank(&provider.base_url) {
return Err(Error::Config(
"providers.ollama_local.base_url must be set when enabled".into(),
));
}
}
"ollama_cloud" => {
if is_blank(&provider.base_url) {
return Err(Error::Config(
"providers.ollama_cloud.base_url must be set when enabled".into(),
));
}
if is_blank(&provider.api_key) && is_blank(&provider.api_key_env) {
return Err(Error::Config(
"providers.ollama_cloud requires `api_key` or `api_key_env` when enabled"
.into(),
));
}
}
"openai" | "anthropic" => {
if is_blank(&provider.api_key) && is_blank(&provider.api_key_env) {
return Err(Error::Config(format!(
"providers.{name} requires `api_key` or `api_key_env` when enabled"
)));
}
}
_ => {}
}
}
Ok(())
}
}
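A minimal sketch of the rule in practice, assuming the defaults ship ollama_cloud without credentials:
// Sketch: enabling ollama_cloud with neither api_key nor api_key_env
// should make validation fail.
let mut config = Config::default();
if let Some(cloud) = config.providers.get_mut("ollama_cloud") {
    cloud.enabled = true;
    cloud.api_key = None;
    cloud.api_key_env = None;
}
assert!(config.validate().is_err());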
fn default_provider_configs() -> HashMap<String, ProviderConfig> {
let mut providers = HashMap::new();
for name in ["ollama_local", "ollama_cloud", "openai", "anthropic"] {
if let Some(config) = default_provider_config_for(name) {
providers.insert(name.to_string(), config);
}
}
providers
}
fn default_ollama_local_config() -> ProviderConfig {
ProviderConfig {
enabled: true,
provider_type: canonical_provider_type("ollama_local"),
base_url: Some(OLLAMA_LOCAL_BASE_URL.to_string()),
api_key: None,
api_key_env: None,
extra: HashMap::new(),
}
}
fn default_ollama_cloud_config() -> ProviderConfig {
let mut extra = HashMap::new();
extra.insert(
OLLAMA_CLOUD_ENDPOINT_KEY.to_string(),
serde_json::Value::String(OLLAMA_CLOUD_BASE_URL.to_string()),
);
ProviderConfig {
enabled: false,
provider_type: canonical_provider_type("ollama_cloud"),
base_url: Some(OLLAMA_CLOUD_BASE_URL.to_string()),
api_key: None,
api_key_env: Some(OLLAMA_CLOUD_API_KEY_ENV.to_string()),
extra,
}
}
fn default_openai_config() -> ProviderConfig {
ProviderConfig {
enabled: false,
provider_type: canonical_provider_type("openai"),
base_url: Some(OPENAI_DEFAULT_BASE_URL.to_string()),
api_key: None,
api_key_env: Some(OPENAI_API_KEY_ENV.to_string()),
extra: HashMap::new(),
}
}
fn default_anthropic_config() -> ProviderConfig {
ProviderConfig {
enabled: false,
provider_type: canonical_provider_type("anthropic"),
base_url: Some(ANTHROPIC_DEFAULT_BASE_URL.to_string()),
api_key: None,
api_key_env: Some(ANTHROPIC_API_KEY_ENV.to_string()),
extra: HashMap::new(),
}
}
fn default_provider_config_for(name: &str) -> Option<ProviderConfig> {
match name {
"ollama_local" => Some(default_ollama_local_config()),
"ollama_cloud" => Some(default_ollama_cloud_config()),
"openai" => Some(default_openai_config()),
"anthropic" => Some(default_anthropic_config()),
_ => None,
}
}
fn normalize_provider_key(name: &str) -> String {
let normalized = name.trim().to_ascii_lowercase();
match normalized.as_str() {
"ollama" | "ollama-local" => "ollama_local".to_string(),
"ollama_cloud" | "ollama-cloud" => "ollama_cloud".to_string(),
other => other.replace('-', "_"),
}
}
fn canonical_provider_type(key: &str) -> String {
match key {
"ollama_local" => "ollama".to_string(),
other => other.to_string(),
}
}
fn is_blank(value: &Option<String>) -> bool {
value.as_ref().map(|s| s.trim().is_empty()).unwrap_or(true)
}
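A few illustrative cases for the normalisation helpers above:
// Legacy spellings collapse onto the canonical keys; unknown names only
// have their hyphens rewritten.
assert_eq!(normalize_provider_key("Ollama"), "ollama_local");
assert_eq!(normalize_provider_key("ollama-cloud"), "ollama_cloud");
assert_eq!(normalize_provider_key("my-provider"), "my_provider");
assert_eq!(canonical_provider_type("ollama_local"), "ollama");
assert_eq!(canonical_provider_type("openai"), "openai");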
fn migrate_legacy_provider_tables(document: &mut toml::Value) {
let Some(table) = document.as_table_mut() else {
return;
};
let mut legacy = Vec::new();
for key in ["ollama", "ollama_cloud", "ollama-cloud"] {
if let Some(entry) = table.remove(key) {
legacy.push((key.to_string(), entry));
}
}
if legacy.is_empty() {
return;
}
let providers_entry = table
.entry("providers".to_string())
.or_insert_with(|| toml::Value::Table(toml::map::Map::new()));
if let Some(providers_table) = providers_entry.as_table_mut() {
for (key, value) in legacy {
providers_table.insert(key, value);
}
}
}
@@ -1117,7 +1301,7 @@ impl GeneralSettings {
impl Default for GeneralSettings {
fn default() -> Self {
Self {
default_provider: "ollama".to_string(),
default_provider: "ollama_local".to_string(),
default_model: Some("llama3.2:latest".to_string()),
enable_streaming: Self::default_streaming(),
project_context_file: Some("OWLEN.md".to_string()),
@@ -1400,6 +1584,8 @@ pub struct UiSettings {
pub show_timestamps: bool,
#[serde(default = "UiSettings::default_icon_mode")]
pub icon_mode: IconMode,
#[serde(default)]
pub keymap_path: Option<String>,
}
/// Preference for which symbol set to render in the terminal UI.
@@ -1537,6 +1723,7 @@ impl Default for UiSettings {
render_markdown: Self::default_render_markdown(),
show_timestamps: Self::default_show_timestamps(),
icon_mode: Self::default_icon_mode(),
keymap_path: None,
}
}
}
@@ -1650,7 +1837,35 @@ impl Default for InputSettings {
/// Convenience accessor for an Ollama provider entry, creating a default if missing
pub fn ensure_ollama_config(config: &mut Config) -> &ProviderConfig {
ensure_provider_config(config, "ollama")
ensure_provider_config(config, "ollama_local")
}
/// Ensure a provider configuration exists for the requested provider name and return a mutable reference.
pub fn ensure_provider_config_mut<'a>(
config: &'a mut Config,
provider_name: &str,
) -> &'a mut ProviderConfig {
let key = normalize_provider_key(provider_name);
let entry = config.providers.entry(key.clone()).or_insert_with(|| {
let mut default = default_provider_config_for(&key).unwrap_or_else(|| ProviderConfig {
enabled: true,
provider_type: canonical_provider_type(&key),
base_url: None,
api_key: None,
api_key_env: None,
extra: HashMap::new(),
});
if default.provider_type.is_empty() {
default.provider_type = canonical_provider_type(&key);
}
default
});
if entry.provider_type.is_empty() {
entry.provider_type = canonical_provider_type(&key);
}
entry
}
/// Ensure a provider configuration exists for the requested provider name
@@ -1658,35 +1873,8 @@ pub fn ensure_provider_config<'a>(
config: &'a mut Config,
provider_name: &str,
) -> &'a ProviderConfig {
use std::collections::hash_map::Entry;
if matches!(provider_name, "ollama_cloud" | "ollama-cloud") {
return ensure_provider_config(config, "ollama");
}
match config.providers.entry(provider_name.to_string()) {
Entry::Occupied(mut entry) => {
ensure_ollama_mode_extra(entry.get_mut());
}
Entry::Vacant(entry) => {
let mut default = match provider_name {
"ollama" => default_ollama_provider_config(),
other => ProviderConfig {
provider_type: other.to_string(),
base_url: None,
api_key: None,
extra: HashMap::new(),
},
};
ensure_ollama_mode_extra(&mut default);
entry.insert(default);
}
}
config
.providers
.get(provider_name)
.expect("provider entry must exist")
let entry = ensure_provider_config_mut(config, provider_name);
&*entry
}
/// Calculate absolute timeout for session data based on configuration
@@ -1705,8 +1893,8 @@ mod tests {
}
let mut config = Config::default();
if let Some(ollama) = config.providers.get_mut("ollama") {
ollama.api_key = Some("${OWLEN_TEST_API_KEY}".to_string());
if let Some(ollama_local) = config.providers.get_mut("ollama_local") {
ollama_local.api_key = Some("${OWLEN_TEST_API_KEY}".to_string());
}
config
@@ -1714,7 +1902,7 @@ mod tests {
.expect("environment expansion succeeded");
assert_eq!(
config.providers["ollama"].api_key.as_deref(),
config.providers["ollama_local"].api_key.as_deref(),
Some("super-secret")
);
@@ -1730,8 +1918,8 @@ mod tests {
}
let mut config = Config::default();
if let Some(ollama) = config.providers.get_mut("ollama") {
ollama.api_key = Some("${OWLEN_TEST_MISSING}".to_string());
if let Some(ollama_local) = config.providers.get_mut("ollama_local") {
ollama_local.api_key = Some("${OWLEN_TEST_MISSING}".to_string());
}
let error = config
@@ -1792,15 +1980,19 @@ mod tests {
#[test]
fn default_config_contains_local_provider() {
let config = Config::default();
assert!(config.providers.contains_key("ollama"));
let provider = config.providers.get("ollama").unwrap();
assert_eq!(
provider
.extra
.get(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str()),
Some("auto")
);
let local = config
.providers
.get("ollama_local")
.expect("default local provider");
assert!(local.enabled);
assert_eq!(local.base_url.as_deref(), Some(OLLAMA_LOCAL_BASE_URL));
let cloud = config
.providers
.get("ollama_cloud")
.expect("default cloud provider");
assert!(!cloud.enabled);
assert_eq!(cloud.api_key_env.as_deref(), Some(OLLAMA_CLOUD_API_KEY_ENV));
}
#[test]
@@ -1808,16 +2000,10 @@ mod tests {
let mut config = Config::default();
config.providers.clear();
let cloud = ensure_provider_config(&mut config, "ollama-cloud");
assert_eq!(cloud.provider_type, "ollama");
assert_eq!(cloud.base_url.as_deref(), Some("http://localhost:11434"));
assert_eq!(
cloud
.extra
.get(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str()),
Some("auto")
);
assert!(config.providers.contains_key("ollama"));
assert_eq!(cloud.provider_type, "ollama_cloud");
assert_eq!(cloud.base_url.as_deref(), Some(OLLAMA_CLOUD_BASE_URL));
assert_eq!(cloud.api_key_env.as_deref(), Some(OLLAMA_CLOUD_API_KEY_ENV));
assert!(config.providers.contains_key("ollama_cloud"));
assert!(!config.providers.contains_key("ollama-cloud"));
}
@@ -1828,48 +2014,100 @@ mod tests {
config.providers.insert(
"ollama_cloud".to_string(),
ProviderConfig {
enabled: true,
provider_type: "ollama_cloud".to_string(),
base_url: Some("https://api.ollama.com".to_string()),
api_key: Some("secret".to_string()),
api_key_env: None,
extra: HashMap::new(),
},
);
config.apply_schema_migrations("1.0.0");
assert!(config.providers.get("ollama_cloud").is_none());
assert!(config.providers.get("ollama-cloud").is_none());
let cloud = config.providers.get("ollama").expect("migrated config");
assert_eq!(cloud.provider_type, "ollama");
assert!(config.providers.get("ollama_cloud").is_some());
let cloud = config
.providers
.get("ollama_cloud")
.expect("migrated config");
assert!(cloud.enabled);
assert_eq!(cloud.provider_type, "ollama_cloud");
assert_eq!(cloud.base_url.as_deref(), Some("https://api.ollama.com"));
assert_eq!(cloud.api_key.as_deref(), Some("secret"));
assert_eq!(
cloud
.extra
.get(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str()),
Some("auto")
);
}
#[test]
fn migration_sets_cloud_mode_for_cloud_base() {
let mut config = Config::default();
if let Some(ollama) = config.providers.get_mut("ollama") {
if let Some(ollama) = config.providers.get_mut("ollama_local") {
ollama.base_url = Some(OLLAMA_CLOUD_BASE_URL.to_string());
ollama.extra.remove(OLLAMA_MODE_KEY);
}
config.apply_schema_migrations("1.4.0");
let provider = config.providers.get("ollama").expect("ollama provider");
assert_eq!(
provider
.extra
.get(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str()),
Some("cloud")
let cloud = config
.providers
.get("ollama_cloud")
.expect("cloud provider created");
assert!(cloud.enabled);
assert_eq!(cloud.base_url.as_deref(), Some(OLLAMA_CLOUD_BASE_URL));
assert_eq!(cloud.api_key_env.as_deref(), Some(OLLAMA_CLOUD_API_KEY_ENV));
}
#[test]
fn migrate_legacy_monolithic_ollama_entry() {
let mut config = Config::default();
config.providers.clear();
config.providers.insert(
"ollama".to_string(),
ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(),
base_url: Some(OLLAMA_LOCAL_BASE_URL.to_string()),
api_key: None,
api_key_env: None,
extra: HashMap::new(),
},
);
config.apply_schema_migrations("1.2.0");
let local = config
.providers
.get("ollama_local")
.expect("local provider migrated");
assert!(local.enabled);
assert_eq!(local.base_url.as_deref(), Some(OLLAMA_LOCAL_BASE_URL));
let cloud = config
.providers
.get("ollama_cloud")
.expect("cloud provider placeholder");
assert!(!cloud.enabled);
}
#[test]
fn migrate_legacy_provider_tables_moves_top_level_entries() {
let mut document: toml::Value = toml::from_str(
r#"
[ollama]
base_url = "http://localhost:11434"
[general]
default_provider = "ollama"
"#,
)
.expect("valid inline config");
migrate_legacy_provider_tables(&mut document);
let providers = document
.get("providers")
.and_then(|value| value.as_table())
.expect("providers table present");
assert!(providers.contains_key("ollama"));
assert!(providers["ollama"].get("base_url").is_some());
assert!(document.get("ollama").is_none());
}
#[test]

View File

@@ -0,0 +1,32 @@
use std::sync::Arc;
use async_trait::async_trait;
use crate::{
Result,
llm::ChatStream,
mcp::{McpToolCall, McpToolDescriptor, McpToolResponse},
types::{ChatRequest, ChatResponse, ModelInfo},
};
/// Object-safe facade for interacting with LLM backends.
#[async_trait]
pub trait LlmClient: Send + Sync {
/// List the models exposed by this client.
async fn list_models(&self) -> Result<Vec<ModelInfo>>;
/// Issue a one-shot chat request and wait for the complete response.
async fn send_chat(&self, request: ChatRequest) -> Result<ChatResponse>;
/// Stream chat responses incrementally.
async fn stream_chat(&self, request: ChatRequest) -> Result<ChatStream>;
/// Enumerate tools exposed by the backing provider.
async fn list_tools(&self) -> Result<Vec<McpToolDescriptor>>;
/// Invoke a tool exposed by the provider.
async fn call_tool(&self, call: McpToolCall) -> Result<McpToolResponse>;
}
/// Convenience alias for trait-object clients.
pub type DynLlmClient = Arc<dyn LlmClient>;
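A minimal sketch of a caller that stays behind the facade (the helper names are illustrative, not part of the crate):
// Sketch: frontends can depend on DynLlmClient without knowing whether a
// Provider or an MCP client sits behind it.
async fn first_model_id(client: &DynLlmClient) -> Result<Option<String>> {
    let models = client.list_models().await?;
    Ok(models.into_iter().next().map(|model| model.id))
}

async fn ask(client: &DynLlmClient, request: ChatRequest) -> Result<String> {
    let response = client.send_chat(request).await?;
    Ok(response.message.content)
}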

View File

@@ -0,0 +1 @@
pub mod llm_client;

View File

@@ -11,6 +11,7 @@ pub mod consent;
pub mod conversation;
pub mod credentials;
pub mod encryption;
pub mod facade;
pub mod formatting;
pub mod input;
pub mod llm;
@@ -18,6 +19,7 @@ pub mod mcp;
pub mod mode;
pub mod model;
pub mod oauth;
pub mod provider;
pub mod providers;
pub mod router;
pub mod sandbox;
@@ -41,6 +43,7 @@ pub use formatting::*;
pub use input::*;
pub use oauth::*;
// Export MCP types but exclude test_utils to avoid ambiguity
pub use facade::llm_client::*;
pub use llm::{
ChatStream, LlmProvider, Provider, ProviderConfig, ProviderRegistry, send_via_stream,
};
@@ -50,6 +53,7 @@ pub use mcp::{
};
pub use mode::*;
pub use model::*;
pub use provider::*;
pub use providers::*;
pub use router::*;
pub use sandbox::*;

View File

@@ -144,17 +144,57 @@ where
/// Runtime configuration for a provider instance.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ProviderConfig {
/// Provider type identifier.
/// Whether this provider should be activated.
#[serde(default = "ProviderConfig::default_enabled")]
pub enabled: bool,
/// Provider type identifier used to resolve implementations.
#[serde(default)]
pub provider_type: String,
/// Base URL for API calls.
#[serde(default)]
pub base_url: Option<String>,
/// API key or token material.
#[serde(default)]
pub api_key: Option<String>,
/// Environment variable holding the API key.
#[serde(default)]
pub api_key_env: Option<String>,
/// Additional provider-specific configuration.
#[serde(flatten)]
pub extra: HashMap<String, Value>,
}
impl ProviderConfig {
const fn default_enabled() -> bool {
true
}
/// Overlay `other` onto this configuration: `other`'s `enabled` flag always
/// wins, and any populated field in `other` replaces the current value.
pub fn merge_from(&mut self, mut other: ProviderConfig) {
self.enabled = other.enabled;
if !other.provider_type.is_empty() {
self.provider_type = other.provider_type;
}
if let Some(base_url) = other.base_url.take() {
self.base_url = Some(base_url);
}
if let Some(api_key) = other.api_key.take() {
self.api_key = Some(api_key);
}
if let Some(api_key_env) = other.api_key_env.take() {
self.api_key_env = Some(api_key_env);
}
if !other.extra.is_empty() {
self.extra.extend(other.extra);
}
}
}
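Roughly, `merge_from` lets a user-supplied entry override a shipped default while keeping whatever the override leaves unset; a hedged sketch using the config module's default cloud entry (the env var name is illustrative):
// Sketch: the default cloud entry keeps its base_url, but adopts the
// user's enabled flag and api_key_env.
let mut merged = default_ollama_cloud_config();
merged.merge_from(ProviderConfig {
    enabled: true,
    provider_type: String::new(), // empty: the default's type survives
    base_url: None,               // unset: the default URL survives
    api_key: None,
    api_key_env: Some("MY_OLLAMA_KEY".to_string()), // illustrative
    extra: HashMap::new(),
});
assert!(merged.enabled);
assert_eq!(merged.base_url.as_deref(), Some(OLLAMA_CLOUD_BASE_URL));
assert_eq!(merged.api_key_env.as_deref(), Some("MY_OLLAMA_KEY"));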
/// Static registry of providers available to the application.
pub struct ProviderRegistry {
providers: HashMap<String, Arc<dyn Provider>>,

View File

@@ -7,7 +7,10 @@ use crate::consent::{ConsentManager, ConsentScope};
use crate::tools::{Tool, WebScrapeTool, WebSearchTool};
use crate::types::ModelInfo;
use crate::types::{ChatResponse, Message, Role};
use crate::{Error, LlmProvider, Result, mode::Mode, send_via_stream};
use crate::{
ChatStream, Error, LlmProvider, Result, facade::llm_client::LlmClient, mode::Mode,
send_via_stream,
};
use anyhow::anyhow;
use futures::{StreamExt, future::BoxFuture, stream};
use reqwest::Client as HttpClient;
@@ -564,3 +567,27 @@ impl LlmProvider for RemoteMcpClient {
})
}
}
#[async_trait::async_trait]
impl LlmClient for RemoteMcpClient {
async fn list_models(&self) -> Result<Vec<ModelInfo>> {
<Self as LlmProvider>::list_models(self).await
}
async fn send_chat(&self, request: crate::types::ChatRequest) -> Result<ChatResponse> {
<Self as LlmProvider>::send_prompt(self, request).await
}
async fn stream_chat(&self, request: crate::types::ChatRequest) -> Result<ChatStream> {
let stream = <Self as LlmProvider>::stream_prompt(self, request).await?;
Ok(Box::pin(stream))
}
async fn list_tools(&self) -> Result<Vec<McpToolDescriptor>> {
<Self as McpClient>::list_tools(self).await
}
async fn call_tool(&self, call: McpToolCall) -> Result<McpToolResponse> {
<Self as McpClient>::call_tool(self, call).await
}
}

View File

@@ -0,0 +1,227 @@
use std::collections::HashMap;
use std::sync::Arc;
use futures::stream::{FuturesUnordered, StreamExt};
use log::{debug, warn};
use tokio::sync::RwLock;
use crate::config::Config;
use crate::{Error, Result};
use super::{GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderStatus};
/// Model information annotated with the originating provider metadata.
#[derive(Debug, Clone)]
pub struct AnnotatedModelInfo {
pub provider_id: String,
pub provider_status: ProviderStatus,
pub model: ModelInfo,
}
/// Coordinates multiple [`ModelProvider`] implementations and tracks their
/// health state.
pub struct ProviderManager {
providers: RwLock<HashMap<String, Arc<dyn ModelProvider>>>,
status_cache: RwLock<HashMap<String, ProviderStatus>>,
}
impl ProviderManager {
/// Construct a new manager using the supplied configuration. Providers
/// defined in the configuration start with a `RequiresSetup` status so
/// that frontends can surface incomplete configuration to users.
pub fn new(config: &Config) -> Self {
let mut status_cache = HashMap::new();
for provider_id in config.providers.keys() {
status_cache.insert(provider_id.clone(), ProviderStatus::RequiresSetup);
}
Self {
providers: RwLock::new(HashMap::new()),
status_cache: RwLock::new(status_cache),
}
}
/// Register a provider instance with the manager.
pub async fn register_provider(&self, provider: Arc<dyn ModelProvider>) {
let provider_id = provider.metadata().id.clone();
debug!("registering provider {}", provider_id);
self.providers
.write()
.await
.insert(provider_id.clone(), provider);
self.status_cache
.write()
.await
.insert(provider_id, ProviderStatus::Unavailable);
}
/// Return a stream by routing the request to the designated provider.
pub async fn generate(
&self,
provider_id: &str,
request: GenerateRequest,
) -> Result<GenerateStream> {
let provider = {
let guard = self.providers.read().await;
guard.get(provider_id).cloned()
}
.ok_or_else(|| Error::Config(format!("provider '{provider_id}' not registered")))?;
match provider.generate_stream(request).await {
Ok(stream) => {
self.status_cache
.write()
.await
.insert(provider_id.to_string(), ProviderStatus::Available);
Ok(stream)
}
Err(err) => {
self.status_cache
.write()
.await
.insert(provider_id.to_string(), ProviderStatus::Unavailable);
Err(err)
}
}
}
/// List models across all providers, updating provider status along the way.
pub async fn list_all_models(&self) -> Result<Vec<AnnotatedModelInfo>> {
let providers: Vec<(String, Arc<dyn ModelProvider>)> = {
let guard = self.providers.read().await;
guard
.iter()
.map(|(id, provider)| (id.clone(), Arc::clone(provider)))
.collect()
};
let mut tasks = FuturesUnordered::new();
for (provider_id, provider) in providers {
tasks.push(async move {
let log_id = provider_id.clone();
let mut status = ProviderStatus::Unavailable;
let mut models = Vec::new();
match provider.health_check().await {
Ok(health) => {
status = health;
if matches!(status, ProviderStatus::Available) {
match provider.list_models().await {
Ok(list) => {
models = list;
}
Err(err) => {
status = ProviderStatus::Unavailable;
warn!("listing models failed for provider {}: {}", log_id, err);
}
}
}
}
Err(err) => {
warn!("health check failed for provider {}: {}", log_id, err);
}
}
(provider_id, status, models)
});
}
let mut annotated = Vec::new();
let mut status_updates = HashMap::new();
while let Some((provider_id, status, models)) = tasks.next().await {
status_updates.insert(provider_id.clone(), status);
for model in models {
annotated.push(AnnotatedModelInfo {
provider_id: provider_id.clone(),
provider_status: status,
model,
});
}
}
{
let mut guard = self.status_cache.write().await;
for (provider_id, status) in status_updates {
guard.insert(provider_id, status);
}
}
Ok(annotated)
}
/// Refresh the health of all registered providers in parallel, returning
/// the latest status snapshot.
pub async fn refresh_health(&self) -> HashMap<String, ProviderStatus> {
let providers: Vec<(String, Arc<dyn ModelProvider>)> = {
let guard = self.providers.read().await;
guard
.iter()
.map(|(id, provider)| (id.clone(), Arc::clone(provider)))
.collect()
};
let mut tasks = FuturesUnordered::new();
for (provider_id, provider) in providers {
tasks.push(async move {
let status = match provider.health_check().await {
Ok(status) => status,
Err(err) => {
warn!("health check failed for provider {}: {}", provider_id, err);
ProviderStatus::Unavailable
}
};
(provider_id, status)
});
}
let mut updates = HashMap::new();
while let Some((provider_id, status)) = tasks.next().await {
updates.insert(provider_id, status);
}
{
let mut guard = self.status_cache.write().await;
for (provider_id, status) in &updates {
guard.insert(provider_id.clone(), *status);
}
}
updates
}
/// Return the provider instance for an identifier.
pub async fn get_provider(&self, provider_id: &str) -> Option<Arc<dyn ModelProvider>> {
let guard = self.providers.read().await;
guard.get(provider_id).cloned()
}
/// List the registered provider identifiers.
pub async fn provider_ids(&self) -> Vec<String> {
let guard = self.providers.read().await;
guard.keys().cloned().collect()
}
/// Retrieve the last known status for a provider.
pub async fn provider_status(&self, provider_id: &str) -> Option<ProviderStatus> {
let guard = self.status_cache.read().await;
guard.get(provider_id).copied()
}
/// Snapshot the currently cached statuses.
pub async fn provider_statuses(&self) -> HashMap<String, ProviderStatus> {
let guard = self.status_cache.read().await;
guard.clone()
}
}
impl Default for ProviderManager {
fn default() -> Self {
Self {
providers: RwLock::new(HashMap::new()),
status_cache: RwLock::new(HashMap::new()),
}
}
}
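Wiring it together, a hedged sketch (it assumes some concrete ModelProvider, e.g. an Ollama-backed one, is already constructed):
// Sketch: register a provider, refresh health once, then surface the
// aggregated, status-annotated model list.
async fn bootstrap_manager(
    config: &Config,
    provider: Arc<dyn ModelProvider>,
) -> Result<Vec<AnnotatedModelInfo>> {
    let manager = ProviderManager::new(config);
    manager.register_provider(provider).await;
    let _statuses = manager.refresh_health().await;
    manager.list_all_models().await
}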

View File

@@ -0,0 +1,36 @@
//! Unified provider abstraction layer.
//!
//! This module defines the async [`ModelProvider`] trait that all model
//! backends implement, together with a small suite of shared data structures
//! used for model discovery and streaming generation. The [`ProviderManager`]
//! orchestrates multiple providers and coordinates their health state.
mod manager;
mod types;
use std::pin::Pin;
use async_trait::async_trait;
use futures::Stream;
pub use self::{manager::*, types::*};
use crate::Result;
/// Convenience alias for the stream type yielded by [`ModelProvider::generate_stream`].
pub type GenerateStream = Pin<Box<dyn Stream<Item = Result<GenerateChunk>> + Send + 'static>>;
#[async_trait]
pub trait ModelProvider: Send + Sync {
/// Returns descriptive metadata about the provider.
fn metadata(&self) -> &ProviderMetadata;
/// Check the current health state for the provider.
async fn health_check(&self) -> Result<ProviderStatus>;
/// List all models available through the provider.
async fn list_models(&self) -> Result<Vec<ModelInfo>>;
/// Acquire a streaming response for a generation request.
async fn generate_stream(&self, request: GenerateRequest) -> Result<GenerateStream>;
}
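A minimal implementation satisfying the trait might look like the sketch below (a single-chunk echo backend, purely illustrative):
// Sketch: echoes the prompt back as one chunk followed by a final marker.
struct EchoProvider {
    metadata: ProviderMetadata,
}

#[async_trait]
impl ModelProvider for EchoProvider {
    fn metadata(&self) -> &ProviderMetadata {
        &self.metadata
    }
    async fn health_check(&self) -> Result<ProviderStatus> {
        Ok(ProviderStatus::Available)
    }
    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
        Ok(Vec::new()) // a real backend would enumerate its models here
    }
    async fn generate_stream(&self, request: GenerateRequest) -> Result<GenerateStream> {
        let text = request.prompt.unwrap_or_default();
        let chunks = vec![
            Ok(GenerateChunk::from_text(text)),
            Ok(GenerateChunk::final_chunk()),
        ];
        Ok(Box::pin(futures::stream::iter(chunks)))
    }
}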

View File

@@ -0,0 +1,124 @@
//! Shared types used by the unified provider abstraction layer.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use serde_json::Value;
/// Categorises providers so the UI can distinguish between local and hosted
/// backends.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ProviderType {
Local,
Cloud,
}
/// Represents the current availability state for a provider.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ProviderStatus {
Available,
Unavailable,
RequiresSetup,
}
/// Describes core metadata for a provider implementation.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ProviderMetadata {
pub id: String,
pub name: String,
pub provider_type: ProviderType,
pub requires_auth: bool,
#[serde(default)]
pub metadata: HashMap<String, Value>,
}
impl ProviderMetadata {
/// Construct a new metadata instance for a provider.
pub fn new(
id: impl Into<String>,
name: impl Into<String>,
provider_type: ProviderType,
requires_auth: bool,
) -> Self {
Self {
id: id.into(),
name: name.into(),
provider_type,
requires_auth,
metadata: HashMap::new(),
}
}
}
/// Information about a model that can be displayed to users.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ModelInfo {
pub name: String,
#[serde(default)]
pub size_bytes: Option<u64>,
#[serde(default)]
pub capabilities: Vec<String>,
#[serde(default)]
pub description: Option<String>,
pub provider: ProviderMetadata,
#[serde(default)]
pub metadata: HashMap<String, Value>,
}
/// Unified request for streaming text generation across providers.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GenerateRequest {
pub model: String,
#[serde(default)]
pub prompt: Option<String>,
#[serde(default)]
pub context: Vec<String>,
#[serde(default)]
pub parameters: HashMap<String, Value>,
#[serde(default)]
pub metadata: HashMap<String, Value>,
}
impl GenerateRequest {
/// Helper for building a request from the minimum required fields.
pub fn new(model: impl Into<String>) -> Self {
Self {
model: model.into(),
prompt: None,
context: Vec::new(),
parameters: HashMap::new(),
metadata: HashMap::new(),
}
}
}
/// Streamed chunk of generation output from a model.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GenerateChunk {
#[serde(default)]
pub text: Option<String>,
#[serde(default)]
pub is_final: bool,
#[serde(default)]
pub metadata: HashMap<String, Value>,
}
impl GenerateChunk {
/// Construct a new chunk with the provided text payload.
pub fn from_text(text: impl Into<String>) -> Self {
Self {
text: Some(text.into()),
is_final: false,
metadata: HashMap::new(),
}
}
/// Mark the chunk as the terminal item in a stream.
pub fn final_chunk() -> Self {
Self {
text: None,
is_final: true,
metadata: HashMap::new(),
}
}
}
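Because every optional field carries #[serde(default)], a sparse wire payload deserialises cleanly; an illustrative round-trip:
// Sketch: only `model` is required; everything else falls back to its default.
let request: GenerateRequest =
    serde_json::from_str(r#"{ "model": "llama3.2:latest" }"#).expect("sparse payload parses");
assert_eq!(request.model, "llama3.2:latest");
assert!(request.prompt.is_none());
assert!(request.context.is_empty() && request.parameters.is_empty());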

View File

@@ -88,6 +88,7 @@ struct ScopeSnapshot {
availability: ScopeAvailability,
last_error: Option<String>,
last_checked: Option<Instant>,
last_success_at: Option<Instant>,
}
impl Default for ScopeSnapshot {
@@ -98,10 +99,29 @@ impl Default for ScopeSnapshot {
availability: ScopeAvailability::Unknown,
last_error: None,
last_checked: None,
last_success_at: None,
}
}
}
impl ScopeSnapshot {
fn is_stale(&self, ttl: Duration) -> bool {
match self.fetched_at {
Some(ts) => ts.elapsed() >= ttl,
None => !self.models.is_empty(),
}
}
fn last_checked_age_secs(&self) -> Option<u64> {
self.last_checked.map(|instant| instant.elapsed().as_secs())
}
fn last_success_age_secs(&self) -> Option<u64> {
self.last_success_at
.map(|instant| instant.elapsed().as_secs())
}
}
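The staleness rule is slightly subtle: a snapshot that was never fetched only counts as stale once it is somehow holding models. A short sketch of the cases (assuming std::time::Duration):
// is_stale semantics:
// - never fetched, no models  -> fresh (nothing to invalidate yet)
// - never fetched, has models -> stale (provenance unknown)
// - fetched within the TTL    -> fresh; past the TTL -> stale
let ttl = Duration::from_secs(300);
let never_fetched = ScopeSnapshot::default();
assert!(!never_fetched.is_stale(ttl));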
#[derive(Debug)]
struct OllamaOptions {
mode: OllamaMode,
@@ -410,22 +430,29 @@ impl OllamaProvider {
return None;
}
if entry.models.is_empty() {
return None;
}
if let Some(ts) = entry.fetched_at {
if ts.elapsed() < self.model_cache_ttl {
return Some(entry.models.clone());
}
}
// Fallback to last good models even if stale; UI will mark as degraded
Some(entry.models.clone())
})
}
async fn update_scope_success(&self, scope: OllamaMode, models: &[ModelInfo]) {
let mut cache = self.scope_cache.write().await;
let entry = cache.entry(scope).or_default();
let now = Instant::now();
entry.models = models.to_vec();
entry.fetched_at = Some(Instant::now());
entry.last_checked = Some(Instant::now());
entry.fetched_at = Some(now);
entry.last_checked = Some(now);
entry.last_success_at = Some(now);
entry.availability = ScopeAvailability::Available;
entry.last_error = None;
}
@@ -461,6 +488,45 @@ impl OllamaProvider {
}
}
let stale = snapshot.is_stale(self.model_cache_ttl);
let stale_capability = format!(
"scope-status-stale:{}:{}",
scope_key,
if stale { "1" } else { "0" }
);
for model in models.iter_mut() {
if !model
.capabilities
.iter()
.any(|cap| cap == &stale_capability)
{
model.capabilities.push(stale_capability.clone());
}
}
if let Some(age) = snapshot.last_checked_age_secs() {
let age_capability = format!("scope-status-age:{}:{}", scope_key, age);
for model in models.iter_mut() {
if !model.capabilities.iter().any(|cap| cap == &age_capability) {
model.capabilities.push(age_capability.clone());
}
}
}
if let Some(success_age) = snapshot.last_success_age_secs() {
let success_capability =
format!("scope-status-success-age:{}:{}", scope_key, success_age);
for model in models.iter_mut() {
if !model
.capabilities
.iter()
.any(|cap| cap == &success_capability)
{
model.capabilities.push(success_capability.clone());
}
}
}
if let Some(raw_reason) = snapshot.last_error.as_ref() {
let cleaned = raw_reason.replace('\n', " ").trim().to_string();
if !cleaned.is_empty() {
@@ -1467,9 +1533,11 @@ mod tests {
#[test]
fn explicit_local_mode_overrides_api_key() {
let mut config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(),
base_url: Some("http://localhost:11434".to_string()),
api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(),
};
config.extra.insert(
@@ -1486,9 +1554,11 @@ mod tests {
#[test]
fn auto_mode_prefers_explicit_local_base() {
let config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(),
base_url: Some("http://localhost:11434".to_string()),
api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(),
};
// simulate missing explicit mode; defaults to auto
@@ -1502,9 +1572,11 @@ mod tests {
#[test]
fn auto_mode_with_api_key_and_no_local_probe_switches_to_cloud() {
let mut config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(),
base_url: None,
api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(),
};
config.extra.insert(
@@ -1580,9 +1652,11 @@ fn auto_mode_with_api_key_and_successful_probe_prefers_local() {
let _guard = ProbeOverrideGuard::set(Some(true));
let mut config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(),
base_url: None,
api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(),
};
config.extra.insert(
@@ -1603,9 +1677,11 @@ fn auto_mode_with_api_key_and_failed_probe_prefers_cloud() {
let _guard = ProbeOverrideGuard::set(Some(false));
let mut config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(),
base_url: None,
api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(),
};
config.extra.insert(
@@ -1622,9 +1698,11 @@ fn auto_mode_with_api_key_and_failed_probe_prefers_cloud() {
#[test]
fn annotate_scope_status_adds_capabilities_for_unavailable_scopes() {
let config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(),
base_url: Some("http://localhost:11434".to_string()),
api_key: None,
api_key_env: None,
extra: HashMap::new(),
};
@@ -1646,6 +1724,7 @@ fn annotate_scope_status_adds_capabilities_for_unavailable_scopes() {
let entry = cache.entry(OllamaMode::Cloud).or_default();
entry.availability = ScopeAvailability::Unavailable;
entry.last_error = Some("Cloud endpoint unreachable".to_string());
entry.last_checked = Some(Instant::now());
}
provider.annotate_scope_status(&mut models).await;
@@ -1662,4 +1741,14 @@ fn annotate_scope_status_adds_capabilities_for_unavailable_scopes() {
.iter()
.any(|cap| cap.starts_with("scope-status-message:cloud:"))
);
assert!(
capabilities
.iter()
.any(|cap| cap.starts_with("scope-status-age:cloud:"))
);
assert!(
capabilities
.iter()
.any(|cap| cap == "scope-status-stale:cloud:0")
);
}

View File

@@ -1,5 +1,5 @@
use crate::config::{Config, McpResourceConfig, McpServerConfig};
use crate::consent::ConsentManager;
use crate::consent::{ConsentManager, ConsentScope};
use crate::conversation::ConversationManager;
use crate::credentials::CredentialManager;
use crate::encryption::{self, VaultHandle};
@@ -34,6 +34,7 @@ use std::env;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use tokio::sync::Mutex as TokioMutex;
use tokio::sync::mpsc::UnboundedSender;
use uuid::Uuid;
pub enum SessionOutcome {
@@ -44,6 +45,36 @@ pub enum SessionOutcome {
},
}
#[derive(Debug, Clone)]
pub enum ControllerEvent {
ToolRequested {
request_id: Uuid,
message_id: Uuid,
tool_name: String,
data_types: Vec<String>,
endpoints: Vec<String>,
tool_calls: Vec<ToolCall>,
},
}
#[derive(Clone, Debug)]
struct PendingToolRequest {
message_id: Uuid,
tool_name: String,
data_types: Vec<String>,
endpoints: Vec<String>,
tool_calls: Vec<ToolCall>,
}
#[derive(Debug, Clone)]
pub struct ToolConsentResolution {
pub request_id: Uuid,
pub message_id: Uuid,
pub tool_name: String,
pub scope: ConsentScope,
pub tool_calls: Vec<ToolCall>,
}
fn extract_resource_content(value: &Value) -> Option<String> {
match value {
Value::Null => Some(String::new()),
@@ -111,6 +142,8 @@ pub struct SessionController {
enable_code_tools: bool,
current_mode: Mode,
missing_oauth_servers: Vec<String>,
event_tx: Option<UnboundedSender<ControllerEvent>>,
pending_tool_requests: HashMap<Uuid, PendingToolRequest>,
}
async fn build_tools(
@@ -331,6 +364,7 @@ impl SessionController {
storage: Arc<StorageManager>,
ui: Arc<dyn UiController>,
enable_code_tools: bool,
event_tx: Option<UnboundedSender<ControllerEvent>>,
) -> Result<Self> {
let config_arc = Arc::new(TokioMutex::new(config));
// Acquire the config asynchronously to avoid blocking the runtime.
@@ -435,6 +469,8 @@ impl SessionController {
enable_code_tools,
current_mode: initial_mode,
missing_oauth_servers,
event_tx,
pending_tool_requests: HashMap::new(),
})
}
@@ -1222,14 +1258,84 @@ impl SessionController {
.append_stream_chunk(message_id, &chunk.message.content, chunk.is_final)
}
pub fn check_streaming_tool_calls(&mut self, message_id: Uuid) -> Option<Vec<ToolCall>> {
let maybe_calls = self
.conversation
.active()
.messages
.iter()
.find(|m| m.id == message_id)
.and_then(|m| m.tool_calls.clone())
.filter(|calls| !calls.is_empty());
let calls = maybe_calls?;
if !self
.pending_tool_requests
.values()
.any(|pending| pending.message_id == message_id)
{
if let Some((tool_name, data_types, endpoints)) =
self.check_tools_consent_needed(&calls).into_iter().next()
{
let request_id = Uuid::new_v4();
let pending = PendingToolRequest {
message_id,
tool_name: tool_name.clone(),
data_types: data_types.clone(),
endpoints: endpoints.clone(),
tool_calls: calls.clone(),
};
self.pending_tool_requests.insert(request_id, pending);
if let Some(tx) = &self.event_tx {
let _ = tx.send(ControllerEvent::ToolRequested {
request_id,
message_id,
tool_name,
data_types,
endpoints,
tool_calls: calls.clone(),
});
}
}
}
Some(calls)
}
pub fn resolve_tool_consent(
&mut self,
request_id: Uuid,
scope: ConsentScope,
) -> Result<ToolConsentResolution> {
let pending = self
.pending_tool_requests
.remove(&request_id)
.ok_or_else(|| {
Error::InvalidInput(format!("Unknown tool consent request: {}", request_id))
})?;
let PendingToolRequest {
message_id,
tool_name,
data_types,
endpoints,
tool_calls,
..
} = pending;
if !matches!(scope, ConsentScope::Denied) {
self.grant_consent_with_scope(&tool_name, data_types, endpoints, scope.clone());
}
Ok(ToolConsentResolution {
request_id,
message_id,
tool_name,
scope,
tool_calls,
})
}
pub fn cancel_stream(&mut self, message_id: Uuid, notice: &str) -> Result<()> {
@@ -1352,7 +1458,7 @@ mod tests {
let provider: Arc<dyn Provider> = Arc::new(MockProvider::default()) as Arc<dyn Provider>;
let ui = Arc::new(NoOpUiController);
let session = SessionController::new(provider, config, storage, ui, false)
let session = SessionController::new(provider, config, storage, ui, false, None)
.await
.expect("session");

View File

@@ -3,14 +3,14 @@
use std::fmt;
/// High-level application state reported by the UI loop.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AppState {
Running,
Quit,
}
/// Vim-style input modes supported by the TUI.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum InputMode {
Normal,
Editing,
@@ -45,7 +45,7 @@ impl fmt::Display for InputMode {
}
/// Represents which panel is currently focused in the TUI layout.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FocusedPanel {
Files,
Chat,

View File

@@ -0,0 +1,310 @@
use std::{any::Any, collections::HashMap, sync::Arc};
use async_trait::async_trait;
use futures::StreamExt;
use owlen_core::{
Config, Error, Mode, Provider,
config::McpMode,
consent::ConsentScope,
mcp::{
McpClient, McpToolCall, McpToolDescriptor, McpToolResponse,
failover::{FailoverMcpClient, ServerEntry},
},
session::{ControllerEvent, SessionController, SessionOutcome},
storage::StorageManager,
types::{ChatParameters, ChatRequest, ChatResponse, Message, ModelInfo, Role, ToolCall},
ui::NoOpUiController,
};
use tempfile::tempdir;
use tokio::sync::mpsc;
struct StreamingToolProvider;
#[async_trait]
impl Provider for StreamingToolProvider {
fn name(&self) -> &str {
"mock-streaming-provider"
}
async fn list_models(&self) -> owlen_core::Result<Vec<ModelInfo>> {
Ok(vec![ModelInfo {
id: "mock-model".into(),
name: "Mock Model".into(),
description: Some("A mock model that emits tool calls".into()),
provider: self.name().into(),
context_window: Some(4096),
capabilities: vec!["chat".into(), "tools".into()],
supports_tools: true,
}])
}
async fn send_prompt(&self, _request: ChatRequest) -> owlen_core::Result<ChatResponse> {
let mut message = Message::assistant("tool-call".to_string());
message.tool_calls = Some(vec![ToolCall {
id: "call-1".to_string(),
name: "resources/write".to_string(),
arguments: serde_json::json!({"path": "README.md", "content": "hello"}),
}]);
Ok(ChatResponse {
message,
usage: None,
is_streaming: false,
is_final: true,
})
}
async fn stream_prompt(
&self,
_request: ChatRequest,
) -> owlen_core::Result<owlen_core::ChatStream> {
let mut first_chunk = Message::assistant(
"Thought: need to update README.\nAction: resources/write".to_string(),
);
first_chunk.tool_calls = Some(vec![ToolCall {
id: "call-1".to_string(),
name: "resources/write".to_string(),
arguments: serde_json::json!({"path": "README.md", "content": "hello"}),
}]);
let chunk = ChatResponse {
message: first_chunk,
usage: None,
is_streaming: true,
is_final: false,
};
Ok(Box::pin(futures::stream::iter(vec![Ok(chunk)])))
}
async fn health_check(&self) -> owlen_core::Result<()> {
Ok(())
}
fn as_any(&self) -> &(dyn Any + Send + Sync) {
self
}
}
fn tool_descriptor() -> McpToolDescriptor {
McpToolDescriptor {
name: "web_search".to_string(),
description: "search".to_string(),
input_schema: serde_json::json!({"type": "object"}),
requires_network: true,
requires_filesystem: vec![],
}
}
struct TimeoutClient;
#[async_trait]
impl McpClient for TimeoutClient {
async fn list_tools(&self) -> owlen_core::Result<Vec<McpToolDescriptor>> {
Ok(vec![tool_descriptor()])
}
async fn call_tool(&self, _call: McpToolCall) -> owlen_core::Result<McpToolResponse> {
Err(Error::Network(
"timeout while contacting remote web search endpoint".into(),
))
}
}
#[derive(Clone)]
struct CachedResponseClient {
response: Arc<McpToolResponse>,
}
impl CachedResponseClient {
fn new() -> Self {
let mut metadata = HashMap::new();
metadata.insert("source".to_string(), "cache".to_string());
metadata.insert("cached".to_string(), "true".to_string());
let response = McpToolResponse {
name: "web_search".to_string(),
success: true,
output: serde_json::json!({
"query": "rust",
"results": [
{"title": "Rust Programming Language", "url": "https://www.rust-lang.org"}
],
"note": "cached result"
}),
metadata,
duration_ms: 0,
};
Self {
response: Arc::new(response),
}
}
}
#[async_trait]
impl McpClient for CachedResponseClient {
async fn list_tools(&self) -> owlen_core::Result<Vec<McpToolDescriptor>> {
Ok(vec![tool_descriptor()])
}
async fn call_tool(&self, _call: McpToolCall) -> owlen_core::Result<McpToolResponse> {
Ok((*self.response).clone())
}
}
#[tokio::test(flavor = "multi_thread")]
async fn streaming_file_write_consent_denied_returns_resolution() {
let temp_dir = tempdir().expect("temp dir");
let storage = StorageManager::with_database_path(temp_dir.path().join("owlen-tests.db"))
.await
.expect("storage");
let mut config = Config::default();
config.general.enable_streaming = true;
config.privacy.encrypt_local_data = false;
config.privacy.require_consent_per_session = true;
config.general.default_model = Some("mock-model".into());
config.mcp.mode = McpMode::LocalOnly;
config
.refresh_mcp_servers(None)
.expect("refresh MCP servers");
let provider: Arc<dyn Provider> = Arc::new(StreamingToolProvider);
let ui = Arc::new(NoOpUiController);
let (event_tx, mut event_rx) = mpsc::unbounded_channel::<ControllerEvent>();
let mut session = SessionController::new(
provider,
config,
Arc::new(storage),
ui,
true,
Some(event_tx),
)
.await
.expect("session controller");
session
.set_operating_mode(Mode::Code)
.await
.expect("code mode");
let outcome = session
.send_message(
"Please write to README".to_string(),
ChatParameters {
stream: true,
..Default::default()
},
)
.await
.expect("send message");
let (response_id, mut stream) = if let SessionOutcome::Streaming {
response_id,
stream,
} = outcome
{
(response_id, stream)
} else {
panic!("expected streaming outcome");
};
session
.mark_stream_placeholder(response_id, "")
.expect("placeholder");
let chunk = stream
.next()
.await
.expect("stream chunk")
.expect("chunk result");
session
.apply_stream_chunk(response_id, &chunk)
.expect("apply chunk");
let tool_calls = session
.check_streaming_tool_calls(response_id)
.expect("tool calls");
assert_eq!(tool_calls.len(), 1);
assert_eq!(tool_calls[0].name, "resources/write");
let event = event_rx.recv().await.expect("controller event");
let request_id = match event {
ControllerEvent::ToolRequested {
request_id,
tool_name,
data_types,
endpoints,
..
} => {
assert_eq!(tool_name, "resources/write");
assert!(data_types.iter().any(|t| t.contains("file")));
assert!(endpoints.iter().any(|e| e.contains("filesystem")));
request_id
}
};
let resolution = session
.resolve_tool_consent(request_id, ConsentScope::Denied)
.expect("resolution");
assert_eq!(resolution.scope, ConsentScope::Denied);
assert_eq!(resolution.tool_name, "resources/write");
assert_eq!(resolution.tool_calls.len(), tool_calls.len());
let err = session
.resolve_tool_consent(request_id, ConsentScope::Denied)
.expect_err("second resolution should fail");
assert!(matches!(err, Error::InvalidInput(_)));
let conversation = session.conversation().clone();
let assistant = conversation
.messages
.iter()
.find(|message| message.role == Role::Assistant)
.expect("assistant message present");
assert!(
assistant
.tool_calls
.as_ref()
.and_then(|calls| calls.first())
.is_some_and(|call| call.name == "resources/write"),
"stream chunk should capture the tool call on the assistant message"
);
}
#[tokio::test]
async fn web_tool_timeout_fails_over_to_cached_result() {
let primary: Arc<dyn McpClient> = Arc::new(TimeoutClient);
let cached = CachedResponseClient::new();
let backup: Arc<dyn McpClient> = Arc::new(cached.clone());
let client = FailoverMcpClient::with_servers(vec![
ServerEntry::new("primary".into(), primary, 1),
ServerEntry::new("cache".into(), backup, 2),
]);
let call = McpToolCall {
name: "web_search".to_string(),
arguments: serde_json::json!({ "query": "rust", "max_results": 3 }),
};
let response = client.call_tool(call.clone()).await.expect("fallback");
assert_eq!(response.name, "web_search");
assert_eq!(
response.metadata.get("source").map(String::as_str),
Some("cache")
);
assert_eq!(
response.output.get("note").and_then(|value| value.as_str()),
Some("cached result")
);
let statuses = client.get_server_status().await;
assert!(statuses.iter().any(|(name, health)| name == "primary"
&& !matches!(health, owlen_core::mcp::failover::ServerHealth::Healthy)));
assert!(statuses.iter().any(|(name, health)| name == "cache"
&& matches!(health, owlen_core::mcp::failover::ServerHealth::Healthy)));
}

View File

@@ -0,0 +1,20 @@
[package]
name = "owlen-providers"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Provider implementations for OWLEN"
[dependencies]
owlen-core = { path = "../owlen-core" }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
reqwest = { version = "0.11", features = ["json", "stream"] }

View File

@@ -0,0 +1,3 @@
//! Provider implementations for OWLEN.
pub mod ollama;

View File

@@ -0,0 +1,108 @@
use std::{env, time::Duration};
use async_trait::async_trait;
use owlen_core::{
Error as CoreError, Result as CoreResult,
config::OLLAMA_CLOUD_BASE_URL,
provider::{
GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderMetadata,
ProviderStatus, ProviderType,
},
};
use serde_json::{Number, Value};
use super::OllamaClient;
const API_KEY_ENV: &str = "OLLAMA_CLOUD_API_KEY";
/// ModelProvider implementation for the hosted Ollama Cloud service.
pub struct OllamaCloudProvider {
client: OllamaClient,
}
impl OllamaCloudProvider {
/// Construct a new cloud provider. An API key must be supplied either
/// directly or via the `OLLAMA_CLOUD_API_KEY` environment variable.
pub fn new(
base_url: Option<String>,
api_key: Option<String>,
request_timeout: Option<Duration>,
) -> CoreResult<Self> {
let (api_key, key_source) = resolve_api_key(api_key)?;
let base_url = base_url.unwrap_or_else(|| OLLAMA_CLOUD_BASE_URL.to_string());
let mut metadata =
ProviderMetadata::new("ollama_cloud", "Ollama (Cloud)", ProviderType::Cloud, true);
metadata
.metadata
.insert("base_url".into(), Value::String(base_url.clone()));
metadata.metadata.insert(
"api_key_source".into(),
Value::String(key_source.to_string()),
);
metadata
.metadata
.insert("api_key_env".into(), Value::String(API_KEY_ENV.to_string()));
if let Some(timeout) = request_timeout {
let timeout_ms = timeout.as_millis().min(u128::from(u64::MAX)) as u64;
metadata.metadata.insert(
"request_timeout_ms".into(),
Value::Number(Number::from(timeout_ms)),
);
}
let client = OllamaClient::new(&base_url, Some(api_key), metadata, request_timeout)?;
Ok(Self { client })
}
}
#[async_trait]
impl ModelProvider for OllamaCloudProvider {
fn metadata(&self) -> &ProviderMetadata {
self.client.metadata()
}
async fn health_check(&self) -> CoreResult<ProviderStatus> {
match self.client.health_check().await {
Ok(status) => Ok(status),
Err(CoreError::Auth(_)) => Ok(ProviderStatus::RequiresSetup),
Err(err) => Err(err),
}
}
async fn list_models(&self) -> CoreResult<Vec<ModelInfo>> {
self.client.list_models().await
}
async fn generate_stream(&self, request: GenerateRequest) -> CoreResult<GenerateStream> {
self.client.generate_stream(request).await
}
}
fn resolve_api_key(api_key: Option<String>) -> CoreResult<(String, &'static str)> {
let key_from_config = api_key
.as_ref()
.map(|value| value.trim())
.filter(|value| !value.is_empty())
.map(str::to_string);
if let Some(key) = key_from_config {
return Ok((key, "config"));
}
let key_from_env = env::var(API_KEY_ENV)
.ok()
.map(|value| value.trim().to_string())
.filter(|value| !value.is_empty());
if let Some(key) = key_from_env {
return Ok((key, "env"));
}
Err(CoreError::Config(
"Ollama Cloud API key not configured. Set OLLAMA_CLOUD_API_KEY or configure an API key."
.into(),
))
}
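// A hedged test sketch of `resolve_api_key`: a config-supplied key is trimmed
// and attributed to "config", taking precedence over the environment fallback.
#[cfg(test)]
mod resolve_api_key_tests {
    use super::*;

    #[test]
    fn config_key_is_trimmed_and_attributed() {
        let (key, source) = resolve_api_key(Some("  config-key  ".into())).unwrap();
        assert_eq!(key, "config-key");
        assert_eq!(source, "config");
    }
}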

View File

@@ -0,0 +1,80 @@
use std::time::Duration;
use async_trait::async_trait;
use owlen_core::provider::{
GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderMetadata, ProviderStatus,
ProviderType,
};
use owlen_core::{Error as CoreError, Result as CoreResult};
use serde_json::{Number, Value};
use tokio::time::timeout;
use super::OllamaClient;
const DEFAULT_BASE_URL: &str = "http://localhost:11434";
const DEFAULT_HEALTH_TIMEOUT_SECS: u64 = 5;
/// ModelProvider implementation for a local Ollama daemon.
pub struct OllamaLocalProvider {
client: OllamaClient,
health_timeout: Duration,
}
impl OllamaLocalProvider {
/// Construct a new local provider using the shared [`OllamaClient`].
pub fn new(
base_url: Option<String>,
request_timeout: Option<Duration>,
health_timeout: Option<Duration>,
) -> CoreResult<Self> {
let base_url = base_url.unwrap_or_else(|| DEFAULT_BASE_URL.to_string());
let health_timeout =
health_timeout.unwrap_or_else(|| Duration::from_secs(DEFAULT_HEALTH_TIMEOUT_SECS));
let mut metadata =
ProviderMetadata::new("ollama_local", "Ollama (Local)", ProviderType::Local, false);
metadata
.metadata
.insert("base_url".into(), Value::String(base_url.clone()));
if let Some(timeout) = request_timeout {
let timeout_ms = timeout.as_millis().min(u128::from(u64::MAX)) as u64;
metadata.metadata.insert(
"request_timeout_ms".into(),
Value::Number(Number::from(timeout_ms)),
);
}
let client = OllamaClient::new(&base_url, None, metadata, request_timeout)?;
Ok(Self {
client,
health_timeout,
})
}
}
#[async_trait]
impl ModelProvider for OllamaLocalProvider {
fn metadata(&self) -> &ProviderMetadata {
self.client.metadata()
}
async fn health_check(&self) -> CoreResult<ProviderStatus> {
match timeout(self.health_timeout, self.client.health_check()).await {
Ok(Ok(status)) => Ok(status),
Ok(Err(CoreError::Network(_))) | Ok(Err(CoreError::Timeout(_))) => {
Ok(ProviderStatus::Unavailable)
}
Ok(Err(err)) => Err(err),
Err(_) => Ok(ProviderStatus::Unavailable),
}
}
async fn list_models(&self) -> CoreResult<Vec<ModelInfo>> {
self.client.list_models().await
}
async fn generate_stream(&self, request: GenerateRequest) -> CoreResult<GenerateStream> {
self.client.generate_stream(request).await
}
}
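// A hedged smoke sketch (assumes tokio's test macro features are enabled in
// this crate): with nothing listening on the target port, the connect error is
// mapped to `ProviderStatus::Unavailable` instead of bubbling up as `Err`.
#[cfg(test)]
mod health_check_tests {
    use super::*;

    #[tokio::test]
    async fn unreachable_daemon_reports_unavailable() {
        let provider = OllamaLocalProvider::new(
            Some("http://127.0.0.1:9".into()), // discard port; assumed closed
            Some(Duration::from_millis(200)),
            Some(Duration::from_millis(500)),
        )
        .unwrap();
        let status = provider.health_check().await.unwrap();
        assert!(matches!(status, ProviderStatus::Unavailable));
    }
}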

View File

@@ -0,0 +1,7 @@
pub mod cloud;
pub mod local;
pub mod shared;
pub use cloud::OllamaCloudProvider;
pub use local::OllamaLocalProvider;
pub use shared::OllamaClient;

View File

@@ -0,0 +1,389 @@
use std::collections::HashMap;
use std::time::Duration;
use futures::StreamExt;
use owlen_core::provider::{
GenerateChunk, GenerateRequest, GenerateStream, ModelInfo, ProviderMetadata, ProviderStatus,
};
use owlen_core::{Error as CoreError, Result as CoreResult};
use reqwest::{Client, Method, StatusCode, Url};
use serde::Deserialize;
use serde_json::{Map as JsonMap, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
const DEFAULT_TIMEOUT_SECS: u64 = 60;
/// Shared Ollama HTTP client used by both local and cloud providers.
#[derive(Clone)]
pub struct OllamaClient {
http: Client,
base_url: Url,
api_key: Option<String>,
provider_metadata: ProviderMetadata,
}
impl OllamaClient {
/// Create a new client with the given base URL and optional API key.
pub fn new(
base_url: impl AsRef<str>,
api_key: Option<String>,
provider_metadata: ProviderMetadata,
request_timeout: Option<Duration>,
) -> CoreResult<Self> {
let base_url = Url::parse(base_url.as_ref())
.map_err(|err| CoreError::Config(format!("invalid base url: {}", err)))?;
let timeout = request_timeout.unwrap_or_else(|| Duration::from_secs(DEFAULT_TIMEOUT_SECS));
let http = Client::builder()
.timeout(timeout)
.build()
.map_err(map_reqwest_error)?;
Ok(Self {
http,
base_url,
api_key,
provider_metadata,
})
}
/// Provider metadata associated with this client.
pub fn metadata(&self) -> &ProviderMetadata {
&self.provider_metadata
}
/// Perform a basic health check to determine provider availability.
pub async fn health_check(&self) -> CoreResult<ProviderStatus> {
let url = self.endpoint("api/tags")?;
let response = self
.request(Method::GET, url)
.send()
.await
.map_err(map_reqwest_error)?;
match response.status() {
status if status.is_success() => Ok(ProviderStatus::Available),
StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => Ok(ProviderStatus::RequiresSetup),
_ => Ok(ProviderStatus::Unavailable),
}
}
/// Fetch the available models from the Ollama API.
pub async fn list_models(&self) -> CoreResult<Vec<ModelInfo>> {
let url = self.endpoint("api/tags")?;
let response = self
.request(Method::GET, url)
.send()
.await
.map_err(map_reqwest_error)?;
let status = response.status();
let bytes = response.bytes().await.map_err(map_reqwest_error)?;
if !status.is_success() {
return Err(map_http_error("tags", status, &bytes));
}
let payload: TagsResponse =
serde_json::from_slice(&bytes).map_err(CoreError::Serialization)?;
let models = payload
.models
.into_iter()
.map(|model| self.parse_model_info(model))
.collect();
Ok(models)
}
/// Request a streaming generation session from Ollama.
pub async fn generate_stream(&self, request: GenerateRequest) -> CoreResult<GenerateStream> {
let url = self.endpoint("api/generate")?;
let body = self.build_generate_body(request);
let response = self
.request(Method::POST, url)
.json(&body)
.send()
.await
.map_err(map_reqwest_error)?;
let status = response.status();
if !status.is_success() {
let bytes = response.bytes().await.map_err(map_reqwest_error)?;
return Err(map_http_error("generate", status, &bytes));
}
let stream = response.bytes_stream();
let (tx, rx) = mpsc::channel::<CoreResult<GenerateChunk>>(32);
tokio::spawn(async move {
let mut stream = stream;
let mut buffer: Vec<u8> = Vec::new();
while let Some(chunk) = stream.next().await {
match chunk {
Ok(bytes) => {
buffer.extend_from_slice(&bytes);
while let Some(pos) = buffer.iter().position(|byte| *byte == b'\n') {
let line_bytes: Vec<u8> = buffer.drain(..=pos).collect();
let line = String::from_utf8_lossy(&line_bytes).trim().to_string();
if line.is_empty() {
continue;
}
match parse_stream_line(&line) {
Ok(item) => {
if tx.send(Ok(item)).await.is_err() {
return;
}
}
Err(err) => {
let _ = tx.send(Err(err)).await;
return;
}
}
}
}
Err(err) => {
let _ = tx.send(Err(map_reqwest_error(err))).await;
return;
}
}
}
if !buffer.is_empty() {
let line = String::from_utf8_lossy(&buffer).trim().to_string();
if !line.is_empty() {
match parse_stream_line(&line) {
Ok(item) => {
let _ = tx.send(Ok(item)).await;
}
Err(err) => {
let _ = tx.send(Err(err)).await;
}
}
}
}
});
let stream = ReceiverStream::new(rx);
Ok(Box::pin(stream))
}
fn request(&self, method: Method, url: Url) -> reqwest::RequestBuilder {
let mut builder = self.http.request(method, url);
if let Some(api_key) = &self.api_key {
builder = builder.bearer_auth(api_key);
}
builder
}
fn endpoint(&self, path: &str) -> CoreResult<Url> {
self.base_url
.join(path)
.map_err(|err| CoreError::Config(format!("invalid endpoint '{}': {}", path, err)))
}
fn build_generate_body(&self, request: GenerateRequest) -> Value {
let GenerateRequest {
model,
prompt,
context,
parameters,
metadata,
} = request;
let mut body = JsonMap::new();
body.insert("model".into(), Value::String(model));
body.insert("stream".into(), Value::Bool(true));
if let Some(prompt) = prompt {
body.insert("prompt".into(), Value::String(prompt));
}
if !context.is_empty() {
let items = context.into_iter().map(Value::String).collect();
body.insert("context".into(), Value::Array(items));
}
if !parameters.is_empty() {
body.insert("options".into(), Value::Object(to_json_map(parameters)));
}
if !metadata.is_empty() {
body.insert("metadata".into(), Value::Object(to_json_map(metadata)));
}
Value::Object(body)
}
fn parse_model_info(&self, model: OllamaModel) -> ModelInfo {
let mut metadata = HashMap::new();
if let Some(digest) = model.digest {
metadata.insert("digest".to_string(), Value::String(digest));
}
if let Some(modified) = model.modified_at {
metadata.insert("modified_at".to_string(), Value::String(modified));
}
if let Some(details) = model.details {
let mut details_map = JsonMap::new();
if let Some(format) = details.format {
details_map.insert("format".into(), Value::String(format));
}
if let Some(family) = details.family {
details_map.insert("family".into(), Value::String(family));
}
if let Some(parameter_size) = details.parameter_size {
details_map.insert("parameter_size".into(), Value::String(parameter_size));
}
if let Some(quantization) = details.quantization_level {
details_map.insert("quantization_level".into(), Value::String(quantization));
}
if !details_map.is_empty() {
metadata.insert("details".to_string(), Value::Object(details_map));
}
}
ModelInfo {
name: model.name,
size_bytes: model.size,
capabilities: Vec::new(),
description: None,
provider: self.provider_metadata.clone(),
metadata,
}
}
}
#[derive(Debug, Deserialize)]
struct TagsResponse {
#[serde(default)]
models: Vec<OllamaModel>,
}
#[derive(Debug, Deserialize)]
struct OllamaModel {
name: String,
#[serde(default)]
size: Option<u64>,
#[serde(default)]
digest: Option<String>,
#[serde(default)]
modified_at: Option<String>,
#[serde(default)]
details: Option<OllamaModelDetails>,
}
#[derive(Debug, Deserialize)]
struct OllamaModelDetails {
#[serde(default)]
format: Option<String>,
#[serde(default)]
family: Option<String>,
#[serde(default)]
parameter_size: Option<String>,
#[serde(default)]
quantization_level: Option<String>,
}
fn to_json_map(source: HashMap<String, Value>) -> JsonMap<String, Value> {
source.into_iter().collect()
}
fn to_metadata_map(value: &Value) -> HashMap<String, Value> {
let mut metadata = HashMap::new();
if let Value::Object(obj) = value {
for (key, item) in obj {
if key == "response" || key == "done" {
continue;
}
metadata.insert(key.clone(), item.clone());
}
}
metadata
}
fn parse_stream_line(line: &str) -> CoreResult<GenerateChunk> {
let value: Value = serde_json::from_str(line).map_err(CoreError::Serialization)?;
if let Some(error) = value.get("error").and_then(Value::as_str) {
return Err(CoreError::Provider(anyhow::anyhow!(
"ollama generation error: {}",
error
)));
}
let mut chunk = GenerateChunk {
text: value
.get("response")
.and_then(Value::as_str)
.map(str::to_string),
is_final: value.get("done").and_then(Value::as_bool).unwrap_or(false),
metadata: to_metadata_map(&value),
};
if chunk.is_final && chunk.text.is_none() && chunk.metadata.is_empty() {
chunk
.metadata
.insert("status".into(), Value::String("done".into()));
}
Ok(chunk)
}
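// A hedged test sketch of the parser's contract; the NDJSON shapes below are
// assumed from the field accesses above, not captured from a live server.
#[cfg(test)]
mod parse_stream_line_tests {
    use super::*;

    #[test]
    fn parses_partial_and_final_chunks() {
        // Partial chunk: text is present, not final, no extra metadata kept.
        let chunk = parse_stream_line(r#"{"response":"Hel","done":false}"#).unwrap();
        assert_eq!(chunk.text.as_deref(), Some("Hel"));
        assert!(!chunk.is_final);

        // Bare terminator: the parser synthesizes a status marker.
        let done = parse_stream_line(r#"{"done":true}"#).unwrap();
        assert!(done.is_final);
        assert_eq!(done.metadata.get("status"), Some(&Value::String("done".into())));
    }
}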
fn map_http_error(endpoint: &str, status: StatusCode, body: &[u8]) -> CoreError {
match status {
StatusCode::UNAUTHORIZED | StatusCode::FORBIDDEN => CoreError::Auth(format!(
"Ollama {} request unauthorized (status {})",
endpoint, status
)),
StatusCode::TOO_MANY_REQUESTS => CoreError::Provider(anyhow::anyhow!(
"Ollama {} request rate limited (status {})",
endpoint,
status
)),
_ => {
let snippet = truncated_body(body);
CoreError::Provider(anyhow::anyhow!(
"Ollama {} request failed: HTTP {} - {}",
endpoint,
status,
snippet
))
}
}
}
fn truncated_body(body: &[u8]) -> String {
const MAX_CHARS: usize = 512;
let text = String::from_utf8_lossy(body);
let mut value = String::new();
for (idx, ch) in text.chars().enumerate() {
if idx >= MAX_CHARS {
value.push('…');
return value;
}
value.push(ch);
}
value
}
fn map_reqwest_error(err: reqwest::Error) -> CoreError {
if err.is_timeout() {
CoreError::Timeout(err.to_string())
} else if err.is_connect() || err.is_request() {
CoreError::Network(err.to_string())
} else {
CoreError::Provider(err.into())
}
}

View File

@@ -0,0 +1,106 @@
use std::sync::Arc;
use async_trait::async_trait;
use futures::stream::{self, StreamExt};
use owlen_core::Result as CoreResult;
use owlen_core::provider::{
GenerateChunk, GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderMetadata,
ProviderStatus, ProviderType,
};
pub struct MockProvider {
metadata: ProviderMetadata,
models: Vec<ModelInfo>,
status: ProviderStatus,
#[allow(clippy::type_complexity)]
generate_handler: Option<Arc<dyn Fn(GenerateRequest) -> Vec<GenerateChunk> + Send + Sync>>,
generate_error: Option<Arc<dyn Fn() -> owlen_core::Error + Send + Sync>>,
}
impl MockProvider {
pub fn new(id: &str) -> Self {
let metadata = ProviderMetadata::new(
id,
format!("Mock Provider ({})", id),
ProviderType::Local,
false,
);
Self {
metadata,
models: vec![ModelInfo {
name: format!("{}-primary", id),
size_bytes: None,
capabilities: vec!["chat".into()],
description: Some("Mock model".into()),
provider: ProviderMetadata::new(id, "Mock", ProviderType::Local, false),
metadata: Default::default(),
}],
status: ProviderStatus::Available,
generate_handler: None,
generate_error: None,
}
}
pub fn with_models(mut self, models: Vec<ModelInfo>) -> Self {
self.models = models;
self
}
pub fn with_status(mut self, status: ProviderStatus) -> Self {
self.status = status;
self
}
pub fn with_generate_handler<F>(mut self, handler: F) -> Self
where
F: Fn(GenerateRequest) -> Vec<GenerateChunk> + Send + Sync + 'static,
{
self.generate_handler = Some(Arc::new(handler));
self
}
pub fn with_generate_error<F>(mut self, factory: F) -> Self
where
F: Fn() -> owlen_core::Error + Send + Sync + 'static,
{
self.generate_error = Some(Arc::new(factory));
self
}
}
#[async_trait]
impl ModelProvider for MockProvider {
fn metadata(&self) -> &ProviderMetadata {
&self.metadata
}
async fn health_check(&self) -> CoreResult<ProviderStatus> {
Ok(self.status)
}
async fn list_models(&self) -> CoreResult<Vec<ModelInfo>> {
Ok(self.models.clone())
}
async fn generate_stream(&self, request: GenerateRequest) -> CoreResult<GenerateStream> {
if let Some(factory) = &self.generate_error {
return Err(factory());
}
let chunks = if let Some(handler) = &self.generate_handler {
(handler)(request)
} else {
vec![GenerateChunk::final_chunk()]
};
let stream = stream::iter(chunks.into_iter().map(Ok)).boxed();
Ok(Box::pin(stream))
}
}
impl From<MockProvider> for Arc<dyn ModelProvider> {
fn from(provider: MockProvider) -> Self {
Arc::new(provider)
}
}

View File

@@ -0,0 +1 @@
pub mod mock_provider;

View File

@@ -0,0 +1,117 @@
mod common;
use std::sync::Arc;
use futures::StreamExt;
use common::mock_provider::MockProvider;
use owlen_core::config::Config;
use owlen_core::provider::{
GenerateChunk, GenerateRequest, ModelInfo, ProviderManager, ProviderType,
};
#[allow(dead_code)]
fn base_config() -> Config {
Config {
providers: Default::default(),
..Default::default()
}
}
fn make_model(name: &str, provider: &str) -> ModelInfo {
ModelInfo {
name: name.into(),
size_bytes: None,
capabilities: vec!["chat".into()],
description: Some("mock".into()),
provider: owlen_core::provider::ProviderMetadata::new(
provider,
provider,
ProviderType::Local,
false,
),
metadata: Default::default(),
}
}
#[tokio::test]
async fn registers_providers_and_lists_ids() {
let manager = ProviderManager::default();
let provider: Arc<dyn owlen_core::provider::ModelProvider> = MockProvider::new("mock-a").into();
manager.register_provider(provider).await;
let ids = manager.provider_ids().await;
assert_eq!(ids, vec!["mock-a".to_string()]);
}
#[tokio::test]
async fn aggregates_models_across_providers() {
let manager = ProviderManager::default();
let provider_a = MockProvider::new("mock-a").with_models(vec![make_model("alpha", "mock-a")]);
let provider_b = MockProvider::new("mock-b").with_models(vec![make_model("beta", "mock-b")]);
manager.register_provider(provider_a.into()).await;
manager.register_provider(provider_b.into()).await;
let models = manager.list_all_models().await.unwrap();
assert_eq!(models.len(), 2);
assert!(models.iter().any(|m| m.model.name == "alpha"));
assert!(models.iter().any(|m| m.model.name == "beta"));
}
#[tokio::test]
async fn routes_generation_to_specific_provider() {
let manager = ProviderManager::default();
let provider = MockProvider::new("mock-gen").with_generate_handler(|_req| {
vec![
GenerateChunk::from_text("hello"),
GenerateChunk::final_chunk(),
]
});
manager.register_provider(provider.into()).await;
let request = GenerateRequest::new("mock-gen::primary");
let mut stream = manager.generate("mock-gen", request).await.unwrap();
let mut collected = Vec::new();
while let Some(chunk) = stream.next().await {
collected.push(chunk.unwrap());
}
assert_eq!(collected.len(), 2);
assert_eq!(collected[0].text.as_deref(), Some("hello"));
assert!(collected[1].is_final);
}
#[tokio::test]
async fn marks_provider_unavailable_on_error() {
let manager = ProviderManager::default();
let provider = MockProvider::new("flaky")
.with_generate_error(|| owlen_core::Error::Network("boom".into()));
manager.register_provider(provider.into()).await;
let request = GenerateRequest::new("flaky::model");
let result = manager.generate("flaky", request).await;
assert!(result.is_err());
let status = manager.provider_status("flaky").await.unwrap();
assert!(matches!(
status,
owlen_core::provider::ProviderStatus::Unavailable
));
}
#[tokio::test]
async fn health_refresh_updates_status_cache() {
let manager = ProviderManager::default();
let provider =
MockProvider::new("healthy").with_status(owlen_core::provider::ProviderStatus::Available);
manager.register_provider(provider.into()).await;
let statuses = manager.refresh_health().await;
assert_eq!(
statuses.get("healthy"),
Some(&owlen_core::provider::ProviderStatus::Available)
);
}

View File

@@ -30,6 +30,8 @@ toml = { workspace = true }
syntect = "5.3"
once_cell = "1.19"
owlen-markdown = { path = "../owlen-markdown" }
shellexpand = { workspace = true }
regex = { workspace = true }
# Async runtime
tokio = { workspace = true }
@@ -42,6 +44,7 @@ uuid = { workspace = true }
serde_json.workspace = true
serde.workspace = true
chrono = { workspace = true }
log = { workspace = true }
[dev-dependencies]
tokio-test = { workspace = true }

View File

@@ -0,0 +1,99 @@
[[binding]]
mode = "normal"
keys = ["m"]
command = "model.open_all"
[[binding]]
mode = "normal"
keys = ["Ctrl+Shift+L"]
command = "model.open_local"
[[binding]]
mode = "normal"
keys = ["Ctrl+Shift+C"]
command = "model.open_cloud"
[[binding]]
mode = "normal"
keys = ["Ctrl+Shift+P"]
command = "model.open_available"
[[binding]]
mode = "normal"
keys = ["Ctrl+P"]
command = "palette.open"
[[binding]]
mode = "editing"
keys = ["Ctrl+P"]
command = "palette.open"
[[binding]]
mode = "normal"
keys = ["Tab"]
command = "focus.next"
[[binding]]
mode = "normal"
keys = ["Shift+Tab"]
command = "focus.prev"
[[binding]]
mode = "normal"
keys = ["Ctrl+1"]
command = "focus.files"
[[binding]]
mode = "normal"
keys = ["Ctrl+2"]
command = "focus.chat"
[[binding]]
mode = "normal"
keys = ["Ctrl+3"]
command = "focus.code"
[[binding]]
mode = "normal"
keys = ["Ctrl+4"]
command = "focus.thinking"
[[binding]]
mode = "normal"
keys = ["Ctrl+5"]
command = "focus.input"
[[binding]]
mode = "editing"
keys = ["Enter"]
command = "composer.submit"
[[binding]]
mode = "normal"
keys = ["Ctrl+;"]
command = "mode.command"
[[binding]]
mode = "normal"
keys = ["F12"]
command = "debug.toggle"
[[binding]]
mode = "editing"
keys = ["F12"]
command = "debug.toggle"
[[binding]]
mode = "visual"
keys = ["F12"]
command = "debug.toggle"
[[binding]]
mode = "command"
keys = ["F12"]
command = "debug.toggle"
[[binding]]
mode = "help"
keys = ["F12"]
command = "debug.toggle"

View File

@@ -0,0 +1,77 @@
use std::sync::Arc;
use anyhow::{Result, anyhow};
use futures_util::StreamExt;
use owlen_core::provider::GenerateRequest;
use uuid::Uuid;
use super::{ActiveGeneration, App, AppMessage};
impl App {
/// Kick off a new generation task on the supplied provider.
pub fn start_generation(
&mut self,
provider_id: impl Into<String>,
request: GenerateRequest,
) -> Result<Uuid> {
let provider_id = provider_id.into();
let request_id = Uuid::new_v4();
// Cancel any existing task so we don't interleave output.
if let Some(active) = self.active_generation.take() {
active.abort();
}
self.message_tx
.send(AppMessage::GenerateStart {
request_id,
provider_id: provider_id.clone(),
request: request.clone(),
})
.map_err(|err| anyhow!("failed to queue generation start: {err:?}"))?;
let manager = Arc::clone(&self.provider_manager);
let message_tx = self.message_tx.clone();
let provider_for_task = provider_id.clone();
let join_handle = tokio::spawn(async move {
let mut stream = match manager.generate(&provider_for_task, request).await {
Ok(stream) => stream,
Err(err) => {
let _ = message_tx.send(AppMessage::GenerateError {
request_id: Some(request_id),
message: err.to_string(),
});
return;
}
};
while let Some(chunk_result) = stream.next().await {
match chunk_result {
Ok(chunk) => {
if message_tx
.send(AppMessage::GenerateChunk { request_id, chunk })
.is_err()
{
break;
}
}
Err(err) => {
let _ = message_tx.send(AppMessage::GenerateError {
request_id: Some(request_id),
message: err.to_string(),
});
return;
}
}
}
let _ = message_tx.send(AppMessage::GenerateComplete { request_id });
});
let generation = ActiveGeneration::new(request_id, provider_id, join_handle);
self.active_generation = Some(generation);
Ok(request_id)
}
}
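// A hedged usage sketch (helper name and provider id are illustrative): start
// a stream against a registered provider and observe the in-flight flag.
#[allow(dead_code)]
fn example_start_generation(app: &mut App) -> Result<Uuid> {
    let request = GenerateRequest::new("mock-gen::primary");
    let request_id = app.start_generation("mock-gen", request)?;
    debug_assert!(app.has_active_generation());
    Ok(request_id)
}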

View File

@@ -0,0 +1,135 @@
use super::{App, messages::AppMessage};
use log::warn;
use owlen_core::{
provider::{GenerateChunk, GenerateRequest, ProviderStatus},
state::AppState,
};
use uuid::Uuid;
/// Trait implemented by UI state containers to react to [`AppMessage`] events.
pub trait MessageState {
/// Called when a generation request is about to start.
#[allow(unused_variables)]
fn start_generation(
&mut self,
request_id: Uuid,
provider_id: &str,
request: &GenerateRequest,
) -> AppState {
AppState::Running
}
/// Called for every streamed generation chunk.
#[allow(unused_variables)]
fn append_chunk(&mut self, request_id: Uuid, chunk: &GenerateChunk) -> AppState {
AppState::Running
}
/// Called when a generation finishes successfully.
#[allow(unused_variables)]
fn generation_complete(&mut self, request_id: Uuid) -> AppState {
AppState::Running
}
/// Called when a generation fails.
#[allow(unused_variables)]
fn generation_failed(&mut self, request_id: Option<Uuid>, message: &str) -> AppState {
AppState::Running
}
/// Called when refreshed model metadata is available.
fn update_model_list(&mut self) -> AppState {
AppState::Running
}
/// Called when a models refresh has been requested.
fn refresh_model_list(&mut self) -> AppState {
AppState::Running
}
/// Called when provider status updates arrive.
#[allow(unused_variables)]
fn update_provider_status(&mut self, provider_id: &str, status: ProviderStatus) -> AppState {
AppState::Running
}
/// Called when a resize event occurs.
#[allow(unused_variables)]
fn handle_resize(&mut self, width: u16, height: u16) -> AppState {
AppState::Running
}
/// Called on periodic ticks.
fn handle_tick(&mut self) -> AppState {
AppState::Running
}
}
impl App {
/// Dispatch a message to the provided [`MessageState`]. Returns `true` when the
/// state indicates the UI should exit.
pub fn handle_message<State>(&mut self, state: &mut State, message: AppMessage) -> bool
where
State: MessageState,
{
use AppMessage::*;
let outcome = match message {
KeyPress(_) => AppState::Running,
Resize { width, height } => state.handle_resize(width, height),
Tick => state.handle_tick(),
GenerateStart {
request_id,
provider_id,
request,
} => state.start_generation(request_id, &provider_id, &request),
GenerateChunk { request_id, chunk } => state.append_chunk(request_id, &chunk),
GenerateComplete { request_id } => {
self.clear_active_generation(request_id);
state.generation_complete(request_id)
}
GenerateError {
request_id,
message,
} => {
self.clear_active_generation_optional(request_id);
state.generation_failed(request_id, &message)
}
ModelsRefresh => state.refresh_model_list(),
ModelsUpdated => state.update_model_list(),
ProviderStatus {
provider_id,
status,
} => state.update_provider_status(&provider_id, status),
};
matches!(outcome, AppState::Quit)
}
fn clear_active_generation(&mut self, request_id: Uuid) {
if self
.active_generation
.as_ref()
.map(|active| active.request_id() == request_id)
.unwrap_or(false)
{
self.active_generation = None;
} else {
warn!(
"received completion for unknown request {}, ignoring",
request_id
);
}
}
fn clear_active_generation_optional(&mut self, request_id: Option<Uuid>) {
match request_id {
Some(id) => self.clear_active_generation(id),
None => {
if self.active_generation.is_some() {
self.active_generation = None;
}
}
}
}
}
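// A minimal MessageState sketch: every hook has a default, so an implementor
// only overrides what it cares about (here: quit as soon as a generation fails).
#[allow(dead_code)]
struct QuitOnError;

impl MessageState for QuitOnError {
    fn generation_failed(&mut self, _request_id: Option<Uuid>, _message: &str) -> AppState {
        AppState::Quit
    }
}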

View File

@@ -0,0 +1,41 @@
use crossterm::event::KeyEvent;
use owlen_core::provider::{GenerateChunk, GenerateRequest, ProviderStatus};
use uuid::Uuid;
/// Messages exchanged between the UI event loop and background workers.
#[derive(Debug)]
pub enum AppMessage {
/// User input event bubbled up from the terminal layer.
KeyPress(KeyEvent),
/// Terminal resize notification.
Resize { width: u16, height: u16 },
/// Periodic tick used to drive animations.
Tick,
/// Initiate a new text generation request.
GenerateStart {
request_id: Uuid,
provider_id: String,
request: GenerateRequest,
},
/// Streamed response chunk from the active generation task.
GenerateChunk {
request_id: Uuid,
chunk: GenerateChunk,
},
/// Generation finished successfully.
GenerateComplete { request_id: Uuid },
/// Generation failed or was aborted.
GenerateError {
request_id: Option<Uuid>,
message: String,
},
/// Trigger a background refresh of available models.
ModelsRefresh,
/// New model list data is ready.
ModelsUpdated,
/// Provider health status update.
ProviderStatus {
provider_id: String,
status: ProviderStatus,
},
}

View File

@@ -0,0 +1,240 @@
mod generation;
mod handler;
pub mod mvu;
mod worker;
pub mod messages;
pub use worker::background_worker;
use std::{
io,
sync::Arc,
time::{Duration, Instant},
};
use anyhow::Result;
use async_trait::async_trait;
use crossterm::event::{self, KeyEventKind};
use owlen_core::{provider::ProviderManager, state::AppState};
use ratatui::{Terminal, backend::CrosstermBackend};
use tokio::{
sync::mpsc::{self, error::TryRecvError},
task::{AbortHandle, JoinHandle, yield_now},
};
use uuid::Uuid;
use crate::{Event, SessionEvent, events};
pub use handler::MessageState;
pub use messages::AppMessage;
#[async_trait]
pub trait UiRuntime: MessageState {
async fn handle_ui_event(&mut self, event: Event) -> Result<AppState>;
async fn handle_session_event(&mut self, event: SessionEvent) -> Result<()>;
async fn process_pending_llm_request(&mut self) -> Result<()>;
async fn process_pending_tool_execution(&mut self) -> Result<()>;
fn poll_controller_events(&mut self) -> Result<()>;
fn advance_loading_animation(&mut self);
fn streaming_count(&self) -> usize;
}
/// High-level application state driving the non-blocking TUI.
pub struct App {
provider_manager: Arc<ProviderManager>,
message_tx: mpsc::UnboundedSender<AppMessage>,
message_rx: Option<mpsc::UnboundedReceiver<AppMessage>>,
active_generation: Option<ActiveGeneration>,
}
impl App {
/// Construct a new application instance with an associated message channel.
pub fn new(provider_manager: Arc<ProviderManager>) -> Self {
let (message_tx, message_rx) = mpsc::unbounded_channel();
Self {
provider_manager,
message_tx,
message_rx: Some(message_rx),
active_generation: None,
}
}
/// Cloneable sender handle for pushing messages into the application loop.
pub fn message_sender(&self) -> mpsc::UnboundedSender<AppMessage> {
self.message_tx.clone()
}
/// Whether a generation task is currently in flight.
pub fn has_active_generation(&self) -> bool {
self.active_generation.is_some()
}
/// Abort any in-flight generation task.
pub fn abort_active_generation(&mut self) {
if let Some(active) = self.active_generation.take() {
active.abort();
}
}
/// Launch the background worker responsible for provider health checks.
pub fn spawn_background_worker(&self) -> JoinHandle<()> {
let manager = Arc::clone(&self.provider_manager);
let sender = self.message_tx.clone();
tokio::spawn(async move {
worker::background_worker(manager, sender).await;
})
}
/// Drive the main UI loop, handling terminal events, background messages, and
/// provider status updates without blocking rendering.
pub async fn run<State, RenderFn>(
&mut self,
terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
state: &mut State,
session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
mut render: RenderFn,
) -> Result<AppState>
where
State: UiRuntime,
RenderFn: FnMut(&mut Terminal<CrosstermBackend<io::Stdout>>, &mut State) -> Result<()>,
{
let mut message_rx = self
.message_rx
.take()
.expect("App::run called without an available message receiver");
let poll_interval = Duration::from_millis(16);
let mut last_frame = Instant::now();
let frame_interval = Duration::from_millis(16);
let mut worker_handle = Some(self.spawn_background_worker());
let exit_state = AppState::Quit;
'main: loop {
state.advance_loading_animation();
state.process_pending_llm_request().await?;
state.process_pending_tool_execution().await?;
state.poll_controller_events()?;
loop {
match session_rx.try_recv() {
Ok(session_event) => {
state.handle_session_event(session_event).await?;
}
Err(TryRecvError::Empty) => break,
Err(TryRecvError::Disconnected) => {
break 'main;
}
}
}
loop {
match message_rx.try_recv() {
Ok(message) => {
if self.handle_message(state, message) {
if let Some(handle) = worker_handle.take() {
handle.abort();
}
break 'main;
}
}
Err(tokio::sync::mpsc::error::TryRecvError::Empty) => break,
Err(tokio::sync::mpsc::error::TryRecvError::Disconnected) => break,
}
}
if last_frame.elapsed() >= frame_interval {
render(terminal, state)?;
last_frame = Instant::now();
}
if self.handle_message(state, AppMessage::Tick) {
if let Some(handle) = worker_handle.take() {
handle.abort();
}
break 'main;
}
match event::poll(poll_interval) {
Ok(true) => match event::read() {
Ok(raw_event) => {
if let Some(ui_event) = events::from_crossterm_event(raw_event) {
if let Event::Key(key) = &ui_event {
if key.kind == KeyEventKind::Press {
let _ = self.message_tx.send(AppMessage::KeyPress(*key));
}
} else if let Event::Resize(width, height) = &ui_event {
let _ = self.message_tx.send(AppMessage::Resize {
width: *width,
height: *height,
});
}
if matches!(state.handle_ui_event(ui_event).await?, AppState::Quit) {
if let Some(handle) = worker_handle.take() {
handle.abort();
}
break 'main;
}
}
}
Err(err) => {
if let Some(handle) = worker_handle.take() {
handle.abort();
}
return Err(err.into());
}
},
Ok(false) => {}
Err(err) => {
if let Some(handle) = worker_handle.take() {
handle.abort();
}
return Err(err.into());
}
}
yield_now().await;
}
if let Some(handle) = worker_handle {
handle.abort();
}
self.message_rx = Some(message_rx);
Ok(exit_state)
}
}
struct ActiveGeneration {
request_id: Uuid,
#[allow(dead_code)]
provider_id: String,
abort_handle: AbortHandle,
#[allow(dead_code)]
join_handle: JoinHandle<()>,
}
impl ActiveGeneration {
fn new(request_id: Uuid, provider_id: String, join_handle: JoinHandle<()>) -> Self {
let abort_handle = join_handle.abort_handle();
Self {
request_id,
provider_id,
abort_handle,
join_handle,
}
}
fn abort(self) {
self.abort_handle.abort();
}
fn request_id(&self) -> Uuid {
self.request_id
}
}

View File

@@ -0,0 +1,165 @@
use owlen_core::{consent::ConsentScope, ui::InputMode};
use uuid::Uuid;
#[derive(Debug, Clone, Default)]
pub struct AppModel {
pub composer: ComposerModel,
}
#[derive(Debug, Clone)]
pub struct ComposerModel {
pub draft: String,
pub pending_submit: bool,
pub mode: InputMode,
}
impl Default for ComposerModel {
fn default() -> Self {
Self {
draft: String::new(),
pending_submit: false,
mode: InputMode::Normal,
}
}
}
#[derive(Debug, Clone)]
pub enum AppEvent {
Composer(ComposerEvent),
ToolPermission {
request_id: Uuid,
scope: ConsentScope,
},
}
#[derive(Debug, Clone)]
pub enum ComposerEvent {
DraftChanged { content: String },
ModeChanged { mode: InputMode },
Submit,
SubmissionHandled { result: SubmissionOutcome },
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SubmissionOutcome {
MessageSent,
CommandExecuted,
Failed,
}
#[derive(Debug, Clone)]
pub enum AppEffect {
SetStatus(String),
RequestSubmit,
ResolveToolConsent {
request_id: Uuid,
scope: ConsentScope,
},
}
pub fn update(model: &mut AppModel, event: AppEvent) -> Vec<AppEffect> {
match event {
AppEvent::Composer(event) => update_composer(&mut model.composer, event),
AppEvent::ToolPermission { request_id, scope } => {
vec![AppEffect::ResolveToolConsent { request_id, scope }]
}
}
}
fn update_composer(model: &mut ComposerModel, event: ComposerEvent) -> Vec<AppEffect> {
match event {
ComposerEvent::DraftChanged { content } => {
model.draft = content;
Vec::new()
}
ComposerEvent::ModeChanged { mode } => {
model.mode = mode;
Vec::new()
}
ComposerEvent::Submit => {
if model.draft.trim().is_empty() {
return vec![AppEffect::SetStatus(
"Cannot send empty message".to_string(),
)];
}
model.pending_submit = true;
vec![AppEffect::RequestSubmit]
}
ComposerEvent::SubmissionHandled { result } => {
model.pending_submit = false;
match result {
SubmissionOutcome::MessageSent | SubmissionOutcome::CommandExecuted => {
model.draft.clear();
if model.mode == InputMode::Editing {
model.mode = InputMode::Normal;
}
}
SubmissionOutcome::Failed => {}
}
Vec::new()
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn submit_with_empty_draft_sets_error() {
let mut model = AppModel::default();
let effects = update(&mut model, AppEvent::Composer(ComposerEvent::Submit));
assert!(!model.composer.pending_submit);
assert_eq!(effects.len(), 1);
match &effects[0] {
AppEffect::SetStatus(message) => {
assert!(message.contains("Cannot send empty message"));
}
other => panic!("unexpected effect: {:?}", other),
}
}
#[test]
fn submit_with_content_requests_processing() {
let mut model = AppModel::default();
let _ = update(
&mut model,
AppEvent::Composer(ComposerEvent::DraftChanged {
content: "hello world".into(),
}),
);
let effects = update(&mut model, AppEvent::Composer(ComposerEvent::Submit));
assert!(model.composer.pending_submit);
assert_eq!(effects.len(), 1);
assert!(matches!(effects[0], AppEffect::RequestSubmit));
}
#[test]
fn submission_success_clears_draft_and_mode() {
let mut model = AppModel::default();
let _ = update(
&mut model,
AppEvent::Composer(ComposerEvent::DraftChanged {
content: "hello world".into(),
}),
);
let _ = update(&mut model, AppEvent::Composer(ComposerEvent::Submit));
assert!(model.composer.pending_submit);
let effects = update(
&mut model,
AppEvent::Composer(ComposerEvent::SubmissionHandled {
result: SubmissionOutcome::MessageSent,
}),
);
assert!(effects.is_empty());
assert!(!model.composer.pending_submit);
assert!(model.composer.draft.is_empty());
assert_eq!(model.composer.mode, InputMode::Normal);
}
}
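// A hedged sketch of the MVU drive loop: events go through `update`, and the
// returned effects are interpreted by host-owned glue (`apply_effect` here is
// hypothetical).
#[allow(dead_code)]
fn drain_events(
    model: &mut AppModel,
    events: Vec<AppEvent>,
    mut apply_effect: impl FnMut(AppEffect),
) {
    for event in events {
        for effect in update(model, event) {
            apply_effect(effect);
        }
    }
}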

View File

@@ -0,0 +1,52 @@
use std::sync::Arc;
use std::time::Duration;
use tokio::{sync::mpsc, time};
use owlen_core::provider::ProviderManager;
use super::AppMessage;
const HEALTH_CHECK_INTERVAL: Duration = Duration::from_secs(30);
/// Periodically refresh provider health and emit status updates into the app's
/// message channel. Exits automatically once the receiver side of the channel
/// is dropped.
pub async fn background_worker(
provider_manager: Arc<ProviderManager>,
message_tx: mpsc::UnboundedSender<AppMessage>,
) {
let mut interval = time::interval(HEALTH_CHECK_INTERVAL);
let mut last_statuses = provider_manager.provider_statuses().await;
loop {
interval.tick().await;
if message_tx.is_closed() {
break;
}
let statuses = provider_manager.refresh_health().await;
for (provider_id, status) in statuses {
let changed = match last_statuses.get(&provider_id) {
Some(previous) => previous != &status,
None => true,
};
last_statuses.insert(provider_id.clone(), status);
if changed
&& message_tx
.send(AppMessage::ProviderStatus {
provider_id,
status,
})
.is_err()
{
// Receiver dropped; terminate worker.
return;
}
}
}
}

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
use anyhow::Result;
use owlen_core::session::SessionController;
use owlen_core::session::{ControllerEvent, SessionController};
use owlen_core::ui::{AppState, InputMode};
use tokio::sync::mpsc;
@@ -16,11 +16,12 @@ pub struct CodeApp {
impl CodeApp {
pub async fn new(
mut controller: SessionController,
controller_event_rx: mpsc::UnboundedReceiver<ControllerEvent>,
) -> Result<(Self, mpsc::UnboundedReceiver<SessionEvent>)> {
controller
.conversation_mut()
.push_system_message(DEFAULT_SYSTEM_PROMPT.to_string());
let (inner, rx) = ChatApp::new(controller).await?;
let (inner, rx) = ChatApp::new(controller, controller_event_rx).await?;
Ok((Self { inner }, rx))
}

View File

@@ -1,4 +1,7 @@
//! Command catalog and lookup utilities for the command palette.
pub mod registry;
pub use registry::{AppCommand, CommandRegistry};
// Command catalog and lookup utilities for the command palette.
/// Metadata describing a single command keyword.
#[derive(Debug, Clone, Copy)]
@@ -148,6 +151,10 @@ const COMMANDS: &[CommandSpec] = &[
keyword: "models --cloud",
description: "Open model picker focused on cloud models",
},
CommandSpec {
keyword: "models --available",
description: "Open model picker showing available models",
},
CommandSpec {
keyword: "new",
description: "Start a new conversation",
@@ -236,6 +243,10 @@ const COMMANDS: &[CommandSpec] = &[
keyword: "explorer",
description: "Alias for files",
},
CommandSpec {
keyword: "debug log",
description: "Toggle the debug log panel",
},
];
/// Return the static catalog of commands.

View File

@@ -0,0 +1,107 @@
use std::collections::HashMap;
use owlen_core::ui::FocusedPanel;
use crate::widgets::model_picker::FilterMode;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AppCommand {
OpenModelPicker(Option<FilterMode>),
OpenCommandPalette,
CycleFocusForward,
CycleFocusBackward,
FocusPanel(FocusedPanel),
ComposerSubmit,
EnterCommandMode,
ToggleDebugLog,
}
#[derive(Debug)]
pub struct CommandRegistry {
commands: HashMap<String, AppCommand>,
}
impl CommandRegistry {
pub fn new() -> Self {
let mut commands = HashMap::new();
commands.insert(
"model.open_all".to_string(),
AppCommand::OpenModelPicker(None),
);
commands.insert(
"model.open_local".to_string(),
AppCommand::OpenModelPicker(Some(FilterMode::LocalOnly)),
);
commands.insert(
"model.open_cloud".to_string(),
AppCommand::OpenModelPicker(Some(FilterMode::CloudOnly)),
);
commands.insert(
"model.open_available".to_string(),
AppCommand::OpenModelPicker(Some(FilterMode::Available)),
);
commands.insert("palette.open".to_string(), AppCommand::OpenCommandPalette);
commands.insert("focus.next".to_string(), AppCommand::CycleFocusForward);
commands.insert("focus.prev".to_string(), AppCommand::CycleFocusBackward);
commands.insert(
"focus.files".to_string(),
AppCommand::FocusPanel(FocusedPanel::Files),
);
commands.insert(
"focus.chat".to_string(),
AppCommand::FocusPanel(FocusedPanel::Chat),
);
commands.insert(
"focus.thinking".to_string(),
AppCommand::FocusPanel(FocusedPanel::Thinking),
);
commands.insert(
"focus.input".to_string(),
AppCommand::FocusPanel(FocusedPanel::Input),
);
commands.insert(
"focus.code".to_string(),
AppCommand::FocusPanel(FocusedPanel::Code),
);
commands.insert("composer.submit".to_string(), AppCommand::ComposerSubmit);
commands.insert("mode.command".to_string(), AppCommand::EnterCommandMode);
commands.insert("debug.toggle".to_string(), AppCommand::ToggleDebugLog);
Self { commands }
}
pub fn resolve(&self, command: &str) -> Option<AppCommand> {
self.commands.get(command).copied()
}
}
impl Default for CommandRegistry {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve_known_command() {
let registry = CommandRegistry::new();
assert_eq!(
registry.resolve("focus.next"),
Some(AppCommand::CycleFocusForward)
);
assert_eq!(
registry.resolve("model.open_cloud"),
Some(AppCommand::OpenModelPicker(Some(FilterMode::CloudOnly)))
);
}
#[test]
fn resolve_unknown_command() {
let registry = CommandRegistry::new();
assert_eq!(registry.resolve("does.not.exist"), None);
}
}

View File

@@ -17,6 +17,22 @@ pub enum Event {
Tick,
}
/// Convert a raw crossterm event into an application event.
pub fn from_crossterm_event(raw: crossterm::event::Event) -> Option<Event> {
match raw {
crossterm::event::Event::Key(key) => {
if key.kind == KeyEventKind::Press {
Some(Event::Key(key))
} else {
None
}
}
crossterm::event::Event::Resize(width, height) => Some(Event::Resize(width, height)),
crossterm::event::Event::Paste(text) => Some(Event::Paste(text)),
_ => None,
}
}
/// Event handler that captures terminal events and sends them to the application
pub struct EventHandler {
sender: mpsc::UnboundedSender<Event>,
@@ -52,20 +68,8 @@ impl EventHandler {
if event::poll(timeout).unwrap_or(false) {
match event::read() {
Ok(event) => {
match event {
crossterm::event::Event::Key(key) => {
// Only handle KeyEventKind::Press to avoid duplicate events
if key.kind == KeyEventKind::Press {
let _ = self.sender.send(Event::Key(key));
}
}
crossterm::event::Event::Resize(width, height) => {
let _ = self.sender.send(Event::Resize(width, height));
}
crossterm::event::Event::Paste(text) => {
let _ = self.sender.send(Event::Paste(text));
}
_ => {}
if let Some(converted) = from_crossterm_event(event) {
let _ = self.sender.send(converted);
}
}
Err(_) => {

View File

@@ -14,6 +14,7 @@
//! - `events`: Event handling for user input and other asynchronous actions.
//! - `ui`: The rendering logic for all TUI components.
pub mod app;
pub mod chat_app;
pub mod code_app;
pub mod commands;
@@ -26,6 +27,7 @@ pub mod state;
pub mod toast;
pub mod tui_controller;
pub mod ui;
pub mod widgets;
pub use chat_app::{ChatApp, SessionEvent};
pub use code_app::CodeApp;

View File

@@ -0,0 +1,235 @@
use chrono::{DateTime, Local};
use log::{Level, LevelFilter, Metadata, Record};
use once_cell::sync::{Lazy, OnceCell};
use regex::Regex;
use std::collections::VecDeque;
use std::sync::Mutex;
/// Maximum number of entries to retain in the in-memory ring buffer.
const MAX_ENTRIES: usize = 256;
/// Global access handle for the debug log store.
static STORE: Lazy<DebugLogStore> = Lazy::new(DebugLogStore::default);
static LOGGER: OnceCell<()> = OnceCell::new();
static DEBUG_LOGGER: DebugLogger = DebugLogger;
/// Install the in-process logger that feeds the debug log ring buffer.
pub fn install_global_logger() {
LOGGER.get_or_init(|| {
if log::set_logger(&DEBUG_LOGGER).is_ok() {
log::set_max_level(LevelFilter::Trace);
}
});
}
/// Per-application state for presenting and acknowledging debug log entries.
#[derive(Debug)]
pub struct DebugLogState {
visible: bool,
last_seen_id: u64,
}
impl DebugLogState {
pub fn new() -> Self {
let last_seen_id = STORE.latest_id();
Self {
visible: false,
last_seen_id,
}
}
pub fn toggle_visible(&mut self) -> bool {
self.visible = !self.visible;
if self.visible {
self.mark_seen();
}
self.visible
}
pub fn set_visible(&mut self, visible: bool) {
self.visible = visible;
if visible {
self.mark_seen();
}
}
pub fn is_visible(&self) -> bool {
self.visible
}
pub fn entries(&self) -> Vec<DebugLogEntry> {
STORE.snapshot()
}
pub fn take_unseen(&mut self) -> Vec<DebugLogEntry> {
let entries = STORE.entries_since(self.last_seen_id);
if let Some(entry) = entries.last() {
self.last_seen_id = entry.id;
}
entries
}
pub fn has_unseen(&self) -> bool {
STORE.latest_id() > self.last_seen_id
}
fn mark_seen(&mut self) {
self.last_seen_id = STORE.latest_id();
}
}
impl Default for DebugLogState {
fn default() -> Self {
Self::new()
}
}
/// Metadata describing a single debug log entry.
#[derive(Clone, Debug)]
pub struct DebugLogEntry {
pub id: u64,
pub timestamp: DateTime<Local>,
pub level: Level,
pub target: String,
pub message: String,
}
#[derive(Default)]
struct DebugLogStore {
inner: Mutex<Inner>,
}
#[derive(Default)]
struct Inner {
entries: VecDeque<DebugLogEntry>,
next_id: u64,
}
impl DebugLogStore {
fn snapshot(&self) -> Vec<DebugLogEntry> {
let inner = self.inner.lock().unwrap();
inner.entries.iter().cloned().collect()
}
fn latest_id(&self) -> u64 {
let inner = self.inner.lock().unwrap();
inner.next_id
}
fn entries_since(&self, last_seen_id: u64) -> Vec<DebugLogEntry> {
let inner = self.inner.lock().unwrap();
inner
.entries
.iter()
.filter(|entry| entry.id > last_seen_id)
.cloned()
.collect()
}
fn push(&self, level: Level, target: &str, message: &str) -> DebugLogEntry {
let sanitized = sanitize_message(message);
let mut inner = self.inner.lock().unwrap();
inner.next_id = inner.next_id.saturating_add(1);
let entry = DebugLogEntry {
id: inner.next_id,
timestamp: Local::now(),
level,
target: target.to_string(),
message: sanitized,
};
inner.entries.push_back(entry.clone());
while inner.entries.len() > MAX_ENTRIES {
inner.entries.pop_front();
}
entry
}
}
struct DebugLogger;
impl log::Log for DebugLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= LevelFilter::Trace
}
fn log(&self, record: &Record) {
if !self.enabled(record.metadata()) {
return;
}
// Only persist warnings and errors in the in-memory buffer. In the `log`
// crate a *lower* Level is *more* severe (Error < Warn < Info), so skip
// anything above Warn rather than below it.
if record.level() > Level::Warn {
return;
}
let message = record.args().to_string();
let entry = STORE.push(record.level(), record.target(), &message);
if record.level() == Level::Error {
eprintln!(
"[owlen:error][{}] {}",
entry.timestamp.format("%Y-%m-%d %H:%M:%S"),
entry.message
);
} else if record.level() == Level::Warn {
eprintln!(
"[owlen:warn][{}] {}",
entry.timestamp.format("%Y-%m-%d %H:%M:%S"),
entry.message
);
}
}
fn flush(&self) {}
}
fn sanitize_message(message: &str) -> String {
static AUTH_HEADER: Lazy<Regex> =
Lazy::new(|| Regex::new(r"(?i)\b(authorization)(\s*[:=]\s*)([^\r\n]+)").unwrap());
static GENERIC_SECRET: Lazy<Regex> =
Lazy::new(|| Regex::new(r"(?i)\b(api[_-]?key|token)(\s*[:=]\s*)([^,\s;]+)").unwrap());
static BEARER_TOKEN: Lazy<Regex> =
Lazy::new(|| Regex::new(r"(?i)\bBearer\s+[A-Za-z0-9._\-+/=]+").unwrap());
let step = AUTH_HEADER.replace_all(message, |caps: &regex::Captures<'_>| {
format!("{}{}<redacted>", &caps[1], &caps[2])
});
let step = GENERIC_SECRET.replace_all(&step, |caps: &regex::Captures<'_>| {
format!("{}{}<redacted>", &caps[1], &caps[2])
});
BEARER_TOKEN
.replace_all(&step, "Bearer <redacted>")
.into_owned()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn sanitize_masks_common_tokens() {
let input =
"Authorization: Bearer abc123 token=xyz456 KEY=value Authorization=Token secretStuff";
let sanitized = sanitize_message(input);
assert!(!sanitized.contains("abc123"));
assert!(!sanitized.contains("xyz456"));
assert!(!sanitized.contains("secretStuff"));
// The Authorization pattern redacts through end-of-line, so everything
// after "Authorization:" collapses into a single placeholder.
assert_eq!(sanitized, "Authorization: <redacted>");
}
#[test]
fn ring_buffer_discards_old_entries() {
install_global_logger();
let initial_latest = STORE.latest_id();
for idx in 0..(MAX_ENTRIES as u64 + 10) {
let message = format!("warn #{idx}");
STORE.push(Level::Warn, "test", &message);
}
let entries = STORE.snapshot();
assert_eq!(entries.len(), MAX_ENTRIES);
assert!(entries.first().unwrap().id > initial_latest);
}
}
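Taken together, the store and per-app state support a simple poll loop. A usage sketch, assuming the standard `log` macros feed the installed logger:

fn debug_log_usage_sketch() {
    // Route `log` output into the ring buffer (idempotent across calls).
    install_global_logger();
    let mut state = DebugLogState::new();

    log::warn!(target: "owlen::sketch", "provider responded slowly");

    // Warnings accumulate as "unseen" until the panel is shown…
    assert!(state.has_unseen());
    state.set_visible(true); // …which acknowledges them.
    assert!(!state.has_unseen());

    // Rendering always works from a full snapshot of retained entries.
    assert!(state.entries().iter().any(|e| e.message.contains("slowly")));
}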

View File

@@ -0,0 +1,307 @@
use std::{
collections::HashMap,
fs,
path::{Path, PathBuf},
};
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers};
use log::warn;
use owlen_core::{config::default_config_path, ui::InputMode};
use serde::Deserialize;
use crate::commands::registry::{AppCommand, CommandRegistry};
const DEFAULT_KEYMAP: &str = include_str!("../../keymap.toml");
#[derive(Debug, Clone)]
pub struct Keymap {
bindings: HashMap<(InputMode, KeyPattern), AppCommand>,
}
impl Keymap {
pub fn load(custom_path: Option<&str>, registry: &CommandRegistry) -> Self {
let mut content = None;
if let Some(path) = custom_path.and_then(expand_path) {
if let Ok(text) = fs::read_to_string(&path) {
content = Some(text);
} else {
warn!(
"Failed to read keymap from {}. Falling back to defaults.",
path.display()
);
}
}
if content.is_none() {
let default_path = default_config_keymap_path();
if let Some(path) = default_path {
if let Ok(text) = fs::read_to_string(&path) {
content = Some(text);
}
}
}
let data = content.unwrap_or_else(|| DEFAULT_KEYMAP.to_string());
let parsed: KeymapConfig = toml::from_str(&data).unwrap_or_else(|err| {
warn!("Failed to parse keymap: {err}. Using built-in defaults.");
toml::from_str(DEFAULT_KEYMAP).expect("embedded keymap should parse successfully")
});
let mut bindings = HashMap::new();
for entry in parsed.bindings {
let mode = match parse_mode(&entry.mode) {
Some(mode) => mode,
None => {
warn!("Unknown input mode '{}' in keymap binding", entry.mode);
continue;
}
};
let command = match registry.resolve(&entry.command) {
Some(cmd) => cmd,
None => {
warn!("Unknown command '{}' in keymap binding", entry.command);
continue;
}
};
for key in entry.keys.into_iter() {
match KeyPattern::from_str(&key) {
Some(pattern) => {
bindings.insert((mode, pattern), command);
}
None => warn!(
"Unrecognised key specification '{}' for mode {}",
key, entry.mode
),
}
}
}
Self { bindings }
}
pub fn resolve(&self, mode: InputMode, event: &KeyEvent) -> Option<AppCommand> {
let pattern = KeyPattern::from_event(event)?;
self.bindings.get(&(mode, pattern)).copied()
}
}
#[derive(Debug, Deserialize)]
struct KeymapConfig {
#[serde(default, rename = "binding")]
bindings: Vec<KeyBindingConfig>,
}
#[derive(Debug, Deserialize)]
struct KeyBindingConfig {
mode: String,
command: String,
keys: KeyList,
}
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum KeyList {
Single(String),
Multiple(Vec<String>),
}
impl KeyList {
fn into_iter(self) -> Vec<String> {
match self {
KeyList::Single(key) => vec![key],
KeyList::Multiple(keys) => keys,
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct KeyPattern {
code: KeyCodeKind,
modifiers: KeyModifiers,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum KeyCodeKind {
Char(char),
Enter,
Tab,
BackTab,
Backspace,
Esc,
Up,
Down,
Left,
Right,
PageUp,
PageDown,
Home,
End,
F(u8),
}
impl KeyPattern {
fn from_event(event: &KeyEvent) -> Option<Self> {
let code = match event.code {
KeyCode::Char(c) => KeyCodeKind::Char(c),
KeyCode::Enter => KeyCodeKind::Enter,
KeyCode::Tab => KeyCodeKind::Tab,
KeyCode::BackTab => KeyCodeKind::BackTab,
KeyCode::Backspace => KeyCodeKind::Backspace,
KeyCode::Esc => KeyCodeKind::Esc,
KeyCode::Up => KeyCodeKind::Up,
KeyCode::Down => KeyCodeKind::Down,
KeyCode::Left => KeyCodeKind::Left,
KeyCode::Right => KeyCodeKind::Right,
KeyCode::PageUp => KeyCodeKind::PageUp,
KeyCode::PageDown => KeyCodeKind::PageDown,
KeyCode::Home => KeyCodeKind::Home,
KeyCode::End => KeyCodeKind::End,
KeyCode::F(n) => KeyCodeKind::F(n),
_ => return None,
};
Some(Self {
code,
modifiers: normalize_modifiers(event.modifiers),
})
}
fn from_str(spec: &str) -> Option<Self> {
let tokens: Vec<&str> = spec
.split('+')
.map(|token| token.trim())
.filter(|token| !token.is_empty())
.collect();
if tokens.is_empty() {
return None;
}
let mut modifiers = KeyModifiers::empty();
let key_token = tokens.last().copied().unwrap();
for token in tokens[..tokens.len().saturating_sub(1)].iter() {
match token.to_ascii_lowercase().as_str() {
"ctrl" | "control" => modifiers.insert(KeyModifiers::CONTROL),
"alt" | "option" => modifiers.insert(KeyModifiers::ALT),
"shift" => modifiers.insert(KeyModifiers::SHIFT),
other => warn!("Unknown modifier '{other}' in key binding '{spec}'"),
}
}
let code = parse_key_token(key_token, &mut modifiers)?;
Some(Self {
code,
modifiers: normalize_modifiers(modifiers),
})
}
}
fn parse_key_token(token: &str, modifiers: &mut KeyModifiers) -> Option<KeyCodeKind> {
let token_lower = token.to_ascii_lowercase();
let code = match token_lower.as_str() {
"enter" | "return" => KeyCodeKind::Enter,
"tab" => {
if modifiers.contains(KeyModifiers::SHIFT) {
modifiers.remove(KeyModifiers::SHIFT);
KeyCodeKind::BackTab
} else {
KeyCodeKind::Tab
}
}
"backtab" => KeyCodeKind::BackTab,
"backspace" | "bs" => KeyCodeKind::Backspace,
"esc" | "escape" => KeyCodeKind::Esc,
"up" => KeyCodeKind::Up,
"down" => KeyCodeKind::Down,
"left" => KeyCodeKind::Left,
"right" => KeyCodeKind::Right,
"pageup" | "page_up" | "pgup" => KeyCodeKind::PageUp,
"pagedown" | "page_down" | "pgdn" => KeyCodeKind::PageDown,
"home" => KeyCodeKind::Home,
"end" => KeyCodeKind::End,
token if token.starts_with('f') && token.len() > 1 => {
let num = token[1..].parse::<u8>().ok()?;
KeyCodeKind::F(num)
}
"space" => KeyCodeKind::Char(' '),
"semicolon" => KeyCodeKind::Char(';'),
"slash" => KeyCodeKind::Char('/'),
_ => {
let chars: Vec<char> = token.chars().collect();
if chars.len() == 1 {
KeyCodeKind::Char(chars[0])
} else {
return None;
}
}
};
Some(code)
}
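One non-obvious rule worth pinning down: "shift+tab" is folded into BackTab with the SHIFT bit cleared. A sketch (field access works because tests live in this module):

#[cfg(test)]
mod key_spec_tests {
    use super::*;

    #[test]
    fn shift_tab_normalizes_to_backtab() {
        let pattern = KeyPattern::from_str("shift+tab").expect("valid spec");
        assert_eq!(pattern.code, KeyCodeKind::BackTab);
        assert!(pattern.modifiers.is_empty());
    }
}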
fn parse_mode(mode: &str) -> Option<InputMode> {
match mode.to_ascii_lowercase().as_str() {
"normal" => Some(InputMode::Normal),
"editing" => Some(InputMode::Editing),
"command" => Some(InputMode::Command),
"visual" => Some(InputMode::Visual),
"provider_selection" | "provider" => Some(InputMode::ProviderSelection),
"model_selection" | "model" => Some(InputMode::ModelSelection),
"help" => Some(InputMode::Help),
"session_browser" | "sessions" => Some(InputMode::SessionBrowser),
"theme_browser" | "themes" => Some(InputMode::ThemeBrowser),
"repo_search" | "search" => Some(InputMode::RepoSearch),
"symbol_search" | "symbols" => Some(InputMode::SymbolSearch),
_ => None,
}
}
fn default_config_keymap_path() -> Option<PathBuf> {
let config_path = default_config_path();
let dir = config_path.parent()?;
Some(dir.join("keymap.toml"))
}
fn expand_path(path: &str) -> Option<PathBuf> {
if path.trim().is_empty() {
return None;
}
let expanded = shellexpand::tilde(path);
let candidate = Path::new(expanded.as_ref()).to_path_buf();
Some(candidate)
}
fn normalize_modifiers(modifiers: KeyModifiers) -> KeyModifiers {
modifiers
}
#[cfg(test)]
mod tests {
use super::*;
use crossterm::event::{KeyCode, KeyModifiers};
#[test]
fn resolve_binding_from_default_keymap() {
let registry = CommandRegistry::new();
assert!(registry.resolve("model.open_all").is_some());
let parsed: KeymapConfig = toml::from_str(DEFAULT_KEYMAP).unwrap();
assert!(!parsed.bindings.is_empty());
let keymap = Keymap::load(None, &registry);
let event = KeyEvent::new(KeyCode::Char('m'), KeyModifiers::NONE);
assert!(
!keymap.bindings.is_empty(),
"expected default keymap to provide bindings"
);
assert_eq!(
keymap.resolve(InputMode::Normal, &event),
Some(AppCommand::OpenModelPicker(None))
);
}
}
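For reference, the shape `KeymapConfig` expects from keymap.toml, sketched inline; `model.open_all` is taken from the test above, while `debug_log.toggle` is a hypothetical command name used purely for illustration:

fn keymap_toml_sketch() {
    // `keys` accepts a single string or a list (the untagged KeyList enum).
    let sample = r#"
        [[binding]]
        mode = "normal"
        command = "model.open_all"
        keys = "m"

        [[binding]]
        mode = "normal"
        command = "debug_log.toggle"   # hypothetical command name
        keys = ["F12", "ctrl+shift+d"]
    "#;
    let parsed: KeymapConfig = toml::from_str(sample).expect("sample should parse");
    assert_eq!(parsed.bindings.len(), 2);
}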

View File

@@ -6,16 +6,20 @@
//! to test in isolation.
mod command_palette;
mod debug_log;
mod file_icons;
mod file_tree;
mod keymap;
mod search;
mod workspace;
pub use command_palette::{CommandPalette, ModelPaletteEntry, PaletteGroup, PaletteSuggestion};
pub use debug_log::{DebugLogEntry, DebugLogState, install_global_logger};
pub use file_icons::{FileIconResolver, FileIconSet, IconDetection};
pub use file_tree::{
FileNode, FileTreeState, FilterMode as FileFilterMode, GitDecoration, VisibleFileEntry,
};
pub use keymap::Keymap;
pub use search::{
RepoSearchFile, RepoSearchMatch, RepoSearchMessage, RepoSearchRow, RepoSearchRowKind,
RepoSearchState, SymbolEntry, SymbolKind, SymbolSearchMessage, SymbolSearchState,

View File

@@ -1,3 +1,4 @@
use log::Level;
use pathdiff::diff_paths;
use ratatui::Frame;
use ratatui::layout::{Alignment, Constraint, Direction, Layout, Rect};
@@ -11,19 +12,16 @@ use tui_textarea::TextArea;
use unicode_segmentation::UnicodeSegmentation;
use unicode_width::UnicodeWidthStr;
use crate::chat_app::{
ChatApp, HELP_TAB_COUNT, MIN_MESSAGE_CARD_WIDTH, MessageRenderContext, ModelScope,
ModelSelectorItemKind,
};
use crate::chat_app::{ChatApp, HELP_TAB_COUNT, MIN_MESSAGE_CARD_WIDTH, MessageRenderContext};
use crate::highlight;
use crate::state::{
CodePane, EditorTab, FileFilterMode, FileNode, LayoutNode, PaletteGroup, PaneId,
RepoSearchRowKind, SplitAxis, VisibleFileEntry,
};
use crate::toast::{Toast, ToastLevel};
use owlen_core::model::DetailedModelInfo;
use crate::widgets::model_picker::render_model_picker;
use owlen_core::theme::Theme;
use owlen_core::types::{ModelInfo, Role};
use owlen_core::types::Role;
use owlen_core::ui::{FocusedPanel, InputMode, RoleLabelDisplay};
use textwrap::wrap;
@@ -337,7 +335,7 @@ pub fn render_chat(frame: &mut Frame<'_>, app: &mut ChatApp) {
} else {
match app.mode() {
InputMode::ProviderSelection => render_provider_selector(frame, app),
InputMode::ModelSelection => render_model_selector(frame, app),
InputMode::ModelSelection => render_model_picker(frame, app),
InputMode::Help => render_help(frame, app),
InputMode::SessionBrowser => render_session_browser(frame, app),
InputMode::ThemeBrowser => render_theme_browser(frame, app),
@@ -369,6 +367,20 @@ pub fn render_chat(frame: &mut Frame<'_>, app: &mut ChatApp) {
render_code_workspace(frame, area, app);
}
if app.is_debug_log_visible() {
// Pin the panel to the bottom third of the content area (at least six
// rows) and skip rendering when there is no room for a useful view.
let min_height = 6;
let computed_height = content_area.height.saturating_div(3).max(min_height);
let panel_height = computed_height.min(content_area.height);
if panel_height >= 4 {
let y = content_area
.y
.saturating_add(content_area.height.saturating_sub(panel_height));
let log_area = Rect::new(content_area.x, y, content_area.width, panel_height);
render_debug_log_panel(frame, log_area, app);
}
}
render_toasts(frame, app, content_area);
}
@@ -1967,6 +1979,134 @@ fn render_system_output(frame: &mut Frame<'_>, area: Rect, app: &ChatApp, messag
frame.render_widget(paragraph, area);
}
fn render_debug_log_panel(frame: &mut Frame<'_>, area: Rect, app: &ChatApp) {
let theme = app.theme();
frame.render_widget(Clear, area);
let title = Line::from(vec![
Span::styled(
" Debug log ",
Style::default()
.fg(theme.pane_header_active)
.add_modifier(Modifier::BOLD),
),
Span::styled(
"warnings & errors",
Style::default()
.fg(theme.pane_hint_text)
.add_modifier(Modifier::DIM),
),
]);
let block = Block::default()
.borders(Borders::ALL)
.border_style(Style::default().fg(theme.focused_panel_border))
.style(Style::default().bg(theme.background).fg(theme.text))
.title(title);
let inner = block.inner(area);
frame.render_widget(block, area);
if inner.width == 0 || inner.height == 0 {
return;
}
let entries = app.debug_log_entries();
let available_rows = inner.height as usize;
let mut lines: Vec<Line> = Vec::new();
if entries.is_empty() {
lines.push(Line::styled(
"No warnings captured this session.",
Style::default()
.fg(theme.pane_hint_text)
.add_modifier(Modifier::DIM),
));
} else {
let total_entries = entries.len();
// Keep only the newest entries that fit, displayed oldest-first.
let mut subset: Vec<_> = entries.into_iter().rev().take(available_rows).collect();
subset.reverse();
// When the panel is full and older entries were dropped, surrender one
// row to the "older entries not shown" banner below.
if total_entries > subset.len() && subset.len() == available_rows && !subset.is_empty() {
subset.remove(0);
}
let overflow = total_entries.saturating_sub(subset.len());
if overflow > 0 {
lines.push(Line::styled(
format!("{overflow} older entries not shown"),
Style::default()
.fg(theme.pane_hint_text)
.add_modifier(Modifier::DIM),
));
}
for entry in subset {
let (label, badge_style, message_style) = debug_level_styles(entry.level, theme);
let timestamp = entry.timestamp.format("%H:%M:%S");
let mut spans = vec![
Span::styled(format!(" {label} "), badge_style),
Span::raw(" "),
Span::styled(
timestamp.to_string(),
Style::default()
.fg(theme.pane_hint_text)
.add_modifier(Modifier::DIM),
),
];
if !entry.target.is_empty() {
spans.push(Span::raw(" "));
spans.push(Span::styled(
entry.target,
Style::default().fg(theme.pane_header_active),
));
}
spans.push(Span::raw(" "));
spans.push(Span::styled(entry.message, message_style));
lines.push(Line::from(spans));
}
}
let paragraph = Paragraph::new(lines)
.wrap(Wrap { trim: true })
.alignment(Alignment::Left)
.style(Style::default().bg(theme.background));
frame.render_widget(paragraph, inner);
}
fn debug_level_styles(level: Level, theme: &Theme) -> (&'static str, Style, Style) {
match level {
Level::Error => (
"ERR",
Style::default()
.fg(theme.background)
.bg(theme.error)
.add_modifier(Modifier::BOLD),
Style::default().fg(theme.error),
),
Level::Warn => (
"WARN",
Style::default()
.fg(theme.background)
.bg(theme.agent_action)
.add_modifier(Modifier::BOLD),
Style::default().fg(theme.agent_action),
),
_ => (
"INFO",
Style::default()
.fg(theme.background)
.bg(theme.info)
.add_modifier(Modifier::BOLD),
Style::default().fg(theme.text),
),
}
}
fn calculate_wrapped_line_count<'a, I>(lines: I, available_width: u16) -> usize
where
I: IntoIterator<Item = &'a str>,
@@ -2653,429 +2793,6 @@ fn render_provider_selector(frame: &mut Frame<'_>, app: &ChatApp) {
frame.render_stateful_widget(list, area, &mut state);
}
fn model_badge_icons(model: &ModelInfo) -> Vec<&'static str> {
let mut badges = Vec::new();
if model.supports_tools {
badges.push("🔧");
}
if model_has_feature(model, &["think", "reason"]) {
badges.push("🧠");
}
if model_has_feature(model, &["vision", "multimodal", "image"]) {
badges.push("👁️");
}
if model_has_feature(model, &["audio", "speech", "voice"]) {
badges.push("🎧");
}
badges
}
fn model_has_feature(model: &ModelInfo, keywords: &[&str]) -> bool {
let name_lower = model.name.to_ascii_lowercase();
if keywords.iter().any(|kw| name_lower.contains(kw)) {
return true;
}
if let Some(description) = &model.description {
let description_lower = description.to_ascii_lowercase();
if keywords.iter().any(|kw| description_lower.contains(kw)) {
return true;
}
}
model.capabilities.iter().any(|cap| {
let lower = cap.to_ascii_lowercase();
keywords.iter().any(|kw| lower.contains(kw))
})
}
fn render_model_selector(frame: &mut Frame<'_>, app: &ChatApp) {
let theme = app.theme();
let area = frame.area();
if area.width == 0 || area.height == 0 {
return;
}
let selector_items = app.model_selector_items();
if selector_items.is_empty() {
return;
}
let max_width: u16 = 80;
let min_width: u16 = 50;
let mut width = area.width.min(max_width);
if area.width >= min_width {
width = width.max(min_width);
}
width = width.max(1);
let mut height = (selector_items.len().clamp(1, 10) as u16) * 3 + 6;
height = height.clamp(6, area.height);
let x = area.x + (area.width.saturating_sub(width)) / 2;
let mut y = area.y + (area.height.saturating_sub(height)) / 3;
if y < area.y {
y = area.y;
}
let popup_area = Rect::new(x, y, width, height);
frame.render_widget(Clear, popup_area);
let title_line = Line::from(vec![
Span::styled(
" Model Selector ",
Style::default().fg(theme.info).add_modifier(Modifier::BOLD),
),
Span::styled(
format!("· Provider: {}", app.selected_provider),
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM),
),
]);
let block = Block::default()
.title(title_line)
.borders(Borders::ALL)
.border_style(Style::default().fg(theme.info))
.style(Style::default().bg(theme.background).fg(theme.text));
let inner = block.inner(popup_area);
frame.render_widget(block, popup_area);
if inner.width == 0 || inner.height == 0 {
return;
}
let highlight_symbol = " ";
let highlight_width = UnicodeWidthStr::width(highlight_symbol);
let max_line_width = inner.width.saturating_sub(highlight_width as u16).max(1) as usize;
let layout = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Min(4), Constraint::Length(2)])
.split(inner);
let active_model_id = app.selected_model();
let mut items: Vec<ListItem> = Vec::new();
for item in selector_items.iter() {
match item.kind() {
ModelSelectorItemKind::Header { provider, expanded } => {
let marker = if *expanded { "▾" } else { "▸" };
let line = clip_line_to_width(
Line::from(vec![
Span::styled(
marker,
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::BOLD),
),
Span::raw(" "),
Span::styled(
provider.clone(),
Style::default()
.fg(theme.mode_command)
.add_modifier(Modifier::BOLD),
),
]),
max_line_width,
);
items.push(ListItem::new(vec![line]).style(Style::default().bg(theme.background)));
}
ModelSelectorItemKind::Scope { label, scope, .. } => {
let (fg, modifier) = match scope {
ModelScope::Local => (theme.mode_normal, Modifier::BOLD),
ModelScope::Cloud => (theme.mode_help, Modifier::BOLD),
ModelScope::Other(_) => (theme.placeholder, Modifier::ITALIC),
};
let style = Style::default().fg(fg).add_modifier(modifier);
let line = clip_line_to_width(
Line::from(Span::styled(format!(" {label}"), style)),
max_line_width,
);
items.push(ListItem::new(vec![line]).style(Style::default().bg(theme.background)));
}
ModelSelectorItemKind::Model { model_index, .. } => {
let mut lines: Vec<Line<'static>> = Vec::new();
if let Some(model) = app.model_info_by_index(*model_index) {
let badges = model_badge_icons(model);
let detail = app.cached_model_detail(&model.id);
let (title, metadata) = build_model_selector_label(
model,
detail,
&badges,
model.id == active_model_id,
);
lines.push(clip_line_to_width(
Line::from(Span::styled(title, Style::default().fg(theme.text))),
max_line_width,
));
if let Some(meta) = metadata {
lines.push(clip_line_to_width(
Line::from(Span::styled(
meta,
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM),
)),
max_line_width,
));
}
} else {
lines.push(clip_line_to_width(
Line::from(Span::styled(
" <model unavailable>",
Style::default().fg(theme.error),
)),
max_line_width,
));
}
items.push(ListItem::new(lines).style(Style::default().bg(theme.background)));
}
ModelSelectorItemKind::Empty { provider, message } => {
let text = message
.as_ref()
.map(|msg| format!(" {msg}"))
.unwrap_or_else(|| format!(" (no models configured for {provider})"));
let is_unavailable = message
.as_ref()
.map(|msg| msg.to_ascii_lowercase().contains("unavailable"))
.unwrap_or(false);
let style = if is_unavailable {
Style::default()
.fg(theme.error)
.add_modifier(Modifier::BOLD)
} else {
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM | Modifier::ITALIC)
};
let line =
clip_line_to_width(Line::from(Span::styled(text, style)), max_line_width);
items.push(ListItem::new(vec![line]).style(Style::default().bg(theme.background)));
}
}
}
let highlight_style = Style::default()
.bg(theme.selection_bg)
.fg(theme.selection_fg)
.add_modifier(Modifier::BOLD);
let mut state = ListState::default();
state.select(app.selected_model_item());
let list = List::new(items)
.highlight_style(highlight_style)
.highlight_symbol(" ")
.style(Style::default().bg(theme.background).fg(theme.text));
frame.render_stateful_widget(list, layout[0], &mut state);
let footer = Paragraph::new(Line::from(Span::styled(
"Enter: select · Space: toggle provider · ←/→ collapse/expand · Esc: cancel",
Style::default().fg(theme.placeholder),
)))
.alignment(Alignment::Center)
.style(Style::default().bg(theme.background).fg(theme.placeholder));
frame.render_widget(footer, layout[1]);
}
fn clip_line_to_width(line: Line<'_>, max_width: usize) -> Line<'static> {
if max_width == 0 {
return Line::from(Vec::<Span<'static>>::new());
}
let mut used = 0usize;
let mut clipped: Vec<Span<'static>> = Vec::new();
for span in line.spans {
if used >= max_width {
break;
}
let text = span.content.to_string();
let span_width = UnicodeWidthStr::width(text.as_str());
if used + span_width <= max_width {
if !text.is_empty() {
clipped.push(Span::styled(text, span.style));
}
used += span_width;
} else {
let mut buf = String::new();
for grapheme in span.content.as_ref().graphemes(true) {
let g_width = UnicodeWidthStr::width(grapheme);
if g_width == 0 {
buf.push_str(grapheme);
continue;
}
if used + g_width > max_width {
break;
}
buf.push_str(grapheme);
used += g_width;
}
if !buf.is_empty() {
clipped.push(Span::styled(buf, span.style));
}
break;
}
}
Line::from(clipped)
}
fn build_model_selector_label(
model: &ModelInfo,
detail: Option<&DetailedModelInfo>,
badges: &[&'static str],
is_current: bool,
) -> (String, Option<String>) {
let scope = ChatApp::model_scope_from_capabilities(model);
let scope_icon = ChatApp::scope_icon(&scope);
let scope_label = ChatApp::scope_display_name(&scope);
let mut display_name = if model.name.trim().is_empty() {
model.id.clone()
} else {
model.name.clone()
};
if !display_name.eq_ignore_ascii_case(&model.id) {
display_name.push_str(&format!(" · {}", model.id));
}
let mut title = format!(" {} {}", scope_icon, display_name);
if !badges.is_empty() {
title.push(' ');
title.push_str(&badges.join(" "));
}
if is_current {
title.push_str(" ✓");
}
let mut meta_parts: Vec<String> = Vec::new();
let mut seen_meta: HashSet<String> = HashSet::new();
let mut push_meta = |value: String| {
let trimmed = value.trim();
if trimmed.is_empty() {
return;
}
let key = trimmed.to_ascii_lowercase();
if seen_meta.insert(key) {
meta_parts.push(trimmed.to_string());
}
};
if !scope_label.eq_ignore_ascii_case("unknown") {
push_meta(scope_label.clone());
}
if let Some(detail) = detail {
if let Some(ctx) = detail.context_length {
push_meta(format!("max tokens {}", ctx));
} else if let Some(ctx) = model.context_window {
push_meta(format!("max tokens {}", ctx));
}
if let Some(parameters) = detail
.parameter_size
.as_ref()
.or(detail.parameters.as_ref())
&& !parameters.trim().is_empty()
{
push_meta(parameters.trim().to_string());
}
if let Some(arch) = detail.architecture.as_deref() {
let trimmed = arch.trim();
if !trimmed.is_empty() {
push_meta(format!("arch {}", trimmed));
}
} else if let Some(family) = detail.family.as_deref() {
let trimmed = family.trim();
if !trimmed.is_empty() {
push_meta(format!("family {}", trimmed));
}
} else if !detail.families.is_empty() {
let families = detail
.families
.iter()
.map(|f| f.trim())
.filter(|f| !f.is_empty())
.take(2)
.collect::<Vec<_>>()
.join("/");
if !families.is_empty() {
push_meta(format!("family {}", families));
}
}
if let Some(embedding) = detail.embedding_length {
push_meta(format!("embedding {}", embedding));
}
if let Some(size) = detail.size {
push_meta(format_short_size(size));
}
if let Some(quant) = detail
.quantization
.as_ref()
.filter(|q| !q.trim().is_empty())
{
push_meta(format!("quant {}", quant.trim()));
}
} else if let Some(ctx) = model.context_window {
push_meta(format!("max tokens {}", ctx));
}
if let Some(desc) = model.description.as_deref() {
let trimmed = desc.trim();
if !trimmed.is_empty() {
meta_parts.push(ellipsize(trimmed, 80));
}
}
let metadata = if meta_parts.is_empty() {
None
} else {
Some(format!(" {}", meta_parts.join(" · ")))
};
(title, metadata)
}
fn ellipsize(text: &str, max_chars: usize) -> String {
if text.chars().count() <= max_chars {
return text.to_string();
}
let target = max_chars.saturating_sub(1).max(1);
let mut truncated = String::new();
for ch in text.chars().take(target) {
truncated.push(ch);
}
truncated.push('…');
truncated
}
fn format_short_size(bytes: u64) -> String {
if bytes >= 1_000_000_000 {
format!("{:.1} GB", bytes as f64 / 1_000_000_000_f64)
} else if bytes >= 1_000_000 {
format!("{:.1} MB", bytes as f64 / 1_000_000_f64)
} else if bytes >= 1_000 {
format!("{:.1} KB", bytes as f64 / 1_000_f64)
} else {
format!("{} B", bytes)
}
}
fn render_consent_dialog(frame: &mut Frame<'_>, app: &ChatApp) {
let theme = app.theme();
@@ -3232,67 +2949,6 @@ fn render_consent_dialog(frame: &mut Frame<'_>, app: &ChatApp) {
frame.render_widget(paragraph, area);
}
#[cfg(test)]
mod tests {
use super::*;
fn model_with(capabilities: Vec<&str>, description: Option<&str>) -> ModelInfo {
ModelInfo {
id: "model".into(),
name: "model".into(),
description: description.map(|s| s.to_string()),
provider: "test".into(),
context_window: None,
capabilities: capabilities.into_iter().map(|s| s.to_string()).collect(),
supports_tools: false,
}
}
#[test]
fn badges_include_tool_icon() {
let model = ModelInfo {
id: "tool-model".into(),
name: "tool-model".into(),
description: None,
provider: "test".into(),
context_window: None,
capabilities: vec![],
supports_tools: true,
};
assert!(model_badge_icons(&model).contains(&"🔧"));
}
#[test]
fn badges_detect_thinking_capability() {
let model = model_with(vec!["Thinking"], None);
let icons = model_badge_icons(&model);
assert!(icons.contains(&"🧠"));
}
#[test]
fn badges_detect_vision_from_description() {
let model = model_with(vec!["chat"], Some("Supports multimodal vision"));
let icons = model_badge_icons(&model);
assert!(icons.contains(&"👁️"));
}
#[test]
fn badges_detect_audio_from_name() {
let model = ModelInfo {
id: "voice-specialist".into(),
name: "Voice-Specialist".into(),
description: None,
provider: "test".into(),
context_window: None,
capabilities: vec![],
supports_tools: false,
};
let icons = model_badge_icons(&model);
assert!(icons.contains(&"🎧"));
}
}
fn render_privacy_settings(frame: &mut Frame<'_>, area: Rect, app: &ChatApp) {
let theme = app.theme();
let config = app.config();
@@ -3431,6 +3087,7 @@ fn render_help(frame: &mut Frame<'_>, app: &ChatApp) {
Line::from(" Ctrl+↑/↓ → adjust chat ↔ thinking split"),
Line::from(" Alt+←/→/↑/↓ → resize focused code pane"),
Line::from(" g then t → expand files panel and focus it"),
Line::from(" F12 → toggle debug log panel"),
Line::from(" F1 or ? → toggle this help overlay"),
Line::from(""),
Line::from(vec![Span::styled(
@@ -3573,6 +3230,7 @@ fn render_help(frame: &mut Frame<'_>, app: &ChatApp) {
)]),
Line::from(" :h, :help → show this help"),
Line::from(" F1 or ? → toggle help overlay"),
Line::from(" F12 → toggle debug log panel"),
Line::from(" :files, :explorer → toggle files panel"),
Line::from(" :markdown [on|off] → toggle markdown rendering"),
Line::from(" Ctrl+←/→ → resize files panel"),

View File

@@ -0,0 +1,3 @@
//! Reusable widgets composed specifically for the Owlen TUI.
pub mod model_picker;

View File

@@ -0,0 +1,864 @@
use std::collections::HashSet;
use owlen_core::provider::{AnnotatedModelInfo, ProviderStatus, ProviderType};
use owlen_core::types::ModelInfo;
use ratatui::{
Frame,
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
text::{Line, Span},
widgets::{Block, Borders, Clear, List, ListItem, ListState, Paragraph},
};
use unicode_segmentation::UnicodeSegmentation;
use unicode_width::UnicodeWidthStr;
use crate::chat_app::{
ChatApp, HighlightMask, ModelAvailabilityState, ModelScope, ModelSearchInfo,
ModelSelectorItemKind,
};
/// Filtering modes for the model picker popup.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FilterMode {
#[default]
All,
LocalOnly,
CloudOnly,
Available,
}
pub fn render_model_picker(frame: &mut Frame<'_>, app: &ChatApp) {
let theme = app.theme();
let area = frame.area();
if area.width == 0 || area.height == 0 {
return;
}
let selector_items = app.model_selector_items();
if selector_items.is_empty() {
return;
}
let search_query = app.model_search_query().trim().to_string();
let search_active = !search_query.is_empty();
let max_width = area.width.min(90);
let min_width = area.width.min(56);
let width = area.width.min(max_width).max(min_width).max(1);
let visible_models = app.visible_model_count();
let min_rows: usize = if search_active { 5 } else { 4 };
let max_rows: usize = 12;
let row_estimate = visible_models.max(min_rows).min(max_rows);
let mut height = (row_estimate as u16) * 3 + 8;
let max_height = area.height.min(32);
// Cap the minimum by the maximum so `clamp` cannot panic on very short
// terminals (Rust's clamp requires min <= max).
let min_height = area.height.clamp(8, 12).min(max_height);
height = height.clamp(min_height, max_height);
let x = area.x + (area.width.saturating_sub(width)) / 2;
let mut y = area.y + (area.height.saturating_sub(height)) / 3;
if y < area.y {
y = area.y;
}
let popup_area = Rect::new(x, y, width, height);
frame.render_widget(Clear, popup_area);
let mut title_spans = vec![
Span::styled(
" Model Selector ",
Style::default().fg(theme.info).add_modifier(Modifier::BOLD),
),
Span::styled(
format!("· Provider: {}", app.selected_provider),
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM),
),
];
if app.model_filter_mode() != FilterMode::All {
title_spans.push(Span::raw(" "));
title_spans.push(filter_badge(app.model_filter_mode(), theme));
}
let block = Block::default()
.title(Line::from(title_spans))
.borders(Borders::ALL)
.border_style(Style::default().fg(theme.info))
.style(Style::default().bg(theme.background).fg(theme.text));
let inner = block.inner(popup_area);
frame.render_widget(block, popup_area);
if inner.width == 0 || inner.height == 0 {
return;
}
let layout = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Length(3),
Constraint::Min(4),
Constraint::Length(2),
])
.split(inner);
let matches = app.visible_model_count();
let search_prefix = Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM);
let bracket_style = Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM);
let caret_style = if search_active {
Style::default()
.fg(theme.selection_fg)
.add_modifier(Modifier::BOLD)
} else {
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM)
};
let mut search_spans = Vec::new();
search_spans.push(Span::styled("Search ▸ ", search_prefix));
search_spans.push(Span::styled("[", bracket_style));
search_spans.push(Span::styled(" ", bracket_style));
if search_active {
search_spans.push(Span::styled(
search_query.clone(),
Style::default()
.fg(theme.selection_fg)
.add_modifier(Modifier::BOLD),
));
} else {
search_spans.push(Span::styled(
"Type to search…",
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM),
));
}
search_spans.push(Span::styled(" ", bracket_style));
search_spans.push(Span::styled("▌", caret_style));
search_spans.push(Span::styled(" ", bracket_style));
search_spans.push(Span::styled("]", bracket_style));
search_spans.push(Span::raw(" "));
let suffix_label = if search_active { "match" } else { "model" };
search_spans.push(Span::styled(
format!(
"({} {}{})",
matches,
suffix_label,
if matches == 1 { "" } else { "s" }
),
Style::default().fg(theme.placeholder),
));
let search_line = Line::from(search_spans);
let instruction_line = if search_active {
Line::from(vec![
Span::styled("Backspace", Style::default().fg(theme.placeholder)),
Span::raw(": delete "),
Span::styled("Ctrl+U", Style::default().fg(theme.placeholder)),
Span::raw(": clear "),
Span::styled("Enter", Style::default().fg(theme.placeholder)),
Span::raw(": select "),
Span::styled("Esc", Style::default().fg(theme.placeholder)),
Span::raw(": close"),
])
} else {
Line::from(vec![
Span::styled("Enter", Style::default().fg(theme.placeholder)),
Span::raw(": select "),
Span::styled("Space", Style::default().fg(theme.placeholder)),
Span::raw(": toggle provider "),
Span::styled("Esc", Style::default().fg(theme.placeholder)),
Span::raw(": close"),
])
};
let search_paragraph = Paragraph::new(vec![search_line, instruction_line])
.style(Style::default().bg(theme.background).fg(theme.text));
frame.render_widget(search_paragraph, layout[0]);
let highlight_style = Style::default()
.fg(theme.selection_fg)
.bg(theme.selection_bg)
.add_modifier(Modifier::BOLD);
let highlight_symbol = " ";
let highlight_width = UnicodeWidthStr::width(highlight_symbol);
let max_line_width = layout[1]
.width
.saturating_sub(highlight_width as u16)
.max(1) as usize;
let active_model_id = app.selected_model();
let annotated = app.annotated_models();
let mut items: Vec<ListItem> = Vec::new();
for item in selector_items.iter() {
match item.kind() {
ModelSelectorItemKind::Header {
provider,
expanded,
status,
provider_type,
} => {
let mut spans = Vec::new();
spans.push(status_icon(*status, theme));
spans.push(Span::raw(" "));
let header_spans = render_highlighted_text(
provider,
if search_active {
app.provider_search_highlight(provider)
} else {
None
},
Style::default()
.fg(theme.mode_command)
.add_modifier(Modifier::BOLD),
highlight_style,
);
spans.extend(header_spans);
spans.push(Span::raw(" "));
spans.push(provider_type_badge(*provider_type, theme));
spans.push(Span::raw(" "));
spans.push(Span::styled(
if *expanded { "▾" } else { "▸" },
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM),
));
let line = clip_line_to_width(Line::from(spans), max_line_width);
items.push(ListItem::new(vec![line]).style(Style::default().bg(theme.background)));
}
ModelSelectorItemKind::Scope { label, status, .. } => {
let (style, icon) = scope_status_style(*status, theme);
let line = clip_line_to_width(
Line::from(vec![
Span::styled(icon, style),
Span::raw(" "),
Span::styled(label.clone(), style),
]),
max_line_width,
);
items.push(ListItem::new(vec![line]).style(Style::default().bg(theme.background)));
}
ModelSelectorItemKind::Model { model_index, .. } => {
let mut lines: Vec<Line<'static>> = Vec::new();
if let Some(model) = app.model_info_by_index(*model_index) {
let badges = model_badge_icons(model);
let detail = app.cached_model_detail(&model.id);
let annotated_model = annotated.get(*model_index);
let search_info = if search_active {
app.model_search_info(*model_index)
} else {
None
};
let (title, metadata) = build_model_selector_lines(
theme,
model,
annotated_model,
&badges,
detail,
model.id == active_model_id,
SearchRenderContext {
info: search_info,
highlight_style,
},
);
lines.push(clip_line_to_width(title, max_line_width));
if let Some(meta) = metadata {
lines.push(clip_line_to_width(meta, max_line_width));
}
} else {
lines.push(clip_line_to_width(
Line::from(Span::styled(
" <model unavailable>",
Style::default().fg(theme.error),
)),
max_line_width,
));
}
items.push(ListItem::new(lines).style(Style::default().bg(theme.background)));
}
ModelSelectorItemKind::Empty {
message, status, ..
} => {
let (style, icon) = empty_status_style(*status, theme);
let msg = message
.as_ref()
.map(|msg| msg.as_str())
.unwrap_or("(no models configured)");
let mut spans = vec![Span::styled(icon, style), Span::raw(" ")];
spans.push(Span::styled(format!(" {}", msg), style));
let line = clip_line_to_width(Line::from(spans), max_line_width);
items.push(ListItem::new(vec![line]).style(Style::default().bg(theme.background)));
}
}
}
let list = List::new(items)
.highlight_style(
Style::default()
.bg(theme.selection_bg)
.fg(theme.selection_fg)
.add_modifier(Modifier::BOLD),
)
.highlight_symbol(" ");
let mut state = ListState::default();
state.select(app.selected_model_item());
frame.render_stateful_widget(list, layout[1], &mut state);
let footer_text = if search_active {
"Enter: select · Space: toggle provider · Backspace: delete · Ctrl+U: clear"
} else {
"Enter: select · Space: toggle provider · Type to search · Esc: cancel"
};
let footer = Paragraph::new(Line::from(Span::styled(
footer_text,
Style::default().fg(theme.placeholder),
)))
.alignment(ratatui::layout::Alignment::Center)
.style(Style::default().bg(theme.background).fg(theme.placeholder));
frame.render_widget(footer, layout[2]);
}
fn status_icon(status: ProviderStatus, theme: &owlen_core::theme::Theme) -> Span<'static> {
let (symbol, color) = match status {
ProviderStatus::Available => ("✓", theme.info),
ProviderStatus::Unavailable => ("✗", theme.error),
ProviderStatus::RequiresSetup => ("⚠", Color::Yellow),
};
Span::styled(
symbol,
Style::default().fg(color).add_modifier(Modifier::BOLD),
)
}
fn provider_type_badge(
provider_type: ProviderType,
theme: &owlen_core::theme::Theme,
) -> Span<'static> {
let (label, color) = match provider_type {
ProviderType::Local => ("[Local]", theme.mode_normal),
ProviderType::Cloud => ("[Cloud]", theme.mode_help),
};
Span::styled(
label,
Style::default().fg(color).add_modifier(Modifier::BOLD),
)
}
fn scope_status_style(
status: ModelAvailabilityState,
theme: &owlen_core::theme::Theme,
) -> (Style, &'static str) {
match status {
ModelAvailabilityState::Available => (
Style::default().fg(theme.info).add_modifier(Modifier::BOLD),
"",
),
ModelAvailabilityState::Unavailable => (
Style::default()
.fg(theme.error)
.add_modifier(Modifier::BOLD),
"",
),
ModelAvailabilityState::Unknown => (
Style::default()
.fg(Color::Yellow)
.add_modifier(Modifier::BOLD),
"",
),
}
}
fn empty_status_style(
status: Option<ModelAvailabilityState>,
theme: &owlen_core::theme::Theme,
) -> (Style, &'static str) {
match status.unwrap_or(ModelAvailabilityState::Unknown) {
ModelAvailabilityState::Available => (
Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM),
"",
),
ModelAvailabilityState::Unavailable => (
Style::default()
.fg(theme.error)
.add_modifier(Modifier::BOLD),
"",
),
ModelAvailabilityState::Unknown => (
Style::default()
.fg(Color::Yellow)
.add_modifier(Modifier::BOLD),
"",
),
}
}
fn filter_badge(mode: FilterMode, theme: &owlen_core::theme::Theme) -> Span<'static> {
let label = match mode {
FilterMode::All => return Span::raw(""),
FilterMode::LocalOnly => "Local",
FilterMode::CloudOnly => "Cloud",
FilterMode::Available => "Available",
};
Span::styled(
format!("[{label}]"),
Style::default()
.fg(theme.mode_provider_selection)
.add_modifier(Modifier::BOLD),
)
}
fn render_highlighted_text(
text: &str,
highlight: Option<&HighlightMask>,
normal_style: Style,
highlight_style: Style,
) -> Vec<Span<'static>> {
if text.is_empty() {
return Vec::new();
}
let graphemes: Vec<&str> = UnicodeSegmentation::graphemes(text, true).collect();
let mask = highlight.map(|mask| mask.bits()).unwrap_or(&[]);
let mut spans: Vec<Span<'static>> = Vec::new();
let mut buffer = String::new();
let mut current_highlight = false;
for (idx, grapheme) in graphemes.iter().enumerate() {
let mark = mask.get(idx).copied().unwrap_or(false);
if idx == 0 {
current_highlight = mark;
}
if mark != current_highlight {
if !buffer.is_empty() {
let style = if current_highlight {
highlight_style
} else {
normal_style
};
spans.push(Span::styled(buffer.clone(), style));
buffer.clear();
}
current_highlight = mark;
}
buffer.push_str(grapheme);
}
if !buffer.is_empty() {
let style = if current_highlight {
highlight_style
} else {
normal_style
};
spans.push(Span::styled(buffer, style));
}
if spans.is_empty() {
spans.push(Span::styled(text.to_string(), normal_style));
}
spans
}
struct SearchRenderContext<'a> {
info: Option<&'a ModelSearchInfo>,
highlight_style: Style,
}
fn build_model_selector_lines<'a>(
theme: &owlen_core::theme::Theme,
model: &'a ModelInfo,
annotated: Option<&'a AnnotatedModelInfo>,
badges: &[&'static str],
detail: Option<&'a owlen_core::model::DetailedModelInfo>,
is_current: bool,
search: SearchRenderContext<'a>,
) -> (Line<'static>, Option<Line<'static>>) {
let provider_type = annotated
.map(|info| info.model.provider.provider_type)
.unwrap_or_else(|| match ChatApp::model_scope_from_capabilities(model) {
ModelScope::Cloud => ProviderType::Cloud,
ModelScope::Local => ProviderType::Local,
ModelScope::Other(_) => {
if model.provider.to_ascii_lowercase().contains("cloud") {
ProviderType::Cloud
} else {
ProviderType::Local
}
}
});
let mut spans: Vec<Span<'static>> = Vec::new();
spans.push(Span::raw(" "));
spans.push(provider_type_badge(provider_type, theme));
spans.push(Span::raw(" "));
let name_style = Style::default().fg(theme.text).add_modifier(Modifier::BOLD);
let id_style = Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM);
let name_trimmed = model.name.trim();
if !name_trimmed.is_empty() {
let name_spans = render_highlighted_text(
name_trimmed,
search.info.and_then(|info| info.name.as_ref()),
name_style,
search.highlight_style,
);
spans.extend(name_spans);
if !model.id.eq_ignore_ascii_case(name_trimmed) {
spans.push(Span::raw(" "));
spans.push(Span::styled("·", Style::default().fg(theme.placeholder)));
spans.push(Span::raw(" "));
let id_spans = render_highlighted_text(
model.id.as_str(),
search.info.and_then(|info| info.id.as_ref()),
id_style,
search.highlight_style,
);
spans.extend(id_spans);
}
} else {
let id_spans = render_highlighted_text(
model.id.as_str(),
search.info.and_then(|info| info.id.as_ref()),
name_style,
search.highlight_style,
);
spans.extend(id_spans);
}
if !badges.is_empty() {
spans.push(Span::raw(" "));
spans.push(Span::styled(
badges.join(" "),
Style::default().fg(theme.placeholder),
));
}
if is_current {
spans.push(Span::raw(" "));
spans.push(Span::styled(
"",
Style::default().fg(theme.info).add_modifier(Modifier::BOLD),
));
}
let mut meta_tags: Vec<String> = Vec::new();
let mut seen_meta: HashSet<String> = HashSet::new();
let mut push_meta = |value: String| {
let trimmed = value.trim();
if trimmed.is_empty() {
return;
}
let key = trimmed.to_ascii_lowercase();
if seen_meta.insert(key) {
meta_tags.push(trimmed.to_string());
}
};
let scope = ChatApp::model_scope_from_capabilities(model);
let scope_label = ChatApp::scope_display_name(&scope);
if !scope_label.eq_ignore_ascii_case("unknown") {
push_meta(scope_label.clone());
}
if let Some(detail) = detail {
if let Some(ctx) = detail.context_length {
push_meta(format!("max tokens {}", ctx));
} else if let Some(ctx) = model.context_window {
push_meta(format!("max tokens {}", ctx));
}
if let Some(parameters) = detail
.parameter_size
.as_ref()
.or(detail.parameters.as_ref())
&& !parameters.trim().is_empty()
{
push_meta(parameters.trim().to_string());
}
if let Some(arch) = detail.architecture.as_deref() {
let trimmed = arch.trim();
if !trimmed.is_empty() {
push_meta(format!("arch {}", trimmed));
}
} else if let Some(family) = detail.family.as_deref() {
let trimmed = family.trim();
if !trimmed.is_empty() {
push_meta(format!("family {}", trimmed));
}
} else if !detail.families.is_empty() {
let families = detail
.families
.iter()
.map(|f| f.trim())
.filter(|f| !f.is_empty())
.take(2)
.collect::<Vec<_>>()
.join("/");
if !families.is_empty() {
push_meta(format!("family {}", families));
}
}
if let Some(embedding) = detail.embedding_length {
push_meta(format!("embedding {}", embedding));
}
if let Some(size) = detail.size {
push_meta(format_short_size(size));
}
if let Some(quant) = detail
.quantization
.as_ref()
.filter(|q| !q.trim().is_empty())
{
push_meta(format!("quant {}", quant.trim()));
}
} else if let Some(ctx) = model.context_window {
push_meta(format!("max tokens {}", ctx));
}
let mut description_segment: Option<(String, Option<HighlightMask>)> = None;
if let Some(desc) = model.description.as_deref() {
let trimmed = desc.trim();
if !trimmed.is_empty() {
let (display, retained, truncated) = ellipsize(trimmed, 80);
let highlight = search
.info
.and_then(|info| info.description.as_ref())
.filter(|mask| mask.is_marked())
.map(|mask| {
if truncated {
mask.truncated(retained)
} else {
mask.clone()
}
});
description_segment = Some((display, highlight));
}
}
let metadata = if meta_tags.is_empty() && description_segment.is_none() {
None
} else {
let meta_style = Style::default()
.fg(theme.placeholder)
.add_modifier(Modifier::DIM);
let mut segments: Vec<Span<'static>> = Vec::new();
segments.push(Span::styled(" ", meta_style));
let mut first = true;
for tag in meta_tags {
if !first {
segments.push(Span::styled(" · ", meta_style));
}
segments.push(Span::styled(tag, meta_style));
first = false;
}
if let Some((text, highlight)) = description_segment {
if !first {
segments.push(Span::styled(" · ", meta_style));
}
if let Some(mask) = highlight.as_ref() {
let desc_spans = render_highlighted_text(
text.as_str(),
Some(mask),
meta_style,
search.highlight_style,
);
segments.extend(desc_spans);
} else {
segments.push(Span::styled(text, meta_style));
}
}
Some(Line::from(segments))
};
(Line::from(spans), metadata)
}
fn clip_line_to_width(line: Line<'_>, max_width: usize) -> Line<'static> {
if max_width == 0 {
return Line::from(Vec::<Span<'static>>::new());
}
let mut used = 0usize;
let mut clipped: Vec<Span<'static>> = Vec::new();
for span in line.spans {
if used >= max_width {
break;
}
let text = span.content.to_string();
let span_width = UnicodeWidthStr::width(text.as_str());
if used + span_width <= max_width {
if !text.is_empty() {
clipped.push(Span::styled(text, span.style));
}
used += span_width;
} else {
let mut buf = String::new();
for grapheme in span.content.as_ref().graphemes(true) {
let g_width = UnicodeWidthStr::width(grapheme);
if g_width == 0 {
buf.push_str(grapheme);
continue;
}
if used + g_width > max_width {
break;
}
buf.push_str(grapheme);
used += g_width;
}
if !buf.is_empty() {
clipped.push(Span::styled(buf, span.style));
}
break;
}
}
Line::from(clipped)
}
fn ellipsize(text: &str, max_graphemes: usize) -> (String, usize, bool) {
let graphemes: Vec<&str> = UnicodeSegmentation::graphemes(text, true).collect();
if graphemes.len() <= max_graphemes {
return (text.to_string(), graphemes.len(), false);
}
let keep = max_graphemes.saturating_sub(1).max(1);
let mut truncated = String::new();
for grapheme in graphemes.iter().take(keep) {
truncated.push_str(grapheme);
}
truncated.push('…');
(truncated, keep, true)
}
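A worked example of the grapheme-based truncation; the returned tuple is `(display, graphemes_kept, was_truncated)`, and `graphemes_kept` is what `HighlightMask::truncated` is fed above:

#[cfg(test)]
mod ellipsize_tests {
    use super::*;

    #[test]
    fn reserves_room_for_the_ellipsis() {
        // 23 graphemes, budget 10: keep 10 - 1 = 9, then append '…'.
        let (display, kept, truncated) = ellipsize("a very long description", 10);
        assert_eq!(display, "a very lo…");
        assert_eq!(kept, 9);
        assert!(truncated);

        // Short inputs pass through untouched.
        let (display, kept, truncated) = ellipsize("short", 10);
        assert_eq!(display, "short");
        assert_eq!(kept, 5);
        assert!(!truncated);
    }
}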
fn model_badge_icons(model: &ModelInfo) -> Vec<&'static str> {
let mut badges = Vec::new();
if model.supports_tools {
badges.push("🔧");
}
if model_has_feature(model, &["think", "reason"]) {
badges.push("🧠");
}
if model_has_feature(model, &["vision", "multimodal", "image"]) {
badges.push("👁️");
}
if model_has_feature(model, &["audio", "speech", "voice"]) {
badges.push("🎧");
}
badges
}
fn model_has_feature(model: &ModelInfo, keywords: &[&str]) -> bool {
let name_lower = model.name.to_ascii_lowercase();
if keywords.iter().any(|kw| name_lower.contains(kw)) {
return true;
}
if let Some(description) = &model.description {
let description_lower = description.to_ascii_lowercase();
if keywords.iter().any(|kw| description_lower.contains(kw)) {
return true;
}
}
if model.capabilities.iter().any(|cap| {
let lc = cap.to_ascii_lowercase();
keywords.iter().any(|kw| lc.contains(kw))
}) {
return true;
}
keywords
.iter()
.any(|kw| model.provider.to_ascii_lowercase().contains(kw))
}
fn format_short_size(bytes: u64) -> String {
if bytes >= 1_000_000_000 {
format!("{:.1} GB", bytes as f64 / 1_000_000_000_f64)
} else if bytes >= 1_000_000 {
format!("{:.1} MB", bytes as f64 / 1_000_000_f64)
} else if bytes >= 1_000 {
format!("{:.1} KB", bytes as f64 / 1_000_f64)
} else {
format!("{} B", bytes)
}
}
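The size badge uses decimal (SI) units rather than binary ones; a quick check of the thresholds:

#[cfg(test)]
mod size_tests {
    use super::*;

    #[test]
    fn formats_decimal_units() {
        assert_eq!(format_short_size(950), "950 B");
        assert_eq!(format_short_size(1_500), "1.5 KB");
        assert_eq!(format_short_size(1_500_000), "1.5 MB");
        assert_eq!(format_short_size(2_300_000_000), "2.3 GB");
    }
}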
#[cfg(test)]
mod tests {
use super::*;
use owlen_core::types::ModelInfo;
fn model_with(capabilities: Vec<&str>, description: Option<&str>) -> ModelInfo {
ModelInfo {
id: "model".into(),
name: "model".into(),
description: description.map(|s| s.to_string()),
provider: "test".into(),
context_window: None,
capabilities: capabilities.into_iter().map(|s| s.to_string()).collect(),
supports_tools: false,
}
}
#[test]
fn model_badges_recognize_thinking_capability() {
let model = model_with(vec!["think"], None);
assert!(model_badge_icons(&model).contains(&"🧠"));
}
#[test]
fn model_badges_detect_tool_support() {
let mut model = model_with(vec![], None);
model.supports_tools = true;
let icons = model_badge_icons(&model);
assert!(icons.contains(&"🔧"));
}
#[test]
fn model_badges_detect_vision_capability() {
let model = model_with(vec![], Some("Supports vision tasks"));
let icons = model_badge_icons(&model);
assert!(icons.contains(&"👁️"));
}
#[test]
fn model_badges_detect_audio_capability() {
let model = model_with(vec!["audio"], None);
let icons = model_badge_icons(&model);
assert!(icons.contains(&"🎧"));
}
}

View File

@@ -0,0 +1,164 @@
use std::{any::Any, sync::Arc};
use async_trait::async_trait;
use crossterm::event::{KeyCode, KeyEvent, KeyModifiers};
use futures_util::stream;
use owlen_core::{
Config, Mode, Provider,
config::McpMode,
session::SessionController,
storage::StorageManager,
types::{ChatResponse, Message, Role, ToolCall},
ui::{NoOpUiController, UiController},
};
use owlen_tui::ChatApp;
use owlen_tui::app::UiRuntime;
use owlen_tui::events::Event;
use tempfile::tempdir;
use tokio::sync::mpsc;
struct StubProvider;
#[async_trait]
impl Provider for StubProvider {
fn name(&self) -> &str {
"stub-provider"
}
async fn list_models(&self) -> owlen_core::Result<Vec<owlen_core::types::ModelInfo>> {
Ok(vec![owlen_core::types::ModelInfo {
id: "stub-model".into(),
name: "Stub Model".into(),
description: Some("Stub model for testing".into()),
provider: self.name().into(),
context_window: Some(4096),
capabilities: vec!["chat".into()],
supports_tools: true,
}])
}
async fn send_prompt(
&self,
_request: owlen_core::types::ChatRequest,
) -> owlen_core::Result<ChatResponse> {
Ok(ChatResponse {
message: Message::assistant("stub response".to_string()),
usage: None,
is_streaming: false,
is_final: true,
})
}
async fn stream_prompt(
&self,
_request: owlen_core::types::ChatRequest,
) -> owlen_core::Result<owlen_core::ChatStream> {
Ok(Box::pin(stream::empty()))
}
async fn health_check(&self) -> owlen_core::Result<()> {
Ok(())
}
fn as_any(&self) -> &(dyn Any + Send + Sync) {
self
}
}
#[tokio::test(flavor = "multi_thread")]
async fn denied_consent_appends_apology_message() {
let temp_dir = tempdir().expect("temp dir");
let storage = Arc::new(
StorageManager::with_database_path(temp_dir.path().join("owlen-tui-tests.db"))
.await
.expect("storage"),
);
let mut config = Config::default();
config.privacy.encrypt_local_data = false;
config.general.default_model = Some("stub-model".into());
config.mcp.mode = McpMode::LocalOnly;
config
.refresh_mcp_servers(None)
.expect("refresh MCP servers");
let provider: Arc<dyn Provider> = Arc::new(StubProvider);
let ui: Arc<dyn UiController> = Arc::new(NoOpUiController);
let (event_tx, controller_event_rx) = mpsc::unbounded_channel();
// Pre-populate a pending consent request before handing the controller to the TUI.
let mut session = SessionController::new(
Arc::clone(&provider),
config,
Arc::clone(&storage),
Arc::clone(&ui),
true,
Some(event_tx.clone()),
)
.await
.expect("session controller");
session
.set_operating_mode(Mode::Code)
.await
.expect("code mode");
let tool_call = ToolCall {
id: "call-1".to_string(),
name: "resources/delete".to_string(),
arguments: serde_json::json!({"path": "/tmp/example.txt"}),
};
let message_id = session
.conversation_mut()
.push_assistant_message("Preparing to modify files.");
session
.conversation_mut()
.set_tool_calls_on_message(message_id, vec![tool_call])
.expect("tool calls");
let advertised_calls = session
.check_streaming_tool_calls(message_id)
.expect("queued consent");
assert_eq!(advertised_calls.len(), 1);
let (mut app, mut session_rx) = ChatApp::new(session, controller_event_rx)
.await
.expect("chat app");
// Session events are not used in this test.
session_rx.close();
// Process the controller event emitted by check_streaming_tool_calls.
UiRuntime::poll_controller_events(&mut app).expect("poll controller events");
assert!(app.has_pending_consent());
let consent_state = app
.consent_dialog()
.expect("consent dialog should be visible")
.clone();
assert_eq!(consent_state.tool_name, "resources/delete");
// Simulate the user pressing "4" to deny consent.
let deny_key = KeyEvent::new(KeyCode::Char('4'), KeyModifiers::NONE);
UiRuntime::handle_ui_event(&mut app, Event::Key(deny_key))
.await
.expect("handle deny key");
assert!(!app.has_pending_consent());
assert!(
app.status_message()
.to_lowercase()
.contains("consent denied")
);
let conversation = app.conversation();
let last_message = conversation.messages.last().expect("last message");
assert_eq!(last_message.role, Role::Assistant);
assert!(
last_message
.content
.to_lowercase()
.contains("consent was denied"),
"assistant should acknowledge the denied consent"
);
}

View File

@@ -0,0 +1,216 @@
use std::sync::{Arc, Mutex};
use std::time::Duration;
use anyhow::Result;
use async_trait::async_trait;
use futures_util::stream;
use owlen_core::provider::{
GenerateChunk, GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderMetadata,
ProviderStatus, ProviderType,
};
use owlen_core::state::AppState;
use owlen_tui::app::{self, App, MessageState, messages::AppMessage};
use tokio::sync::mpsc;
use tokio::task::{JoinHandle, yield_now};
use tokio::time::advance;
use uuid::Uuid;
#[derive(Clone)]
struct StatusProvider {
metadata: ProviderMetadata,
status: Arc<Mutex<ProviderStatus>>,
chunks: Arc<Vec<GenerateChunk>>,
}
impl StatusProvider {
fn new(status: ProviderStatus, chunks: Vec<GenerateChunk>) -> Self {
Self {
metadata: ProviderMetadata::new("stub", "Stub", ProviderType::Local, false),
status: Arc::new(Mutex::new(status)),
chunks: Arc::new(chunks),
}
}
fn set_status(&self, status: ProviderStatus) {
*self.status.lock().unwrap() = status;
}
}
#[async_trait]
impl ModelProvider for StatusProvider {
fn metadata(&self) -> &ProviderMetadata {
&self.metadata
}
async fn health_check(&self) -> Result<ProviderStatus, owlen_core::Error> {
Ok(*self.status.lock().unwrap())
}
async fn list_models(&self) -> Result<Vec<ModelInfo>, owlen_core::Error> {
Ok(vec![])
}
async fn generate_stream(
&self,
_request: GenerateRequest,
) -> Result<GenerateStream, owlen_core::Error> {
let items = Arc::clone(&self.chunks);
let stream_items = items.as_ref().clone();
Ok(Box::pin(stream::iter(stream_items.into_iter().map(Ok))))
}
}
#[derive(Default)]
struct RecordingState {
started: bool,
appended: bool,
completed: bool,
failed: bool,
refreshed: bool,
updated: bool,
provider_status: Option<ProviderStatus>,
}
impl MessageState for RecordingState {
fn start_generation(
&mut self,
_request_id: Uuid,
_provider_id: &str,
_request: &GenerateRequest,
) -> AppState {
self.started = true;
AppState::Running
}
fn append_chunk(&mut self, _request_id: Uuid, _chunk: &GenerateChunk) -> AppState {
self.appended = true;
AppState::Running
}
fn generation_complete(&mut self, _request_id: Uuid) -> AppState {
self.completed = true;
AppState::Running
}
fn generation_failed(&mut self, _request_id: Option<Uuid>, _message: &str) -> AppState {
self.failed = true;
AppState::Running
}
fn refresh_model_list(&mut self) -> AppState {
self.refreshed = true;
AppState::Running
}
fn update_model_list(&mut self) -> AppState {
self.updated = true;
AppState::Running
}
fn update_provider_status(&mut self, _provider_id: &str, status: ProviderStatus) -> AppState {
self.provider_status = Some(status);
AppState::Running
}
}
#[tokio::test]
async fn start_and_abort_generation_manage_active_state() {
let manager = Arc::new(owlen_core::provider::ProviderManager::default());
let provider = StatusProvider::new(
ProviderStatus::Available,
vec![
GenerateChunk::from_text("hello"),
GenerateChunk::final_chunk(),
],
);
manager.register_provider(Arc::new(provider.clone())).await;
let mut app = App::new(Arc::clone(&manager));
let request_id = app
.start_generation("stub", GenerateRequest::new("stub-model"))
.expect("start generation");
assert!(app.has_active_generation());
assert_ne!(request_id, Uuid::nil());
app.abort_active_generation();
assert!(!app.has_active_generation());
}
#[test]
fn handle_message_dispatches_variants() {
let manager = Arc::new(owlen_core::provider::ProviderManager::default());
let mut app = App::new(Arc::clone(&manager));
let mut state = RecordingState::default();
let request_id = Uuid::new_v4();
let _ = app.handle_message(
&mut state,
AppMessage::GenerateStart {
request_id,
provider_id: "stub".into(),
request: GenerateRequest::new("stub"),
},
);
let _ = app.handle_message(
&mut state,
AppMessage::GenerateChunk {
request_id,
chunk: GenerateChunk::from_text("chunk"),
},
);
let _ = app.handle_message(&mut state, AppMessage::GenerateComplete { request_id });
let _ = app.handle_message(
&mut state,
AppMessage::GenerateError {
request_id: Some(request_id),
message: "error".into(),
},
);
let _ = app.handle_message(&mut state, AppMessage::ModelsRefresh);
let _ = app.handle_message(&mut state, AppMessage::ModelsUpdated);
let _ = app.handle_message(
&mut state,
AppMessage::ProviderStatus {
provider_id: "stub".into(),
status: ProviderStatus::Available,
},
);
assert!(state.started);
assert!(state.appended);
assert!(state.completed);
assert!(state.failed);
assert!(state.refreshed);
assert!(state.updated);
assert!(matches!(
state.provider_status,
Some(ProviderStatus::Available)
));
}
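// `start_paused` freezes tokio's clock; `advance` below jumps past the worker's
// 30-second health poll interval without real waiting.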
#[tokio::test(start_paused = true)]
async fn background_worker_emits_status_changes() {
let manager = Arc::new(owlen_core::provider::ProviderManager::default());
let provider = StatusProvider::new(
ProviderStatus::Unavailable,
vec![GenerateChunk::final_chunk()],
);
manager.register_provider(Arc::new(provider.clone())).await;
let (tx, mut rx) = mpsc::unbounded_channel();
let worker: JoinHandle<()> = tokio::spawn(app::background_worker(Arc::clone(&manager), tx));
provider.set_status(ProviderStatus::Available);
advance(Duration::from_secs(31)).await;
yield_now().await;
if let Some(AppMessage::ProviderStatus { status, .. }) = rx.recv().await {
assert!(matches!(status, ProviderStatus::Available));
} else {
panic!("expected provider status update");
}
worker.abort();
let _ = worker.await;
yield_now().await;
}


@@ -0,0 +1,97 @@
use crossterm::event::{KeyCode, KeyEvent, KeyEventKind, KeyEventState, KeyModifiers};
use owlen_core::provider::{GenerateChunk, GenerateRequest, ProviderStatus};
use owlen_tui::app::messages::AppMessage;
use uuid::Uuid;
#[test]
fn message_variants_roundtrip_their_data() {
let request = GenerateRequest::new("demo-model");
let request_id = Uuid::new_v4();
let key_event = KeyEvent {
code: KeyCode::Char('a'),
modifiers: KeyModifiers::CONTROL,
kind: KeyEventKind::Press,
state: KeyEventState::NONE,
};
let messages = vec![
AppMessage::KeyPress(key_event),
AppMessage::Resize {
width: 120,
height: 40,
},
AppMessage::Tick,
AppMessage::GenerateStart {
request_id,
provider_id: "mock".into(),
request: request.clone(),
},
AppMessage::GenerateChunk {
request_id,
chunk: GenerateChunk::from_text("hi"),
},
AppMessage::GenerateComplete { request_id },
AppMessage::GenerateError {
request_id: Some(request_id),
message: "oops".into(),
},
AppMessage::ModelsRefresh,
AppMessage::ModelsUpdated,
AppMessage::ProviderStatus {
provider_id: "mock".into(),
status: ProviderStatus::Available,
},
];
for message in messages {
match message {
AppMessage::KeyPress(event) => {
assert_eq!(event.code, KeyCode::Char('a'));
assert!(event.modifiers.contains(KeyModifiers::CONTROL));
}
AppMessage::Resize { width, height } => {
assert_eq!(width, 120);
assert_eq!(height, 40);
}
AppMessage::Tick => {}
AppMessage::GenerateStart {
request_id: id,
provider_id,
request,
} => {
assert_eq!(id, request_id);
assert_eq!(provider_id, "mock");
assert_eq!(request.model, "demo-model");
}
AppMessage::GenerateChunk {
request_id: id,
chunk,
} => {
assert_eq!(id, request_id);
assert_eq!(chunk.text.as_deref(), Some("hi"));
}
AppMessage::GenerateComplete { request_id: id } => {
assert_eq!(id, request_id);
}
AppMessage::GenerateError {
request_id: Some(id),
message,
} => {
assert_eq!(id, request_id);
assert_eq!(message, "oops");
}
AppMessage::ModelsRefresh => {}
AppMessage::ModelsUpdated => {}
AppMessage::ProviderStatus {
provider_id,
status,
} => {
assert_eq!(provider_id, "mock");
assert!(matches!(status, ProviderStatus::Available));
}
AppMessage::GenerateError {
request_id: None, ..
} => panic!("missing request id"),
}
}
}


@@ -9,21 +9,11 @@ fn palette_tracks_buffer_and_suggestions() {
palette.set_buffer("mo");
assert_eq!(palette.buffer(), "mo");
assert!(
palette
.suggestions()
.iter()
.all(|s| s.value.starts_with("mo"))
);
assert!(!palette.suggestions().is_empty());
palette.push_char('d');
assert_eq!(palette.buffer(), "mod");
assert!(
palette
.suggestions()
.iter()
.all(|s| s.value.starts_with("mod"))
);
assert!(!palette.suggestions().is_empty());
palette.pop_char();
assert_eq!(palette.buffer(), "mo");


@@ -0,0 +1,13 @@
# Experimental Providers
This directory collects non-workspace placeholder crates for potential
third-party providers. The code under the following folders is not yet
implemented and is kept out of the default Cargo workspace to avoid
confusion:
- `openai`
- `anthropic`
- `gemini`
If you want to explore or contribute to these providers, start by reading
the `README.md` inside each crate for the current status and ideas.

docs/adding-providers.md Normal file

@@ -0,0 +1,62 @@
# Adding a Provider to Owlen
This guide complements `docs/provider-implementation.md` with a practical checklist for wiring a new model backend into the Phase 10 architecture.
## 1. Define the Provider Type
Providers live in their own crate (for example `owlen-providers`). Create a module that implements the `owlen_core::provider::ModelProvider` trait:
```rust
pub struct MyProvider {
client: MyHttpClient,
metadata: ProviderMetadata,
}
#[async_trait]
impl ModelProvider for MyProvider {
fn metadata(&self) -> &ProviderMetadata { &self.metadata }
async fn health_check(&self) -> Result<ProviderStatus> { ... }
async fn list_models(&self) -> Result<Vec<ModelInfo>> { ... }
async fn generate_stream(&self, request: GenerateRequest) -> Result<GenerateStream> { ... }
}
```
Set `ProviderMetadata::provider_type` to `ProviderType::Local` or `ProviderType::Cloud` so the TUI can label it correctly.
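For example, mirroring the constructor shape used later in this guide (the trailing flag is provider-specific metadata, shown here only for illustration):
```rust
use owlen_core::provider::{ProviderMetadata, ProviderType};

fn cloud_metadata() -> ProviderMetadata {
    ProviderMetadata::new(
        "my_provider",       // stable id referenced by config and the manager registry
        "My Provider",       // display name surfaced in the TUI
        ProviderType::Cloud, // drives the Local/Cloud labelling in the picker
        true,                // trailing flag as used in this guide's full example
    )
}
```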
## 2. Register with `ProviderManager`
`ProviderManager` owns provider instances and tracks their health. In your startup code (usually `owlen-cli` or an MCP server), construct the provider and register it:
```rust
let manager = ProviderManager::new(config);
manager.register_provider(Arc::new(MyProvider::new(config)?)).await;
```
The manager caches `ProviderStatus` values so the TUI can surface availability in the picker and background worker events.
## 3. Expose Through MCP (Optional)
For providers that should run out-of-process, implement an MCP server (`owlen-mcp-llm-server` demonstrates the pattern). The TUI uses `RemoteMcpClient`, so exposing `generate_text` keeps the UI completely decoupled from provider details.
## 4. Add Tests
Commit 13 introduced integration tests in `crates/owlen-providers/tests`. Follow this pattern to exercise:
- registration with `ProviderManager`
- model aggregation across providers
- routing of `generate` requests
- provider status transitions when generation succeeds or fails
In-memory mocks are enough; the goal is to protect the trait contract and the manager's health cache.
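A minimal sketch of the registration case, assuming a configurable `MockProvider` like the one the suite defines (the helper name and the exact return type of `list_all_models` are illustrative):
```rust
use std::sync::Arc;

use owlen_core::provider::ProviderManager;

// Hypothetical helper standing in for the suite's configurable mock
// (models, health status, generation handlers, error simulation).
use mock_support::MockProvider;

#[tokio::test]
async fn registration_exposes_models_through_the_manager() {
    let manager = Arc::new(ProviderManager::default());
    manager
        .register_provider(Arc::new(MockProvider::with_one_model("mock")))
        .await;

    // Aggregation should fold the mock's catalogue into the shared list.
    let models = manager.list_all_models().await;
    assert!(!models.is_empty());
}
```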
## 5. Document Configuration
Update `docs/configuration.md` and the default `config.toml` snippet so users can enable the new provider. Include environment variables, auth requirements, or special flags.
## 6. Update User-Facing Docs
- Add a short entry to the feature list in `README.md`.
- Mention the new provider in `CHANGELOG.md` under the “Added” section.
- If the provider requires troubleshooting steps, append them to `docs/troubleshooting.md`.
Following these steps keeps the provider lifecycle consistent with Owlen's multi-provider architecture: providers register once, the manager handles orchestration, and the TUI reacts via message-driven updates.


@@ -6,7 +6,8 @@ This document provides a high-level overview of the Owlen architecture. Its purp
The architecture is designed to be modular and extensible, centered around a few key concepts:
- **Providers**: Connect to various LLM APIs (Ollama, OpenAI, etc.).
- **Provider Manager**: Coordinates multiple `ModelProvider` implementations, aggregates model metadata, and caches health status for the UI.
- **Providers**: Concrete backends (Ollama Local, Ollama Cloud, future providers) accessed either directly or through MCP servers.
- **Session**: Manages the conversation history and state.
- **TUI**: The terminal user interface, built with `ratatui`.
- **Events**: A system for handling user input and other events.
@@ -16,27 +17,29 @@ The architecture is designed to be modular and extensible, centered around a few
A simplified diagram of how components interact:
```
[User Input] -> [Event Loop] -> [Session Controller] -> [Provider]
      ^                                                      |
      |                                                      v
[TUI Renderer] <------------------------------------ [API Response]
[User Input] -> [Event Loop] -> [Message Handler] -> [Session Controller] -> [Provider Manager] -> [Provider]
      ^                                                                                                 |
      |                                                                                                 v
[TUI Renderer] <- [AppMessage Stream] <- [Background Worker] <--------------- [Provider Health]
```
1. **User Input**: The user interacts with the TUI, generating events (e.g., key presses).
2. **Event Loop**: The main event loop in `owlen-tui` captures these events.
3. **Session Controller**: The event is processed, and if it's a prompt, the session controller sends a request to the current provider.
4. **Provider**: The provider formats the request for the specific LLM API and sends it.
5. **API Response**: The LLM API returns a response.
6. **TUI Renderer**: The response is processed, the session state is updated, and the TUI is re-rendered to display the new information.
2. **Event Loop**: The non-blocking event loop in `owlen-tui` bundles raw input, async session events, and background health updates into `AppMessage` events.
3. **Message Handler**: `App::handle_message` centralises dispatch, updating runtime state (chat, model picker, provider indicators) before the UI redraw.
4. **Session Controller**: Prompt events create `GenerateRequest`s that flow through `ProviderManager::generate` to the designated provider.
5. **Provider**: The provider formats requests for its API and streams back `GenerateChunk`s.
6. **Provider Manager**: Tracks health while streaming; errors mark a provider unavailable so background workers and the model picker reflect the state.
7. **Background Worker**: A periodic task runs health checks and emits status updates as `AppMessage::ProviderStatus` events.
8. **TUI Renderer**: The response is processed, the session state is updated, and the TUI is re-rendered to display the new information.
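The dispatch in step 3 is plain enough to show directly. A minimal sketch, assuming only the `MessageState` signatures and the `App::handle_message` call shape that the unit tests exercise:
```rust
use std::sync::Arc;

use owlen_core::provider::{GenerateChunk, GenerateRequest, ProviderManager, ProviderStatus};
use owlen_core::state::AppState;
use owlen_tui::app::{App, MessageState, messages::AppMessage};
use uuid::Uuid;

// A no-op `MessageState`: every hook keeps the app running and discards data.
struct NoopState;

impl MessageState for NoopState {
    fn start_generation(&mut self, _id: Uuid, _provider: &str, _req: &GenerateRequest) -> AppState {
        AppState::Running
    }
    fn append_chunk(&mut self, _id: Uuid, _chunk: &GenerateChunk) -> AppState {
        AppState::Running
    }
    fn generation_complete(&mut self, _id: Uuid) -> AppState {
        AppState::Running
    }
    fn generation_failed(&mut self, _id: Option<Uuid>, _msg: &str) -> AppState {
        AppState::Running
    }
    fn refresh_model_list(&mut self) -> AppState {
        AppState::Running
    }
    fn update_model_list(&mut self) -> AppState {
        AppState::Running
    }
    fn update_provider_status(&mut self, _provider: &str, _status: ProviderStatus) -> AppState {
        AppState::Running
    }
}

fn dispatch_one_message() {
    let manager = Arc::new(ProviderManager::default());
    let mut app = App::new(Arc::clone(&manager));
    let mut state = NoopState;
    // Background health updates reach the UI through the same funnel as input.
    let _next = app.handle_message(
        &mut state,
        AppMessage::ProviderStatus {
            provider_id: "ollama_local".into(),
            status: ProviderStatus::Available,
        },
    );
}
```
The real loop threads live `ChatApp` state through this funnel; the sketch only shows the shape of the handoff.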
## Crate Breakdown
- `owlen-core`: Defines the `LlmProvider` abstraction, routing, configuration, session state, encryption, and the MCP client layer. This crate is UI-agnostic and must not depend on concrete providers, terminals, or blocking I/O.
- `owlen-tui`: Hosts all terminal UI behaviour (event loop, rendering, input modes) while delegating business logic and provider access back to `owlen-core`.
- `owlen-cli`: Small entry point that parses command-line options, resolves configuration, selects providers, and launches either the TUI or headless agent flows by calling into `owlen-core`.
- `owlen-mcp-llm-server`: Runs concrete providers (e.g., Ollama) behind an MCP boundary, exposing them as `generate_text` tools. This crate owns provider-specific wiring and process sandboxing.
- `owlen-mcp-server`: Generic MCP server for file operations and resource management.
- `owlen-ollama`: Direct Ollama provider implementation (legacy, used only by MCP servers).
- `owlen-mcp-llm-server`: Runs concrete providers (e.g., Ollama Local, Ollama Cloud) behind an MCP boundary, exposing them as `generate_text` tools. This crate owns provider-specific wiring and process sandboxing.
- `owlen-mcp-server`: Generic MCP server for file operations, resource projection, and other non-LLM tools.
- `owlen-providers`: Houses concrete provider adapters (today: Ollama local + cloud) that the MCP servers embed.
### Boundary Guidelines
@@ -44,6 +47,46 @@ A simplified diagram of how components interact:
- **owlen-cli**: Only orchestrates startup/shutdown. Avoid adding business logic; when a new command needs behaviour, implement it in `owlen-core` or another library crate and invoke it from the CLI.
- **owlen-mcp-llm-server**: The only crate that should directly talk to Ollama (or other provider processes). TUI/CLI code communicates with providers exclusively through MCP clients in `owlen-core`.
## Provider Boundaries & MCP Topology
Owlen's runtime is intentionally layered so that user interfaces never couple to provider-specific code. The flow can be visualised as:
```
[owlen-tui] / [owlen-cli]
        │ chat + model requests
        ▼
[owlen-core::ProviderManager] ──> Arc<dyn ModelProvider>
        │                               ▲
        │                               │ implements `ModelProvider`
        ▼                               │
[owlen-core::mcp::RemoteMcpClient] ────┘
        │ (JSON-RPC over stdio)
        ▼
┌───────────────────────────────────────────────────────────┐
│ MCP Process Boundary (spawned per provider)               │
│                                                           │
│ crates/mcp/llm-server ──> owlen-providers::ollama::*      │
│ crates/mcp/server ──> filesystem & workspace tools        │
│ crates/mcp/prompt-server ─> template rendering helpers    │
└───────────────────────────────────────────────────────────┘
```
- **ProviderManager (owlen-core)** keeps the registry of `ModelProvider` implementations, merges model catalogues, and caches health. Local Ollama and Cloud Ollama appear as separate providers whose metadata is merged for the UI.
- **RemoteMcpClient (owlen-core)** is the default `ModelProvider`. It implements both the MCP client traits and the `ModelProvider` interface, allowing it to bridge chat streams back into the ProviderManager without exposing transport details.
- **MCP servers (crates/mcp/\*)** are short-lived binaries with narrowly scoped responsibilities:
- `crates/mcp/llm-server` wraps `owlen-providers::ollama` backends and exposes `generate_text` / `list_models`.
- `crates/mcp/server` offers tool calls (file reads/writes, search).
- `crates/mcp/prompt-server` renders prompt templates.
- **owlen-providers** contains the actual provider adapters (Ollama local & cloud today). MCP servers embed these adapters directly; nothing else should reach into them.
### Health & Model Discovery Flow
1. Frontends call `ProviderManager::list_all_models()`. The manager fans out health checks to each registered provider (including the MCP client) and collates their models into a single list tagged with scope (`Local`, `Cloud`, etc.).
2. The TUI model picker (`owlen-tui/src/widgets/model_picker.rs`) reads those annotated entries to drive filters like **Local**, **Cloud**, and **Available**.
3. When the user kicks off a chat, the TUI emits a request that flows through `Session::send_message`, which delegates to `ProviderManager::generate`. The selected provider (usually `RemoteMcpClient`) streams chunks back across the MCP transport and the manager updates health status based on success or failure.
4. Tool invocations travel the same transport: the MCP client sends tool calls to `crates/mcp/server`, and responses surface as consent prompts or streamed completions in the UI.
## MCP Architecture (Phase 10)
As of Phase 10, OWLEN uses a **MCP-only architecture** where all LLM interactions go through the Model Context Protocol:
@@ -106,7 +149,7 @@ The session management system is responsible for tracking the state of a convers
- **`SessionController`**: This is the high-level controller that manages the active conversation. It handles:
- Storing and retrieving conversation history via the `ConversationManager`.
- Managing the context that is sent to the LLM provider.
- Switching between different models.
- Switching between different models by selecting a provider ID managed by `ProviderManager`.
- Sending requests to the provider and handling the responses (both streaming and complete).
When a user sends a message, the `SessionController` adds the message to the current `Conversation`, sends the updated message list to the `Provider`, and then adds the provider's response to the `Conversation`.
@@ -132,4 +175,4 @@ The TUI is rendered on each iteration of the main application loop in `owlen-tui
4. **State-Driven Rendering**: Each rendering function takes the current application state as an argument. It uses this state to decide what and how to render. For example, the border color of a panel might change if it is focused.
5. **Buffer and Diff**: `ratatui` does not draw directly to the terminal. Instead, it renders the widgets to an in-memory buffer. It then compares this buffer to the previous buffer and only sends the necessary changes to the terminal. This is highly efficient and prevents flickering.
The command palette and other modal helpers expose lightweight state structs in `owlen_tui::state`. These components keep business logic (suggestion filtering, selection state, etc.) independent from rendering, which in turn makes them straightforward to unit test.
The command palette and other modal helpers expose lightweight state structs in `owlen_tui::state`. These components keep business logic (suggestion filtering, selection state, etc.) independent from rendering, which in turn makes them straightforward to unit test. The ongoing migration of more features into the Model-View-Update (MVU) core is documented in [`docs/tui-mvu-migration.md`](tui-mvu-migration.md).


@@ -113,45 +113,67 @@ These settings control the behavior of the text input area.
## Provider Settings (`[providers]`)
This section contains a table for each provider you want to configure. Owlen ships with two entries pre-populated: `ollama` for a local daemon and `ollama-cloud` for the hosted API. You can switch between them by changing `general.default_provider`.
This section contains a table for each provider you want to configure. Owlen now ships with four entries pre-populated: `ollama_local`, `ollama_cloud`, `openai`, and `anthropic`. Switch between them by updating `general.default_provider`.
```toml
[providers.ollama]
[providers.ollama_local]
enabled = true
provider_type = "ollama"
base_url = "http://localhost:11434"
# api_key = "..."
[providers.ollama-cloud]
provider_type = "ollama-cloud"
[providers.ollama_cloud]
enabled = false
provider_type = "ollama_cloud"
base_url = "https://ollama.com"
# api_key = "${OLLAMA_API_KEY}"
api_key_env = "OLLAMA_CLOUD_API_KEY"
[providers.openai]
enabled = false
provider_type = "openai"
base_url = "https://api.openai.com/v1"
api_key_env = "OPENAI_API_KEY"
[providers.anthropic]
enabled = false
provider_type = "anthropic"
base_url = "https://api.anthropic.com/v1"
api_key_env = "ANTHROPIC_API_KEY"
```
- `enabled` (boolean, default: `true`)
Whether the provider should be considered when refreshing models or issuing requests.
- `provider_type` (string, required)
The type of the provider. The built-in options are `"ollama"` (local daemon) and `"ollama-cloud"` (hosted service).
Identifies which implementation to use. Local Ollama instances use `"ollama"`; the hosted service uses `"ollama_cloud"`. Third-party providers use their own identifiers (`"openai"`, `"anthropic"`, ...).
- `base_url` (string, optional)
The base URL of the provider's API.
- `api_key` (string, optional)
The API key to use for authentication, if required.
**Note:** `ollama-cloud` now requires an API key; Owlen will refuse to start the provider without one and will hint at the missing configuration.
- `api_key` / `api_key_env` (string, optional)
Authentication material. Prefer `api_key_env` to reference an environment variable so secrets remain outside of the config file.
- `extra` (table, optional)
Any additional, provider-specific parameters can be added here.
### Using Ollama Cloud
Owlen now ships a single unified `ollama` provider. When an API key is present, Owlen automatically routes traffic to [Ollama Cloud](https://docs.ollama.com/cloud); otherwise it talks to the local daemon. A minimal configuration looks like this:
Owlen now separates the local daemon and the hosted API into two providers. Enable `ollama_cloud` once you have credentials, while keeping `ollama_local` available for on-device workloads. A minimal configuration looks like this:
```toml
[providers.ollama]
provider_type = "ollama"
base_url = "http://localhost:11434" # ignored once an API key is supplied
api_key = "${OLLAMA_API_KEY}"
[general]
default_provider = "ollama_local"
[providers.ollama_local]
enabled = true
base_url = "http://localhost:11434"
[providers.ollama_cloud]
enabled = true
base_url = "https://ollama.com"
api_key_env = "OLLAMA_CLOUD_API_KEY"
```
Requests target the same `/api/chat` endpoint documented by Ollama and automatically include the API key using a `Bearer` authorization header. If you prefer not to store the key in the config file, you can leave `api_key` unset and provide it via the `OLLAMA_API_KEY` (or `OLLAMA_CLOUD_API_KEY`) environment variable instead. You can also reference an environment variable inline (for example `api_key = "$OLLAMA_API_KEY"` or `api_key = "${OLLAMA_API_KEY}"`), which Owlen expands when the configuration is loaded. The base URL is normalised automatically—Owlen enforces HTTPS, trims trailing slashes, and accepts both `https://ollama.com` and `https://api.ollama.com` without rewriting the host.
Requests target the same `/api/chat` endpoint documented by Ollama and automatically include the API key using a `Bearer` authorization header. If you prefer not to store the key in the config file, either rely on `api_key_env` or export the environment variable manually. Owlen normalises the base URL automatically—it enforces HTTPS, trims trailing slashes, and accepts both `https://ollama.com` and `https://api.ollama.com` without rewriting the host.
> **Tip:** If the official `ollama signin` flow fails on Linux v0.12.3, follow the [Linux Ollama sign-in workaround](#linux-ollama-sign-in-workaround-v0123) in the troubleshooting guide to copy keys from a working machine or register them manually.
@@ -166,4 +188,4 @@ owlen cloud models # List the hosted models your account can access
owlen cloud logout # Forget the stored API key
```
When `privacy.encrypt_local_data = true`, the API key is written to Owlen's encrypted credential vault instead of being persisted in plaintext. Subsequent invocations automatically load the key into the runtime environment so that the config file can remain redacted. If encryption is disabled, the key is stored under `[providers.ollama-cloud].api_key` as before.
When `privacy.encrypt_local_data = true`, the API key is written to Owlen's encrypted credential vault instead of being persisted in plaintext. Subsequent invocations automatically load the key into the runtime environment so that the config file can remain redacted. If encryption is disabled, the key is stored under `[providers.ollama_cloud].api_key` as before.


@@ -82,17 +82,19 @@ Ensure your provider configuration is correct. For Ollama:
```toml
[general]
default_provider = "ollama"
default_provider = "ollama_local"
default_model = "llama3.2:latest" # or your preferred model
[providers.ollama]
[providers.ollama_local]
enabled = true
provider_type = "ollama"
base_url = "http://localhost:11434"
[providers.ollama-cloud]
provider_type = "ollama-cloud"
[providers.ollama_cloud]
enabled = true # set to false if you do not use the hosted API
provider_type = "ollama_cloud"
base_url = "https://ollama.com"
api_key = "$OLLAMA_API_KEY" # Optional: for Ollama Cloud
api_key_env = "OLLAMA_CLOUD_API_KEY"
```
#### Step 3: Understanding MCP Server Configuration
@@ -156,7 +158,7 @@ After updating your config:
**Solution**:
- Verify model availability on https://ollama.com/models
- Remove the `-cloud` suffix from model names when using cloud provider
- Ensure `api_key` is set in `[providers.ollama-cloud]` config
- Ensure `api_key`/`api_key_env` is set in `[providers.ollama_cloud]` config
### 0.1.9 Explicit Ollama Modes & Cloud Endpoint Storage


@@ -127,7 +127,7 @@ Create or edit `~/.config/owlen/config.toml`:
```toml
[general]
default_provider = "ollama"
default_provider = "ollama_local"
default_model = "llama3.2:latest"
[modes.chat]


@@ -2,24 +2,22 @@
This guide explains how to implement a new provider for Owlen. Providers are the components that connect to different LLM APIs.
## The `Provider` Trait
## The `ModelProvider` Trait
The core of the provider system is the `Provider` trait, located in `owlen-core`. Any new provider must implement this trait.
The core of the provider system is the `ModelProvider` trait, located in `owlen-core::provider`. Any new provider must implement this async trait so it can be managed by `ProviderManager`.
Here is a simplified version of the trait:
```rust
use async_trait::async_trait;
use owlen_core::model::Model;
use owlen_core::session::Session;
use owlen_core::provider::{GenerateChunk, GenerateRequest, GenerateStream, ModelInfo, ProviderMetadata, ProviderStatus};
#[async_trait]
pub trait Provider {
/// Returns the name of the provider.
fn name(&self) -> &str;
/// Sends the session to the provider and returns the response.
async fn chat(&self, session: &Session, model: &Model) -> Result<String, anyhow::Error>;
pub trait ModelProvider: Send + Sync {
fn metadata(&self) -> &ProviderMetadata;
async fn health_check(&self) -> owlen_core::Result<ProviderStatus>;
async fn list_models(&self) -> owlen_core::Result<Vec<ModelInfo>>;
async fn generate_stream(&self, request: GenerateRequest) -> owlen_core::Result<GenerateStream>;
}
```
@@ -35,41 +33,66 @@ In your new crate's `lib.rs`, you will define a struct for your provider and imp
```rust
use async_trait::async_trait;
use owlen_core::model::Model;
use owlen_core::Provider;
use owlen_core::session::Session;
use owlen_core::provider::{
GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderMetadata,
ProviderStatus, ProviderType,
};
pub struct MyProvider;
pub struct MyProvider {
metadata: ProviderMetadata,
client: MyHttpClient,
}
impl MyProvider {
pub fn new(config: &MyConfig) -> owlen_core::Result<Self> {
let metadata = ProviderMetadata::new(
"my_provider",
"My Provider",
ProviderType::Cloud,
true,
);
Ok(Self {
metadata,
client: MyHttpClient::new(config)?,
})
}
}
#[async_trait]
impl Provider for MyProvider {
fn name(&self) -> &str {
"my-provider"
impl ModelProvider for MyProvider {
fn metadata(&self) -> &ProviderMetadata {
&self.metadata
}
async fn chat(&self, session: &Session, model: &Model) -> Result<String, anyhow::Error> {
// 1. Get the conversation history from the session.
let history = session.get_messages();
async fn health_check(&self) -> owlen_core::Result<ProviderStatus> {
self.client.ping().await.map(|_| ProviderStatus::Available)
}
// 2. Format the request for your provider's API.
// This might involve creating a JSON body with the messages.
async fn list_models(&self) -> owlen_core::Result<Vec<ModelInfo>> {
self.client.list_models().await
}
// 3. Send the request to the API using a client like reqwest.
// 4. Parse the response from the API.
// 5. Return the content of the response as a String.
Ok("Hello from my provider!".to_string())
async fn generate_stream(&self, request: GenerateRequest) -> owlen_core::Result<GenerateStream> {
self.client.generate(request).await
}
}
```
## Integrating with Owlen
Once your provider is implemented, you will need to integrate it into the main Owlen application.
Once your provider is implemented, you will need to register it with the `ProviderManager` and surface it to users.
1. **Add your provider crate** as a dependency to `owlen-cli`.
2. **In `owlen-cli`, modify the provider registration** to include your new provider. This will likely involve adding it to a list of available providers that the user can select from in the configuration.
1. **Add your provider crate** as a dependency to the component that will host it (an MCP server or `owlen-cli`).
2. **Register the provider** with `ProviderManager` during startup:
This guide provides a basic outline. For more detailed examples, you can look at the existing provider implementations, such as `owlen-ollama`.
```rust
let manager = ProviderManager::new(config);
manager.register_provider(Arc::new(MyProvider::new(config)?)).await;
```
3. **Update configuration docs/examples** so the provider has a `[providers.my_provider]` entry.
4. **Expose via MCP (optional)** if the provider should run out-of-process. Owlen's TUI talks to providers exclusively via MCP after Phase 10.
5. **Add tests** similar to `crates/owlen-providers/tests/integration_test.rs` that exercise registration, model aggregation, generation routing, and health transitions.
For concrete examples, see the Ollama providers in `crates/owlen-providers/` and the integration tests added in commit 13.

docs/repo-map.md Normal file

@@ -0,0 +1,70 @@
# Repo Map
> Generated by `scripts/gen-repo-map.sh`. Regenerate whenever the workspace layout changes.
```text
.
├── crates
│   ├── mcp
│   │   ├── client
│   │   ├── code-server
│   │   ├── llm-server
│   │   ├── prompt-server
│   │   └── server
│   ├── owlen-cli
│   │   ├── src
│   │   ├── tests
│   │   ├── Cargo.toml
│   │   └── README.md
│   ├── owlen-core
│   │   ├── examples
│   │   ├── migrations
│   │   ├── src
│   │   ├── tests
│   │   ├── Cargo.toml
│   │   └── README.md
│   ├── owlen-markdown
│   │   ├── src
│   │   └── Cargo.toml
│   ├── owlen-providers
│   │   ├── src
│   │   ├── tests
│   │   └── Cargo.toml
│   ├── owlen-tui
│   │   ├── src
│   │   ├── tests
│   │   ├── Cargo.toml
│   │   └── README.md
│   └── providers
│       └── experimental
├── docs
│   ├── migrations
│   ├── CHANGELOG_v1.0.md
│   ├── adding-providers.md
│   ├── architecture.md
│   ├── configuration.md
│   ├── faq.md
│   ├── migration-guide.md
│   ├── phase5-mode-system.md
│   ├── platform-support.md
│   ├── provider-implementation.md
│   ├── testing.md
│   └── troubleshooting.md
├── examples
├── scripts
│   ├── check-windows.sh
│   └── gen-repo-map.sh
├── AGENTS.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── PKGBUILD
├── README.md
├── SECURITY.md
└── config.toml

29 directories, 32 files
```


@@ -31,6 +31,7 @@ Owlen now queries both the local daemon and Ollama Cloud and shows them side-by-
4. **Keep the base URL local.** The cloud setup command no longer overrides `providers.ollama.base_url` unless `--force-cloud-base-url` is passed. If you changed it manually, edit `config.toml` or run `owlen config doctor` to restore the default `http://localhost:11434` value.
Once the daemon responds again, the picker will automatically merge the updated local list with the cloud catalogue.
Owlen runs a background health worker every 30 seconds; once the daemon responds it will update the picker automatically without needing a restart.
## Terminal Compatibility Issues

docs/tui-mvu-migration.md Normal file

@@ -0,0 +1,109 @@
# TUI MVU Migration Guide
This guide explains how we are migrating the Owlen terminal UI to a predictable **Model-View-Update (MVU)** architecture. Use it to understand the current layout, decide where new logic belongs, and track which features have already moved to the MVU core.
---
## Goals
- Make UI state transitions pure and testable.
- Reduce duplicated control flow inside `chat_app.rs`.
- Keep rendering functions dumb; they should depend on read-only view models.
- Ensure new features land in MVU-first form so the imperative paths shrink over time.
Adopt the checklist below whenever you touch a feature that still lives in the imperative code path.
---
## Module Map (owlen-tui)
| Area | Path | Responsibility | MVU Status |
| --- | --- | --- | --- |
| Core state | `src/app/mvu.rs` | Shared `AppModel`, `AppEvent`, `AppEffect` definitions | **Ready**: composer + consent events implemented |
| Legacy app | `src/chat_app.rs` | Orchestrates IO, manages pending tasks, renders via ratatui | **Transitioning**: increasingly delegates to MVU |
| Event loop | `src/app/handler.rs` | Converts session messages into app updates | Needs cleanup once message flow is MVU-aware |
| Rendering | `src/ui.rs` + `src/widgets/*` | Pure rendering helpers that pull data from `ChatApp` | Already read-only; keep that invariant |
| Commands | `src/commands/*` | Keymap and palette command registry | Candidate for MVU once palette state migrates |
| Shared state | `src/state/*` | Small state helpers (command palette, file tree, etc.) | Each module can become an MVU sub-model |
Use the table to find the right starting point before adding new events.
---
## Event Taxonomy
Current events live in `app/mvu.rs`.
- `AppEvent::Composer` covers draft changes, mode switches, submissions.
- `AppEvent::ToolPermission` bridges consent dialog choices back to the controller.
`AppEffect` represents side effects the imperative shell must execute:
- `SetStatus`: surfaces validation failures.
- `RequestSubmit`: hands control back to the async send pipeline.
- `ResolveToolConsent`: notifies the session controller of user decisions.
### Adding a new feature
1. Extend `AppModel` with the new view state.
2. Create a dedicated event enum (e.g. `PaletteEvent`) and nest it under `AppEvent`.
3. Add pure update logic that mutates the model and returns zero or more effects.
4. Handle emitted effects inside `ChatApp::handle_app_effects`.
Keep the event names UI-centric. Provider-side actions should remain in `owlen-core`.
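A sketch of steps 1-3 using a hypothetical palette sub-model (every name below is illustrative; the real definitions live in `app/mvu.rs`):
```rust
// Hypothetical palette sub-model; mirrors the AppModel/AppEvent/AppEffect split.
#[derive(Default)]
struct PaletteModel {
    buffer: String,
    visible: bool,
}

enum PaletteEvent {
    Toggle,
    Push(char),
    Pop,
}

// Side effects the imperative shell must execute on the model's behalf.
enum PaletteEffect {
    SetStatus(String),
}

fn update_palette(model: &mut PaletteModel, event: PaletteEvent) -> Vec<PaletteEffect> {
    match event {
        PaletteEvent::Toggle => {
            model.visible = !model.visible;
            Vec::new()
        }
        PaletteEvent::Push(c) => {
            model.buffer.push(c);
            Vec::new()
        }
        PaletteEvent::Pop => {
            if model.buffer.pop().is_none() {
                // Popping an empty buffer is surfaced rather than silently ignored.
                return vec![PaletteEffect::SetStatus("nothing to delete".into())];
            }
            Vec::new()
        }
    }
}
```
The update function stays pure: it mutates only the model and describes side effects as data for the shell to run.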
---
## Feature Migration Checklist
| Feature | Scope | MVU tasks | Status |
| --- | --- | --- | --- |
| Composer (input buffer) | Draft text, submission workflow | ✅ `ComposerModel`, `ComposerEvent`, `SubmissionOutcome` | ✅ Complete |
| Tool consent dialog | Approval / denial flow | ✅ `AppEvent::ToolPermission`, `AppEffect::ResolveToolConsent` | ✅ Complete |
| Chat timeline | Message ordering, cursor, scrollback | Model struct for timeline + events for history updates | ☐ TODO |
| Thinking pane | Agent reasoning text, auto-scroll | Model + event to toggle visibility and append lines | ☐ TODO |
| Model picker | Filters, search, selection | Convert `ModelSelectorItem` list + search metadata into MVU | ☐ TODO |
| Command palette | Suggestions, history, apply actions | Move palette state into `AppModel` and surface events | ☐ TODO |
| File workspace | Pane layout, file tree focus | Represent pane tree in MVU, drive focus + resize events | ☐ TODO |
| Toasts & status bar | Transient notifications | Consider MVU-managed queue with explicit events | ☐ TODO |
When you pick up one of the TODO rows, document the plan in the PR description and link back to this table.
---
## Migration Playbook
1. **Inventory state**: list every field in `ChatApp` that your feature touches.
2. **Define view model**: move the persistent state into `AppModel` (or a new sub-struct).
3. **Write events**: describe all user intents and background updates as `AppEvent` variants.
4. **Translate side effects**: whenever the update logic needs to call into async code, emit an `AppEffect`. Handle it inside `handle_app_effects`.
5. **Refactor call sites**: replace direct mutations with `apply_app_event` calls.
6. **Write tests**: cover the pure update function with table-driven unit tests.
7. **Remove duplicates**: once the MVU path handles everything, delete the legacy branch in `chat_app.rs`.
This flow keeps commits reviewable and avoids breaking the live UI during migration.
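For step 6, table-driven cases over a pure update function stay short. A sketch against the hypothetical palette model from the event taxonomy section:
```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn update_palette_is_pure_and_predictable() {
        // Each case is (events to apply, expected buffer afterwards).
        let cases = vec![
            (vec![PaletteEvent::Push('m'), PaletteEvent::Push('o')], "mo"),
            (vec![PaletteEvent::Push('m'), PaletteEvent::Pop], ""),
        ];
        for (events, expected) in cases {
            let mut model = PaletteModel::default();
            for event in events {
                let _effects = update_palette(&mut model, event);
            }
            assert_eq!(model.buffer, expected);
        }
    }
}
```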
---
## Testing Guidance
- **Unit tests**: cover the pure update functions inside `app/mvu.rs`.
- **Integration tests**: add scenarios to `crates/owlen-tui/tests/agent_flow_ui.rs` when side effects change.
- **Golden behaviour**: ensure the ratatui renderers still consume read-only data; add lightweight snapshot tests if needed.
- **Manual verification**: run `cargo run -p owlen-cli` to open the TUI and confirm the migrated feature behaves as expected.
Every new MVU feature should land with unit tests plus a note about manual validation.
---
## Tracking TODOs
- Keep this file up to date when you migrate a feature.
- Add inline `// TODO(mvu)` tags in code with a short description so they are easy to grep.
- Use the `docs/` folder for design notes; avoid long comment blocks inside the code.
Future contributors should be able to glance at this document, see what is done, and understand where to continue the migration.
---
Questions? Reach out in the Owlen discussion board or drop a note in the relevant PR thread. Consistent updates here will keep MVU adoption predictable for everyone.

scripts/gen-repo-map.sh Executable file

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
OUTPUT_PATH="${1:-${REPO_ROOT}/docs/repo-map.md}"
if ! command -v tree >/dev/null 2>&1; then
echo "error: the 'tree' command is required to regenerate the repo map. Install it (e.g., 'sudo pacman -S tree') and re-run this script." >&2
exit 1
fi
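# Wildcard patterns (one per |-separated entry) pruned from the tree output.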
EXCLUDES='target|\.git|\.github|node_modules|dist|images|themes|dev|\.venv'
TMP_FILE="$(mktemp)"
trap 'rm -f "${TMP_FILE}"' EXIT
pushd "${REPO_ROOT}" >/dev/null
tree -a -L 2 --dirsfirst --prune -I "${EXCLUDES}" > "${TMP_FILE}"
popd >/dev/null
{
printf '# Repo Map\n\n'
printf '> Generated by `scripts/gen-repo-map.sh`. Regenerate when the layout changes.\n\n'
printf '```text\n'
cat "${TMP_FILE}"
printf '```\n'
} > "${OUTPUT_PATH}"
echo "Repo map written to ${OUTPUT_PATH}"

scripts/release-notes.sh Executable file

@@ -0,0 +1,57 @@
#!/usr/bin/env bash
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
CHANGELOG="${REPO_ROOT}/CHANGELOG.md"
TAG="${1:-}"
OUTPUT="${2:-}"
if [[ -z "${TAG}" ]]; then
echo "usage: $0 <tag> [output-file]" >&2
exit 1
fi
TAG="${TAG#v}"
TAG="${TAG#V}"
if [[ ! -f "${CHANGELOG}" ]]; then
echo "error: CHANGELOG.md not found at ${CHANGELOG}" >&2
exit 1
fi
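# Extract the tag's changelog section with an inline Python helper (no extra deps).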
NOTES=$(TAG="${TAG}" CHANGELOG_PATH="${CHANGELOG}" python - <<'PY'
import os
import re
import sys
from pathlib import Path
changelog_path = Path(os.environ['CHANGELOG_PATH'])
tag = os.environ['TAG']
text = changelog_path.read_text(encoding='utf-8')
pattern = re.compile(rf'^## \[{re.escape(tag)}\]\s*(?:-.*)?$', re.MULTILINE)
match = pattern.search(text)
if not match:
sys.stderr.write(f"No changelog section found for tag {tag}.\n")
sys.exit(1)
start = match.end()
rest = text[start:]
next_heading = re.search(r'^## \[', rest, re.MULTILINE)
section = rest[:next_heading.start()] if next_heading else rest
lines = [line.rstrip() for line in section.strip().splitlines()]
print('\n'.join(lines))
PY
)
if [[ -z "${NOTES}" ]]; then
echo "error: no content generated for tag ${TAG}" >&2
exit 1
fi
if [[ -n "${OUTPUT}" ]]; then
printf '%s\n' "${NOTES}" > "${OUTPUT}"
else
printf '%s\n' "${NOTES}"
fi

xtask/Cargo.toml Normal file

@@ -0,0 +1,9 @@
[package]
name = "xtask"
version = "0.1.0"
edition.workspace = true
publish = false
[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["derive"] }

xtask/src/main.rs Normal file

@@ -0,0 +1,162 @@
use std::path::{Path, PathBuf};
use std::process::Command;
use anyhow::{Context, Result, bail};
use clap::{Parser, Subcommand};
#[derive(Parser)]
#[command(author, version, about = "Owlen developer tasks", long_about = None)]
struct Xtask {
#[command(subcommand)]
command: Task,
}
#[derive(Subcommand)]
enum Task {
/// Format the workspace (use --check to verify without writing).
Fmt {
#[arg(long, help = "Run rustfmt in check mode")]
check: bool,
},
/// Run clippy with all warnings elevated to errors.
Lint,
/// Execute the full workspace test suite.
Test,
/// Run coverage via cargo-llvm-cov (requires the tool to be installed).
Coverage,
/// Launch the default Owlen CLI binary (owlen) with optional args.
DevRun {
#[arg(last = true, help = "Arguments forwarded to `owlen`")]
args: Vec<String>,
},
/// Composite release validation (fmt --check, clippy, test).
ReleaseCheck,
/// Regenerate docs/repo-map.md (accepts optional output path).
GenRepoMap {
#[arg(long, value_name = "PATH", help = "Override the repo map output path")]
output: Option<PathBuf>,
},
}
fn main() -> Result<()> {
let cli = Xtask::parse();
match cli.command {
Task::Fmt { check } => fmt(check),
Task::Lint => lint(),
Task::Test => test(),
Task::Coverage => coverage(),
Task::DevRun { args } => dev_run(args),
Task::ReleaseCheck => release_check(),
Task::GenRepoMap { output } => gen_repo_map(output),
}
}
fn fmt(check: bool) -> Result<()> {
let mut args = vec!["fmt".to_string(), "--all".to_string()];
if check {
args.push("--".to_string());
args.push("--check".to_string());
}
run_cargo(args)
}
fn lint() -> Result<()> {
run_cargo(vec![
"clippy".into(),
"--workspace".into(),
"--all-features".into(),
"--".into(),
"-D".into(),
"warnings".into(),
])
}
fn test() -> Result<()> {
run_cargo(vec![
"test".into(),
"--workspace".into(),
"--all-features".into(),
])
}
fn coverage() -> Result<()> {
run_cargo(vec![
"llvm-cov".into(),
"--workspace".into(),
"--all-features".into(),
"--summary-only".into(),
])
.with_context(|| "install `cargo llvm-cov` to use the coverage task".to_string())
}
fn dev_run(args: Vec<String>) -> Result<()> {
let mut command_args = vec![
"run".into(),
"-p".into(),
"owlen-cli".into(),
"--bin".into(),
"owlen".into(),
];
if !args.is_empty() {
command_args.push("--".into());
command_args.extend(args);
}
run_cargo(command_args)
}
fn release_check() -> Result<()> {
fmt(true)?;
lint()?;
test()?;
Ok(())
}
fn gen_repo_map(output: Option<PathBuf>) -> Result<()> {
let script = workspace_root().join("scripts/gen-repo-map.sh");
if !script.exists() {
bail!("repo map script not found at {}", script.display());
}
let mut cmd = Command::new(&script);
cmd.current_dir(workspace_root());
if let Some(path) = output {
cmd.arg(path);
}
let status = cmd
.status()
.with_context(|| format!("failed to run {}", script.display()))?;
if !status.success() {
bail!(
"{} exited with status {}",
script.display(),
status.code().unwrap_or_default()
);
}
Ok(())
}
fn run_cargo(args: Vec<String>) -> Result<()> {
let mut cmd = Command::new("cargo");
cmd.current_dir(workspace_root());
cmd.args(&args);
let status = cmd
.status()
.with_context(|| format!("failed to run cargo {}", args.join(" ")))?;
if !status.success() {
bail!(
"`cargo {}` exited with status {}",
args.join(" "),
status.code().unwrap_or_default()
);
}
Ok(())
}
fn workspace_root() -> PathBuf {
Path::new(env!("CARGO_MANIFEST_DIR"))
.parent()
.expect("xtask has a parent directory")
.to_path_buf()
}