commit 5bc0e02cd3bbffa1a2a3b661fc0f04ecfdb8ffda Author: vikingowl Date: Sat Sep 27 05:41:46 2025 +0200 Add `App` core struct with event-handling and initialization logic for TUI. diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..da50599 --- /dev/null +++ b/.gitignore @@ -0,0 +1,102 @@ +### Rust template +# Generated by Cargo +# will have compiled files and executables +debug/ +target/ +dev/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +# MSVC Windows builds of rustc generate these, which store debugging information +*.pdb + +# RustRover +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts
+# .idea/compiler.xml
+# .idea/jarRepositories.xml
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# SonarLint plugin
+.idea/sonarlint/
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..20cff15
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,51 @@
+[workspace]
+resolver = "2"
+members = [
+    "crates/owlen-core",
+    "crates/owlen-tui",
+    "crates/owlen-cli",
+    "crates/owlen-ollama",
+]
+exclude = []
+
+[workspace.dependencies]
+# Async runtime and utilities
+tokio = { version = "1.0", features = ["full"] }
+tokio-stream = "0.1"
+tokio-util = { version = "0.7", features = ["rt"] }
+futures = "0.3"
+futures-util = "0.3"
+
+# TUI framework
+ratatui = "0.28"
+crossterm = "0.28"
+
+# HTTP client and JSON handling
+reqwest = { version = "0.12", features = ["json", "stream"] }
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+
+# Utilities
+uuid = { version = "1.0", features = ["v4", "serde"] }
+anyhow = "1.0"
+thiserror = "1.0"
+
+# Configuration
+toml = "0.8"
+shellexpand = "3.1"
+
+# Database
+sled = "0.34"
+
+# For better text handling
+textwrap = "0.16"
+
+# Async traits
+async-trait = "0.1"
+
+# CLI framework
+clap = { version = "4.0", features = ["derive"] }
+
+# Dev dependencies
+tempfile = "3.8"
+tokio-test = "0.4"
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8420e0e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,242 @@
+# OWLEN
+
+A terminal user interface (TUI) for interacting with Ollama models, similar to `claude code` or `gemini-cli` but using Ollama as the backend.
+
+## Features
+
+🤖 **AI Chat Interface**: Interactive conversations with Ollama models
+🔄 **Real-time Streaming**: See responses as they're generated
+📝 **Multi-model Support**: Switch between different Ollama models
+⌨️ **Vim-inspired Keys**: Intuitive keyboard navigation
+🎨 **Rich UI**: Clean, modern terminal interface with syntax highlighting
+📜 **Conversation History**: Keep track of your chat history
+🚀 **Fast & Lightweight**: Built in Rust for performance
+
+## Prerequisites
+
+- [Ollama](https://ollama.ai/) installed and running
+- Rust 1.70+ (for building from source)
+
+## Installation
+
+### Build from Source
+
+```bash
+git clone https://github.com/yourusername/owlen
+cd owlen
+cargo build --release
+```
+
+This will build two executables: `owlen` (for general chat) and `owlen-code` (for code-focused interactions).
+
+## Quick Start
+
+1. **Start Ollama**: Make sure Ollama is running on your system:
+   ```bash
+   ollama serve
+   ```
+
+2. **Pull a Model**: Download a model to chat with:
+   ```bash
+   ollama pull llama3.2
+   ```
+
+3. **Run OWLEN (General Chat)**:
+   ```bash
+   ./target/release/owlen
+   # Or using cargo:
+   cargo run
+   ```
+
+4. **Run OWLEN (Code Mode)**:
+   ```bash
+   ./target/release/owlen-code
+   # Or using cargo:
+   cargo run --bin owlen-code
+   ```
+
+## Usage (General Chat Mode)
+
+### Key Bindings
+
+#### Normal Mode (Default)
+- `i` - Enter input mode to type a message
+- `m` - Open model selection menu
+- `c` - Clear current conversation
+- `r` - Refresh available models list
+- `j`/`k` - Scroll up/down in chat history
+- `↑`/`↓` - Scroll up/down in chat history
+- `q` - Quit application
+
+#### Input Mode
+- `Enter` - Send message
+- `Esc` - Cancel input and return to normal mode
+- `←`/`→` - Move cursor left/right
+- `Backspace` - Delete character
+
+#### Model Selection Mode
+- `↑`/`↓` - Navigate model list
+- `Enter` - Select model
+- `Esc` - Cancel selection
+
+### Interface Layout
+
+```
+┌─ OWLEN ──────────────────────────────┐
+│ 🦉 OWLEN - AI Assistant              │
+│ Model: llama3.2:latest               │
+├──────────────────────────────────────┤
+│                                      │
+│ 👤 You:                              │
+│ Hello! Can you help me with Rust?    │
+│                                      │
+│ 🤖 Assistant:                        │
+│ Of course! I'd be happy to help      │
+│ you with Rust programming...         │
+│                                      │
+├──────────────────────────────────────┤
+│ Input (Press 'i' to start typing)    │
+│                                      │
+├──────────────────────────────────────┤
+│ NORMAL | Ready                       │
+│ Help: i:Input m:Model c:Clear q:Quit │
+└──────────────────────────────────────┘
+```
+
+## Code Mode (`owlen-code`)
+
+The `owlen-code` binary provides a specialized interface for interacting with LLMs for code-related tasks. It is designed to be used in conjunction with a code editor or IDE, allowing you to quickly get assistance with debugging, code generation, refactoring, and more.
+
+### Key Bindings
+
+(`owlen-code` currently shares the general chat mode key bindings; this section will be expanded if it gains distinct bindings or features.)
+
+- `i` - Enter input mode to type a message
+- `m` - Open model selection menu
+- `c` - Clear current conversation
+- `r` - Refresh available models list
+- `j`/`k` - Scroll up/down in chat history
+- `↑`/`↓` - Scroll up/down in chat history
+- `q` - Quit application
+
+#### Input Mode
+- `Enter` - Send message
+- `Esc` - Cancel input and return to normal mode
+- `←`/`→` - Move cursor left/right
+- `Backspace` - Delete character
+
+#### Model Selection Mode
+- `↑`/`↓` - Navigate model list
+- `Enter` - Select model
+- `Esc` - Cancel selection
+
+## Configuration
+
+The application connects to Ollama on `http://localhost:11434` by default. You can change this via the `providers.ollama.base_url` entry in `~/.config/owlen/config.toml`.
+
+## Use Cases
+
+### Code Assistant
+Perfect for getting help with programming tasks:
+
+- Debugging code issues
+- Learning new programming concepts
+- Code reviews and suggestions
+- Architecture discussions
+
+### General AI Chat
+Use it as a general-purpose AI assistant:
+
+- Writing assistance
+- Research questions
+- Creative projects
+- Learning new topics
+
+## Architecture
+
+The application is built with a modular architecture, composed of several crates:
+
+- **owlen-core**: Provides core traits and types for the LLM client, acting as the foundation.
+- **owlen-ollama**: Implements the Ollama API client with streaming support, handling communication with Ollama models.
+- **owlen-tui**: Manages the Terminal User Interface rendering and interactions using `ratatui`.
+- **owlen-cli**: The command-line interface, which orchestrates the `owlen-tui` and `owlen-ollama` crates to provide the main `owlen` and `owlen-code` binaries.
+
+Within these crates, the key modules are:
+
+- **main.rs** - Application entry point and terminal setup
+- **app.rs** - Core application state and event handling
+- **ollama.rs** - Ollama API client with streaming support
+- **ui.rs** - Terminal UI rendering with ratatui
+- **events.rs** - Terminal event handling and processing
+
+## Contributing
+
+1. Fork the repository
+2. Create a feature branch
+3. Make your changes
+4. Add tests if applicable
+5. Submit a pull request
+
+## License
+
+This project is licensed under the MIT License - see the LICENSE file for details.
+
+## Acknowledgments
+
+- [Ollama](https://ollama.ai/) - Local LLM inference
+- [Ratatui](https://github.com/ratatui/ratatui) - Rust TUI library
+- [Claude Code](https://claude.ai/code) - Inspiration for the interface
+- [Gemini CLI](https://github.com/google-gemini/gemini-cli) - CLI patterns
+
+## Troubleshooting
+
+### Ollama Not Found
+```
+Error: Failed to connect to Ollama
+```
+
+**Solution**: Make sure Ollama is installed and running:
+```bash
+# Install Ollama
+curl -fsSL https://ollama.ai/install.sh | sh
+
+# Start Ollama
+ollama serve
+
+# Pull a model
+ollama pull llama3.2
+```
+
+### No Models Available
+```
+No models available
+```
+
+**Solution**: Pull at least one model:
+```bash
+ollama pull llama3.2
+# or
+ollama pull codellama
+# or
+ollama pull mistral
+```
+
+### Connection Refused
+```
+Connection refused (os error 61)
+```
+
+**Solution**: Check if Ollama is running on the correct port:
+```bash
+# Default port is 11434
+curl http://localhost:11434/api/tags
+```
+
+## Roadmap
+
+- [ ] Configuration file support
+- [ ] Custom Ollama host configuration
+- [ ] Session persistence
+- [ ] Export conversations
+- [ ] Syntax highlighting for code blocks
+- [ ] Plugin system for custom commands
+- [ ] Multiple conversation tabs
+- [ ] Search within conversations
diff --git a/crates/owlen-cli/Cargo.toml b/crates/owlen-cli/Cargo.toml
new file mode 100644
index 0000000..9d98326
--- /dev/null
+++ b/crates/owlen-cli/Cargo.toml
@@ -0,0 +1,34 @@
+[package]
+name = "owlen-cli"
+version = "0.1.0"
+edition = "2021"
+description = "Command-line interface for OWLEN LLM client"
+
+[[bin]]
+name = "owlen"
+path = "src/main.rs"
+
+[[bin]]
+name = "owlen-code"
+path = "src/code_main.rs"
+
+[dependencies]
+owlen-core = { path = "../owlen-core" }
+owlen-tui = { path = "../owlen-tui" }
+owlen-ollama = { path = "../owlen-ollama" }
+
+# CLI framework
+clap = { version = "4.0", features = ["derive"] }
+
+# Async runtime
+tokio = { workspace = true }
+tokio-util = { workspace = true }
+
+# TUI framework
+ratatui = { workspace = true }
+crossterm = { workspace = true }
+
+# Utilities
+anyhow = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
\ No newline at end of file
diff --git a/crates/owlen-cli/src/code_main.rs b/crates/owlen-cli/src/code_main.rs
new file mode 100644
index 0000000..7f0f8ed
--- /dev/null
+++ b/crates/owlen-cli/src/code_main.rs
@@ -0,0 +1,103 @@
+//! OWLEN Code Mode - TUI client optimized for coding assistance
+
+use anyhow::Result;
+use clap::{Arg, Command};
+use owlen_core::session::SessionController;
+use owlen_ollama::OllamaProvider;
+use owlen_tui::{config, ui, AppState, CodeApp, Event, EventHandler, SessionEvent};
+use std::io;
+use std::sync::Arc;
+use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
+
+use crossterm::{
+    event::{DisableMouseCapture, EnableMouseCapture},
+    execute,
+    terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
+};
+use ratatui::{backend::CrosstermBackend, Terminal};
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let matches = Command::new("owlen-code")
+        .about("OWLEN Code Mode - TUI optimized for programming assistance")
+        .version("0.2.0")
+        .arg(
+            Arg::new("model")
+                .short('m')
+                .long("model")
+                .value_name("MODEL")
+                .help("Preferred model to use for this session"),
+        )
+        .get_matches();
+
+    let mut config = config::try_load_config().unwrap_or_default();
+
+    if let Some(model) = matches.get_one::<String>("model") {
+        config.general.default_model = Some(model.clone());
+    }
+
+    let provider_cfg = config::ensure_ollama_config(&mut config).clone();
+    let provider = Arc::new(OllamaProvider::from_config(
+        &provider_cfg,
+        Some(&config.general),
+    )?);
+
+    let controller = SessionController::new(provider, config.clone());
+    let (mut app, mut session_rx) = CodeApp::new(controller);
+    app.inner_mut().initialize_models().await?;
+
+    let cancellation_token = CancellationToken::new();
+    let (event_tx, event_rx) = mpsc::unbounded_channel();
+    let event_handler = EventHandler::new(event_tx, cancellation_token.clone());
+    let event_handle = tokio::spawn(async move { event_handler.run().await });
+
+    enable_raw_mode()?;
+    let mut stdout = io::stdout();
+    execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
+    let backend = CrosstermBackend::new(stdout);
+    let mut terminal = Terminal::new(backend)?;
+
+    let result = run_app(&mut terminal, &mut app, event_rx, &mut session_rx).await;
+
+    cancellation_token.cancel();
+    event_handle.await?;
+
+    config::save_config(app.inner().config())?;
+
+    disable_raw_mode()?;
+    execute!(
+        terminal.backend_mut(),
+        LeaveAlternateScreen,
+        DisableMouseCapture
+    )?;
+    terminal.show_cursor()?;
+
+    if let Err(err) = result {
+        println!("{err:?}");
+    }
+
+    Ok(())
+}
+
+async fn run_app(
+    terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
+    app: &mut CodeApp,
+    mut event_rx: mpsc::UnboundedReceiver<Event>,
+    session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
+) -> Result<()> {
+    loop {
+        terminal.draw(|f| ui::render_chat(f, app.inner()))?;
+
+        tokio::select! {
+            Some(event) = event_rx.recv() => {
+                if let AppState::Quit = app.handle_event(event).await? {
+                    return Ok(());
+                }
+            }
+            Some(session_event) = session_rx.recv() => {
+                app.handle_session_event(session_event)?;
+            }
+        }
+    }
+}
diff --git a/crates/owlen-cli/src/main.rs b/crates/owlen-cli/src/main.rs
new file mode 100644
index 0000000..4f21a86
--- /dev/null
+++ b/crates/owlen-cli/src/main.rs
@@ -0,0 +1,108 @@
+//! OWLEN CLI - Chat TUI client
+
+use anyhow::Result;
+use clap::{Arg, Command};
+use owlen_core::session::SessionController;
+use owlen_ollama::OllamaProvider;
+use owlen_tui::{config, ui, AppState, ChatApp, Event, EventHandler, SessionEvent};
+use std::io;
+use std::sync::Arc;
+use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
+
+use crossterm::{
+    event::{DisableMouseCapture, EnableMouseCapture},
+    execute,
+    terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
+};
+use ratatui::{backend::CrosstermBackend, Terminal};
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let matches = Command::new("owlen")
+        .about("OWLEN - A chat-focused TUI client for Ollama")
+        .version("0.2.0")
+        .arg(
+            Arg::new("model")
+                .short('m')
+                .long("model")
+                .value_name("MODEL")
+                .help("Preferred model to use for this session"),
+        )
+        .get_matches();
+
+    let mut config = config::try_load_config().unwrap_or_default();
+
+    if let Some(model) = matches.get_one::<String>("model") {
+        config.general.default_model = Some(model.clone());
+    }
+
+    // Prepare provider from configuration
+    let provider_cfg = config::ensure_ollama_config(&mut config).clone();
+    let provider = Arc::new(OllamaProvider::from_config(
+        &provider_cfg,
+        Some(&config.general),
+    )?);
+
+    let controller = SessionController::new(provider, config.clone());
+    let (mut app, mut session_rx) = ChatApp::new(controller);
+    app.initialize_models().await?;
+
+    // Event infrastructure
+    let cancellation_token = CancellationToken::new();
+    let (event_tx, event_rx) = mpsc::unbounded_channel();
+    let event_handler = EventHandler::new(event_tx, cancellation_token.clone());
+    let event_handle = tokio::spawn(async move { event_handler.run().await });
+
+    // Terminal setup
+    enable_raw_mode()?;
+    let mut stdout = io::stdout();
+    execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
+    let backend = CrosstermBackend::new(stdout);
+    let mut terminal = Terminal::new(backend)?;
+
+    let result = run_app(&mut terminal, &mut app, event_rx, &mut session_rx).await;
+
+    // Shutdown
+    cancellation_token.cancel();
+    event_handle.await?;
+
+    // Persist configuration updates (e.g., selected model)
+    config::save_config(app.config())?;
+
+    disable_raw_mode()?;
+    execute!(
+        terminal.backend_mut(),
+        LeaveAlternateScreen,
+        DisableMouseCapture
+    )?;
+    terminal.show_cursor()?;
+
+    if let Err(err) = result {
+        println!("{err:?}");
+    }
+
+    Ok(())
+}
+
+async fn run_app(
+    terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
+    app: &mut ChatApp,
+    mut event_rx: mpsc::UnboundedReceiver<Event>,
+    session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
+) -> Result<()> {
+    loop {
+        terminal.draw(|f| ui::render_chat(f, app))?;
+
+        tokio::select! {
+            Some(event) = event_rx.recv() => {
+                if let AppState::Quit = app.handle_event(event).await? {
+                    return Ok(());
+                }
+            }
+            Some(session_event) = session_rx.recv() => {
+                app.handle_session_event(session_event)?;
+            }
+        }
+    }
+}
diff --git a/crates/owlen-core/Cargo.toml b/crates/owlen-core/Cargo.toml
new file mode 100644
index 0000000..c63578e
--- /dev/null
+++ b/crates/owlen-core/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "owlen-core"
+version = "0.1.0"
+edition = "2021"
+description = "Core traits and types for OWLEN LLM client"
+
+[dependencies]
+serde = { workspace = true }
+serde_json = { workspace = true }
+uuid = { workspace = true }
+anyhow = { workspace = true }
+thiserror = { workspace = true }
+tokio = { workspace = true }
+futures = { workspace = true }
+tokio-stream = { workspace = true }
+async-trait = "0.1"
+textwrap = { workspace = true }
+toml = { workspace = true }
+shellexpand = { workspace = true }
+
+[dev-dependencies]
+tokio-test = { workspace = true }
diff --git a/crates/owlen-core/src/config.rs b/crates/owlen-core/src/config.rs
new file mode 100644
index 0000000..9be1689
--- /dev/null
+++ b/crates/owlen-core/src/config.rs
@@ -0,0 +1,342 @@
+use crate::provider::ProviderConfig;
+use crate::Result;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::time::Duration;
+
+/// Default location for the OWLEN configuration file
+pub const DEFAULT_CONFIG_PATH: &str = "~/.config/owlen/config.toml";
+
+/// Core configuration shared by all OWLEN clients
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Config {
+    /// General application settings
+    pub general: GeneralSettings,
+    /// Provider specific configuration keyed by provider name
+    #[serde(default)]
+    pub providers: HashMap<String, ProviderConfig>,
+    /// UI preferences that frontends can opt into
+    #[serde(default)]
+    pub ui: UiSettings,
+    /// Storage related options
+    #[serde(default)]
+    pub storage: StorageSettings,
+    /// Input handling preferences
+    #[serde(default)]
+    pub input: InputSettings,
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        let mut providers = HashMap::new();
+        providers.insert(
+            "ollama".to_string(),
+            ProviderConfig {
+                provider_type: "ollama".to_string(),
+                base_url: Some("http://localhost:11434".to_string()),
+                api_key: None,
+                extra: HashMap::new(),
+            },
+        );
+
+        Self {
+            general: GeneralSettings::default(),
+            providers,
+            ui: UiSettings::default(),
+            storage: StorageSettings::default(),
+            input: InputSettings::default(),
+        }
+    }
+}
+
+impl Config {
+    /// Load configuration from disk, falling back to defaults when missing
+    pub fn load(path: Option<&Path>) -> Result<Self> {
+        let path = match path {
+            Some(path) => path.to_path_buf(),
+            None => default_config_path(),
+        };
+
+        if path.exists() {
+            let content = fs::read_to_string(&path)?;
+            let mut config: Config =
+                toml::from_str(&content).map_err(|e| crate::Error::Config(e.to_string()))?;
+            config.ensure_defaults();
+            Ok(config)
+        } else {
+            Ok(Config::default())
+        }
+    }
+
+    /// Persist configuration to disk
+    pub fn save(&self, path: Option<&Path>) -> Result<()> {
+        let path = match path {
+            Some(path) => path.to_path_buf(),
+            None => default_config_path(),
+        };
+
+        if let Some(dir) = path.parent() {
+            fs::create_dir_all(dir)?;
+        }
+
+        let content =
+            toml::to_string_pretty(self).map_err(|e| crate::Error::Config(e.to_string()))?;
+        fs::write(path, content)?;
+        Ok(())
+    }
+
+    /// Get provider configuration by provider name
+    pub fn provider(&self, name: &str) -> Option<&ProviderConfig> {
+        self.providers.get(name)
+    }
+
+    /// Update or insert a provider configuration
+    pub fn upsert_provider(&mut self, name: impl Into<String>, config: ProviderConfig) {
+        self.providers.insert(name.into(), config);
+    }
+
+    /// Resolve default model in order of priority: explicit default, first cached model, provider fallback
+    pub fn resolve_default_model<'a>(
+        &'a self,
+        models: &'a [crate::types::ModelInfo],
+    ) -> Option<&'a str> {
+        if let Some(model) = self.general.default_model.as_deref() {
+            if models.iter().any(|m| m.id == model || m.name == model) {
+                return Some(model);
+            }
+        }
+
+        if let Some(first) = models.first() {
+            return Some(&first.id);
+        }
+
+        self.general.default_model.as_deref()
+    }
+
+    fn ensure_defaults(&mut self) {
+        if self.general.default_provider.is_empty() {
+            self.general.default_provider = "ollama".to_string();
+        }
+
+        if !self.providers.contains_key("ollama") {
+            self.providers.insert(
+                "ollama".to_string(),
+                ProviderConfig {
+                    provider_type: "ollama".to_string(),
+                    base_url: Some("http://localhost:11434".to_string()),
+                    api_key: None,
+                    extra: HashMap::new(),
+                },
+            );
+        }
+    }
+}
+
+/// Default configuration path with user home expansion
+pub fn default_config_path() -> PathBuf {
+    PathBuf::from(shellexpand::tilde(DEFAULT_CONFIG_PATH).as_ref())
+}
+
+/// General behaviour settings shared across clients
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GeneralSettings {
+    /// Default provider name for routing
+    pub default_provider: String,
+    /// Optional default model id
+    #[serde(default)]
+    pub default_model: Option<String>,
+    /// Whether streaming responses are preferred
+    #[serde(default = "GeneralSettings::default_streaming")]
+    pub enable_streaming: bool,
+    /// Optional path to a project context file automatically injected as system prompt
+    #[serde(default)]
+    pub project_context_file: Option<String>,
+    /// TTL for cached model listings in seconds
+    #[serde(default = "GeneralSettings::default_model_cache_ttl")]
+    pub model_cache_ttl_secs: u64,
+}
+
+impl GeneralSettings {
+    fn default_streaming() -> bool {
+        true
+    }
+
+    fn default_model_cache_ttl() -> u64 {
+        60
+    }
+
+    /// Duration representation of model cache TTL
+    pub fn model_cache_ttl(&self) -> Duration {
+        Duration::from_secs(self.model_cache_ttl_secs.max(5))
+    }
+}
+
+impl Default for GeneralSettings {
+    fn default() -> Self {
+        Self {
+            default_provider: "ollama".to_string(),
+            default_model: Some("llama3.2:latest".to_string()),
+            enable_streaming: Self::default_streaming(),
+            project_context_file: Some("OWLEN.md".to_string()),
+            model_cache_ttl_secs: Self::default_model_cache_ttl(),
+        }
+    }
+}
+
+/// UI preferences that consumers can respect as needed
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct UiSettings {
+    #[serde(default = "UiSettings::default_theme")]
+    pub theme: String,
+    #[serde(default = "UiSettings::default_word_wrap")]
+    pub word_wrap: bool,
+    #[serde(default = "UiSettings::default_max_history_lines")]
+    pub max_history_lines: usize,
+    #[serde(default = "UiSettings::default_show_role_labels")]
+    pub show_role_labels: bool,
+    #[serde(default = "UiSettings::default_wrap_column")]
+    pub wrap_column: u16,
+}
+
+impl UiSettings {
+    fn default_theme() -> String {
+        "default".to_string()
+    }
+
+    fn default_word_wrap() -> bool {
+        true
+    }
+
+    fn default_max_history_lines() -> usize {
+        2000
+    }
+
+    fn default_show_role_labels() -> bool {
+        true
+    }
+
+    fn default_wrap_column() -> u16 {
+        100
+    }
+}
+
+impl Default for UiSettings {
+    fn default() -> Self {
+        Self {
+            theme: Self::default_theme(),
+            word_wrap: Self::default_word_wrap(),
+            max_history_lines: Self::default_max_history_lines(),
+            show_role_labels: Self::default_show_role_labels(),
+            wrap_column: Self::default_wrap_column(),
+        }
+    }
+}
+
+/// Storage related preferences
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct StorageSettings {
+    #[serde(default = "StorageSettings::default_conversation_dir")]
+    pub conversation_dir: String,
+    #[serde(default = "StorageSettings::default_auto_save")]
+    pub auto_save_sessions: bool,
+    #[serde(default = "StorageSettings::default_max_sessions")]
+    pub max_saved_sessions: usize,
+    #[serde(default = "StorageSettings::default_session_timeout")]
+    pub session_timeout_minutes: u64,
+}
+
+impl StorageSettings {
+    fn default_conversation_dir() -> String {
+        "~/.local/share/owlen/conversations".to_string()
+    }
+
+    fn default_auto_save() -> bool {
+        true
+    }
+
+    fn default_max_sessions() -> usize {
+        25
+    }
+
+    fn default_session_timeout() -> u64 {
+        120
+    }
+
+    /// Resolve storage directory path
+    pub fn conversation_path(&self) -> PathBuf {
+        PathBuf::from(shellexpand::tilde(&self.conversation_dir).as_ref())
+    }
+}
+
+impl Default for StorageSettings {
+    fn default() -> Self {
+        Self {
+            conversation_dir: Self::default_conversation_dir(),
+            auto_save_sessions: Self::default_auto_save(),
+            max_saved_sessions: Self::default_max_sessions(),
+            session_timeout_minutes: Self::default_session_timeout(),
+        }
+    }
+}
+
+/// Input handling preferences shared across clients
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct InputSettings {
+    #[serde(default = "InputSettings::default_multiline")]
+    pub multiline: bool,
+    #[serde(default = "InputSettings::default_history_size")]
+    pub history_size: usize,
+    #[serde(default = "InputSettings::default_tab_width")]
+    pub tab_width: u8,
+    #[serde(default = "InputSettings::default_confirm_send")]
+    pub confirm_send: bool,
+}
+
+impl InputSettings {
+    fn default_multiline() -> bool {
+        true
+    }
+
+    fn default_history_size() -> usize {
+        100
+    }
+
+    fn default_tab_width() -> u8 {
+        4
+    }
+
+    fn default_confirm_send() -> bool {
+        false
+    }
+}
+
+impl Default for InputSettings {
+    fn default() -> Self {
+        Self {
+            multiline: Self::default_multiline(),
+            history_size: Self::default_history_size(),
+            tab_width: Self::default_tab_width(),
+            confirm_send: Self::default_confirm_send(),
+        }
+    }
+}
+
+/// Convenience accessor for an Ollama provider entry, creating a default if missing
+pub fn ensure_ollama_config(config: &mut Config) -> &ProviderConfig {
+    config
+        .providers
+        .entry("ollama".to_string())
+        .or_insert_with(|| ProviderConfig {
+            provider_type: "ollama".to_string(),
+            base_url: Some("http://localhost:11434".to_string()),
+            api_key: None,
+            extra: HashMap::new(),
+        })
+}
+
+/// Calculate absolute timeout for session data based on configuration
+pub fn session_timeout(config: &Config) -> Duration {
+    Duration::from_secs(config.storage.session_timeout_minutes.max(1) * 60)
+}
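For reference, a configuration file exercising these settings might look like the following sketch. Field names mirror the structs above; the values shown are the shipped defaults:

```toml
# ~/.config/owlen/config.toml
[general]
default_provider = "ollama"
default_model = "llama3.2:latest"
enable_streaming = true
project_context_file = "OWLEN.md"
model_cache_ttl_secs = 60

[providers.ollama]
provider_type = "ollama"
base_url = "http://localhost:11434"

[ui]
theme = "default"
word_wrap = true
max_history_lines = 2000
show_role_labels = true
wrap_column = 100

[storage]
conversation_dir = "~/.local/share/owlen/conversations"
auto_save_sessions = true
max_saved_sessions = 25
session_timeout_minutes = 120

[input]
multiline = true
history_size = 100
tab_width = 4
confirm_send = false
```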
diff --git a/crates/owlen-core/src/conversation.rs b/crates/owlen-core/src/conversation.rs
new file mode 100644
index 0000000..d69664a
--- /dev/null
+++ b/crates/owlen-core/src/conversation.rs
@@ -0,0 +1,289 @@
+use crate::types::{Conversation, Message};
+use crate::Result;
+use serde_json::{Number, Value};
+use std::collections::{HashMap, VecDeque};
+use std::time::{Duration, Instant};
+use uuid::Uuid;
+
+const STREAMING_FLAG: &str = "streaming";
+const LAST_CHUNK_TS: &str = "last_chunk_ts";
+const PLACEHOLDER_FLAG: &str = "placeholder";
+
+/// Manage active and historical conversations, including streaming updates.
+pub struct ConversationManager {
+    active: Conversation,
+    history: VecDeque<Conversation>,
+    message_index: HashMap<Uuid, usize>,
+    streaming: HashMap<Uuid, StreamingMetadata>,
+    max_history: usize,
+}
+
+#[derive(Debug, Clone)]
+pub struct StreamingMetadata {
+    started: Instant,
+    last_update: Instant,
+}
+
+impl ConversationManager {
+    /// Create a new conversation manager with a default model
+    pub fn new(model: impl Into<String>) -> Self {
+        Self::with_history_capacity(model, 32)
+    }
+
+    /// Create with explicit history capacity
+    pub fn with_history_capacity(model: impl Into<String>, max_history: usize) -> Self {
+        let conversation = Conversation::new(model.into());
+        Self {
+            active: conversation,
+            history: VecDeque::new(),
+            message_index: HashMap::new(),
+            streaming: HashMap::new(),
+            max_history: max_history.max(1),
+        }
+    }
+
+    /// Access the active conversation
+    pub fn active(&self) -> &Conversation {
+        &self.active
+    }
+
+    /// Mutable access to the active conversation (auto refreshing indexes afterwards)
+    fn active_mut(&mut self) -> &mut Conversation {
+        &mut self.active
+    }
+
+    /// Replace the active conversation with a provided one, archiving the existing conversation if it contains data
+    pub fn load(&mut self, conversation: Conversation) {
+        if !self.active.messages.is_empty() {
+            self.archive_active();
+        }
+
+        self.message_index.clear();
+        for (idx, message) in conversation.messages.iter().enumerate() {
+            self.message_index.insert(message.id, idx);
+        }
+
+        self.stream_reset();
+        self.active = conversation;
+    }
+
+    /// Start a brand new conversation, archiving the previous one
+    pub fn start_new(&mut self, model: Option<String>, name: Option<String>) {
+        self.archive_active();
+        let model = model.unwrap_or_else(|| self.active.model.clone());
+        self.active = Conversation::new(model);
+        self.active.name = name;
+        self.message_index.clear();
+        self.stream_reset();
+    }
+
+    /// Archive the active conversation into history
+    pub fn archive_active(&mut self) {
+        if self.active.messages.is_empty() {
+            return;
+        }
+
+        let mut archived = self.active.clone();
+        archived.updated_at = std::time::SystemTime::now();
+        self.history.push_front(archived);
+
+        while self.history.len() > self.max_history {
+            self.history.pop_back();
+        }
+    }
+
+    /// Get immutable history
+    pub fn history(&self) -> impl Iterator<Item = &Conversation> {
+        self.history.iter()
+    }
+
+    /// Add a user message and return its identifier
+    pub fn push_user_message(&mut self, content: impl Into<String>) -> Uuid {
+        let message = Message::user(content.into());
+        self.register_message(message)
+    }
+
+    /// Add a system message and return its identifier
+    pub fn push_system_message(&mut self, content: impl Into<String>) -> Uuid {
+        let message = Message::system(content.into());
+        self.register_message(message)
+    }
+
+    /// Add an assistant message (non-streaming) and return its identifier
+    pub fn push_assistant_message(&mut self, content: impl Into<String>) -> Uuid {
+        let message = Message::assistant(content.into());
+        self.register_message(message)
+    }
+
+    /// Push an arbitrary message into the active conversation
+    pub fn push_message(&mut self, message: Message) -> Uuid {
+        self.register_message(message)
+    }
+
+    /// Start tracking a streaming assistant response, returning the message id to update
+    pub fn start_streaming_response(&mut self) -> Uuid {
+        let mut message = Message::assistant(String::new());
+        message
+            .metadata
+            .insert(STREAMING_FLAG.to_string(), Value::Bool(true));
+        let id = message.id;
+        self.register_message(message);
+        self.streaming.insert(
+            id,
+            StreamingMetadata {
+                started: Instant::now(),
+                last_update: Instant::now(),
+            },
+        );
+        id
+    }
+
+    /// Append streaming content to an assistant message
+    pub fn append_stream_chunk(
+        &mut self,
+        message_id: Uuid,
+        chunk: &str,
+        is_final: bool,
+    ) -> Result<()> {
+        let index = self
+            .message_index
+            .get(&message_id)
+            .copied()
+            .ok_or_else(|| crate::Error::Unknown(format!("Unknown message id: {message_id}")))?;
+
+        let conversation = self.active_mut();
+        if let Some(message) = conversation.messages.get_mut(index) {
+            let was_placeholder = message
+                .metadata
+                .remove(PLACEHOLDER_FLAG)
+                .and_then(|v| v.as_bool())
+                .unwrap_or(false);
+
+            if was_placeholder {
+                message.content.clear();
+            }
+
+            if !chunk.is_empty() {
+                message.content.push_str(chunk);
+            }
+            message.timestamp = std::time::SystemTime::now();
+            let millis = std::time::SystemTime::now()
+                .duration_since(std::time::UNIX_EPOCH)
+                .unwrap_or_default()
+                .as_millis() as u64;
+            message.metadata.insert(
+                LAST_CHUNK_TS.to_string(),
+                Value::Number(Number::from(millis)),
+            );
+
+            if is_final {
+                message
+                    .metadata
+                    .insert(STREAMING_FLAG.to_string(), Value::Bool(false));
+                self.streaming.remove(&message_id);
+            } else if let Some(info) = self.streaming.get_mut(&message_id) {
+                info.last_update = Instant::now();
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Set placeholder text for a streaming message
+    pub fn set_stream_placeholder(
+        &mut self,
+        message_id: Uuid,
+        text: impl Into<String>,
+    ) -> Result<()> {
+        let index = self
+            .message_index
+            .get(&message_id)
+            .copied()
+            .ok_or_else(|| crate::Error::Unknown(format!("Unknown message id: {message_id}")))?;
+
+        if let Some(message) = self.active_mut().messages.get_mut(index) {
+            message.content = text.into();
+            message.timestamp = std::time::SystemTime::now();
+            message
+                .metadata
+                .insert(PLACEHOLDER_FLAG.to_string(), Value::Bool(true));
+        }
+
+        Ok(())
+    }
+
+    /// Update the active model (used when user changes model mid session)
+    pub fn set_model(&mut self, model: impl Into<String>) {
+        self.active.model = model.into();
+        self.active.updated_at = std::time::SystemTime::now();
+    }
+
+    /// Provide read access to the cached streaming metadata
+    pub fn streaming_metadata(&self, message_id: &Uuid) -> Option<StreamingMetadata> {
+        self.streaming.get(message_id).cloned()
+    }
+
+    /// Remove inactive streaming messages that have stalled beyond the provided timeout
+    pub fn expire_stalled_streams(&mut self, idle_timeout: Duration) -> Vec<Uuid> {
+        let cutoff = Instant::now() - idle_timeout;
+        let mut expired = Vec::new();
+
+        self.streaming.retain(|id, meta| {
+            if meta.last_update < cutoff {
+                expired.push(*id);
+                false
+            } else {
+                true
+            }
+        });
+
+        expired
+    }
+
+    /// Clear all state
+    pub fn clear(&mut self) {
+        self.active.clear();
+        self.history.clear();
+        self.message_index.clear();
+        self.streaming.clear();
+    }
+
+    fn register_message(&mut self, message: Message) -> Uuid {
+        let id = message.id;
+        let idx;
+        {
+            let conversation = self.active_mut();
+            idx = conversation.messages.len();
+            conversation.messages.push(message);
+            conversation.updated_at = std::time::SystemTime::now();
+        }
+        self.message_index.insert(id, idx);
+        id
+    }
+
+    fn stream_reset(&mut self) {
+        self.streaming.clear();
+    }
+}
+
+impl StreamingMetadata {
+    /// Duration since the stream started
+    pub fn elapsed(&self) -> Duration {
+        self.started.elapsed()
+    }
+
+    /// Duration since the last chunk was received
+    pub fn idle_duration(&self) -> Duration {
+        self.last_update.elapsed()
+    }
+
+    /// Timestamp when streaming started
+    pub fn started_at(&self) -> Instant {
+        self.started
+    }
+
+    /// Timestamp of most recent update
+    pub fn last_update_at(&self) -> Instant {
+        self.last_update
+    }
+}
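A minimal sketch of how a frontend might drive the streaming API above. `stream_demo` and its `chunks` input are hypothetical; in the real client the chunks would come from a provider stream:

```rust
// Sketch: driving ConversationManager's streaming API by hand.
use owlen_core::conversation::ConversationManager;

fn stream_demo(chunks: Vec<&str>) -> owlen_core::Result<()> {
    let mut manager = ConversationManager::new("llama3.2:latest");
    manager.push_user_message("Explain lifetimes in Rust.");

    // Create the empty assistant message that chunks will be appended to.
    let id = manager.start_streaming_response();
    manager.set_stream_placeholder(id, "Thinking...")?;

    let last = chunks.len().saturating_sub(1);
    for (i, chunk) in chunks.iter().enumerate() {
        // The final chunk clears the streaming flag and stops tracking.
        manager.append_stream_chunk(id, chunk, i == last)?;
    }

    // Once the final chunk lands, the message is no longer tracked.
    assert!(manager.streaming_metadata(&id).is_none());
    Ok(())
}
```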
diff --git a/crates/owlen-core/src/formatting.rs b/crates/owlen-core/src/formatting.rs
new file mode 100644
index 0000000..3f85972
--- /dev/null
+++ b/crates/owlen-core/src/formatting.rs
@@ -0,0 +1,61 @@
+use crate::types::Message;
+use textwrap::{wrap, Options};
+
+/// Formats messages for display across different clients.
+#[derive(Debug, Clone)]
+pub struct MessageFormatter {
+    wrap_width: usize,
+    show_role_labels: bool,
+    preserve_empty_lines: bool,
+}
+
+impl MessageFormatter {
+    /// Create a new formatter
+    pub fn new(wrap_width: usize, show_role_labels: bool) -> Self {
+        Self {
+            wrap_width: wrap_width.max(20),
+            show_role_labels,
+            preserve_empty_lines: true,
+        }
+    }
+
+    /// Override whether empty lines should be preserved
+    pub fn with_preserve_empty(mut self, preserve: bool) -> Self {
+        self.preserve_empty_lines = preserve;
+        self
+    }
+
+    /// Render a message to a list of visual lines ready for display
+    pub fn format_message(&self, message: &Message) -> Vec<String> {
+        let mut lines = Vec::new();
+
+        let mut content = message.content.trim_end().to_string();
+        if content.is_empty() && self.preserve_empty_lines {
+            content.push(' ');
+        }
+
+        let options = Options::new(self.wrap_width)
+            .break_words(true)
+            .word_separator(textwrap::WordSeparator::UnicodeBreakProperties);
+
+        let wrapped = wrap(&content, &options);
+
+        if self.show_role_labels {
+            let label = format!("{}:", message.role.to_string().to_uppercase());
+            if let Some(first) = wrapped.first() {
+                lines.push(format!("{label} {first}"));
+                for line in wrapped.iter().skip(1) {
+                    lines.push(format!("{:width$} {line}", "", width = label.len()));
+                }
+            } else {
+                lines.push(label);
+            }
+        } else {
+            for line in wrapped {
+                lines.push(line.into_owned());
+            }
+        }
+
+        lines
+    }
+}
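A short usage sketch for the formatter. The `Message::assistant` constructor is the one used in `conversation.rs`; the output shape follows the labeling logic above:

```rust
// Sketch: wrapping a long assistant message at 40 columns with role labels.
use owlen_core::formatting::MessageFormatter;
use owlen_core::types::Message;

fn format_demo() {
    let formatter = MessageFormatter::new(40, true);
    let message = Message::assistant(
        "Borrow checking ensures references never outlive the data they point to.".to_string(),
    );
    // The first line carries the "ASSISTANT:" label; continuation lines are
    // indented by the label width so the text column stays aligned.
    for line in formatter.format_message(&message) {
        println!("{line}");
    }
}
```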
diff --git a/crates/owlen-core/src/input.rs b/crates/owlen-core/src/input.rs
new file mode 100644
index 0000000..c797aa3
--- /dev/null
+++ b/crates/owlen-core/src/input.rs
@@ -0,0 +1,217 @@
+use std::collections::VecDeque;
+
+/// Text input buffer with history and cursor management.
+#[derive(Debug, Clone)]
+pub struct InputBuffer {
+    buffer: String,
+    cursor: usize,
+    history: VecDeque<String>,
+    history_index: Option<usize>,
+    max_history: usize,
+    pub multiline: bool,
+    tab_width: u8,
+}
+
+impl InputBuffer {
+    /// Create a new input buffer
+    pub fn new(max_history: usize, multiline: bool, tab_width: u8) -> Self {
+        Self {
+            buffer: String::new(),
+            cursor: 0,
+            history: VecDeque::with_capacity(max_history.max(1)),
+            history_index: None,
+            max_history: max_history.max(1),
+            multiline,
+            tab_width: tab_width.max(1),
+        }
+    }
+
+    /// Get current text
+    pub fn text(&self) -> &str {
+        &self.buffer
+    }
+
+    /// Current cursor position
+    pub fn cursor(&self) -> usize {
+        self.cursor
+    }
+
+    /// Replace buffer contents
+    pub fn set_text(&mut self, text: impl Into<String>) {
+        self.buffer = text.into();
+        self.cursor = self.buffer.len();
+        self.history_index = None;
+    }
+
+    /// Clear buffer and reset cursor
+    pub fn clear(&mut self) {
+        self.buffer.clear();
+        self.cursor = 0;
+        self.history_index = None;
+    }
+
+    /// Insert a character at the cursor position
+    pub fn insert_char(&mut self, ch: char) {
+        if ch == '\t' {
+            self.insert_tab();
+            return;
+        }
+
+        self.buffer.insert(self.cursor, ch);
+        self.cursor += ch.len_utf8();
+    }
+
+    /// Insert text at cursor
+    pub fn insert_text(&mut self, text: &str) {
+        self.buffer.insert_str(self.cursor, text);
+        self.cursor += text.len();
+    }
+
+    /// Insert spaces representing a tab
+    pub fn insert_tab(&mut self) {
+        let spaces = " ".repeat(self.tab_width as usize);
+        self.insert_text(&spaces);
+    }
+
+    /// Remove character before cursor
+    pub fn backspace(&mut self) {
+        if self.cursor == 0 {
+            return;
+        }
+
+        let prev_index = prev_char_boundary(&self.buffer, self.cursor);
+        self.buffer.drain(prev_index..self.cursor);
+        self.cursor = prev_index;
+    }
+
+    /// Remove character at cursor
+    pub fn delete(&mut self) {
+        if self.cursor >= self.buffer.len() {
+            return;
+        }
+
+        let next_index = next_char_boundary(&self.buffer, self.cursor);
+        self.buffer.drain(self.cursor..next_index);
+    }
+
+    /// Move cursor left by one character
+    pub fn move_left(&mut self) {
+        if self.cursor == 0 {
+            return;
+        }
+        self.cursor = prev_char_boundary(&self.buffer, self.cursor);
+    }
+
+    /// Move cursor right by one character
+    pub fn move_right(&mut self) {
+        if self.cursor >= self.buffer.len() {
+            return;
+        }
+        self.cursor = next_char_boundary(&self.buffer, self.cursor);
+    }
+
+    /// Move cursor to start of the buffer
+    pub fn move_home(&mut self) {
+        self.cursor = 0;
+    }
+
+    /// Move cursor to end of the buffer
+    pub fn move_end(&mut self) {
+        self.cursor = self.buffer.len();
+    }
+
+    /// Push current buffer into history, clearing the buffer afterwards
+    pub fn commit_to_history(&mut self) -> String {
+        let text = std::mem::take(&mut self.buffer);
+        if !text.trim().is_empty() {
+            self.push_history_entry(text.clone());
+        }
+        self.cursor = 0;
+        self.history_index = None;
+        text
+    }
+
+    /// Navigate to previous history entry
+    pub fn history_previous(&mut self) {
+        if self.history.is_empty() {
+            return;
+        }
+
+        let new_index = match self.history_index {
+            Some(idx) if idx + 1 < self.history.len() => idx + 1,
+            None => 0,
+            _ => return,
+        };
+
+        self.history_index = Some(new_index);
+        if let Some(entry) = self.history.get(new_index) {
+            self.buffer = entry.clone();
+            self.cursor = self.buffer.len();
+        }
+    }
+
+    /// Navigate to next history entry
+    pub fn history_next(&mut self) {
+        if self.history.is_empty() {
+            return;
+        }
+
+        if let Some(idx) = self.history_index {
+            if idx > 0 {
+                let new_idx = idx - 1;
+                self.history_index = Some(new_idx);
+                if let Some(entry) = self.history.get(new_idx) {
+                    self.buffer = entry.clone();
+                    self.cursor = self.buffer.len();
+                }
+            } else {
+                self.history_index = None;
+                self.buffer.clear();
+                self.cursor = 0;
+            }
+        } else {
+            self.buffer.clear();
+            self.cursor = 0;
+        }
+    }
+
+    /// Push a new entry into the history buffer, enforcing capacity
+    pub fn push_history_entry(&mut self, entry: String) {
+        if self
+            .history
+            .front()
+            .map(|existing| existing == &entry)
+            .unwrap_or(false)
+        {
+            return;
+        }
+
+        self.history.push_front(entry);
+        while self.history.len() > self.max_history {
+            self.history.pop_back();
+        }
+    }
+}
+
+fn prev_char_boundary(buffer: &str, cursor: usize) -> usize {
+    buffer[..cursor]
+        .char_indices()
+        .last()
+        .map(|(idx, _)| idx)
+        .unwrap_or(0)
+}
+
+fn next_char_boundary(buffer: &str, cursor: usize) -> usize {
+    if cursor >= buffer.len() {
+        return buffer.len();
+    }
+
+    let slice = &buffer[cursor..];
+    let mut iter = slice.char_indices();
+    iter.next();
+    if let Some((idx, _)) = iter.next() {
+        cursor + idx
+    } else {
+        buffer.len()
+    }
+}
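A sketch of the history-recall behaviour, mirroring how a TUI might map arrow keys onto `history_previous`/`history_next`. The assertions reflect the semantics implemented above:

```rust
// Sketch: InputBuffer commit and history recall.
use owlen_core::input::InputBuffer;

fn input_demo() {
    // 100 history entries, multiline enabled, 4-space tabs.
    let mut input = InputBuffer::new(100, true, 4);

    input.set_text("first message");
    let sent = input.commit_to_history(); // returns the text, clears the buffer
    assert_eq!(sent, "first message");
    assert!(input.text().is_empty());

    // Up-arrow style recall walks back; down-arrow walks forward again.
    input.history_previous();
    assert_eq!(input.text(), "first message");
    input.history_next();
    assert!(input.text().is_empty());
}
```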
diff --git a/crates/owlen-core/src/lib.rs b/crates/owlen-core/src/lib.rs
new file mode 100644
index 0000000..309ac61
--- /dev/null
+++ b/crates/owlen-core/src/lib.rs
@@ -0,0 +1,58 @@
+//! Core traits and types for OWLEN LLM client
+//!
+//! This crate provides the foundational abstractions for building
+//! LLM providers, routers, and MCP (Model Context Protocol) adapters.
+
+pub mod config;
+pub mod conversation;
+pub mod formatting;
+pub mod input;
+pub mod model;
+pub mod provider;
+pub mod router;
+pub mod session;
+pub mod types;
+
+pub use config::*;
+pub use conversation::*;
+pub use formatting::*;
+pub use input::*;
+pub use model::*;
+pub use provider::*;
+pub use router::*;
+pub use session::*;
+pub use types::*;
+
+/// Result type used throughout the OWLEN ecosystem
+pub type Result<T> = std::result::Result<T, Error>;
+
+/// Core error types for OWLEN
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("Provider error: {0}")]
+    Provider(#[from] anyhow::Error),
+
+    #[error("Network error: {0}")]
+    Network(String),
+
+    #[error("Authentication error: {0}")]
+    Auth(String),
+
+    #[error("Configuration error: {0}")]
+    Config(String),
+
+    #[error("I/O error: {0}")]
+    Io(#[from] std::io::Error),
+
+    #[error("Invalid input: {0}")]
+    InvalidInput(String),
+
+    #[error("Operation timed out: {0}")]
+    Timeout(String),
+
+    #[error("Serialization error: {0}")]
+    Serialization(#[from] serde_json::Error),
+
+    #[error("Unknown error: {0}")]
+    Unknown(String),
+}
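A small sketch of how the crate-wide `Result` alias and `Error` variants are meant to compose. `read_port` is a hypothetical helper, not part of the crate:

```rust
// Sketch: returning domain errors and letting std errors convert via #[from].
use owlen_core::{Error, Result};

fn read_port(raw: &str) -> Result<u16> {
    raw.parse::<u16>()
        .map_err(|e| Error::Config(format!("invalid port '{raw}': {e}")))
}

fn demo() -> Result<()> {
    // std::io::Error converts automatically through the Io(#[from]) variant.
    let _contents = std::fs::read_to_string("/tmp/owlen-example")?;
    let _port = read_port("11434")?;
    Ok(())
}
```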
diff --git a/crates/owlen-core/src/model.rs b/crates/owlen-core/src/model.rs
new file mode 100644
index 0000000..0dd1d02
--- /dev/null
+++ b/crates/owlen-core/src/model.rs
@@ -0,0 +1,84 @@
+use crate::types::ModelInfo;
+use crate::Result;
+use std::future::Future;
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+use tokio::sync::RwLock;
+
+#[derive(Default, Debug)]
+struct ModelCache {
+    models: Vec<ModelInfo>,
+    last_refresh: Option<Instant>,
+}
+
+/// Caches model listings for improved selection performance
+#[derive(Clone, Debug)]
+pub struct ModelManager {
+    cache: Arc<RwLock<ModelCache>>,
+    ttl: Duration,
+}
+
+impl ModelManager {
+    /// Create a new manager with the desired cache TTL
+    pub fn new(ttl: Duration) -> Self {
+        Self {
+            cache: Arc::new(RwLock::new(ModelCache::default())),
+            ttl,
+        }
+    }
+
+    /// Get cached models, refreshing via the provided fetcher when stale. Returns the up-to-date model list.
+    pub async fn get_or_refresh<F, Fut>(
+        &self,
+        force_refresh: bool,
+        fetcher: F,
+    ) -> Result<Vec<ModelInfo>>
+    where
+        F: FnOnce() -> Fut,
+        Fut: Future<Output = Result<Vec<ModelInfo>>>,
+    {
+        if !force_refresh {
+            if let Some(models) = self.cached_if_fresh().await {
+                return Ok(models);
+            }
+        }
+
+        let models = fetcher().await?;
+        let mut cache = self.cache.write().await;
+        cache.models = models.clone();
+        cache.last_refresh = Some(Instant::now());
+        Ok(models)
+    }
+
+    /// Return cached models without refreshing
+    pub async fn cached(&self) -> Vec<ModelInfo> {
+        self.cache.read().await.models.clone()
+    }
+
+    /// Drop cached models, forcing next call to refresh
+    pub async fn invalidate(&self) {
+        let mut cache = self.cache.write().await;
+        cache.models.clear();
+        cache.last_refresh = None;
+    }
+
+    /// Select a model by id or name from the cache
+    pub async fn select(&self, identifier: &str) -> Option<ModelInfo> {
+        let cache = self.cache.read().await;
+        cache
+            .models
+            .iter()
+            .find(|m| m.id == identifier || m.name == identifier)
+            .cloned()
+    }
+
+    async fn cached_if_fresh(&self) -> Option<Vec<ModelInfo>> {
+        let cache = self.cache.read().await;
+        let fresh = matches!(cache.last_refresh, Some(ts) if ts.elapsed() < self.ttl);
+        if fresh && !cache.models.is_empty() {
+            Some(cache.models.clone())
+        } else {
+            None
+        }
+    }
+}
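A usage sketch for the cache, assuming a caller that already has a fresh model list in hand (`models_demo` is hypothetical; the real client passes a closure that calls the provider):

```rust
// Sketch: refreshing the model cache through an async fetcher closure.
use owlen_core::model::ModelManager;
use owlen_core::types::ModelInfo;
use std::time::Duration;

async fn models_demo(fresh: Vec<ModelInfo>) -> owlen_core::Result<()> {
    let manager = ModelManager::new(Duration::from_secs(60));

    // The first call sees an empty cache and runs the fetcher.
    let models = manager
        .get_or_refresh(false, move || async move { Ok(fresh) })
        .await?;

    // Within the TTL, the cached list is returned without refetching.
    assert_eq!(manager.cached().await.len(), models.len());
    Ok(())
}
```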
diff --git a/crates/owlen-core/src/provider.rs b/crates/owlen-core/src/provider.rs
new file mode 100644
index 0000000..ebc5460
--- /dev/null
+++ b/crates/owlen-core/src/provider.rs
@@ -0,0 +1,105 @@
+//! Provider trait and related types
+
+use crate::{types::*, Result};
+use futures::Stream;
+use std::pin::Pin;
+use std::sync::Arc;
+
+/// A stream of chat responses
+pub type ChatStream = Pin<Box<dyn Stream<Item = Result<ChatResponse>> + Send>>;
+
+/// Trait for LLM providers (Ollama, OpenAI, Anthropic, etc.)
+#[async_trait::async_trait]
+pub trait Provider: Send + Sync {
+    /// Get the name of this provider
+    fn name(&self) -> &str;
+
+    /// List available models from this provider
+    async fn list_models(&self) -> Result<Vec<ModelInfo>>;
+
+    /// Send a chat completion request
+    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse>;
+
+    /// Send a streaming chat completion request
+    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream>;
+
+    /// Check if the provider is available/healthy
+    async fn health_check(&self) -> Result<()>;
+
+    /// Get provider-specific configuration schema
+    fn config_schema(&self) -> serde_json::Value {
+        serde_json::json!({})
+    }
+}
+
+/// Configuration for a provider
+#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
+pub struct ProviderConfig {
+    /// Provider type identifier
+    pub provider_type: String,
+    /// Base URL for API calls
+    pub base_url: Option<String>,
+    /// API key or token
+    pub api_key: Option<String>,
+    /// Additional provider-specific configuration
+    #[serde(flatten)]
+    pub extra: std::collections::HashMap<String, serde_json::Value>,
+}
+
+/// A registry of providers
+pub struct ProviderRegistry {
+    providers: std::collections::HashMap<String, Arc<dyn Provider>>,
+}
+
+impl ProviderRegistry {
+    /// Create a new provider registry
+    pub fn new() -> Self {
+        Self {
+            providers: std::collections::HashMap::new(),
+        }
+    }
+
+    /// Register a provider
+    pub fn register<P: Provider + 'static>(&mut self, provider: P) {
+        self.register_arc(Arc::new(provider));
+    }
+
+    /// Register an already wrapped provider
+    pub fn register_arc(&mut self, provider: Arc<dyn Provider>) {
+        let name = provider.name().to_string();
+        self.providers.insert(name, provider);
+    }
+
+    /// Get a provider by name
+    pub fn get(&self, name: &str) -> Option<Arc<dyn Provider>> {
+        self.providers.get(name).cloned()
+    }
+
+    /// List all registered provider names
+    pub fn list_providers(&self) -> Vec<String> {
+        self.providers.keys().cloned().collect()
+    }
+
+    /// Get all models from all providers
+    pub async fn list_all_models(&self) -> Result<Vec<ModelInfo>> {
+        let mut all_models = Vec::new();
+
+        for provider in self.providers.values() {
+            match provider.list_models().await {
+                Ok(mut models) => all_models.append(&mut models),
+                Err(e) => {
+                    // Log error but continue with other providers
+                    eprintln!("Failed to get models from {}: {}", provider.name(), e);
+                }
+            }
+        }
+
+        Ok(all_models)
+    }
+}
+
+impl Default for ProviderRegistry {
+    fn default() -> Self {
+        Self::new()
+    }
+}
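A minimal, hypothetical `Provider` implementation sketch (an echo backend) to illustrate the trait surface; a real backend such as `owlen-ollama` would perform HTTP calls instead. `Message::assistant` is the constructor used elsewhere in this commit:

```rust
// Sketch: the smallest useful Provider, echoing the last user message.
use owlen_core::provider::{ChatStream, Provider};
use owlen_core::types::{ChatRequest, ChatResponse, Message, ModelInfo};
use owlen_core::Result;

struct EchoProvider;

#[async_trait::async_trait]
impl Provider for EchoProvider {
    fn name(&self) -> &str {
        "echo"
    }

    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
        Ok(vec![])
    }

    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
        let text = request
            .messages
            .last()
            .map(|m| m.content.clone())
            .unwrap_or_default();
        Ok(ChatResponse {
            message: Message::assistant(text),
            usage: None,
            is_streaming: false,
            is_final: true,
        })
    }

    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
        // A one-item stream built from the non-streaming response.
        let response = self.chat(request).await?;
        Ok(Box::pin(futures::stream::iter([Ok(response)])))
    }

    async fn health_check(&self) -> Result<()> {
        Ok(())
    }
}
```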
diff --git a/crates/owlen-core/src/router.rs b/crates/owlen-core/src/router.rs
new file mode 100644
index 0000000..d3d4bdd
--- /dev/null
+++ b/crates/owlen-core/src/router.rs
@@ -0,0 +1,155 @@
+//! Router for managing multiple providers and routing requests
+
+use crate::{provider::*, types::*, Result};
+use std::sync::Arc;
+
+/// A router that can distribute requests across multiple providers
+pub struct Router {
+    registry: ProviderRegistry,
+    routing_rules: Vec<RoutingRule>,
+    default_provider: Option<String>,
+}
+
+/// A rule for routing requests to specific providers
+#[derive(Debug, Clone)]
+pub struct RoutingRule {
+    /// Pattern to match against model names
+    pub model_pattern: String,
+    /// Provider to route to
+    pub provider: String,
+    /// Priority (higher numbers are checked first)
+    pub priority: u32,
+}
+
+impl Router {
+    /// Create a new router
+    pub fn new() -> Self {
+        Self {
+            registry: ProviderRegistry::new(),
+            routing_rules: Vec::new(),
+            default_provider: None,
+        }
+    }
+
+    /// Register a provider with the router
+    pub fn register_provider<P: Provider + 'static>(&mut self, provider: P) {
+        self.registry.register(provider);
+    }
+
+    /// Set the default provider
+    pub fn set_default_provider(&mut self, provider_name: String) {
+        self.default_provider = Some(provider_name);
+    }
+
+    /// Add a routing rule
+    pub fn add_routing_rule(&mut self, rule: RoutingRule) {
+        self.routing_rules.push(rule);
+        // Sort by priority (descending)
+        self.routing_rules
+            .sort_by(|a, b| b.priority.cmp(&a.priority));
+    }
+
+    /// Route a request to the appropriate provider
+    pub async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
+        let provider = self.find_provider_for_model(&request.model)?;
+        provider.chat(request).await
+    }
+
+    /// Route a streaming request to the appropriate provider
+    pub async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
+        let provider = self.find_provider_for_model(&request.model)?;
+        provider.chat_stream(request).await
+    }
+
+    /// List all available models from all providers
+    pub async fn list_models(&self) -> Result<Vec<ModelInfo>> {
+        self.registry.list_all_models().await
+    }
+
+    /// Find the appropriate provider for a given model
+    fn find_provider_for_model(&self, model: &str) -> Result<Arc<dyn Provider>> {
+        // Check routing rules first
+        for rule in &self.routing_rules {
+            if self.matches_pattern(&rule.model_pattern, model) {
+                if let Some(provider) = self.registry.get(&rule.provider) {
+                    return Ok(provider);
+                }
+            }
+        }
+
+        // Fall back to default provider
+        if let Some(default) = &self.default_provider {
+            if let Some(provider) = self.registry.get(default) {
+                return Ok(provider);
+            }
+        }
+
+        // If no default, try to find any provider that has this model
+        // This is a fallback for cases where routing isn't configured
+        for provider_name in self.registry.list_providers() {
+            if let Some(provider) = self.registry.get(&provider_name) {
+                return Ok(provider);
+            }
+        }
+
+        Err(crate::Error::Provider(anyhow::anyhow!(
+            "No provider found for model: {}",
+            model
+        )))
+    }
+
+    /// Check if a model name matches a pattern
+    fn matches_pattern(&self, pattern: &str, model: &str) -> bool {
+        // Simple pattern matching for now
+        // Could be extended to support more complex patterns
+        if pattern == "*" {
+            return true;
+        }
+
+        if pattern.ends_with('*') {
+            let prefix = &pattern[..pattern.len() - 1];
+            return model.starts_with(prefix);
+        }
+
+        if pattern.starts_with('*') {
+            let suffix = &pattern[1..];
+            return model.ends_with(suffix);
+        }
+
+        pattern == model
+    }
+
+    /// Get routing configuration
+    pub fn get_routing_rules(&self) -> &[RoutingRule] {
+        &self.routing_rules
+    }
+
+    /// Get the default provider name
+    pub fn get_default_provider(&self) -> Option<&str> {
+        self.default_provider.as_deref()
+    }
+}
+
+impl Default for Router {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_pattern_matching() {
+        let router = Router::new();
+
+        assert!(router.matches_pattern("*", "any-model"));
+        assert!(router.matches_pattern("gpt*", "gpt-4"));
+        assert!(router.matches_pattern("gpt*", "gpt-3.5-turbo"));
+        assert!(!router.matches_pattern("gpt*", "claude-3"));
+        assert!(router.matches_pattern("*:latest", "llama2:latest"));
+        assert!(router.matches_pattern("exact-match", "exact-match"));
+        assert!(!router.matches_pattern("exact-match", "different-model"));
+    }
+}
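A wiring sketch for the router; the provider argument is assumed to be any `Provider` implementation (for example the Ollama one), and the rule values are illustrative:

```rust
// Sketch: route "llama*" models explicitly, fall back to the default provider.
use owlen_core::provider::Provider;
use owlen_core::router::{Router, RoutingRule};

fn router_demo(ollama: impl Provider + 'static) -> Router {
    let mut router = Router::new();
    router.register_provider(ollama);
    router.set_default_provider("ollama".to_string());
    // Higher-priority rules are checked first; unmatched models use the default.
    router.add_routing_rule(RoutingRule {
        model_pattern: "llama*".to_string(),
        provider: "ollama".to_string(),
        priority: 10,
    });
    router
}
```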
diff --git a/crates/owlen-core/src/session.rs b/crates/owlen-core/src/session.rs
new file mode 100644
index 0000000..36bb98c
--- /dev/null
+++ b/crates/owlen-core/src/session.rs
@@ -0,0 +1,204 @@
+use crate::config::Config;
+use crate::conversation::ConversationManager;
+use crate::formatting::MessageFormatter;
+use crate::input::InputBuffer;
+use crate::model::ModelManager;
+use crate::provider::{ChatStream, Provider};
+use crate::types::{ChatParameters, ChatRequest, ChatResponse, Conversation, ModelInfo};
+use crate::Result;
+use std::sync::Arc;
+use uuid::Uuid;
+
+/// Outcome of submitting a chat request
+pub enum SessionOutcome {
+    /// Immediate response received (non-streaming)
+    Complete(ChatResponse),
+    /// Streaming response where chunks will arrive asynchronously
+    Streaming {
+        response_id: Uuid,
+        stream: ChatStream,
+    },
+}
+
+/// High-level controller encapsulating session state and provider interactions
+pub struct SessionController {
+    provider: Arc<dyn Provider>,
+    conversation: ConversationManager,
+    model_manager: ModelManager,
+    input_buffer: InputBuffer,
+    formatter: MessageFormatter,
+    config: Config,
+}
+
+impl SessionController {
+    /// Create a new controller with the given provider and configuration
+    pub fn new(provider: Arc<dyn Provider>, config: Config) -> Self {
+        let model = config
+            .general
+            .default_model
+            .clone()
+            .unwrap_or_else(|| "ollama/default".to_string());
+
+        let conversation =
+            ConversationManager::with_history_capacity(model, config.storage.max_saved_sessions);
+        let formatter =
+            MessageFormatter::new(config.ui.wrap_column as usize, config.ui.show_role_labels)
+                .with_preserve_empty(config.ui.word_wrap);
+        let input_buffer = InputBuffer::new(
+            config.input.history_size,
+            config.input.multiline,
+            config.input.tab_width,
+        );
+
+        let model_manager = ModelManager::new(config.general.model_cache_ttl());
+
+        Self {
+            provider,
+            conversation,
+            model_manager,
+            input_buffer,
+            formatter,
+            config,
+        }
+    }
+
+    /// Access the active conversation
+    pub fn conversation(&self) -> &Conversation {
+        self.conversation.active()
+    }
+
+    /// Mutable access to the conversation manager
+    pub fn conversation_mut(&mut self) -> &mut ConversationManager {
+        &mut self.conversation
+    }
+
+    /// Access input buffer
+    pub fn input_buffer(&self) -> &InputBuffer {
+        &self.input_buffer
+    }
+
+    /// Mutable input buffer access
+    pub fn input_buffer_mut(&mut self) -> &mut InputBuffer {
+        &mut self.input_buffer
+    }
+
+    /// Formatter for rendering messages
+    pub fn formatter(&self) -> &MessageFormatter {
+        &self.formatter
+    }
+
+    /// Access configuration
+    pub fn config(&self) -> &Config {
+        &self.config
+    }
+
+    /// Mutable configuration access
+    pub fn config_mut(&mut self) -> &mut Config {
+        &mut self.config
+    }
+
+    /// Currently selected model identifier
+    pub fn selected_model(&self) -> &str {
+        &self.conversation.active().model
+    }
+
+    /// Change current model for upcoming requests
+    pub fn set_model(&mut self, model: String) {
+        self.conversation.set_model(model.clone());
+        self.config.general.default_model = Some(model);
+    }
+
+    /// Retrieve cached models, refreshing from provider as needed
+    pub async fn models(&self, force_refresh: bool) -> Result<Vec<ModelInfo>> {
+        self.model_manager
+            .get_or_refresh(force_refresh, || async {
+                self.provider.list_models().await
+            })
+            .await
+    }
+
+    /// Attempt to select the configured default model from cached models
+    pub fn ensure_default_model(&mut self, models: &[ModelInfo]) {
+        if let Some(default) = self.config.general.default_model.clone() {
+            if models.iter().any(|m| m.id == default || m.name == default) {
+                self.set_model(default);
+            }
+        } else if let Some(model) = models.first() {
+            self.set_model(model.id.clone());
+        }
+    }
+
+    /// Submit a user message; optionally stream the response
+    pub async fn send_message(
+        &mut self,
+        content: String,
+        mut parameters: ChatParameters,
+    ) -> Result<SessionOutcome> {
+        let streaming = parameters.stream || self.config.general.enable_streaming;
+        parameters.stream = streaming;
+
+        self.conversation.push_user_message(content);
+
+        let request = ChatRequest {
+            model: self.conversation.active().model.clone(),
+            messages: self.conversation.active().messages.clone(),
+            parameters,
+        };
+
+        if streaming {
+            match self.provider.chat_stream(request).await {
+                Ok(stream) => {
+                    let response_id = self.conversation.start_streaming_response();
+                    Ok(SessionOutcome::Streaming {
+                        response_id,
+                        stream,
+                    })
+                }
+                Err(err) => {
+                    self.conversation
+                        .push_assistant_message(format!("Error starting stream: {}", err));
+                    Err(err)
+                }
+            }
+        } else {
+            match self.provider.chat(request).await {
+                Ok(response) => {
+                    self.conversation.push_message(response.message.clone());
+                    Ok(SessionOutcome::Complete(response))
+                }
+                Err(err) => {
+                    self.conversation
+                        .push_assistant_message(format!("Error: {}", err));
+                    Err(err)
+                }
+            }
+        }
+    }
+
+    /// Mark a streaming response message with placeholder content
+    pub fn mark_stream_placeholder(&mut self, message_id: Uuid, text: &str) -> Result<()> {
+        self.conversation
+            .set_stream_placeholder(message_id, text.to_string())
+    }
+
+    /// Apply streaming chunk to the conversation
+    pub fn apply_stream_chunk(&mut self, message_id: Uuid, chunk: &ChatResponse) -> Result<()> {
+        self.conversation
+            .append_stream_chunk(message_id, &chunk.message.content, chunk.is_final)
+    }
+
+    /// Access conversation history
+    pub fn history(&self) -> Vec<Conversation> {
+        self.conversation.history().cloned().collect()
+    }
+
+    /// Start a new conversation optionally targeting a specific model
+    pub fn start_new_conversation(&mut self, model: Option<String>, name: Option<String>) {
+        self.conversation.start_new(model, name);
+    }
+
+    /// Clear current conversation messages
+    pub fn clear(&mut self) {
+        self.conversation.clear();
+    }
+}
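A consumption sketch for `send_message`, covering both outcomes. It assumes a tokio context and uses `futures::StreamExt` for the streaming arm; the prompt text is illustrative:

```rust
// Sketch: handling both SessionOutcome variants from send_message.
use futures::StreamExt;
use owlen_core::session::{SessionController, SessionOutcome};
use owlen_core::types::ChatParameters;

async fn send_demo(controller: &mut SessionController) -> owlen_core::Result<()> {
    match controller
        .send_message("Hi there".to_string(), ChatParameters::default())
        .await?
    {
        SessionOutcome::Complete(response) => {
            println!("{}", response.message.content);
        }
        SessionOutcome::Streaming { response_id, mut stream } => {
            while let Some(chunk) = stream.next().await {
                let chunk = chunk?;
                // Each delta is appended to the placeholder assistant message.
                controller.apply_stream_chunk(response_id, &chunk)?;
            }
        }
    }
    Ok(())
}
```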
diff --git a/crates/owlen-core/src/types.rs b/crates/owlen-core/src/types.rs
new file mode 100644
index 0000000..cb34593
--- /dev/null
+++ b/crates/owlen-core/src/types.rs
@@ -0,0 +1,203 @@
+//! Core types used across OWLEN
+
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::fmt;
+use uuid::Uuid;
+
+/// A message in a conversation
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+pub struct Message {
+    /// Unique identifier for this message
+    pub id: Uuid,
+    /// Role of the message sender (user, assistant, system)
+    pub role: Role,
+    /// Content of the message
+    pub content: String,
+    /// Optional metadata
+    pub metadata: HashMap<String, serde_json::Value>,
+    /// Timestamp when the message was created
+    pub timestamp: std::time::SystemTime,
+}
+
+/// Role of a message sender
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
+#[serde(rename_all = "lowercase")]
+pub enum Role {
+    /// Message from the user
+    User,
+    /// Message from the AI assistant
+    Assistant,
+    /// System message (prompts, context, etc.)
+    System,
+}
+
+impl fmt::Display for Role {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let label = match self {
+            Role::User => "user",
+            Role::Assistant => "assistant",
+            Role::System => "system",
+        };
+        f.write_str(label)
+    }
+}
+
+/// A conversation containing multiple messages
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Conversation {
+    /// Unique identifier for this conversation
+    pub id: Uuid,
+    /// Optional name/title for the conversation
+    pub name: Option<String>,
+    /// Messages in chronological order
+    pub messages: Vec<Message>,
+    /// Model used for this conversation
+    pub model: String,
+    /// When the conversation was created
+    pub created_at: std::time::SystemTime,
+    /// When the conversation was last updated
+    pub updated_at: std::time::SystemTime,
+}
+
+/// Configuration for a chat completion request
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ChatRequest {
+    /// The model to use for completion
+    pub model: String,
+    /// The conversation messages
+    pub messages: Vec<Message>,
+    /// Optional parameters for the request
+    pub parameters: ChatParameters,
+}
+
+/// Parameters for chat completion
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ChatParameters {
+    /// Temperature for randomness (0.0 to 2.0)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub temperature: Option<f32>,
+    /// Maximum tokens to generate
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_tokens: Option<u32>,
+    /// Whether to stream the response
+    #[serde(default)]
+    pub stream: bool,
+    /// Additional provider-specific parameters
+    #[serde(flatten)]
+    pub extra: HashMap<String, serde_json::Value>,
+}
+
+impl Default for ChatParameters {
+    fn default() -> Self {
+        Self {
+            temperature: None,
+            max_tokens: None,
+            stream: false,
+            extra: HashMap::new(),
+        }
+    }
+}
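The serde attributes above shape the wire format: `None` fields disappear and `extra` is flattened into the same JSON object. A small illustration (assumes `serde_json` on the consumer side; `0.5` is chosen because it round-trips exactly through `f32`):

```rust
let mut extra = std::collections::HashMap::new();
extra.insert("top_p".to_string(), serde_json::json!(0.9));

let params = ChatParameters {
    temperature: Some(0.5),
    max_tokens: None, // skipped entirely by skip_serializing_if
    stream: true,
    extra,            // flattened into the top-level object
};

assert_eq!(
    serde_json::to_value(&params).unwrap(),
    serde_json::json!({"temperature": 0.5, "stream": true, "top_p": 0.9})
);
```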
+
+/// Response from a chat completion request
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ChatResponse {
+    /// The generated message
+    pub message: Message,
+    /// Token usage information
+    pub usage: Option<TokenUsage>,
+    /// Whether this is a streaming chunk
+    #[serde(default)]
+    pub is_streaming: bool,
+    /// Whether this is the final chunk in a stream
+    #[serde(default)]
+    pub is_final: bool,
+}
+
+/// Token usage information
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TokenUsage {
+    /// Tokens in the prompt
+    pub prompt_tokens: u32,
+    /// Tokens in the completion
+    pub completion_tokens: u32,
+    /// Total tokens used
+    pub total_tokens: u32,
+}
+
+/// Information about an available model
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ModelInfo {
+    /// Model identifier
+    pub id: String,
+    /// Human-readable name
+    pub name: String,
+    /// Model description
+    pub description: Option<String>,
+    /// Provider that hosts this model
+    pub provider: String,
+    /// Context window size
+    pub context_window: Option<u32>,
+    /// Additional capabilities
+    pub capabilities: Vec<String>,
+}
+
+impl Message {
+    /// Create a new message
+    pub fn new(role: Role, content: String) -> Self {
+        Self {
+            id: Uuid::new_v4(),
+            role,
+            content,
+            metadata: HashMap::new(),
+            timestamp: std::time::SystemTime::now(),
+        }
+    }
+
+    /// Create a user message
+    pub fn user(content: String) -> Self {
+        Self::new(Role::User, content)
+    }
+
+    /// Create an assistant message
+    pub fn assistant(content: String) -> Self {
+        Self::new(Role::Assistant, content)
+    }
+
+    /// Create a system message
+    pub fn system(content: String) -> Self {
+        Self::new(Role::System, content)
+    }
+}
+
+impl Conversation {
+    /// Create a new conversation
+    pub fn new(model: String) -> Self {
+        let now = std::time::SystemTime::now();
+        Self {
+            id: Uuid::new_v4(),
+            name: None,
+            messages: Vec::new(),
+            model,
+            created_at: now,
+            updated_at: now,
+        }
+    }
+
+    /// Add a message to the conversation
+    pub fn add_message(&mut self, message: Message) {
+        self.messages.push(message);
+        self.updated_at = std::time::SystemTime::now();
+    }
+
+    /// Get the last message in the conversation
+    pub fn last_message(&self) -> Option<&Message> {
+        self.messages.last()
+    }
+
+    /// Clear all messages
+    pub fn clear(&mut self) {
+        self.messages.clear();
+        self.updated_at = std::time::SystemTime::now();
+    }
+}
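For orientation, the constructors and helpers above compose like this (illustrative only):

```rust
let mut conv = Conversation::new("llama3.2:latest".to_string());
conv.add_message(Message::system("You are a helpful assistant.".to_string()));
conv.add_message(Message::user("Hello!".to_string()));

// `add_message` bumps `updated_at`; `last_message` peeks at the tail.
assert_eq!(conv.messages.len(), 2);
assert_eq!(conv.last_message().map(|m| m.role.clone()), Some(Role::User));
```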
diff --git a/crates/owlen-ollama/Cargo.toml b/crates/owlen-ollama/Cargo.toml
new file mode 100644
index 0000000..5446f7a
--- /dev/null
+++ b/crates/owlen-ollama/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "owlen-ollama"
+version = "0.1.0"
+edition = "2021"
+description = "Ollama provider for OWLEN LLM client"
+
+[dependencies]
+owlen-core = { path = "../owlen-core" }
+
+# HTTP client
+reqwest = { workspace = true }
+
+# Async runtime
+tokio = { workspace = true }
+tokio-stream = { workspace = true }
+futures = { workspace = true }
+futures-util = { workspace = true }
+
+# Serialization
+serde = { workspace = true }
+serde_json = { workspace = true }
+
+# Utilities
+anyhow = { workspace = true }
+thiserror = { workspace = true }
+uuid = { workspace = true }
+async-trait = { workspace = true }
+
+[dev-dependencies]
+tokio-test = { workspace = true }
\ No newline at end of file
diff --git a/crates/owlen-ollama/src/lib.rs b/crates/owlen-ollama/src/lib.rs
new file mode 100644
index 0000000..e328a22
--- /dev/null
+++ b/crates/owlen-ollama/src/lib.rs
@@ -0,0 +1,530 @@
+//! Ollama provider for OWLEN LLM client
+
+use futures_util::StreamExt;
+use owlen_core::{
+    config::GeneralSettings,
+    model::ModelManager,
+    provider::{ChatStream, Provider, ProviderConfig},
+    types::{ChatParameters, ChatRequest, ChatResponse, Message, ModelInfo, Role, TokenUsage},
+    Result,
+};
+use reqwest::Client;
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Value};
+use std::collections::HashMap;
+use std::io;
+use std::time::Duration;
+use tokio::sync::mpsc;
+use tokio_stream::wrappers::UnboundedReceiverStream;
+
+const DEFAULT_TIMEOUT_SECS: u64 = 120;
+const DEFAULT_MODEL_CACHE_TTL_SECS: u64 = 60;
+
+/// Ollama provider implementation with enhanced configuration and caching
+pub struct OllamaProvider {
+    client: Client,
+    base_url: String,
+    model_manager: ModelManager,
+}
+
+/// Options for configuring the Ollama provider
+pub struct OllamaOptions {
+    pub base_url: String,
+    pub request_timeout: Duration,
+    pub model_cache_ttl: Duration,
+}
+
+impl OllamaOptions {
+    pub fn new(base_url: impl Into<String>) -> Self {
+        Self {
+            base_url: base_url.into(),
+            request_timeout: Duration::from_secs(DEFAULT_TIMEOUT_SECS),
+            model_cache_ttl: Duration::from_secs(DEFAULT_MODEL_CACHE_TTL_SECS),
+        }
+    }
+
+    pub fn with_general(mut self, general: &GeneralSettings) -> Self {
+        self.model_cache_ttl = general.model_cache_ttl();
+        self
+    }
+}
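`OllamaOptions` exposes plain public fields, so struct-update syntax works for overriding a single knob. A hedged sketch (the 30-second timeout is illustrative):

```rust
fn build_provider() -> owlen_core::Result<OllamaProvider> {
    let options = OllamaOptions {
        request_timeout: Duration::from_secs(30),
        ..OllamaOptions::new("http://localhost:11434")
    };
    OllamaProvider::with_options(options)
}
```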
+
+/// Ollama-specific message format
+#[derive(Debug, Clone, Serialize, Deserialize)]
+struct OllamaMessage {
+    role: String,
+    content: String,
+}
+
+/// Ollama chat request format
+#[derive(Debug, Serialize)]
+struct OllamaChatRequest {
+    model: String,
+    messages: Vec<OllamaMessage>,
+    stream: bool,
+    #[serde(flatten)]
+    options: HashMap<String, Value>,
+}
+
+/// Ollama chat response format
+#[derive(Debug, Deserialize)]
+struct OllamaChatResponse {
+    message: Option<OllamaMessage>,
+    done: bool,
+    #[serde(default)]
+    prompt_eval_count: Option<u32>,
+    #[serde(default)]
+    eval_count: Option<u32>,
+    #[serde(default)]
+    error: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct OllamaErrorResponse {
+    error: Option<String>,
+}
+
+/// Ollama models list response
+#[derive(Debug, Deserialize)]
+struct OllamaModelsResponse {
+    models: Vec<OllamaModelInfo>,
+}
+
+/// Ollama model information
+#[derive(Debug, Deserialize)]
+struct OllamaModelInfo {
+    name: String,
+    #[serde(default)]
+    details: Option<OllamaModelDetails>,
+}
+
+#[derive(Debug, Deserialize)]
+struct OllamaModelDetails {
+    #[serde(default)]
+    family: Option<String>,
+}
+
+impl OllamaProvider {
+    /// Create a new Ollama provider with sensible defaults
+    pub fn new(base_url: impl Into<String>) -> Result<Self> {
+        Self::with_options(OllamaOptions::new(base_url))
+    }
+
+    /// Create a provider from configuration settings
+    pub fn from_config(config: &ProviderConfig, general: Option<&GeneralSettings>) -> Result<Self> {
+        let mut options = OllamaOptions::new(
+            config
+                .base_url
+                .clone()
+                .unwrap_or_else(|| "http://localhost:11434".to_string()),
+        );
+
+        if let Some(timeout) = config
+            .extra
+            .get("timeout_secs")
+            .and_then(|value| value.as_u64())
+        {
+            options.request_timeout = Duration::from_secs(timeout.max(5));
+        }
+
+        if let Some(cache_ttl) = config
+            .extra
+            .get("model_cache_ttl_secs")
+            .and_then(|value| value.as_u64())
+        {
+            options.model_cache_ttl = Duration::from_secs(cache_ttl.max(5));
+        }
+
+        if let Some(general) = general {
+            options = options.with_general(general);
+        }
+
+        Self::with_options(options)
+    }
+
+    /// Create a provider from explicit options
+    pub fn with_options(options: OllamaOptions) -> Result<Self> {
+        let client = Client::builder()
+            .timeout(options.request_timeout)
+            .build()
+            .map_err(|e| owlen_core::Error::Config(format!("Failed to build HTTP client: {e}")))?;
+
+        Ok(Self {
+            client,
+            base_url: options.base_url.trim_end_matches('/').to_string(),
+            model_manager: ModelManager::new(options.model_cache_ttl),
+        })
+    }
+
+    /// Accessor for the underlying model manager
+    pub fn model_manager(&self) -> &ModelManager {
+        &self.model_manager
+    }
+
+    fn convert_message(message: &Message) -> OllamaMessage {
+        OllamaMessage {
+            role: match message.role {
+                Role::User => "user".to_string(),
+                Role::Assistant => "assistant".to_string(),
+                Role::System => "system".to_string(),
+            },
+            content: message.content.clone(),
+        }
+    }
+
+    fn convert_ollama_message(message: &OllamaMessage) -> Message {
+        let role = match message.role.as_str() {
+            "user" => Role::User,
+            "assistant" => Role::Assistant,
+            "system" => Role::System,
+            _ => Role::Assistant,
+        };
+
+        Message::new(role, message.content.clone())
+    }
+
+    fn build_options(parameters: ChatParameters) -> HashMap<String, Value> {
+        let mut options = parameters.extra;
+
+        if let Some(temperature) = parameters.temperature {
+            options
+                .entry("temperature".to_string())
+                .or_insert(json!(temperature as f64));
+        }
+
+        if let Some(max_tokens) = parameters.max_tokens {
+            options
+                .entry("num_predict".to_string())
+                .or_insert(json!(max_tokens));
+        }
+
+        options
+    }
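A unit test in this module could pin down the `build_options` mapping; a sketch (not in the original patch), relying on `or_insert` never overwriting explicit `extra` entries:

```rust
#[cfg(test)]
mod option_mapping_tests {
    use super::*;

    #[test]
    fn maps_parameters_to_ollama_option_names() {
        let params = ChatParameters {
            temperature: Some(0.5),
            max_tokens: Some(256),
            stream: false,
            extra: HashMap::new(),
        };
        let opts = OllamaProvider::build_options(params);
        // Generic names become Ollama's `temperature` / `num_predict`.
        assert_eq!(opts.get("temperature"), Some(&json!(0.5)));
        assert_eq!(opts.get("num_predict"), Some(&json!(256)));
    }
}
```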
+
+    async fn fetch_models(&self) -> Result<Vec<ModelInfo>> {
+        let url = format!("{}/api/tags", self.base_url);
+
+        let response = self
+            .client
+            .get(&url)
+            .send()
+            .await
+            .map_err(|e| owlen_core::Error::Network(format!("Failed to fetch models: {e}")))?;
+
+        if !response.status().is_success() {
+            let code = response.status();
+            let error = parse_error_body(response).await;
+            return Err(owlen_core::Error::Network(format!(
+                "Ollama model listing failed ({code}): {error}"
+            )));
+        }
+
+        let body = response.text().await.map_err(|e| {
+            owlen_core::Error::Network(format!("Failed to read models response: {e}"))
+        })?;
+
+        let ollama_response: OllamaModelsResponse =
+            serde_json::from_str(&body).map_err(owlen_core::Error::Serialization)?;
+
+        let models = ollama_response
+            .models
+            .into_iter()
+            .map(|model| ModelInfo {
+                id: model.name.clone(),
+                name: model.name.clone(),
+                description: model
+                    .details
+                    .as_ref()
+                    .and_then(|d| d.family.as_ref().map(|f| format!("Ollama {f} model"))),
+                provider: "ollama".to_string(),
+                context_window: None,
+                capabilities: vec!["chat".to_string()],
+            })
+            .collect();
+
+        Ok(models)
+    }
+}
+
+#[async_trait::async_trait]
+impl Provider for OllamaProvider {
+    fn name(&self) -> &str {
+        "ollama"
+    }
+
+    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
+        self.model_manager
+            .get_or_refresh(false, || async { self.fetch_models().await })
+            .await
+    }
+
+    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
+        let ChatRequest {
+            model,
+            messages,
+            parameters,
+        } = request;
+
+        let messages: Vec<OllamaMessage> = messages.iter().map(Self::convert_message).collect();
+
+        let options = Self::build_options(parameters);
+
+        let ollama_request = OllamaChatRequest {
+            model,
+            messages,
+            stream: false,
+            options,
+        };
+
+        let url = format!("{}/api/chat", self.base_url);
+        let response = self
+            .client
+            .post(&url)
+            .json(&ollama_request)
+            .send()
+            .await
+            .map_err(|e| owlen_core::Error::Network(format!("Chat request failed: {e}")))?;
+
+        if !response.status().is_success() {
+            let code = response.status();
+            let error = parse_error_body(response).await;
+            return Err(owlen_core::Error::Network(format!(
+                "Ollama chat failed ({code}): {error}"
+            )));
+        }
+
+        let body = response.text().await.map_err(|e| {
+            owlen_core::Error::Network(format!("Failed to read chat response: {e}"))
+        })?;
+
+        let mut ollama_response: OllamaChatResponse =
+            serde_json::from_str(&body).map_err(owlen_core::Error::Serialization)?;
+
+        if let Some(error) = ollama_response.error.take() {
+            return Err(owlen_core::Error::Provider(anyhow::anyhow!(error)));
+        }
+
+        let message = match ollama_response.message {
+            Some(ref msg) => Self::convert_ollama_message(msg),
+            None => {
+                return Err(owlen_core::Error::Provider(anyhow::anyhow!(
+                    "Ollama response missing message"
+                )))
+            }
+        };
+
+        let usage = if let (Some(prompt_tokens), Some(completion_tokens)) = (
+            ollama_response.prompt_eval_count,
+            ollama_response.eval_count,
+        ) {
+            Some(TokenUsage {
+                prompt_tokens,
+                completion_tokens,
+                total_tokens: prompt_tokens + completion_tokens,
+            })
+        } else {
+            None
+        };
+
+        Ok(ChatResponse {
+            message,
+            usage,
+            is_streaming: false,
+            is_final: true,
+        })
+    }
+
+    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
+        let ChatRequest {
+            model,
+            messages,
+            parameters,
+        } = request;
+
+        let messages: Vec<OllamaMessage> = messages.iter().map(Self::convert_message).collect();
+
+        let options = Self::build_options(parameters);
+
+        let ollama_request = OllamaChatRequest {
+            model,
+            messages,
+            stream: true,
+            options,
+        };
+
+        let url = format!("{}/api/chat", self.base_url);
+
+        let response = self
+            .client
+            .post(&url)
+            .json(&ollama_request)
+            .send()
+            .await
+            .map_err(|e| owlen_core::Error::Network(format!("Streaming request failed: {e}")))?;
+
+        if !response.status().is_success() {
+            let code = response.status();
+            let error = parse_error_body(response).await;
+            return Err(owlen_core::Error::Network(format!(
+                "Ollama streaming chat failed ({code}): {error}"
+            )));
+        }
+
+        let (tx, rx) = mpsc::unbounded_channel();
+        let mut stream = response.bytes_stream();
+
+        tokio::spawn(async move {
+            let mut buffer = String::new();
+
+            while let Some(chunk) = stream.next().await {
+                match chunk {
+                    Ok(bytes) => {
+                        if let Ok(text) = String::from_utf8(bytes.to_vec()) {
+                            buffer.push_str(&text);
+
+                            while let Some(pos) = buffer.find('\n') {
+                                let mut line = buffer[..pos].trim().to_string();
+                                buffer.drain(..=pos);
+
+                                if line.is_empty() {
+                                    continue;
+                                }
+
+                                if line.ends_with('\r') {
+                                    line.pop();
+                                }
+
+                                match serde_json::from_str::<OllamaChatResponse>(&line) {
+                                    Ok(mut ollama_response) => {
+                                        if let Some(error) = ollama_response.error.take() {
+                                            let _ = tx.send(Err(owlen_core::Error::Provider(
+                                                anyhow::anyhow!(error),
+                                            )));
+                                            break;
+                                        }
+
+                                        if let Some(message) = ollama_response.message {
+                                            let mut chat_response = ChatResponse {
+                                                message: Self::convert_ollama_message(&message),
+                                                usage: None,
+                                                is_streaming: true,
+                                                is_final: ollama_response.done,
+                                            };
+
+                                            if let (Some(prompt_tokens), Some(completion_tokens)) = (
+                                                ollama_response.prompt_eval_count,
+                                                ollama_response.eval_count,
+                                            ) {
+                                                chat_response.usage = Some(TokenUsage {
+                                                    prompt_tokens,
+                                                    completion_tokens,
+                                                    total_tokens: prompt_tokens + completion_tokens,
+                                                });
+                                            }
+
+                                            if tx.send(Ok(chat_response)).is_err() {
+                                                break;
+                                            }
+
+                                            if ollama_response.done {
+                                                break;
+                                            }
+                                        }
+                                    }
+                                    Err(e) => {
+                                        let _ = tx.send(Err(owlen_core::Error::Serialization(e)));
+                                        break;
+                                    }
+                                }
+                            }
+                        } else {
+                            let _ = tx.send(Err(owlen_core::Error::Serialization(
+                                serde_json::Error::io(io::Error::new(
+                                    io::ErrorKind::InvalidData,
+                                    "Non UTF-8 chunk from Ollama",
+                                )),
+                            )));
+                            break;
+                        }
+                    }
+                    Err(e) => {
+                        let _ = tx.send(Err(owlen_core::Error::Network(format!(
+                            "Stream error: {e}"
+                        ))));
+                        break;
+                    }
+                }
+            }
+        });
+
+        let stream = UnboundedReceiverStream::new(rx);
+        Ok(Box::pin(stream))
+    }
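The spawned task above is a hand-rolled NDJSON reader: bytes are buffered, split on newlines, and each line is decoded as one `OllamaChatResponse`. For reference, the frames it expects from `POST /api/chat` with `"stream": true` look roughly like this (field subset only; token counts illustrative):

```
{"message":{"role":"assistant","content":"Hel"},"done":false}
{"message":{"role":"assistant","content":"lo"},"done":false}
{"message":{"role":"assistant","content":""},"done":true,"prompt_eval_count":26,"eval_count":12}
```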
+
+    async fn health_check(&self) -> Result<()> {
+        let url = format!("{}/api/version", self.base_url);
+
+        let response = self
+            .client
+            .get(&url)
+            .send()
+            .await
+            .map_err(|e| owlen_core::Error::Network(format!("Health check failed: {e}")))?;
+
+        if response.status().is_success() {
+            Ok(())
+        } else {
+            Err(owlen_core::Error::Network(format!(
+                "Ollama health check failed: HTTP {}",
+                response.status()
+            )))
+        }
+    }
+
+    fn config_schema(&self) -> serde_json::Value {
+        serde_json::json!({
+            "type": "object",
+            "properties": {
+                "base_url": {
+                    "type": "string",
+                    "description": "Base URL for Ollama API",
+                    "default": "http://localhost:11434"
+                },
+                "timeout_secs": {
+                    "type": "integer",
+                    "description": "HTTP request timeout in seconds",
+                    "minimum": 5,
+                    "default": DEFAULT_TIMEOUT_SECS
+                },
+                "model_cache_ttl_secs": {
+                    "type": "integer",
+                    "description": "Seconds to cache model listings",
+                    "minimum": 5,
+                    "default": DEFAULT_MODEL_CACHE_TTL_SECS
+                }
+            }
+        })
+    }
+}
+
+async fn parse_error_body(response: reqwest::Response) -> String {
+    match response.bytes().await {
+        Ok(bytes) => {
+            if bytes.is_empty() {
+                return "unknown error".to_string();
+            }
+
+            if let Ok(err) = serde_json::from_slice::<OllamaErrorResponse>(&bytes) {
+                if let Some(error) = err.error {
+                    return error;
+                }
+            }
+
+            match String::from_utf8(bytes.to_vec()) {
+                Ok(text) if !text.trim().is_empty() => text,
+                _ => "unknown error".to_string(),
+            }
+        }
+        Err(_) => "unknown error".to_string(),
+    }
+}
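Since `health_check` only hits `/api/version`, it makes a cheap startup probe. A minimal sketch of using it (the `#[tokio::main]` harness is illustrative):

```rust
#[tokio::main]
async fn main() -> owlen_core::Result<()> {
    let provider = OllamaProvider::new("http://localhost:11434")?;
    // Fail fast with a readable message instead of timing out on the first chat.
    if let Err(e) = provider.health_check().await {
        eprintln!("Ollama is not reachable: {e}");
    }
    Ok(())
}
```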
diff --git a/crates/owlen-tui/Cargo.toml b/crates/owlen-tui/Cargo.toml
new file mode 100644
index 0000000..23ca0dc
--- /dev/null
+++ b/crates/owlen-tui/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "owlen-tui"
+version = "0.1.0"
+edition = "2021"
+description = "Terminal User Interface for OWLEN LLM client"
+
+[dependencies]
+owlen-core = { path = "../owlen-core" }
+
+# TUI framework
+ratatui = { workspace = true }
+crossterm = { workspace = true }
+
+# Async runtime
+tokio = { workspace = true }
+tokio-util = { workspace = true }
+futures-util = { workspace = true }
+
+# Utilities
+anyhow = { workspace = true }
+uuid = { workspace = true }
+
+[dev-dependencies]
+tokio-test = { workspace = true }
+tempfile = { workspace = true }
diff --git a/crates/owlen-tui/src/app.rs b/crates/owlen-tui/src/app.rs
new file mode 100644
index 0000000..517df4c
--- /dev/null
+++ b/crates/owlen-tui/src/app.rs
@@ -0,0 +1,910 @@
+use anyhow::Result;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use tokio::sync::mpsc;
+use uuid::Uuid;
+
+use crate::config::Config;
+use crate::database::Database;
+use crate::events::Event;
+use crate::files::FileManager;
+use crate::ollama::{Message, OllamaClient, OllamaEvent};
+
+pub type AppResult<T> = Result<T, Box<dyn std::error::Error>>;
+
+/// The main application state
+#[derive(Debug)]
+pub enum AppState {
+    Running,
+    Quit,
+}
+
+/// Current input mode for the application
+#[derive(Debug, Clone, PartialEq)]
+pub enum InputMode {
+    /// User is in the initialization process
+    Init,
+    /// User is typing a message
+    Editing,
+    /// User is browsing the conversation
+    Normal,
+    /// User is selecting a model
+    ModelSelection,
+    /// User is viewing stats
+    StatsMenu,
+    /// User is viewing help
+    Help,
+    /// User is browsing files
+    FileBrowser,
+    /// User is managing sessions
+    SessionManager,
+    /// User is typing a filename for operations
+    FileInput,
+    /// User is typing a session name
+    SessionInput,
+}
+
+/// Different steps of the initialization process
+#[derive(Debug, Clone, PartialEq)]
+pub enum InitState {
+    /// Asking user to select a backend
+    BackendSelection,
+    /// Asking user to input custom host
+    CustomHostInput,
+    /// Checking for Ollama connection
+    CheckingOllama,
+    /// Fetching available models
+    #[allow(dead_code)]
+    FetchingModels,
+    /// Asking user to select a model
+    ModelSelection,
+    /// Configuration is complete
+    Complete,
+}
+
+/// A conversation message with metadata
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConversationMessage {
+    pub role: String,
+    pub content: String,
+    pub request_id: Option<Uuid>,
+    pub is_streaming: bool,
+}
+
+/// Session statistics
+#[derive(Debug, Clone, Default)]
+pub struct SessionStats {
+    pub session_start: Option<std::time::Instant>,
+    pub messages_sent: u32,
+    pub messages_received: u32,
+    pub total_characters_sent: u32,
+    pub total_characters_received: u32,
+    pub models_used: std::collections::HashSet<String>,
+    pub errors_encountered: u32,
+}
+
+/// Main application structure
+pub struct App {
+    /// Current input mode
+    pub input_mode: InputMode,
+    /// Current state of the initialization process
+    pub init_state: InitState,
+    /// Selected backend type
+    pub backend_type: crate::config::BackendType,
+    /// Current input buffer
+    pub input: String,
+    /// Cursor position in input
+    pub input_cursor_position: usize,
+    /// Conversation history
+    pub messages: Vec<ConversationMessage>,
+    /// Current selected model
+    pub selected_model: String,
+    /// Available models from Ollama
+    pub available_models: Vec<String>,
+    /// Selected model index for model selection UI
+    pub model_selection_index: usize,
+    /// Ollama client for making API requests
+    ollama_client: OllamaClient,
+    /// Currently active requests (for tracking streaming responses)
+    active_requests: HashMap<Uuid, usize>, // UUID -> message index
+    /// Status message to show at the bottom
+    pub status_message: String,
+    /// Scroll position in the message list
+    pub message_scroll: usize,
+    /// Error message to display
+    pub error_message: Option<String>,
+    /// Session statistics
+    pub stats: SessionStats,
+    /// File manager for file operations
+    file_manager: FileManager,
+    /// Current file path for operations
+    pub current_file_path: String,
+    /// Available files in current directory
+    pub available_files: Vec,
+    /// Selected file index
+    pub file_selection_index: usize,
+    /// Available sessions
+    pub available_sessions: Vec,
+    /// Selected session index
+    pub session_selection_index: usize,
+    /// Input buffer for file operations
+    pub file_input: String,
+    /// Session name input
+    pub session_name_input: String,
+    /// Database for session storage
+    database: Database,
+}
+
+impl App {
+    pub fn new(
+        ollama_sender: mpsc::UnboundedSender<OllamaEvent>,
+        config: Config,
+        db: &Database,
+        is_init: bool,
+    ) -> Self {
+        let ollama_client = OllamaClient::new(config.general.ollama_host.clone(), ollama_sender);
+
+        // Initialize file manager
+        let file_manager = FileManager::new(config.clone());
+
+        // Load project context if available
+        let mut messages = Vec::new();
+        if let Ok(Some(context)) = file_manager.load_project_context() {
+            messages.push(ConversationMessage {
+                role: "system".to_string(),
+                content: format!("Project Context:\n{}", context),
+                request_id: None,
+                is_streaming: false,
+            });
+        }
+
+        let (input_mode, init_state, backend_type) = if is_init {
+            (InputMode::Init, InitState::BackendSelection, crate::config::BackendType::Ollama)
+        } else {
+            (InputMode::Normal, InitState::Complete, crate::config::BackendType::Ollama)
+        };
+
+        let mut stats = SessionStats::default();
+        stats.session_start = Some(std::time::Instant::now());
+        stats.models_used.insert(config.general.default_model.clone());
+
+        let app = Self {
+            input_mode,
+            init_state,
+            backend_type,
+            input: String::new(),
+            input_cursor_position: 0,
+            messages, // Use loaded messages (including project context)
+            selected_model: config.general.default_model.clone(), // Default model
+            available_models: vec![config.general.default_model.clone()],
+            model_selection_index: 0,
+            ollama_client,
+            active_requests: HashMap::new(),
+            status_message: "Press 'h' for help or 'q' to quit".to_string(),
+            message_scroll: 0,
+            error_message: None,
+            stats,
+            file_manager,
+            current_file_path: ".".to_string(),
+            available_files: Vec::new(),
+            file_selection_index: 0,
+            available_sessions: Vec::new(),
+            session_selection_index: 0,
+            file_input: String::new(),
+            session_name_input: String::new(),
+            database: db.clone(),
+        };
+
+        if is_init {
+            let ollama_client = app.ollama_client.clone();
+            tokio::spawn(async move {
+                let _ = ollama_client.get_models().await;
+            });
+        }
+
+        app
+    }
+
+    /// Handle terminal events
+    pub async fn handle_event(&mut self, event: Event) -> AppResult<AppState> {
+        self.error_message = None; // Clear error message on new input
+
+        match self.input_mode {
+            InputMode::Init => self.handle_init_mode_event(event).await,
+            InputMode::Normal => self.handle_normal_mode_event(event).await,
+            InputMode::Editing => self.handle_editing_mode_event(event).await,
+            InputMode::ModelSelection => self.handle_model_selection_event(event).await,
+            InputMode::StatsMenu => self.handle_stats_menu_event(event).await,
+            InputMode::Help => self.handle_help_event(event).await,
+            InputMode::FileBrowser => self.handle_file_browser_event(event).await,
+            InputMode::SessionManager => self.handle_session_manager_event(event).await,
+            InputMode::FileInput => self.handle_file_input_event(event).await,
+            InputMode::SessionInput => self.handle_session_input_event(event).await,
+        }
+    }
+
+    /// Handle events in initialization mode
+    async fn handle_init_mode_event(&mut self, event: Event) -> AppResult<AppState> {
+        match self.init_state {
+            InitState::BackendSelection => {
+                if event.is_up() {
+                    self.backend_type = crate::config::BackendType::Ollama;
+                }
+                if event.is_down() {
+                    self.backend_type = crate::config::BackendType::Custom;
+                }
+                if event.is_enter() {
+                    match self.backend_type {
+                        crate::config::BackendType::Ollama => {
+                            self.init_state = InitState::CheckingOllama;
+                            let ollama_client = self.ollama_client.clone();
+                            tokio::spawn(async move {
+                                let _ = ollama_client.get_models().await;
+                            });
+                        }
+                        crate::config::BackendType::Custom => {
+                            self.init_state = InitState::CustomHostInput;
+                            self.input.clear();
+                            self.input_cursor_position = 0;
+                        }
+                    }
+                }
+            }
+            InitState::CustomHostInput => {
+                if event.is_escape() {
+                    self.init_state = InitState::BackendSelection;
+                    self.input.clear();
+                    self.input_cursor_position = 0;
+                }
+                if event.is_enter() && !self.input.trim().is_empty() {
+                    // Update ollama_client with custom host
+                    self.ollama_client = OllamaClient::new(self.input.trim().to_string(), self.ollama_client.event_sender.clone());
+                    self.init_state = InitState::CheckingOllama;
+                    let ollama_client = self.ollama_client.clone();
+                    tokio::spawn(async move {
+                        let _ = ollama_client.get_models().await;
+                    });
+                }
+                if event.is_backspace() && self.input_cursor_position > 0 {
+                    let current_index = self.input_cursor_position;
+                    self.input.remove(current_index - 1);
+                    self.input_cursor_position -= 1;
+                }
+                if event.is_left() && self.input_cursor_position > 0 {
+                    self.input_cursor_position -= 1;
+                }
+                if event.is_right() && self.input_cursor_position < self.input.len() {
+                    self.input_cursor_position += 1;
+                }
+                if let Some(c) = event.as_char() {
+                    self.input.insert(self.input_cursor_position, c);
+                    self.input_cursor_position += 1;
+                }
+            }
+            InitState::CheckingOllama => {
+                // This state is handled by the initial ollama call in `App::new`
+                // We transition to the next state in `handle_ollama_event`
+            }
+            InitState::FetchingModels => {
+                // This state is handled by the initial ollama call in `App::new`
+                // We transition to the next state in `handle_ollama_event`
+            }
+            InitState::ModelSelection => {
+                if event.is_up() && self.model_selection_index > 0 {
+                    self.model_selection_index -= 1;
+                }
+
+                if event.is_down() && self.model_selection_index < self.available_models.len().saturating_sub(1) {
+                    self.model_selection_index += 1;
+                }
+
+                if event.is_enter() && !self.available_models.is_empty() && self.model_selection_index < self.available_models.len() {
+                    self.selected_model = self.available_models[self.model_selection_index].clone();
+                    self.status_message = format!("Selected model: {}", self.selected_model);
+                    self.init_state = InitState::Complete;
+
+                    // Track model change in stats
+                    self.stats.models_used.insert(self.selected_model.clone());
+
+                    // Save config
+                    let config = Config {
+                        general: crate::config::GeneralConfig {
+                            default_model: self.selected_model.clone(),
+                            ollama_host: self.ollama_client.base_url.clone(),
+                            backend_type: self.backend_type.clone(),
+                            project_context_file: "OWLEN.md".to_string(),
+                        },
+                        ..Default::default()
+                    };
+                    crate::config::save_config(&config)?;
+
+                    self.input_mode = InputMode::Normal;
+                }
+            }
+            InitState::Complete => {
+                self.input_mode = InputMode::Normal;
+            }
+        }
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in normal (browsing) mode
+    async fn handle_normal_mode_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_quit() {
+            return Ok(AppState::Quit);
+        }
+
+        if let Some(c) = event.as_char() {
+            match c {
+                'i' => {
+                    self.input_mode = InputMode::Editing;
+                    self.status_message = "Type your message... (Esc to cancel, Enter to send)".to_string();
+                }
+                'm' => {
+                    self.input_mode = InputMode::ModelSelection;
+                    self.status_message = "Select model... (Enter to confirm, Esc to cancel)".to_string();
+                    // Refresh model list
+                    let _ = self.ollama_client.get_models().await;
+
+                    // Set model_selection_index to the currently selected model
+                    if let Some(index) = self.available_models.iter().position(|m| m == &self.selected_model) {
+                        self.model_selection_index = index;
+                    }
+                }
+                's' => {
+                    self.input_mode = InputMode::StatsMenu;
+                    self.status_message = "Session Statistics (Esc to close)".to_string();
+                }
+                'h' => {
+                    self.input_mode = InputMode::Help;
+                    self.status_message = "Help - All Available Commands (Esc to close)".to_string();
+                }
+                'f' => {
+                    self.input_mode = InputMode::FileBrowser;
+                    self.status_message = "File Browser - ↑/↓:Navigate Enter:Read r:Refresh Esc:Close".to_string();
+                    self.refresh_file_list();
+                }
+                'l' => {
+                    self.input_mode = InputMode::SessionManager;
+                    self.status_message = "Session Manager - ↑/↓:Navigate Enter:Load s:Save d:Delete Esc:Close".to_string();
+                    self.refresh_session_list();
+                }
+                'j' => {
+                    // Scroll down in messages
+                    if self.message_scroll > 0 {
+                        self.message_scroll -= 1;
+                    }
+                }
+                'k' => {
+                    // Scroll up in messages
+                    self.message_scroll += 1;
+                }
+                'c' => {
+                    // Clear conversation
+                    self.messages.clear();
+                    self.active_requests.clear();
+                    self.message_scroll = 0;
+                    self.status_message = "Conversation cleared".to_string();
+                }
+                'r' => {
+                    // Refresh models
+                    let _ = self.ollama_client.get_models().await;
+                    self.status_message = "Refreshing models...".to_string();
+                }
+                _ => {}
+            }
+        }
+
+        if event.is_up() && self.message_scroll > 0 {
+            self.message_scroll -= 1;
+        }
+
+        if event.is_down() {
+            self.message_scroll += 1;
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in editing mode
+    async fn handle_editing_mode_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_escape() {
+            self.input_mode = InputMode::Normal;
+            self.input.clear();
+            self.input_cursor_position = 0;
+            self.status_message = "Message cancelled".to_string();
+            return Ok(AppState::Running);
+        }
+
+        if event.is_enter() && !self.input.trim().is_empty() {
+            let message = self.input.trim().to_string();
+            self.input.clear();
+            self.input_cursor_position = 0;
+            self.input_mode = InputMode::Normal;
+
+            // Add user message to conversation
+            self.messages.push(ConversationMessage {
+                role: "user".to_string(),
+                content: message.clone(),
+                request_id: None,
+                is_streaming: false,
+            });
+
+            // Update stats
+            self.stats.messages_sent += 1;
+            self.stats.total_characters_sent += message.len() as u32;
+
+            // Prepare messages for Ollama API (convert to API format)
+            let api_messages: Vec<Message> = self.messages
+                .iter()
+                .filter(|m| !m.is_streaming) // Don't include streaming messages
+                .map(|m| Message {
+                    role: m.role.clone(),
+                    content: m.content.clone(),
+                })
+                .collect();
+
+            // Send to Ollama
+            match self.ollama_client.chat(self.selected_model.clone(), api_messages).await {
+                Ok(request_id) => {
+                    // Add placeholder for assistant response
+                    let message_index = self.messages.len();
+                    self.messages.push(ConversationMessage {
+                        role: "assistant".to_string(),
+                        content: String::new(),
+                        request_id: Some(request_id),
+                        is_streaming: true,
+                    });
+
+                    self.active_requests.insert(request_id, message_index);
+                    self.status_message = format!("Sending message to {}...", self.selected_model);
+                }
+                Err(e) => {
+                    self.error_message = Some(format!("Failed to send message: {}", e));
+                    self.status_message = "Ready".to_string();
+                }
+            }
+
+            return Ok(AppState::Running);
+        }
+
+        if event.is_backspace() && self.input_cursor_position > 0 {
+            let current_index = self.input_cursor_position;
+            self.input.remove(current_index - 1);
+            self.input_cursor_position -= 1;
+        }
+
+        if event.is_left() && self.input_cursor_position > 0 {
+            self.input_cursor_position -= 1;
+        }
+
+        if event.is_right() && self.input_cursor_position < self.input.len() {
+            self.input_cursor_position += 1;
+        }
+
+        if let Some(c) = event.as_char() {
+            self.input.insert(self.input_cursor_position, c);
+            self.input_cursor_position += 1;
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in model selection mode
+    async fn handle_model_selection_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_escape() {
+            self.input_mode = InputMode::Normal;
+            self.status_message = "Model selection cancelled".to_string();
+            return Ok(AppState::Running);
+        }
+
+        if event.is_enter() {
+            if !self.available_models.is_empty() && self.model_selection_index < self.available_models.len() {
+                self.selected_model = self.available_models[self.model_selection_index].clone();
+                self.status_message = format!("Selected model: {}", self.selected_model);
+
+                // Track model change in stats
+                self.stats.models_used.insert(self.selected_model.clone());
+
+                // Save config
+                let config = Config {
+                    general: crate::config::GeneralConfig {
+                        default_model: self.selected_model.clone(),
+                        ollama_host: self.ollama_client.base_url.clone(),
+                        backend_type: self.backend_type.clone(),
+                        project_context_file: "OWLEN.md".to_string(),
+                    },
+                    ..Default::default()
+                };
+                if let Err(e) = crate::config::save_config(&config) {
+                    self.status_message = format!("Failed to save config: {}", e);
+                }
+            }
+            self.input_mode = InputMode::Normal;
+            return Ok(AppState::Running);
+        }
+
+        if event.is_up() && self.model_selection_index > 0 {
+            self.model_selection_index -= 1;
+        }
+
+        if event.is_down() && self.model_selection_index < self.available_models.len().saturating_sub(1) {
+            self.model_selection_index += 1;
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in stats menu mode
+    async fn handle_stats_menu_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_escape() {
+            self.input_mode = InputMode::Normal;
+            self.status_message = "Press 'h' for help or 'q' to quit".to_string();
+            return Ok(AppState::Running);
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in help mode
+    async fn handle_help_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_escape() {
+            self.input_mode = InputMode::Normal;
+            self.status_message = "Press 'h' for help or 'q' to quit".to_string();
+            return Ok(AppState::Running);
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in file browser mode
+    async fn handle_file_browser_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_escape() {
+            self.input_mode = InputMode::Normal;
+            self.status_message = "Press 'h' for help or 'q' to quit".to_string();
+            return Ok(AppState::Running);
+        }
+
+        if event.is_up() && self.file_selection_index > 0 {
+            self.file_selection_index -= 1;
+        }
+
+        if event.is_down() && self.file_selection_index < self.available_files.len().saturating_sub(1) {
+            self.file_selection_index += 1;
+        }
+
+        if event.is_enter() && !self.available_files.is_empty() {
+            let selected_file = &self.available_files[self.file_selection_index];
+            if let Ok(content) = self.file_manager.read_file(&selected_file.path) {
+                // Add file content as a system message
+                self.messages.push(ConversationMessage {
+                    role: "system".to_string(),
+                    content: format!("File: {}\n\n{}", selected_file.name, content),
+                    request_id: None,
+                    is_streaming: false,
+                });
+                self.status_message = format!("Loaded file: {}", selected_file.name);
+                self.input_mode = InputMode::Normal;
+            } else {
+                self.status_message = format!("Failed to read file: {}", selected_file.name);
+            }
+        }
+
+        if let Some(c) = event.as_char() {
+            match c {
+                'r' => {
+                    self.refresh_file_list();
+                    self.status_message = "File list refreshed".to_string();
+                }
+                _ => {}
+            }
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in session manager mode
+    async fn handle_session_manager_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_escape() {
+            self.input_mode = InputMode::Normal;
+            self.status_message = "Press 'h' for help or 'q' to quit".to_string();
+            return Ok(AppState::Running);
+        }
+
+        if event.is_up() && self.session_selection_index > 0 {
+            self.session_selection_index -= 1;
+        }
+
+        if event.is_down() && self.session_selection_index < self.available_sessions.len().saturating_sub(1) {
+            self.session_selection_index += 1;
+        }
+
+        if event.is_enter() && !self.available_sessions.is_empty() {
+            // Load selected session
+            let session_id = &self.available_sessions[self.session_selection_index].id;
+            self.load_session(session_id.clone());
+        }
+
+        if let Some(c) = event.as_char() {
+            match c {
+                's' => {
+                    // Save current session
+                    self.input_mode = InputMode::SessionInput;
+                    self.session_name_input.clear();
+                    self.status_message = "Enter session name:".to_string();
+                }
+                'd' => {
+                    // Delete selected session
+                    if !self.available_sessions.is_empty() {
+                        let session_id = &self.available_sessions[self.session_selection_index].id;
+                        self.delete_session(session_id.clone());
+                    }
+                }
+                'r' => {
+                    self.refresh_session_list();
+                    self.status_message = "Session list refreshed".to_string();
+                }
+                _ => {}
+            }
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in file input mode
+    async fn handle_file_input_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_escape() {
+            self.input_mode = InputMode::FileBrowser;
+            self.file_input.clear();
+            self.status_message = "File operation cancelled".to_string();
+            return Ok(AppState::Running);
+        }
+
+        if event.is_enter() {
+            // Process file input
+            self.input_mode = InputMode::FileBrowser;
+            self.status_message = "File operation completed".to_string();
+            self.file_input.clear();
+            return Ok(AppState::Running);
+        }
+
+        // Handle text input
+        if let Some(c) = event.as_char() {
+            if c.is_ascii_graphic() || c == ' ' {
+                self.file_input.push(c);
+            }
+        }
+
+        if event.is_backspace() && !self.file_input.is_empty() {
+            self.file_input.pop();
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events in session input mode
+    async fn handle_session_input_event(&mut self, event: Event) -> AppResult<AppState> {
+        if event.is_escape() {
+            self.input_mode = InputMode::SessionManager;
+            self.session_name_input.clear();
+            self.status_message = "Session save cancelled".to_string();
+            return Ok(AppState::Running);
+        }
+
+        if event.is_enter() && !self.session_name_input.trim().is_empty() {
+            // Save session with the given name
+            let session_name = self.session_name_input.trim().to_string();
+            self.save_current_session(session_name);
+            self.input_mode = InputMode::SessionManager;
+            self.session_name_input.clear();
+            return Ok(AppState::Running);
+        }
+
+        // Handle text input
+        if let Some(c) = event.as_char() {
+            if c.is_ascii_graphic() || c == ' ' {
+                self.session_name_input.push(c);
+            }
+        }
+
+        if event.is_backspace() && !self.session_name_input.is_empty() {
+            self.session_name_input.pop();
+        }
+
+        Ok(AppState::Running)
+    }
+
+    /// Handle events from Ollama client
+    pub async fn handle_ollama_event(&mut self, event: OllamaEvent) -> AppResult<()> {
+        match event {
+            OllamaEvent::MessageChunk { request_id, content, done } => {
+                if let Some(&message_index) = self.active_requests.get(&request_id) {
+                    if let Some(message) = self.messages.get_mut(message_index) {
+                        message.content.push_str(&content);
+
+                        if done {
+                            message.is_streaming = false;
+                            self.active_requests.remove(&request_id);
+                            self.status_message = "Response completed".to_string();
+
+                            // Update stats
+                            self.stats.messages_received += 1;
+                            self.stats.total_characters_received += message.content.len() as u32;
+                        }
+                    }
+                }
+            }
+            OllamaEvent::Error { request_id, error } => {
+                if let Some(&message_index) = self.active_requests.get(&request_id) {
+                    if let Some(message) = self.messages.get_mut(message_index) {
+                        message.content = format!("Error: {}", error);
+                        message.is_streaming = false;
+                    }
+                    self.active_requests.remove(&request_id);
+
+                    // Update error stats
+                    self.stats.errors_encountered += 1;
+                }
+                self.error_message = Some(error);
+                self.status_message = "Error occurred".to_string();
+            }
+            OllamaEvent::ModelsAvailable(models) => {
+                if self.input_mode == InputMode::Init {
+                    if !models.is_empty() {
+                        self.available_models = models;
+                        self.model_selection_index = 0;
+                        self.init_state = InitState::ModelSelection;
+                    } else {
+                        self.error_message = Some("No models found. Please make sure Ollama is running and you have downloaded some models.".to_string());
+                    }
+                } else if !models.is_empty() {
+                    self.available_models = models;
+                    self.model_selection_index = 0;
+
+                    // If current selected model is not in the list, use the first one
+                    if !self.available_models.contains(&self.selected_model) && !self.available_models.is_empty() {
+                        self.selected_model = self.available_models[0].clone();
+                    }
+
+                    self.status_message = format!("Found {} models", self.available_models.len());
+                } else {
+                    self.status_message = "No models available".to_string();
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Get the current conversation as a displayable format
+    pub fn get_display_messages(&self) -> &Vec<ConversationMessage> {
+        &self.messages
+    }
+
+    /// Check if currently in editing mode
+    pub fn is_editing(&self) -> bool {
+        self.input_mode == InputMode::Editing
+    }
+
+    /// Check if currently in stats menu mode
+    pub fn is_stats_menu(&self) -> bool {
+        self.input_mode == InputMode::StatsMenu
+    }
+
+    /// Check if currently in help mode
+    pub fn is_help(&self) -> bool {
+        self.input_mode == InputMode::Help
+    }
+
+    /// Check if currently in model selection mode
+    pub fn is_model_selecting(&self) -> bool {
+        self.input_mode == InputMode::ModelSelection
+    }
+
+    /// Print final session statistics
+    pub fn print_final_stats(&self) {
+        let elapsed = if let Some(start) = self.stats.session_start {
+            start.elapsed()
+        } else {
+            std::time::Duration::from_secs(0)
+        };
+
+        let session_duration = format!("{}m {}s", elapsed.as_secs() / 60, elapsed.as_secs() % 60);
+        let models_used: Vec<String> = self.stats.models_used.iter().cloned().collect();
+
+        println!("\n┌──────────────────────────────────────┐");
+        println!("│ Session Summary │");
+        println!("├──────────────────────────────────────┤");
+        println!("│ Duration: {:>16} │", session_duration);
+        println!("│ Messages Sent: {:>16} │", self.stats.messages_sent);
+        println!("│ Messages Received: {:>16} │", self.stats.messages_received);
+        println!("│
Characters Sent: {:>16} โ”‚", self.stats.total_characters_sent); + println!("โ”‚ Characters Rcvd: {:>16} โ”‚", self.stats.total_characters_received); + println!("โ”‚ Errors: {:>16} โ”‚", self.stats.errors_encountered); + println!("โ”‚ Models Used: {:>16} โ”‚", models_used.len()); + if !models_used.is_empty() { + println!("โ”‚ Models: {:25} โ”‚", models_used.join(", ")); + } + println!("โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜"); + } + + /// Refresh the file list for the file browser + fn refresh_file_list(&mut self) { + if let Ok(files) = self.file_manager.list_files(&self.current_file_path) { + self.available_files = files; + self.file_selection_index = 0; + } + } + + /// Refresh the session list for the session manager + fn refresh_session_list(&mut self) { + if let Ok(sessions) = self.database.get_session_summaries() { + self.available_sessions = sessions; + } else { + self.available_sessions = Vec::new(); + } + self.session_selection_index = 0; + } + + /// Load a session by ID + fn load_session(&mut self, session_id: String) { + if let Ok(Some(session)) = self.database.load_session(&session_id) { + self.messages = session.messages; + self.selected_model = session.model_used; + self.status_message = format!("Loaded session: {}", session.name); + } else { + self.status_message = "Failed to load session".to_string(); + } + self.input_mode = InputMode::Normal; + } + + /// Save the current session + fn save_current_session(&mut self, session_name: String) { + use std::time::SystemTime; + use uuid::Uuid; + + let session = crate::database::Session { + id: Uuid::new_v4().to_string(), + name: session_name.clone(), + messages: self.messages.clone(), + created_at: SystemTime::now(), + updated_at: SystemTime::now(), + model_used: self.selected_model.clone(), + }; + + if let Ok(()) = self.database.save_session(&session) { + self.status_message = format!("Saved session: {}", session_name); + } else { + self.status_message = "Failed to save session".to_string(); + } + self.refresh_session_list(); + } + + /// Delete a session by ID + fn delete_session(&mut self, session_id: String) { + if let Ok(()) = self.database.delete_session(&session_id) { + self.status_message = "Session deleted successfully".to_string(); + } else { + self.status_message = "Failed to delete session".to_string(); + } + self.refresh_session_list(); + } + + /// Check if currently in file browser mode + pub fn is_file_browser(&self) -> bool { + self.input_mode == InputMode::FileBrowser + } + + /// Check if currently in session manager mode + pub fn is_session_manager(&self) -> bool { + self.input_mode == InputMode::SessionManager + } + + /// Check if currently in file input mode + pub fn is_file_input(&self) -> bool { + self.input_mode == InputMode::FileInput + } + + /// Check if currently in session input mode + pub fn is_session_input(&self) -> bool { + self.input_mode == InputMode::SessionInput + } +} \ No newline at end of file diff --git a/crates/owlen-tui/src/chat.rs b/crates/owlen-tui/src/chat.rs new file mode 100644 index 0000000..e5b5a6d --- /dev/null +++ b/crates/owlen-tui/src/chat.rs @@ -0,0 +1,644 @@ +//! 
Chat-specific TUI implementation
+
+use anyhow::Result;
+use owlen_core::provider::Provider;
+use owlen_core::types::{ChatRequest, Message, Role, ChatParameters};
+use owlen_core::Error as CoreError;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use tokio::sync::mpsc;
+use uuid::Uuid;
+
+use crate::config::Config;
+use crate::database::Database;
+use crate::events::Event;
+use crate::ui::ChatRenderer;
+
+pub type ChatResult<T> = Result<T, Box<dyn std::error::Error>>;
+
+/// Chat application state
+#[derive(Debug)]
+pub enum ChatState {
+    Running,
+    Quit,
+}
+
+/// Chat input modes
+#[derive(Debug, Clone, PartialEq)]
+pub enum ChatInputMode {
+    Normal,
+    Editing,
+    ModelSelection,
+    ConversationSelection,
+    Help,
+}
+
+/// A conversation message with metadata
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConversationMessage {
+    pub id: Uuid,
+    pub role: String,
+    pub content: String,
+    pub timestamp: std::time::SystemTime,
+    pub model_used: Option<String>,
+    pub is_streaming: bool,
+}
+
+impl ConversationMessage {
+    pub fn new(role: String, content: String) -> Self {
+        Self {
+            id: Uuid::new_v4(),
+            role,
+            content,
+            timestamp: std::time::SystemTime::now(),
+            model_used: None,
+            is_streaming: false,
+        }
+    }
+
+    pub fn to_core_message(&self) -> Message {
+        let role = match self.role.as_str() {
+            "user" => Role::User,
+            "assistant" => Role::Assistant,
+            "system" => Role::System,
+            _ => Role::User,
+        };
+        Message::new(role, self.content.clone())
+    }
+}
+
+/// Conversation metadata
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Conversation {
+    pub id: Uuid,
+    pub title: String,
+    pub created_at: std::time::SystemTime,
+    pub updated_at: std::time::SystemTime,
+    pub message_count: usize,
+    pub model_used: String,
+}
+
+/// Session statistics
+#[derive(Debug, Clone, Default)]
+pub struct ChatStats {
+    pub session_start: Option<std::time::Instant>,
+    pub messages_sent: u32,
+    pub messages_received: u32,
+    pub total_characters_sent: u32,
+    pub total_characters_received: u32,
+    pub models_used: std::collections::HashSet<String>,
+    pub errors_encountered: u32,
+    pub current_conversation_id: Option<Uuid>,
+}
+
+/// Chat application
+pub struct ChatApp {
+    /// Current input mode
+    pub input_mode: ChatInputMode,
+    /// Current input buffer
+    pub input: String,
+    /// Multi-line input buffer
+    pub input_lines: Vec<String>,
+    /// Current line in multi-line input
+    pub current_input_line: usize,
+    /// Cursor position in current line
+    pub input_cursor_position: usize,
+    /// Conversation messages
+    pub messages: Vec<ConversationMessage>,
+    /// Available models
+    pub available_models: Vec<String>,
+    /// Selected model index
+    pub model_selection_index: usize,
+    /// Current selected model
+    pub selected_model: String,
+    /// Available conversations
+    pub conversations: Vec<Conversation>,
+    /// Selected conversation index
+    pub conversation_selection_index: usize,
+    /// Current conversation ID
+    pub current_conversation_id: Option<Uuid>,
+    /// Message scroll position
+    pub message_scroll: usize,
+    /// Status message
+    pub status_message: String,
+    /// Error message
+    pub error_message: Option<String>,
+    /// Session statistics
+    pub stats: ChatStats,
+    /// Provider for LLM requests
+    provider: Box<dyn Provider>,
+    /// Active streaming requests
+    active_requests: HashMap<Uuid, usize>,
+    /// Database for persistence
+    database: Database,
+    /// Configuration
+    config: Config,
+    /// UI renderer
+    renderer: ChatRenderer,
+}
+
+impl ChatApp {
+    pub fn new(provider: Box<dyn Provider>, config: Config, database: Database) -> ChatResult<Self> {
+        let renderer = ChatRenderer::new(&config);
+        let current_conversation_id = Uuid::new_v4();
+
+        let mut stats = ChatStats::default();
+        stats.session_start = Some(std::time::Instant::now());
+        stats.current_conversation_id = Some(current_conversation_id);
+
+        Ok(Self {
+            input_mode: ChatInputMode::Normal,
+            input: String::new(),
+            input_lines: vec![String::new()],
+            current_input_line: 0,
+            input_cursor_position: 0,
+            messages: Vec::new(),
+            available_models: Vec::new(),
+            model_selection_index: 0,
+            selected_model: config.general.default_model.clone(),
+            conversations: Vec::new(),
+            conversation_selection_index: 0,
+            current_conversation_id: Some(current_conversation_id),
+            message_scroll: 0,
+            status_message: "Press 'h' for help".to_string(),
+            error_message: None,
+            stats,
+            provider,
+            active_requests: HashMap::new(),
+            database,
+            config,
+            renderer,
+        })
+    }
+
+    /// Handle user input events
+    pub async fn handle_event(&mut self, event: Event) -> ChatResult<ChatState> {
+        use crossterm::event::{KeyCode, KeyModifiers};
+
+        match event {
+            Event::Key(key) => {
+                self.clear_error();
+
+                match self.input_mode {
+                    ChatInputMode::Normal => {
+                        match (key.code, key.modifiers) {
+                            (KeyCode::Char('q'), KeyModifiers::NONE) => {
+                                return Ok(ChatState::Quit);
+                            }
+                            (KeyCode::Char('h'), KeyModifiers::NONE) => {
+                                self.input_mode = ChatInputMode::Help;
+                            }
+                            (KeyCode::Char('m'), KeyModifiers::NONE) => {
+                                self.refresh_models().await?;
+                                self.input_mode = ChatInputMode::ModelSelection;
+                            }
+                            (KeyCode::Char('c'), KeyModifiers::NONE) => {
+                                self.refresh_conversations().await?;
+                                self.input_mode = ChatInputMode::ConversationSelection;
+                            }
+                            (KeyCode::Char('n'), KeyModifiers::NONE) => {
+                                self.new_conversation().await?;
+                            }
+                            (KeyCode::Char('i'), KeyModifiers::NONE) | (KeyCode::Enter, KeyModifiers::NONE) => {
+                                self.input_mode = ChatInputMode::Editing;
+                            }
+                            (KeyCode::Up, KeyModifiers::NONE) => {
+                                self.scroll_up();
+                            }
+                            (KeyCode::Down, KeyModifiers::NONE) => {
+                                self.scroll_down();
+                            }
+                            (KeyCode::PageUp, KeyModifiers::NONE) => {
+                                self.page_up();
+                            }
+                            (KeyCode::PageDown, KeyModifiers::NONE) => {
+                                self.page_down();
+                            }
+                            _ => {}
+                        }
+                    }
+                    ChatInputMode::Editing => {
+                        match (key.code, key.modifiers) {
+                            (KeyCode::Esc, KeyModifiers::NONE) => {
+                                self.input_mode = ChatInputMode::Normal;
+                                self.clear_input();
+                            }
+                            (KeyCode::Enter, KeyModifiers::CTRL) => {
+                                let message = self.get_input_content();
+                                if !message.trim().is_empty() {
+                                    self.send_message(message).await?;
+                                    self.clear_input();
+                                    self.input_mode = ChatInputMode::Normal;
+                                }
+                            }
+                            (KeyCode::Enter, KeyModifiers::NONE) => {
+                                self.add_input_line();
+                            }
+                            (KeyCode::Backspace, KeyModifiers::NONE) => {
+                                self.handle_backspace();
+                            }
+                            (KeyCode::Delete, KeyModifiers::NONE) => {
+                                self.handle_delete();
+                            }
+                            (KeyCode::Left, KeyModifiers::NONE) => {
+                                self.move_cursor_left();
+                            }
+                            (KeyCode::Right, KeyModifiers::NONE) => {
+                                self.move_cursor_right();
+                            }
+                            (KeyCode::Up, KeyModifiers::NONE) => {
+                                self.move_cursor_up();
+                            }
+                            (KeyCode::Down, KeyModifiers::NONE) => {
+                                self.move_cursor_down();
+                            }
+                            (KeyCode::Home, KeyModifiers::NONE) => {
+                                self.input_cursor_position = 0;
+                            }
+                            (KeyCode::End, KeyModifiers::NONE) => {
+                                self.input_cursor_position = self.current_line().len();
+                            }
+                            (KeyCode::Char(c), KeyModifiers::NONE) | (KeyCode::Char(c), KeyModifiers::SHIFT) => {
+                                self.insert_char(c);
+                            }
+                            _ => {}
+                        }
+                    }
+                    ChatInputMode::ModelSelection => {
+                        match key.code {
+                            KeyCode::Esc => {
+                                self.input_mode = ChatInputMode::Normal;
+                            }
+                            KeyCode::Enter => {
+                                if !self.available_models.is_empty() {
+                                    self.selected_model = self.available_models[self.model_selection_index].clone();
+                                    self.stats.models_used.insert(self.selected_model.clone());
+                                    self.status_message = format!("Selected model: {}", self.selected_model);
+                                }
+                                self.input_mode = ChatInputMode::Normal;
+                            }
+                            KeyCode::Up => {
+                                if self.model_selection_index > 0 {
+                                    self.model_selection_index -= 1;
+                                }
+                            }
+                            KeyCode::Down => {
+                                if self.model_selection_index < self.available_models.len().saturating_sub(1) {
+                                    self.model_selection_index += 1;
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
+                    ChatInputMode::ConversationSelection => {
+                        match key.code {
+                            KeyCode::Esc => {
+                                self.input_mode = ChatInputMode::Normal;
+                            }
+                            KeyCode::Enter => {
+                                if !self.conversations.is_empty() {
+                                    self.load_conversation(self.conversations[self.conversation_selection_index].id).await?;
+                                }
+                                self.input_mode = ChatInputMode::Normal;
+                            }
+                            KeyCode::Up => {
+                                if self.conversation_selection_index > 0 {
+                                    self.conversation_selection_index -= 1;
+                                }
+                            }
+                            KeyCode::Down => {
+                                if self.conversation_selection_index < self.conversations.len().saturating_sub(1) {
+                                    self.conversation_selection_index += 1;
+                                }
+                            }
+                            KeyCode::Char('d') => {
+                                if !self.conversations.is_empty() {
+                                    let conv_id = self.conversations[self.conversation_selection_index].id;
+                                    self.delete_conversation(conv_id).await?;
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
+                    ChatInputMode::Help => {
+                        self.input_mode = ChatInputMode::Normal;
+                    }
+                }
+            }
+            Event::Resize(_, _) => {
+                // Handle terminal resize
+            }
+        }
+
+        Ok(ChatState::Running)
+    }
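One design note on `send_message` below: unlike `app.rs`, which parks streaming requests in `active_requests` and applies chunks as events arrive, this method drains the stream inline, so the UI cannot repaint until the turn finishes. A hedged sketch of decoupling it with a channel, mirroring the `app.rs` approach (names illustrative):

```rust
// Illustrative only: forward chunks to the event loop instead of looping inline.
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
tokio::spawn(async move {
    let mut stream = stream; // the ChatStream obtained from chat_stream()
    while let Some(chunk) = futures_util::StreamExt::next(&mut stream).await {
        if tx.send(chunk).is_err() {
            break; // receiver (the UI loop) was dropped
        }
    }
});
// The main loop would then `rx.recv().await` alongside terminal events.
```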
+
+    /// Send a message to the LLM
+    async fn send_message(&mut self, content: String) -> ChatResult<()> {
+        let user_message = ConversationMessage::new("user".to_string(), content.clone());
+        self.messages.push(user_message);
+        self.stats.messages_sent += 1;
+        self.stats.total_characters_sent += content.len() as u32;
+
+        // Save message to database
+        self.save_current_conversation().await?;
+
+        // Convert messages to core format
+        let core_messages: Vec<Message> = self.messages.iter()
+            .map(|m| m.to_core_message())
+            .collect();
+
+        let request = ChatRequest {
+            model: self.selected_model.clone(),
+            messages: core_messages,
+            parameters: ChatParameters {
+                temperature: Some(self.config.llm.temperature),
+                max_tokens: self.config.llm.max_tokens,
+                stream: true,
+                extra: HashMap::new(),
+            },
+        };
+
+        // Add placeholder for assistant response
+        let mut assistant_message = ConversationMessage::new("assistant".to_string(), String::new());
+        assistant_message.is_streaming = true;
+        assistant_message.model_used = Some(self.selected_model.clone());
+        let message_index = self.messages.len();
+        self.messages.push(assistant_message);
+
+        match self.provider.chat_stream(request).await {
+            Ok(mut stream) => {
+                use futures_util::StreamExt;
+
+                while let Some(response) = stream.next().await {
+                    match response {
+                        Ok(chat_response) => {
+                            if let Some(message) = self.messages.get_mut(message_index) {
+                                message.content.push_str(&chat_response.message.content);
+                                message.is_streaming = !chat_response.is_final;
+
+                                if chat_response.is_final {
+                                    self.stats.messages_received += 1;
+                                    self.stats.total_characters_received += message.content.len() as u32;
+
+                                    // Save completed conversation
+                                    self.save_current_conversation().await?;
+                                }
+                            }
+                        }
+                        Err(e) => {
+                            self.handle_error(format!("Streaming error: {}", e));
+                            break;
+                        }
+                    }
+                }
+            }
+            Err(e) => {
+                // Remove the placeholder message on error
+                self.messages.pop();
+                self.handle_error(format!("Failed to send message: {}", e));
+            }
+        }
+
+        self.scroll_to_bottom();
+        Ok(())
+    }
+
+    /// Refresh available models
+    async
+    async fn refresh_models(&mut self) -> ChatResult<()> {
+        match self.provider.list_models().await {
+            Ok(models) => {
+                self.available_models = models.into_iter().map(|m| m.id).collect();
+                self.model_selection_index = 0;
+
+                // Update selection index if current model is in the list
+                if let Some(index) = self.available_models.iter().position(|m| m == &self.selected_model) {
+                    self.model_selection_index = index;
+                }
+            }
+            Err(e) => {
+                self.handle_error(format!("Failed to fetch models: {}", e));
+            }
+        }
+        Ok(())
+    }
+
+    /// Refresh available conversations
+    async fn refresh_conversations(&mut self) -> ChatResult<()> {
+        match self.database.list_conversations().await {
+            Ok(conversations) => {
+                self.conversations = conversations;
+                self.conversation_selection_index = 0;
+            }
+            Err(e) => {
+                self.handle_error(format!("Failed to load conversations: {}", e));
+            }
+        }
+        Ok(())
+    }
+
+    /// Create a new conversation
+    async fn new_conversation(&mut self) -> ChatResult<()> {
+        self.save_current_conversation().await?;
+
+        self.messages.clear();
+        self.current_conversation_id = Some(Uuid::new_v4());
+        self.stats.current_conversation_id = self.current_conversation_id;
+        self.message_scroll = 0;
+        self.status_message = "Started new conversation".to_string();
+
+        Ok(())
+    }
+
+    /// Load a conversation
+    async fn load_conversation(&mut self, conversation_id: Uuid) -> ChatResult<()> {
+        // Save current conversation first
+        self.save_current_conversation().await?;
+
+        match self.database.load_conversation(conversation_id).await {
+            Ok(messages) => {
+                self.messages = messages;
+                self.current_conversation_id = Some(conversation_id);
+                self.stats.current_conversation_id = Some(conversation_id);
+                self.message_scroll = 0;
+                self.scroll_to_bottom();
+                self.status_message = "Conversation loaded".to_string();
+            }
+            Err(e) => {
+                self.handle_error(format!("Failed to load conversation: {}", e));
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Delete a conversation
+    async fn delete_conversation(&mut self, conversation_id: Uuid) -> ChatResult<()> {
+        match self.database.delete_conversation(conversation_id).await {
+            Ok(_) => {
+                self.refresh_conversations().await?;
+                self.status_message = "Conversation deleted".to_string();
+
+                // If we deleted the current conversation, start a new one
+                if self.current_conversation_id == Some(conversation_id) {
+                    self.new_conversation().await?;
+                }
+            }
+            Err(e) => {
+                self.handle_error(format!("Failed to delete conversation: {}", e));
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Save current conversation to database
+    async fn save_current_conversation(&mut self) -> ChatResult<()> {
+        if let Some(conversation_id) = self.current_conversation_id {
+            if !self.messages.is_empty() {
+                let _ = self.database.save_conversation(conversation_id, &self.messages).await;
+            }
+        }
+        Ok(())
+    }
+
+    // Input handling methods
+    fn get_input_content(&self) -> String {
+        self.input_lines.join("\n")
+    }
+
+    fn clear_input(&mut self) {
+        self.input_lines = vec![String::new()];
+        self.current_input_line = 0;
+        self.input_cursor_position = 0;
+    }
+
+    fn add_input_line(&mut self) {
+        self.input_lines.insert(self.current_input_line + 1, String::new());
+        self.current_input_line += 1;
+        self.input_cursor_position = 0;
+    }
+
+    fn current_line(&self) -> &String {
+        &self.input_lines[self.current_input_line]
+    }
+
+    fn current_line_mut(&mut self) -> &mut String {
+        &mut self.input_lines[self.current_input_line]
+    }
+
+    fn insert_char(&mut self, c: char) {
+        self.current_line_mut().insert(self.input_cursor_position, c);
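+        // NOTE: `String::insert`/`remove` operate on byte indices, so this
+        // cursor arithmetic assumes single-byte (ASCII) input; e.g. after
+        // inserting 'é' (two bytes in UTF-8), bumping the cursor by one would
+        // land mid-character and the next insert would panic.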
+        self.input_cursor_position += 1;
+    }
+
+    fn handle_backspace(&mut self) {
+        if self.input_cursor_position > 0 {
+            self.current_line_mut().remove(self.input_cursor_position - 1);
+            self.input_cursor_position -= 1;
+        } else if self.current_input_line > 0 {
+            // Join current line with previous line
+            let current_content = self.input_lines.remove(self.current_input_line);
+            self.current_input_line -= 1;
+            self.input_cursor_position = self.current_line().len();
+            self.current_line_mut().push_str(&current_content);
+        }
+    }
+
+    fn handle_delete(&mut self) {
+        if self.input_cursor_position < self.current_line().len() {
+            self.current_line_mut().remove(self.input_cursor_position);
+        } else if self.current_input_line < self.input_lines.len() - 1 {
+            // Join next line with current line
+            let next_content = self.input_lines.remove(self.current_input_line + 1);
+            self.current_line_mut().push_str(&next_content);
+        }
+    }
+
+    fn move_cursor_left(&mut self) {
+        if self.input_cursor_position > 0 {
+            self.input_cursor_position -= 1;
+        } else if self.current_input_line > 0 {
+            self.current_input_line -= 1;
+            self.input_cursor_position = self.current_line().len();
+        }
+    }
+
+    fn move_cursor_right(&mut self) {
+        if self.input_cursor_position < self.current_line().len() {
+            self.input_cursor_position += 1;
+        } else if self.current_input_line < self.input_lines.len() - 1 {
+            self.current_input_line += 1;
+            self.input_cursor_position = 0;
+        }
+    }
+
+    fn move_cursor_up(&mut self) {
+        if self.current_input_line > 0 {
+            self.current_input_line -= 1;
+            self.input_cursor_position = self.input_cursor_position.min(self.current_line().len());
+        }
+    }
+
+    fn move_cursor_down(&mut self) {
+        if self.current_input_line < self.input_lines.len() - 1 {
+            self.current_input_line += 1;
+            self.input_cursor_position = self.input_cursor_position.min(self.current_line().len());
+        }
+    }
+
+    // Scrolling methods
+    fn scroll_up(&mut self) {
+        if self.message_scroll > 0 {
+            self.message_scroll -= 1;
+        }
+    }
+
+    fn scroll_down(&mut self) {
+        if self.message_scroll < self.messages.len().saturating_sub(1) {
+            self.message_scroll += 1;
+        }
+    }
+
+    fn page_up(&mut self) {
+        self.message_scroll = self.message_scroll.saturating_sub(10);
+    }
+
+    fn page_down(&mut self) {
+        self.message_scroll = (self.message_scroll + 10).min(self.messages.len().saturating_sub(1));
+    }
+
+    fn scroll_to_bottom(&mut self) {
+        self.message_scroll = self.messages.len().saturating_sub(1);
+    }
+
+    // Error handling
+    fn handle_error<S: Into<String>>(&mut self, error: S) {
+        let error_msg = error.into();
+        self.error_message = Some(error_msg);
+        self.stats.errors_encountered += 1;
+    }
+
+    fn clear_error(&mut self) {
+        self.error_message = None;
+    }
+
+    /// Get renderer for UI drawing
+    pub fn renderer(&self) -> &ChatRenderer {
+        &self.renderer
+    }
+
+    /// Print final statistics
+    pub fn print_final_stats(&self) {
+        if let Some(start_time) = self.stats.session_start {
+            let duration = start_time.elapsed();
+            println!("\n=== Chat Session Statistics ===");
+            println!("Session duration: {:?}", duration);
+            println!("Messages sent: {}", self.stats.messages_sent);
+            println!("Messages received: {}", self.stats.messages_received);
+            println!("Characters sent: {}", self.stats.total_characters_sent);
+            println!("Characters received: {}", self.stats.total_characters_received);
+            println!("Models used: {:?}", self.stats.models_used);
+            println!("Errors encountered: {}", self.stats.errors_encountered);
+        }
+    }
+}
\ No newline at end of file
diff --git a/crates/owlen-tui/src/chat_app.rs b/crates/owlen-tui/src/chat_app.rs
new file mode 100644
index 0000000..dc1aa49
--- /dev/null
+++ b/crates/owlen-tui/src/chat_app.rs
@@ -0,0 +1,542 @@
+use anyhow::Result;
+use owlen_core::{
+    session::{SessionController, SessionOutcome},
+    types::{ChatParameters, ChatResponse, Conversation, ModelInfo},
+};
+use tokio::sync::mpsc;
+use uuid::Uuid;
+
+use crate::config;
+use crate::events::Event;
+use std::collections::HashSet;
+use std::fmt;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum AppState {
+    Running,
+    Quit,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum InputMode {
+    Normal,
+    Editing,
+    ProviderSelection,
+    ModelSelection,
+    Help,
+}
+
+impl fmt::Display for InputMode {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let label = match self {
+            InputMode::Normal => "Normal",
+            InputMode::Editing => "Editing",
+            InputMode::ModelSelection => "Model",
+            InputMode::ProviderSelection => "Provider",
+            InputMode::Help => "Help",
+        };
+        f.write_str(label)
+    }
+}
+
+/// Messages emitted by asynchronous streaming tasks
+#[derive(Debug)]
+pub enum SessionEvent {
+    StreamChunk {
+        message_id: Uuid,
+        response: ChatResponse,
+    },
+    StreamError {
+        message: String,
+    },
+}
+
+pub struct ChatApp {
+    controller: SessionController,
+    pub mode: InputMode,
+    pub status: String,
+    pub error: Option<String>,
+    models: Vec<ModelInfo>, // All models fetched
+    pub available_providers: Vec<String>, // Unique providers from models
+    pub selected_provider: String, // The currently selected provider
+    pub selected_provider_index: usize, // Index into the available_providers list
+    pub selected_model: Option<usize>, // Index into the *filtered* models list
+    scroll: usize,
+    session_tx: mpsc::UnboundedSender<SessionEvent>,
+    streaming: HashSet<Uuid>,
+}
+
+impl ChatApp {
+    pub fn new(controller: SessionController) -> (Self, mpsc::UnboundedReceiver<SessionEvent>) {
+        let (session_tx, session_rx) = mpsc::unbounded_channel();
+        let app = Self {
+            controller,
+            mode: InputMode::Normal,
+            status: "Ready".to_string(),
+            error: None,
+            models: Vec::new(),
+            available_providers: Vec::new(),
+            selected_provider: "ollama".to_string(), // Default, will be updated in initialize_models
+            selected_provider_index: 0,
+            selected_model: None,
+            scroll: 0,
+            session_tx,
+            streaming: HashSet::new(),
+        };
+
+        (app, session_rx)
+    }
+
+    pub fn status_message(&self) -> &str {
+        &self.status
+    }
+
+    pub fn error_message(&self) -> Option<&String> {
+        self.error.as_ref()
+    }
+
+    pub fn mode(&self) -> InputMode {
+        self.mode
+    }
+
+    pub fn conversation(&self) -> &Conversation {
+        self.controller.conversation()
+    }
+
+    pub fn models(&self) -> Vec<&ModelInfo> {
+        self.models.iter()
+            .filter(|m| m.provider == self.selected_provider)
+            .collect()
+    }
+
+    pub fn selected_model(&self) -> &str {
+        self.controller.selected_model()
+    }
+
+    pub fn config(&self) -> &owlen_core::config::Config {
+        self.controller.config()
+    }
+
+    pub fn selected_model_index(&self) -> Option<usize> {
+        self.selected_model
+    }
+
+    pub fn scroll(&self) -> usize {
+        self.scroll
+    }
+
+    pub fn message_count(&self) -> usize {
+        self.controller.conversation().messages.len()
+    }
+
+    pub fn streaming_count(&self) -> usize {
+        self.streaming.len()
+    }
+
+    pub fn formatter(&self) -> &owlen_core::formatting::MessageFormatter {
+        self.controller.formatter()
+    }
+
+    pub fn input_buffer(&self) -> &owlen_core::input::InputBuffer {
+        self.controller.input_buffer()
+    }
+
+    pub fn input_buffer_mut(&mut self) -> &mut owlen_core::input::InputBuffer {
+        self.controller.input_buffer_mut()
+    }
+
+    pub async fn initialize_models(&mut self) -> Result<()> {
+        let config_model_name = self.controller.config().general.default_model.clone();
+        let config_model_provider = self.controller.config().general.default_provider.clone();
+
+        let all_models = self.controller.models(false).await?;
+        self.models = all_models;
+
+        // Populate available_providers
+        let providers = self.models.iter().map(|m| m.provider.clone()).collect::<HashSet<_>>();
+        self.available_providers = providers.into_iter().collect();
+        self.available_providers.sort();
+
+        // Set selected_provider based on config, or default to "ollama" if not found
+        self.selected_provider = self.available_providers.iter()
+            .find(|&p| p == &config_model_provider)
+            .cloned()
+            .unwrap_or_else(|| "ollama".to_string());
+        self.selected_provider_index = self.available_providers.iter()
+            .position(|p| p == &self.selected_provider)
+            .unwrap_or(0);
+
+        self.sync_selected_model_index();
+
+        // Ensure the default model is set in the controller and config
+        self.controller.ensure_default_model(&self.models);
+
+        let current_model_name = self.controller.selected_model().to_string();
+        let current_model_provider = self.controller.config().general.default_provider.clone();
+
+        if config_model_name.as_deref() != Some(current_model_name.as_str()) || config_model_provider != current_model_provider {
+            if let Err(err) = config::save_config(self.controller.config()) {
+                self.error = Some(format!("Failed to save config: {err}"));
+            } else {
+                self.error = None;
+            }
+        }
+
+        Ok(())
+    }
+
+    pub async fn handle_event(&mut self, event: Event) -> Result<AppState> {
+        use crossterm::event::{KeyCode, KeyModifiers};
+
+        match event {
+            Event::Tick => {
+                // Future: update streaming timers
+            }
+            Event::Key(key) => match self.mode {
+                InputMode::Normal => match (key.code, key.modifiers) {
+                    (KeyCode::Char('q'), KeyModifiers::NONE)
+                    | (KeyCode::Char('c'), KeyModifiers::CONTROL) => {
+                        return Ok(AppState::Quit);
+                    }
+                    (KeyCode::Char('m'), KeyModifiers::NONE) => {
+                        self.refresh_models().await?;
+                        self.mode = InputMode::ProviderSelection;
+                    }
+                    (KeyCode::Char('n'), KeyModifiers::NONE) => {
+                        self.controller.start_new_conversation(None, None);
+                        self.status = "Started new conversation".to_string();
+                    }
+                    (KeyCode::Char('h'), KeyModifiers::NONE) => {
+                        self.mode = InputMode::Help;
+                    }
+                    (KeyCode::Char('c'), KeyModifiers::NONE) => {
+                        self.controller.clear();
+                        self.status = "Conversation cleared".to_string();
+                    }
+                    (KeyCode::Enter, KeyModifiers::NONE)
+                    | (KeyCode::Char('i'), KeyModifiers::NONE) => {
+                        self.mode = InputMode::Editing;
+                    }
+                    (KeyCode::Up, KeyModifiers::NONE) => {
+                        self.scroll = self.scroll.saturating_add(1);
+                    }
+                    (KeyCode::Down, KeyModifiers::NONE) => {
+                        self.scroll = self.scroll.saturating_sub(1);
+                    }
+                    (KeyCode::Esc, KeyModifiers::NONE) => {
+                        self.mode = InputMode::Normal;
+                    }
+                    _ => {}
+                },
+                InputMode::Editing => match key.code {
+                    KeyCode::Esc if key.modifiers.is_empty() => {
+                        self.mode = InputMode::Normal;
+                        self.reset_status();
+                    }
+                    KeyCode::Enter if key.modifiers.contains(KeyModifiers::SHIFT) => {
+                        self.input_buffer_mut().insert_char('\n');
+                    }
+                    KeyCode::Enter if key.modifiers.is_empty() => {
+                        self.try_send_message().await?;
+                        self.mode = InputMode::Normal;
+                    }
+                    KeyCode::Enter => {
+                        self.input_buffer_mut().insert_char('\n');
+                    }
+                    KeyCode::Char('j') if key.modifiers.contains(KeyModifiers::CONTROL) => {
+                        self.input_buffer_mut().insert_char('\n');
+                    }
+                    KeyCode::Backspace => {
+                        self.input_buffer_mut().backspace();
+                    }
+                    KeyCode::Delete => {
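+                        // Forward-delete at the cursor; cursor and history
+                        // bookkeeping live in `owlen_core::input::InputBuffer`,
+                        // so the TUI layer only forwards key events.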
+                        self.input_buffer_mut().delete();
+                    }
+                    KeyCode::Left => {
+                        self.input_buffer_mut().move_left();
+                    }
+                    KeyCode::Right => {
+                        self.input_buffer_mut().move_right();
+                    }
+                    KeyCode::Home => {
+                        self.input_buffer_mut().move_home();
+                    }
+                    KeyCode::End => {
+                        self.input_buffer_mut().move_end();
+                    }
+                    KeyCode::Up => {
+                        self.input_buffer_mut().history_previous();
+                    }
+                    KeyCode::Down => {
+                        self.input_buffer_mut().history_next();
+                    }
+                    KeyCode::Char(c)
+                        if key.modifiers.is_empty()
+                            || key.modifiers.contains(KeyModifiers::SHIFT) =>
+                    {
+                        self.input_buffer_mut().insert_char(c);
+                    }
+                    KeyCode::Tab => {
+                        self.input_buffer_mut().insert_tab();
+                    }
+                    _ => {}
+                },
+                InputMode::ProviderSelection => match key.code {
+                    KeyCode::Esc => {
+                        self.mode = InputMode::Normal;
+                    }
+                    KeyCode::Enter => {
+                        if let Some(provider) = self.available_providers.get(self.selected_provider_index) {
+                            self.selected_provider = provider.clone();
+                            self.sync_selected_model_index(); // Update model selection based on new provider
+                            self.mode = InputMode::ModelSelection;
+                        }
+                    }
+                    KeyCode::Up => {
+                        if self.selected_provider_index > 0 {
+                            self.selected_provider_index -= 1;
+                        }
+                    }
+                    KeyCode::Down => {
+                        if self.selected_provider_index + 1 < self.available_providers.len() {
+                            self.selected_provider_index += 1;
+                        }
+                    }
+                    _ => {}
+                },
+                InputMode::ModelSelection => match key.code {
+                    KeyCode::Esc => {
+                        self.mode = InputMode::Normal;
+                    }
+                    KeyCode::Enter => {
+                        if let Some(selected_model_idx) = self.selected_model {
+                            let filtered_models = self.models();
+                            if let Some(model) = filtered_models.get(selected_model_idx) {
+                                let model_id = model.id.clone();
+                                let model_name = model.name.clone();
+
+                                self.controller.set_model(model_id.clone());
+                                self.status = format!("Using model: {}", model_name);
+                                // Save the selected provider and model to config
+                                self.controller.config_mut().general.default_model = Some(model_id.clone());
+                                self.controller.config_mut().general.default_provider = self.selected_provider.clone();
+                                match config::save_config(self.controller.config()) {
+                                    Ok(_) => self.error = None,
+                                    Err(err) => {
+                                        self.error = Some(format!("Failed to save config: {}", err));
+                                    }
+                                }
+                            }
+                        }
+                        self.mode = InputMode::Normal;
+                    }
+                    KeyCode::Up => {
+                        if let Some(selected_model_idx) = self.selected_model {
+                            if selected_model_idx > 0 {
+                                self.selected_model = Some(selected_model_idx - 1);
+                            }
+                        }
+                    }
+                    KeyCode::Down => {
+                        if let Some(selected_model_idx) = self.selected_model {
+                            if selected_model_idx + 1 < self.models().len() {
+                                self.selected_model = Some(selected_model_idx + 1);
+                            }
+                        }
+                    }
+                    _ => {}
+                },
+                InputMode::Help => match key.code {
+                    KeyCode::Esc | KeyCode::Enter => {
+                        self.mode = InputMode::Normal;
+                    }
+                    _ => {}
+                },
+            },
+            _ => {}
+        }
+
+        Ok(AppState::Running)
+    }
+
+    pub fn handle_session_event(&mut self, event: SessionEvent) -> Result<()> {
+        match event {
+            SessionEvent::StreamChunk {
+                message_id,
+                response,
+            } => {
+                self.controller.apply_stream_chunk(message_id, &response)?;
+                if response.is_final {
+                    self.streaming.remove(&message_id);
+                    self.status = "Response complete".to_string();
+                }
+            }
+            SessionEvent::StreamError { message } => {
+                self.error = Some(message);
+            }
+        }
+        Ok(())
+    }
+
+    fn reset_status(&mut self) {
+        self.status = "Ready".to_string();
+        self.error = None;
+    }
+
+    async fn refresh_models(&mut self) -> Result<()> {
+        let config_model_name = self.controller.config().general.default_model.clone();
+        let config_model_provider = self.controller.config().general.default_provider.clone();
+
+        let all_models = self.controller.models(true).await?;
+        if all_models.is_empty() {
+            self.error = Some("No models available".to_string());
+        } else {
+            self.models = all_models;
+
+            // Populate available_providers
+            let providers = self.models.iter().map(|m| m.provider.clone()).collect::<HashSet<_>>();
+            self.available_providers = providers.into_iter().collect();
+            self.available_providers.sort();
+
+            // Set selected_provider based on config, or default to "ollama" if not found
+            self.selected_provider = self.available_providers.iter()
+                .find(|&p| p == &config_model_provider)
+                .cloned()
+                .unwrap_or_else(|| "ollama".to_string());
+            self.selected_provider_index = self.available_providers.iter()
+                .position(|p| p == &self.selected_provider)
+                .unwrap_or(0);
+
+            self.controller.ensure_default_model(&self.models);
+            self.sync_selected_model_index();
+
+            let current_model_name = self.controller.selected_model().to_string();
+            let current_model_provider = self.controller.config().general.default_provider.clone();
+
+            if config_model_name.as_deref() != Some(current_model_name.as_str()) || config_model_provider != current_model_provider {
+                if let Err(err) = config::save_config(self.controller.config()) {
+                    self.error = Some(format!("Failed to save config: {err}"));
+                } else {
+                    self.error = None;
+                }
+            }
+            self.status = format!("Loaded {} models", self.models.len());
+        }
+        Ok(())
+    }
+
+    async fn try_send_message(&mut self) -> Result<()> {
+        let content = self.controller.input_buffer().text().trim().to_string();
+        if content.is_empty() {
+            self.error = Some("Cannot send empty message".to_string());
+            return Ok(());
+        }
+
+        let message = self.controller.input_buffer_mut().commit_to_history();
+        let mut parameters = ChatParameters::default();
+        parameters.stream = self.controller.config().general.enable_streaming;
+
+        match self.controller.send_message(message, parameters).await {
+            Ok(SessionOutcome::Complete(_response)) => {
+                self.status = "Response received".to_string();
+                self.error = None;
+                Ok(())
+            }
+            Ok(SessionOutcome::Streaming {
+                response_id,
+                stream,
+            }) => {
+                self.spawn_stream(response_id, stream);
+                match self
+                    .controller
+                    .mark_stream_placeholder(response_id, "Loading...")
+                {
+                    Ok(_) => self.error = None,
+                    Err(err) => {
+                        self.error = Some(format!("Could not set loading placeholder: {}", err));
+                    }
+                }
+                self.status = "Waiting for response...".to_string();
+                Ok(())
+            }
+            Err(err) => {
+                let message = err.to_string();
+                if message.to_lowercase().contains("not found") {
+                    self.error = Some(
+                        "Model not available. Press 'm' to pick another installed model."
+                            .to_string(),
+                    );
+                    self.status = "Model unavailable".to_string();
+                    let _ = self.refresh_models().await;
+                    self.mode = InputMode::ProviderSelection;
+                } else {
+                    self.error = Some(message);
+                    self.status = "Send failed".to_string();
+                }
+                Ok(())
+            }
+        }
+    }
+
+    fn sync_selected_model_index(&mut self) {
+        let current_model_id = self.controller.selected_model().to_string();
+        let filtered_models: Vec<&ModelInfo> = self.models.iter()
+            .filter(|m| m.provider == self.selected_provider)
+            .collect();
+
+        if filtered_models.is_empty() {
+            self.selected_model = None;
+            return;
+        }
+
+        if let Some(idx) = filtered_models
+            .iter()
+            .position(|m| m.id == current_model_id)
+        {
+            self.selected_model = Some(idx);
+        } else {
+            // If the current model is not in the filtered list, select the first one
+            self.selected_model = Some(0);
+            if let Some(model) = filtered_models.get(0) {
+                self.controller.set_model(model.id.clone());
+                // Also update the config with the new model and provider
+                self.controller.config_mut().general.default_model = Some(model.id.clone());
+                self.controller.config_mut().general.default_provider = self.selected_provider.clone();
+                if let Err(err) = config::save_config(self.controller.config()) {
+                    self.error = Some(format!("Failed to save config: {err}"));
+                }
+            }
+        }
+    }
+
+    fn spawn_stream(&mut self, message_id: Uuid, mut stream: owlen_core::provider::ChatStream) {
+        let sender = self.session_tx.clone();
+        self.streaming.insert(message_id);
+
+        tokio::spawn(async move {
+            use futures_util::StreamExt;
+
+            while let Some(item) = stream.next().await {
+                match item {
+                    Ok(response) => {
+                        if sender
+                            .send(SessionEvent::StreamChunk {
+                                message_id,
+                                response,
+                            })
+                            .is_err()
+                        {
+                            break;
+                        }
+                    }
+                    Err(e) => {
+                        let _ = sender.send(SessionEvent::StreamError {
+                            message: e.to_string(),
+                        });
+                        break;
+                    }
+                }
+            }
+        });
+    }
+}
diff --git a/crates/owlen-tui/src/code.rs b/crates/owlen-tui/src/code.rs
new file mode 100644
index 0000000..367f96b
--- /dev/null
+++ b/crates/owlen-tui/src/code.rs
@@ -0,0 +1,787 @@
+//! Code-specific TUI implementation
+
+use anyhow::Result;
+use owlen_core::provider::Provider;
+use owlen_core::types::{ChatRequest, Message, Role, ChatParameters};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use uuid::Uuid;
+
+use crate::config::Config;
+use crate::database::Database;
+use crate::events::Event;
+use crate::files::{FileInfo, FileManager};
+use crate::ui::CodeRenderer;
+use crate::chat::{ConversationMessage, ChatResult, ChatStats};
+
+/// Code application state
+#[derive(Debug)]
+pub enum CodeState {
+    Running,
+    Quit,
+}
+
+/// Code-specific input modes
+#[derive(Debug, Clone, PartialEq)]
+pub enum CodeInputMode {
+    Normal,
+    Editing,
+    ModelSelection,
+    FileBrowser,
+    FileSearch,
+    ProjectExplorer,
+    Help,
+}
+
+/// File context information
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct FileContext {
+    pub path: String,
+    pub content: String,
+    pub language: Option<String>,
+    pub line_count: usize,
+    pub size: usize,
+}
+
+/// Code session with project context
+#[derive(Debug, Clone)]
+pub struct CodeSession {
+    pub project_root: Option<String>,
+    pub active_files: Vec<FileContext>,
+    pub recent_files: Vec<String>,
+    pub language_context: Option<String>,
+}
+
+/// Code application optimized for programming assistance
+pub struct CodeApp {
+    /// Current input mode
+    pub input_mode: CodeInputMode,
+    /// Multi-line input with syntax awareness
+    pub input_lines: Vec<String>,
+    /// Current line in input
+    pub current_input_line: usize,
+    /// Cursor position in current line
+    pub input_cursor_position: usize,
+    /// Conversation messages with code context
+    pub messages: Vec<ConversationMessage>,
+    /// Available models
+    pub available_models: Vec<String>,
+    /// Selected model index
+    pub model_selection_index: usize,
+    /// Current selected model
+    pub selected_model: String,
+    /// Code session information
+    pub session: CodeSession,
+    /// File manager for project operations
+    file_manager: FileManager,
+    /// Available files in current directory
+    pub available_files: Vec<FileInfo>,
+    /// Selected file index
+    pub file_selection_index: usize,
+    /// Current file path for operations
+    pub current_file_path: String,
+    /// File search query
+    pub file_search_query: String,
+    /// Filtered file results
+    pub filtered_files: Vec<FileInfo>,
+    /// Message scroll position
+    pub message_scroll: usize,
+    /// Status message
+    pub status_message: String,
+    /// Error message
+    pub error_message: Option<String>,
+    /// Session statistics
+    pub stats: ChatStats,
+    /// Provider for LLM requests
+    provider: Box<dyn Provider>,
+    /// Active streaming requests
+    active_requests: HashMap,
+    /// Database for persistence
+    database: Database,
+    /// Configuration
+    config: Config,
+    /// UI renderer for code mode
+    renderer: CodeRenderer,
+}
+
+impl CodeApp {
+    pub fn new(provider: Box<dyn Provider>, config: Config, database: Database) -> ChatResult<Self> {
+        let file_manager = FileManager::new(config.clone());
+        let renderer = CodeRenderer::new(&config);
+
+        let session = CodeSession {
+            project_root: std::env::current_dir().ok().map(|p| p.to_string_lossy().to_string()),
+            active_files: Vec::new(),
+            recent_files: Vec::new(),
+            language_context: None,
+        };
+
+        let mut stats = ChatStats::default();
+        stats.session_start = Some(std::time::Instant::now());
+        stats.current_conversation_id = Some(Uuid::new_v4());
+
+        let mut app = Self {
+            input_mode: CodeInputMode::Normal,
+            input_lines: vec![String::new()],
+            current_input_line: 0,
+            input_cursor_position: 0,
+            messages: Vec::new(),
+            available_models: Vec::new(),
+            model_selection_index: 0,
+            selected_model: config.general.default_model.clone(),
+            session,
+            file_manager,
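+            // File-browser state starts empty; `refresh_files` populates it
+            // (called from `load_project_context` below and again when the
+            // browser is opened with 'f').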
+            available_files: Vec::new(),
+            file_selection_index: 0,
+            current_file_path: ".".to_string(),
+            file_search_query: String::new(),
+            filtered_files: Vec::new(),
+            message_scroll: 0,
+            status_message: "Press 'h' for help | Code Assistant Mode".to_string(),
+            error_message: None,
+            stats,
+            provider,
+            active_requests: HashMap::new(),
+            database,
+            config,
+            renderer,
+        };
+
+        // Add code assistant system message
+        app.add_code_context();
+
+        // Load project context if available
+        app.load_project_context();
+
+        Ok(app)
+    }
+
+    /// Add code assistant context to the conversation
+    fn add_code_context(&mut self) {
+        let code_context = ConversationMessage::new(
+            "system".to_string(),
+            r#"You are OWLEN Code Assistant, an AI programming helper. You excel at:
+
+- Explaining code and programming concepts clearly
+- Helping debug issues and providing solutions
+- Suggesting improvements and optimizations
+- Writing clean, efficient code in various languages
+- Code reviews and best practices
+- Architecture and design patterns
+- Refactoring and modernization
+
+When helping with code:
+- Provide clear, well-commented examples
+- Explain your reasoning and approach
+- Suggest multiple solutions when appropriate
+- Consider performance, readability, and maintainability
+- Ask clarifying questions when context is needed
+- Use proper syntax highlighting and formatting
+
+Current mode: Code Assistance
+
+Available project context will be provided when files are opened or referenced."#.to_string(),
+        );
+
+        self.messages.push(code_context);
+    }
+
+    /// Load project context from current directory
+    fn load_project_context(&mut self) {
+        if let Ok(Some(context)) = self.file_manager.load_project_context() {
+            let project_message = ConversationMessage::new(
+                "system".to_string(),
+                format!("Project Context:\n{}", context),
+            );
+            self.messages.push(project_message);
+        }
+
+        // Refresh file list
+        self.refresh_files();
+    }
+
+    /// Handle user input events
+    pub async fn handle_event(&mut self, event: Event) -> ChatResult<CodeState> {
+        use crossterm::event::{KeyCode, KeyModifiers};
+
+        match event {
+            Event::Key(key) => {
+                self.clear_error();
+
+                match self.input_mode {
+                    CodeInputMode::Normal => {
+                        match (key.code, key.modifiers) {
+                            (KeyCode::Char('q'), KeyModifiers::NONE) => {
+                                return Ok(CodeState::Quit);
+                            }
+                            (KeyCode::Char('h'), KeyModifiers::NONE) => {
+                                self.input_mode = CodeInputMode::Help;
+                            }
+                            (KeyCode::Char('m'), KeyModifiers::NONE) => {
+                                self.refresh_models().await?;
+                                self.input_mode = CodeInputMode::ModelSelection;
+                            }
+                            (KeyCode::Char('f'), KeyModifiers::NONE) => {
+                                self.refresh_files();
+                                self.input_mode = CodeInputMode::FileBrowser;
+                            }
+                            (KeyCode::Char('p'), KeyModifiers::NONE) => {
+                                self.input_mode = CodeInputMode::ProjectExplorer;
+                            }
+                            (KeyCode::Char('/'), KeyModifiers::NONE) => {
+                                self.input_mode = CodeInputMode::FileSearch;
+                                self.file_search_query.clear();
+                            }
+                            (KeyCode::Char('i'), KeyModifiers::NONE) | (KeyCode::Enter, KeyModifiers::NONE) => {
+                                self.input_mode = CodeInputMode::Editing;
+                            }
+                            (KeyCode::Up, KeyModifiers::NONE) => {
+                                self.scroll_up();
+                            }
+                            (KeyCode::Down, KeyModifiers::NONE) => {
+                                self.scroll_down();
+                            }
+                            (KeyCode::PageUp, KeyModifiers::NONE) => {
+                                self.page_up();
+                            }
+                            (KeyCode::PageDown, KeyModifiers::NONE) => {
+                                self.page_down();
+                            }
+                            (KeyCode::Char('o'), KeyModifiers::NONE) => {
+                                if !self.available_files.is_empty() && self.file_selection_index < self.available_files.len() {
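+                                    // 'o' opens the highlighted entry directly
+                                    // from Normal mode, without entering the
+                                    // file browser first.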
+                                    self.open_file(self.available_files[self.file_selection_index].path.clone()).await?;
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
+                    CodeInputMode::Editing => {
+                        match (key.code, key.modifiers) {
+                            (KeyCode::Esc, KeyModifiers::NONE) => {
+                                self.input_mode = CodeInputMode::Normal;
+                                self.clear_input();
+                            }
+                            (KeyCode::Enter, KeyModifiers::CONTROL) => {
+                                let message = self.get_input_content();
+                                if !message.trim().is_empty() {
+                                    self.send_message(message).await?;
+                                    self.clear_input();
+                                    self.input_mode = CodeInputMode::Normal;
+                                }
+                            }
+                            (KeyCode::Enter, KeyModifiers::NONE) => {
+                                self.add_input_line();
+                            }
+                            (KeyCode::Tab, KeyModifiers::NONE) => {
+                                // Add code indentation
+                                self.insert_string("    ");
+                            }
+                            (KeyCode::Backspace, KeyModifiers::NONE) => {
+                                self.handle_backspace();
+                            }
+                            (KeyCode::Delete, KeyModifiers::NONE) => {
+                                self.handle_delete();
+                            }
+                            (KeyCode::Left, KeyModifiers::NONE) => {
+                                self.move_cursor_left();
+                            }
+                            (KeyCode::Right, KeyModifiers::NONE) => {
+                                self.move_cursor_right();
+                            }
+                            (KeyCode::Up, KeyModifiers::NONE) => {
+                                self.move_cursor_up();
+                            }
+                            (KeyCode::Down, KeyModifiers::NONE) => {
+                                self.move_cursor_down();
+                            }
+                            (KeyCode::Home, KeyModifiers::NONE) => {
+                                self.input_cursor_position = 0;
+                            }
+                            (KeyCode::End, KeyModifiers::NONE) => {
+                                self.input_cursor_position = self.current_line().len();
+                            }
+                            (KeyCode::Char(c), KeyModifiers::NONE) | (KeyCode::Char(c), KeyModifiers::SHIFT) => {
+                                self.insert_char(c);
+                            }
+                            _ => {}
+                        }
+                    }
+                    CodeInputMode::ModelSelection => {
+                        match key.code {
+                            KeyCode::Esc => {
+                                self.input_mode = CodeInputMode::Normal;
+                            }
+                            KeyCode::Enter => {
+                                if !self.available_models.is_empty() {
+                                    self.selected_model = self.available_models[self.model_selection_index].clone();
+                                    self.stats.models_used.insert(self.selected_model.clone());
+                                    self.status_message = format!("Selected model: {}", self.selected_model);
+                                }
+                                self.input_mode = CodeInputMode::Normal;
+                            }
+                            KeyCode::Up => {
+                                if self.model_selection_index > 0 {
+                                    self.model_selection_index -= 1;
+                                }
+                            }
+                            KeyCode::Down => {
+                                if self.model_selection_index < self.available_models.len().saturating_sub(1) {
+                                    self.model_selection_index += 1;
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
+                    CodeInputMode::FileBrowser => {
+                        match key.code {
+                            KeyCode::Esc => {
+                                self.input_mode = CodeInputMode::Normal;
+                            }
+                            KeyCode::Enter => {
+                                if !self.available_files.is_empty() {
+                                    let file_info = &self.available_files[self.file_selection_index];
+                                    if file_info.is_dir {
+                                        self.current_file_path = file_info.path.clone();
+                                        self.refresh_files();
+                                    } else {
+                                        self.open_file(file_info.path.clone()).await?;
+                                        self.input_mode = CodeInputMode::Normal;
+                                    }
+                                }
+                            }
+                            KeyCode::Up => {
+                                if self.file_selection_index > 0 {
+                                    self.file_selection_index -= 1;
+                                }
+                            }
+                            KeyCode::Down => {
+                                if self.file_selection_index < self.available_files.len().saturating_sub(1) {
+                                    self.file_selection_index += 1;
+                                }
+                            }
+                            KeyCode::Backspace => {
+                                // Go to parent directory
+                                if let Some(parent) = std::path::Path::new(&self.current_file_path).parent() {
+                                    self.current_file_path = parent.to_string_lossy().to_string();
+                                    self.refresh_files();
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
+                    CodeInputMode::FileSearch => {
+                        match key.code {
+                            KeyCode::Esc => {
+                                self.input_mode = CodeInputMode::Normal;
+                                self.file_search_query.clear();
+                            }
+                            KeyCode::Enter => {
+                                if !self.filtered_files.is_empty() {
+                                    self.open_file(self.filtered_files[0].path.clone()).await?;
+                                    self.input_mode = CodeInputMode::Normal;
+                                    self.file_search_query.clear();
+                                }
+                            }
+                            KeyCode::Backspace => {
+                                self.file_search_query.pop();
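+                                // Re-filter on every edit so the result list
+                                // tracks the query as it shrinks.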
+                                self.filter_files();
+                            }
+                            KeyCode::Char(c) => {
+                                self.file_search_query.push(c);
+                                self.filter_files();
+                            }
+                            _ => {}
+                        }
+                    }
+                    CodeInputMode::ProjectExplorer => {
+                        match key.code {
+                            KeyCode::Esc => {
+                                self.input_mode = CodeInputMode::Normal;
+                            }
+                            KeyCode::Char('r') => {
+                                self.load_project_context();
+                                self.status_message = "Project context refreshed".to_string();
+                            }
+                            _ => {}
+                        }
+                    }
+                    CodeInputMode::Help => {
+                        self.input_mode = CodeInputMode::Normal;
+                    }
+                }
+            }
+            Event::Resize(_, _) => {
+                // Handle terminal resize
+            }
+        }
+
+        Ok(CodeState::Running)
+    }
+
+    /// Send a message to the LLM with code context
+    async fn send_message(&mut self, content: String) -> ChatResult<()> {
+        // Add file context if relevant files are active
+        let enhanced_content = if !self.session.active_files.is_empty() {
+            let mut context = String::new();
+            context.push_str(&content);
+            context.push_str("\n\n--- Active File Context ---\n");
+
+            for file_ctx in &self.session.active_files {
+                context.push_str(&format!("File: {}\n", file_ctx.path));
+                if let Some(lang) = &file_ctx.language {
+                    context.push_str(&format!("Language: {}\n", lang));
+                }
+                context.push_str("```\n");
+                context.push_str(&file_ctx.content);
+                context.push_str("\n```\n\n");
+            }
+
+            context
+        } else {
+            content.clone()
+        };
+
+        let user_message = ConversationMessage::new("user".to_string(), enhanced_content);
+        self.messages.push(user_message);
+        self.stats.messages_sent += 1;
+        self.stats.total_characters_sent += content.len() as u32;
+
+        // Convert messages to core format
+        let core_messages: Vec<Message> = self.messages.iter()
+            .map(|m| {
+                let role = match m.role.as_str() {
+                    "user" => Role::User,
+                    "assistant" => Role::Assistant,
+                    "system" => Role::System,
+                    _ => Role::User,
+                };
+                Message::new(role, m.content.clone())
+            })
+            .collect();
+
+        let request = ChatRequest {
+            model: self.selected_model.clone(),
+            messages: core_messages,
+            parameters: ChatParameters {
+                temperature: Some(self.config.llm.temperature),
+                max_tokens: self.config.llm.max_tokens,
+                extra: HashMap::new(),
+            },
+        };
+
+        // Add placeholder for assistant response
+        let mut assistant_message = ConversationMessage::new("assistant".to_string(), String::new());
+        assistant_message.is_streaming = true;
+        assistant_message.model_used = Some(self.selected_model.clone());
+        let message_index = self.messages.len();
+        self.messages.push(assistant_message);
+
+        match self.provider.chat_stream(request).await {
+            Ok(mut stream) => {
+                use futures_util::StreamExt;
+
+                while let Some(response) = stream.next().await {
+                    match response {
+                        Ok(chat_response) => {
+                            if let Some(message) = self.messages.get_mut(message_index) {
+                                message.content.push_str(&chat_response.message.content);
+                                message.is_streaming = !chat_response.is_final;
+
+                                if chat_response.is_final {
+                                    self.stats.messages_received += 1;
+                                    self.stats.total_characters_received += message.content.len() as u32;
+                                }
+                            }
+                        }
+                        Err(e) => {
+                            self.handle_error(format!("Streaming error: {}", e));
+                            break;
+                        }
+                    }
+                }
+            }
+            Err(e) => {
+                // Remove the placeholder message on error
+                self.messages.pop();
+                self.handle_error(format!("Failed to send message: {}", e));
+            }
+        }
+
+        self.scroll_to_bottom();
+        Ok(())
+    }
+
+    /// Open a file and add it to the active context
+    async fn open_file(&mut self, file_path: String) -> ChatResult<()> {
+        match self.file_manager.read_file(&file_path) {
+            Ok(content) => {
+                let language = self.detect_language(&file_path);
+
+                let file_context = FileContext {
+                    path: file_path.clone(),
+                    content: content.clone(),
+                    language: language.clone(),
+                    line_count: content.lines().count(),
+                    size: content.len(),
+                };
+
+                // Add to active files (limit to last 5 files)
+                self.session.active_files.push(file_context);
+                if self.session.active_files.len() > 5 {
+                    self.session.active_files.remove(0);
+                }
+
+                // Update recent files
+                if !self.session.recent_files.contains(&file_path) {
+                    self.session.recent_files.push(file_path.clone());
+                    if self.session.recent_files.len() > 10 {
+                        self.session.recent_files.remove(0);
+                    }
+                }
+
+                // Set language context
+                if let Some(lang) = language {
+                    self.session.language_context = Some(lang);
+                }
+
+                self.status_message = format!("Opened: {} ({} lines)", file_path, self.session.active_files.last().unwrap().line_count);
+            }
+            Err(e) => {
+                self.handle_error(format!("Failed to open file: {}", e));
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Detect programming language from file extension
+    fn detect_language(&self, file_path: &str) -> Option<String> {
+        if let Some(extension) = std::path::Path::new(file_path).extension() {
+            match extension.to_str()? {
+                "rs" => Some("rust".to_string()),
+                "py" => Some("python".to_string()),
+                "js" => Some("javascript".to_string()),
+                "ts" => Some("typescript".to_string()),
+                "jsx" => Some("javascript".to_string()),
+                "tsx" => Some("typescript".to_string()),
+                "go" => Some("go".to_string()),
+                "java" => Some("java".to_string()),
+                "cpp" | "cxx" | "cc" => Some("cpp".to_string()),
+                "c" => Some("c".to_string()),
+                "h" | "hpp" => Some("c".to_string()),
+                "cs" => Some("csharp".to_string()),
+                "rb" => Some("ruby".to_string()),
+                "php" => Some("php".to_string()),
+                "swift" => Some("swift".to_string()),
+                "kt" => Some("kotlin".to_string()),
+                "scala" => Some("scala".to_string()),
+                "sh" | "bash" => Some("bash".to_string()),
+                "sql" => Some("sql".to_string()),
+                "html" => Some("html".to_string()),
+                "css" => Some("css".to_string()),
+                "scss" => Some("scss".to_string()),
+                "json" => Some("json".to_string()),
+                "yaml" | "yml" => Some("yaml".to_string()),
+                "toml" => Some("toml".to_string()),
+                "xml" => Some("xml".to_string()),
+                "md" => Some("markdown".to_string()),
+                _ => None,
+            }
+        } else {
+            None
+        }
+    }
+
+    /// Refresh available models
+    async fn refresh_models(&mut self) -> ChatResult<()> {
+        match self.provider.list_models().await {
+            Ok(models) => {
+                self.available_models = models.into_iter().map(|m| m.id).collect();
+                self.model_selection_index = 0;
+
+                if let Some(index) = self.available_models.iter().position(|m| m == &self.selected_model) {
+                    self.model_selection_index = index;
+                }
+            }
+            Err(e) => {
+                self.handle_error(format!("Failed to fetch models: {}", e));
+            }
+        }
+        Ok(())
+    }
+
+    /// Refresh file list in current directory
+    fn refresh_files(&mut self) {
+        match self.file_manager.list_files(&self.current_file_path) {
+            Ok(files) => {
+                self.available_files = files;
+                self.file_selection_index = 0;
+            }
+            Err(e) => {
+                self.handle_error(format!("Failed to list files: {}", e));
+            }
+        }
+    }
+
+    /// Filter files based on search query
+    fn filter_files(&mut self) {
+        if self.file_search_query.is_empty() {
+            self.filtered_files = self.available_files.clone();
+        } else {
+            self.filtered_files = self.available_files
+                .iter()
+                .filter(|file| {
+                    file.name.to_lowercase().contains(&self.file_search_query.to_lowercase())
+                })
+                .cloned()
+                .collect();
+        }
+    }
+
+    // Input handling methods (similar to chat.rs but optimized for code)
+    fn get_input_content(&self) -> String {
+        self.input_lines.join("\n")
+    }
+
+    fn clear_input(&mut self) {
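+        // Reset to a single empty line rather than an empty Vec so the
+        // line/cursor indices used by `current_line` always stay valid.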
+        self.input_lines = vec![String::new()];
+        self.current_input_line = 0;
+        self.input_cursor_position = 0;
+    }
+
+    fn add_input_line(&mut self) {
+        self.input_lines.insert(self.current_input_line + 1, String::new());
+        self.current_input_line += 1;
+        self.input_cursor_position = 0;
+    }
+
+    fn current_line(&self) -> &String {
+        &self.input_lines[self.current_input_line]
+    }
+
+    fn current_line_mut(&mut self) -> &mut String {
+        &mut self.input_lines[self.current_input_line]
+    }
+
+    fn insert_char(&mut self, c: char) {
+        self.current_line_mut().insert(self.input_cursor_position, c);
+        self.input_cursor_position += 1;
+    }
+
+    fn insert_string(&mut self, s: &str) {
+        for c in s.chars() {
+            self.insert_char(c);
+        }
+    }
+
+    fn handle_backspace(&mut self) {
+        if self.input_cursor_position > 0 {
+            self.current_line_mut().remove(self.input_cursor_position - 1);
+            self.input_cursor_position -= 1;
+        } else if self.current_input_line > 0 {
+            let current_content = self.input_lines.remove(self.current_input_line);
+            self.current_input_line -= 1;
+            self.input_cursor_position = self.current_line().len();
+            self.current_line_mut().push_str(&current_content);
+        }
+    }
+
+    fn handle_delete(&mut self) {
+        if self.input_cursor_position < self.current_line().len() {
+            self.current_line_mut().remove(self.input_cursor_position);
+        } else if self.current_input_line < self.input_lines.len() - 1 {
+            let next_content = self.input_lines.remove(self.current_input_line + 1);
+            self.current_line_mut().push_str(&next_content);
+        }
+    }
+
+    fn move_cursor_left(&mut self) {
+        if self.input_cursor_position > 0 {
+            self.input_cursor_position -= 1;
+        } else if self.current_input_line > 0 {
+            self.current_input_line -= 1;
+            self.input_cursor_position = self.current_line().len();
+        }
+    }
+
+    fn move_cursor_right(&mut self) {
+        if self.input_cursor_position < self.current_line().len() {
+            self.input_cursor_position += 1;
+        } else if self.current_input_line < self.input_lines.len() - 1 {
+            self.current_input_line += 1;
+            self.input_cursor_position = 0;
+        }
+    }
+
+    fn move_cursor_up(&mut self) {
+        if self.current_input_line > 0 {
+            self.current_input_line -= 1;
+            self.input_cursor_position = self.input_cursor_position.min(self.current_line().len());
+        }
+    }
+
+    fn move_cursor_down(&mut self) {
+        if self.current_input_line < self.input_lines.len() - 1 {
+            self.current_input_line += 1;
+            self.input_cursor_position = self.input_cursor_position.min(self.current_line().len());
+        }
+    }
+
+    // Scrolling methods
+    fn scroll_up(&mut self) {
+        if self.message_scroll > 0 {
+            self.message_scroll -= 1;
+        }
+    }
+
+    fn scroll_down(&mut self) {
+        if self.message_scroll < self.messages.len().saturating_sub(1) {
+            self.message_scroll += 1;
+        }
+    }
+
+    fn page_up(&mut self) {
+        self.message_scroll = self.message_scroll.saturating_sub(10);
+    }
+
+    fn page_down(&mut self) {
+        self.message_scroll = (self.message_scroll + 10).min(self.messages.len().saturating_sub(1));
+    }
+
+    fn scroll_to_bottom(&mut self) {
+        self.message_scroll = self.messages.len().saturating_sub(1);
+    }
+
+    // Error handling
+    fn handle_error<S: Into<String>>(&mut self, error: S) {
+        let error_msg = error.into();
+        self.error_message = Some(error_msg);
+        self.stats.errors_encountered += 1;
+    }
+
+    fn clear_error(&mut self) {
+        self.error_message = None;
+    }
+
+    /// Get renderer for UI drawing
+    pub fn renderer(&self) -> &CodeRenderer {
+        &self.renderer
+    }
+
+    /// Print final statistics
+    pub fn print_final_stats(&self) {
+        if let Some(start_time) = self.stats.session_start {
+            let duration = start_time.elapsed();
+            println!("\n=== Code Assistant Session Statistics ===");
+            println!("Session duration: {:?}", duration);
+            println!("Messages sent: {}", self.stats.messages_sent);
+            println!("Messages received: {}", self.stats.messages_received);
+            println!("Characters sent: {}", self.stats.total_characters_sent);
+            println!("Characters received: {}", self.stats.total_characters_received);
+            println!("Models used: {:?}", self.stats.models_used);
+            println!("Errors encountered: {}", self.stats.errors_encountered);
+            println!("Active files: {}", self.session.active_files.len());
+            println!("Language context: {:?}", self.session.language_context);
+        }
+    }
+}
\ No newline at end of file
diff --git a/crates/owlen-tui/src/code_app.rs b/crates/owlen-tui/src/code_app.rs
new file mode 100644
index 0000000..3529521
--- /dev/null
+++ b/crates/owlen-tui/src/code_app.rs
@@ -0,0 +1,43 @@
+use anyhow::Result;
+use owlen_core::session::SessionController;
+use tokio::sync::mpsc;
+
+use crate::chat_app::{AppState, ChatApp, InputMode, SessionEvent};
+use crate::events::Event;
+
+const DEFAULT_SYSTEM_PROMPT: &str =
+    "You are OWLEN Code Assistant. Provide detailed, actionable programming help.";
+
+pub struct CodeApp {
+    inner: ChatApp,
+}
+
+impl CodeApp {
+    pub fn new(mut controller: SessionController) -> (Self, mpsc::UnboundedReceiver<SessionEvent>) {
+        controller
+            .conversation_mut()
+            .push_system_message(DEFAULT_SYSTEM_PROMPT.to_string());
+        let (inner, rx) = ChatApp::new(controller);
+        (Self { inner }, rx)
+    }
+
+    pub async fn handle_event(&mut self, event: Event) -> Result<AppState> {
+        self.inner.handle_event(event).await
+    }
+
+    pub fn handle_session_event(&mut self, event: SessionEvent) -> Result<()> {
+        self.inner.handle_session_event(event)
+    }
+
+    pub fn mode(&self) -> InputMode {
+        self.inner.mode()
+    }
+
+    pub fn inner(&self) -> &ChatApp {
+        &self.inner
+    }
+
+    pub fn inner_mut(&mut self) -> &mut ChatApp {
+        &mut self.inner
+    }
+}
diff --git a/crates/owlen-tui/src/config.rs b/crates/owlen-tui/src/config.rs
new file mode 100644
index 0000000..7433722
--- /dev/null
+++ b/crates/owlen-tui/src/config.rs
@@ -0,0 +1,16 @@
+pub use owlen_core::config::{
+    default_config_path, ensure_ollama_config, session_timeout, Config, GeneralSettings,
+    InputSettings, StorageSettings, UiSettings, DEFAULT_CONFIG_PATH,
+};
+
+/// Attempt to load configuration from default location
+pub fn try_load_config() -> Option<Config> {
+    Config::load(None).ok()
+}
+
+/// Persist configuration to default path
+pub fn save_config(config: &Config) -> anyhow::Result<()> {
+    config
+        .save(None)
+        .map_err(|e| anyhow::anyhow!(e.to_string()))
+}
diff --git a/crates/owlen-tui/src/database.rs b/crates/owlen-tui/src/database.rs
new file mode 100644
index 0000000..bd27b29
--- /dev/null
+++ b/crates/owlen-tui/src/database.rs
@@ -0,0 +1,152 @@
+use sled::Db;
+use anyhow::Result;
+use serde::{Deserialize, Serialize};
+use std::time::SystemTime;
+
+use crate::app::ConversationMessage;
+
+const DB_PATH: &str = "~/.config/owlen/sessions.db";
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct Session {
+    pub id: String,
+    pub name: String,
+    pub messages: Vec<ConversationMessage>,
+    pub created_at: SystemTime,
+    pub updated_at: SystemTime,
+    pub model_used: String,
+}
+
+#[derive(Clone)]
+pub struct Database {
+    db: Db,
+}
+
+impl Database {
+    pub fn new() -> Result<Self> {
+        let path = if let Ok(custom_path) = std::env::var("OWLEN_DB_PATH") {
+            custom_path
+        } else {
+            shellexpand::tilde(DB_PATH).to_string()
+        };
+        let db = sled::open(path)?;
+        Ok(Self { db })
+    }
+
+    pub fn save_conversation(&self, messages: &[ConversationMessage]) -> Result<()> {
+        let serialized = serde_json::to_string(messages)?;
+        self.db.insert("last_session", serialized.as_bytes())?;
+        Ok(())
+    }
+
+    pub fn load_conversation(&self) -> Result<Option<Vec<ConversationMessage>>> {
+        if let Some(serialized) = self.db.get("last_session")? {
+            let serialized: &[u8] = &serialized;
+            let messages: Vec<ConversationMessage> = serde_json::from_slice(serialized)?;
+            Ok(Some(messages))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Save a named session
+    pub fn save_session(&self, session: &Session) -> Result<()> {
+        let key = format!("session_{}", session.id);
+        let serialized = serde_json::to_string(session)?;
+        self.db.insert(key.as_bytes(), serialized.as_bytes())?;
+
+        // Also update the list of session IDs
+        self.add_session_to_list(&session.id)?;
+
+        Ok(())
+    }
+
+    /// Load a specific session by ID
+    pub fn load_session(&self, session_id: &str) -> Result<Option<Session>> {
+        let key = format!("session_{}", session_id);
+        if let Some(serialized) = self.db.get(key.as_bytes())? {
+            let serialized: &[u8] = &serialized;
+            let session: Session = serde_json::from_slice(serialized)?;
+            Ok(Some(session))
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Delete a session
+    pub fn delete_session(&self, session_id: &str) -> Result<()> {
+        let key = format!("session_{}", session_id);
+        self.db.remove(key.as_bytes())?;
+        self.remove_session_from_list(session_id)?;
+        Ok(())
+    }
+
+    /// List all saved sessions
+    pub fn list_sessions(&self) -> Result<Vec<Session>> {
+        let session_ids = self.get_session_list()?;
+        let mut sessions = Vec::new();
+
+        for session_id in session_ids {
+            if let Some(session) = self.load_session(&session_id)? {
+                sessions.push(session);
+            }
+        }
+
+        // Sort by updated_at (most recent first)
+        sessions.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
+        Ok(sessions)
+    }
+
+    /// Get summary of sessions (id, name, message count, last updated)
+    pub fn get_session_summaries(&self) -> Result<Vec<SessionSummary>> {
+        let sessions = self.list_sessions()?;
+        let summaries = sessions.into_iter().map(|s| SessionSummary {
+            id: s.id,
+            name: s.name,
+            message_count: s.messages.len(),
+            last_updated: s.updated_at,
+            model_used: s.model_used,
+        }).collect();
+        Ok(summaries)
+    }
+
+    /// Internal method to maintain session list
+    fn add_session_to_list(&self, session_id: &str) -> Result<()> {
+        let mut session_ids = self.get_session_list()?;
+        if !session_ids.contains(&session_id.to_string()) {
+            session_ids.push(session_id.to_string());
+            let serialized = serde_json::to_string(&session_ids)?;
+            self.db.insert("session_list", serialized.as_bytes())?;
+        }
+        Ok(())
+    }
+
+    /// Internal method to remove session from list
+    fn remove_session_from_list(&self, session_id: &str) -> Result<()> {
+        let mut session_ids = self.get_session_list()?;
+        session_ids.retain(|id| id != session_id);
+        let serialized = serde_json::to_string(&session_ids)?;
+        self.db.insert("session_list", serialized.as_bytes())?;
+        Ok(())
+    }
+
+    /// Get list of session IDs
+    fn get_session_list(&self) -> Result<Vec<String>> {
+        if let Some(serialized) = self.db.get("session_list")? {
+            let serialized: &[u8] = &serialized;
+            let session_ids: Vec<String> = serde_json::from_slice(serialized)?;
+            Ok(session_ids)
+        } else {
+            Ok(Vec::new())
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct SessionSummary {
+    pub id: String,
+    pub name: String,
+    pub message_count: usize,
+    pub last_updated: SystemTime,
+    pub model_used: String,
+}
diff --git a/crates/owlen-tui/src/events.rs b/crates/owlen-tui/src/events.rs
new file mode 100644
index 0000000..5280a2a
--- /dev/null
+++ b/crates/owlen-tui/src/events.rs
@@ -0,0 +1,205 @@
+use crossterm::event::{self, KeyCode, KeyEvent, KeyEventKind, KeyModifiers};
+use std::time::Duration;
+use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
+
+/// Application events
+#[derive(Debug, Clone)]
+pub enum Event {
+    /// Terminal key press event
+    Key(KeyEvent),
+    /// Terminal resize event
+    #[allow(dead_code)]
+    Resize(u16, u16),
+    /// Tick event for regular updates
+    Tick,
+}
+
+/// Event handler that captures terminal events and sends them to the application
+pub struct EventHandler {
+    sender: mpsc::UnboundedSender<Event>,
+    tick_rate: Duration,
+    cancellation_token: CancellationToken,
+}
+
+impl EventHandler {
+    pub fn new(
+        sender: mpsc::UnboundedSender<Event>,
+        cancellation_token: CancellationToken,
+    ) -> Self {
+        Self {
+            sender,
+            tick_rate: Duration::from_millis(250), // 4 times per second
+            cancellation_token,
+        }
+    }
+
+    pub async fn run(&self) {
+        let mut last_tick = tokio::time::Instant::now();
+
+        loop {
+            if self.cancellation_token.is_cancelled() {
+                break;
+            }
+
+            let timeout = self
+                .tick_rate
+                .checked_sub(last_tick.elapsed())
+                .unwrap_or_else(|| Duration::from_secs(0));
+
+            if event::poll(timeout).unwrap_or(false) {
+                match event::read() {
+                    Ok(event) => {
+                        match event {
+                            crossterm::event::Event::Key(key) => {
+                                // Only handle KeyEventKind::Press to avoid duplicate events
+                                if key.kind == KeyEventKind::Press {
+                                    let _ = self.sender.send(Event::Key(key));
+                                }
+                            }
+                            crossterm::event::Event::Resize(width, height) => {
+                                let _ = self.sender.send(Event::Resize(width, height));
+                            }
+                            _ => {}
+                        }
+                    }
+                    Err(_) => {
+                        // Handle error by continuing the loop
+                        continue;
+                    }
+                }
+            }
+
+            if last_tick.elapsed() >= self.tick_rate {
+                let _ = self.sender.send(Event::Tick);
+                last_tick = tokio::time::Instant::now();
+            }
+        }
+    }
+}
+
+/// Helper functions for key event handling
+impl Event {
+    /// Check if this is a quit command (Ctrl+C or 'q')
+    pub fn is_quit(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Char('q'),
+                modifiers: KeyModifiers::NONE,
+                ..
+            }) | Event::Key(KeyEvent {
+                code: KeyCode::Char('c'),
+                modifiers: KeyModifiers::CONTROL,
+                ..
+            })
+        )
+    }
+
+    /// Check if this is an enter key press
+    pub fn is_enter(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Enter,
+                ..
+            })
+        )
+    }
+
+    /// Check if this is a tab key press
+    #[allow(dead_code)]
+    pub fn is_tab(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Tab,
+                modifiers: KeyModifiers::NONE,
+                ..
+            })
+        )
+    }
+
+    /// Check if this is a backspace
+    pub fn is_backspace(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Backspace,
+                ..
+            })
+        )
+    }
+
+    /// Check if this is an escape key press
+    pub fn is_escape(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Esc,
+                ..
+            })
+        )
+    }
+
+    /// Get the character if this is a character key event
+    pub fn as_char(&self) -> Option<char> {
+        match self {
+            Event::Key(KeyEvent {
+                code: KeyCode::Char(c),
+                modifiers: KeyModifiers::NONE,
+                ..
+            }) => Some(*c),
+            Event::Key(KeyEvent {
+                code: KeyCode::Char(c),
+                modifiers: KeyModifiers::SHIFT,
+                ..
+            }) => Some(*c),
+            _ => None,
+        }
+    }
+
+    /// Check if this is an up arrow key press
+    pub fn is_up(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Up,
+                ..
+            })
+        )
+    }
+
+    /// Check if this is a down arrow key press
+    pub fn is_down(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Down,
+                ..
+            })
+        )
+    }
+
+    /// Check if this is a left arrow key press
+    pub fn is_left(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Left,
+                ..
+            })
+        )
+    }
+
+    /// Check if this is a right arrow key press
+    pub fn is_right(&self) -> bool {
+        matches!(
+            self,
+            Event::Key(KeyEvent {
+                code: KeyCode::Right,
+                ..
+            })
+        )
+    }
+}
diff --git a/crates/owlen-tui/src/files.rs b/crates/owlen-tui/src/files.rs
new file mode 100644
index 0000000..0ead543
--- /dev/null
+++ b/crates/owlen-tui/src/files.rs
@@ -0,0 +1,269 @@
+use anyhow::{Result, Context};
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::time::SystemTime;
+use crate::config::Config;
+
+#[derive(Debug, Clone)]
+pub struct FileInfo {
+    pub path: PathBuf,
+    pub name: String,
+    pub size: u64,
+    pub modified: SystemTime,
+    pub is_readable: bool,
+    pub is_writable: bool,
+}
+
+pub struct FileManager {
+    config: Config,
+}
+
+impl FileManager {
+    pub fn new(config: Config) -> Self {
+        Self { config }
+    }
+
+    /// Read a file and return its contents
+    pub fn read_file<P: AsRef<Path>>(&self, path: P) -> Result<String> {
+        let path = path.as_ref();
+        let metadata = fs::metadata(path)
+            .with_context(|| format!("Failed to get metadata for {}", path.display()))?;
+
+        // Check file size limit
+        let size_mb = metadata.len() / (1024 * 1024);
+        if size_mb > self.config.files.max_file_size_mb {
+            return Err(anyhow::anyhow!(
+                "File {} is too large ({} MB > {} MB limit)",
+                path.display(),
+                size_mb,
+                self.config.files.max_file_size_mb
+            ));
+        }
+
+        let content = fs::read_to_string(path)
+            .with_context(|| format!("Failed to read file {}", path.display()))?;
+
+        Ok(content)
+    }
+
+    /// Write content to a file
+    pub fn write_file<P: AsRef<Path>>(&self, path: P, content: &str) -> Result<()> {
+        let path = path.as_ref();
+
+        // Create backup if enabled
+        if self.config.files.backup_files && path.exists() {
+            self.create_backup(path)?;
+        }
+
+        // Ensure parent directory exists
+        if let Some(parent) = path.parent() {
+            fs::create_dir_all(parent)
+                .with_context(|| format!("Failed to create directory {}", parent.display()))?;
+        }
+
+        fs::write(path, content)
+            .with_context(|| format!("Failed to write file {}", path.display()))?;
+
+        Ok(())
+    }
+
+    /// Create a backup of the file
+    fn create_backup<P: AsRef<Path>>(&self, path: P) -> Result<()> {
+        let path = path.as_ref();
+        let backup_path = path.with_extension(format!("{}.backup",
+            path.extension().and_then(|s| s.to_str()).unwrap_or("txt")));
+
+        fs::copy(path, &backup_path)
+            .with_context(|| format!("Failed to create backup at {}", backup_path.display()))?;
+
+        Ok(())
+    }
+
+    /// List files in a directory
+    pub fn list_files<P: AsRef<Path>>(&self, dir: P) -> Result<Vec<FileInfo>> {
+        let dir = dir.as_ref();
+        let entries = fs::read_dir(dir)
+            .with_context(|| format!("Failed to read directory {}", dir.display()))?;
+
+        let mut files = Vec::new();
+
+        for entry in entries {
+            let entry = entry?;
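+            // Stat each entry individually; a per-entry IO error aborts the
+            // listing via `?` instead of being silently skipped.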
+    /// List files in a directory
+    pub fn list_files<P: AsRef<Path>>(&self, dir: P) -> Result<Vec<FileInfo>> {
+        let dir = dir.as_ref();
+        let entries = fs::read_dir(dir)
+            .with_context(|| format!("Failed to read directory {}", dir.display()))?;
+
+        let mut files = Vec::new();
+
+        for entry in entries {
+            let entry = entry?;
+            let path = entry.path();
+
+            if path.is_file() {
+                let metadata = entry.metadata()?;
+                let name = entry.file_name().to_string_lossy().to_string();
+
+                files.push(FileInfo {
+                    path: path.clone(),
+                    name,
+                    size: metadata.len(),
+                    modified: metadata.modified()?,
+                    is_readable: path.exists() && fs::File::open(&path).is_ok(),
+                    is_writable: !metadata.permissions().readonly(),
+                });
+            }
+        }
+
+        // Sort by name
+        files.sort_by(|a, b| a.name.cmp(&b.name));
+        Ok(files)
+    }
+
+    /// Check if a file exists
+    pub fn file_exists<P: AsRef<Path>>(&self, path: P) -> bool {
+        path.as_ref().exists()
+    }
+
+    /// Get file info
+    pub fn get_file_info<P: AsRef<Path>>(&self, path: P) -> Result<FileInfo> {
+        let path = path.as_ref();
+        let metadata = fs::metadata(path)
+            .with_context(|| format!("Failed to get metadata for {}", path.display()))?;
+
+        Ok(FileInfo {
+            path: path.to_path_buf(),
+            name: path.file_name().unwrap_or_default().to_string_lossy().to_string(),
+            size: metadata.len(),
+            modified: metadata.modified()?,
+            is_readable: path.exists() && fs::File::open(&path).is_ok(),
+            is_writable: !metadata.permissions().readonly(),
+        })
+    }
+
+    /// Append content to a file
+    pub fn append_file<P: AsRef<Path>>(&self, path: P, content: &str) -> Result<()> {
+        let path = path.as_ref();
+
+        use std::io::Write;
+        let mut file = std::fs::OpenOptions::new()
+            .create(true)
+            .append(true)
+            .open(path)
+            .with_context(|| format!("Failed to open file for appending {}", path.display()))?;
+
+        file.write_all(content.as_bytes())
+            .with_context(|| format!("Failed to append to file {}", path.display()))?;
+
+        Ok(())
+    }
+
+    /// Load project context file (OWLEN.md)
+    pub fn load_project_context(&self) -> Result<Option<String>> {
+        let context_file = &self.config.general.project_context_file;
+
+        if self.file_exists(context_file) {
+            match self.read_file(context_file) {
+                Ok(content) => Ok(Some(content)),
+                Err(_) => Ok(None), // File exists but can't be read; return None instead of an error
+            }
+        } else {
+            Ok(None)
+        }
+    }
+
+    /// Create a default project context file
+    pub fn create_default_project_context(&self) -> Result<()> {
+        let context_file = &self.config.general.project_context_file;
+
+        if !self.file_exists(context_file) {
+            let default_content = r#"# Project Context - OWLEN
+
+This file provides context about your project to the AI assistant.
+
+## Project Description
+Describe your project here.
+
+## Key Files and Structure
+List important files, directories, and their purposes.
+
+## Technologies Used
+- Programming languages
+- Frameworks
+- Tools and dependencies
+
+## Development Guidelines
+- Coding standards
+- Best practices
+- Testing approach
+
+## Current Focus
+What you're currently working on or need help with.
+
+---
+*This file is automatically loaded as context for AI conversations.*
+"#;
+
+            self.write_file(context_file, default_content)?;
+        }
+
+        Ok(())
+    }
+}
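+// Usage sketch (illustrative, not part of this commit): loading OWLEN.md as
+// conversation context at startup. `push_system_message` is a hypothetical
+// helper standing in for however the caller injects context.
+//
+//     let files = FileManager::new(config.clone());
+//     files.create_default_project_context()?;
+//     if let Some(context) = files.load_project_context()? {
+//         conversation.push_system_message(context);
+//     }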
+/// Utility functions for common file operations
+pub mod utils {
+    use super::*;
+
+    /// Get the current working directory
+    pub fn get_current_dir() -> Result<PathBuf> {
+        std::env::current_dir()
+            .context("Failed to get current directory")
+    }
+
+    /// Expand tilde in path
+    pub fn expand_path<P: AsRef<Path>>(path: P) -> PathBuf {
+        let path_str = path.as_ref().to_string_lossy();
+        let expanded = shellexpand::tilde(&path_str);
+        PathBuf::from(expanded.as_ref())
+    }
+
+    /// Get relative path from current directory
+    pub fn get_relative_path<P: AsRef<Path>>(path: P) -> Result<PathBuf> {
+        let current_dir = get_current_dir()?;
+        let absolute_path = path.as_ref().canonicalize()
+            .context("Failed to canonicalize path")?;
+
+        absolute_path.strip_prefix(&current_dir)
+            .map(|p| p.to_path_buf())
+            .or_else(|_| Ok(absolute_path))
+    }
+
+    /// Check if path is a text file based on extension
+    pub fn is_text_file<P: AsRef<Path>>(path: P) -> bool {
+        let path = path.as_ref();
+        if let Some(ext) = path.extension().and_then(|s| s.to_str()) {
+            matches!(ext.to_lowercase().as_str(),
+                "txt" | "md" | "rs" | "py" | "js" | "ts" | "html" | "css" | "json" |
+                "toml" | "yaml" | "yml" | "xml" | "csv" | "log" | "sh" | "bash" |
+                "c" | "cpp" | "h" | "hpp" | "java" | "go" | "php" | "rb" | "swift" |
+                "kt" | "scala" | "r" | "sql" | "dockerfile" | "makefile"
+            )
+        } else {
+            // Files without extensions might be text (like Makefile, README, etc.)
+            path.file_name()
+                .and_then(|name| name.to_str())
+                .map(|name| name.chars().all(|c| c.is_ascii()))
+                .unwrap_or(false)
+        }
+    }
+
+    /// Format file size in human readable format
+    pub fn format_file_size(size: u64) -> String {
+        const UNITS: &[&str] = &["B", "KB", "MB", "GB", "TB"];
+        let mut size = size as f64;
+        let mut unit_index = 0;
+
+        while size >= 1024.0 && unit_index < UNITS.len() - 1 {
+            size /= 1024.0;
+            unit_index += 1;
+        }
+
+        if unit_index == 0 {
+            format!("{} {}", size as u64, UNITS[unit_index])
+        } else {
+            format!("{:.1} {}", size, UNITS[unit_index])
+        }
+    }
+}
\ No newline at end of file
diff --git a/crates/owlen-tui/src/lib.rs b/crates/owlen-tui/src/lib.rs
new file mode 100644
index 0000000..9a7dce5
--- /dev/null
+++ b/crates/owlen-tui/src/lib.rs
@@ -0,0 +1,9 @@
+pub mod chat_app;
+pub mod code_app;
+pub mod config;
+pub mod events;
+pub mod ui;
+
+pub use chat_app::{AppState, ChatApp, InputMode, SessionEvent};
+pub use code_app::CodeApp;
+pub use events::{Event, EventHandler};
diff --git a/crates/owlen-tui/src/ollama.rs b/crates/owlen-tui/src/ollama.rs
new file mode 100644
index 0000000..17356d4
--- /dev/null
+++ b/crates/owlen-tui/src/ollama.rs
@@ -0,0 +1,293 @@
+use anyhow::Result;
+use futures_util::StreamExt;
+use reqwest::Client;
+use serde::{Deserialize, Serialize};
+use tokio::sync::mpsc;
+use uuid::Uuid;
+
+/// Events that can be sent from the Ollama client
+#[derive(Debug, Clone)]
+pub enum OllamaEvent {
+    /// Streaming response chunk
+    MessageChunk {
+        request_id: Uuid,
+        content: String,
+        done: bool,
+    },
+    /// Error occurred during request
+    Error {
+        request_id: Uuid,
+        error: String,
+    },
+    /// Available models list
+    ModelsAvailable(Vec<String>),
+}
+
+/// Message in the conversation
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Message {
+    pub role: String,
+    pub content: String,
+}
+
+/// Request to Ollama's chat API
+#[derive(Debug, Serialize)]
+struct ChatRequest {
+    model: String,
+    messages: Vec<Message>,
+    stream: bool,
+}
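+// The chat endpoint streams newline-delimited JSON; each line is one chunk
+// (illustrative shape, abbreviated -- see ChatResponse below):
+//
+//     {"message":{"content":"Hel"},"done":false}
+//     {"message":{"content":"lo!"},"done":true}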
+/// Response from Ollama's chat API (streaming)
+#[derive(Debug, Deserialize)]
+struct ChatResponse {
+    message: MessageResponse,
+    done: bool,
+}
+
+#[derive(Debug, Deserialize)]
+struct MessageResponse {
+    content: String,
+}
+
+/// Response from models endpoint
+#[derive(Debug, Deserialize)]
+struct ModelsResponse {
+    models: Vec<ModelInfo>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ModelInfo {
+    name: String,
+}
+
+/// Generate request for single completion
+#[derive(Debug, Serialize)]
+struct GenerateRequest {
+    model: String,
+    prompt: String,
+    stream: bool,
+}
+
+/// Generate response (streaming)
+#[derive(Debug, Deserialize)]
+struct GenerateResponse {
+    response: String,
+    done: bool,
+}
+
+#[derive(Clone)]
+pub struct OllamaClient {
+    client: Client,
+    pub base_url: String,
+    pub event_sender: mpsc::UnboundedSender<OllamaEvent>,
+}
+
+impl OllamaClient {
+    pub fn new(
+        base_url: String,
+        event_sender: mpsc::UnboundedSender<OllamaEvent>,
+    ) -> Self {
+        let client = Client::new();
+        Self {
+            client,
+            base_url,
+            event_sender,
+        }
+    }
+
+    /// Start a chat conversation with streaming response
+    pub async fn chat(&self, model: String, messages: Vec<Message>) -> Result<Uuid> {
+        let request_id = Uuid::new_v4();
+        let url = format!("{}/api/chat", self.base_url);
+
+        let request = ChatRequest {
+            model: model.clone(), // Clone model for potential fallback
+            messages,
+            stream: true,
+        };
+
+        let response = self.client
+            .post(&url)
+            .json(&request)
+            .send()
+            .await?;
+
+        if response.status() == reqwest::StatusCode::NOT_FOUND {
+            // Fall back to the generate endpoint, flattening the chat history into a prompt
+            let prompt = request.messages.into_iter()
+                .map(|m| format!("{}: {}", m.role, m.content))
+                .collect::<Vec<String>>()
+                .join("\n");
+            return self.generate(model, prompt).await;
+        }
+
+        if !response.status().is_success() {
+            let error = format!("HTTP error: {}", response.status());
+            self.send_error(request_id, error).await;
+            return Ok(request_id);
+        }
+
+        let mut stream = response.bytes_stream();
+        let sender = self.event_sender.clone();
+
+        // Spawn task to handle streaming response
+        tokio::spawn(async move {
+            while let Some(chunk) = stream.next().await {
+                match chunk {
+                    Ok(bytes) => {
+                        let text = String::from_utf8_lossy(&bytes);
+                        // Parse each line as potential JSON
+                        for line in text.lines() {
+                            if line.trim().is_empty() {
+                                continue;
+                            }
+
+                            match serde_json::from_str::<ChatResponse>(line) {
+                                Ok(response) => {
+                                    let _ = sender.send(OllamaEvent::MessageChunk {
+                                        request_id,
+                                        content: response.message.content,
+                                        done: response.done,
+                                    });
+
+                                    if response.done {
+                                        break;
+                                    }
+                                }
+                                Err(e) => {
+                                    let _ = sender.send(OllamaEvent::Error {
+                                        request_id,
+                                        error: format!("JSON parse error: {}", e),
+                                    });
+                                }
+                            }
+                        }
+                    }
+                    Err(e) => {
+                        let _ = sender.send(OllamaEvent::Error {
+                            request_id,
+                            error: format!("Stream error: {}", e),
+                        });
+                        break;
+                    }
+                }
+            }
+        });
+
+        Ok(request_id)
+    }
+
+    /// Generate a single completion (alternative to chat)
+    pub async fn generate(&self, model: String, prompt: String) -> Result<Uuid> {
+        let request_id = Uuid::new_v4();
+        let url = format!("{}/api/generate", self.base_url);
+
+        let request = GenerateRequest {
+            model,
+            prompt,
+            stream: true,
+        };
+
+        let response = self.client
+            .post(&url)
+            .json(&request)
+            .send()
+            .await?;
+
+        if !response.status().is_success() {
+            let error = format!("HTTP error: {}", response.status());
+            self.send_error(request_id, error).await;
+            return Ok(request_id);
+        }
+
+        let mut stream = response.bytes_stream();
+        let sender = self.event_sender.clone();
+
+        tokio::spawn(async move {
+            while let Some(chunk) = stream.next().await {
+                match chunk {
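+                    // Same NDJSON handling as `chat` above: each streamed line
+                    // should parse as one `GenerateResponse` chunk.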
+                    Ok(bytes) => {
+                        let text = String::from_utf8_lossy(&bytes);
+                        for line in text.lines() {
+                            if line.trim().is_empty() {
+                                continue;
+                            }
+
+                            match serde_json::from_str::<GenerateResponse>(line) {
+                                Ok(response) => {
+                                    let _ = sender.send(OllamaEvent::MessageChunk {
+                                        request_id,
+                                        content: response.response,
+                                        done: response.done,
+                                    });
+
+                                    if response.done {
+                                        break;
+                                    }
+                                }
+                                Err(e) => {
+                                    let _ = sender.send(OllamaEvent::Error {
+                                        request_id,
+                                        error: format!("JSON parse error: {}", e),
+                                    });
+                                }
+                            }
+                        }
+                    }
+                    Err(e) => {
+                        let _ = sender.send(OllamaEvent::Error {
+                            request_id,
+                            error: format!("Stream error: {}", e),
+                        });
+                        break;
+                    }
+                }
+            }
+        });
+
+        Ok(request_id)
+    }
+
+    /// Get list of available models
+    pub async fn get_models(&self) -> Result<()> {
+        let url = format!("{}/api/tags", self.base_url);
+
+        let response = self.client
+            .get(&url)
+            .send()
+            .await?;
+
+        if response.status().is_success() {
+            let models_response: ModelsResponse = response.json().await?;
+            let model_names = models_response
+                .models
+                .into_iter()
+                .map(|m| m.name)
+                .collect();
+
+            let _ = self.event_sender.send(OllamaEvent::ModelsAvailable(model_names));
+        } else {
+            let error = format!("Failed to fetch models: {}", response.status());
+            // We don't have a specific request_id for this, so we'll use a nil UUID
+            let _ = self.event_sender.send(OllamaEvent::Error {
+                request_id: Uuid::nil(),
+                error,
+            });
+        }
+
+        Ok(())
+    }
+
+    async fn send_error(&self, request_id: Uuid, error: String) {
+        let _ = self.event_sender.send(OllamaEvent::Error {
+            request_id,
+            error,
+        });
+    }
+}
+
+/// Default Ollama configuration
+impl Default for OllamaClient {
+    fn default() -> Self {
+        let (tx, _rx) = mpsc::unbounded_channel();
+        Self::new("http://localhost:11434".to_string(), tx)
+    }
+}
\ No newline at end of file
diff --git a/crates/owlen-tui/src/ui.rs b/crates/owlen-tui/src/ui.rs
new file mode 100644
index 0000000..9046a35
--- /dev/null
+++ b/crates/owlen-tui/src/ui.rs
@@ -0,0 +1,370 @@
+use ratatui::layout::{Alignment, Constraint, Direction, Layout, Rect};
+use ratatui::style::{Color, Modifier, Style};
+use ratatui::text::{Line, Span};
+use ratatui::widgets::{Block, Borders, Clear, List, ListItem, ListState, Paragraph};
+use ratatui::Frame;
+
+use crate::chat_app::{ChatApp, InputMode};
+use owlen_core::types::Role;
+
+pub fn render_chat(frame: &mut Frame<'_>, app: &ChatApp) {
+    let layout = Layout::default()
+        .direction(Direction::Vertical)
+        .constraints([
+            Constraint::Min(8),
+            Constraint::Length(5),
+            Constraint::Length(3),
+        ])
+        .split(frame.area());
+
+    render_messages(frame, layout[0], app);
+    render_input(frame, layout[1], app);
+    render_status(frame, layout[2], app);
+
+    match app.mode() {
+        InputMode::ProviderSelection => render_provider_selector(frame, app),
+        InputMode::ModelSelection => render_model_selector(frame, app),
+        InputMode::Help => render_help(frame),
+        _ => {}
+    }
+}
+
+fn render_messages(frame: &mut Frame<'_>, area: Rect, app: &ChatApp) {
+    let conversation = app.conversation();
+    let formatter = app.formatter();
+
+    let mut lines: Vec<Line> = Vec::new();
+    for message in &conversation.messages {
+        let color = role_color(message.role.clone());
+        let mut formatted = formatter.format_message(message);
+        let is_streaming = message
+            .metadata
+            .get("streaming")
+            .and_then(|v| v.as_bool())
+            .unwrap_or(false);
+
+        if let Some(first) = formatted.first_mut() {
+            if let Some((label, rest)) = first.split_once(':') {
+                let mut spans = Vec::new();
+                spans.push(Span::styled(
+                    format!("{label}:"),
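+                    // Role label is colored via `role_color` and bolded; the
+                    // rest of the first line is appended as plain text below.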
+                    color.add_modifier(Modifier::BOLD),
+                ));
+                if !rest.trim().is_empty() {
+                    spans.push(Span::raw(format!(" {}", rest.trim_start())));
+                }
+                if is_streaming {
+                    spans.push(Span::styled(" โ–Œ", Style::default().fg(Color::Magenta)));
+                }
+                lines.push(Line::from(spans));
+            } else {
+                let mut spans = vec![Span::raw(first.clone())];
+                if is_streaming {
+                    spans.push(Span::styled(" โ–Œ", Style::default().fg(Color::Magenta)));
+                }
+                lines.push(Line::from(spans));
+            }
+        }
+
+        for line in formatted.into_iter().skip(1) {
+            let mut spans = vec![Span::raw(line)];
+            if is_streaming {
+                spans.push(Span::styled(" โ–Œ", Style::default().fg(Color::Magenta)));
+            }
+            lines.push(Line::from(spans));
+        }
+        lines.push(Line::from(""));
+    }
+
+    if lines.is_empty() {
+        lines.push(Line::from("No messages yet. Press 'i' to start typing."));
+    }
+
+    let mut paragraph = Paragraph::new(lines)
+        .block(
+            Block::default()
+                .title(Span::styled(
+                    "Conversation",
+                    Style::default()
+                        .fg(Color::LightMagenta)
+                        .add_modifier(Modifier::BOLD),
+                ))
+                .borders(Borders::ALL)
+                .border_style(Style::default().fg(Color::Rgb(95, 20, 135))),
+        )
+        .wrap(ratatui::widgets::Wrap { trim: false });
+
+    let scroll = app.scroll().min(u16::MAX as usize) as u16;
+    paragraph = paragraph.scroll((scroll, 0));
+
+    frame.render_widget(paragraph, area);
+}
+
+fn render_input(frame: &mut Frame<'_>, area: Rect, app: &ChatApp) {
+    let title = match app.mode() {
+        InputMode::Editing => "Input (Enter=send ยท Shift+Enter/Ctrl+J=newline)",
+        _ => "Input",
+    };
+
+    let input_block = Block::default()
+        .title(Span::styled(
+            title,
+            Style::default()
+                .fg(Color::LightMagenta)
+                .add_modifier(Modifier::BOLD),
+        ))
+        .borders(Borders::ALL)
+        .border_style(Style::default().fg(Color::Rgb(95, 20, 135)));
+
+    let input_text = app.input_buffer().text().to_string();
+    let paragraph = Paragraph::new(input_text.clone())
+        .block(input_block)
+        .wrap(ratatui::widgets::Wrap { trim: false });
+
+    frame.render_widget(paragraph, area);
+
+    if matches!(app.mode(), InputMode::Editing) {
+        let cursor_index = app.input_buffer().cursor();
+        let (cursor_line, cursor_col) = cursor_position(&input_text, cursor_index);
+        let x = area.x + 1 + cursor_col as u16;
+        let y = area.y + 1 + cursor_line as u16;
+        frame.set_cursor_position((
+            x.min(area.right().saturating_sub(1)),
+            y.min(area.bottom().saturating_sub(1)),
+        ));
+    }
+}
+
+fn render_status(frame: &mut Frame<'_>, area: Rect, app: &ChatApp) {
+    let mut spans = Vec::new();
+    spans.push(Span::styled(
+        " OWLEN ",
+        Style::default()
+            .fg(Color::Magenta)
+            .add_modifier(Modifier::BOLD),
+    ));
+    spans.push(Span::raw(" "));
+    spans.push(Span::styled(
+        format!("Model {} ({})", app.selected_model(), app.selected_provider),
+        Style::default().fg(Color::LightMagenta),
+    ));
+    spans.push(Span::raw(" "));
+    spans.push(Span::styled(
+        format!("Mode {}", app.mode()),
+        Style::default()
+            .fg(Color::LightBlue)
+            .add_modifier(Modifier::ITALIC),
+    ));
+    spans.push(Span::raw(" "));
+    spans.push(Span::styled(
+        format!("Msgs {}", app.message_count()),
+        Style::default().fg(Color::Cyan),
+    ));
+
+    if app.streaming_count() > 0 {
+        spans.push(Span::raw(" "));
+        spans.push(Span::styled(
+            format!("โŸณ {}", app.streaming_count()),
+            Style::default()
+                .fg(Color::LightMagenta)
+                .add_modifier(Modifier::BOLD),
+        ));
+    }
+
+    spans.push(Span::raw(" "));
+    spans.push(Span::styled(
+        app.status_message(),
+        Style::default().fg(Color::LightBlue),
+    ));
+
+    if let Some(error) = app.error_message() {
+        spans.push(Span::raw(" "));
+        spans.push(Span::styled(
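+            // Errors are appended after the status message in bold light red,
+            // keeping the most recent status text visible alongside them.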
+            error,
+            Style::default()
+                .fg(Color::LightRed)
+                .add_modifier(Modifier::BOLD),
+        ));
+    }
+
+    let paragraph = Paragraph::new(Line::from(spans))
+        .alignment(Alignment::Left)
+        .block(
+            Block::default()
+                .borders(Borders::ALL)
+                .border_style(Style::default().fg(Color::Rgb(95, 20, 135))),
+        );
+
+    frame.render_widget(paragraph, area);
+}
+
+fn render_provider_selector(frame: &mut Frame<'_>, app: &ChatApp) {
+    let area = centered_rect(60, 60, frame.area());
+    frame.render_widget(Clear, area);
+
+    let items: Vec<ListItem> = app
+        .available_providers
+        .iter()
+        .map(|provider| {
+            ListItem::new(Span::styled(
+                provider.to_string(),
+                Style::default()
+                    .fg(Color::LightBlue)
+                    .add_modifier(Modifier::BOLD),
+            ))
+        })
+        .collect();
+
+    let list = List::new(items)
+        .block(
+            Block::default()
+                .title(Span::styled(
+                    "Select Provider",
+                    Style::default()
+                        .fg(Color::LightMagenta)
+                        .add_modifier(Modifier::BOLD),
+                ))
+                .borders(Borders::ALL)
+                .border_style(Style::default().fg(Color::Rgb(95, 20, 135))),
+        )
+        .highlight_style(
+            Style::default()
+                .fg(Color::Magenta)
+                .add_modifier(Modifier::BOLD),
+        )
+        .highlight_symbol("โ–ถ ");
+
+    let mut state = ListState::default();
+    state.select(Some(app.selected_provider_index));
+    frame.render_stateful_widget(list, area, &mut state);
+}
+
+fn render_model_selector(frame: &mut Frame<'_>, app: &ChatApp) {
+    let area = centered_rect(60, 60, frame.area());
+    frame.render_widget(Clear, area);
+
+    let items: Vec<ListItem> = app
+        .models()
+        .iter()
+        .map(|model| {
+            let label = if model.name.is_empty() {
+                model.id.clone()
+            } else {
+                format!("{} โ€” {}", model.id, model.name)
+            };
+            ListItem::new(Span::styled(
+                label,
+                Style::default()
+                    .fg(Color::LightBlue)
+                    .add_modifier(Modifier::BOLD),
+            ))
+        })
+        .collect();
+
+    let list = List::new(items)
+        .block(
+            Block::default()
+                .title(Span::styled(
+                    format!("Select Model ({})", app.selected_provider),
+                    Style::default()
+                        .fg(Color::LightMagenta)
+                        .add_modifier(Modifier::BOLD),
+                ))
+                .borders(Borders::ALL)
+                .border_style(Style::default().fg(Color::Rgb(95, 20, 135))),
+        )
+        .highlight_style(
+            Style::default()
+                .fg(Color::Magenta)
+                .add_modifier(Modifier::BOLD),
+        )
+        .highlight_symbol("โ–ถ ");
+
+    let mut state = ListState::default();
+    state.select(app.selected_model_index());
+    frame.render_stateful_widget(list, area, &mut state);
+}
+
+fn render_help(frame: &mut Frame<'_>) {
+    let area = centered_rect(70, 60, frame.area());
+    frame.render_widget(Clear, area);
+
+    let help_text = vec![
+        Line::from("Controls:"),
+        Line::from("  i / Enter โ†’ start typing"),
+        Line::from("  Enter โ†’ send message"),
+        Line::from("  Shift+Enter โ†’ newline"),
+        Line::from("  Ctrl+J โ†’ newline"),
+        Line::from("  m โ†’ select model"),
+        Line::from("  n โ†’ new conversation"),
+        Line::from("  c โ†’ clear conversation"),
+        Line::from("  q โ†’ quit"),
+        Line::from(""),
+        Line::from("Press Esc to close this help."),
+    ];
+
+    let paragraph = Paragraph::new(help_text).block(
+        Block::default()
+            .title(Span::styled(
+                "Help",
+                Style::default()
+                    .fg(Color::LightMagenta)
+                    .add_modifier(Modifier::BOLD),
+            ))
+            .borders(Borders::ALL)
+            .border_style(Style::default().fg(Color::Rgb(95, 20, 135))),
+    );
+
+    frame.render_widget(paragraph, area);
+}
+
+fn centered_rect(percent_x: u16, percent_y: u16, area: Rect) -> Rect {
+    let vertical = Layout::default()
+        .direction(Direction::Vertical)
+        .constraints(
+            [
+                Constraint::Percentage((100 - percent_y) / 2),
+                Constraint::Percentage(percent_y),
+                Constraint::Percentage((100 - percent_y) / 2),
+            ]
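+            // Center the popup: split vertically to isolate the percent_y band,
+            // then horizontally (below) for percent_x, keeping the middle cell.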
+            .as_ref(),
+        )
+        .split(area);
+
+    Layout::default()
+        .direction(Direction::Horizontal)
+        .constraints(
+            [
+                Constraint::Percentage((100 - percent_x) / 2),
+                Constraint::Percentage(percent_x),
+                Constraint::Percentage((100 - percent_x) / 2),
+            ]
+            .as_ref(),
+        )
+        .split(vertical[1])[1]
+}
+
+fn role_color(role: Role) -> Style {
+    match role {
+        Role::User => Style::default().fg(Color::LightBlue),
+        Role::Assistant => Style::default().fg(Color::LightMagenta),
+        Role::System => Style::default().fg(Color::Cyan),
+    }
+}
+
+fn cursor_position(text: &str, cursor: usize) -> (usize, usize) {
+    let mut line = 0;
+    let mut col = 0;
+    for (idx, ch) in text.char_indices() {
+        if idx >= cursor {
+            break;
+        }
+        if ch == '\n' {
+            line += 1;
+            col = 0;
+        } else {
+            col += 1;
+        }
+    }
+    (line, col)
+}
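+// e.g. cursor_position("ab\ncd", 4) == (1, 1): both indices are zero-based,
+// so the cursor sits on the second line, just after 'c'.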