2 Commits

109 changed files with 11236 additions and 3590 deletions

20
.cargo/config.toml Normal file
View File

@@ -0,0 +1,20 @@
[target.x86_64-unknown-linux-musl]
linker = "x86_64-linux-gnu-gcc"
rustflags = ["-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
[target.aarch64-unknown-linux-musl]
linker = "aarch64-linux-gnu-gcc"
rustflags = ["-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"
[target.armv7-unknown-linux-musleabihf]
linker = "arm-linux-gnueabihf-gcc"
rustflags = ["-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]
[target.x86_64-pc-windows-gnu]
linker = "x86_64-w64-mingw32-gcc"

39
.gitignore vendored
View File

@@ -1,12 +1,13 @@
### Custom
AGENTS.md
CLAUDE.md
### Rust template
# Generated by Cargo
# will have compiled files and executables
debug/
target/
dev/
.agents/
.env
.env.*
!.env.example
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
@@ -18,10 +19,17 @@ Cargo.lock
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
# RustRover
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
.idea/
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
@@ -52,15 +60,14 @@ Cargo.lock
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
.idea/artifacts
.idea/compiler.xml
.idea/jarRepositories.xml
.idea/modules.xml
.idea/*.iml
.idea/modules
*.iml
*.ipr
.idea
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
@@ -97,9 +104,3 @@ fabric.properties
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
### rust-analyzer template
# Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules)
rust-project.json

34
.pre-commit-config.yaml Normal file
View File

@@ -0,0 +1,34 @@
# Pre-commit hooks configuration
# See https://pre-commit.com for more information
repos:
# General file checks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-toml
- id: check-merge-conflict
- id: check-added-large-files
args: ['--maxkb=1000']
- id: mixed-line-ending
# Rust formatting
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
- id: fmt
name: cargo fmt
description: Format Rust code with rustfmt
- id: cargo-check
name: cargo check
description: Check Rust code compilation
- id: clippy
name: cargo clippy
description: Lint Rust code with clippy
args: ['--all-features', '--', '-D', 'warnings']
# Optional: run on all files when config changes
default_install_hook_types: [pre-commit, pre-push]

131
.woodpecker.yml Normal file
View File

@@ -0,0 +1,131 @@
when:
event: tag
tag: v*
variables:
- &rust_image 'rust:1.83'
matrix:
include:
# Linux
- TARGET: x86_64-unknown-linux-gnu
ARTIFACT: owlen-linux-x86_64-gnu
PLATFORM: linux
EXT: ""
- TARGET: x86_64-unknown-linux-musl
ARTIFACT: owlen-linux-x86_64-musl
PLATFORM: linux
EXT: ""
- TARGET: aarch64-unknown-linux-gnu
ARTIFACT: owlen-linux-aarch64-gnu
PLATFORM: linux
EXT: ""
- TARGET: aarch64-unknown-linux-musl
ARTIFACT: owlen-linux-aarch64-musl
PLATFORM: linux
EXT: ""
- TARGET: armv7-unknown-linux-gnueabihf
ARTIFACT: owlen-linux-armv7-gnu
PLATFORM: linux
EXT: ""
- TARGET: armv7-unknown-linux-musleabihf
ARTIFACT: owlen-linux-armv7-musl
PLATFORM: linux
EXT: ""
# Windows
- TARGET: x86_64-pc-windows-gnu
ARTIFACT: owlen-windows-x86_64
PLATFORM: windows
EXT: ".exe"
steps:
- name: build
image: *rust_image
commands:
# Install cross-compilation tools
- apt-get update
- apt-get install -y musl-tools gcc-aarch64-linux-gnu g++-aarch64-linux-gnu gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf mingw-w64 zip
# Verify cross-compilers are installed
- which aarch64-linux-gnu-gcc || echo "aarch64-linux-gnu-gcc not found!"
- which arm-linux-gnueabihf-gcc || echo "arm-linux-gnueabihf-gcc not found!"
- which x86_64-w64-mingw32-gcc || echo "x86_64-w64-mingw32-gcc not found!"
# Add rust target
- rustup target add ${TARGET}
# Set up cross-compilation environment variables and build
- |
case "${TARGET}" in
aarch64-unknown-linux-gnu)
export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc
export CC_aarch64_unknown_linux_gnu=/usr/bin/aarch64-linux-gnu-gcc
export CXX_aarch64_unknown_linux_gnu=/usr/bin/aarch64-linux-gnu-g++
export AR_aarch64_unknown_linux_gnu=/usr/bin/aarch64-linux-gnu-ar
;;
aarch64-unknown-linux-musl)
export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER=/usr/bin/aarch64-linux-gnu-gcc
export CC_aarch64_unknown_linux_musl=/usr/bin/aarch64-linux-gnu-gcc
export CXX_aarch64_unknown_linux_musl=/usr/bin/aarch64-linux-gnu-g++
export AR_aarch64_unknown_linux_musl=/usr/bin/aarch64-linux-gnu-ar
;;
armv7-unknown-linux-gnueabihf)
export CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=/usr/bin/arm-linux-gnueabihf-gcc
export CC_armv7_unknown_linux_gnueabihf=/usr/bin/arm-linux-gnueabihf-gcc
export CXX_armv7_unknown_linux_gnueabihf=/usr/bin/arm-linux-gnueabihf-g++
export AR_armv7_unknown_linux_gnueabihf=/usr/bin/arm-linux-gnueabihf-ar
;;
armv7-unknown-linux-musleabihf)
export CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER=/usr/bin/arm-linux-gnueabihf-gcc
export CC_armv7_unknown_linux_musleabihf=/usr/bin/arm-linux-gnueabihf-gcc
export CXX_armv7_unknown_linux_musleabihf=/usr/bin/arm-linux-gnueabihf-g++
export AR_armv7_unknown_linux_musleabihf=/usr/bin/arm-linux-gnueabihf-ar
;;
x86_64-pc-windows-gnu)
export CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=/usr/bin/x86_64-w64-mingw32-gcc
export CC_x86_64_pc_windows_gnu=/usr/bin/x86_64-w64-mingw32-gcc
export CXX_x86_64_pc_windows_gnu=/usr/bin/x86_64-w64-mingw32-g++
export AR_x86_64_pc_windows_gnu=/usr/bin/x86_64-w64-mingw32-ar
;;
esac
# Build the project
cargo build --release --all-features --target ${TARGET}
- name: package
image: *rust_image
commands:
- apt-get update && apt-get install -y zip
- mkdir -p dist
- |
if [ "${PLATFORM}" = "windows" ]; then
cp target/${TARGET}/release/owlen.exe dist/owlen.exe
cp target/${TARGET}/release/owlen-code.exe dist/owlen-code.exe
cd dist
zip -9 ${ARTIFACT}.zip owlen.exe owlen-code.exe
cd ..
mv dist/${ARTIFACT}.zip .
sha256sum ${ARTIFACT}.zip > ${ARTIFACT}.zip.sha256
else
cp target/${TARGET}/release/owlen dist/owlen
cp target/${TARGET}/release/owlen-code dist/owlen-code
cd dist
tar czf ${ARTIFACT}.tar.gz owlen owlen-code
cd ..
mv dist/${ARTIFACT}.tar.gz .
sha256sum ${ARTIFACT}.tar.gz > ${ARTIFACT}.tar.gz.sha256
fi
- name: release
image: plugins/gitea-release
settings:
api_key:
from_secret: gitea_token
base_url: https://somegit.dev
files:
- ${ARTIFACT}.tar.gz
- ${ARTIFACT}.tar.gz.sha256
- ${ARTIFACT}.zip
- ${ARTIFACT}.zip.sha256
title: Release ${CI_COMMIT_TAG}
note: "Release ${CI_COMMIT_TAG}"

81
CHANGELOG.md Normal file
View File

@@ -0,0 +1,81 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- Comprehensive documentation suite including guides for architecture, configuration, testing, and more.
- Rustdoc examples for core components like `Provider` and `SessionController`.
- Module-level documentation for `owlen-tui`.
### Changed
- The main `README.md` has been updated to be more concise and link to the new documentation.
---
## [0.1.10] - 2025-10-03
### Added
- **Material Light Theme**: A new built-in theme, `material-light`, has been added.
### Fixed
- **UI Readability**: Fixed a bug causing unreadable text in light themes.
- **Visual Selection**: The visual selection mode now correctly colors unselected text portions.
### Changed
- **Theme Colors**: The color palettes for `gruvbox`, `rose-pine`, and `monokai` have been corrected.
- **In-App Help**: The `:help` menu has been significantly expanded and updated.
## [0.1.9] - 2025-10-03
*This version corresponds to the release tagged v0.1.10 in the source repository.*
### Added
- **Material Light Theme**: A new built-in theme, `material-light`, has been added.
### Fixed
- **UI Readability**: Fixed a bug causing unreadable text in light themes.
- **Visual Selection**: The visual selection mode now correctly colors unselected text portions.
### Changed
- **Theme Colors**: The color palettes for `gruvbox`, `rose-pine`, and `monokai` have been corrected.
- **In-App Help**: The `:help` menu has been significantly expanded and updated.
## [0.1.8] - 2025-10-02
### Added
- **Command Autocompletion**: Implemented intelligent command suggestions and Tab completion in command mode.
### Changed
- **Build & CI**: Fixed cross-compilation for ARM64, ARMv7, and Windows.
## [0.1.7] - 2025-10-02
### Added
- **Tabbed Help System**: The help menu is now organized into five tabs for easier navigation.
- **Command Aliases**: Added `:o` as a short alias for `:load` / `:open`.
### Changed
- **Session Management**: Improved AI-generated session descriptions.
## [0.1.6] - 2025-10-02
### Added
- **Platform-Specific Storage**: Sessions are now saved to platform-appropriate directories (e.g., `~/.local/share/owlen` on Linux).
- **AI-Generated Session Descriptions**: Conversations can be automatically summarized on save.
### Changed
- **Migration**: Users on older versions can manually move their sessions from `~/.config/owlen/sessions` to the new platform-specific directory.
## [0.1.4] - 2025-10-01
### Added
- **Multi-Platform Builds**: Pre-built binaries are now provided for Linux (x86_64, aarch64, armv7) and Windows (x86_64).
- **AUR Package**: Owlen is now available on the Arch User Repository.
### Changed
- **Build System**: Switched from OpenSSL to rustls for better cross-platform compatibility.

121
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,121 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that are welcoming, open, and respectful.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[security@owlibou.com](mailto:security@owlibou.com). All complaints will be
reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interaction in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html

121
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,121 @@
# Contributing to Owlen
First off, thank you for considering contributing to Owlen! It's people like you that make Owlen such a great tool.
Following these guidelines helps to communicate that you respect the time of the developers managing and developing this open source project. In return, they should reciprocate that respect in addressing your issue, assessing changes, and helping you finalize your pull requests.
## Code of Conduct
This project and everyone participating in it is governed by the [Owlen Code of Conduct](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. Please report unacceptable behavior.
## How Can I Contribute?
### Reporting Bugs
This is one of the most helpful ways you can contribute. Before creating a bug report, please check a few things:
1. **Check the [troubleshooting guide](docs/troubleshooting.md).** Your issue might be a common one with a known solution.
2. **Search the existing issues.** It's possible someone has already reported the same bug. If so, add a comment to the existing issue instead of creating a new one.
When you are creating a bug report, please include as many details as possible. Fill out the required template; the information it asks for helps us resolve issues faster.
### Suggesting Enhancements
If you have an idea for a new feature or an improvement to an existing one, we'd love to hear about it. Please provide as much context as you can about what you're trying to achieve.
### Your First Code Contribution
Unsure where to begin contributing to Owlen? You can start by looking through `good first issue` and `help wanted` issues.
### Pull Requests
The process for submitting a pull request is as follows:
1. **Fork the repository** and create your branch from `main`.
2. **Set up pre-commit hooks** (see [Development Setup](#development-setup) below). This will automatically format and lint your code.
3. **Make your changes.**
4. **Run the tests.**
- `cargo test --all`
5. **Commit your changes.** The pre-commit hooks will automatically run `cargo fmt`, `cargo check`, and `cargo clippy`. If you need to bypass the hooks (not recommended), use `git commit --no-verify`.
6. **Add a clear, concise commit message.** We follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification.
7. **Push to your fork** and submit a pull request to Owlen's `main` branch.
8. **Include a clear description** of the problem and solution. Include the relevant issue number if applicable.
## Development Setup
To get started with the codebase, you'll need to have Rust installed. Then, you can clone the repository and build the project:
```sh
git clone https://github.com/Owlibou/owlen.git
cd owlen
cargo build
```
### Pre-commit Hooks
We use [pre-commit](https://pre-commit.com/) to automatically run formatting and linting checks before each commit. This helps maintain code quality and consistency.
**Install pre-commit:**
```sh
# Arch Linux
sudo pacman -S pre-commit
# Other Linux/macOS
pip install pre-commit
# Verify installation
pre-commit --version
```
**Setup the hooks:**
```sh
cd owlen
pre-commit install
```
Once installed, the hooks will automatically run on every commit. You can also run them manually:
```sh
# Run on all files
pre-commit run --all-files
# Run on staged files only
pre-commit run
```
The pre-commit hooks will check:
- Code formatting (`cargo fmt`)
- Compilation (`cargo check`)
- Linting (`cargo clippy --all-features`)
- General file hygiene (trailing whitespace, EOF newlines, etc.)
## Coding Style
- We use `cargo fmt` for automated code formatting. Please run it before committing your changes.
- We use `cargo clippy` for linting. Your code should be free of any clippy warnings.
## Commit Message Conventions
We use [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) for our commit messages. This allows for automated changelog generation and makes the project history easier to read.
The basic format is:
```
<type>[optional scope]: <description>
[optional body]
[optional footer(s)]
```
**Types:** `feat`, `fix`, `docs`, `style`, `refactor`, `test`, `chore`, `build`, `ci`.
**Example:**
```
feat(provider): add support for Gemini Pro
```
Thank you for your contribution!

View File

@@ -1,18 +1,64 @@
[workspace]
members = [
"crates/app/cli",
"crates/llm/ollama",
"crates/platform/config",
"crates/platform/hooks",
"crates/platform/permissions",
"crates/tools/bash",
"crates/tools/fs",
"crates/tools/slash",
"crates/integration/mcp-client",
]
resolver = "2"
members = [
"crates/owlen-core",
"crates/owlen-tui",
"crates/owlen-cli",
"crates/owlen-ollama",
]
exclude = []
[workspace.package]
edition = "2024"
version = "0.1.9"
edition = "2021"
authors = ["Owlibou"]
license = "AGPL-3.0"
rust-version = "1.91"
repository = "https://somegit.dev/Owlibou/owlen"
homepage = "https://somegit.dev/Owlibou/owlen"
keywords = ["llm", "tui", "cli", "ollama", "chat"]
categories = ["command-line-utilities"]
[workspace.dependencies]
# Async runtime and utilities
tokio = { version = "1.0", features = ["full"] }
tokio-stream = "0.1"
tokio-util = { version = "0.7", features = ["rt"] }
futures = "0.3"
futures-util = "0.3"
# TUI framework
ratatui = "0.28"
crossterm = "0.28"
tui-textarea = "0.6"
# HTTP client and JSON handling
reqwest = { version = "0.12", default-features = false, features = ["json", "stream", "rustls-tls"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# Utilities
uuid = { version = "1.0", features = ["v4", "serde"] }
anyhow = "1.0"
thiserror = "1.0"
# Configuration
toml = "0.8"
shellexpand = "3.1"
# Database
sled = "0.34"
# For better text handling
textwrap = "0.16"
# Async traits
async-trait = "0.1"
# CLI framework
clap = { version = "4.0", features = ["derive"] }
# Dev dependencies
tempfile = "3.8"
tokio-test = "0.4"
# For more keys and their definitions, see https://doc.rust-lang.org/cargo/reference/manifest.html

661
LICENSE Normal file
View File

@@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

49
PKGBUILD Normal file
View File

@@ -0,0 +1,49 @@
# Maintainer: vikingowl <christian@nachtigall.dev>
pkgname=owlen
pkgver=0.1.9
pkgrel=1
pkgdesc="Terminal User Interface LLM client for Ollama with chat and code assistance features"
arch=('x86_64')
url="https://somegit.dev/Owlibou/owlen"
license=('AGPL-3.0-or-later')
depends=('gcc-libs')
makedepends=('cargo' 'git')
options=(!lto) # avoid LTO-linked ring symbol drop with lld
# Release tarball for tag v$pkgver; Forgejo archives extract to the bare repo
# name ("owlen"), which is why the functions below `cd $pkgname` rather than
# $pkgname-$pkgver — TODO confirm against the actual archive layout.
source=("$pkgname-$pkgver.tar.gz::$url/archive/v$pkgver.tar.gz")
# Pins the exact v0.1.9 tarball contents.
sha256sums=('cabb1cfdfc247b5d008c6c5f94e13548bcefeba874aae9a9d45aa95ae1c085ec')
prepare() {
    cd $pkgname
    # Download all crate dependencies for the host triple up front so the
    # later cargo invocations can run with --frozen (fully offline).
    cargo fetch --target "$(rustc -vV | sed -n 's/host: //p')"
}
build() {
    cd $pkgname
    # --no-as-needed keeps shared libraries linked even when the linker deems
    # them unused, preventing symbols (e.g. from the `ring` crate) from being
    # dropped at link time.
    export RUSTFLAGS="${RUSTFLAGS:-} -C link-arg=-Wl,--no-as-needed"
    # Force LTO off regardless of profile defaults, for the same linker issue.
    export CARGO_PROFILE_RELEASE_LTO=false
    export CARGO_TARGET_DIR=target
    cargo build --frozen --release --all-features
}
check() {
    cd $pkgname
    # Same linker workaround as the build step; tests link the same crates.
    export RUSTFLAGS="${RUSTFLAGS:-} -C link-arg=-Wl,--no-as-needed"
    cargo test --frozen --all-features
}
package() {
    cd "$pkgname"
    # Install binaries
    install -Dm755 target/release/owlen "$pkgdir/usr/bin/owlen"
    install -Dm755 target/release/owlen-code "$pkgdir/usr/bin/owlen-code"
    # Install documentation
    install -Dm644 README.md "$pkgdir/usr/share/doc/$pkgname/README.md"
    # Install built-in themes for reference
    install -Dm644 themes/README.md "$pkgdir/usr/share/$pkgname/themes/README.md"
    for theme in themes/*.toml; do
        # Quote "$theme" inside the command substitution: unquoted it would be
        # word-split by basename if a theme filename ever contains whitespace.
        install -Dm644 "$theme" "$pkgdir/usr/share/$pkgname/themes/$(basename "$theme")"
    done
}

103
README.md Normal file
View File

@@ -0,0 +1,103 @@
# OWLEN
> Terminal-native assistant for running local language models with a comfortable TUI.
![Status](https://img.shields.io/badge/status-alpha-yellow)
![Version](https://img.shields.io/badge/version-0.1.9-blue)
![Rust](https://img.shields.io/badge/made_with-Rust-ffc832?logo=rust&logoColor=white)
![License](https://img.shields.io/badge/license-AGPL--3.0-blue)
## What Is OWLEN?
OWLEN is a Rust-powered, terminal-first interface for interacting with local large
language models. It provides a responsive chat workflow that runs against
[Ollama](https://ollama.com/) with a focus on developer productivity, vim-style navigation,
and seamless session management—all without leaving your terminal.
## Alpha Status
This project is currently in **alpha** and under active development. Core features are functional, but expect occasional bugs and breaking changes. Feedback, bug reports, and contributions are very welcome!
## Screenshots
![OWLEN TUI Layout](images/layout.png)
The OWLEN interface features a clean, multi-panel layout with vim-inspired navigation. See more screenshots in the [`images/`](images/) directory.
## Features
- **Vim-style Navigation**: Normal, editing, visual, and command modes.
- **Streaming Responses**: Real-time token streaming from Ollama.
- **Advanced Text Editing**: Multi-line input, history, and clipboard support.
- **Session Management**: Save, load, and manage conversations.
- **Theming System**: 10 built-in themes and support for custom themes.
- **Modular Architecture**: Extensible provider system (currently Ollama).
## Getting Started
### Prerequisites
- Rust 1.75+ and Cargo.
- A running Ollama instance.
- A terminal that supports 256 colors.
### Installation
#### Linux & macOS
The recommended way to install on Linux and macOS is to clone the repository and install using `cargo`.
```bash
git clone https://github.com/Owlibou/owlen.git
cd owlen
cargo install --path crates/owlen-cli
```
**Note for macOS**: While this method works, official binary releases for macOS are planned for the future.
#### Windows
The Windows build has not been thoroughly tested yet. Installation is possible via the same `cargo install` method, but it is considered experimental at this time.
### Running OWLEN
Make sure Ollama is running, then launch the application:
```bash
owlen
```
If you built from source without installing, you can run it with:
```bash
./target/release/owlen
```
## Using the TUI
OWLEN uses a modal, vim-inspired interface. Press `?` in Normal mode to view the help screen with all keybindings.
- **Normal Mode**: Navigate with `h/j/k/l`, `w/b`, `gg/G`.
- **Editing Mode**: Enter with `i` or `a`. Send messages with `Enter`.
- **Command Mode**: Enter with `:`. Access commands like `:quit`, `:save`, `:theme`.
## Documentation
For more detailed information, please refer to the following documents:
- **[CONTRIBUTING.md](CONTRIBUTING.md)**: Guidelines for contributing to the project.
- **[CHANGELOG.md](CHANGELOG.md)**: A log of changes for each version.
- **[docs/architecture.md](docs/architecture.md)**: An overview of the project's architecture.
- **[docs/troubleshooting.md](docs/troubleshooting.md)**: Help with common issues.
- **[docs/provider-implementation.md](docs/provider-implementation.md)**: A guide for adding new providers.
## Configuration
OWLEN stores its configuration in `~/.config/owlen/config.toml`. This file is created on the first run and can be customized. You can also add custom themes in `~/.config/owlen/themes/`.
See the [themes/README.md](themes/README.md) for more details on theming.
## Roadmap
We are actively working on enhancing the code client, adding more providers (OpenAI, Anthropic), and improving the overall user experience. See the [Roadmap section in the old README](https://github.com/Owlibou/owlen/blob/main/README.md?plain=1#L295) for more details.
## Contributing
Contributions are highly welcome! Please see our **[Contributing Guide](CONTRIBUTING.md)** for details on how to get started, including our code style, commit conventions, and pull request process.
## License
This project is licensed under the GNU Affero General Public License v3.0. See the [LICENSE](LICENSE) file for details.

19
SECURITY.md Normal file
View File

@@ -0,0 +1,19 @@
# Security Policy
## Supported Versions
We are currently in a pre-release phase, so only the latest version is actively supported. As we move towards a 1.0 release, this policy will be updated with specific version support.
| Version | Supported |
| ------- | ------------------ |
| < 1.0 | :white_check_mark: |
## Reporting a Vulnerability
The Owlen team and community take all security vulnerabilities seriously. Thank you for helping improve the security of our project. We appreciate your responsible disclosure and will make every effort to acknowledge your contributions.
To report a security vulnerability, please email the project lead at [security@owlibou.com](mailto:security@owlibou.com) with a detailed description of the issue, the steps to reproduce it, and any affected versions.
You will receive a response from us within 48 hours. If the issue is confirmed, we will release a patch as soon as possible, depending on the complexity of the issue.
Please do not report security vulnerabilities through public GitHub issues.

View File

@@ -1,22 +0,0 @@
/target
### Rust template
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
### rust-analyzer template
# Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules)
rust-project.json

View File

@@ -1,28 +0,0 @@
[package]
name = "owlen"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true
[dependencies]
clap = { version = "4.5", features = ["derive"] }
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
color-eyre = "0.6"
llm-ollama = { path = "../../llm/ollama" }
tools-fs = { path = "../../tools/fs" }
tools-bash = { path = "../../tools/bash" }
tools-slash = { path = "../../tools/slash" }
config-agent = { package = "config-agent", path = "../../platform/config" }
permissions = { path = "../../platform/permissions" }
hooks = { path = "../../platform/hooks" }
futures-util = "0.3.31"
[dev-dependencies]
assert_cmd = "2.0"
predicates = "3.1"
httpmock = "0.7"
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }
tempfile = "3.23.0"

View File

@@ -1,580 +0,0 @@
use clap::{Parser, ValueEnum};
use color_eyre::eyre::{Result, eyre};
use config_agent::load_settings;
use futures_util::TryStreamExt;
use hooks::{HookEvent, HookManager, HookResult};
use llm_ollama::{OllamaClient, OllamaOptions, types::ChatMessage};
use permissions::{PermissionDecision, Tool};
use serde::Serialize;
use std::io::{self, Write};
use std::time::{SystemTime, UNIX_EPOCH};
/// Output serialization format selected via `--output-format`.
#[derive(Debug, Clone, Copy, ValueEnum)]
enum OutputFormat {
    /// Plain human-readable text on stdout (the default).
    Text,
    /// A single JSON document (`SessionOutput`) for the whole invocation.
    Json,
    /// Newline-delimited JSON events (`StreamEvent`) emitted incrementally.
    StreamJson,
}
/// Top-level JSON document emitted for `--output-format json`.
#[derive(Serialize)]
struct SessionOutput {
    /// Identifier of this CLI session (see `generate_session_id`).
    session_id: String,
    /// Chat transcript entries; empty for direct tool invocations.
    messages: Vec<serde_json::Value>,
    /// Token and timing totals for the session.
    stats: Stats,
    /// Result payload of a direct tool call, when one was made.
    #[serde(skip_serializing_if = "Option::is_none")]
    result: Option<serde_json::Value>,
    /// Name of the tool that produced `result`, when applicable.
    #[serde(skip_serializing_if = "Option::is_none")]
    tool: Option<String>,
}
/// Token-usage and timing summary attached to JSON output.
#[derive(Serialize)]
struct Stats {
    /// Total tokens consumed; 0 when no model call was made.
    total_tokens: u64,
    /// Prompt-side token count, when the backend reported one.
    #[serde(skip_serializing_if = "Option::is_none")]
    prompt_tokens: Option<u64>,
    /// Completion-side token count, when the backend reported one.
    #[serde(skip_serializing_if = "Option::is_none")]
    completion_tokens: Option<u64>,
    /// Wall-clock duration in milliseconds.
    duration_ms: u64,
}
/// One newline-delimited JSON event for `--output-format stream-json`.
#[derive(Serialize)]
struct StreamEvent {
    /// Event discriminator, serialized as "type" (e.g. "session_start",
    /// "tool_result", "session_end" — see `output_tool_result`).
    #[serde(rename = "type")]
    event_type: String,
    /// Session identifier; set on session_start events.
    #[serde(skip_serializing_if = "Option::is_none")]
    session_id: Option<String>,
    /// Serialized payload; set on tool_result events.
    #[serde(skip_serializing_if = "Option::is_none")]
    content: Option<String>,
    /// Final statistics; set on session_end events.
    #[serde(skip_serializing_if = "Option::is_none")]
    stats: Option<Stats>,
}
/// Build a session identifier from the current Unix time in milliseconds.
///
/// Panics if the system clock reports a time before the Unix epoch
/// (`duration_since` fails), matching the original behavior.
fn generate_session_id() -> String {
    let elapsed = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap();
    format!("session-{}", elapsed.as_millis())
}
/// Print the result of a direct tool invocation in the requested format.
///
/// * `Text` — the raw string, or pretty-printed JSON for non-string results.
/// * `Json` — a single `SessionOutput` document with zeroed stats.
/// * `StreamJson` — a session_start / tool_result / session_end event triple.
fn output_tool_result(
    format: OutputFormat,
    tool: &str,
    result: serde_json::Value,
    session_id: &str,
) -> Result<()> {
    // Shared zeroed-stats constructor: direct tool calls consume no model
    // tokens and are not timed.
    let zero_stats = || Stats {
        total_tokens: 0,
        prompt_tokens: None,
        completion_tokens: None,
        duration_ms: 0,
    };
    match format {
        OutputFormat::Text => match result.as_str() {
            Some(text) => println!("{}", text),
            None => println!("{}", serde_json::to_string_pretty(&result)?),
        },
        OutputFormat::Json => {
            let document = SessionOutput {
                session_id: session_id.to_string(),
                messages: vec![],
                stats: zero_stats(),
                result: Some(result),
                tool: Some(tool.to_string()),
            };
            println!("{}", serde_json::to_string(&document)?);
        }
        OutputFormat::StreamJson => {
            // Emit the three-event envelope, one JSON object per line.
            let events = [
                StreamEvent {
                    event_type: "session_start".to_string(),
                    session_id: Some(session_id.to_string()),
                    content: None,
                    stats: None,
                },
                StreamEvent {
                    event_type: "tool_result".to_string(),
                    session_id: None,
                    content: Some(serde_json::to_string(&result)?),
                    stats: None,
                },
                StreamEvent {
                    event_type: "session_end".to_string(),
                    session_id: None,
                    content: None,
                    stats: Some(zero_stats()),
                },
            ];
            for event in &events {
                println!("{}", serde_json::to_string(event)?);
            }
        }
    }
    Ok(())
}
// Direct tool subcommands that bypass the chat loop and run a single tool.
// NOTE(review): clap turns `///` doc comments into --help text, so these
// annotations use `//` to leave the rendered help output unchanged.
#[derive(clap::Subcommand, Debug)]
enum Cmd {
    // Print the contents of a file.
    Read { path: String },
    // List paths matching a glob pattern.
    Glob { pattern: String },
    // Search files under `root` for a pattern.
    Grep { root: String, pattern: String },
    // Create or overwrite a file with the given content.
    Write { path: String, content: String },
    // Replace `old_string` with `new_string` in a file.
    Edit { path: String, old_string: String, new_string: String },
    // Run a shell command; `timeout` units are not visible here — TODO confirm.
    Bash { command: String, #[arg(long)] timeout: Option<u64> },
    // Execute a named slash command with its arguments.
    Slash { command_name: String, args: Vec<String> },
}
// Command-line arguments for the `code` binary.
// NOTE(review): new annotations use `//` rather than `///` because clap
// renders doc comments into --help text; only the pre-existing doc comments
// below contribute to help output.
#[derive(Parser, Debug)]
#[command(name = "code", version)]
struct Args {
    // Base URL of the Ollama server; configuration default used when absent.
    #[arg(long)]
    ollama_url: Option<String>,
    // Model name to use for chat completions.
    #[arg(long)]
    model: Option<String>,
    // API key forwarded to the backend, if it requires one.
    #[arg(long)]
    api_key: Option<String>,
    // Non-interactive mode: print the response and exit — TODO confirm.
    #[arg(long)]
    print: bool,
    /// Override the permission mode (plan, acceptEdits, code)
    #[arg(long)]
    mode: Option<String>,
    /// Output format (text, json, stream-json)
    #[arg(long, value_enum, default_value = "text")]
    output_format: OutputFormat,
    // Free-form prompt words collected as positional arguments.
    #[arg()]
    prompt: Vec<String>,
    // Optional direct tool subcommand; when present, no chat session runs.
    #[command(subcommand)]
    cmd: Option<Cmd>,
}
/// Gate a tool invocation behind the permission-manager decision and the
/// PreToolUse hook chain.
///
/// * `decision`  - outcome of `perms.check(...)` for this tool.
/// * `hook_tool` - tool name as reported to hooks (e.g. "Read", "SlashCommand").
/// * `display`   - phrase used in user-facing error messages
///                 (e.g. "Read operation", "Slash command").
/// * `args`      - JSON payload describing the invocation, passed to hooks.
/// * `ask_hint`  - suffix appended to the "requires approval" error telling
///                 the user which --mode would allow the operation.
///
/// Returns `Ok(())` when the operation may proceed, otherwise an error naming
/// the gate (permissions or hooks) that rejected it. Error strings match the
/// previous inline implementations byte-for-byte.
async fn gate_tool(
    decision: PermissionDecision,
    hook_mgr: &HookManager,
    hook_tool: &str,
    display: &str,
    args: serde_json::Value,
    ask_hint: &str,
) -> Result<()> {
    match decision {
        PermissionDecision::Allow => {
            let event = HookEvent::PreToolUse {
                tool: hook_tool.to_string(),
                args,
            };
            // Hooks get a 5-second budget; a Deny result vetoes the tool.
            match hook_mgr.execute(&event, Some(5000)).await? {
                HookResult::Deny => Err(eyre!("Hook denied {} operation", hook_tool)),
                HookResult::Allow => Ok(()),
            }
        }
        PermissionDecision::Ask => Err(eyre!(
            "Permission denied: {} requires approval. {}",
            display,
            ask_hint
        )),
        PermissionDecision::Deny => {
            Err(eyre!("Permission denied: {} is blocked.", display))
        }
    }
}

/// Entry point: loads settings, applies CLI overrides, then either executes a
/// single tool subcommand (gated by permissions and hooks) or sends a one-shot
/// chat prompt to the model, rendering the reply in the requested format.
#[tokio::main]
async fn main() -> Result<()> {
    color_eyre::install()?;
    let args = Args::parse();
    let mut settings = load_settings(None).unwrap_or_default();
    // The CLI --mode flag wins over the settings file.
    if let Some(mode) = args.mode {
        settings.mode = mode;
    }
    // Create permission manager from settings
    let perms = settings.create_permission_manager();
    // Create hook manager rooted at the current directory
    let hook_mgr = HookManager::new(".");
    // Generate session ID
    let session_id = generate_session_id();
    let output_format = args.output_format;
    if let Some(cmd) = args.cmd {
        match cmd {
            Cmd::Read { path } => {
                gate_tool(
                    perms.check(Tool::Read, None),
                    &hook_mgr,
                    "Read",
                    "Read operation",
                    serde_json::json!({"path": &path}),
                    "Use --mode code to allow.",
                )
                .await?;
                let s = tools_fs::read_file(&path)?;
                output_tool_result(output_format, "Read", serde_json::json!(s), &session_id)?;
                return Ok(());
            }
            Cmd::Glob { pattern } => {
                gate_tool(
                    perms.check(Tool::Glob, None),
                    &hook_mgr,
                    "Glob",
                    "Glob operation",
                    serde_json::json!({"pattern": &pattern}),
                    "Use --mode code to allow.",
                )
                .await?;
                for p in tools_fs::glob_list(&pattern)? {
                    println!("{}", p);
                }
                return Ok(());
            }
            Cmd::Grep { root, pattern } => {
                gate_tool(
                    perms.check(Tool::Grep, None),
                    &hook_mgr,
                    "Grep",
                    "Grep operation",
                    serde_json::json!({"root": &root, "pattern": &pattern}),
                    "Use --mode code to allow.",
                )
                .await?;
                for (path, line_number, text) in tools_fs::grep(&root, &pattern)? {
                    println!("{path}:{line_number}:{text}")
                }
                return Ok(());
            }
            Cmd::Write { path, content } => {
                gate_tool(
                    perms.check(Tool::Write, None),
                    &hook_mgr,
                    "Write",
                    "Write operation",
                    serde_json::json!({"path": &path, "content": &content}),
                    "Use --mode acceptEdits or --mode code to allow.",
                )
                .await?;
                tools_fs::write_file(&path, &content)?;
                println!("File written: {}", path);
                return Ok(());
            }
            Cmd::Edit { path, old_string, new_string } => {
                gate_tool(
                    perms.check(Tool::Edit, None),
                    &hook_mgr,
                    "Edit",
                    "Edit operation",
                    serde_json::json!({"path": &path, "old_string": &old_string, "new_string": &new_string}),
                    "Use --mode acceptEdits or --mode code to allow.",
                )
                .await?;
                tools_fs::edit_file(&path, &old_string, &new_string)?;
                println!("File edited: {}", path);
                return Ok(());
            }
            Cmd::Bash { command, timeout } => {
                // Bash decisions can depend on the command text itself
                // (pattern matching in the permission manager).
                gate_tool(
                    perms.check(Tool::Bash, Some(&command)),
                    &hook_mgr,
                    "Bash",
                    "Bash operation",
                    serde_json::json!({"command": &command, "timeout": timeout}),
                    "Use --mode code to allow.",
                )
                .await?;
                let mut session = tools_bash::BashSession::new().await?;
                let output = session.execute(&command, timeout).await?;
                // Mirror the child's stdout/stderr on our own streams.
                if !output.stdout.is_empty() {
                    print!("{}", output.stdout);
                }
                if !output.stderr.is_empty() {
                    eprint!("{}", output.stderr);
                }
                session.close().await?;
                // Propagate the child's exit code on failure.
                if !output.success {
                    std::process::exit(output.exit_code);
                }
                return Ok(());
            }
            Cmd::Slash { command_name, args } => {
                gate_tool(
                    perms.check(Tool::SlashCommand, None),
                    &hook_mgr,
                    "SlashCommand",
                    "Slash command",
                    serde_json::json!({"command_name": &command_name, "args": &args}),
                    "Use --mode code to allow.",
                )
                .await?;
                // Slash commands are markdown files under .owlen/commands/.
                let command_path = format!(".owlen/commands/{}.md", command_name);
                let content = match tools_fs::read_file(&command_path) {
                    Ok(c) => c,
                    Err(_) => {
                        return Err(eyre!(
                            "Slash command '{}' not found at {}",
                            command_name,
                            command_path
                        ));
                    }
                };
                // Substitute $ARGUMENTS/$N placeholders, then inline @file refs.
                let args_refs: Vec<&str> = args.iter().map(|s| s.as_str()).collect();
                let slash_cmd = tools_slash::parse_slash_command(&content, &args_refs)?;
                let resolved_body = slash_cmd.resolve_file_refs()?;
                println!("{}", resolved_body);
                return Ok(());
            }
        }
    }
    // No subcommand: run a one-shot chat prompt against the model.
    let prompt = if args.prompt.is_empty() {
        "Say hello".to_string()
    } else {
        args.prompt.join(" ")
    };
    let model = args.model.unwrap_or(settings.model);
    let api_key = args.api_key.or(settings.api_key);
    // Use Ollama Cloud when model has "-cloud" suffix AND API key is set
    let use_cloud = model.ends_with("-cloud") && api_key.is_some();
    let client = if use_cloud {
        OllamaClient::with_cloud().with_api_key(api_key.unwrap())
    } else {
        let base_url = args.ollama_url.unwrap_or(settings.ollama_url);
        let mut client = OllamaClient::new(base_url);
        if let Some(key) = api_key {
            client = client.with_api_key(key);
        }
        client
    };
    let opts = OllamaOptions {
        model,
        stream: true,
    };
    let msgs = vec![ChatMessage {
        role: "user".into(),
        content: prompt.clone(),
    }];
    let start_time = SystemTime::now();
    // Handle different output formats
    match output_format {
        OutputFormat::Text => {
            // Text format: stream tokens straight to stdout as they arrive.
            let mut stream = client.chat_stream(&msgs, &opts).await?;
            while let Some(chunk) = stream.try_next().await? {
                if let Some(m) = chunk.message {
                    if let Some(c) = m.content {
                        print!("{c}");
                        io::stdout().flush()?;
                    }
                }
                if matches!(chunk.done, Some(true)) {
                    break;
                }
            }
            println!(); // Newline after response
        }
        OutputFormat::Json => {
            // JSON format: collect all chunks, then output one final document.
            let mut stream = client.chat_stream(&msgs, &opts).await?;
            let mut response = String::new();
            while let Some(chunk) = stream.try_next().await? {
                if let Some(m) = chunk.message {
                    if let Some(c) = m.content {
                        response.push_str(&c);
                    }
                }
                if matches!(chunk.done, Some(true)) {
                    break;
                }
            }
            // `SystemTime::elapsed` errors if the clock went backwards; report
            // 0 instead of panicking (previously `.unwrap()`).
            let duration_ms = start_time
                .elapsed()
                .map(|d| d.as_millis() as u64)
                .unwrap_or(0);
            // Rough token estimate (tokens ~= chars / 4)
            let estimated_tokens = ((prompt.len() + response.len()) / 4) as u64;
            let output = SessionOutput {
                session_id,
                messages: vec![
                    serde_json::json!({"role": "user", "content": prompt}),
                    serde_json::json!({"role": "assistant", "content": response}),
                ],
                stats: Stats {
                    total_tokens: estimated_tokens,
                    prompt_tokens: Some((prompt.len() / 4) as u64),
                    completion_tokens: Some((response.len() / 4) as u64),
                    duration_ms,
                },
                result: None,
                tool: None,
            };
            println!("{}", serde_json::to_string(&output)?);
        }
        OutputFormat::StreamJson => {
            // Stream-JSON: NDJSON events — session_start, chunk*, session_end.
            let session_start = StreamEvent {
                event_type: "session_start".to_string(),
                session_id: Some(session_id.clone()),
                content: None,
                stats: None,
            };
            println!("{}", serde_json::to_string(&session_start)?);
            let mut stream = client.chat_stream(&msgs, &opts).await?;
            let mut response = String::new();
            while let Some(chunk) = stream.try_next().await? {
                if let Some(m) = chunk.message {
                    if let Some(c) = m.content {
                        response.push_str(&c);
                        let chunk_event = StreamEvent {
                            event_type: "chunk".to_string(),
                            session_id: None,
                            content: Some(c),
                            stats: None,
                        };
                        println!("{}", serde_json::to_string(&chunk_event)?);
                    }
                }
                if matches!(chunk.done, Some(true)) {
                    break;
                }
            }
            // Same non-panicking elapsed handling as the Json branch.
            let duration_ms = start_time
                .elapsed()
                .map(|d| d.as_millis() as u64)
                .unwrap_or(0);
            // Rough token estimate
            let estimated_tokens = ((prompt.len() + response.len()) / 4) as u64;
            let session_end = StreamEvent {
                event_type: "session_end".to_string(),
                session_id: None,
                content: None,
                stats: Some(Stats {
                    total_tokens: estimated_tokens,
                    prompt_tokens: Some((prompt.len() / 4) as u64),
                    completion_tokens: Some((response.len() / 4) as u64),
                    duration_ms,
                }),
            };
            println!("{}", serde_json::to_string(&session_end)?);
        }
    }
    Ok(())
}

View File

@@ -1,39 +0,0 @@
use assert_cmd::Command;
use httpmock::prelude::*;
use predicates::prelude::PredicateBooleanExt;
#[tokio::test]
async fn headless_streams_ndjson() {
    // Stand up a mock Ollama endpoint answering /api/chat with NDJSON chunks.
    let server = MockServer::start_async().await;
    let expected_request = serde_json::json!({
        "model": "qwen2.5",
        "messages": [{"role": "user", "content": "hello"}],
        "stream": true
    });
    let ndjson_body = concat!(
        r#"{"message":{"role":"assistant","content":"Hel"}}"#, "\n",
        r#"{"message":{"role":"assistant","content":"lo"}}"#, "\n",
        r#"{"done":true}"#, "\n",
    );
    let _mock = server.mock(|when, then| {
        when.method(POST)
            .path("/api/chat")
            .json_body(expected_request.clone());
        then.status(200)
            .header("content-type", "application/x-ndjson")
            .body(ndjson_body);
    });
    // Drive the binary in headless print mode against the mock server.
    let base_url = server.base_url();
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args([
        "--ollama-url",
        base_url.as_str(),
        "--model",
        "qwen2.5",
        "--print",
        "hello",
    ]);
    // Accept either the fused "Hello" or the two raw chunks appearing in order.
    cmd.assert().success().stdout(
        predicates::str::contains("Hello")
            .count(1)
            .or(predicates::str::contains("Hel").and(predicates::str::contains("lo"))),
    );
}

View File

@@ -1,145 +0,0 @@
use assert_cmd::Command;
use serde_json::Value;
use std::fs;
use tempfile::tempdir;
// JSON output mode must emit one document carrying session id, stats, and the
// message transcript.
#[test]
fn print_json_has_session_id_and_stats() {
    let mut cmd = Command::cargo_bin("owlen").unwrap();
    cmd.args(["--output-format", "json", "Say hello"]);
    let assertion = cmd.assert().success();
    let raw = String::from_utf8_lossy(&assertion.get_output().stdout);
    // The whole stdout must parse as a single JSON document.
    let parsed: Value = serde_json::from_str(&raw).expect("Output should be valid JSON");
    // A non-empty session identifier must be present.
    assert!(parsed.get("session_id").is_some(), "JSON output should have session_id");
    let sid = parsed["session_id"].as_str().unwrap();
    assert!(!sid.is_empty(), "session_id should not be empty");
    // A stats block with a token count must be present.
    assert!(parsed.get("stats").is_some(), "JSON output should have stats");
    let stats = &parsed["stats"];
    assert!(stats.get("total_tokens").is_some(), "stats should have total_tokens");
    // The message transcript must be present.
    assert!(parsed.get("messages").is_some(), "JSON output should have messages");
}
/// Every line of stream-json output must be a self-describing JSON event,
/// starting with `session_start` and ending with `session_end`/`complete`.
#[test]
fn stream_json_sequence_is_well_formed() {
    let mut cmd = Command::cargo_bin("owlen").unwrap();
    cmd.arg("--output-format")
        .arg("stream-json")
        .arg("Say hello");
    let output = cmd.assert().success();
    let stdout = String::from_utf8_lossy(&output.get_output().stdout);
    // Stream-JSON is NDJSON - each line should be valid JSON
    let lines: Vec<&str> = stdout.lines().filter(|l| !l.is_empty()).collect();
    assert!(!lines.is_empty(), "Stream-JSON should produce at least one event");
    for (i, line) in lines.iter().enumerate() {
        // FIX: `.expect(&format!(..))` built the panic message on every
        // iteration even when parsing succeeded (clippy::expect_fun_call);
        // `unwrap_or_else` only formats on failure.
        let json: Value = serde_json::from_str(line)
            .unwrap_or_else(|e| panic!("Line {} should be valid JSON: {} ({})", i, line, e));
        // Each event should have a type
        assert!(json.get("type").is_some(), "Event should have a type field");
    }
    // First event should be session_start
    let first: Value = serde_json::from_str(lines[0]).unwrap();
    assert_eq!(first["type"].as_str().unwrap(), "session_start");
    assert!(first.get("session_id").is_some());
    // Last event should be session_end or complete
    let last: Value = serde_json::from_str(lines[lines.len() - 1]).unwrap();
    let last_type = last["type"].as_str().unwrap();
    assert!(
        last_type == "session_end" || last_type == "complete",
        "Last event should be session_end or complete, got: {}",
        last_type
    );
}
// With no --output-format flag, stdout must be plain text rather than JSON.
#[test]
fn text_format_is_default() {
    let mut cmd = Command::cargo_bin("owlen").unwrap();
    cmd.arg("Say hello");
    let assertion = cmd.assert().success();
    let raw = String::from_utf8_lossy(&assertion.get_output().stdout);
    let parsed = serde_json::from_str::<Value>(&raw);
    assert!(parsed.is_err(), "Default output should be text, not JSON");
}
// Running a tool subcommand with --output-format json must wrap the tool
// result and tool name in the JSON envelope.
#[test]
fn json_format_with_tool_execution() {
    // Prepare a file for the Read tool to consume.
    let dir = tempdir().unwrap();
    let file = dir.path().join("test.txt");
    fs::write(&file, "hello world").unwrap();
    let mut cmd = Command::cargo_bin("owlen").unwrap();
    cmd.args([
        "--mode",
        "code",
        "--output-format",
        "json",
        "read",
        file.to_str().unwrap(),
    ]);
    let assertion = cmd.assert().success();
    let raw = String::from_utf8_lossy(&assertion.get_output().stdout);
    let json: Value = serde_json::from_str(&raw).expect("Output should be valid JSON");
    // The payload must carry both the tool result and the tool name.
    assert!(json.get("result").is_some());
    assert!(json.get("tool").is_some());
    assert_eq!(json["tool"].as_str().unwrap(), "Read");
}
// Stream-json output must contain "chunk" events, each carrying content.
#[test]
fn stream_json_includes_chunk_events() {
    let mut cmd = Command::cargo_bin("owlen").unwrap();
    cmd.args(["--output-format", "stream-json", "Say hello"]);
    let assertion = cmd.assert().success();
    let raw = String::from_utf8_lossy(&assertion.get_output().stdout);
    let lines: Vec<&str> = raw.lines().filter(|l| !l.is_empty()).collect();
    // Collect every event whose "type" field says "chunk"; lines that fail to
    // parse are simply not chunks.
    let mut chunk_events: Vec<&str> = Vec::new();
    for line in &lines {
        let is_chunk = serde_json::from_str::<Value>(line)
            .map(|json| json["type"].as_str() == Some("chunk"))
            .unwrap_or(false);
        if is_chunk {
            chunk_events.push(line);
        }
    }
    assert!(!chunk_events.is_empty(), "Should have at least one chunk event");
    // Every chunk event must carry its text content.
    for chunk_line in chunk_events {
        let chunk: Value = serde_json::from_str(chunk_line).unwrap();
        assert!(chunk.get("content").is_some(), "Chunk should have content");
    }
}

View File

@@ -1,255 +0,0 @@
use assert_cmd::Command;
use std::fs;
use tempfile::tempdir;
// Reads are permitted under the default (plan) mode.
#[test]
fn plan_mode_allows_read_operations() {
    let dir = tempdir().unwrap();
    let file = dir.path().join("test.txt");
    fs::write(&file, "hello world").unwrap();
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["read", file.to_str().unwrap()]);
    cmd.assert().success().stdout("hello world\n");
}
// Globbing is read-only and therefore works in the default plan mode.
#[test]
fn plan_mode_allows_glob_operations() {
    let dir = tempdir().unwrap();
    for name in ["a.txt", "b.txt"] {
        fs::write(dir.path().join(name), "test").unwrap();
    }
    let pattern = format!("{}/*.txt", dir.path().display());
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["glob", &pattern]);
    cmd.assert().success();
}
// Grep is read-only and therefore allowed in the default plan mode.
#[test]
fn plan_mode_allows_grep_operations() {
    let dir = tempdir().unwrap();
    fs::write(dir.path().join("test.txt"), "hello world\nfoo bar").unwrap();
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["grep", dir.path().to_str().unwrap(), "hello"]);
    cmd.assert().success();
}
// --mode code must also permit read operations (superset of plan).
#[test]
fn mode_override_via_cli_flag() {
    let dir = tempdir().unwrap();
    let file = dir.path().join("test.txt");
    fs::write(&file, "content").unwrap();
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["--mode", "code", "read", file.to_str().unwrap()]);
    cmd.assert().success().stdout("content\n");
}
// File creation is refused while in the default plan mode.
#[test]
fn plan_mode_blocks_write_operations() {
    let dir = tempdir().unwrap();
    let target = dir.path().join("new.txt");
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["write", target.to_str().unwrap(), "content"]);
    cmd.assert().failure();
}
// In-place edits are refused while in the default plan mode.
#[test]
fn plan_mode_blocks_edit_operations() {
    let dir = tempdir().unwrap();
    let file = dir.path().join("test.txt");
    fs::write(&file, "old content").unwrap();
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["edit", file.to_str().unwrap(), "old", "new"]);
    cmd.assert().failure();
}
// acceptEdits mode unlocks file creation.
#[test]
fn accept_edits_mode_allows_write() {
    let dir = tempdir().unwrap();
    let target = dir.path().join("new.txt");
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args([
        "--mode",
        "acceptEdits",
        "write",
        target.to_str().unwrap(),
        "new content",
    ]);
    cmd.assert().success();
    // The file must actually exist with the requested contents.
    assert_eq!(fs::read_to_string(&target).unwrap(), "new content");
}
// acceptEdits mode unlocks in-place string replacement.
#[test]
fn accept_edits_mode_allows_edit() {
    let dir = tempdir().unwrap();
    let file = dir.path().join("test.txt");
    fs::write(&file, "line 1\nline 2\nline 3").unwrap();
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args([
        "--mode",
        "acceptEdits",
        "edit",
        file.to_str().unwrap(),
        "line 2",
        "modified line",
    ]);
    cmd.assert().success();
    // Only the middle line should have changed.
    assert_eq!(
        fs::read_to_string(&file).unwrap(),
        "line 1\nmodified line\nline 3"
    );
}
// code mode permits both write and edit on the same file.
#[test]
fn code_mode_allows_all_operations() {
    let dir = tempdir().unwrap();
    let file = dir.path().join("test.txt");
    // Writing succeeds in code mode.
    let mut write_cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    write_cmd.args([
        "--mode",
        "code",
        "write",
        file.to_str().unwrap(),
        "initial content",
    ]);
    write_cmd.assert().success();
    // Editing the freshly written file also succeeds in code mode.
    let mut edit_cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    edit_cmd.args([
        "--mode",
        "code",
        "edit",
        file.to_str().unwrap(),
        "initial",
        "modified",
    ]);
    edit_cmd.assert().success();
    assert_eq!(fs::read_to_string(&file).unwrap(), "modified content");
}
// Shell access is refused under the default plan mode.
#[test]
fn plan_mode_blocks_bash_operations() {
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["bash", "echo hello"]);
    cmd.assert().failure();
}
// Shell access is granted in code mode and stdout is forwarded verbatim.
#[test]
fn code_mode_allows_bash() {
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["--mode", "code", "bash", "echo hello"]);
    cmd.assert().success().stdout("hello\n");
}
// A command that outlives its timeout must fail rather than hang the CLI.
#[test]
fn bash_command_timeout_works() {
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.args(["--mode", "code", "bash", "sleep 10", "--timeout", "1000"]);
    cmd.assert().failure();
}
// A slash command defined in .owlen/commands/<name>.md is parsed (YAML
// frontmatter + body), has its $ARGUMENTS/$N placeholders substituted, and
// its resolved body printed to stdout.
#[test]
fn slash_command_works() {
    // Create .owlen/commands directory in temp dir
    let dir = tempdir().unwrap();
    let commands_dir = dir.path().join(".owlen/commands");
    fs::create_dir_all(&commands_dir).unwrap();
    // Create a test slash command; the raw string is the exact file content
    // the CLI parses, so it must start at column 0.
    let command_content = r#"---
description: "Test command"
---
Hello from slash command!
Args: $ARGUMENTS
First: $1
"#;
    let command_file = commands_dir.join("test.md");
    fs::write(&command_file, command_content).unwrap();
    // Execute slash command with args from the temp directory (the CLI
    // resolves .owlen/commands relative to its working directory).
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.current_dir(dir.path())
        .arg("--mode")
        .arg("code")
        .arg("slash")
        .arg("test")
        .arg("arg1");
    cmd.assert()
        .success()
        .stdout(predicates::str::contains("Hello from slash command!"))
        .stdout(predicates::str::contains("Args: arg1"))
        .stdout(predicates::str::contains("First: arg1"));
}
// An @<path> reference in a slash command body must be replaced by the
// referenced file's contents.
#[test]
fn slash_command_file_refs() {
    let dir = tempdir().unwrap();
    let commands_dir = dir.path().join(".owlen/commands");
    fs::create_dir_all(&commands_dir).unwrap();
    // A file for the command body to reference.
    let data_file = dir.path().join("data.txt");
    fs::write(&data_file, "Referenced content").unwrap();
    let body = format!("File content: @{}", data_file.display());
    fs::write(commands_dir.join("reftest.md"), body).unwrap();
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.current_dir(dir.path());
    cmd.args(["--mode", "code", "slash", "reftest"]);
    cmd.assert()
        .success()
        .stdout(predicates::str::contains("Referenced content"));
}
// Asking for an undefined slash command must fail cleanly.
#[test]
fn slash_command_not_found() {
    let dir = tempdir().unwrap();
    let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("owlen"));
    cmd.current_dir(dir.path());
    cmd.args(["--mode", "code", "slash", "nonexistent"]);
    cmd.assert().failure();
}

View File

@@ -1,16 +0,0 @@
# Stdio JSON-RPC client for Model Context Protocol servers.
[package]
name = "mcp-client"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true

# Dependencies sorted alphabetically per Cargo convention.
[dependencies]
color-eyre = "0.6"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1.39", features = ["process", "io-util", "sync", "time"] }

[dev-dependencies]
tempfile = "3.23.0"
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }

View File

@@ -1,272 +0,0 @@
use color_eyre::eyre::{Result, eyre};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::process::Stdio;
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader};
use tokio::process::{Child, Command};
use tokio::sync::Mutex;
/// JSON-RPC 2.0 request
#[derive(Debug, Serialize)]
struct JsonRpcRequest {
    // Always "2.0" per the JSON-RPC 2.0 specification.
    jsonrpc: String,
    // Monotonically increasing request id; matched against the response.
    id: u64,
    method: String,
    // Omitted from the wire entirely when the method takes no parameters.
    #[serde(skip_serializing_if = "Option::is_none")]
    params: Option<Value>,
}
/// JSON-RPC 2.0 response.
///
/// This type is only ever deserialized. The previous `skip_serializing_if`
/// attributes were serialize-side and therefore inert here; `Option` fields
/// already deserialize to `None` when the key is absent.
#[derive(Debug, Deserialize)]
struct JsonRpcResponse {
    // Protocol version echo ("2.0"); accepted but never inspected.
    #[allow(dead_code)]
    jsonrpc: String,
    // Id of the request this response answers.
    id: u64,
    // Exactly one of `result` / `error` is expected to be populated.
    result: Option<Value>,
    error: Option<JsonRpcError>,
}
/// Error object carried in a failed JSON-RPC response.
#[derive(Debug, Deserialize)]
struct JsonRpcError {
    // Numeric JSON-RPC error code (e.g. -32601 = method not found).
    code: i32,
    message: String,
}
/// MCP server capabilities
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ServerCapabilities {
    // Present when the server exposes tools.
    #[serde(default)]
    pub tools: Option<ToolsCapability>,
    // Present when the server exposes resources.
    #[serde(default)]
    pub resources: Option<ResourcesCapability>,
}
/// Tool-related capability flags advertised by an MCP server.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ToolsCapability {
    /// Whether the server emits tool-list-changed notifications.
    /// The MCP wire format is camelCase (`listChanged`); the alias accepts it
    /// while remaining compatible with snake_case payloads.
    #[serde(default, alias = "listChanged")]
    pub list_changed: Option<bool>,
}
/// Resource-related capability flags advertised by an MCP server.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ResourcesCapability {
    /// Whether resource subscriptions are supported.
    #[serde(default)]
    pub subscribe: Option<bool>,
    /// Whether the server emits resource-list-changed notifications.
    /// MCP sends camelCase (`listChanged`); the alias accepts both forms.
    #[serde(default, alias = "listChanged")]
    pub list_changed: Option<bool>,
}
/// MCP Tool definition as returned by `tools/list`.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct McpTool {
    pub name: String,
    #[serde(default)]
    pub description: Option<String>,
    /// JSON Schema describing the tool's arguments.
    /// MCP sends camelCase (`inputSchema`); the alias accepts both forms.
    #[serde(default, alias = "inputSchema")]
    pub input_schema: Option<Value>,
}
/// MCP Resource definition as returned by `resources/list`.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct McpResource {
    /// Resource URI (e.g. `file:///path`).
    pub uri: String,
    #[serde(default)]
    pub name: Option<String>,
    #[serde(default)]
    pub description: Option<String>,
    /// MIME type of the resource.
    /// MCP sends camelCase (`mimeType`); the alias accepts both forms.
    #[serde(default, alias = "mimeType")]
    pub mime_type: Option<String>,
}
/// MCP Client over stdio transport
pub struct McpClient {
    // Spawned server process; locked for the duration of each request so
    // request/response pairs never interleave across tasks.
    process: Mutex<Child>,
    // Next JSON-RPC request id to hand out.
    next_id: Mutex<u64>,
    // Label used for identification only; not part of the protocol.
    server_name: String,
}
impl McpClient {
    /// Create a new MCP client by spawning a subprocess
    ///
    /// `command`/`args` form the server command line; `server_name` is a
    /// human-readable label stored for later identification.
    pub async fn spawn(command: &str, args: &[&str], server_name: &str) -> Result<Self> {
        let mut child = Command::new(command)
            .args(args)
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;
        // Fail fast if the server died immediately (bad path, crash on boot).
        if child.try_wait()?.is_some() {
            return Err(eyre!("MCP server process exited immediately"));
        }
        Ok(Self {
            process: Mutex::new(child),
            next_id: Mutex::new(1),
            server_name: server_name.to_string(),
        })
    }

    /// Initialize the MCP connection and return the server's capabilities.
    pub async fn initialize(&self) -> Result<ServerCapabilities> {
        let params = serde_json::json!({
            "protocolVersion": "2024-11-05",
            "capabilities": {
                "roots": {
                    "listChanged": true
                }
            },
            "clientInfo": {
                "name": "owlen",
                "version": env!("CARGO_PKG_VERSION")
            }
        });
        let response = self.send_request("initialize", Some(params)).await?;
        let capabilities = response
            .get("capabilities")
            .ok_or_else(|| eyre!("No capabilities in initialize response"))?;
        Ok(serde_json::from_value(capabilities.clone())?)
    }

    /// List available tools via `tools/list`.
    pub async fn list_tools(&self) -> Result<Vec<McpTool>> {
        let response = self.send_request("tools/list", None).await?;
        let tools = response
            .get("tools")
            .ok_or_else(|| eyre!("No tools in response"))?;
        Ok(serde_json::from_value(tools.clone())?)
    }

    /// Call a tool by name; returns the `content` array from the response.
    pub async fn call_tool(&self, name: &str, arguments: Value) -> Result<Value> {
        let params = serde_json::json!({
            "name": name,
            "arguments": arguments
        });
        let response = self.send_request("tools/call", Some(params)).await?;
        response
            .get("content")
            .cloned()
            .ok_or_else(|| eyre!("No content in tool call response"))
    }

    /// List available resources via `resources/list`.
    pub async fn list_resources(&self) -> Result<Vec<McpResource>> {
        let response = self.send_request("resources/list", None).await?;
        let resources = response
            .get("resources")
            .ok_or_else(|| eyre!("No resources in response"))?;
        Ok(serde_json::from_value(resources.clone())?)
    }

    /// Read a resource by URI; returns the `contents` array from the response.
    pub async fn read_resource(&self, uri: &str) -> Result<Value> {
        let params = serde_json::json!({
            "uri": uri
        });
        let response = self.send_request("resources/read", Some(params)).await?;
        response
            .get("contents")
            .cloned()
            .ok_or_else(|| eyre!("No contents in resource read response"))
    }

    /// Get the server name
    pub fn server_name(&self) -> &str {
        &self.server_name
    }

    /// Send a JSON-RPC request and read exactly one newline-terminated
    /// response line.
    async fn send_request(&self, method: &str, params: Option<Value>) -> Result<Value> {
        // Allocate a fresh id; scope the lock so it is released immediately.
        let id = {
            let mut next_id = self.next_id.lock().await;
            let id = *next_id;
            *next_id += 1;
            id
        };
        let request = JsonRpcRequest {
            jsonrpc: "2.0".to_string(),
            id,
            method: method.to_string(),
            params,
        };
        let request_json = serde_json::to_string(&request)?;
        let mut process = self.process.lock().await;
        // Write the request as one newline-terminated line.
        let stdin = process.stdin.as_mut().ok_or_else(|| eyre!("No stdin"))?;
        stdin.write_all(request_json.as_bytes()).await?;
        stdin.write_all(b"\n").await?;
        stdin.flush().await?;
        // Read the response one byte at a time up to the terminating newline.
        // BUGFIX: the previous implementation wrapped stdout in a throwaway
        // BufReader per request; any bytes the BufReader buffered past the
        // first newline (server notifications, pipelined output) were
        // silently discarded when the reader was dropped. Byte-wise reads
        // never consume past the line terminator.
        let stdout = process.stdout.as_mut().ok_or_else(|| eyre!("No stdout"))?;
        let mut response_line: Vec<u8> = Vec::new();
        let mut byte = [0u8; 1];
        loop {
            if stdout.read(&mut byte).await? == 0 {
                // EOF before a newline: parse whatever was received.
                break;
            }
            if byte[0] == b'\n' {
                break;
            }
            response_line.push(byte[0]);
        }
        drop(process);
        let response: JsonRpcResponse = serde_json::from_slice(&response_line)?;
        if response.id != id {
            return Err(eyre!("Response ID mismatch: expected {}, got {}", id, response.id));
        }
        if let Some(error) = response.error {
            return Err(eyre!("MCP error {}: {}", error.code, error.message));
        }
        response.result.ok_or_else(|| eyre!("No result in response"))
    }

    /// Close the MCP connection
    pub async fn close(self) -> Result<()> {
        let mut process = self.process.into_inner();
        // Closing stdin delivers EOF, the conventional shutdown signal.
        drop(process.stdin.take());
        // Bound the wait so a hung server cannot hang us.
        tokio::time::timeout(
            std::time::Duration::from_secs(5),
            process.wait()
        ).await??;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // A serialized request must carry its method name and id verbatim.
    #[test]
    fn jsonrpc_request_serializes() {
        let request = JsonRpcRequest {
            jsonrpc: String::from("2.0"),
            id: 1,
            method: String::from("test"),
            params: Some(serde_json::json!({"key": "value"})),
        };
        let encoded = serde_json::to_string(&request).unwrap();
        for expected in ["\"method\":\"test\"", "\"id\":1"] {
            assert!(encoded.contains(expected));
        }
    }
}

View File

@@ -1,347 +0,0 @@
use mcp_client::McpClient;
use std::fs;
use tempfile::tempdir;
// Spawns a throwaway Python mock server speaking JSON-RPC over stdio and
// verifies the MCP initialize handshake surfaces the advertised capabilities.
// NOTE(review): the mock replies with snake_case capability keys
// ('list_changed'); real MCP servers send camelCase ('listChanged') — confirm
// the client tolerates both.
#[tokio::test]
async fn mcp_server_capability_negotiation() {
    // Create a mock MCP server script
    let dir = tempdir().unwrap();
    let server_script = dir.path().join("mock_server.py");
    let script_content = r#"#!/usr/bin/env python3
import sys
import json
def read_request():
    line = sys.stdin.readline()
    return json.loads(line)
def send_response(response):
    sys.stdout.write(json.dumps(response) + '\n')
    sys.stdout.flush()
# Main loop
while True:
    try:
        req = read_request()
        method = req.get('method')
        req_id = req.get('id')
        if method == 'initialize':
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'result': {
                    'protocolVersion': '2024-11-05',
                    'capabilities': {
                        'tools': {'list_changed': True},
                        'resources': {'subscribe': False}
                    },
                    'serverInfo': {
                        'name': 'test-server',
                        'version': '1.0.0'
                    }
                }
            })
        elif method == 'tools/list':
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'result': {
                    'tools': []
                }
            })
        else:
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'error': {
                    'code': -32601,
                    'message': f'Method not found: {method}'
                }
            })
    except EOFError:
        break
    except Exception as e:
        sys.stderr.write(f'Error: {e}\n')
        break
"#;
    fs::write(&server_script, script_content).unwrap();
    // Mark the script executable on unix (python3 is invoked directly anyway).
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        fs::set_permissions(&server_script, std::fs::Permissions::from_mode(0o755)).unwrap();
    }
    // Connect to the server
    let client = McpClient::spawn(
        "python3",
        &[server_script.to_str().unwrap()],
        "test-server"
    ).await.unwrap();
    // Initialize
    let capabilities = client.initialize().await.unwrap();
    // Verify capabilities
    assert!(capabilities.tools.is_some());
    assert_eq!(capabilities.tools.unwrap().list_changed, Some(true));
    client.close().await.unwrap();
}
// End-to-end tool flow against a Python mock: list tools, invoke the "echo"
// tool, and check the echoed payload round-trips through the client.
// NOTE(review): the mock advertises 'input_schema' in snake_case; real MCP
// servers send camelCase ('inputSchema') — confirm the client tolerates both.
#[tokio::test]
async fn mcp_tool_invocation() {
    let dir = tempdir().unwrap();
    let server_script = dir.path().join("mock_server.py");
    let script_content = r#"#!/usr/bin/env python3
import sys
import json
def read_request():
    line = sys.stdin.readline()
    return json.loads(line)
def send_response(response):
    sys.stdout.write(json.dumps(response) + '\n')
    sys.stdout.flush()
while True:
    try:
        req = read_request()
        method = req.get('method')
        req_id = req.get('id')
        params = req.get('params', {})
        if method == 'initialize':
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'result': {
                    'protocolVersion': '2024-11-05',
                    'capabilities': {
                        'tools': {}
                    },
                    'serverInfo': {
                        'name': 'test-server',
                        'version': '1.0.0'
                    }
                }
            })
        elif method == 'tools/list':
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'result': {
                    'tools': [
                        {
                            'name': 'echo',
                            'description': 'Echo the input',
                            'input_schema': {
                                'type': 'object',
                                'properties': {
                                    'message': {'type': 'string'}
                                }
                            }
                        }
                    ]
                }
            })
        elif method == 'tools/call':
            tool_name = params.get('name')
            arguments = params.get('arguments', {})
            if tool_name == 'echo':
                send_response({
                    'jsonrpc': '2.0',
                    'id': req_id,
                    'result': {
                        'content': [
                            {
                                'type': 'text',
                                'text': arguments.get('message', '')
                            }
                        ]
                    }
                })
            else:
                send_response({
                    'jsonrpc': '2.0',
                    'id': req_id,
                    'error': {
                        'code': -32602,
                        'message': f'Unknown tool: {tool_name}'
                    }
                })
        else:
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'error': {
                    'code': -32601,
                    'message': f'Method not found: {method}'
                }
            })
    except EOFError:
        break
    except Exception as e:
        sys.stderr.write(f'Error: {e}\n')
        break
"#;
    fs::write(&server_script, script_content).unwrap();
    // Mark the script executable on unix (python3 is invoked directly anyway).
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        fs::set_permissions(&server_script, std::fs::Permissions::from_mode(0o755)).unwrap();
    }
    let client = McpClient::spawn(
        "python3",
        &[server_script.to_str().unwrap()],
        "test-server"
    ).await.unwrap();
    client.initialize().await.unwrap();
    // List tools
    let tools = client.list_tools().await.unwrap();
    assert_eq!(tools.len(), 1);
    assert_eq!(tools[0].name, "echo");
    // Call tool
    let result = client.call_tool(
        "echo",
        serde_json::json!({"message": "Hello, MCP!"})
    ).await.unwrap();
    // Verify result: a list of content blocks whose first entry is the text.
    let content = result.as_array().unwrap();
    assert_eq!(content[0]["text"].as_str().unwrap(), "Hello, MCP!");
    client.close().await.unwrap();
}
#[tokio::test]
async fn mcp_resource_reads() {
    // Write a Python mock MCP server exposing a single resource
    // (`file:///test.txt`) via `resources/list` and `resources/read`.
    let dir = tempdir().unwrap();
    let server_script = dir.path().join("mock_server.py");
    let script_content = r#"#!/usr/bin/env python3
import sys
import json
def read_request():
    line = sys.stdin.readline()
    return json.loads(line)
def send_response(response):
    sys.stdout.write(json.dumps(response) + '\n')
    sys.stdout.flush()
while True:
    try:
        req = read_request()
        method = req.get('method')
        req_id = req.get('id')
        params = req.get('params', {})
        if method == 'initialize':
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'result': {
                    'protocolVersion': '2024-11-05',
                    'capabilities': {
                        'resources': {}
                    },
                    'serverInfo': {
                        'name': 'test-server',
                        'version': '1.0.0'
                    }
                }
            })
        elif method == 'resources/list':
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'result': {
                    'resources': [
                        {
                            'uri': 'file:///test.txt',
                            'name': 'Test File',
                            'description': 'A test file',
                            'mime_type': 'text/plain'
                        }
                    ]
                }
            })
        elif method == 'resources/read':
            uri = params.get('uri')
            if uri == 'file:///test.txt':
                send_response({
                    'jsonrpc': '2.0',
                    'id': req_id,
                    'result': {
                        'contents': [
                            {
                                'uri': uri,
                                'mime_type': 'text/plain',
                                'text': 'Hello from resource!'
                            }
                        ]
                    }
                })
            else:
                send_response({
                    'jsonrpc': '2.0',
                    'id': req_id,
                    'error': {
                        'code': -32602,
                        'message': f'Unknown resource: {uri}'
                    }
                })
        else:
            send_response({
                'jsonrpc': '2.0',
                'id': req_id,
                'error': {
                    'code': -32601,
                    'message': f'Method not found: {method}'
                }
            })
    except EOFError:
        break
    except Exception as e:
        sys.stderr.write(f'Error: {e}\n')
        break
"#;
    fs::write(&server_script, script_content).unwrap();
    // Mark the script executable on Unix (no-op elsewhere).
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        fs::set_permissions(&server_script, std::fs::Permissions::from_mode(0o755)).unwrap();
    }
    // Spawn the mock server under python3 and complete the MCP handshake.
    let client = McpClient::spawn(
        "python3",
        &[server_script.to_str().unwrap()],
        "test-server"
    ).await.unwrap();
    client.initialize().await.unwrap();
    // List resources
    let resources = client.list_resources().await.unwrap();
    assert_eq!(resources.len(), 1);
    assert_eq!(resources[0].uri, "file:///test.txt");
    // Read resource
    let contents = client.read_resource("file:///test.txt").await.unwrap();
    let contents_array = contents.as_array().unwrap();
    assert_eq!(contents_array[0]["text"].as_str().unwrap(), "Hello from resource!");
    client.close().await.unwrap();
}

View File

@@ -1,22 +0,0 @@
/target
### Rust template
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
### rust-analyzer template
# Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules)
rust-project.json

View File

@@ -1,16 +0,0 @@
[package]
name = "llm-ollama"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true
[dependencies]
reqwest = { version = "0.12", features = ["json", "stream"] }
tokio = { version = "1.39", features = ["rt-multi-thread"] }
futures = "0.3"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "1"
bytes = "1"
tokio-stream = "0.1.17"

View File

@@ -1,98 +0,0 @@
use crate::types::{ChatMessage, ChatResponseChunk};
use futures::{Stream, StreamExt, TryStreamExt};
use reqwest::Client;
use serde::Serialize;
use thiserror::Error;

/// Thin async client for the Ollama HTTP API (`/api/chat`).
#[derive(Debug, Clone)]
pub struct OllamaClient {
    http: Client,
    base_url: String,        // e.g. "http://localhost:11434"; stored without trailing '/'
    api_key: Option<String>, // For Ollama Cloud authentication (sent as a Bearer token)
}

/// Per-request options.
#[derive(Debug, Clone, Default)]
pub struct OllamaOptions {
    pub model: String,
    /// NOTE(review): currently unused — `chat_stream` always requests a
    /// streaming response. Kept for interface compatibility.
    pub stream: bool,
}

/// Errors surfaced by [`OllamaClient`].
#[derive(Error, Debug)]
pub enum OllamaError {
    #[error("http: {0}")]
    Http(#[from] reqwest::Error),
    #[error("json: {0}")]
    Json(#[from] serde_json::Error),
    #[error("protocol: {0}")]
    Protocol(String),
}

impl OllamaClient {
    /// Create a client for the given base URL (trailing slashes are stripped).
    pub fn new(base_url: impl Into<String>) -> Self {
        Self {
            http: Client::new(),
            base_url: base_url.into().trim_end_matches('/').to_string(),
            api_key: None,
        }
    }

    /// Attach an API key (used for Ollama Cloud authentication).
    pub fn with_api_key(mut self, api_key: impl Into<String>) -> Self {
        self.api_key = Some(api_key.into());
        self
    }

    /// Client preconfigured for Ollama Cloud: same API, different base URL.
    pub fn with_cloud() -> Self {
        Self::new("https://ollama.com")
    }

    /// POST `/api/chat` and stream back parsed NDJSON chunks.
    ///
    /// The HTTP body arrives as arbitrary byte chunks that do not necessarily
    /// align with line boundaries, so incomplete lines are buffered across
    /// chunks and parsed only once their terminating newline arrives. (The
    /// previous implementation parsed each chunk's lines directly and emitted
    /// a spurious `Json` error whenever one JSON object spanned two chunks.)
    pub async fn chat_stream(
        &self,
        messages: &[ChatMessage],
        opts: &OllamaOptions,
    ) -> Result<impl Stream<Item = Result<ChatResponseChunk, OllamaError>>, OllamaError> {
        #[derive(Serialize)]
        struct Body<'a> {
            model: &'a str,
            messages: &'a [ChatMessage],
            stream: bool,
        }
        let url = format!("{}/api/chat", self.base_url);
        // Always stream; see the note on `OllamaOptions::stream`.
        let body = Body { model: &opts.model, messages, stream: true };
        let mut req = self.http.post(url).json(&body);
        // Add Authorization header if API key is present
        if let Some(ref key) = self.api_key {
            req = req.header("Authorization", format!("Bearer {}", key));
        }
        let resp = req.send().await?;
        let bytes_stream = resp.bytes_stream();
        // NDJSON parser: accumulate bytes in a carry-over line buffer and
        // emit one parsed `ChatResponseChunk` per complete '\n'-terminated
        // line. Any partial tail stays in the buffer for the next chunk.
        let out = bytes_stream
            .map_err(OllamaError::Http)
            .scan(String::new(), |buf, chunk| {
                let results: Vec<Result<ChatResponseChunk, OllamaError>> = match chunk {
                    Ok(bytes) => {
                        buf.push_str(&String::from_utf8_lossy(&bytes));
                        let mut parsed = Vec::new();
                        // Drain every complete line; leave the partial tail.
                        while let Some(pos) = buf.find('\n') {
                            let line: String = buf.drain(..=pos).collect();
                            let trimmed = line.trim();
                            if !trimmed.is_empty() {
                                parsed.push(
                                    serde_json::from_str::<ChatResponseChunk>(trimmed)
                                        .map_err(OllamaError::Json),
                                );
                            }
                        }
                        parsed
                    }
                    // Transport errors pass straight through to the consumer.
                    Err(e) => vec![Err(e)],
                };
                futures::future::ready(Some(futures::stream::iter(results)))
            })
            .flatten(); // Stream<Item = Result<ChatResponseChunk, OllamaError>>
        Ok(out)
    }
}

View File

@@ -1,5 +0,0 @@
//! Public surface of the Ollama LLM crate: the streaming chat client and
//! its wire types, re-exported at the crate root for one-line imports.
pub mod client;
pub mod types;
pub use client::{OllamaClient, OllamaOptions};
pub use types::{ChatMessage, ChatResponseChunk};

View File

@@ -1,22 +0,0 @@
use serde::{Deserialize, Serialize};

/// A single chat message exchanged with the Ollama API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
    pub role: String, // "user" | "assistant" | "system"
    pub content: String,
}

/// One NDJSON chunk of a streaming `/api/chat` response.
/// All fields are optional since different chunks carry different
/// subsets of the payload.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChatResponseChunk {
    pub model: Option<String>,
    pub created_at: Option<String>,
    /// Incremental message delta, when present.
    pub message: Option<ChunkMessage>,
    /// Marks the final chunk of a response when `Some(true)`.
    pub done: Option<bool>,
    // Presumably total generation time in nanoseconds as reported by
    // Ollama — confirm against the Ollama API documentation.
    pub total_duration: Option<u64>,
}

/// The `message` payload nested inside a streaming chunk.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChunkMessage {
    pub role: Option<String>,
    pub content: Option<String>,
}

View File

@@ -1,12 +0,0 @@
use llm_ollama::{OllamaClient, OllamaOptions};
// Spinning up a tiny local server to stub NDJSON would be overkill for M0,
// and testing the line parser indirectly by mocking reqwest is complex.
// We therefore smoke-test that the client types construct, and leave
// end-to-end coverage to the CLI tests.
#[tokio::test]
async fn client_compiles_smoke() {
    // Construct each public entry point once; discarding the values is
    // enough to prove the API surface builds and stays constructible.
    let _local = OllamaClient::new("http://localhost:11434");
    let _cloud = OllamaClient::with_cloud();
    let _opts = OllamaOptions {
        model: "qwen2.5".into(),
        stream: true,
    };
}

View File

@@ -0,0 +1,5 @@
# Owlen Anthropic
This crate is a placeholder for a future `owlen-core::Provider` implementation for the Anthropic (Claude) API.
This provider is not yet implemented. Contributions are welcome!

View File

@@ -0,0 +1,45 @@
[package]
name = "owlen-cli"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Command-line interface for OWLEN LLM client"
[features]
default = ["chat-client"]
chat-client = []
code-client = []
[[bin]]
name = "owlen"
path = "src/main.rs"
required-features = ["chat-client"]
[[bin]]
name = "owlen-code"
path = "src/code_main.rs"
required-features = ["code-client"]
[dependencies]
owlen-core = { path = "../owlen-core" }
owlen-tui = { path = "../owlen-tui" }
owlen-ollama = { path = "../owlen-ollama" }
# CLI framework
clap = { version = "4.0", features = ["derive"] }
# Async runtime
tokio = { workspace = true }
tokio-util = { workspace = true }
# TUI framework
ratatui = { workspace = true }
crossterm = { workspace = true }
# Utilities
anyhow = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

View File

@@ -0,0 +1,15 @@
# Owlen CLI
This crate is the command-line entry point for the Owlen application.
It is responsible for:
- Parsing command-line arguments.
- Loading the configuration.
- Initializing the providers.
- Starting the `owlen-tui` application.
There are two binaries:
- `owlen`: The main chat application.
- `owlen-code`: A specialized version for code-related tasks.

View File

@@ -0,0 +1,103 @@
//! OWLEN Code Mode - TUI client optimized for coding assistance
use anyhow::Result;
use clap::{Arg, Command};
use owlen_core::session::SessionController;
use owlen_ollama::OllamaProvider;
use owlen_tui::{config, ui, AppState, CodeApp, Event, EventHandler, SessionEvent};
use std::io;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use crossterm::{
event::{DisableMouseCapture, EnableMouseCapture},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{backend::CrosstermBackend, Terminal};
/// Entry point for the code-mode TUI: parse CLI args, build the Ollama
/// provider from configuration, then run the event loop inside an
/// alternate screen.
#[tokio::main]
async fn main() -> Result<()> {
    let matches = Command::new("owlen-code")
        .about("OWLEN Code Mode - TUI optimized for programming assistance")
        .version(env!("CARGO_PKG_VERSION"))
        .arg(
            Arg::new("model")
                .short('m')
                .long("model")
                .value_name("MODEL")
                .help("Preferred model to use for this session"),
        )
        .get_matches();
    // A config that fails to load falls back to defaults rather than aborting.
    let mut config = config::try_load_config().unwrap_or_default();
    // CLI -m/--model overrides the configured default model for this session.
    if let Some(model) = matches.get_one::<String>("model") {
        config.general.default_model = Some(model.clone());
    }
    let provider_cfg = config::ensure_ollama_config(&mut config).clone();
    let provider = Arc::new(OllamaProvider::from_config(
        &provider_cfg,
        Some(&config.general),
    )?);
    let controller = SessionController::new(provider, config.clone());
    let (mut app, mut session_rx) = CodeApp::new(controller);
    app.inner_mut().initialize_models().await?;
    // Background task translating terminal input into app events until cancelled.
    let cancellation_token = CancellationToken::new();
    let (event_tx, event_rx) = mpsc::unbounded_channel();
    let event_handler = EventHandler::new(event_tx, cancellation_token.clone());
    let event_handle = tokio::spawn(async move { event_handler.run().await });
    // Enter raw mode + alternate screen; torn down after the loop so the
    // user's shell is restored even when `run_app` returns an error.
    enable_raw_mode()?;
    let mut stdout = io::stdout();
    execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
    let backend = CrosstermBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    let result = run_app(&mut terminal, &mut app, event_rx, &mut session_rx).await;
    // Stop the input task and persist config changes (e.g. selected model).
    cancellation_token.cancel();
    event_handle.await?;
    // NOTE(review): a failure in `event_handle.await?` or `save_config` skips
    // the raw-mode/alternate-screen teardown below — consider restoring the
    // terminal before propagating those errors.
    config::save_config(app.inner().config())?;
    disable_raw_mode()?;
    execute!(
        terminal.backend_mut(),
        LeaveAlternateScreen,
        DisableMouseCapture
    )?;
    terminal.show_cursor()?;
    // Report the loop error only after the terminal has been restored.
    if let Err(err) = result {
        println!("{err:?}");
    }
    Ok(())
}
/// Drive the code-mode TUI event loop until the app requests quit.
///
/// Renders a frame, then waits for either a terminal/input event or a
/// session event. The `else` arm is required: `tokio::select!` panics at
/// runtime if every branch is disabled, which happens here once both
/// channel senders have been dropped. Exiting cleanly instead.
async fn run_app(
    terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
    app: &mut CodeApp,
    mut event_rx: mpsc::UnboundedReceiver<Event>,
    session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
) -> Result<()> {
    loop {
        terminal.draw(|f| ui::render_chat(f, app.inner_mut()))?;
        tokio::select! {
            Some(event) = event_rx.recv() => {
                if let AppState::Quit = app.handle_event(event).await? {
                    return Ok(());
                }
            }
            Some(session_event) = session_rx.recv() => {
                app.handle_session_event(session_event)?;
            }
            // Both channels closed: no further events can ever arrive, so
            // return instead of letting `select!` panic with all branches
            // disabled.
            else => return Ok(()),
        }
    }
}

View File

@@ -0,0 +1,124 @@
//! OWLEN CLI - Chat TUI client
use anyhow::Result;
use clap::{Arg, Command};
use owlen_core::session::SessionController;
use owlen_ollama::OllamaProvider;
use owlen_tui::{config, ui, AppState, ChatApp, Event, EventHandler, SessionEvent};
use std::io;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use crossterm::{
event::{DisableBracketedPaste, DisableMouseCapture, EnableBracketedPaste, EnableMouseCapture},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use ratatui::{backend::CrosstermBackend, Terminal};
/// Entry point for the chat TUI: parse CLI args, build the Ollama provider
/// from configuration, then run the event loop inside an alternate screen.
#[tokio::main]
async fn main() -> Result<()> {
    let matches = Command::new("owlen")
        .about("OWLEN - A chat-focused TUI client for Ollama")
        .version(env!("CARGO_PKG_VERSION"))
        .arg(
            Arg::new("model")
                .short('m')
                .long("model")
                .value_name("MODEL")
                .help("Preferred model to use for this session"),
        )
        .get_matches();
    // A config that fails to load falls back to defaults rather than aborting.
    let mut config = config::try_load_config().unwrap_or_default();
    // CLI -m/--model overrides the configured default model for this session.
    if let Some(model) = matches.get_one::<String>("model") {
        config.general.default_model = Some(model.clone());
    }
    // Prepare provider from configuration
    let provider_cfg = config::ensure_ollama_config(&mut config).clone();
    let provider = Arc::new(OllamaProvider::from_config(
        &provider_cfg,
        Some(&config.general),
    )?);
    let controller = SessionController::new(provider, config.clone());
    let (mut app, mut session_rx) = ChatApp::new(controller);
    app.initialize_models().await?;
    // Event infrastructure: background task translating terminal input into
    // app events until the cancellation token fires.
    let cancellation_token = CancellationToken::new();
    let (event_tx, event_rx) = mpsc::unbounded_channel();
    let event_handler = EventHandler::new(event_tx, cancellation_token.clone());
    let event_handle = tokio::spawn(async move { event_handler.run().await });
    // Terminal setup: raw mode + alternate screen, torn down after the loop
    // so the user's shell is restored even when `run_app` returns an error.
    enable_raw_mode()?;
    let mut stdout = io::stdout();
    execute!(
        stdout,
        EnterAlternateScreen,
        EnableMouseCapture,
        EnableBracketedPaste
    )?;
    let backend = CrosstermBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    let result = run_app(&mut terminal, &mut app, event_rx, &mut session_rx).await;
    // Shutdown
    cancellation_token.cancel();
    event_handle.await?;
    // Persist configuration updates (e.g., selected model)
    // NOTE(review): a failure in `event_handle.await?` or `save_config` skips
    // the raw-mode/alternate-screen teardown below — consider restoring the
    // terminal before propagating those errors.
    config::save_config(app.config())?;
    disable_raw_mode()?;
    execute!(
        terminal.backend_mut(),
        LeaveAlternateScreen,
        DisableMouseCapture,
        DisableBracketedPaste
    )?;
    terminal.show_cursor()?;
    // Report the loop error only after the terminal has been restored.
    if let Err(err) = result {
        println!("{err:?}");
    }
    Ok(())
}
/// Main chat event loop: render, pump pending LLM work, then wait on input,
/// session events, or a 100 ms tick that keeps the loading animation moving.
async fn run_app(
    terminal: &mut Terminal<CrosstermBackend<io::Stdout>>,
    app: &mut ChatApp,
    mut event_rx: mpsc::UnboundedReceiver<Event>,
    session_rx: &mut mpsc::UnboundedReceiver<SessionEvent>,
) -> Result<()> {
    loop {
        // Advance loading animation frame
        app.advance_loading_animation();
        terminal.draw(|f| ui::render_chat(f, app))?;
        // Process any pending LLM requests AFTER UI has been drawn
        app.process_pending_llm_request().await?;
        tokio::select! {
            Some(event) = event_rx.recv() => {
                if let AppState::Quit = app.handle_event(event).await? {
                    return Ok(());
                }
            }
            Some(session_event) = session_rx.recv() => {
                app.handle_session_event(session_event)?;
            }
            // Add a timeout to keep the animation going even when there are no events.
            // (This arm can never be disabled, which also keeps `select!` from
            // panicking if both channels close.)
            _ = tokio::time::sleep(tokio::time::Duration::from_millis(100)) => {
                // This will cause the loop to continue and advance the animation
            }
        }
    }
}

View File

@@ -0,0 +1,31 @@
[package]
name = "owlen-core"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Core traits and types for OWLEN LLM client"
[dependencies]
anyhow = "1.0.75"
log = "0.4.20"
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.105"
thiserror = "1.0.48"
tokio = { version = "1.32.0", features = ["full"] }
unicode-segmentation = "1.11"
unicode-width = "0.1"
uuid = { version = "1.4.1", features = ["v4", "serde"] }
textwrap = "0.16.0"
futures = "0.3.28"
async-trait = "0.1.73"
toml = "0.8.0"
shellexpand = "3.1.0"
dirs = "5.0"
ratatui = { workspace = true }
[dev-dependencies]
tokio-test = { workspace = true }
tempfile = { workspace = true }

View File

@@ -0,0 +1,12 @@
# Owlen Core
This crate provides the core abstractions and data structures for the Owlen ecosystem.
It defines the essential traits and types that enable communication with various LLM providers, manage sessions, and handle configuration.
## Key Components
- **`Provider` trait**: The fundamental abstraction for all LLM providers. Implement this trait to add support for a new provider.
- **`Session`**: Represents a single conversation, managing message history and context.
- **`Model`**: Defines the structure for LLM models, including their names and properties.
- **Configuration**: Handles loading and parsing of the application's configuration.

View File

@@ -0,0 +1,407 @@
use crate::provider::ProviderConfig;
use crate::Result;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::time::Duration;
/// Default location for the OWLEN configuration file
pub const DEFAULT_CONFIG_PATH: &str = "~/.config/owlen/config.toml";
/// Core configuration shared by all OWLEN clients
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
/// General application settings
pub general: GeneralSettings,
/// Provider specific configuration keyed by provider name
#[serde(default)]
pub providers: HashMap<String, ProviderConfig>,
/// UI preferences that frontends can opt into
#[serde(default)]
pub ui: UiSettings,
/// Storage related options
#[serde(default)]
pub storage: StorageSettings,
/// Input handling preferences
#[serde(default)]
pub input: InputSettings,
}
impl Default for Config {
fn default() -> Self {
let mut providers = HashMap::new();
providers.insert(
"ollama".to_string(),
ProviderConfig {
provider_type: "ollama".to_string(),
base_url: Some("http://localhost:11434".to_string()),
api_key: None,
extra: HashMap::new(),
},
);
Self {
general: GeneralSettings::default(),
providers,
ui: UiSettings::default(),
storage: StorageSettings::default(),
input: InputSettings::default(),
}
}
}
impl Config {
    /// Load configuration from disk, falling back to defaults when missing.
    ///
    /// A file that exists but fails to parse is an error; a missing file is
    /// not — it simply yields `Config::default()`.
    pub fn load(path: Option<&Path>) -> Result<Self> {
        let path = match path {
            Some(path) => path.to_path_buf(),
            None => default_config_path(),
        };
        if path.exists() {
            let content = fs::read_to_string(&path)?;
            let mut config: Config =
                toml::from_str(&content).map_err(|e| crate::Error::Config(e.to_string()))?;
            // Patch in anything a hand-edited file may have dropped.
            config.ensure_defaults();
            Ok(config)
        } else {
            Ok(Config::default())
        }
    }
    /// Persist configuration to disk, creating parent directories as needed.
    pub fn save(&self, path: Option<&Path>) -> Result<()> {
        let path = match path {
            Some(path) => path.to_path_buf(),
            None => default_config_path(),
        };
        if let Some(dir) = path.parent() {
            fs::create_dir_all(dir)?;
        }
        let content =
            toml::to_string_pretty(self).map_err(|e| crate::Error::Config(e.to_string()))?;
        fs::write(path, content)?;
        Ok(())
    }
    /// Get provider configuration by provider name
    pub fn provider(&self, name: &str) -> Option<&ProviderConfig> {
        self.providers.get(name)
    }
    /// Update or insert a provider configuration
    pub fn upsert_provider(&mut self, name: impl Into<String>, config: ProviderConfig) {
        self.providers.insert(name.into(), config);
    }
    /// Resolve default model in order of priority: explicit default, first cached model, provider fallback
    ///
    /// NOTE(review): when `models` is empty, the final fallback returns the
    /// configured default even though it was never validated against a model
    /// listing — confirm callers tolerate a possibly-nonexistent model id.
    pub fn resolve_default_model<'a>(
        &'a self,
        models: &'a [crate::types::ModelInfo],
    ) -> Option<&'a str> {
        if let Some(model) = self.general.default_model.as_deref() {
            // Accept a match on either the model id or its display name.
            if models.iter().any(|m| m.id == model || m.name == model) {
                return Some(model);
            }
        }
        if let Some(first) = models.first() {
            return Some(&first.id);
        }
        self.general.default_model.as_deref()
    }
    // Re-establish invariants that deserialization cannot guarantee:
    // a non-empty default provider name and an "ollama" provider entry.
    fn ensure_defaults(&mut self) {
        if self.general.default_provider.is_empty() {
            self.general.default_provider = "ollama".to_string();
        }
        if !self.providers.contains_key("ollama") {
            self.providers.insert(
                "ollama".to_string(),
                ProviderConfig {
                    provider_type: "ollama".to_string(),
                    base_url: Some("http://localhost:11434".to_string()),
                    api_key: None,
                    extra: HashMap::new(),
                },
            );
        }
    }
}
/// Default configuration path with the user's home directory expanded.
pub fn default_config_path() -> PathBuf {
    let expanded = shellexpand::tilde(DEFAULT_CONFIG_PATH);
    PathBuf::from(expanded.as_ref())
}
/// General behaviour settings shared across clients
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneralSettings {
    /// Default provider name for routing
    pub default_provider: String,
    /// Optional default model id
    #[serde(default)]
    pub default_model: Option<String>,
    /// Whether streaming responses are preferred
    #[serde(default = "GeneralSettings::default_streaming")]
    pub enable_streaming: bool,
    /// Optional path to a project context file automatically injected as system prompt
    #[serde(default)]
    pub project_context_file: Option<String>,
    /// TTL for cached model listings in seconds
    #[serde(default = "GeneralSettings::default_model_cache_ttl")]
    pub model_cache_ttl_secs: u64,
}
impl GeneralSettings {
    // Serde default: streaming on unless explicitly disabled.
    fn default_streaming() -> bool {
        true
    }
    // Serde default: cache model listings for 60 seconds.
    fn default_model_cache_ttl() -> u64 {
        60
    }
    /// Duration representation of model cache TTL.
    ///
    /// Clamped to a floor of 5 seconds regardless of the configured value.
    pub fn model_cache_ttl(&self) -> Duration {
        Duration::from_secs(self.model_cache_ttl_secs.max(5))
    }
}
impl Default for GeneralSettings {
    fn default() -> Self {
        Self {
            default_provider: "ollama".to_string(),
            default_model: Some("llama3.2:latest".to_string()),
            enable_streaming: Self::default_streaming(),
            project_context_file: Some("OWLEN.md".to_string()),
            model_cache_ttl_secs: Self::default_model_cache_ttl(),
        }
    }
}
/// UI preferences that consumers can respect as needed
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UiSettings {
#[serde(default = "UiSettings::default_theme")]
pub theme: String,
#[serde(default = "UiSettings::default_word_wrap")]
pub word_wrap: bool,
#[serde(default = "UiSettings::default_max_history_lines")]
pub max_history_lines: usize,
#[serde(default = "UiSettings::default_show_role_labels")]
pub show_role_labels: bool,
#[serde(default = "UiSettings::default_wrap_column")]
pub wrap_column: u16,
}
impl UiSettings {
fn default_theme() -> String {
"default_dark".to_string()
}
fn default_word_wrap() -> bool {
true
}
fn default_max_history_lines() -> usize {
2000
}
fn default_show_role_labels() -> bool {
true
}
fn default_wrap_column() -> u16 {
100
}
}
impl Default for UiSettings {
fn default() -> Self {
Self {
theme: Self::default_theme(),
word_wrap: Self::default_word_wrap(),
max_history_lines: Self::default_max_history_lines(),
show_role_labels: Self::default_show_role_labels(),
wrap_column: Self::default_wrap_column(),
}
}
}
/// Storage related preferences
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageSettings {
#[serde(default = "StorageSettings::default_conversation_dir")]
pub conversation_dir: Option<String>,
#[serde(default = "StorageSettings::default_auto_save")]
pub auto_save_sessions: bool,
#[serde(default = "StorageSettings::default_max_sessions")]
pub max_saved_sessions: usize,
#[serde(default = "StorageSettings::default_session_timeout")]
pub session_timeout_minutes: u64,
#[serde(default = "StorageSettings::default_generate_descriptions")]
pub generate_descriptions: bool,
}
impl StorageSettings {
fn default_conversation_dir() -> Option<String> {
None
}
fn default_auto_save() -> bool {
true
}
fn default_max_sessions() -> usize {
25
}
fn default_session_timeout() -> u64 {
120
}
fn default_generate_descriptions() -> bool {
true
}
/// Resolve storage directory path
/// Uses platform-specific data directory if not explicitly configured:
/// - Linux: ~/.local/share/owlen/sessions
/// - Windows: %APPDATA%\owlen\sessions
/// - macOS: ~/Library/Application Support/owlen/sessions
pub fn conversation_path(&self) -> PathBuf {
if let Some(ref dir) = self.conversation_dir {
PathBuf::from(shellexpand::tilde(dir).as_ref())
} else {
// Use platform-specific data directory
dirs::data_local_dir()
.map(|d| d.join("owlen").join("sessions"))
.unwrap_or_else(|| PathBuf::from("./owlen_sessions"))
}
}
}
impl Default for StorageSettings {
fn default() -> Self {
Self {
conversation_dir: None, // Use platform-specific defaults
auto_save_sessions: Self::default_auto_save(),
max_saved_sessions: Self::default_max_sessions(),
session_timeout_minutes: Self::default_session_timeout(),
generate_descriptions: Self::default_generate_descriptions(),
}
}
}
/// Input handling preferences shared across clients
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InputSettings {
#[serde(default = "InputSettings::default_multiline")]
pub multiline: bool,
#[serde(default = "InputSettings::default_history_size")]
pub history_size: usize,
#[serde(default = "InputSettings::default_tab_width")]
pub tab_width: u8,
#[serde(default = "InputSettings::default_confirm_send")]
pub confirm_send: bool,
}
impl InputSettings {
fn default_multiline() -> bool {
true
}
fn default_history_size() -> usize {
100
}
fn default_tab_width() -> u8 {
4
}
fn default_confirm_send() -> bool {
false
}
}
impl Default for InputSettings {
fn default() -> Self {
Self {
multiline: Self::default_multiline(),
history_size: Self::default_history_size(),
tab_width: Self::default_tab_width(),
confirm_send: Self::default_confirm_send(),
}
}
}
/// Convenience accessor for an Ollama provider entry, creating a default if missing
pub fn ensure_ollama_config(config: &mut Config) -> &ProviderConfig {
config
.providers
.entry("ollama".to_string())
.or_insert_with(|| ProviderConfig {
provider_type: "ollama".to_string(),
base_url: Some("http://localhost:11434".to_string()),
api_key: None,
extra: HashMap::new(),
})
}
/// Calculate absolute timeout for session data based on configuration.
///
/// The configured value is clamped to at least one minute, and the
/// minutes-to-seconds conversion saturates instead of overflowing (the
/// plain `* 60` would panic in debug builds for absurdly large values).
pub fn session_timeout(config: &Config) -> Duration {
    Duration::from_secs(config.storage.session_timeout_minutes.max(1).saturating_mul(60))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_storage_platform_specific_paths() {
let config = Config::default();
let path = config.storage.conversation_path();
// Verify it contains owlen/sessions
assert!(path.to_string_lossy().contains("owlen"));
assert!(path.to_string_lossy().contains("sessions"));
// Platform-specific checks
#[cfg(target_os = "linux")]
{
// Linux should use ~/.local/share/owlen/sessions
assert!(path.to_string_lossy().contains(".local/share"));
}
#[cfg(target_os = "windows")]
{
// Windows should use AppData
assert!(path.to_string_lossy().contains("AppData"));
}
#[cfg(target_os = "macos")]
{
// macOS should use ~/Library/Application Support
assert!(path
.to_string_lossy()
.contains("Library/Application Support"));
}
println!("Config conversation path: {}", path.display());
}
#[test]
fn test_storage_custom_path() {
let mut config = Config::default();
config.storage.conversation_dir = Some("~/custom/path".to_string());
let path = config.storage.conversation_path();
assert!(path.to_string_lossy().contains("custom/path"));
}
}

View File

@@ -0,0 +1,324 @@
use crate::storage::StorageManager;
use crate::types::{Conversation, Message};
use crate::Result;
use serde_json::{Number, Value};
use std::collections::{HashMap, VecDeque};
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};
use uuid::Uuid;
const STREAMING_FLAG: &str = "streaming";
const LAST_CHUNK_TS: &str = "last_chunk_ts";
const PLACEHOLDER_FLAG: &str = "placeholder";
/// Manage active and historical conversations, including streaming updates.
pub struct ConversationManager {
active: Conversation,
history: VecDeque<Conversation>,
message_index: HashMap<Uuid, usize>,
streaming: HashMap<Uuid, StreamingMetadata>,
max_history: usize,
}
#[derive(Debug, Clone)]
pub struct StreamingMetadata {
started: Instant,
last_update: Instant,
}
impl ConversationManager {
/// Create a new conversation manager with a default model
pub fn new(model: impl Into<String>) -> Self {
Self::with_history_capacity(model, 32)
}
/// Create with explicit history capacity
pub fn with_history_capacity(model: impl Into<String>, max_history: usize) -> Self {
let conversation = Conversation::new(model.into());
Self {
active: conversation,
history: VecDeque::new(),
message_index: HashMap::new(),
streaming: HashMap::new(),
max_history: max_history.max(1),
}
}
/// Access the active conversation
pub fn active(&self) -> &Conversation {
&self.active
}
/// Public mutable access to the active conversation
pub fn active_mut(&mut self) -> &mut Conversation {
&mut self.active
}
/// Replace the active conversation with a provided one, archiving the existing conversation if it contains data
pub fn load(&mut self, conversation: Conversation) {
if !self.active.messages.is_empty() {
self.archive_active();
}
self.message_index.clear();
for (idx, message) in conversation.messages.iter().enumerate() {
self.message_index.insert(message.id, idx);
}
self.stream_reset();
self.active = conversation;
}
/// Start a brand new conversation, archiving the previous one
pub fn start_new(&mut self, model: Option<String>, name: Option<String>) {
self.archive_active();
let model = model.unwrap_or_else(|| self.active.model.clone());
self.active = Conversation::new(model);
self.active.name = name;
self.message_index.clear();
self.stream_reset();
}
/// Archive the active conversation into history
pub fn archive_active(&mut self) {
if self.active.messages.is_empty() {
return;
}
let mut archived = self.active.clone();
archived.updated_at = std::time::SystemTime::now();
self.history.push_front(archived);
while self.history.len() > self.max_history {
self.history.pop_back();
}
}
/// Get immutable history
pub fn history(&self) -> impl Iterator<Item = &Conversation> {
self.history.iter()
}
/// Add a user message and return its identifier
pub fn push_user_message(&mut self, content: impl Into<String>) -> Uuid {
let message = Message::user(content.into());
self.register_message(message)
}
/// Add a system message and return its identifier
pub fn push_system_message(&mut self, content: impl Into<String>) -> Uuid {
let message = Message::system(content.into());
self.register_message(message)
}
/// Add an assistant message (non-streaming) and return its identifier
pub fn push_assistant_message(&mut self, content: impl Into<String>) -> Uuid {
let message = Message::assistant(content.into());
self.register_message(message)
}
/// Push an arbitrary message into the active conversation
pub fn push_message(&mut self, message: Message) -> Uuid {
self.register_message(message)
}
/// Start tracking a streaming assistant response, returning the message id to update
pub fn start_streaming_response(&mut self) -> Uuid {
let mut message = Message::assistant(String::new());
message
.metadata
.insert(STREAMING_FLAG.to_string(), Value::Bool(true));
let id = message.id;
self.register_message(message);
self.streaming.insert(
id,
StreamingMetadata {
started: Instant::now(),
last_update: Instant::now(),
},
);
id
}
/// Append streaming content to an assistant message.
///
/// Clears the message body first when its previous content was only a
/// placeholder, stamps the chunk's arrival time into metadata, and on the
/// final chunk flips the streaming flag off and drops the per-message
/// streaming bookkeeping.
pub fn append_stream_chunk(
    &mut self,
    message_id: Uuid,
    chunk: &str,
    is_final: bool,
) -> Result<()> {
    // Resolve the message's position in the active conversation;
    // unknown ids are an error.
    let index = self
        .message_index
        .get(&message_id)
        .copied()
        .ok_or_else(|| crate::Error::Unknown(format!("Unknown message id: {message_id}")))?;
    let conversation = self.active_mut();
    if let Some(message) = conversation.messages.get_mut(index) {
        // Placeholder text is replaced by real content, not appended to.
        let was_placeholder = message
            .metadata
            .remove(PLACEHOLDER_FLAG)
            .and_then(|v| v.as_bool())
            .unwrap_or(false);
        if was_placeholder {
            message.content.clear();
        }
        if !chunk.is_empty() {
            message.content.push_str(chunk);
        }
        message.timestamp = std::time::SystemTime::now();
        // Record this chunk's arrival time (unix millis) in message metadata.
        let millis = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_millis() as u64;
        message.metadata.insert(
            LAST_CHUNK_TS.to_string(),
            Value::Number(Number::from(millis)),
        );
        if is_final {
            // Terminal chunk: mark the message as no longer streaming and
            // drop its in-memory streaming metadata.
            message
                .metadata
                .insert(STREAMING_FLAG.to_string(), Value::Bool(false));
            self.streaming.remove(&message_id);
        } else if let Some(info) = self.streaming.get_mut(&message_id) {
            info.last_update = Instant::now();
        }
    }
    Ok(())
}
/// Set placeholder text for a streaming message
pub fn set_stream_placeholder(
&mut self,
message_id: Uuid,
text: impl Into<String>,
) -> Result<()> {
let index = self
.message_index
.get(&message_id)
.copied()
.ok_or_else(|| crate::Error::Unknown(format!("Unknown message id: {message_id}")))?;
if let Some(message) = self.active_mut().messages.get_mut(index) {
message.content = text.into();
message.timestamp = std::time::SystemTime::now();
message
.metadata
.insert(PLACEHOLDER_FLAG.to_string(), Value::Bool(true));
}
Ok(())
}
    /// Update the active model (used when the user changes model mid
    /// session); also bumps the conversation's `updated_at` timestamp.
    pub fn set_model(&mut self, model: impl Into<String>) {
        self.active.model = model.into();
        self.active.updated_at = std::time::SystemTime::now();
    }
    /// Provide read access to the cached streaming metadata.
    ///
    /// Returns `None` once a stream has completed or been expired.
    pub fn streaming_metadata(&self, message_id: &Uuid) -> Option<StreamingMetadata> {
        self.streaming.get(message_id).cloned()
    }
/// Remove inactive streaming messages that have stalled beyond the provided timeout
pub fn expire_stalled_streams(&mut self, idle_timeout: Duration) -> Vec<Uuid> {
let cutoff = Instant::now() - idle_timeout;
let mut expired = Vec::new();
self.streaming.retain(|id, meta| {
if meta.last_update < cutoff {
expired.push(*id);
false
} else {
true
}
});
expired
}
    /// Clear all state: active conversation contents, saved history, the
    /// id -> index map, and any in-flight stream bookkeeping.
    pub fn clear(&mut self) {
        self.active.clear();
        self.history.clear();
        self.message_index.clear();
        self.streaming.clear();
    }
fn register_message(&mut self, message: Message) -> Uuid {
let id = message.id;
let idx;
{
let conversation = self.active_mut();
idx = conversation.messages.len();
conversation.messages.push(message);
conversation.updated_at = std::time::SystemTime::now();
}
self.message_index.insert(id, idx);
id
}
fn stream_reset(&mut self) {
self.streaming.clear();
}
    /// Save the active conversation to disk; returns the path written.
    pub fn save_active(&self, storage: &StorageManager, name: Option<String>) -> Result<PathBuf> {
        storage.save_conversation(&self.active, name)
    }
    /// Save the active conversation to disk with an optional description.
    pub fn save_active_with_description(
        &self,
        storage: &StorageManager,
        name: Option<String>,
        description: Option<String>,
    ) -> Result<PathBuf> {
        storage.save_conversation_with_description(&self.active, name, description)
    }
    /// Load a conversation from disk and make it active.
    pub fn load_from_disk(
        &mut self,
        storage: &StorageManager,
        path: impl AsRef<Path>,
    ) -> Result<()> {
        let conversation = storage.load_conversation(path)?;
        self.load(conversation);
        Ok(())
    }
    /// List all saved sessions (associated function; no manager state used).
    pub fn list_saved_sessions(
        storage: &StorageManager,
    ) -> Result<Vec<crate::storage::SessionMeta>> {
        storage.list_sessions()
    }
}
impl StreamingMetadata {
    /// Duration since the stream started (monotonic clock).
    pub fn elapsed(&self) -> Duration {
        self.started.elapsed()
    }
    /// Duration since the last chunk was received; used for stall detection.
    pub fn idle_duration(&self) -> Duration {
        self.last_update.elapsed()
    }
    /// Timestamp when streaming started.
    pub fn started_at(&self) -> Instant {
        self.started
    }
    /// Timestamp of the most recent update.
    pub fn last_update_at(&self) -> Instant {
        self.last_update
    }
}

View File

@@ -0,0 +1,96 @@
use crate::types::Message;
/// Formats messages for display across different clients.
#[derive(Debug, Clone)]
pub struct MessageFormatter {
    // Target wrap column; clamped to >= 20 at construction.
    wrap_width: usize,
    show_role_labels: bool,
    // NOTE(review): not consulted by `format_message` below — confirm where
    // (or whether) empty-line preservation is applied.
    preserve_empty_lines: bool,
}
impl MessageFormatter {
    /// Create a new formatter; `wrap_width` is clamped to at least 20.
    pub fn new(wrap_width: usize, show_role_labels: bool) -> Self {
        Self {
            wrap_width: wrap_width.max(20),
            show_role_labels,
            preserve_empty_lines: false,
        }
    }
    /// Override whether empty lines should be preserved (builder style).
    pub fn with_preserve_empty(mut self, preserve: bool) -> Self {
        self.preserve_empty_lines = preserve;
        self
    }
    /// Update the wrap width (clamped to at least 20).
    pub fn set_wrap_width(&mut self, width: usize) {
        self.wrap_width = width.max(20);
    }
    /// Whether role labels should be shown alongside messages.
    pub fn show_role_labels(&self) -> bool {
        self.show_role_labels
    }
    /// Split a message's trimmed content into display lines.
    ///
    /// NOTE(review): neither `wrap_width` nor `preserve_empty_lines` is
    /// applied here — confirm whether wrapping happens in the caller.
    pub fn format_message(&self, message: &Message) -> Vec<String> {
        message
            .content
            .trim()
            .lines()
            .map(|s| s.to_string())
            .collect()
    }
    /// Extract thinking content from <think> tags, returning (content_without_think, thinking_content)
    /// This handles both complete and incomplete (streaming) think tags.
    ///
    /// Multiple closed blocks are joined with a blank line. An unclosed
    /// trailing `<think>` (mid-stream) consumes the rest of the input as
    /// thinking content. All offsets are byte positions into `content`.
    pub fn extract_thinking(&self, content: &str) -> (String, Option<String>) {
        let mut result = String::new();
        let mut thinking = String::new();
        let mut current_pos = 0;
        while let Some(start_pos) = content[current_pos..].find("<think>") {
            let abs_start = current_pos + start_pos;
            // Add content before <think> tag to result
            result.push_str(&content[current_pos..abs_start]);
            // Find closing tag
            if let Some(end_pos) = content[abs_start..].find("</think>") {
                let abs_end = abs_start + end_pos;
                let think_content = &content[abs_start + 7..abs_end]; // 7 = len("<think>")
                if !thinking.is_empty() {
                    thinking.push_str("\n\n");
                }
                thinking.push_str(think_content.trim());
                current_pos = abs_end + 8; // 8 = len("</think>")
            } else {
                // Unclosed tag - this is streaming content
                // Extract everything after <think> as thinking content
                let think_content = &content[abs_start + 7..]; // 7 = len("<think>")
                if !thinking.is_empty() {
                    thinking.push_str("\n\n");
                }
                thinking.push_str(think_content);
                current_pos = content.len();
                break;
            }
        }
        // Add remaining content
        result.push_str(&content[current_pos..]);
        let thinking_result = if thinking.is_empty() {
            None
        } else {
            Some(thinking)
        };
        (result, thinking_result)
    }
}

View File

@@ -0,0 +1,217 @@
use std::collections::VecDeque;
/// Text input buffer with history and cursor management.
#[derive(Debug, Clone)]
pub struct InputBuffer {
    // Current editable text; `cursor` is kept on a char boundary of it.
    buffer: String,
    // Byte offset of the cursor within `buffer`.
    cursor: usize,
    // Most-recent-first command history.
    history: VecDeque<String>,
    // `None` = editing a fresh line; `Some(i)` = browsing history entry `i`.
    history_index: Option<usize>,
    max_history: usize,
    pub multiline: bool,
    tab_width: u8,
}
impl InputBuffer {
    /// Create a new input buffer.
    ///
    /// `max_history` and `tab_width` are clamped to at least 1.
    pub fn new(max_history: usize, multiline: bool, tab_width: u8) -> Self {
        let capacity = max_history.max(1);
        Self {
            buffer: String::new(),
            cursor: 0,
            history: VecDeque::with_capacity(capacity),
            history_index: None,
            max_history: capacity,
            multiline,
            tab_width: tab_width.max(1),
        }
    }
    /// Get current text.
    pub fn text(&self) -> &str {
        self.buffer.as_str()
    }
    /// Current cursor position (byte offset).
    pub fn cursor(&self) -> usize {
        self.cursor
    }
    /// Replace buffer contents, placing the cursor at the end and leaving
    /// history browsing.
    pub fn set_text(&mut self, text: impl Into<String>) {
        self.buffer = text.into();
        self.cursor = self.buffer.len();
        self.history_index = None;
    }
    /// Clear the buffer and reset the cursor.
    pub fn clear(&mut self) {
        self.buffer.clear();
        self.cursor = 0;
        self.history_index = None;
    }
    /// Insert a character at the cursor position; tabs expand to spaces.
    pub fn insert_char(&mut self, ch: char) {
        if ch == '\t' {
            self.insert_tab();
        } else {
            self.buffer.insert(self.cursor, ch);
            self.cursor += ch.len_utf8();
        }
    }
    /// Insert text at the cursor.
    pub fn insert_text(&mut self, text: &str) {
        self.buffer.insert_str(self.cursor, text);
        self.cursor += text.len();
    }
    /// Insert `tab_width` spaces in place of a literal tab.
    pub fn insert_tab(&mut self) {
        let indent = " ".repeat(usize::from(self.tab_width));
        self.insert_text(&indent);
    }
    /// Remove the character before the cursor, if any.
    pub fn backspace(&mut self) {
        if self.cursor > 0 {
            let start = prev_char_boundary(&self.buffer, self.cursor);
            self.buffer.drain(start..self.cursor);
            self.cursor = start;
        }
    }
    /// Remove the character at the cursor, if any.
    pub fn delete(&mut self) {
        if self.cursor < self.buffer.len() {
            let end = next_char_boundary(&self.buffer, self.cursor);
            self.buffer.drain(self.cursor..end);
        }
    }
    /// Move the cursor left by one character.
    pub fn move_left(&mut self) {
        if self.cursor > 0 {
            self.cursor = prev_char_boundary(&self.buffer, self.cursor);
        }
    }
    /// Move the cursor right by one character.
    pub fn move_right(&mut self) {
        if self.cursor < self.buffer.len() {
            self.cursor = next_char_boundary(&self.buffer, self.cursor);
        }
    }
    /// Move the cursor to the start of the buffer.
    pub fn move_home(&mut self) {
        self.cursor = 0;
    }
    /// Move the cursor to the end of the buffer.
    pub fn move_end(&mut self) {
        self.cursor = self.buffer.len();
    }
    /// Take the current buffer, pushing non-blank text into history, and
    /// return it; the buffer is left empty with the cursor reset.
    pub fn commit_to_history(&mut self) -> String {
        let committed = std::mem::take(&mut self.buffer);
        if !committed.trim().is_empty() {
            self.push_history_entry(committed.clone());
        }
        self.cursor = 0;
        self.history_index = None;
        committed
    }
    /// Navigate to the previous (older) history entry; no-op at the oldest.
    pub fn history_previous(&mut self) {
        if self.history.is_empty() {
            return;
        }
        let target = match self.history_index {
            None => 0,
            Some(idx) if idx + 1 < self.history.len() => idx + 1,
            Some(_) => return, // already at the oldest entry
        };
        self.history_index = Some(target);
        if let Some(entry) = self.history.get(target) {
            self.buffer = entry.clone();
            self.cursor = self.buffer.len();
        }
    }
    /// Navigate to the next (newer) history entry; stepping past the newest
    /// entry returns to a blank line.
    pub fn history_next(&mut self) {
        if self.history.is_empty() {
            return;
        }
        match self.history_index {
            Some(idx) if idx > 0 => {
                let target = idx - 1;
                self.history_index = Some(target);
                if let Some(entry) = self.history.get(target) {
                    self.buffer = entry.clone();
                    self.cursor = self.buffer.len();
                }
            }
            Some(_) => {
                // Past the newest entry: leave history browsing.
                self.history_index = None;
                self.buffer.clear();
                self.cursor = 0;
            }
            None => {
                self.buffer.clear();
                self.cursor = 0;
            }
        }
    }
    /// Push a new entry into the history buffer, skipping consecutive
    /// duplicates and enforcing the capacity limit.
    pub fn push_history_entry(&mut self, entry: String) {
        if self.history.front() == Some(&entry) {
            return;
        }
        self.history.push_front(entry);
        while self.history.len() > self.max_history {
            self.history.pop_back();
        }
    }
}
/// Byte index of the character boundary immediately before `cursor`
/// (0 when the cursor is already at the start).
///
/// `cursor` must lie on a char boundary, which `InputBuffer` maintains.
fn prev_char_boundary(buffer: &str, cursor: usize) -> usize {
    // `next_back()` walks one char from the end — O(1), unlike the previous
    // `last()` which iterated the whole prefix.
    buffer[..cursor]
        .char_indices()
        .next_back()
        .map(|(idx, _)| idx)
        .unwrap_or(0)
}
/// Byte index of the character boundary immediately after `cursor`
/// (`buffer.len()` when the cursor is at or past the end).
fn next_char_boundary(buffer: &str, cursor: usize) -> usize {
    if cursor >= buffer.len() {
        return buffer.len();
    }
    match buffer[cursor..].chars().next() {
        Some(ch) => cursor + ch.len_utf8(),
        None => buffer.len(),
    }
}

View File

@@ -0,0 +1,65 @@
//! Core traits and types for OWLEN LLM client
//!
//! This crate provides the foundational abstractions for building
//! LLM providers, routers, and MCP (Model Context Protocol) adapters.
pub mod config;
pub mod conversation;
pub mod formatting;
pub mod input;
pub mod model;
pub mod provider;
pub mod router;
pub mod session;
pub mod storage;
pub mod theme;
pub mod types;
pub mod ui;
pub mod wrap_cursor;
pub use config::*;
pub use conversation::*;
pub use formatting::*;
pub use input::*;
pub use model::*;
pub use provider::*;
pub use router::*;
pub use session::*;
pub use theme::*;
/// Result type used throughout the OWLEN ecosystem
pub type Result<T> = std::result::Result<T, Error>;
/// Core error types for OWLEN
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// Provider-side failure; `#[from]` lets `?` convert `anyhow::Error`.
    #[error("Provider error: {0}")]
    Provider(#[from] anyhow::Error),
    #[error("Network error: {0}")]
    Network(String),
    #[error("Authentication error: {0}")]
    Auth(String),
    #[error("Configuration error: {0}")]
    Config(String),
    /// I/O failure; `#[from]` lets `?` convert `std::io::Error`.
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),
    #[error("Invalid input: {0}")]
    InvalidInput(String),
    #[error("Operation timed out: {0}")]
    Timeout(String),
    /// JSON (de)serialization failure; `#[from]` converts `serde_json::Error`.
    #[error("Serialization error: {0}")]
    Serialization(#[from] serde_json::Error),
    #[error("Storage error: {0}")]
    Storage(String),
    /// Catch-all for errors with no dedicated variant.
    #[error("Unknown error: {0}")]
    Unknown(String),
}

View File

@@ -0,0 +1,84 @@
use crate::types::ModelInfo;
use crate::Result;
use std::future::Future;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
#[derive(Default, Debug)]
struct ModelCache {
    // Model listing from the last successful refresh.
    models: Vec<ModelInfo>,
    // When the cache was last populated; `None` until the first refresh.
    last_refresh: Option<Instant>,
}
/// Caches model listings for improved selection performance
#[derive(Clone, Debug)]
pub struct ModelManager {
    // Shared, clonable handle to the cache.
    cache: Arc<RwLock<ModelCache>>,
    // How long a refreshed listing stays fresh.
    ttl: Duration,
}
impl ModelManager {
    /// Create a new manager with the desired cache TTL
    pub fn new(ttl: Duration) -> Self {
        Self {
            cache: Arc::new(RwLock::new(ModelCache::default())),
            ttl,
        }
    }
    /// Get cached models, refreshing via the provided fetcher when stale. Returns the up-to-date model list.
    ///
    /// NOTE(review): the freshness check and the write are separate lock
    /// acquisitions, so two concurrent callers that both see a stale cache
    /// will both invoke `fetcher`; the last writer wins. Benign for a cache,
    /// but confirm fetchers are safe to run concurrently.
    pub async fn get_or_refresh<F, Fut>(
        &self,
        force_refresh: bool,
        fetcher: F,
    ) -> Result<Vec<ModelInfo>>
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<Vec<ModelInfo>>>,
    {
        if !force_refresh {
            if let Some(models) = self.cached_if_fresh().await {
                return Ok(models);
            }
        }
        let models = fetcher().await?;
        let mut cache = self.cache.write().await;
        cache.models = models.clone();
        cache.last_refresh = Some(Instant::now());
        Ok(models)
    }
    /// Return cached models without refreshing (may be empty or stale)
    pub async fn cached(&self) -> Vec<ModelInfo> {
        self.cache.read().await.models.clone()
    }
    /// Drop cached models, forcing next call to refresh
    pub async fn invalidate(&self) {
        let mut cache = self.cache.write().await;
        cache.models.clear();
        cache.last_refresh = None;
    }
    /// Select a model by id or name from the cache
    pub async fn select(&self, identifier: &str) -> Option<ModelInfo> {
        let cache = self.cache.read().await;
        cache
            .models
            .iter()
            .find(|m| m.id == identifier || m.name == identifier)
            .cloned()
    }
    // Returns the cached list only when it is non-empty and within TTL.
    async fn cached_if_fresh(&self) -> Option<Vec<ModelInfo>> {
        let cache = self.cache.read().await;
        let fresh = matches!(cache.last_refresh, Some(ts) if ts.elapsed() < self.ttl);
        if fresh && !cache.models.is_empty() {
            Some(cache.models.clone())
        } else {
            None
        }
    }
}

View File

@@ -0,0 +1,170 @@
//! Provider trait and related types
use crate::{types::*, Result};
use futures::Stream;
use std::pin::Pin;
use std::sync::Arc;
/// A stream of chat responses
/// (boxed and pinned so providers can return any `Send` stream type).
pub type ChatStream = Pin<Box<dyn Stream<Item = Result<ChatResponse>> + Send>>;
/// Trait for LLM providers (Ollama, OpenAI, Anthropic, etc.)
///
/// # Example
///
/// ```
/// use std::pin::Pin;
/// use std::sync::Arc;
/// use futures::Stream;
/// use owlen_core::provider::{Provider, ProviderRegistry, ChatStream};
/// use owlen_core::types::{ChatRequest, ChatResponse, ModelInfo, Message};
/// use owlen_core::Result;
///
/// // 1. Create a mock provider
/// struct MockProvider;
///
/// #[async_trait::async_trait]
/// impl Provider for MockProvider {
/// fn name(&self) -> &str {
/// "mock"
/// }
///
/// async fn list_models(&self) -> Result<Vec<ModelInfo>> {
/// Ok(vec![ModelInfo {
/// provider: "mock".to_string(),
/// name: "mock-model".to_string(),
/// ..Default::default()
/// }])
/// }
///
/// async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
/// let content = format!("Response to: {}", request.messages.last().unwrap().content);
/// Ok(ChatResponse {
/// model: request.model,
/// message: Message { role: "assistant".to_string(), content, ..Default::default() },
/// ..Default::default()
/// })
/// }
///
/// async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
/// unimplemented!();
/// }
///
/// async fn health_check(&self) -> Result<()> {
/// Ok(())
/// }
/// }
///
/// // 2. Use the provider with a registry
/// #[tokio::main]
/// async fn main() {
/// let mut registry = ProviderRegistry::new();
/// registry.register(MockProvider);
///
/// let provider = registry.get("mock").unwrap();
/// let models = provider.list_models().await.unwrap();
/// assert_eq!(models[0].name, "mock-model");
///
/// let request = ChatRequest {
/// model: "mock-model".to_string(),
/// messages: vec![Message { role: "user".to_string(), content: "Hello".to_string(), ..Default::default() }],
/// ..Default::default()
/// };
///
/// let response = provider.chat(request).await.unwrap();
/// assert_eq!(response.message.content, "Response to: Hello");
/// }
/// ```
#[async_trait::async_trait]
pub trait Provider: Send + Sync {
    /// Get the name of this provider (used as the registry key)
    fn name(&self) -> &str;
    /// List available models from this provider
    async fn list_models(&self) -> Result<Vec<ModelInfo>>;
    /// Send a chat completion request
    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse>;
    /// Send a streaming chat completion request
    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream>;
    /// Check if the provider is available/healthy
    async fn health_check(&self) -> Result<()>;
    /// Get provider-specific configuration schema
    /// (defaults to an empty JSON object; override to advertise options)
    fn config_schema(&self) -> serde_json::Value {
        serde_json::json!({})
    }
}
/// Configuration for a provider
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ProviderConfig {
    /// Provider type identifier
    pub provider_type: String,
    /// Base URL for API calls
    pub base_url: Option<String>,
    /// API key or token
    pub api_key: Option<String>,
    /// Additional provider-specific configuration
    /// (`#[serde(flatten)]` captures any unrecognized top-level keys)
    #[serde(flatten)]
    pub extra: std::collections::HashMap<String, serde_json::Value>,
}
/// A registry of providers, keyed by each provider's `name()`.
pub struct ProviderRegistry {
    providers: std::collections::HashMap<String, Arc<dyn Provider>>,
}
impl ProviderRegistry {
    /// Create a new, empty provider registry
    pub fn new() -> Self {
        Self {
            providers: std::collections::HashMap::new(),
        }
    }
    /// Register a provider, taking ownership and wrapping it in an `Arc`
    pub fn register<P: Provider + 'static>(&mut self, provider: P) {
        self.register_arc(Arc::new(provider));
    }
    /// Register an already wrapped provider
    ///
    /// A provider with the same `name()` as an existing entry replaces it.
    pub fn register_arc(&mut self, provider: Arc<dyn Provider>) {
        let name = provider.name().to_string();
        self.providers.insert(name, provider);
    }
    /// Get a provider by name
    pub fn get(&self, name: &str) -> Option<Arc<dyn Provider>> {
        self.providers.get(name).cloned()
    }
    /// List all registered provider names (in arbitrary order)
    pub fn list_providers(&self) -> Vec<String> {
        self.providers.keys().cloned().collect()
    }
    /// Get all models from all providers
    ///
    /// Best-effort aggregation: providers that fail to list their models are
    /// silently skipped so one unavailable backend does not hide the rest.
    pub async fn list_all_models(&self) -> Result<Vec<ModelInfo>> {
        let mut all_models = Vec::new();
        for provider in self.providers.values() {
            // `if let Ok` replaces a match with an empty Err arm; the
            // error-swallowing behavior is deliberate (see doc above).
            if let Ok(mut models) = provider.list_models().await {
                all_models.append(&mut models);
            }
        }
        Ok(all_models)
    }
}
impl Default for ProviderRegistry {
    fn default() -> Self {
        Self::new()
    }
}

View File

@@ -0,0 +1,153 @@
//! Router for managing multiple providers and routing requests
use crate::{provider::*, types::*, Result};
use std::sync::Arc;
/// A router that can distribute requests across multiple providers
pub struct Router {
    /// Registered providers, keyed by name.
    registry: ProviderRegistry,
    /// Routing rules, kept sorted by descending priority.
    routing_rules: Vec<RoutingRule>,
    /// Fallback provider name used when no rule matches.
    default_provider: Option<String>,
}
/// A rule for routing requests to specific providers
#[derive(Debug, Clone)]
pub struct RoutingRule {
    /// Pattern to match against model names (`*`, `prefix*`, `*suffix`, or exact)
    pub model_pattern: String,
    /// Provider to route to
    pub provider: String,
    /// Priority (higher numbers are checked first)
    pub priority: u32,
}
impl Router {
    /// Create a new router with no providers, rules, or default provider.
    pub fn new() -> Self {
        Self {
            registry: ProviderRegistry::new(),
            routing_rules: Vec::new(),
            default_provider: None,
        }
    }
    /// Register a provider with the router
    pub fn register_provider<P: Provider + 'static>(&mut self, provider: P) {
        self.registry.register(provider);
    }
    /// Set the default provider used when no routing rule matches
    pub fn set_default_provider(&mut self, provider_name: String) {
        self.default_provider = Some(provider_name);
    }
    /// Add a routing rule, keeping the rule list sorted by descending priority
    pub fn add_routing_rule(&mut self, rule: RoutingRule) {
        self.routing_rules.push(rule);
        // Sort by priority (descending) so lookup can take the first match.
        self.routing_rules
            .sort_by(|a, b| b.priority.cmp(&a.priority));
    }
    /// Route a request to the appropriate provider
    pub async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
        let provider = self.find_provider_for_model(&request.model)?;
        provider.chat(request).await
    }
    /// Route a streaming request to the appropriate provider
    pub async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
        let provider = self.find_provider_for_model(&request.model)?;
        provider.chat_stream(request).await
    }
    /// List all available models from all providers
    pub async fn list_models(&self) -> Result<Vec<ModelInfo>> {
        self.registry.list_all_models().await
    }
    /// Find the appropriate provider for a given model.
    ///
    /// Resolution order: highest-priority matching rule, then the default
    /// provider, then — as a last resort — an arbitrary registered provider.
    /// The last step does NOT verify the provider actually serves `model`.
    fn find_provider_for_model(&self, model: &str) -> Result<Arc<dyn Provider>> {
        // Rules are pre-sorted by descending priority (see add_routing_rule).
        for rule in &self.routing_rules {
            if !self.matches_pattern(&rule.model_pattern, model) {
                continue;
            }
            if let Some(provider) = self.registry.get(&rule.provider) {
                return Ok(provider);
            }
        }
        // Fall back to the configured default provider.
        if let Some(default) = &self.default_provider {
            if let Some(provider) = self.registry.get(default) {
                return Ok(provider);
            }
        }
        // Last resort when routing isn't configured: return any registered
        // provider, without checking whether it offers this model.
        for provider_name in self.registry.list_providers() {
            if let Some(provider) = self.registry.get(&provider_name) {
                return Ok(provider);
            }
        }
        Err(crate::Error::Provider(anyhow::anyhow!(
            "No provider found for model: {}",
            model
        )))
    }
    /// Check if a model name matches a pattern.
    ///
    /// Supported forms: `*` (matches anything), `prefix*`, `*suffix`, or an
    /// exact match. An interior `*` is not treated as a wildcard.
    fn matches_pattern(&self, pattern: &str, model: &str) -> bool {
        if pattern == "*" {
            return true;
        }
        if let Some(prefix) = pattern.strip_suffix('*') {
            return model.starts_with(prefix);
        }
        if let Some(suffix) = pattern.strip_prefix('*') {
            return model.ends_with(suffix);
        }
        pattern == model
    }
    /// Get the configured routing rules (sorted by descending priority)
    pub fn get_routing_rules(&self) -> &[RoutingRule] {
        &self.routing_rules
    }
    /// Get the default provider name
    pub fn get_default_provider(&self) -> Option<&str> {
        self.default_provider.as_deref()
    }
}
impl Default for Router {
    /// Equivalent to `Router::new()`.
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Covers all four pattern forms: "*", prefix*, *suffix, and exact.
    #[test]
    fn test_pattern_matching() {
        let router = Router::new();
        assert!(router.matches_pattern("*", "any-model"));
        assert!(router.matches_pattern("gpt*", "gpt-4"));
        assert!(router.matches_pattern("gpt*", "gpt-3.5-turbo"));
        assert!(!router.matches_pattern("gpt*", "claude-3"));
        assert!(router.matches_pattern("*:latest", "llama2:latest"));
        assert!(router.matches_pattern("exact-match", "exact-match"));
        assert!(!router.matches_pattern("exact-match", "different-model"));
    }
}

View File

@@ -0,0 +1,384 @@
use crate::config::Config;
use crate::conversation::ConversationManager;
use crate::formatting::MessageFormatter;
use crate::input::InputBuffer;
use crate::model::ModelManager;
use crate::provider::{ChatStream, Provider};
use crate::types::{ChatParameters, ChatRequest, ChatResponse, Conversation, ModelInfo};
use crate::Result;
use std::sync::Arc;
use uuid::Uuid;
/// Outcome of submitting a chat request
pub enum SessionOutcome {
    /// Immediate response received (non-streaming)
    Complete(ChatResponse),
    /// Streaming response where chunks will arrive asynchronously
    Streaming {
        /// Id of the assistant message that incoming chunks should update.
        response_id: Uuid,
        /// The provider's chunk stream; the caller drives it to completion.
        stream: ChatStream,
    },
}
/// High-level controller encapsulating session state and provider interactions.
///
/// This is the main entry point for managing conversations and interacting with LLM providers.
///
/// # Example
///
/// ```
/// use std::sync::Arc;
/// use owlen_core::config::Config;
/// use owlen_core::provider::{Provider, ChatStream};
/// use owlen_core::session::{SessionController, SessionOutcome};
/// use owlen_core::types::{ChatRequest, ChatResponse, ChatParameters, Message, ModelInfo};
/// use owlen_core::Result;
///
/// // Mock provider for the example
/// struct MockProvider;
/// #[async_trait::async_trait]
/// impl Provider for MockProvider {
/// fn name(&self) -> &str { "mock" }
/// async fn list_models(&self) -> Result<Vec<ModelInfo>> { Ok(vec![]) }
/// async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
/// Ok(ChatResponse {
/// model: request.model,
/// message: Message::assistant("Hello back!".to_string()),
/// ..Default::default()
/// })
/// }
/// async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> { unimplemented!() }
/// async fn health_check(&self) -> Result<()> { Ok(()) }
/// }
///
/// #[tokio::main]
/// async fn main() {
/// let provider = Arc::new(MockProvider);
/// let config = Config::default();
/// let mut session_controller = SessionController::new(provider, config);
///
/// // Send a message
/// let outcome = session_controller.send_message(
/// "Hello".to_string(),
/// ChatParameters { stream: false, ..Default::default() }
/// ).await.unwrap();
///
/// // Check the response
/// if let SessionOutcome::Complete(response) = outcome {
/// assert_eq!(response.message.content, "Hello back!");
/// }
///
/// // The conversation now contains both messages
/// let messages = session_controller.conversation().messages.clone();
/// assert_eq!(messages.len(), 2);
/// assert_eq!(messages[0].content, "Hello");
/// assert_eq!(messages[1].content, "Hello back!");
/// }
/// ```
pub struct SessionController {
    /// Backend used for chat completions and model listing.
    provider: Arc<dyn Provider>,
    /// Active conversation plus history and stream bookkeeping.
    conversation: ConversationManager,
    /// TTL cache of provider model listings.
    model_manager: ModelManager,
    /// Editable user input line with command history.
    input_buffer: InputBuffer,
    /// Message display/formatting settings.
    formatter: MessageFormatter,
    /// Application configuration (mutable at runtime).
    config: Config,
}
impl SessionController {
    /// Create a new controller with the given provider and configuration.
    ///
    /// Seeds the conversation with the configured default model (or
    /// "ollama/default"), and wires the formatter, input buffer, and model
    /// cache from the corresponding config sections.
    pub fn new(provider: Arc<dyn Provider>, config: Config) -> Self {
        let model = config
            .general
            .default_model
            .clone()
            .unwrap_or_else(|| "ollama/default".to_string());
        let conversation =
            ConversationManager::with_history_capacity(model, config.storage.max_saved_sessions);
        // NOTE(review): `ui.word_wrap` is passed as the "preserve empty
        // lines" flag — confirm this mapping is intentional.
        let formatter =
            MessageFormatter::new(config.ui.wrap_column as usize, config.ui.show_role_labels)
                .with_preserve_empty(config.ui.word_wrap);
        let input_buffer = InputBuffer::new(
            config.input.history_size,
            config.input.multiline,
            config.input.tab_width,
        );
        let model_manager = ModelManager::new(config.general.model_cache_ttl());
        Self {
            provider,
            conversation,
            model_manager,
            input_buffer,
            formatter,
            config,
        }
    }
    /// Access the active conversation
    pub fn conversation(&self) -> &Conversation {
        self.conversation.active()
    }
    /// Mutable access to the conversation manager
    pub fn conversation_mut(&mut self) -> &mut ConversationManager {
        &mut self.conversation
    }
    /// Access input buffer
    pub fn input_buffer(&self) -> &InputBuffer {
        &self.input_buffer
    }
    /// Mutable input buffer access
    pub fn input_buffer_mut(&mut self) -> &mut InputBuffer {
        &mut self.input_buffer
    }
    /// Formatter for rendering messages
    pub fn formatter(&self) -> &MessageFormatter {
        &self.formatter
    }
    /// Update the wrap width of the message formatter
    pub fn set_formatter_wrap_width(&mut self, width: usize) {
        self.formatter.set_wrap_width(width);
    }
    /// Access configuration
    pub fn config(&self) -> &Config {
        &self.config
    }
    /// Mutable configuration access
    pub fn config_mut(&mut self) -> &mut Config {
        &mut self.config
    }
    /// Currently selected model identifier
    pub fn selected_model(&self) -> &str {
        &self.conversation.active().model
    }
    /// Change current model for upcoming requests; also records the choice
    /// as the configured default model.
    pub fn set_model(&mut self, model: String) {
        self.conversation.set_model(model.clone());
        self.config.general.default_model = Some(model);
    }
    /// Retrieve cached models, refreshing from provider as needed
    pub async fn models(&self, force_refresh: bool) -> Result<Vec<ModelInfo>> {
        self.model_manager
            .get_or_refresh(force_refresh, || async {
                self.provider.list_models().await
            })
            .await
    }
    /// Attempt to select the configured default model from cached models.
    ///
    /// If a default is configured but absent from `models`, the current
    /// selection is left unchanged; with no configured default, the first
    /// listed model (if any) is selected.
    pub fn ensure_default_model(&mut self, models: &[ModelInfo]) {
        if let Some(default) = self.config.general.default_model.clone() {
            if models.iter().any(|m| m.id == default || m.name == default) {
                self.set_model(default);
            }
        } else if let Some(model) = models.first() {
            self.set_model(model.id.clone());
        }
    }
    /// Submit a user message; optionally stream the response.
    ///
    /// Streaming is forced on when `config.general.enable_streaming` is set,
    /// regardless of `parameters.stream`.
    pub async fn send_message(
        &mut self,
        content: String,
        mut parameters: ChatParameters,
    ) -> Result<SessionOutcome> {
        let streaming = parameters.stream || self.config.general.enable_streaming;
        parameters.stream = streaming;
        self.conversation.push_user_message(content);
        self.send_request_with_current_conversation(parameters)
            .await
    }
    /// Send a request using the current conversation without adding a new user message.
    ///
    /// On provider failure, an assistant-visible error message is appended
    /// to the conversation and the error is also returned to the caller.
    pub async fn send_request_with_current_conversation(
        &mut self,
        mut parameters: ChatParameters,
    ) -> Result<SessionOutcome> {
        let streaming = parameters.stream || self.config.general.enable_streaming;
        parameters.stream = streaming;
        let request = ChatRequest {
            model: self.conversation.active().model.clone(),
            messages: self.conversation.active().messages.clone(),
            parameters,
        };
        if streaming {
            match self.provider.chat_stream(request).await {
                Ok(stream) => {
                    // Allocate the assistant message that chunks will update.
                    let response_id = self.conversation.start_streaming_response();
                    Ok(SessionOutcome::Streaming {
                        response_id,
                        stream,
                    })
                }
                Err(err) => {
                    self.conversation
                        .push_assistant_message(format!("Error starting stream: {}", err));
                    Err(err)
                }
            }
        } else {
            match self.provider.chat(request).await {
                Ok(response) => {
                    self.conversation.push_message(response.message.clone());
                    Ok(SessionOutcome::Complete(response))
                }
                Err(err) => {
                    self.conversation
                        .push_assistant_message(format!("Error: {}", err));
                    Err(err)
                }
            }
        }
    }
    /// Mark a streaming response message with placeholder content
    pub fn mark_stream_placeholder(&mut self, message_id: Uuid, text: &str) -> Result<()> {
        self.conversation
            .set_stream_placeholder(message_id, text.to_string())
    }
    /// Apply a streaming chunk to the conversation; `chunk.is_final`
    /// terminates the stream bookkeeping for `message_id`.
    pub fn apply_stream_chunk(&mut self, message_id: Uuid, chunk: &ChatResponse) -> Result<()> {
        self.conversation
            .append_stream_chunk(message_id, &chunk.message.content, chunk.is_final)
    }
    /// Snapshot of conversation history (cloned)
    pub fn history(&self) -> Vec<Conversation> {
        self.conversation.history().cloned().collect()
    }
    /// Start a new conversation optionally targeting a specific model
    pub fn start_new_conversation(&mut self, model: Option<String>, name: Option<String>) {
        self.conversation.start_new(model, name);
    }
    /// Clear current conversation messages
    pub fn clear(&mut self) {
        self.conversation.clear();
    }
/// Generate a short AI description for the current conversation
pub async fn generate_conversation_description(&self) -> Result<String> {
let conv = self.conversation.active();
// If conversation is empty or very short, return a simple description
if conv.messages.is_empty() {
return Ok("Empty conversation".to_string());
}
if conv.messages.len() == 1 {
let first_msg = &conv.messages[0];
let preview = first_msg.content.chars().take(50).collect::<String>();
return Ok(format!(
"{}{}",
preview,
if first_msg.content.len() > 50 {
"..."
} else {
""
}
));
}
// Build a summary prompt from the first few and last few messages
let mut summary_messages = Vec::new();
// Add system message to guide the description
summary_messages.push(crate::types::Message::system(
"Summarize this conversation in 1-2 short sentences (max 100 characters). \
Focus on the main topic or question being discussed. Be concise and descriptive."
.to_string(),
));
// Include first message
if let Some(first) = conv.messages.first() {
summary_messages.push(first.clone());
}
// Include a middle message if conversation is long enough
if conv.messages.len() > 4 {
if let Some(mid) = conv.messages.get(conv.messages.len() / 2) {
summary_messages.push(mid.clone());
}
}
// Include last message
if let Some(last) = conv.messages.last() {
if conv.messages.len() > 1 {
summary_messages.push(last.clone());
}
}
// Create a summarization request
let request = crate::types::ChatRequest {
model: conv.model.clone(),
messages: summary_messages,
parameters: crate::types::ChatParameters {
temperature: Some(0.3), // Lower temperature for more focused summaries
max_tokens: Some(50), // Keep it short
stream: false,
extra: std::collections::HashMap::new(),
},
};
// Get the summary from the provider
match self.provider.chat(request).await {
Ok(response) => {
let description = response.message.content.trim().to_string();
// If description is empty, use fallback
if description.is_empty() {
let first_msg = &conv.messages[0];
let preview = first_msg.content.chars().take(50).collect::<String>();
return Ok(format!(
"{}{}",
preview,
if first_msg.content.len() > 50 {
"..."
} else {
""
}
));
}
// Truncate if too long
let truncated = if description.len() > 100 {
format!("{}...", description.chars().take(97).collect::<String>())
} else {
description
};
Ok(truncated)
}
Err(_e) => {
// Fallback to simple description if AI generation fails
let first_msg = &conv.messages[0];
let preview = first_msg.content.chars().take(50).collect::<String>();
Ok(format!(
"{}{}",
preview,
if first_msg.content.len() > 50 {
"..."
} else {
""
}
))
}
}
}
}

View File

@@ -0,0 +1,309 @@
//! Session persistence and storage management
use crate::types::Conversation;
use crate::{Error, Result};
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::{Path, PathBuf};
use std::time::SystemTime;
/// Metadata about a saved session
///
/// A lightweight summary extracted from a session file on disk. Built by
/// `StorageManager::list_sessions` so callers can show a session picker
/// without holding full message histories in memory.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionMeta {
    /// Session file path
    pub path: PathBuf,
    /// Conversation ID
    pub id: uuid::Uuid,
    /// Optional session name
    pub name: Option<String>,
    /// Optional AI-generated description
    pub description: Option<String>,
    /// Number of messages in the conversation
    pub message_count: usize,
    /// Model used
    pub model: String,
    /// When the session was created
    pub created_at: SystemTime,
    /// When the session was last updated
    pub updated_at: SystemTime,
}
/// Storage manager for persisting conversations
///
/// Each session is stored as one pretty-printed JSON file inside a single
/// sessions directory.
pub struct StorageManager {
    // Directory holding one `.json` file per saved session.
    sessions_dir: PathBuf,
}
impl StorageManager {
/// Create a new storage manager with the default sessions directory
pub fn new() -> Result<Self> {
let sessions_dir = Self::default_sessions_dir()?;
Self::with_directory(sessions_dir)
}
/// Create a storage manager with a custom sessions directory
pub fn with_directory(sessions_dir: PathBuf) -> Result<Self> {
// Ensure the directory exists
if !sessions_dir.exists() {
fs::create_dir_all(&sessions_dir).map_err(|e| {
Error::Storage(format!("Failed to create sessions directory: {}", e))
})?;
}
Ok(Self { sessions_dir })
}
/// Get the default sessions directory
/// - Linux: ~/.local/share/owlen/sessions
/// - Windows: %APPDATA%\owlen\sessions
/// - macOS: ~/Library/Application Support/owlen/sessions
pub fn default_sessions_dir() -> Result<PathBuf> {
let data_dir = dirs::data_local_dir()
.ok_or_else(|| Error::Storage("Could not determine data directory".to_string()))?;
Ok(data_dir.join("owlen").join("sessions"))
}
/// Save a conversation to disk
pub fn save_conversation(
&self,
conversation: &Conversation,
name: Option<String>,
) -> Result<PathBuf> {
self.save_conversation_with_description(conversation, name, None)
}
/// Save a conversation to disk with an optional description
pub fn save_conversation_with_description(
&self,
conversation: &Conversation,
name: Option<String>,
description: Option<String>,
) -> Result<PathBuf> {
let filename = if let Some(ref session_name) = name {
// Use provided name, sanitized
let sanitized = sanitize_filename(session_name);
format!("{}_{}.json", conversation.id, sanitized)
} else {
// Use conversation ID and timestamp
let timestamp = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
format!("{}_{}.json", conversation.id, timestamp)
};
let path = self.sessions_dir.join(filename);
// Create a saveable version with the name and description
let mut save_conv = conversation.clone();
if name.is_some() {
save_conv.name = name;
}
if description.is_some() {
save_conv.description = description;
}
let json = serde_json::to_string_pretty(&save_conv)
.map_err(|e| Error::Storage(format!("Failed to serialize conversation: {}", e)))?;
fs::write(&path, json)
.map_err(|e| Error::Storage(format!("Failed to write session file: {}", e)))?;
Ok(path)
}
/// Load a conversation from disk
pub fn load_conversation(&self, path: impl AsRef<Path>) -> Result<Conversation> {
let content = fs::read_to_string(path.as_ref())
.map_err(|e| Error::Storage(format!("Failed to read session file: {}", e)))?;
let conversation: Conversation = serde_json::from_str(&content)
.map_err(|e| Error::Storage(format!("Failed to parse session file: {}", e)))?;
Ok(conversation)
}
/// List all saved sessions with metadata
pub fn list_sessions(&self) -> Result<Vec<SessionMeta>> {
let mut sessions = Vec::new();
let entries = fs::read_dir(&self.sessions_dir)
.map_err(|e| Error::Storage(format!("Failed to read sessions directory: {}", e)))?;
for entry in entries {
let entry = entry
.map_err(|e| Error::Storage(format!("Failed to read directory entry: {}", e)))?;
let path = entry.path();
if path.extension().and_then(|s| s.to_str()) != Some("json") {
continue;
}
// Try to load the conversation to extract metadata
match self.load_conversation(&path) {
Ok(conv) => {
sessions.push(SessionMeta {
path: path.clone(),
id: conv.id,
name: conv.name.clone(),
description: conv.description.clone(),
message_count: conv.messages.len(),
model: conv.model.clone(),
created_at: conv.created_at,
updated_at: conv.updated_at,
});
}
Err(_) => {
// Skip files that can't be parsed
continue;
}
}
}
// Sort by updated_at, most recent first
sessions.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
Ok(sessions)
}
/// Delete a saved session
pub fn delete_session(&self, path: impl AsRef<Path>) -> Result<()> {
fs::remove_file(path.as_ref())
.map_err(|e| Error::Storage(format!("Failed to delete session file: {}", e)))
}
/// Get the sessions directory path
pub fn sessions_dir(&self) -> &Path {
&self.sessions_dir
}
}
impl Default for StorageManager {
    /// Equivalent to [`StorageManager::new`].
    ///
    /// # Panics
    /// Panics if the default sessions directory cannot be determined or
    /// created (e.g. no local data directory is available on this platform).
    fn default() -> Self {
        Self::new().expect("Failed to create default storage manager")
    }
}
/// Sanitize a filename by removing invalid characters.
///
/// Alphanumerics, `_`, and `-` pass through unchanged; whitespace becomes
/// `_`; every other character becomes `-`. The result is capped at 50
/// characters.
fn sanitize_filename(name: &str) -> String {
    name.chars()
        // Truncate before mapping: the mapping is strictly 1:1 per char, so
        // this is equivalent to the old collect-then-retake approach but
        // avoids building an intermediate String.
        .take(50)
        .map(|c| {
            if c.is_alphanumeric() || c == '_' || c == '-' {
                c
            } else if c.is_whitespace() {
                '_'
            } else {
                '-'
            }
        })
        .collect()
}
#[cfg(test)]
mod tests {
    //! Unit tests for session persistence. Filesystem-backed tests use a
    //! fresh `TempDir` so they never touch the real sessions directory.
    use super::*;
    use crate::types::Message;
    use tempfile::TempDir;
    #[test]
    fn test_platform_specific_default_path() {
        let path = StorageManager::default_sessions_dir().unwrap();
        // Verify it contains owlen/sessions
        assert!(path.to_string_lossy().contains("owlen"));
        assert!(path.to_string_lossy().contains("sessions"));
        // Platform-specific checks
        #[cfg(target_os = "linux")]
        {
            // Linux should use ~/.local/share/owlen/sessions
            assert!(path.to_string_lossy().contains(".local/share"));
        }
        #[cfg(target_os = "windows")]
        {
            // Windows should use AppData
            assert!(path.to_string_lossy().contains("AppData"));
        }
        #[cfg(target_os = "macos")]
        {
            // macOS should use ~/Library/Application Support
            assert!(path
                .to_string_lossy()
                .contains("Library/Application Support"));
        }
        println!("Default sessions directory: {}", path.display());
    }
    #[test]
    fn test_sanitize_filename() {
        // Whitespace -> `_`, any other disallowed character -> `-`.
        assert_eq!(sanitize_filename("Hello World"), "Hello_World");
        assert_eq!(sanitize_filename("test/path\\file"), "test-path-file");
        assert_eq!(sanitize_filename("file:name?"), "file-name-");
    }
    #[test]
    fn test_save_and_load_conversation() {
        let temp_dir = TempDir::new().unwrap();
        let storage = StorageManager::with_directory(temp_dir.path().to_path_buf()).unwrap();
        let mut conv = Conversation::new("test-model".to_string());
        conv.messages.push(Message::user("Hello".to_string()));
        conv.messages
            .push(Message::assistant("Hi there!".to_string()));
        // Save conversation
        let path = storage
            .save_conversation(&conv, Some("test_session".to_string()))
            .unwrap();
        assert!(path.exists());
        // Load conversation and verify the round-trip preserved id, model,
        // messages, and the session name supplied at save time.
        let loaded = storage.load_conversation(&path).unwrap();
        assert_eq!(loaded.id, conv.id);
        assert_eq!(loaded.model, conv.model);
        assert_eq!(loaded.messages.len(), 2);
        assert_eq!(loaded.name, Some("test_session".to_string()));
    }
    #[test]
    fn test_list_sessions() {
        let temp_dir = TempDir::new().unwrap();
        let storage = StorageManager::with_directory(temp_dir.path().to_path_buf()).unwrap();
        // Create multiple sessions
        for i in 0..3 {
            let mut conv = Conversation::new("test-model".to_string());
            conv.messages.push(Message::user(format!("Message {}", i)));
            storage
                .save_conversation(&conv, Some(format!("session_{}", i)))
                .unwrap();
        }
        // List sessions
        let sessions = storage.list_sessions().unwrap();
        assert_eq!(sessions.len(), 3);
        // Check that sessions are sorted by updated_at (most recent first)
        for i in 0..sessions.len() - 1 {
            assert!(sessions[i].updated_at >= sessions[i + 1].updated_at);
        }
    }
    #[test]
    fn test_delete_session() {
        let temp_dir = TempDir::new().unwrap();
        let storage = StorageManager::with_directory(temp_dir.path().to_path_buf()).unwrap();
        let conv = Conversation::new("test-model".to_string());
        let path = storage.save_conversation(&conv, None).unwrap();
        assert!(path.exists());
        storage.delete_session(&path).unwrap();
        assert!(!path.exists());
    }
}

View File

@@ -0,0 +1,645 @@
//! Theming system for OWLEN TUI
//!
//! Provides customizable color schemes for all UI components.
use ratatui::style::Color;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
/// A complete theme definition for OWLEN TUI
///
/// Every color field serializes to/from a string — either a named color
/// (e.g. "lightblue") or a hex value ("#rrggbb") — via the
/// `serialize_color`/`deserialize_color` helpers defined below.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Theme {
    /// Name of the theme
    pub name: String,
    /// Default text color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub text: Color,
    /// Default background color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub background: Color,
    /// Border color for focused panels
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub focused_panel_border: Color,
    /// Border color for unfocused panels
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub unfocused_panel_border: Color,
    /// Color for user message role indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub user_message_role: Color,
    /// Color for assistant message role indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub assistant_message_role: Color,
    /// Color for thinking panel title
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub thinking_panel_title: Color,
    /// Background color for command bar
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub command_bar_background: Color,
    /// Status line background color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub status_background: Color,
    /// Color for Normal mode indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub mode_normal: Color,
    /// Color for Editing mode indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub mode_editing: Color,
    /// Color for Model Selection mode indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub mode_model_selection: Color,
    /// Color for Provider Selection mode indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub mode_provider_selection: Color,
    /// Color for Help mode indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub mode_help: Color,
    /// Color for Visual mode indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub mode_visual: Color,
    /// Color for Command mode indicator
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub mode_command: Color,
    /// Selection/highlight background color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub selection_bg: Color,
    /// Selection/highlight foreground color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub selection_fg: Color,
    /// Cursor indicator color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub cursor: Color,
    /// Placeholder text color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub placeholder: Color,
    /// Warning/error message color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub error: Color,
    /// Success/info message color
    #[serde(deserialize_with = "deserialize_color")]
    #[serde(serialize_with = "serialize_color")]
    pub info: Color,
}
impl Default for Theme {
    /// The default theme is the built-in dark palette.
    fn default() -> Self {
        default_dark()
    }
}
/// Get the default themes directory path
///
/// Themes live next to the main config file: `<config dir>/themes`.
pub fn default_themes_dir() -> PathBuf {
    let config_dir = PathBuf::from(shellexpand::tilde(crate::config::DEFAULT_CONFIG_PATH).as_ref())
        .parent()
        .map(|p| p.to_path_buf())
        // Fall back to the conventional location. The tilde must be expanded
        // here too: the previous literal "~/.config/owlen" produced a path
        // containing a `~` component that the filesystem never resolves.
        .unwrap_or_else(|| PathBuf::from(shellexpand::tilde("~/.config/owlen").as_ref()));
    config_dir.join("themes")
}
/// Load all available themes (built-in + custom)
///
/// Built-ins are loaded first; a custom theme file whose stem matches a
/// built-in name replaces that built-in.
pub fn load_all_themes() -> HashMap<String, Theme> {
    // Start from the embedded palettes.
    let mut themes = built_in_themes();

    // Layer custom `*.toml` themes from the themes directory on top.
    if let Ok(entries) = fs::read_dir(default_themes_dir()) {
        for entry in entries.flatten() {
            let path = entry.path();
            if path.extension().and_then(|s| s.to_str()) != Some("toml") {
                continue;
            }
            // The theme name is the file stem (e.g. "mytheme.toml" -> "mytheme").
            let name = path
                .file_stem()
                .and_then(|s| s.to_str())
                .unwrap_or("unknown")
                .to_string();
            match load_theme_from_file(&path) {
                Ok(theme) => {
                    themes.insert(name.clone(), theme);
                }
                Err(e) => {
                    eprintln!("Warning: Failed to load custom theme '{}': {}", name, e);
                }
            }
        }
    }
    themes
}
/// Load a theme from a TOML file
pub fn load_theme_from_file(path: &Path) -> Result<Theme, String> {
    match fs::read_to_string(path) {
        Ok(content) => {
            toml::from_str(&content).map_err(|e| format!("Failed to parse theme file: {}", e))
        }
        Err(e) => Err(format!("Failed to read theme file: {}", e)),
    }
}
/// Get a theme by name (built-in or custom)
pub fn get_theme(name: &str) -> Option<Theme> {
    let themes = load_all_themes();
    themes.get(name).cloned()
}
/// Get all built-in themes (embedded in the binary)
///
/// Each palette is compiled in as TOML via `include_str!`; if an embedded
/// file fails to parse, the hardcoded Rust fallback is used instead.
pub fn built_in_themes() -> HashMap<String, Theme> {
    // (name, embedded TOML source) pairs.
    let sources = [
        (
            "default_dark",
            include_str!("../../../themes/default_dark.toml"),
        ),
        (
            "default_light",
            include_str!("../../../themes/default_light.toml"),
        ),
        ("gruvbox", include_str!("../../../themes/gruvbox.toml")),
        ("dracula", include_str!("../../../themes/dracula.toml")),
        ("solarized", include_str!("../../../themes/solarized.toml")),
        (
            "midnight-ocean",
            include_str!("../../../themes/midnight-ocean.toml"),
        ),
        ("rose-pine", include_str!("../../../themes/rose-pine.toml")),
        ("monokai", include_str!("../../../themes/monokai.toml")),
        (
            "material-dark",
            include_str!("../../../themes/material-dark.toml"),
        ),
        (
            "material-light",
            include_str!("../../../themes/material-light.toml"),
        ),
    ];

    let mut themes = HashMap::new();
    for (name, content) in sources {
        if let Ok(theme) = toml::from_str::<Theme>(content).map_err(|e| {
            eprintln!("Warning: Failed to parse built-in theme '{}': {}", name, e);
        }) {
            themes.insert(name.to_string(), theme);
        } else if let Some(fallback) = get_fallback_theme(name) {
            // Fallback to hardcoded version if parsing fails.
            themes.insert(name.to_string(), fallback);
        }
    }
    themes
}
/// Get fallback hardcoded theme (used if embedded TOML fails to parse)
///
/// Returns `None` for names that have no built-in counterpart.
fn get_fallback_theme(name: &str) -> Option<Theme> {
    match name {
        "default_dark" => Some(default_dark()),
        "default_light" => Some(default_light()),
        "gruvbox" => Some(gruvbox()),
        "dracula" => Some(dracula()),
        "solarized" => Some(solarized()),
        "midnight-ocean" => Some(midnight_ocean()),
        "rose-pine" => Some(rose_pine()),
        "monokai" => Some(monokai()),
        "material-dark" => Some(material_dark()),
        "material-light" => Some(material_light()),
        _ => None,
    }
}
/// Default dark theme
///
/// Hardcoded fallback for the embedded `themes/default_dark.toml`.
fn default_dark() -> Theme {
    Theme {
        name: "default_dark".to_string(),
        text: Color::White,
        background: Color::Black,
        focused_panel_border: Color::LightMagenta,
        unfocused_panel_border: Color::Rgb(95, 20, 135),
        user_message_role: Color::LightBlue,
        assistant_message_role: Color::Yellow,
        thinking_panel_title: Color::LightMagenta,
        command_bar_background: Color::Black,
        status_background: Color::Black,
        mode_normal: Color::LightBlue,
        mode_editing: Color::LightGreen,
        mode_model_selection: Color::LightYellow,
        mode_provider_selection: Color::LightCyan,
        mode_help: Color::LightMagenta,
        mode_visual: Color::Magenta,
        mode_command: Color::Yellow,
        selection_bg: Color::LightBlue,
        selection_fg: Color::Black,
        cursor: Color::Magenta,
        placeholder: Color::DarkGray,
        error: Color::Red,
        info: Color::LightGreen,
    }
}
/// Default light theme
///
/// Hardcoded fallback for the embedded `themes/default_light.toml`.
fn default_light() -> Theme {
    Theme {
        name: "default_light".to_string(),
        text: Color::Black,
        background: Color::White,
        focused_panel_border: Color::Rgb(74, 144, 226),
        unfocused_panel_border: Color::Rgb(221, 221, 221),
        user_message_role: Color::Rgb(0, 85, 164),
        assistant_message_role: Color::Rgb(142, 68, 173),
        thinking_panel_title: Color::Rgb(142, 68, 173),
        command_bar_background: Color::White,
        status_background: Color::White,
        mode_normal: Color::Rgb(0, 85, 164),
        mode_editing: Color::Rgb(46, 139, 87),
        mode_model_selection: Color::Rgb(181, 137, 0),
        mode_provider_selection: Color::Rgb(0, 139, 139),
        mode_help: Color::Rgb(142, 68, 173),
        mode_visual: Color::Rgb(142, 68, 173),
        mode_command: Color::Rgb(181, 137, 0),
        selection_bg: Color::Rgb(164, 200, 240),
        selection_fg: Color::Black,
        cursor: Color::Rgb(217, 95, 2),
        placeholder: Color::Gray,
        error: Color::Rgb(192, 57, 43),
        info: Color::Green,
    }
}
/// Gruvbox theme
///
/// Hardcoded fallback for the embedded `themes/gruvbox.toml`.
fn gruvbox() -> Theme {
    Theme {
        name: "gruvbox".to_string(),
        text: Color::Rgb(235, 219, 178),    // #ebdbb2
        background: Color::Rgb(40, 40, 40), // #282828
        focused_panel_border: Color::Rgb(254, 128, 25), // #fe8019 (orange)
        unfocused_panel_border: Color::Rgb(124, 111, 100), // #7c6f64
        user_message_role: Color::Rgb(184, 187, 38), // #b8bb26 (green)
        assistant_message_role: Color::Rgb(131, 165, 152), // #83a598 (blue)
        thinking_panel_title: Color::Rgb(211, 134, 155), // #d3869b (purple)
        command_bar_background: Color::Rgb(60, 56, 54), // #3c3836
        status_background: Color::Rgb(60, 56, 54),
        mode_normal: Color::Rgb(131, 165, 152), // blue
        mode_editing: Color::Rgb(184, 187, 38), // green
        mode_model_selection: Color::Rgb(250, 189, 47), // yellow
        mode_provider_selection: Color::Rgb(142, 192, 124), // aqua
        mode_help: Color::Rgb(211, 134, 155), // purple
        mode_visual: Color::Rgb(254, 128, 25), // orange
        mode_command: Color::Rgb(250, 189, 47), // yellow
        selection_bg: Color::Rgb(80, 73, 69),
        selection_fg: Color::Rgb(235, 219, 178),
        cursor: Color::Rgb(254, 128, 25),
        placeholder: Color::Rgb(102, 92, 84),
        error: Color::Rgb(251, 73, 52), // #fb4934
        info: Color::Rgb(184, 187, 38),
    }
}
/// Dracula theme
///
/// Hardcoded fallback for the embedded `themes/dracula.toml`.
fn dracula() -> Theme {
    Theme {
        name: "dracula".to_string(),
        text: Color::Rgb(248, 248, 242),    // #f8f8f2
        background: Color::Rgb(40, 42, 54), // #282a36
        focused_panel_border: Color::Rgb(255, 121, 198), // #ff79c6 (pink)
        unfocused_panel_border: Color::Rgb(68, 71, 90), // #44475a
        user_message_role: Color::Rgb(139, 233, 253), // #8be9fd (cyan)
        assistant_message_role: Color::Rgb(255, 121, 198), // #ff79c6 (pink)
        thinking_panel_title: Color::Rgb(189, 147, 249), // #bd93f9 (purple)
        command_bar_background: Color::Rgb(68, 71, 90),
        status_background: Color::Rgb(68, 71, 90),
        mode_normal: Color::Rgb(139, 233, 253),
        mode_editing: Color::Rgb(80, 250, 123), // #50fa7b (green)
        mode_model_selection: Color::Rgb(241, 250, 140), // #f1fa8c (yellow)
        mode_provider_selection: Color::Rgb(139, 233, 253),
        mode_help: Color::Rgb(189, 147, 249),
        mode_visual: Color::Rgb(255, 121, 198),
        mode_command: Color::Rgb(241, 250, 140),
        selection_bg: Color::Rgb(68, 71, 90),
        selection_fg: Color::Rgb(248, 248, 242),
        cursor: Color::Rgb(255, 121, 198),
        placeholder: Color::Rgb(98, 114, 164),
        error: Color::Rgb(255, 85, 85), // #ff5555
        info: Color::Rgb(80, 250, 123),
    }
}
/// Solarized Dark theme
///
/// Hardcoded fallback for the embedded `themes/solarized.toml`.
fn solarized() -> Theme {
    Theme {
        name: "solarized".to_string(),
        text: Color::Rgb(131, 148, 150),   // #839496 (base0)
        background: Color::Rgb(0, 43, 54), // #002b36 (base03)
        focused_panel_border: Color::Rgb(38, 139, 210), // #268bd2 (blue)
        unfocused_panel_border: Color::Rgb(7, 54, 66), // #073642 (base02)
        user_message_role: Color::Rgb(42, 161, 152), // #2aa198 (cyan)
        assistant_message_role: Color::Rgb(203, 75, 22), // #cb4b16 (orange)
        thinking_panel_title: Color::Rgb(108, 113, 196), // #6c71c4 (violet)
        command_bar_background: Color::Rgb(7, 54, 66),
        status_background: Color::Rgb(7, 54, 66),
        mode_normal: Color::Rgb(38, 139, 210), // blue
        mode_editing: Color::Rgb(133, 153, 0), // #859900 (green)
        mode_model_selection: Color::Rgb(181, 137, 0), // #b58900 (yellow)
        mode_provider_selection: Color::Rgb(42, 161, 152), // cyan
        mode_help: Color::Rgb(108, 113, 196), // violet
        mode_visual: Color::Rgb(211, 54, 130), // #d33682 (magenta)
        mode_command: Color::Rgb(181, 137, 0), // yellow
        selection_bg: Color::Rgb(7, 54, 66),
        selection_fg: Color::Rgb(147, 161, 161),
        cursor: Color::Rgb(211, 54, 130),
        placeholder: Color::Rgb(88, 110, 117),
        error: Color::Rgb(220, 50, 47), // #dc322f (red)
        info: Color::Rgb(133, 153, 0),
    }
}
/// Midnight Ocean theme
///
/// Hardcoded fallback for the embedded `themes/midnight-ocean.toml`.
fn midnight_ocean() -> Theme {
    Theme {
        name: "midnight-ocean".to_string(),
        text: Color::Rgb(192, 202, 245),
        background: Color::Rgb(13, 17, 23),
        focused_panel_border: Color::Rgb(88, 166, 255),
        unfocused_panel_border: Color::Rgb(48, 54, 61),
        user_message_role: Color::Rgb(121, 192, 255),
        assistant_message_role: Color::Rgb(137, 221, 255),
        thinking_panel_title: Color::Rgb(158, 206, 106),
        command_bar_background: Color::Rgb(22, 27, 34),
        status_background: Color::Rgb(22, 27, 34),
        mode_normal: Color::Rgb(121, 192, 255),
        mode_editing: Color::Rgb(158, 206, 106),
        mode_model_selection: Color::Rgb(255, 212, 59),
        mode_provider_selection: Color::Rgb(137, 221, 255),
        mode_help: Color::Rgb(255, 115, 157),
        mode_visual: Color::Rgb(246, 140, 245),
        mode_command: Color::Rgb(255, 212, 59),
        selection_bg: Color::Rgb(56, 139, 253),
        selection_fg: Color::Rgb(13, 17, 23),
        cursor: Color::Rgb(246, 140, 245),
        placeholder: Color::Rgb(110, 118, 129),
        error: Color::Rgb(248, 81, 73),
        info: Color::Rgb(158, 206, 106),
    }
}
/// Rose Pine theme
///
/// Hardcoded fallback for the embedded `themes/rose-pine.toml`.
fn rose_pine() -> Theme {
    Theme {
        name: "rose-pine".to_string(),
        text: Color::Rgb(224, 222, 244),    // #e0def4
        background: Color::Rgb(25, 23, 36), // #191724
        focused_panel_border: Color::Rgb(235, 111, 146), // #eb6f92 (love)
        unfocused_panel_border: Color::Rgb(38, 35, 58), // #26233a
        user_message_role: Color::Rgb(49, 116, 143), // #31748f (foam)
        assistant_message_role: Color::Rgb(156, 207, 216), // #9ccfd8 (foam light)
        thinking_panel_title: Color::Rgb(196, 167, 231), // #c4a7e7 (iris)
        command_bar_background: Color::Rgb(38, 35, 58),
        status_background: Color::Rgb(38, 35, 58),
        mode_normal: Color::Rgb(156, 207, 216),
        mode_editing: Color::Rgb(235, 188, 186), // #ebbcba (rose)
        mode_model_selection: Color::Rgb(246, 193, 119),
        mode_provider_selection: Color::Rgb(49, 116, 143),
        mode_help: Color::Rgb(196, 167, 231),
        mode_visual: Color::Rgb(235, 111, 146),
        mode_command: Color::Rgb(246, 193, 119),
        selection_bg: Color::Rgb(64, 61, 82),
        selection_fg: Color::Rgb(224, 222, 244),
        cursor: Color::Rgb(235, 111, 146),
        placeholder: Color::Rgb(110, 106, 134),
        error: Color::Rgb(235, 111, 146),
        info: Color::Rgb(156, 207, 216),
    }
}
/// Monokai theme
///
/// Hardcoded fallback for the embedded `themes/monokai.toml`.
fn monokai() -> Theme {
    Theme {
        name: "monokai".to_string(),
        text: Color::Rgb(248, 248, 242),    // #f8f8f2
        background: Color::Rgb(39, 40, 34), // #272822
        focused_panel_border: Color::Rgb(249, 38, 114), // #f92672 (pink)
        unfocused_panel_border: Color::Rgb(117, 113, 94), // #75715e
        user_message_role: Color::Rgb(102, 217, 239), // #66d9ef (cyan)
        assistant_message_role: Color::Rgb(174, 129, 255), // #ae81ff (purple)
        thinking_panel_title: Color::Rgb(230, 219, 116), // #e6db74 (yellow)
        command_bar_background: Color::Rgb(39, 40, 34),
        status_background: Color::Rgb(39, 40, 34),
        mode_normal: Color::Rgb(102, 217, 239),
        mode_editing: Color::Rgb(166, 226, 46), // #a6e22e (green)
        mode_model_selection: Color::Rgb(230, 219, 116),
        mode_provider_selection: Color::Rgb(102, 217, 239),
        mode_help: Color::Rgb(174, 129, 255),
        mode_visual: Color::Rgb(249, 38, 114),
        mode_command: Color::Rgb(230, 219, 116),
        selection_bg: Color::Rgb(117, 113, 94),
        selection_fg: Color::Rgb(248, 248, 242),
        cursor: Color::Rgb(249, 38, 114),
        placeholder: Color::Rgb(117, 113, 94),
        error: Color::Rgb(249, 38, 114),
        info: Color::Rgb(166, 226, 46),
    }
}
/// Material Dark theme
///
/// Hardcoded fallback for the embedded `themes/material-dark.toml`.
fn material_dark() -> Theme {
    Theme {
        name: "material-dark".to_string(),
        text: Color::Rgb(238, 255, 255),    // #eeffff
        background: Color::Rgb(38, 50, 56), // #263238
        focused_panel_border: Color::Rgb(128, 203, 196), // #80cbc4 (cyan)
        unfocused_panel_border: Color::Rgb(84, 110, 122), // #546e7a
        user_message_role: Color::Rgb(130, 170, 255), // #82aaff (blue)
        assistant_message_role: Color::Rgb(199, 146, 234), // #c792ea (purple)
        thinking_panel_title: Color::Rgb(255, 203, 107), // #ffcb6b (yellow)
        command_bar_background: Color::Rgb(33, 43, 48),
        status_background: Color::Rgb(33, 43, 48),
        mode_normal: Color::Rgb(130, 170, 255),
        mode_editing: Color::Rgb(195, 232, 141), // #c3e88d (green)
        mode_model_selection: Color::Rgb(255, 203, 107),
        mode_provider_selection: Color::Rgb(128, 203, 196),
        mode_help: Color::Rgb(199, 146, 234),
        mode_visual: Color::Rgb(240, 113, 120), // #f07178 (red)
        mode_command: Color::Rgb(255, 203, 107),
        selection_bg: Color::Rgb(84, 110, 122),
        selection_fg: Color::Rgb(238, 255, 255),
        cursor: Color::Rgb(255, 204, 0),
        placeholder: Color::Rgb(84, 110, 122),
        error: Color::Rgb(240, 113, 120),
        info: Color::Rgb(195, 232, 141),
    }
}
/// Material Light theme
///
/// Hardcoded fallback for the embedded `themes/material-light.toml`.
fn material_light() -> Theme {
    Theme {
        name: "material-light".to_string(),
        text: Color::Rgb(33, 33, 33),
        background: Color::Rgb(236, 239, 241),
        focused_panel_border: Color::Rgb(0, 150, 136),
        unfocused_panel_border: Color::Rgb(176, 190, 197),
        user_message_role: Color::Rgb(68, 138, 255),
        assistant_message_role: Color::Rgb(124, 77, 255),
        thinking_panel_title: Color::Rgb(245, 124, 0),
        command_bar_background: Color::Rgb(255, 255, 255),
        status_background: Color::Rgb(255, 255, 255),
        mode_normal: Color::Rgb(68, 138, 255),
        mode_editing: Color::Rgb(56, 142, 60),
        mode_model_selection: Color::Rgb(245, 124, 0),
        mode_provider_selection: Color::Rgb(0, 150, 136),
        mode_help: Color::Rgb(124, 77, 255),
        mode_visual: Color::Rgb(211, 47, 47),
        mode_command: Color::Rgb(245, 124, 0),
        selection_bg: Color::Rgb(176, 190, 197),
        selection_fg: Color::Rgb(33, 33, 33),
        cursor: Color::Rgb(194, 24, 91),
        placeholder: Color::Rgb(144, 164, 174),
        error: Color::Rgb(211, 47, 47),
        info: Color::Rgb(56, 142, 60),
    }
}
// Helper functions for color serialization/deserialization
fn deserialize_color<'de, D>(deserializer: D) -> Result<Color, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
parse_color(&s).map_err(serde::de::Error::custom)
}
fn serialize_color<S>(color: &Color, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let s = color_to_string(color);
serializer.serialize_str(&s)
}
/// Parse a color from a string: either `#rrggbb` hex or one of the 16
/// named terminal colors (case-insensitive).
///
/// Returns `Err` for anything else.
fn parse_color(s: &str) -> Result<Color, String> {
    if let Some(hex) = s.strip_prefix('#') {
        // The `is_ascii` guard protects the byte slices below: without it,
        // a 6-byte string containing multi-byte UTF-8 characters (e.g.
        // "#aééa") would panic on a non-character boundary instead of
        // returning an error.
        if hex.len() == 6 && hex.is_ascii() {
            let r = u8::from_str_radix(&hex[0..2], 16)
                .map_err(|_| format!("Invalid hex color: {}", s))?;
            let g = u8::from_str_radix(&hex[2..4], 16)
                .map_err(|_| format!("Invalid hex color: {}", s))?;
            let b = u8::from_str_radix(&hex[4..6], 16)
                .map_err(|_| format!("Invalid hex color: {}", s))?;
            return Ok(Color::Rgb(r, g, b));
        }
    }
    // Try named colors
    match s.to_lowercase().as_str() {
        "black" => Ok(Color::Black),
        "red" => Ok(Color::Red),
        "green" => Ok(Color::Green),
        "yellow" => Ok(Color::Yellow),
        "blue" => Ok(Color::Blue),
        "magenta" => Ok(Color::Magenta),
        "cyan" => Ok(Color::Cyan),
        "gray" | "grey" => Ok(Color::Gray),
        "darkgray" | "darkgrey" => Ok(Color::DarkGray),
        "lightred" => Ok(Color::LightRed),
        "lightgreen" => Ok(Color::LightGreen),
        "lightyellow" => Ok(Color::LightYellow),
        "lightblue" => Ok(Color::LightBlue),
        "lightmagenta" => Ok(Color::LightMagenta),
        "lightcyan" => Ok(Color::LightCyan),
        "white" => Ok(Color::White),
        _ => Err(format!("Unknown color: {}", s)),
    }
}
/// Serialize a color back into the string form accepted by `parse_color`:
/// a lowercase name for the 16 named colors, `#rrggbb` for RGB values,
/// and `#ffffff` for any other variant.
fn color_to_string(color: &Color) -> String {
    let name = match color {
        Color::Black => "black",
        Color::Red => "red",
        Color::Green => "green",
        Color::Yellow => "yellow",
        Color::Blue => "blue",
        Color::Magenta => "magenta",
        Color::Cyan => "cyan",
        Color::Gray => "gray",
        Color::DarkGray => "darkgray",
        Color::LightRed => "lightred",
        Color::LightGreen => "lightgreen",
        Color::LightYellow => "lightyellow",
        Color::LightBlue => "lightblue",
        Color::LightMagenta => "lightmagenta",
        Color::LightCyan => "lightcyan",
        Color::White => "white",
        Color::Rgb(r, g, b) => return format!("#{:02x}{:02x}{:02x}", r, g, b),
        _ => return "#ffffff".to_string(),
    };
    name.to_string()
}
#[cfg(test)]
mod tests {
    //! Smoke tests for color parsing and the embedded theme set.
    use super::*;
    #[test]
    fn test_color_parsing() {
        // Hex, named, and light-variant named forms all parse.
        assert!(matches!(parse_color("#ff0000"), Ok(Color::Rgb(255, 0, 0))));
        assert!(matches!(parse_color("red"), Ok(Color::Red)));
        assert!(matches!(parse_color("lightblue"), Ok(Color::LightBlue)));
    }
    #[test]
    fn test_built_in_themes() {
        // A few representative built-in names must always be present.
        let themes = built_in_themes();
        assert!(themes.contains_key("default_dark"));
        assert!(themes.contains_key("gruvbox"));
        assert!(themes.contains_key("dracula"));
    }
}

View File

@@ -0,0 +1,197 @@
//! Core types used across OWLEN
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt;
use uuid::Uuid;
/// A message in a conversation
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Message {
    /// Unique identifier for this message
    pub id: Uuid,
    /// Role of the message sender (user, assistant, system)
    pub role: Role,
    /// Content of the message
    pub content: String,
    /// Optional metadata
    pub metadata: HashMap<String, serde_json::Value>,
    /// Timestamp when the message was created
    pub timestamp: std::time::SystemTime,
}
/// Role of a message sender
///
/// Serialized in lowercase ("user"/"assistant"/"system"), matching the
/// `Display` implementation below.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    /// Message from the user
    User,
    /// Message from the AI assistant
    Assistant,
    /// System message (prompts, context, etc.)
    System,
}
impl fmt::Display for Role {
    /// Render the role as its lowercase wire-format name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Role::User => write!(f, "user"),
            Role::Assistant => write!(f, "assistant"),
            Role::System => write!(f, "system"),
        }
    }
}
/// A conversation containing multiple messages
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Conversation {
/// Unique identifier for this conversation
pub id: Uuid,
/// Optional name/title for the conversation
pub name: Option<String>,
/// Optional AI-generated description of the conversation
#[serde(default)]
pub description: Option<String>,
/// Messages in chronological order
pub messages: Vec<Message>,
/// Model used for this conversation
pub model: String,
/// When the conversation was created
pub created_at: std::time::SystemTime,
/// When the conversation was last updated
pub updated_at: std::time::SystemTime,
}
/// Configuration for a chat completion request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatRequest {
/// The model to use for completion
pub model: String,
/// The conversation messages
pub messages: Vec<Message>,
/// Optional parameters for the request
pub parameters: ChatParameters,
}
/// Parameters for chat completion
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ChatParameters {
/// Temperature for randomness (0.0 to 2.0)
#[serde(skip_serializing_if = "Option::is_none")]
pub temperature: Option<f32>,
/// Maximum tokens to generate
#[serde(skip_serializing_if = "Option::is_none")]
pub max_tokens: Option<u32>,
/// Whether to stream the response
#[serde(default)]
pub stream: bool,
/// Additional provider-specific parameters
#[serde(flatten)]
#[serde(default)]
pub extra: HashMap<String, serde_json::Value>,
}
/// Response from a chat completion request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatResponse {
/// The generated message
pub message: Message,
/// Token usage information
pub usage: Option<TokenUsage>,
/// Whether this is a streaming chunk
#[serde(default)]
pub is_streaming: bool,
/// Whether this is the final chunk in a stream
#[serde(default)]
pub is_final: bool,
}
/// Token usage information
///
/// `total_tokens` is expected to equal `prompt_tokens + completion_tokens`
/// (providers in this codebase compute it that way).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TokenUsage {
    /// Tokens in the prompt
    pub prompt_tokens: u32,
    /// Tokens in the completion
    pub completion_tokens: u32,
    /// Total tokens used
    pub total_tokens: u32,
}
/// Information about an available model
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelInfo {
    /// Model identifier (the string passed back in requests)
    pub id: String,
    /// Human-readable name
    pub name: String,
    /// Model description
    pub description: Option<String>,
    /// Provider that hosts this model (e.g. "ollama")
    pub provider: String,
    /// Context window size, in tokens, when known
    pub context_window: Option<u32>,
    /// Additional capabilities (free-form tags such as "chat")
    pub capabilities: Vec<String>,
}
impl Message {
    /// Build a message with a freshly generated id, empty metadata, and the
    /// current wall-clock timestamp.
    pub fn new(role: Role, content: String) -> Self {
        let timestamp = std::time::SystemTime::now();
        let id = Uuid::new_v4();
        Self {
            id,
            role,
            content,
            metadata: HashMap::new(),
            timestamp,
        }
    }

    /// Shorthand for a `Role::User` message.
    pub fn user(content: String) -> Self {
        Self::new(Role::User, content)
    }

    /// Shorthand for a `Role::Assistant` message.
    pub fn assistant(content: String) -> Self {
        Self::new(Role::Assistant, content)
    }

    /// Shorthand for a `Role::System` message.
    pub fn system(content: String) -> Self {
        Self::new(Role::System, content)
    }
}
impl Conversation {
    /// Start an empty conversation for `model`; both timestamps begin equal.
    pub fn new(model: String) -> Self {
        let created = std::time::SystemTime::now();
        Self {
            id: Uuid::new_v4(),
            name: None,
            description: None,
            messages: Vec::new(),
            model,
            created_at: created,
            updated_at: created,
        }
    }

    /// Record that the conversation was modified just now.
    fn touch(&mut self) {
        self.updated_at = std::time::SystemTime::now();
    }

    /// Append a message and bump `updated_at`.
    pub fn add_message(&mut self, message: Message) {
        self.messages.push(message);
        self.touch();
    }

    /// The most recent message, if any.
    pub fn last_message(&self) -> Option<&Message> {
        self.messages.last()
    }

    /// Remove every message and bump `updated_at`.
    pub fn clear(&mut self) {
        self.messages.clear();
        self.touch();
    }
}

423
crates/owlen-core/src/ui.rs Normal file
View File

@@ -0,0 +1,423 @@
//! Shared UI components and state management for TUI applications
//!
//! This module contains reusable UI components that can be shared between
//! different TUI applications (chat, code, etc.)
use std::fmt;
/// Application state
///
/// Top-level lifecycle flag: keep processing while `Running`, exit once a
/// handler switches it to `Quit`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AppState {
    Running,
    Quit,
}
/// Input modes for TUI applications
///
/// Modal-editor style: `Normal` navigates, `Editing` types into the input
/// box, and the remaining variants drive dedicated overlays/pickers. The
/// short status-bar label for each mode comes from the `Display` impl.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InputMode {
    Normal,
    Editing,
    ProviderSelection,
    ModelSelection,
    Help,
    Visual,
    Command,
    SessionBrowser,
    ThemeBrowser,
}
impl fmt::Display for InputMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let label = match self {
InputMode::Normal => "Normal",
InputMode::Editing => "Editing",
InputMode::ModelSelection => "Model",
InputMode::ProviderSelection => "Provider",
InputMode::Help => "Help",
InputMode::Visual => "Visual",
InputMode::Command => "Command",
InputMode::SessionBrowser => "Sessions",
InputMode::ThemeBrowser => "Themes",
};
f.write_str(label)
}
}
/// Represents which panel is currently focused
///
/// Drives which panel receives scroll/navigation input.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FocusedPanel {
    Chat,
    Thinking,
    Input,
}
/// Auto-scroll state manager for scrollable panels
///
/// `scroll` is the index of the first visible line and `content_len` the
/// total line count. While `stick_to_bottom` is set the view follows new
/// content; scrolling away from the bottom clears it.
#[derive(Debug, Clone)]
pub struct AutoScroll {
    /// Current scroll offset (first visible line).
    pub scroll: usize,
    /// Total number of content lines.
    pub content_len: usize,
    /// Follow the tail as new content arrives.
    pub stick_to_bottom: bool,
}
impl Default for AutoScroll {
fn default() -> Self {
Self {
scroll: 0,
content_len: 0,
stick_to_bottom: true,
}
}
}
impl AutoScroll {
    /// Largest valid scroll offset for a viewport of the given height.
    fn max_offset(&self, viewport_h: usize) -> usize {
        self.content_len.saturating_sub(viewport_h)
    }

    /// Re-clamp the offset after the viewport or content size changed;
    /// while sticky, snap to the bottom.
    pub fn on_viewport(&mut self, viewport_h: usize) {
        let limit = self.max_offset(viewport_h);
        self.scroll = if self.stick_to_bottom {
            limit
        } else {
            self.scroll.min(limit)
        };
    }

    /// Apply a user-driven delta; sticky-bottom is re-derived from whether
    /// the result lands exactly on the last page.
    pub fn on_user_scroll(&mut self, delta: isize, viewport_h: usize) {
        let limit = self.max_offset(viewport_h) as isize;
        let next = (self.scroll as isize + delta).clamp(0, limit) as usize;
        self.scroll = next;
        self.stick_to_bottom = next as isize == limit;
    }

    /// Scroll down by half the viewport.
    pub fn scroll_half_page_down(&mut self, viewport_h: usize) {
        self.on_user_scroll((viewport_h / 2) as isize, viewport_h);
    }

    /// Scroll up by half the viewport.
    pub fn scroll_half_page_up(&mut self, viewport_h: usize) {
        self.on_user_scroll(-((viewport_h / 2) as isize), viewport_h);
    }

    /// Scroll down by a full viewport.
    pub fn scroll_full_page_down(&mut self, viewport_h: usize) {
        self.on_user_scroll(viewport_h as isize, viewport_h);
    }

    /// Scroll up by a full viewport.
    pub fn scroll_full_page_up(&mut self, viewport_h: usize) {
        self.on_user_scroll(-(viewport_h as isize), viewport_h);
    }

    /// Pin to the very top and stop following new content.
    pub fn jump_to_top(&mut self) {
        self.scroll = 0;
        self.stick_to_bottom = false;
    }

    /// Pin to the bottom and resume following new content.
    pub fn jump_to_bottom(&mut self, viewport_h: usize) {
        self.stick_to_bottom = true;
        self.on_viewport(viewport_h);
    }
}
/// Visual selection state for text selection
///
/// Endpoints are (row, col) pairs; `start` is the anchor, `end` the moving
/// edge. Consumers should go through `get_normalized` for an ordered range.
#[derive(Debug, Clone, Default)]
pub struct VisualSelection {
    pub start: Option<(usize, usize)>, // (row, col)
    pub end: Option<(usize, usize)>,   // (row, col)
}
impl VisualSelection {
    /// Empty (inactive) selection.
    pub fn new() -> Self {
        Self::default()
    }

    /// Begin a selection: both endpoints collapse onto `pos`.
    pub fn start_at(&mut self, pos: (usize, usize)) {
        self.start = Some(pos);
        self.end = Some(pos);
    }

    /// Move the trailing endpoint; the anchor stays put.
    pub fn extend_to(&mut self, pos: (usize, usize)) {
        self.end = Some(pos);
    }

    /// Drop both endpoints, deactivating the selection.
    pub fn clear(&mut self) {
        self.start = None;
        self.end = None;
    }

    /// True once both endpoints exist.
    pub fn is_active(&self) -> bool {
        self.start.is_some() && self.end.is_some()
    }

    /// Endpoints ordered so the first compares <= the second.
    ///
    /// Relies on tuple `Ord` being lexicographic (row first, then col),
    /// which matches the original row-major comparison.
    pub fn get_normalized(&self) -> Option<((usize, usize), (usize, usize))> {
        let a = self.start?;
        let b = self.end?;
        Some(if a <= b { (a, b) } else { (b, a) })
    }
}
/// Extract the text covered by a (start, end) selection over `lines`.
///
/// Positions are (row, col) with columns counted in `char`s and the end
/// column exclusive; out-of-range columns are clamped to the line length.
/// Returns `None` when the selection is empty or starts past the content.
/// Callers are expected to pass a normalized range (start before end).
pub fn extract_text_from_selection(
    lines: &[String],
    start: (usize, usize),
    end: (usize, usize),
) -> Option<String> {
    let (start_row, start_col) = start;
    if lines.is_empty() || start_row >= lines.len() {
        return None;
    }
    let end_row = end.0.min(lines.len() - 1);
    let end_col = end.1;

    if start_row == end_row {
        // Selection confined to a single line.
        let chars: Vec<char> = lines[start_row].chars().collect();
        let lo = start_col.min(chars.len());
        let hi = end_col.min(chars.len());
        if lo >= hi {
            return None;
        }
        return Some(chars[lo..hi].iter().collect());
    }

    let mut pieces: Vec<String> = Vec::new();

    // Head: tail of the first line, from start_col onward.
    let head: Vec<char> = lines[start_row].chars().collect();
    let lo = start_col.min(head.len());
    if lo < head.len() {
        pieces.push(head[lo..].iter().collect());
    }

    // Body: every whole line strictly between the endpoints.
    for row in (start_row + 1)..end_row {
        if row < lines.len() {
            pieces.push(lines[row].clone());
        }
    }

    // Tail: prefix of the last line, up to end_col.
    if end_row < lines.len() && end_row > start_row {
        let tail: Vec<char> = lines[end_row].chars().collect();
        let hi = end_col.min(tail.len());
        if hi > 0 {
            pieces.push(tail[..hi].iter().collect());
        }
    }

    if pieces.is_empty() {
        None
    } else {
        Some(pieces.join("\n"))
    }
}
/// Cursor position for navigating scrollable content
///
/// Plain (row, col) value type; movement helpers clamp at 0 and at a
/// caller-supplied maximum.
#[derive(Debug, Clone, Copy, Default)]
pub struct CursorPosition {
    pub row: usize,
    pub col: usize,
}
impl CursorPosition {
pub fn new(row: usize, col: usize) -> Self {
Self { row, col }
}
pub fn move_up(&mut self, amount: usize) {
self.row = self.row.saturating_sub(amount);
}
pub fn move_down(&mut self, amount: usize, max: usize) {
self.row = (self.row + amount).min(max);
}
pub fn move_left(&mut self, amount: usize) {
self.col = self.col.saturating_sub(amount);
}
pub fn move_right(&mut self, amount: usize, max: usize) {
self.col = (self.col + amount).min(max);
}
pub fn as_tuple(&self) -> (usize, usize) {
(self.row, self.col)
}
}
/// Char index of the next word boundary at or after `col` (vim `w` style).
///
/// Skips the run the cursor sits on — word characters (alphanumeric or `_`)
/// or non-word characters — and lands on the first character of whatever
/// follows. Past the end of the line, returns the line length.
pub fn find_next_word_boundary(line: &str, col: usize) -> Option<usize> {
    let chars: Vec<char> = line.chars().collect();
    if col >= chars.len() {
        return Some(chars.len());
    }
    let is_word = |c: char| c.is_alphanumeric() || c == '_';
    // Advance while the word-ness matches the starting character's, i.e.
    // skip exactly one homogeneous run.
    let starting_kind = is_word(chars[col]);
    let mut idx = col;
    while idx < chars.len() && is_word(chars[idx]) == starting_kind {
        idx += 1;
    }
    Some(idx)
}
/// Char index of the last character of the current word (vim `e` style).
///
/// On a non-word character, first skips forward to the next word and
/// returns that word's last character. Past the end, returns the length.
pub fn find_word_end(line: &str, col: usize) -> Option<usize> {
    let chars: Vec<char> = line.chars().collect();
    if col >= chars.len() {
        return Some(chars.len());
    }
    let is_word = |c: char| c.is_alphanumeric() || c == '_';
    let mut idx = col;
    // If sitting on separators, walk past them onto the next word (if any).
    if !is_word(chars[idx]) {
        while idx < chars.len() && !is_word(chars[idx]) {
            idx += 1;
        }
    }
    // Walk one past the end of the word run, then step back onto it.
    while idx < chars.len() && is_word(chars[idx]) {
        idx += 1;
    }
    Some(idx.saturating_sub(1))
}
/// Char index of the start of the previous word (vim `b` style).
///
/// Steps left past any separators, then to the first character of the word
/// run it lands in. At or before column 0, returns 0.
pub fn find_prev_word_boundary(line: &str, col: usize) -> Option<usize> {
    let chars: Vec<char> = line.chars().collect();
    if col == 0 || chars.is_empty() {
        return Some(0);
    }
    let is_word = |c: char| c.is_alphanumeric() || c == '_';
    // Start on the character immediately left of the cursor (col >= 1 here).
    let mut idx = col.min(chars.len()) - 1;
    // Back over any separator run.
    while idx > 0 && !is_word(chars[idx]) {
        idx -= 1;
    }
    // Back up to the first character of the word run.
    while idx > 0 && is_word(chars[idx - 1]) {
        idx -= 1;
    }
    Some(idx)
}
// Unit tests for the scrolling, selection, and word-navigation helpers above.
#[cfg(test)]
mod tests {
    use super::*;

    /// Sticky-bottom tracking, manual scrolling, and jump-to-bottom.
    #[test]
    fn test_auto_scroll() {
        let mut scroll = AutoScroll::default();
        scroll.content_len = 100;
        // Test on_viewport with stick_to_bottom
        scroll.on_viewport(10);
        assert_eq!(scroll.scroll, 90);
        // Test user scroll up
        scroll.on_user_scroll(-10, 10);
        assert_eq!(scroll.scroll, 80);
        assert!(!scroll.stick_to_bottom);
        // Test jump to bottom
        scroll.jump_to_bottom(10);
        assert!(scroll.stick_to_bottom);
        assert_eq!(scroll.scroll, 90);
    }

    /// Selection lifecycle: activation, normalization, and clearing.
    #[test]
    fn test_visual_selection() {
        let mut selection = VisualSelection::new();
        assert!(!selection.is_active());
        selection.start_at((0, 0));
        assert!(selection.is_active());
        selection.extend_to((2, 5));
        let normalized = selection.get_normalized();
        assert_eq!(normalized, Some(((0, 0), (2, 5))));
        selection.clear();
        assert!(!selection.is_active());
    }

    /// Single-line extraction with an exclusive end column.
    #[test]
    fn test_extract_text_single_line() {
        let lines = vec!["Hello World".to_string()];
        let result = extract_text_from_selection(&lines, (0, 0), (0, 5));
        assert_eq!(result, Some("Hello".to_string()));
    }

    /// Multi-line extraction: tail of first line, full middle, head of last.
    #[test]
    fn test_extract_text_multi_line() {
        let lines = vec![
            "First line".to_string(),
            "Second line".to_string(),
            "Third line".to_string(),
        ];
        let result = extract_text_from_selection(&lines, (0, 6), (2, 5));
        assert_eq!(result, Some("line\nSecond line\nThird".to_string()));
    }

    /// Forward and backward word-boundary hops across "hello world test".
    #[test]
    fn test_word_boundaries() {
        let line = "hello world test";
        assert_eq!(find_next_word_boundary(line, 0), Some(5));
        assert_eq!(find_next_word_boundary(line, 5), Some(6));
        assert_eq!(find_next_word_boundary(line, 6), Some(11));
        assert_eq!(find_prev_word_boundary(line, 16), Some(12));
        assert_eq!(find_prev_word_boundary(line, 11), Some(6));
        assert_eq!(find_prev_word_boundary(line, 6), Some(0));
    }
}

View File

@@ -0,0 +1,90 @@
#![allow(clippy::cast_possible_truncation)]
use unicode_segmentation::UnicodeSegmentation;
use unicode_width::UnicodeWidthStr;
/// A (row, column) cell position in the wrapped, rendered text.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ScreenPos {
    pub row: u16,
    pub col: u16,
}
/// Map every byte offset of `text` (`0..=text.len()`) to the screen cell the
/// cursor occupies *after* that byte, with word-wrapping at `width` columns.
///
/// Widths are display cells (`unicode-width`) measured over grapheme
/// clusters; every interior byte of a multi-byte grapheme maps to the
/// cluster's end cell. Wrapping rules mirror the renderer: whitespace that
/// would cross the boundary wraps to the next row, the first grapheme of a
/// fresh word that no longer fits moves to the next row, a word with no
/// earlier break point is hard-broken, and a word already started on the
/// line is allowed to extend past `width`.
///
/// # Panics
/// Panics if `width` is zero.
pub fn build_cursor_map(text: &str, width: u16) -> Vec<ScreenPos> {
    assert!(width > 0);
    let width = width as usize;
    // One entry per byte offset, plus one for the end-of-text position.
    // Byte 0 keeps the default (0, 0).
    let mut pos_map = vec![ScreenPos { row: 0, col: 0 }; text.len() + 1];
    let mut row = 0;
    let mut col = 0;
    // Byte offset / column where the word currently being laid out began;
    // used to decide between wrapping a new word and extending the current one.
    let mut word_start_idx = 0;
    let mut word_start_col = 0;
    for (byte_offset, grapheme) in text.grapheme_indices(true) {
        let grapheme_width = UnicodeWidthStr::width(grapheme);
        if grapheme == "\n" {
            // Hard line break: next row, column zero, fresh word context.
            row += 1;
            col = 0;
            word_start_col = 0;
            word_start_idx = byte_offset + grapheme.len();
            // Set position for the end of this grapheme and any intermediate bytes
            let end_pos = ScreenPos {
                row: row as u16,
                col: col as u16,
            };
            for i in 1..=grapheme.len() {
                if byte_offset + i < pos_map.len() {
                    pos_map[byte_offset + i] = end_pos;
                }
            }
            continue;
        }
        if grapheme.chars().all(char::is_whitespace) {
            if col + grapheme_width > width {
                // Whitespace causes wrap
                // NOTE(review): assumes the wrapping whitespace occupies one
                // cell on the new row (col = 1); a wide whitespace grapheme
                // (e.g. tab rendered wider) would desync — TODO confirm.
                row += 1;
                col = 1; // Position after wrapping space
                word_start_col = 1;
                word_start_idx = byte_offset + grapheme.len();
            } else {
                col += grapheme_width;
                // The next word (if any) starts right after this whitespace.
                word_start_col = col;
                word_start_idx = byte_offset + grapheme.len();
            }
        } else if col + grapheme_width > width {
            if word_start_col > 0 && byte_offset == word_start_idx {
                // This is the first character of a new word that won't fit, wrap it
                row += 1;
                col = grapheme_width;
            } else if word_start_col == 0 {
                // No previous word boundary, hard break
                row += 1;
                col = grapheme_width;
            } else {
                // This is part of a word already on the line, let it extend beyond width
                col += grapheme_width;
            }
        } else {
            col += grapheme_width;
        }
        // Set position for the end of this grapheme and any intermediate bytes
        let end_pos = ScreenPos {
            row: row as u16,
            col: col as u16,
        };
        for i in 1..=grapheme.len() {
            if byte_offset + i < pos_map.len() {
                pos_map[byte_offset + i] = end_pos;
            }
        }
    }
    pos_map
}
/// Screen position of the cursor after `byte_idx` in `text` wrapped at `width`.
///
/// Convenience wrapper over `build_cursor_map`: builds the full map and
/// indexes it once, clamping `byte_idx` to the text length.
pub fn byte_to_screen_pos(text: &str, byte_idx: usize, width: u16) -> ScreenPos {
    let clamped = byte_idx.min(text.len());
    build_cursor_map(text, width)[clamped]
}

View File

@@ -0,0 +1,115 @@
use owlen_core::wrap_cursor::build_cursor_map;
// Regression/diagnostic test: a very long unbroken word must be hard-broken
// rather than kept on one line. Prints the cursor map and the derived line
// layout to aid debugging when the assertion fails.
#[test]
fn debug_long_word_wrapping() {
    // Test the exact scenario from the user's issue
    let text = "asdnklasdnaklsdnkalsdnaskldaskldnaskldnaskldnaskldnaskldnaskldnaskld asdnklska dnskadl dasnksdl asdn";
    let width = 50; // Approximate width from the user's example
    println!("Testing long word text with width {}", width);
    println!("Text: '{}'", text);
    // Check what the cursor map shows
    let cursor_map = build_cursor_map(text, width);
    println!("\nCursor map for key positions:");
    // End of the first (long) word — the first space, or the whole text.
    let long_word_end = text.find(' ').unwrap_or(text.len());
    for i in [
        0,
        10,
        20,
        30,
        40,
        50,
        60,
        70,
        long_word_end,
        long_word_end + 1,
        text.len(),
    ] {
        if i <= text.len() {
            let pos = cursor_map[i];
            // NOTE(review): `chars().nth(i)` treats `i` as a char index while
            // the map is byte-indexed; fine for this ASCII fixture.
            let char_at = if i < text.len() {
                format!("'{}'", text.chars().nth(i).unwrap_or('?'))
            } else {
                "END".to_string()
            };
            println!(
                "  Byte {}: {} -> row {}, col {}",
                i, char_at, pos.row, pos.col
            );
        }
    }
    // Test what my formatting function produces
    let lines = format_text_with_word_wrap_debug(text, width);
    println!("\nFormatted lines:");
    for (i, line) in lines.iter().enumerate() {
        println!("  Line {}: '{}' (length: {})", i, line, line.len());
    }
    // The long word should be broken up, not kept on one line
    // (small slack of 5 chars tolerated over the nominal width).
    assert!(
        lines[0].len() <= width as usize + 5,
        "First line is too long: {} chars",
        lines[0].len()
    );
}
/// Test-local helper: derive wrapped display lines from `build_cursor_map`.
///
/// Walks the text char by char, starting a new output line whenever the
/// mapped row advances, and dropping the whitespace character that caused a
/// wrap (it is consumed by the wrap itself). Verbose `println!` tracing is
/// intentional — this exists to debug the cursor map.
fn format_text_with_word_wrap_debug(text: &str, width: u16) -> Vec<String> {
    if text.is_empty() {
        return vec!["".to_string()];
    }
    // Use the cursor map to determine where line breaks should occur
    let cursor_map = build_cursor_map(text, width);
    let mut lines = Vec::new();
    let mut current_line = String::new();
    let mut current_row = 0;
    for (byte_idx, ch) in text.char_indices() {
        // Position before this char (map entry at its starting byte) and
        // after it (entry just past its last byte).
        let pos_before = if byte_idx > 0 {
            cursor_map[byte_idx]
        } else {
            cursor_map[0]
        };
        let pos_after = cursor_map[byte_idx + ch.len_utf8()];
        println!(
            "Processing '{}' at byte {}: before=({},{}) after=({},{})",
            ch, byte_idx, pos_before.row, pos_before.col, pos_after.row, pos_after.col
        );
        // If the row changed, we need to start a new line
        if pos_after.row > current_row {
            println!(
                "  Row changed from {} to {}! Finishing line: '{}'",
                current_row, pos_after.row, current_line
            );
            if !current_line.is_empty() {
                lines.push(current_line.clone());
                current_line.clear();
            }
            current_row = pos_after.row;
            // If this character is a space that caused the wrap, don't include it
            if ch.is_whitespace() && pos_before.row < pos_after.row {
                println!("  Skipping wrapping space");
                continue; // Skip the wrapping space
            }
        }
        current_line.push(ch);
    }
    // Add the final line
    if !current_line.is_empty() {
        lines.push(current_line);
    } else if lines.is_empty() {
        lines.push("".to_string());
    }
    lines
}

View File

@@ -0,0 +1,96 @@
#![allow(non_snake_case)]
use owlen_core::wrap_cursor::{build_cursor_map, ScreenPos};
/// Assert that the map entry at `byte_idx` matches `expected`, reporting the
/// offending byte offset on failure.
fn assert_cursor_pos(map: &[ScreenPos], byte_idx: usize, expected: ScreenPos) {
    let actual = map[byte_idx];
    assert_eq!(actual, expected, "Mismatch at byte {}", byte_idx);
}
// A space crossing the width boundary wraps; the cursor lands at col 1 on
// the next row (just after the wrapped space).
#[test]
fn test_basic_wrap_at_spaces() {
    let text = "hello world";
    let width = 5;
    let map = build_cursor_map(text, width);
    assert_cursor_pos(&map, 0, ScreenPos { row: 0, col: 0 });
    assert_cursor_pos(&map, 5, ScreenPos { row: 0, col: 5 }); // after "hello"
    assert_cursor_pos(&map, 6, ScreenPos { row: 1, col: 1 }); // after "hello "
    assert_cursor_pos(&map, 11, ScreenPos { row: 1, col: 6 }); // after "world"
}
// An explicit '\n' resets the column to 0 on the next row.
#[test]
fn test_hard_line_break() {
    let text = "a\nb";
    let width = 10;
    let map = build_cursor_map(text, width);
    assert_cursor_pos(&map, 0, ScreenPos { row: 0, col: 0 });
    assert_cursor_pos(&map, 1, ScreenPos { row: 0, col: 1 }); // after "a"
    assert_cursor_pos(&map, 2, ScreenPos { row: 1, col: 0 }); // after "\n"
    assert_cursor_pos(&map, 3, ScreenPos { row: 1, col: 1 }); // after "b"
}
// A word longer than the width and with no break point is hard-broken every
// `width` cells.
#[test]
fn test_long_word_split() {
    let text = "abcdefgh";
    let width = 3;
    let map = build_cursor_map(text, width);
    assert_cursor_pos(&map, 0, ScreenPos { row: 0, col: 0 });
    assert_cursor_pos(&map, 1, ScreenPos { row: 0, col: 1 });
    assert_cursor_pos(&map, 2, ScreenPos { row: 0, col: 2 });
    assert_cursor_pos(&map, 3, ScreenPos { row: 0, col: 3 });
    assert_cursor_pos(&map, 4, ScreenPos { row: 1, col: 1 });
    assert_cursor_pos(&map, 5, ScreenPos { row: 1, col: 2 });
    assert_cursor_pos(&map, 6, ScreenPos { row: 1, col: 3 });
    assert_cursor_pos(&map, 7, ScreenPos { row: 2, col: 1 });
    assert_cursor_pos(&map, 8, ScreenPos { row: 2, col: 2 });
}
// A space that would overflow the row wraps and still occupies one cell on
// the next row, so the following character starts at col 1.
#[test]
fn test_trailing_spaces_preserved() {
    let text = "x y";
    let width = 2;
    let map = build_cursor_map(text, width);
    assert_cursor_pos(&map, 0, ScreenPos { row: 0, col: 0 });
    assert_cursor_pos(&map, 1, ScreenPos { row: 0, col: 1 }); // after "x"
    assert_cursor_pos(&map, 2, ScreenPos { row: 0, col: 2 }); // after "x "
    assert_cursor_pos(&map, 3, ScreenPos { row: 1, col: 1 }); // after "x  "
    assert_cursor_pos(&map, 4, ScreenPos { row: 1, col: 2 }); // after "y"
}
// Emoji are 4-byte, 2-cell graphemes: interior bytes map to the cluster's
// end cell, and a 2-cell emoji wraps when only 1 cell remains.
#[test]
fn test_graphemes_emoji() {
    let text = "🙂🙂a";
    let width = 3;
    let map = build_cursor_map(text, width);
    assert_cursor_pos(&map, 0, ScreenPos { row: 0, col: 0 });
    assert_cursor_pos(&map, 4, ScreenPos { row: 0, col: 2 }); // after first emoji
    assert_cursor_pos(&map, 8, ScreenPos { row: 1, col: 2 }); // after second emoji
    assert_cursor_pos(&map, 9, ScreenPos { row: 1, col: 3 }); // after "a"
}
// A combining mark joins the preceding char into one 1-cell grapheme, so
// the position after the mark equals the position after the base char.
#[test]
fn test_graphemes_combining() {
    let text = "e\u{0301}";
    let width = 10;
    let map = build_cursor_map(text, width);
    assert_cursor_pos(&map, 0, ScreenPos { row: 0, col: 0 });
    assert_cursor_pos(&map, 1, ScreenPos { row: 0, col: 1 }); // after "e"
    assert_cursor_pos(&map, 3, ScreenPos { row: 0, col: 1 }); // after combining mark
}
// A word filling the row exactly, followed by a space: the space wraps and
// the next word continues on the new row.
#[test]
fn test_exact_edge() {
    let text = "abc def";
    let width = 3;
    let map = build_cursor_map(text, width);
    assert_cursor_pos(&map, 0, ScreenPos { row: 0, col: 0 });
    assert_cursor_pos(&map, 3, ScreenPos { row: 0, col: 3 }); // after "abc"
    assert_cursor_pos(&map, 4, ScreenPos { row: 1, col: 1 }); // after " "
    assert_cursor_pos(&map, 7, ScreenPos { row: 1, col: 4 }); // after "def"
}

View File

@@ -0,0 +1,5 @@
# Owlen Gemini
This crate is a placeholder for a future `owlen-core::Provider` implementation for the Google Gemini API.
This provider is not yet implemented. Contributions are welcome!

View File

@@ -0,0 +1,34 @@
[package]
name = "owlen-ollama"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Ollama provider for OWLEN LLM client"

[dependencies]
owlen-core = { path = "../owlen-core" }

# Async runtime and streaming
futures = { workspace = true }
futures-util = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }

# HTTP client
reqwest = { workspace = true }

# Serialization
serde = { workspace = true }
serde_json = { workspace = true }

# Utilities
anyhow = { workspace = true }
async-trait = { workspace = true }
thiserror = { workspace = true }
uuid = { workspace = true }

[dev-dependencies]
tokio-test = { workspace = true }

View File

@@ -0,0 +1,9 @@
# Owlen Ollama
This crate provides an implementation of the `owlen-core::Provider` trait for the [Ollama](https://ollama.ai) backend.
It allows Owlen to communicate with a local Ollama instance, sending requests and receiving responses from locally-run large language models.
## Configuration
To use this provider, you need to have Ollama installed and running. The default address is `http://localhost:11434`. You can configure this in your `config.toml` if your Ollama instance is running elsewhere.

View File

@@ -0,0 +1,530 @@
//! Ollama provider for OWLEN LLM client
use futures_util::StreamExt;
use owlen_core::{
config::GeneralSettings,
model::ModelManager,
provider::{ChatStream, Provider, ProviderConfig},
types::{ChatParameters, ChatRequest, ChatResponse, Message, ModelInfo, Role, TokenUsage},
Result,
};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::io;
use std::time::Duration;
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
/// Default HTTP request timeout for Ollama calls, in seconds.
const DEFAULT_TIMEOUT_SECS: u64 = 120;
/// Default lifetime of the cached model listing (`/api/tags`), in seconds.
const DEFAULT_MODEL_CACHE_TTL_SECS: u64 = 60;
/// Ollama provider implementation with enhanced configuration and caching
pub struct OllamaProvider {
    /// Shared HTTP client configured with the request timeout.
    client: Client,
    /// API root, stored without a trailing slash.
    base_url: String,
    /// TTL-based cache for model listings.
    model_manager: ModelManager,
}
/// Options for configuring the Ollama provider
pub struct OllamaOptions {
    /// API root, e.g. `http://localhost:11434`.
    pub base_url: String,
    /// Timeout applied to every HTTP request.
    pub request_timeout: Duration,
    /// How long cached model listings stay fresh.
    pub model_cache_ttl: Duration,
}
impl OllamaOptions {
pub fn new(base_url: impl Into<String>) -> Self {
Self {
base_url: base_url.into(),
request_timeout: Duration::from_secs(DEFAULT_TIMEOUT_SECS),
model_cache_ttl: Duration::from_secs(DEFAULT_MODEL_CACHE_TTL_SECS),
}
}
pub fn with_general(mut self, general: &GeneralSettings) -> Self {
self.model_cache_ttl = general.model_cache_ttl();
self
}
}
/// Ollama-specific message format
///
/// Wire shape for a single chat message: role is a plain string
/// ("user"/"assistant"/"system").
#[derive(Debug, Clone, Serialize, Deserialize)]
struct OllamaMessage {
    role: String,
    content: String,
}
/// Ollama chat request format
///
/// `options` is flattened into the top-level JSON object alongside the
/// named fields (temperature, num_predict, and any extras).
#[derive(Debug, Serialize)]
struct OllamaChatRequest {
    model: String,
    messages: Vec<OllamaMessage>,
    stream: bool,
    #[serde(flatten)]
    options: HashMap<String, Value>,
}
/// Ollama chat response format
///
/// Also used for each streamed NDJSON chunk; `done` marks the final chunk,
/// and `error` carries inline errors Ollama may return with HTTP 200.
#[derive(Debug, Deserialize)]
struct OllamaChatResponse {
    message: Option<OllamaMessage>,
    done: bool,
    /// Prompt token count, present on the final response.
    #[serde(default)]
    prompt_eval_count: Option<u32>,
    /// Completion token count, present on the final response.
    #[serde(default)]
    eval_count: Option<u32>,
    #[serde(default)]
    error: Option<String>,
}
/// Shape of Ollama's `{"error": "..."}` error body.
#[derive(Debug, Deserialize)]
struct OllamaErrorResponse {
    error: Option<String>,
}
/// Ollama models list response (`GET /api/tags`)
#[derive(Debug, Deserialize)]
struct OllamaModelsResponse {
    models: Vec<OllamaModelInfo>,
}
/// Ollama model information
#[derive(Debug, Deserialize)]
struct OllamaModelInfo {
    /// Model tag, e.g. "llama3:latest"; used as both id and display name.
    name: String,
    #[serde(default)]
    details: Option<OllamaModelDetails>,
}
/// Subset of Ollama's model detail block that we surface (model family).
#[derive(Debug, Deserialize)]
struct OllamaModelDetails {
    #[serde(default)]
    family: Option<String>,
}
impl OllamaProvider {
    /// Create a new Ollama provider with sensible defaults
    pub fn new(base_url: impl Into<String>) -> Result<Self> {
        Self::with_options(OllamaOptions::new(base_url))
    }

    /// Create a provider from configuration settings
    ///
    /// Falls back to `http://localhost:11434` when no base URL is set.
    /// `timeout_secs` and `model_cache_ttl_secs` from the provider's `extra`
    /// map are honored (floored at 5 s); general settings, when present, are
    /// applied last and so win for the cache TTL.
    pub fn from_config(config: &ProviderConfig, general: Option<&GeneralSettings>) -> Result<Self> {
        let mut options = OllamaOptions::new(
            config
                .base_url
                .clone()
                .unwrap_or_else(|| "http://localhost:11434".to_string()),
        );
        if let Some(timeout) = config
            .extra
            .get("timeout_secs")
            .and_then(|value| value.as_u64())
        {
            // Floor at 5 s to avoid degenerate timeouts from bad config.
            options.request_timeout = Duration::from_secs(timeout.max(5));
        }
        if let Some(cache_ttl) = config
            .extra
            .get("model_cache_ttl_secs")
            .and_then(|value| value.as_u64())
        {
            options.model_cache_ttl = Duration::from_secs(cache_ttl.max(5));
        }
        if let Some(general) = general {
            options = options.with_general(general);
        }
        Self::with_options(options)
    }

    /// Create a provider from explicit options
    ///
    /// # Errors
    /// Returns `Error::Config` when the HTTP client cannot be built.
    pub fn with_options(options: OllamaOptions) -> Result<Self> {
        let client = Client::builder()
            .timeout(options.request_timeout)
            .build()
            .map_err(|e| owlen_core::Error::Config(format!("Failed to build HTTP client: {e}")))?;
        Ok(Self {
            client,
            // Normalize so `format!("{}/api/...")` joins without "//".
            base_url: options.base_url.trim_end_matches('/').to_string(),
            model_manager: ModelManager::new(options.model_cache_ttl),
        })
    }

    /// Accessor for the underlying model manager
    pub fn model_manager(&self) -> &ModelManager {
        &self.model_manager
    }

    /// Convert a core `Message` into Ollama's wire format.
    fn convert_message(message: &Message) -> OllamaMessage {
        OllamaMessage {
            role: match message.role {
                Role::User => "user".to_string(),
                Role::Assistant => "assistant".to_string(),
                Role::System => "system".to_string(),
            },
            content: message.content.clone(),
        }
    }

    /// Convert an Ollama message back into a core `Message`.
    /// Unrecognized role strings are treated as assistant output.
    fn convert_ollama_message(message: &OllamaMessage) -> Message {
        let role = match message.role.as_str() {
            "user" => Role::User,
            "assistant" => Role::Assistant,
            "system" => Role::System,
            _ => Role::Assistant,
        };
        Message::new(role, message.content.clone())
    }

    /// Build the Ollama `options` map from chat parameters.
    ///
    /// `temperature` and `max_tokens` (as `num_predict`) are added only when
    /// not already present in `extra`, so explicit extras take precedence.
    fn build_options(parameters: ChatParameters) -> HashMap<String, Value> {
        let mut options = parameters.extra;
        if let Some(temperature) = parameters.temperature {
            options
                .entry("temperature".to_string())
                .or_insert(json!(temperature as f64));
        }
        if let Some(max_tokens) = parameters.max_tokens {
            options
                .entry("num_predict".to_string())
                .or_insert(json!(max_tokens));
        }
        options
    }

    /// Fetch the model list from `GET /api/tags` (uncached).
    ///
    /// # Errors
    /// `Error::Network` on transport/HTTP failure, `Error::Serialization`
    /// on an unparsable body.
    async fn fetch_models(&self) -> Result<Vec<ModelInfo>> {
        let url = format!("{}/api/tags", self.base_url);
        let response = self
            .client
            .get(&url)
            .send()
            .await
            .map_err(|e| owlen_core::Error::Network(format!("Failed to fetch models: {e}")))?;
        if !response.status().is_success() {
            let code = response.status();
            let error = parse_error_body(response).await;
            return Err(owlen_core::Error::Network(format!(
                "Ollama model listing failed ({code}): {error}"
            )));
        }
        let body = response.text().await.map_err(|e| {
            owlen_core::Error::Network(format!("Failed to read models response: {e}"))
        })?;
        let ollama_response: OllamaModelsResponse =
            serde_json::from_str(&body).map_err(owlen_core::Error::Serialization)?;
        let models = ollama_response
            .models
            .into_iter()
            .map(|model| ModelInfo {
                id: model.name.clone(),
                name: model.name.clone(),
                description: model
                    .details
                    .as_ref()
                    .and_then(|d| d.family.as_ref().map(|f| format!("Ollama {f} model"))),
                provider: "ollama".to_string(),
                // The tags endpoint does not report a context window.
                context_window: None,
                capabilities: vec!["chat".to_string()],
            })
            .collect();
        Ok(models)
    }
}
#[async_trait::async_trait]
impl Provider for OllamaProvider {
    fn name(&self) -> &str {
        "ollama"
    }

    /// List models, served from the cache while it is still fresh.
    async fn list_models(&self) -> Result<Vec<ModelInfo>> {
        self.model_manager
            .get_or_refresh(false, || async { self.fetch_models().await })
            .await
    }

    /// Single-shot (non-streaming) completion via `POST /api/chat`.
    ///
    /// # Errors
    /// `Error::Network` for transport/HTTP failures, `Error::Serialization`
    /// for unparsable bodies, `Error::Provider` for inline Ollama errors or
    /// a response with no message.
    async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
        let ChatRequest {
            model,
            messages,
            parameters,
        } = request;
        let messages: Vec<OllamaMessage> = messages.iter().map(Self::convert_message).collect();
        let options = Self::build_options(parameters);
        let ollama_request = OllamaChatRequest {
            model,
            messages,
            stream: false,
            options,
        };
        let url = format!("{}/api/chat", self.base_url);
        let response = self
            .client
            .post(&url)
            .json(&ollama_request)
            .send()
            .await
            .map_err(|e| owlen_core::Error::Network(format!("Chat request failed: {e}")))?;
        if !response.status().is_success() {
            let code = response.status();
            let error = parse_error_body(response).await;
            return Err(owlen_core::Error::Network(format!(
                "Ollama chat failed ({code}): {error}"
            )));
        }
        let body = response.text().await.map_err(|e| {
            owlen_core::Error::Network(format!("Failed to read chat response: {e}"))
        })?;
        let mut ollama_response: OllamaChatResponse =
            serde_json::from_str(&body).map_err(owlen_core::Error::Serialization)?;
        // Ollama can return HTTP 200 with an inline error payload.
        if let Some(error) = ollama_response.error.take() {
            return Err(owlen_core::Error::Provider(anyhow::anyhow!(error)));
        }
        let message = match ollama_response.message {
            Some(ref msg) => Self::convert_ollama_message(msg),
            None => {
                return Err(owlen_core::Error::Provider(anyhow::anyhow!(
                    "Ollama response missing message"
                )))
            }
        };
        // Usage is reported only when both token counts are present.
        let usage = if let (Some(prompt_tokens), Some(completion_tokens)) = (
            ollama_response.prompt_eval_count,
            ollama_response.eval_count,
        ) {
            Some(TokenUsage {
                prompt_tokens,
                completion_tokens,
                total_tokens: prompt_tokens + completion_tokens,
            })
        } else {
            None
        };
        Ok(ChatResponse {
            message,
            usage,
            is_streaming: false,
            is_final: true,
        })
    }

    /// Streaming completion via `POST /api/chat` with `stream: true`.
    ///
    /// Ollama emits newline-delimited JSON. A spawned task re-frames the
    /// byte chunks into lines, parses each into a `ChatResponse` chunk, and
    /// forwards them over an unbounded channel backing the returned stream.
    /// The task exits on the `done` chunk, on any error, or when the
    /// receiver is dropped.
    async fn chat_stream(&self, request: ChatRequest) -> Result<ChatStream> {
        let ChatRequest {
            model,
            messages,
            parameters,
        } = request;
        let messages: Vec<OllamaMessage> = messages.iter().map(Self::convert_message).collect();
        let options = Self::build_options(parameters);
        let ollama_request = OllamaChatRequest {
            model,
            messages,
            stream: true,
            options,
        };
        let url = format!("{}/api/chat", self.base_url);
        let response = self
            .client
            .post(&url)
            .json(&ollama_request)
            .send()
            .await
            .map_err(|e| owlen_core::Error::Network(format!("Streaming request failed: {e}")))?;
        if !response.status().is_success() {
            let code = response.status();
            let error = parse_error_body(response).await;
            return Err(owlen_core::Error::Network(format!(
                "Ollama streaming chat failed ({code}): {error}"
            )));
        }
        let (tx, rx) = mpsc::unbounded_channel();
        let mut stream = response.bytes_stream();
        tokio::spawn(async move {
            // Accumulates partial lines across network chunks.
            // NOTE(review): a final JSON object not terminated by '\n' when
            // the stream closes would be silently dropped — confirm Ollama
            // always newline-terminates its last frame.
            let mut buffer = String::new();
            while let Some(chunk) = stream.next().await {
                match chunk {
                    Ok(bytes) => {
                        if let Ok(text) = String::from_utf8(bytes.to_vec()) {
                            buffer.push_str(&text);
                            // Drain every complete line currently buffered.
                            while let Some(pos) = buffer.find('\n') {
                                let mut line = buffer[..pos].trim().to_string();
                                buffer.drain(..=pos);
                                if line.is_empty() {
                                    continue;
                                }
                                // Handle CRLF framing.
                                if line.ends_with('\r') {
                                    line.pop();
                                }
                                match serde_json::from_str::<OllamaChatResponse>(&line) {
                                    Ok(mut ollama_response) => {
                                        // Inline error chunk: surface and stop.
                                        if let Some(error) = ollama_response.error.take() {
                                            let _ = tx.send(Err(owlen_core::Error::Provider(
                                                anyhow::anyhow!(error),
                                            )));
                                            break;
                                        }
                                        if let Some(message) = ollama_response.message {
                                            let mut chat_response = ChatResponse {
                                                message: Self::convert_ollama_message(&message),
                                                usage: None,
                                                is_streaming: true,
                                                is_final: ollama_response.done,
                                            };
                                            // Token counts arrive on the final chunk.
                                            if let (Some(prompt_tokens), Some(completion_tokens)) = (
                                                ollama_response.prompt_eval_count,
                                                ollama_response.eval_count,
                                            ) {
                                                chat_response.usage = Some(TokenUsage {
                                                    prompt_tokens,
                                                    completion_tokens,
                                                    total_tokens: prompt_tokens + completion_tokens,
                                                });
                                            }
                                            // send() fails only when the receiver
                                            // is gone; stop producing then.
                                            if tx.send(Ok(chat_response)).is_err() {
                                                break;
                                            }
                                            if ollama_response.done {
                                                break;
                                            }
                                        }
                                    }
                                    Err(e) => {
                                        let _ = tx.send(Err(owlen_core::Error::Serialization(e)));
                                        break;
                                    }
                                }
                            }
                        } else {
                            let _ = tx.send(Err(owlen_core::Error::Serialization(
                                serde_json::Error::io(io::Error::new(
                                    io::ErrorKind::InvalidData,
                                    "Non UTF-8 chunk from Ollama",
                                )),
                            )));
                            break;
                        }
                    }
                    Err(e) => {
                        let _ = tx.send(Err(owlen_core::Error::Network(format!(
                            "Stream error: {e}"
                        ))));
                        break;
                    }
                }
            }
        });
        let stream = UnboundedReceiverStream::new(rx);
        Ok(Box::pin(stream))
    }

    /// Liveness probe against `GET /api/version`.
    async fn health_check(&self) -> Result<()> {
        let url = format!("{}/api/version", self.base_url);
        let response = self
            .client
            .get(&url)
            .send()
            .await
            .map_err(|e| owlen_core::Error::Network(format!("Health check failed: {e}")))?;
        if response.status().is_success() {
            Ok(())
        } else {
            Err(owlen_core::Error::Network(format!(
                "Ollama health check failed: HTTP {}",
                response.status()
            )))
        }
    }

    /// JSON schema describing the provider's configuration keys; defaults
    /// mirror the module constants.
    fn config_schema(&self) -> serde_json::Value {
        serde_json::json!({
            "type": "object",
            "properties": {
                "base_url": {
                    "type": "string",
                    "description": "Base URL for Ollama API",
                    "default": "http://localhost:11434"
                },
                "timeout_secs": {
                    "type": "integer",
                    "description": "HTTP request timeout in seconds",
                    "minimum": 5,
                    "default": DEFAULT_TIMEOUT_SECS
                },
                "model_cache_ttl_secs": {
                    "type": "integer",
                    "description": "Seconds to cache model listings",
                    "minimum": 5,
                    "default": DEFAULT_MODEL_CACHE_TTL_SECS
                }
            }
        })
    }
}
async fn parse_error_body(response: reqwest::Response) -> String {
match response.bytes().await {
Ok(bytes) => {
if bytes.is_empty() {
return "unknown error".to_string();
}
if let Ok(err) = serde_json::from_slice::<OllamaErrorResponse>(&bytes) {
if let Some(error) = err.error {
return error;
}
}
match String::from_utf8(bytes.to_vec()) {
Ok(text) if !text.trim().is_empty() => text,
_ => "unknown error".to_string(),
}
}
Err(_) => "unknown error".to_string(),
}
}

View File

@@ -0,0 +1,5 @@
# Owlen OpenAI
This crate is a placeholder for a future `owlen_core::Provider` implementation for the OpenAI API.
This provider is not yet implemented. Contributions are welcome!

View File

@@ -0,0 +1,32 @@
[package]
name = "owlen-tui"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Terminal User Interface for OWLEN LLM client"
[dependencies]
owlen-core = { path = "../owlen-core" }
# TUI framework
ratatui = { workspace = true }
crossterm = { workspace = true }
tui-textarea = { workspace = true }
textwrap = { workspace = true }
unicode-width = "0.1"
# Async runtime
tokio = { workspace = true }
tokio-util = { workspace = true }
futures-util = { workspace = true }
# Utilities
anyhow = { workspace = true }
uuid = { workspace = true }
[dev-dependencies]
tokio-test = { workspace = true }
tempfile = { workspace = true }

View File

@@ -0,0 +1,12 @@
# Owlen TUI
This crate contains all the logic for the terminal user interface (TUI) of Owlen.
It is built using the excellent [`ratatui`](https://ratatui.rs) library and is responsible for rendering the chat interface, handling user input, and managing the application state.
## Features
- **Chat View**: A scrollable view of the conversation history.
- **Input Box**: A text input area for composing messages.
- **Model Selection**: An interface for switching between different models.
- **Event Handling**: A system for managing keyboard events and asynchronous operations.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,44 @@
use anyhow::Result;
use owlen_core::session::SessionController;
use owlen_core::ui::{AppState, InputMode};
use tokio::sync::mpsc;
use crate::chat_app::{ChatApp, SessionEvent};
use crate::events::Event;
const DEFAULT_SYSTEM_PROMPT: &str =
"You are OWLEN Code Assistant. Provide detailed, actionable programming help.";
/// Code-assistant variant of the chat TUI.
///
/// A thin wrapper around `ChatApp` that seeds the conversation with a
/// coding-focused system prompt; everything else is delegated.
pub struct CodeApp {
    // Underlying chat application that does the real work.
    inner: ChatApp,
}
impl CodeApp {
    /// Build a code app: pushes `DEFAULT_SYSTEM_PROMPT` into the session's
    /// conversation, then wraps a fresh `ChatApp`. Returns the app and the
    /// receiver for its session events.
    pub fn new(mut controller: SessionController) -> (Self, mpsc::UnboundedReceiver<SessionEvent>) {
        controller
            .conversation_mut()
            .push_system_message(DEFAULT_SYSTEM_PROMPT.to_string());
        let (inner, rx) = ChatApp::new(controller);
        (Self { inner }, rx)
    }
    /// Delegate a terminal event to the wrapped `ChatApp`.
    pub async fn handle_event(&mut self, event: Event) -> Result<AppState> {
        self.inner.handle_event(event).await
    }
    /// Delegate a session event to the wrapped `ChatApp`.
    pub fn handle_session_event(&mut self, event: SessionEvent) -> Result<()> {
        self.inner.handle_session_event(event)
    }
    /// Current input mode of the wrapped app.
    pub fn mode(&self) -> InputMode {
        self.inner.mode()
    }
    /// Shared access to the wrapped `ChatApp` (e.g. for rendering).
    pub fn inner(&self) -> &ChatApp {
        &self.inner
    }
    /// Exclusive access to the wrapped `ChatApp`.
    pub fn inner_mut(&mut self) -> &mut ChatApp {
        &mut self.inner
    }
}

View File

@@ -0,0 +1,16 @@
pub use owlen_core::config::{
default_config_path, ensure_ollama_config, session_timeout, Config, GeneralSettings,
InputSettings, StorageSettings, UiSettings, DEFAULT_CONFIG_PATH,
};
/// Attempt to load configuration from default location
///
/// Returns `None` when the file is missing or fails to load; callers are
/// expected to fall back to defaults.
pub fn try_load_config() -> Option<Config> {
    Config::load(None).ok()
}
/// Persist configuration to default path
///
/// NOTE(review): `anyhow!(e.to_string())` flattens the error to its display
/// string, dropping the source chain — confirm callers never need it.
pub fn save_config(config: &Config) -> anyhow::Result<()> {
    config
        .save(None)
        .map_err(|e| anyhow::anyhow!(e.to_string()))
}

View File

@@ -0,0 +1,210 @@
use crossterm::event::{self, KeyCode, KeyEvent, KeyEventKind, KeyModifiers};
use std::time::Duration;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
/// Application events
///
/// Produced by the `EventHandler` pump and consumed by the application
/// over an unbounded channel.
#[derive(Debug, Clone)]
pub enum Event {
    /// Terminal key press event
    Key(KeyEvent),
    /// Terminal resize event (width, height)
    #[allow(dead_code)]
    Resize(u16, u16),
    /// Paste event carrying the full pasted text
    Paste(String),
    /// Tick event for regular updates
    Tick,
}
/// Event handler that captures terminal events and sends them to the application
pub struct EventHandler {
    // Channel into the application's event loop.
    sender: mpsc::UnboundedSender<Event>,
    // Interval between Tick events; also the max blocking time per poll.
    tick_rate: Duration,
    // Cooperative shutdown signal, checked once per loop iteration.
    cancellation_token: CancellationToken,
}
impl EventHandler {
    /// Create a handler that forwards terminal events on `sender` until
    /// `cancellation_token` is cancelled.
    pub fn new(
        sender: mpsc::UnboundedSender<Event>,
        cancellation_token: CancellationToken,
    ) -> Self {
        Self {
            sender,
            tick_rate: Duration::from_millis(250), // 4 times per second
            cancellation_token,
        }
    }
    /// Event pump: polls crossterm for input, forwards key presses,
    /// resizes and pastes, and emits `Event::Tick` roughly every
    /// `tick_rate`.
    ///
    /// NOTE(review): `event::poll`/`event::read` block the current thread
    /// (up to `tick_rate` per iteration); confirm this future runs where
    /// blocking is acceptable (dedicated thread or `spawn_blocking`).
    pub async fn run(&self) {
        let mut last_tick = tokio::time::Instant::now();
        loop {
            if self.cancellation_token.is_cancelled() {
                break;
            }
            // Block at most until the next tick is due.
            let timeout = self
                .tick_rate
                .checked_sub(last_tick.elapsed())
                .unwrap_or(Duration::ZERO);
            if event::poll(timeout).unwrap_or(false) {
                // A read error falls through to the tick bookkeeping below;
                // the previous `continue` on error skipped it, starving
                // Tick events under persistent read failures.
                if let Ok(event) = event::read() {
                    match event {
                        crossterm::event::Event::Key(key) => {
                            // Only handle KeyEventKind::Press to avoid duplicate events
                            if key.kind == KeyEventKind::Press {
                                let _ = self.sender.send(Event::Key(key));
                            }
                        }
                        crossterm::event::Event::Resize(width, height) => {
                            let _ = self.sender.send(Event::Resize(width, height));
                        }
                        crossterm::event::Event::Paste(text) => {
                            let _ = self.sender.send(Event::Paste(text));
                        }
                        _ => {}
                    }
                }
            }
            if last_tick.elapsed() >= self.tick_rate {
                let _ = self.sender.send(Event::Tick);
                last_tick = tokio::time::Instant::now();
            }
        }
    }
}
/// Helper functions for key event handling
impl Event {
/// Check if this is a quit command (Ctrl+C or 'q')
pub fn is_quit(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Char('q'),
modifiers: KeyModifiers::NONE,
..
}) | Event::Key(KeyEvent {
code: KeyCode::Char('c'),
modifiers: KeyModifiers::CONTROL,
..
})
)
}
/// Check if this is an enter key press
pub fn is_enter(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Enter,
..
})
)
}
/// Check if this is a tab key press
#[allow(dead_code)]
pub fn is_tab(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Tab,
modifiers: KeyModifiers::NONE,
..
})
)
}
/// Check if this is a backspace
pub fn is_backspace(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Backspace,
..
})
)
}
/// Check if this is an escape key press
pub fn is_escape(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Esc,
..
})
)
}
/// Get the character if this is a character key event
pub fn as_char(&self) -> Option<char> {
match self {
Event::Key(KeyEvent {
code: KeyCode::Char(c),
modifiers: KeyModifiers::NONE,
..
}) => Some(*c),
Event::Key(KeyEvent {
code: KeyCode::Char(c),
modifiers: KeyModifiers::SHIFT,
..
}) => Some(*c),
_ => None,
}
}
/// Check if this is an up arrow key press
pub fn is_up(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Up,
..
})
)
}
/// Check if this is a down arrow key press
pub fn is_down(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Down,
..
})
)
}
/// Check if this is a left arrow key press
pub fn is_left(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Left,
..
})
)
}
/// Check if this is a right arrow key press
pub fn is_right(&self) -> bool {
matches!(
self,
Event::Key(KeyEvent {
code: KeyCode::Right,
..
})
)
}
}

View File

@@ -0,0 +1,24 @@
//! # Owlen TUI
//!
//! This crate contains all the logic for the terminal user interface (TUI) of Owlen.
//!
//! It is built using the excellent [`ratatui`](https://ratatui.rs) library and is responsible for
//! rendering the chat interface, handling user input, and managing the application state.
//!
//! ## Modules
//! - `chat_app`: The main application logic for the chat client.
//! - `code_app`: The main application logic for the experimental code client.
//! - `config`: TUI-specific configuration.
//! - `events`: Event handling for user input and other asynchronous actions.
//! - `ui`: The rendering logic for all TUI components.
pub mod chat_app;
pub mod code_app;
pub mod config;
pub mod events;
pub mod ui;
pub use chat_app::{ChatApp, SessionEvent};
pub use code_app::CodeApp;
pub use events::{Event, EventHandler};
pub use owlen_core::ui::{AppState, FocusedPanel, InputMode};

1850
crates/owlen-tui/src/ui.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,22 +0,0 @@
/target
### Rust template
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
### rust-analyzer template
# Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules)
rust-project.json

View File

@@ -1,15 +0,0 @@
[package]
name = "config-agent"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true
[dependencies]
serde = { version = "1", features = ["derive"] }
directories = "5"
figment = { version = "0.10", features = ["toml", "env"] }
permissions = { path = "../permissions" }
[dev-dependencies]
tempfile = "3.23.0"

View File

@@ -1,76 +0,0 @@
use directories::ProjectDirs;
use figment::{
Figment,
providers::{Env, Format, Serialized, Toml},
};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use permissions::{Mode, PermissionManager};
/// Agent configuration, merged from defaults, user/project TOML files and
/// environment variables (see `load_settings` for the precedence order).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Settings {
    /// Base URL of the Ollama server.
    #[serde(default = "default_ollama_url")]
    pub ollama_url: String,
    /// Model identifier to request from the server.
    #[serde(default = "default_model")]
    pub model: String,
    #[serde(default = "default_mode")]
    pub mode: String, // "plan" (read-only) for now
    #[serde(default)]
    pub api_key: Option<String>, // For Ollama Cloud or other API authentication
}
/// Default Ollama endpoint (local server).
fn default_ollama_url() -> String {
    String::from("http://localhost:11434")
}

/// Model requested when none is configured.
fn default_model() -> String {
    String::from("qwen3:8b")
}

/// Default permission mode; "plan" is read-only.
fn default_mode() -> String {
    String::from("plan")
}
impl Default for Settings {
    /// All-defaults configuration: local Ollama, "qwen3:8b", read-only
    /// "plan" mode, no API key.
    fn default() -> Self {
        Self {
            ollama_url: default_ollama_url(),
            model: default_model(),
            mode: default_mode(),
            api_key: None,
        }
    }
}
impl Settings {
    /// Create a PermissionManager based on the configured mode
    ///
    /// Unrecognized mode strings fall back to the read-only `Plan` mode,
    /// exactly as `get_mode` does.
    pub fn create_permission_manager(&self) -> PermissionManager {
        // Reuse get_mode() so the parse + fallback logic lives in one place
        // (previously duplicated here).
        PermissionManager::new(self.get_mode())
    }
    /// Get the Mode enum from the mode string
    ///
    /// Unrecognized values fall back to `Mode::Plan` (safest, read-only).
    pub fn get_mode(&self) -> Mode {
        Mode::from_str(&self.mode).unwrap_or(Mode::Plan)
    }
}
/// Load settings with layered precedence, lowest to highest:
/// struct defaults, user config file, project `.owlen.toml`, then
/// environment variables (later merges win).
pub fn load_settings(project_root: Option<&str>) -> Result<Settings, figment::Error> {
    let mut fig = Figment::from(Serialized::defaults(Settings::default()));
    // User file: ~/.config/owlen/config.toml
    if let Some(pd) = ProjectDirs::from("dev", "owlibou", "owlen") {
        let user = pd.config_dir().join("config.toml");
        fig = fig.merge(Toml::file(user));
    }
    // Project file: <root>/.owlen.toml
    if let Some(root) = project_root {
        fig = fig.merge(Toml::file(PathBuf::from(root).join(".owlen.toml")));
    }
    // Environment variables have highest precedence
    fig = fig.merge(Env::prefixed("OWLEN_").split("__"));
    // Support OLLAMA_API_KEY, OLLAMA_MODEL, etc. (without nesting)
    // NOTE(review): merged after OWLEN_, so on a key conflict the OLLAMA_*
    // value wins — confirm that ordering is intended.
    fig = fig.merge(Env::prefixed("OLLAMA_"));
    fig.extract()
}

View File

@@ -1,48 +0,0 @@
use config_agent::{load_settings, Settings};
use permissions::{Mode, PermissionDecision, Tool};
use std::{env, fs};
/// Env vars must override values from the project TOML file.
#[test]
fn precedence_env_overrides_files() {
    let tmp = tempfile::tempdir().unwrap();
    let project_file = tmp.path().join(".owlen.toml");
    fs::write(&project_file, r#"model="local-model""#).unwrap();
    // NOTE(review): the env var is process-global and never removed, so it
    // can leak into other tests running in the same process.
    unsafe { env::set_var("OWLEN_MODEL", "env-model"); }
    let s = load_settings(Some(tmp.path().to_str().unwrap())).unwrap();
    assert_eq!(s.model, "env-model");
}
/// The default mode must be the read-only "plan" mode (fail-safe default).
#[test]
fn default_mode_is_plan() {
    let s = Settings::default();
    assert_eq!(s.mode, "plan");
}
/// Default settings must yield a Plan-mode manager: reads allowed,
/// writes require asking.
#[test]
fn settings_create_permission_manager_with_plan_mode() {
    let s = Settings::default();
    let mgr = s.create_permission_manager();
    // Plan mode should allow read operations
    assert_eq!(mgr.check(Tool::Read, None), PermissionDecision::Allow);
    // Plan mode should ask for write operations
    assert_eq!(mgr.check(Tool::Write, None), PermissionDecision::Ask);
}
/// A mode string from the project file must round-trip into the Mode enum
/// and produce a matching PermissionManager.
#[test]
fn settings_parse_mode_from_config() {
    let tmp = tempfile::tempdir().unwrap();
    let project_file = tmp.path().join(".owlen.toml");
    fs::write(&project_file, r#"mode="code""#).unwrap();
    let s = load_settings(Some(tmp.path().to_str().unwrap())).unwrap();
    assert_eq!(s.mode, "code");
    assert_eq!(s.get_mode(), Mode::Code);
    let mgr = s.create_permission_manager();
    // Code mode should allow everything
    assert_eq!(mgr.check(Tool::Write, None), PermissionDecision::Allow);
    assert_eq!(mgr.check(Tool::Bash, None), PermissionDecision::Allow);
}

View File

@@ -1,16 +0,0 @@
[package]
name = "hooks"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true
[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tokio = { version = "1.39", features = ["process", "time", "io-util"] }
color-eyre = "0.6"
[dev-dependencies]
tempfile = "3.23.0"
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }

View File

@@ -1,171 +0,0 @@
use color_eyre::eyre::{Result, eyre};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::path::PathBuf;
use std::process::Stdio;
use tokio::io::AsyncWriteExt;
use tokio::process::Command;
use tokio::time::timeout;
use std::time::Duration;
/// Lifecycle events that can trigger a user-supplied hook script.
///
/// Serialized to JSON — tagged with an `event` field, camelCase names —
/// and written to the hook's stdin (see `HookManager::execute`).
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "event", rename_all = "camelCase")]
pub enum HookEvent {
    /// Fired before a tool runs; a hook may veto it with exit code 2.
    #[serde(rename_all = "camelCase")]
    PreToolUse {
        tool: String,
        args: Value,
    },
    /// Fired after a tool has produced a result.
    #[serde(rename_all = "camelCase")]
    PostToolUse {
        tool: String,
        result: Value,
    },
    #[serde(rename_all = "camelCase")]
    SessionStart {
        session_id: String,
    },
    #[serde(rename_all = "camelCase")]
    SessionEnd {
        session_id: String,
    },
    #[serde(rename_all = "camelCase")]
    UserPromptSubmit {
        prompt: String,
    },
    PreCompact,
}
impl HookEvent {
    /// Get the hook name for this event (used to find the hook script)
    ///
    /// The name doubles as the script's filename under `.owlen/hooks/`
    /// (see `HookManager::get_hook_path`).
    pub fn hook_name(&self) -> &str {
        match self {
            HookEvent::PreToolUse { .. } => "PreToolUse",
            HookEvent::PostToolUse { .. } => "PostToolUse",
            HookEvent::SessionStart { .. } => "SessionStart",
            HookEvent::SessionEnd { .. } => "SessionEnd",
            HookEvent::UserPromptSubmit { .. } => "UserPromptSubmit",
            HookEvent::PreCompact => "PreCompact",
        }
    }
}
/// Outcome of running a hook: exit code 0 maps to Allow, 2 to Deny.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum HookResult {
    Allow,
    Deny,
}
/// Locates and executes hook scripts under `<project_root>/.owlen/hooks/`.
pub struct HookManager {
    // Root used both for locating scripts and as their working directory.
    project_root: PathBuf,
}
impl HookManager {
    /// Create a manager rooted at `project_root`; hooks are looked up
    /// under `<project_root>/.owlen/hooks/<EventName>`.
    pub fn new(project_root: &str) -> Self {
        Self {
            project_root: PathBuf::from(project_root),
        }
    }
    /// Execute a hook for the given event
    ///
    /// The event is serialized to JSON and piped to the hook's stdin.
    ///
    /// Returns:
    /// - Ok(HookResult::Allow) if hook succeeds or doesn't exist (exit code 0 or no hook)
    /// - Ok(HookResult::Deny) if hook denies (exit code 2)
    /// - Err if hook fails (other exit codes) or times out
    pub async fn execute(&self, event: &HookEvent, timeout_ms: Option<u64>) -> Result<HookResult> {
        let hook_path = self.get_hook_path(event);
        // If hook doesn't exist, allow by default
        if !hook_path.exists() {
            return Ok(HookResult::Allow);
        }
        // Serialize event to JSON
        let input_json = serde_json::to_string(event)?;
        // Spawn the hook process. kill_on_drop ensures the child is killed
        // when a timeout drops the future that owns it (wait_with_output
        // consumes the child); previously a timed-out hook kept running
        // detached.
        let mut child = Command::new(&hook_path)
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .current_dir(&self.project_root)
            .kill_on_drop(true)
            .spawn()?;
        // Write JSON input to stdin, then close it so the hook sees EOF.
        if let Some(mut stdin) = child.stdin.take() {
            stdin.write_all(input_json.as_bytes()).await?;
            stdin.flush().await?;
            drop(stdin); // Close stdin
        }
        // Wait for process with timeout
        let result = if let Some(ms) = timeout_ms {
            timeout(Duration::from_millis(ms), child.wait_with_output()).await
        } else {
            Ok(child.wait_with_output().await)
        };
        match result {
            Ok(Ok(output)) => {
                // Map exit status to a decision: 0 = allow, 2 = deny,
                // anything else (or death by signal) is an error.
                match output.status.code() {
                    Some(0) => Ok(HookResult::Allow),
                    Some(2) => Ok(HookResult::Deny),
                    Some(code) => Err(eyre!(
                        "Hook {} failed with exit code {}: {}",
                        event.hook_name(),
                        code,
                        String::from_utf8_lossy(&output.stderr)
                    )),
                    None => Err(eyre!("Hook {} terminated by signal", event.hook_name())),
                }
            }
            Ok(Err(e)) => Err(eyre!("Failed to execute hook {}: {}", event.hook_name(), e)),
            Err(_) => Err(eyre!("Hook {} timed out", event.hook_name())),
        }
    }
    /// Script path for `event`: `<root>/.owlen/hooks/<EventName>`.
    fn get_hook_path(&self, event: &HookEvent) -> PathBuf {
        self.project_root
            .join(".owlen")
            .join("hooks")
            .join(event.hook_name())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // The `event` tag and camelCase renames must stay stable: hook
    // scripts parse this JSON.
    #[test]
    fn hook_event_serializes_correctly() {
        let event = HookEvent::PreToolUse {
            tool: "Read".to_string(),
            args: serde_json::json!({"path": "/tmp/test.txt"}),
        };
        let json = serde_json::to_string(&event).unwrap();
        assert!(json.contains("\"event\":\"preToolUse\""));
        assert!(json.contains("\"tool\":\"Read\""));
    }
    // hook_name() values double as script filenames under .owlen/hooks/.
    #[test]
    fn hook_event_names() {
        assert_eq!(
            HookEvent::PreToolUse {
                tool: "Read".to_string(),
                args: serde_json::json!({}),
            }
            .hook_name(),
            "PreToolUse"
        );
        assert_eq!(
            HookEvent::SessionStart {
                session_id: "123".to_string(),
            }
            .hook_name(),
            "SessionStart"
        );
    }
}

View File

@@ -1,160 +0,0 @@
use hooks::{HookEvent, HookManager, HookResult};
use std::fs;
use tempfile::tempdir;
/// A PreToolUse hook exiting 2 must deny the call; exit 0 must allow it.
#[tokio::test]
async fn pretooluse_can_deny_call() {
    let dir = tempdir().unwrap();
    let hooks_dir = dir.path().join(".owlen/hooks");
    fs::create_dir_all(&hooks_dir).unwrap();
    // Create a PreToolUse hook that denies Write operations
    let hook_script = r#"#!/bin/bash
INPUT=$(cat)
TOOL=$(echo "$INPUT" | grep -o '"tool":"[^"]*"' | cut -d'"' -f4)
if [ "$TOOL" = "Write" ]; then
exit 2 # Deny
fi
exit 0 # Allow
"#;
    let hook_path = hooks_dir.join("PreToolUse");
    fs::write(&hook_path, hook_script).unwrap();
    // Hook scripts must be executable (unix-only test).
    fs::set_permissions(&hook_path, std::os::unix::fs::PermissionsExt::from_mode(0o755)).unwrap();
    let manager = HookManager::new(dir.path().to_str().unwrap());
    // Test Write tool (should be denied)
    let write_event = HookEvent::PreToolUse {
        tool: "Write".to_string(),
        args: serde_json::json!({"path": "/tmp/test.txt", "content": "hello"}),
    };
    let result = manager.execute(&write_event, Some(5000)).await.unwrap();
    assert_eq!(result, HookResult::Deny);
    // Test Read tool (should be allowed)
    let read_event = HookEvent::PreToolUse {
        tool: "Read".to_string(),
        args: serde_json::json!({"path": "/tmp/test.txt"}),
    };
    let result = manager.execute(&read_event, Some(5000)).await.unwrap();
    assert_eq!(result, HookResult::Allow);
}
/// A PostToolUse hook must actually run and receive the event on stdin
/// (verified via a side-effect file it writes).
#[tokio::test]
async fn posttooluse_runs_parallel() {
    let dir = tempdir().unwrap();
    let hooks_dir = dir.path().join(".owlen/hooks");
    fs::create_dir_all(&hooks_dir).unwrap();
    let output_file = dir.path().join("hook_output.txt");
    // Create a PostToolUse hook that writes to a file
    let hook_script = format!(
        r#"#!/bin/bash
INPUT=$(cat)
echo "Hook executed: $INPUT" >> {}
exit 0
"#,
        output_file.display()
    );
    let hook_path = hooks_dir.join("PostToolUse");
    fs::write(&hook_path, hook_script).unwrap();
    fs::set_permissions(&hook_path, std::os::unix::fs::PermissionsExt::from_mode(0o755)).unwrap();
    let manager = HookManager::new(dir.path().to_str().unwrap());
    // Execute hook
    let event = HookEvent::PostToolUse {
        tool: "Read".to_string(),
        result: serde_json::json!({"success": true}),
    };
    let result = manager.execute(&event, Some(5000)).await.unwrap();
    assert_eq!(result, HookResult::Allow);
    // Verify hook ran
    let output = fs::read_to_string(&output_file).unwrap();
    assert!(output.contains("Hook executed"));
}
/// A SessionStart hook can persist state (here: an env file) for the
/// session; the hook itself just needs to exit 0.
#[tokio::test]
async fn sessionstart_persists_env() {
    let dir = tempdir().unwrap();
    let hooks_dir = dir.path().join(".owlen/hooks");
    fs::create_dir_all(&hooks_dir).unwrap();
    let env_file = dir.path().join(".owlen/session.env");
    // Create a SessionStart hook that writes env vars to a file
    let hook_script = format!(
        r#"#!/bin/bash
cat > {} <<EOF
MY_VAR=hello
ANOTHER_VAR=world
EOF
exit 0
"#,
        env_file.display()
    );
    let hook_path = hooks_dir.join("SessionStart");
    fs::write(&hook_path, hook_script).unwrap();
    fs::set_permissions(&hook_path, std::os::unix::fs::PermissionsExt::from_mode(0o755)).unwrap();
    let manager = HookManager::new(dir.path().to_str().unwrap());
    // Execute SessionStart hook
    let event = HookEvent::SessionStart {
        session_id: "test-123".to_string(),
    };
    let result = manager.execute(&event, Some(5000)).await.unwrap();
    assert_eq!(result, HookResult::Allow);
    // Verify env file was created
    assert!(env_file.exists());
    let content = fs::read_to_string(&env_file).unwrap();
    assert!(content.contains("MY_VAR=hello"));
    assert!(content.contains("ANOTHER_VAR=world"));
}
/// A hook running longer than the timeout must surface as an error
/// (not hang and not return a decision).
#[tokio::test]
async fn hook_timeout_works() {
    let dir = tempdir().unwrap();
    let hooks_dir = dir.path().join(".owlen/hooks");
    fs::create_dir_all(&hooks_dir).unwrap();
    // Create a hook that sleeps longer than the timeout
    let hook_script = r#"#!/bin/bash
sleep 10
exit 0
"#;
    let hook_path = hooks_dir.join("PreToolUse");
    fs::write(&hook_path, hook_script).unwrap();
    fs::set_permissions(&hook_path, std::os::unix::fs::PermissionsExt::from_mode(0o755)).unwrap();
    let manager = HookManager::new(dir.path().to_str().unwrap());
    let event = HookEvent::PreToolUse {
        tool: "Read".to_string(),
        args: serde_json::json!({"path": "/tmp/test.txt"}),
    };
    // Should timeout after 1000ms
    let result = manager.execute(&event, Some(1000)).await;
    assert!(result.is_err());
    let err_msg = result.unwrap_err().to_string();
    assert!(err_msg.contains("timeout") || err_msg.contains("timed out"));
}
/// A missing hook script is not an error: the default decision is Allow.
#[tokio::test]
async fn hook_not_found_is_ok() {
    let dir = tempdir().unwrap();
    let manager = HookManager::new(dir.path().to_str().unwrap());
    // No hooks directory exists, should just return Allow
    let event = HookEvent::PreToolUse {
        tool: "Read".to_string(),
        args: serde_json::json!({"path": "/tmp/test.txt"}),
    };
    let result = manager.execute(&event, Some(5000)).await.unwrap();
    assert_eq!(result, HookResult::Allow);
}

View File

@@ -1,10 +0,0 @@
[package]
name = "permissions"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true
[dependencies]
serde = { version = "1", features = ["derive"] }
thiserror = "1"

View File

@@ -1,240 +0,0 @@
use serde::{Deserialize, Serialize};
/// Tools the agent can invoke; permission rules and mode defaults are
/// keyed on this enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum Tool {
    Read,
    Write,
    Edit,
    Bash,
    Grep,
    Glob,
    WebFetch,
    WebSearch,
    NotebookRead,
    NotebookEdit,
    SlashCommand,
    Task,
    TodoWrite,
    // External MCP-server tools; rule patterns use "server__tool"
    // context strings (see the tests in this file).
    Mcp,
}
/// What an explicit permission rule does when it matches.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Action {
    Allow,
    Ask,
    Deny,
}
/// Default permission policy applied when no explicit rule matches
/// (see `PermissionManager::check_mode_default`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Mode {
    Plan,        // Read-only: Read/Grep/Glob allowed, others Ask
    AcceptEdits, // Auto-allow Edit/Write, Bash still Ask
    Code,        // Full access (all allowed)
}
impl Mode {
    /// Parse a mode name, case-insensitively.
    ///
    /// Accepts "plan", "code", and "acceptedits" / "accept_edits" /
    /// "accept-edits" for `AcceptEdits`; returns `None` for anything else.
    pub fn from_str(s: &str) -> Option<Self> {
        match s.to_lowercase().as_str() {
            "plan" => Some(Mode::Plan),
            // Also accept the kebab-case spelling users naturally write
            // in TOML config files (backward-compatible addition).
            "acceptedits" | "accept_edits" | "accept-edits" => Some(Mode::AcceptEdits),
            "code" => Some(Mode::Code),
            _ => None,
        }
    }
}
/// Outcome of a permission check: proceed, prompt the user, or refuse.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PermissionDecision {
    Allow,
    Ask,
    Deny,
}
/// An explicit permission rule, checked before mode defaults.
#[derive(Debug, Clone)]
pub struct PermissionRule {
    // Tool the rule applies to.
    pub tool: Tool,
    // Optional context pattern; `None` matches any context, a trailing
    // '*' makes it a prefix match (see `matches`).
    pub pattern: Option<String>,
    pub action: Action,
}
impl PermissionRule {
    /// Does this rule apply to `tool` in the given `context`?
    ///
    /// A rule without a pattern matches every context. A rule with a
    /// pattern requires a context and matches it exactly, or — when the
    /// pattern ends with '*' — by prefix (all trailing '*'s stripped).
    fn matches(&self, tool: Tool, context: Option<&str>) -> bool {
        // Different tool: never a match.
        if self.tool != tool {
            return false;
        }
        // No pattern means match all contexts.
        let pattern = match self.pattern.as_deref() {
            None => return true,
            Some(p) => p,
        };
        // Pattern specified but no context provided: cannot match.
        let ctx = match context {
            None => return false,
            Some(c) => c,
        };
        if pattern.ends_with('*') {
            ctx.starts_with(pattern.trim_end_matches('*'))
        } else {
            pattern == ctx
        }
    }
}
/// Permission policy: explicit rules layered over mode-based defaults.
#[derive(Debug)]
pub struct PermissionManager {
    // Fallback policy applied when no explicit rule matches.
    mode: Mode,
    // Explicit rules added via `add_rule`.
    rules: Vec<PermissionRule>,
}
impl PermissionManager {
    /// Create a manager with the given mode and no explicit rules.
    pub fn new(mode: Mode) -> Self {
        Self {
            mode,
            rules: Vec::new(),
        }
    }
    /// Register an explicit rule; explicit rules override mode defaults.
    pub fn add_rule(&mut self, tool: Tool, pattern: Option<String>, action: Action) {
        self.rules.push(PermissionRule {
            tool,
            pattern,
            action,
        });
    }
    /// Decide whether `tool` may run in `context`.
    ///
    /// Explicit rules are consulted first. Among matching rules, any Deny
    /// wins; otherwise the first match decides. (Previously the first
    /// matching rule won outright, so an Allow registered before a Deny
    /// would shadow it — contradicting the documented "Deny rules take
    /// precedence".) With no matching rule, mode defaults apply.
    pub fn check(&self, tool: Tool, context: Option<&str>) -> PermissionDecision {
        let mut first_match: Option<Action> = None;
        for rule in &self.rules {
            if rule.matches(tool, context) {
                // Deny rules take precedence over any other matching rule.
                if rule.action == Action::Deny {
                    return PermissionDecision::Deny;
                }
                first_match.get_or_insert(rule.action);
            }
        }
        match first_match {
            Some(Action::Allow) => PermissionDecision::Allow,
            Some(Action::Ask) => PermissionDecision::Ask,
            // Unreachable (Deny returns above); kept for exhaustiveness.
            Some(Action::Deny) => PermissionDecision::Deny,
            None => self.check_mode_default(tool),
        }
    }
    /// Mode fallback applied when no explicit rule matched.
    fn check_mode_default(&self, tool: Tool) -> PermissionDecision {
        match self.mode {
            Mode::Plan => match tool {
                // Read-only tools are allowed in plan mode
                Tool::Read | Tool::Grep | Tool::Glob | Tool::NotebookRead => {
                    PermissionDecision::Allow
                }
                // Everything else requires asking
                _ => PermissionDecision::Ask,
            },
            Mode::AcceptEdits => match tool {
                // Read operations allowed
                Tool::Read | Tool::Grep | Tool::Glob | Tool::NotebookRead => {
                    PermissionDecision::Allow
                }
                // Edit/Write operations allowed
                Tool::Edit | Tool::Write | Tool::NotebookEdit => PermissionDecision::Allow,
                // Bash and other dangerous operations still require asking
                Tool::Bash | Tool::WebFetch | Tool::WebSearch | Tool::Mcp => PermissionDecision::Ask,
                // Utility tools allowed
                Tool::TodoWrite | Tool::SlashCommand | Tool::Task => PermissionDecision::Allow,
            },
            Mode::Code => {
                // Everything allowed in code mode
                PermissionDecision::Allow
            }
        }
    }
    /// Switch the active mode at runtime.
    pub fn set_mode(&mut self, mode: Mode) {
        self.mode = mode;
    }
    /// The currently active mode.
    pub fn mode(&self) -> Mode {
        self.mode
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Exact match: pattern without '*' must equal the context string.
    #[test]
    fn pattern_exact_match() {
        let rule = PermissionRule {
            tool: Tool::Bash,
            pattern: Some("npm test".to_string()),
            action: Action::Allow,
        };
        assert!(rule.matches(Tool::Bash, Some("npm test")));
        assert!(!rule.matches(Tool::Bash, Some("npm install")));
        assert!(!rule.matches(Tool::Read, Some("npm test")));
    }
    // Trailing '*' turns the pattern into a prefix match.
    #[test]
    fn pattern_prefix_match() {
        let rule = PermissionRule {
            tool: Tool::Bash,
            pattern: Some("npm test:*".to_string()),
            action: Action::Allow,
        };
        assert!(rule.matches(Tool::Bash, Some("npm test:unit")));
        assert!(rule.matches(Tool::Bash, Some("npm test:integration")));
        assert!(!rule.matches(Tool::Bash, Some("npm install")));
    }
    // A patterned rule cannot match when the caller supplies no context.
    #[test]
    fn pattern_no_context() {
        let rule = PermissionRule {
            tool: Tool::Bash,
            pattern: Some("npm test".to_string()),
            action: Action::Allow,
        };
        // Pattern specified but no context provided
        assert!(!rule.matches(Tool::Bash, None));
    }
    // A rule with no pattern matches with or without context.
    #[test]
    fn no_pattern_matches_all() {
        let rule = PermissionRule {
            tool: Tool::Read,
            pattern: None,
            action: Action::Allow,
        };
        assert!(rule.matches(Tool::Read, Some("any context")));
        assert!(rule.matches(Tool::Read, None));
    }
    // MCP contexts use "server__tool"; a prefix rule covers a whole server.
    #[test]
    fn mcp_server_pattern_matching() {
        // Allow all tools from a specific server
        let rule = PermissionRule {
            tool: Tool::Mcp,
            pattern: Some("filesystem__*".to_string()),
            action: Action::Allow,
        };
        assert!(rule.matches(Tool::Mcp, Some("filesystem__read_file")));
        assert!(rule.matches(Tool::Mcp, Some("filesystem__write_file")));
        assert!(!rule.matches(Tool::Mcp, Some("database__query")));
    }
    // An exact MCP pattern pins a single tool on a single server.
    #[test]
    fn mcp_exact_tool_matching() {
        // Allow only a specific tool from a server
        let rule = PermissionRule {
            tool: Tool::Mcp,
            pattern: Some("filesystem__read_file".to_string()),
            action: Action::Allow,
        };
        assert!(rule.matches(Tool::Mcp, Some("filesystem__read_file")));
        assert!(!rule.matches(Tool::Mcp, Some("filesystem__write_file")));
    }
}

View File

@@ -1,85 +0,0 @@
use permissions::{PermissionManager, Mode, Tool, PermissionDecision};
/// Plan-mode defaults: read-only tools allowed, mutating tools ask.
#[test]
fn plan_mode_blocks_write_bash_by_default() {
    let mgr = PermissionManager::new(Mode::Plan);
    // Plan mode should allow read operations
    assert_eq!(mgr.check(Tool::Read, None), PermissionDecision::Allow);
    assert_eq!(mgr.check(Tool::Grep, None), PermissionDecision::Allow);
    assert_eq!(mgr.check(Tool::Glob, None), PermissionDecision::Allow);
    // Plan mode should ask for write operations
    assert_eq!(mgr.check(Tool::Write, None), PermissionDecision::Ask);
    assert_eq!(mgr.check(Tool::Edit, None), PermissionDecision::Ask);
    // Plan mode should ask for Bash
    assert_eq!(mgr.check(Tool::Bash, None), PermissionDecision::Ask);
}
/// AcceptEdits defaults: reads and edits auto-allowed, Bash still asks.
#[test]
fn accept_edits_mode_allows_edit_write() {
    let mgr = PermissionManager::new(Mode::AcceptEdits);
    // AcceptEdits mode should allow read operations
    assert_eq!(mgr.check(Tool::Read, None), PermissionDecision::Allow);
    // AcceptEdits mode should allow edit/write
    assert_eq!(mgr.check(Tool::Edit, None), PermissionDecision::Allow);
    assert_eq!(mgr.check(Tool::Write, None), PermissionDecision::Allow);
    // But still ask for Bash
    assert_eq!(mgr.check(Tool::Bash, None), PermissionDecision::Ask);
}
/// Code mode is full access: every tool allowed by default.
#[test]
fn code_mode_allows_everything() {
    let mgr = PermissionManager::new(Mode::Code);
    assert_eq!(mgr.check(Tool::Read, None), PermissionDecision::Allow);
    assert_eq!(mgr.check(Tool::Write, None), PermissionDecision::Allow);
    assert_eq!(mgr.check(Tool::Edit, None), PermissionDecision::Allow);
    assert_eq!(mgr.check(Tool::Bash, None), PermissionDecision::Allow);
}
/// An exact-command Allow rule overrides the Plan-mode Ask default only
/// for that command.
#[test]
fn bash_pattern_matching() {
    let mut mgr = PermissionManager::new(Mode::Plan);
    // Add a rule to allow "npm test"
    mgr.add_rule(Tool::Bash, Some("npm test".to_string()), permissions::Action::Allow);
    // Should allow the exact command
    assert_eq!(mgr.check(Tool::Bash, Some("npm test")), PermissionDecision::Allow);
    // Should still ask for other commands
    assert_eq!(mgr.check(Tool::Bash, Some("rm -rf /")), PermissionDecision::Ask);
}
/// A trailing-'*' Allow rule covers every command sharing the prefix.
#[test]
fn bash_prefix_matching() {
    let mut mgr = PermissionManager::new(Mode::Plan);
    // Add a rule to allow "npm test:*" (prefix match)
    mgr.add_rule(Tool::Bash, Some("npm test:*".to_string()), permissions::Action::Allow);
    // Should allow commands matching the prefix
    assert_eq!(mgr.check(Tool::Bash, Some("npm test:unit")), PermissionDecision::Allow);
    assert_eq!(mgr.check(Tool::Bash, Some("npm test:integration")), PermissionDecision::Allow);
    // Should not allow non-matching commands
    assert_eq!(mgr.check(Tool::Bash, Some("npm install")), PermissionDecision::Ask);
}
/// An explicit Deny rule must win even under the permissive Code mode.
#[test]
fn deny_rules_take_precedence() {
    let mut mgr = PermissionManager::new(Mode::Code);
    // Even in Code mode, we can deny specific operations
    mgr.add_rule(Tool::Bash, Some("rm -rf*".to_string()), permissions::Action::Deny);
    assert_eq!(mgr.check(Tool::Bash, Some("rm -rf /")), PermissionDecision::Deny);
    // But other commands are still allowed
    assert_eq!(mgr.check(Tool::Bash, Some("ls")), PermissionDecision::Allow);
}

View File

@@ -1,14 +0,0 @@
[package]
name = "tools-bash"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true
[dependencies]
tokio = { version = "1.39", features = ["process", "io-util", "time", "sync"] }
color-eyre = "0.6"
tempfile = "3.23.0"
[dev-dependencies]
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }

View File

@@ -1,170 +0,0 @@
use color_eyre::eyre::{Result, eyre};
use std::process::Stdio;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::process::{Child, Command};
use tokio::sync::Mutex;
use tokio::time::{timeout, Duration};
// Cap captured output so a chatty command cannot balloon memory.
const MAX_OUTPUT_LINES: usize = 2000;
const DEFAULT_TIMEOUT_MS: u64 = 120000; // 2 minutes
// Sentinel echoed after every command to delimit its output within the
// persistent session's stdout stream.
const COMMAND_DELIMITER: &str = "___OWLEN_CMD_END___";
/// Captured result of one command run in a `BashSession`.
#[derive(Debug, Clone)]
pub struct CommandOutput {
    // Captured stdout (possibly truncated at MAX_OUTPUT_LINES).
    pub stdout: String,
    // Captured stderr (best-effort; see execute_internal).
    pub stderr: String,
    pub exit_code: i32,
    pub success: bool,
}
/// A persistent bash process; commands share state (cwd, env) across calls.
pub struct BashSession {
    // The long-lived shell, serialized behind a Mutex.
    child: Mutex<Child>,
}
impl BashSession {
/// Create a new persistent bash session
///
/// Spawns `bash --norc --noprofile` with all three stdio streams piped;
/// `kill_on_drop` ties the shell's lifetime to this session object.
pub async fn new() -> Result<Self> {
    let child = Command::new("bash")
        .arg("--norc")
        .arg("--noprofile")
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .kill_on_drop(true)
        .spawn()?;
    // Verify the process started
    if child.stdin.is_none() || child.stdout.is_none() || child.stderr.is_none() {
        return Err(eyre!("Failed to capture bash process stdio"));
    }
    Ok(Self {
        child: Mutex::new(child),
    })
}
/// Execute a command in the persistent bash session
///
/// # Arguments
/// * `command` - The bash command to execute
/// * `timeout_ms` - Optional timeout in milliseconds (default: 2 minutes)
///
/// NOTE(review): takes `&mut self` although `child` is already behind a
/// `Mutex`; one of the two exclusivity mechanisms looks redundant — confirm.
pub async fn execute(&mut self, command: &str, timeout_ms: Option<u64>) -> Result<CommandOutput> {
    let timeout_duration = Duration::from_millis(timeout_ms.unwrap_or(DEFAULT_TIMEOUT_MS));
    // Bound the whole round-trip (write + read), not individual reads.
    let result = timeout(timeout_duration, self.execute_internal(command)).await;
    match result {
        Ok(output) => output,
        Err(_) => Err(eyre!("Command timed out after {}ms", timeout_duration.as_millis())),
    }
}
async fn execute_internal(&mut self, command: &str) -> Result<CommandOutput> {
let mut child = self.child.lock().await;
// Take ownership of stdio handles
let mut stdin = child.stdin.take().ok_or_else(|| eyre!("No stdin"))?;
let stdout = child.stdout.take().ok_or_else(|| eyre!("No stdout"))?;
let stderr = child.stderr.take().ok_or_else(|| eyre!("No stderr"))?;
// Write command with delimiter and exit code capture
let full_command = format!(
"{}\necho $? > /tmp/owlen_exit_code_$$.tmp\necho '{}'\n",
command, COMMAND_DELIMITER
);
stdin.write_all(full_command.as_bytes()).await?;
stdin.flush().await?;
// Read stdout until delimiter
let mut stdout_reader = BufReader::new(stdout);
let mut stdout_lines = Vec::new();
let mut line = String::new();
loop {
line.clear();
let n = stdout_reader.read_line(&mut line).await?;
if n == 0 {
return Err(eyre!("Bash process terminated unexpectedly"));
}
if line.trim() == COMMAND_DELIMITER {
break;
}
stdout_lines.push(line.clone());
// Truncate if too many lines
if stdout_lines.len() > MAX_OUTPUT_LINES {
stdout_lines.push("<<<...output truncated...>>>\n".to_string());
break;
}
}
// Read stderr (non-blocking, best effort)
let mut stderr_reader = BufReader::new(stderr);
let mut stderr_lines = Vec::new();
let mut stderr_line = String::new();
// Try to read stderr without blocking indefinitely
while let Ok(result) = timeout(Duration::from_millis(100), stderr_reader.read_line(&mut stderr_line)).await {
match result {
Ok(n) if n > 0 => {
stderr_lines.push(stderr_line.clone());
stderr_line.clear();
if stderr_lines.len() > MAX_OUTPUT_LINES {
stderr_lines.push("<<<...stderr truncated...>>>\n".to_string());
break;
}
}
_ => break,
}
}
// Read exit code
let exit_code_cmd = "cat /tmp/owlen_exit_code_$$.tmp 2>/dev/null; rm -f /tmp/owlen_exit_code_$$.tmp\n";
stdin.write_all(exit_code_cmd.as_bytes()).await?;
stdin.flush().await?;
let mut exit_line = String::new();
stdout_reader.read_line(&mut exit_line).await?;
let exit_code: i32 = exit_line.trim().parse().unwrap_or(0);
// Restore stdio handles
child.stdin = Some(stdin);
child.stdout = Some(stdout_reader.into_inner());
child.stderr = Some(stderr_reader.into_inner());
Ok(CommandOutput {
stdout: stdout_lines.join(""),
stderr: stderr_lines.join(""),
exit_code,
success: exit_code == 0,
})
}
/// Close the bash session
pub async fn close(self) -> Result<()> {
let mut child = self.child.into_inner();
if let Some(mut stdin) = child.stdin.take() {
let _ = stdin.write_all(b"exit\n").await;
let _ = stdin.flush().await;
}
let _ = child.wait().await?;
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: spawning the bash child succeeds on a machine with bash on PATH.
    #[tokio::test]
    async fn can_create_session() {
        let session = BashSession::new().await;
        assert!(session.is_ok());
    }
}

View File

@@ -1,107 +0,0 @@
//! Integration tests for `BashSession`: state persistence across commands,
//! timeouts, multi-line output, exit codes, and stderr capture. All tests
//! require a `bash` binary on PATH.
use tools_bash::BashSession;

#[tokio::test]
async fn bash_persists_env_between_calls() {
    let mut session = BashSession::new().await.unwrap();
    // Set an environment variable
    let output1 = session.execute("export TEST_VAR=hello", None).await.unwrap();
    assert!(output1.success);
    // Verify it persists in next command
    let output2 = session.execute("echo $TEST_VAR", None).await.unwrap();
    assert!(output2.success);
    assert!(output2.stdout.contains("hello"));
    session.close().await.unwrap();
}

#[tokio::test]
async fn bash_persists_cwd_between_calls() {
    let mut session = BashSession::new().await.unwrap();
    // Change to /tmp
    let output1 = session.execute("cd /tmp", None).await.unwrap();
    assert!(output1.success);
    // Verify cwd persists
    let output2 = session.execute("pwd", None).await.unwrap();
    assert!(output2.success);
    assert!(output2.stdout.trim().ends_with("/tmp"));
    session.close().await.unwrap();
}

#[tokio::test]
async fn bash_command_timeout() {
    let mut session = BashSession::new().await.unwrap();
    // Command that sleeps for 5 seconds, but with 1 second timeout
    let result = session.execute("sleep 5", Some(1000)).await;
    assert!(result.is_err());
    let err_msg = result.unwrap_err().to_string();
    assert!(err_msg.contains("timeout") || err_msg.contains("timed out"));
    session.close().await.unwrap();
}

#[tokio::test]
async fn bash_output_truncation() {
    let mut session = BashSession::new().await.unwrap();
    // Generate a lot of output.
    // NOTE(review): 100 lines is well below MAX_OUTPUT_LINES (2000), so this
    // exercises plain multi-line output rather than the truncation path —
    // confirm whether a >2000-line case is wanted.
    let output = session
        .execute("for i in {1..100}; do echo 'Line '$i; done", None)
        .await
        .unwrap();
    assert!(output.success);
    // Should have output but might be truncated
    assert!(!output.stdout.is_empty());
    session.close().await.unwrap();
}

#[tokio::test]
async fn bash_command_failure_returns_error_code() {
    let mut session = BashSession::new().await.unwrap();
    let output = session.execute("false", None).await.unwrap();
    assert!(!output.success);
    assert_eq!(output.exit_code, 1);
    session.close().await.unwrap();
}

#[tokio::test]
async fn bash_stderr_captured() {
    let mut session = BashSession::new().await.unwrap();
    let output = session
        .execute("echo 'error message' >&2", None)
        .await
        .unwrap();
    assert!(output.success);
    assert!(output.stderr.contains("error message"));
    session.close().await.unwrap();
}

#[tokio::test]
async fn bash_multiple_commands_in_sequence() {
    let mut session = BashSession::new().await.unwrap();
    // Set a variable
    session.execute("X=1", None).await.unwrap();
    // Increment it
    session.execute("X=$((X + 1))", None).await.unwrap();
    // Verify final value
    let output = session.execute("echo $X", None).await.unwrap();
    assert!(output.stdout.contains("2"));
    session.close().await.unwrap();
}

View File

@@ -1,18 +0,0 @@
# Package manifest for the filesystem tools crate (read/write/edit/glob/grep).
[package]
name = "tools-fs"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true

[dependencies]
# Gitignore-aware directory walking (used by glob_list and grep).
ignore = "0.4"
# NOTE(review): walkdir appears unused by the library code shown here (the
# walks use ignore::WalkBuilder) — confirm before removing.
walkdir = "2.5"
# Glob pattern matching for glob_list.
globset = "0.4"
# Line-oriented regex search (ripgrep internals), used by grep.
grep-regex = "0.1"
grep-searcher = "0.1"
color-eyre = "0.6"
# NOTE(review): similar appears unused by the code shown here — confirm.
similar = "2.7"

[dev-dependencies]
tempfile = "3.23.0"

View File

@@ -1,130 +0,0 @@
use color_eyre::eyre::{Result, eyre};
use ignore::WalkBuilder;
use grep_regex::RegexMatcher;
use grep_searcher::{sinks::UTF8, SearcherBuilder};
use globset::Glob;
use std::path::Path;
/// Read the entire file at `path` into a UTF-8 string.
///
/// # Errors
/// Fails if the file cannot be opened or is not valid UTF-8.
pub fn read_file(path: &str) -> Result<String> {
    let contents = std::fs::read_to_string(path)?;
    Ok(contents)
}
/// Write `content` to `path`, creating any missing parent directories first.
///
/// # Errors
/// Fails if a directory or the file itself cannot be created/written.
pub fn write_file(path: &str, content: &str) -> Result<()> {
    // Ensure the destination directory exists before writing.
    match Path::new(path).parent() {
        Some(dir) => std::fs::create_dir_all(dir)?,
        None => {}
    }
    std::fs::write(path, content)?;
    Ok(())
}
/// Replace exactly one occurrence of `old_string` in the file at `path` with
/// `new_string`.
///
/// The edit is guarded: it fails if the target string is absent or ambiguous
/// (appears more than once), and a temporary `.backup` copy protects the file
/// while the new content is written.
///
/// # Errors
/// Fails on I/O errors, on zero matches, or on multiple matches.
pub fn edit_file(path: &str, old_string: &str, new_string: &str) -> Result<()> {
    let content = std::fs::read_to_string(path)?;
    // Count occurrences up front so we can refuse ambiguous edits.
    let occurrences = content.match_indices(old_string).count();
    if occurrences == 0 {
        return Err(eyre!("String to replace not found in file: '{}'", old_string));
    }
    if occurrences > 1 {
        return Err(eyre!(
            "Ambiguous replacement: found {} occurrences of '{}' in file. Please make the old_string unique.",
            occurrences,
            old_string
        ));
    }
    // Exactly one match: snapshot the original, then swap in the new content.
    let updated = content.replace(old_string, new_string);
    let backup_path = format!("{}.backup", path);
    std::fs::write(&backup_path, &content)?;
    if let Err(e) = std::fs::write(path, updated) {
        // Write failed — put the original back in place.
        let _ = std::fs::rename(&backup_path, path);
        return Err(e.into());
    }
    // Success — the backup is no longer needed.
    let _ = std::fs::remove_file(&backup_path);
    Ok(())
}
/// List files matching a glob `pattern`, honoring `.gitignore` rules.
///
/// The walk root is derived from the literal prefix of the pattern (the text
/// before the first glob metacharacter); each regular file's full path is then
/// matched against the compiled glob.
///
/// # Errors
/// Fails if the pattern is not a valid glob or the directory walk errors.
pub fn glob_list(pattern: &str) -> Result<Vec<String>> {
    let glob = Glob::new(pattern)?.compile_matcher();
    // Extract the literal prefix to determine the root directory:
    // find the position of the first glob metacharacter.
    let first_glob = pattern
        .find(|c| matches!(c, '*' | '?' | '[' | '{'))
        .unwrap_or(pattern.len());
    // Find the last directory separator before the first glob metacharacter.
    let root = if first_glob > 0 {
        let prefix = &pattern[..first_glob];
        match prefix.rfind('/') {
            // Absolute pattern like "/*.txt": the root is "/" itself, not the
            // empty string (which cannot be walked).
            Some(0) => "/",
            Some(pos) => &prefix[..pos],
            None => ".",
        }
    } else {
        "."
    };
    let mut out = Vec::new();
    for result in WalkBuilder::new(root)
        .standard_filters(true)
        .git_ignore(true)
        .git_global(false)
        .git_exclude(false)
        .require_git(false)
        .build()
    {
        let entity = result?;
        // Only regular files participate in matching.
        if entity.file_type().map(|filetype| filetype.is_file()).unwrap_or(false) {
            if let Some(path) = entity.path().to_str() {
                // Match the full path against the glob pattern.
                if glob.is_match(path) {
                    out.push(path.to_string());
                }
            }
        }
    }
    Ok(out)
}
/// Search `root` recursively for lines matching the regex `pattern`.
///
/// Returns `(file_path, line_number, line_text)` tuples. The walk honors
/// standard ignore filters and `.gitignore` files (even outside a git repo,
/// via `require_git(false)`); global gitignore and exclude files are off.
///
/// # Errors
/// Fails if the pattern is not a valid regex or the directory walk errors.
pub fn grep(root: &str, pattern: &str) -> Result<Vec<(String, usize, String)>> {
    let matcher = RegexMatcher::new_line_matcher(pattern)?;
    let mut searcher = SearcherBuilder::new().line_number(true).build();
    let mut results = Vec::new();
    for result in WalkBuilder::new(root)
        .standard_filters(true)
        .git_ignore(true)
        .git_global(false)
        .git_exclude(false)
        .require_git(false)
        .build()
    {
        let entity = result?;
        // Only search regular files.
        if !entity.file_type().map(|filetype| filetype.is_file()).unwrap_or(false) { continue; }
        let path = entity.path().to_path_buf();
        let mut line_hits: Vec<(usize, String)> = Vec::new();
        // The sink closure collects every matching line; returning Ok(true)
        // tells the searcher to keep going after each match.
        let sink = UTF8(|line_number, line| {
            line_hits.push((line_number as usize, line.to_string()));
            Ok(true)
        });
        // Per-file search errors (e.g. binary or non-UTF-8 content) are
        // deliberately ignored so one unreadable file does not abort the walk.
        let _ = searcher.search_path(&matcher, &path, sink);
        if !line_hits.is_empty() {
            let p = path.to_string_lossy().to_string();
            for (line_number, text) in line_hits {
                results.push((p.clone(), line_number, text));
            }
        }
    }
    Ok(results)
}

View File

@@ -1,104 +0,0 @@
//! Integration tests for the tools-fs crate: gitignore-aware glob/grep, and
//! the write/edit file primitives, all exercised inside temp directories.
use tools_fs::{read_file, glob_list, grep, write_file, edit_file};
use std::fs;
use tempfile::tempdir;

#[test]
fn read_and_glob_respect_gitignore() {
    let dir = tempdir().unwrap();
    let root = dir.path();
    fs::write(root.join("a.txt"), "hello").unwrap();
    fs::create_dir(root.join("secret")).unwrap();
    fs::write(root.join("secret/secret.txt"), "token=123").unwrap();
    // The .gitignore should hide secret/ even though this is not a git repo.
    fs::write(root.join(".gitignore"), "secret/\n").unwrap();
    let pattern = format!("{}/**/*", root.display());
    let files = glob_list(&pattern).unwrap();
    assert!(files.iter().any(|p| p.ends_with("a.txt")));
    assert!(!files.iter().any(|p| p.contains("secret.txt")));
    assert_eq!(read_file(root.join("a.txt").to_str().unwrap()).unwrap(), "hello");
}

#[test]
fn grep_finds_lines() {
    let dir = tempdir().unwrap();
    let root = dir.path();
    fs::write(root.join("a.rs"), "fn main() { println!(\"hello\"); }").unwrap();
    let hits = grep(root.to_str().unwrap(), "hello").unwrap();
    assert!(hits.iter().any(|(_p, _ln, text)| text.contains("hello")));
}

#[test]
fn write_file_creates_new_file() {
    let dir = tempdir().unwrap();
    let file_path = dir.path().join("new.txt");
    write_file(file_path.to_str().unwrap(), "new content").unwrap();
    assert_eq!(read_file(file_path.to_str().unwrap()).unwrap(), "new content");
}

#[test]
fn write_file_overwrites_existing() {
    let dir = tempdir().unwrap();
    let file_path = dir.path().join("existing.txt");
    fs::write(&file_path, "old content").unwrap();
    write_file(file_path.to_str().unwrap(), "new content").unwrap();
    assert_eq!(read_file(file_path.to_str().unwrap()).unwrap(), "new content");
}

#[test]
fn edit_file_replaces_exact_match() {
    let dir = tempdir().unwrap();
    let file_path = dir.path().join("test.txt");
    let original = "line 1\nline 2\nline 3\n";
    fs::write(&file_path, original).unwrap();
    edit_file(file_path.to_str().unwrap(), "line 2", "modified line 2").unwrap();
    let result = read_file(file_path.to_str().unwrap()).unwrap();
    assert_eq!(result, "line 1\nmodified line 2\nline 3\n");
}

#[test]
fn edit_file_replaces_multiline() {
    let dir = tempdir().unwrap();
    let file_path = dir.path().join("test.txt");
    let original = "line 1\nline 2\nline 3\nline 4\n";
    fs::write(&file_path, original).unwrap();
    // old_string may span multiple lines as long as it is unique.
    edit_file(file_path.to_str().unwrap(), "line 2\nline 3", "new content").unwrap();
    let result = read_file(file_path.to_str().unwrap()).unwrap();
    assert_eq!(result, "line 1\nnew content\nline 4\n");
}

#[test]
fn edit_file_fails_on_ambiguous_match() {
    let dir = tempdir().unwrap();
    let file_path = dir.path().join("test.txt");
    let original = "duplicate\nsome text\nduplicate\n";
    fs::write(&file_path, original).unwrap();
    let result = edit_file(file_path.to_str().unwrap(), "duplicate", "changed");
    assert!(result.is_err());
    let err_msg = result.unwrap_err().to_string();
    assert!(err_msg.contains("Ambiguous") || err_msg.contains("multiple") || err_msg.contains("occurrences"));
}

#[test]
fn edit_file_fails_on_no_match() {
    let dir = tempdir().unwrap();
    let file_path = dir.path().join("test.txt");
    let original = "line 1\nline 2\n";
    fs::write(&file_path, original).unwrap();
    let result = edit_file(file_path.to_str().unwrap(), "nonexistent", "changed");
    assert!(result.is_err());
    let err_msg = result.unwrap_err().to_string();
    assert!(err_msg.contains("not found") || err_msg.contains("String to replace"));
}

View File

@@ -1,15 +0,0 @@
# Package manifest for the slash-command parsing crate.
[package]
name = "tools-slash"
version = "0.1.0"
edition.workspace = true
license.workspace = true
rust-version.workspace = true

[dependencies]
serde = { version = "1", features = ["derive"] }
# YAML frontmatter parsing.
serde_yaml = "0.9"
color-eyre = "0.6"
# Used for matching @file references in command bodies.
regex = "1.12"

[dev-dependencies]
tempfile = "3.23.0"

View File

@@ -1,169 +0,0 @@
use color_eyre::eyre::{Result, eyre};
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Frontmatter metadata parsed from the YAML header of a slash-command file.
///
/// All known fields are optional; unrecognized frontmatter keys are preserved
/// in `extra` via `#[serde(flatten)]`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SlashCommandMetadata {
    #[serde(default)]
    pub description: Option<String>,
    #[serde(default)]
    pub author: Option<String>,
    #[serde(default)]
    pub tags: Option<Vec<String>>,
    #[serde(default)]
    pub version: Option<String>,
    // Catch-all for any additional frontmatter keys.
    #[serde(flatten)]
    pub extra: HashMap<String, serde_yaml::Value>,
}
/// A parsed slash command: optional metadata plus the argument-substituted body.
#[derive(Debug, Clone)]
pub struct SlashCommand {
    pub description: Option<String>,
    pub author: Option<String>,
    pub tags: Option<Vec<String>>,
    pub version: Option<String>,
    /// Command body with `$ARGUMENTS` / positional placeholders already substituted.
    pub body: String,
}
impl SlashCommand {
    /// Resolve `@path` file references in the command body.
    ///
    /// Each `@path` token (an `@` followed by a run of non-whitespace) is
    /// replaced by the contents of the referenced file. Substitution is
    /// position-based over the original body, so one reference can never
    /// corrupt another (e.g. `@a` is not replaced inside `@ab`) and `@`
    /// tokens inside inserted file content are left untouched.
    ///
    /// # Errors
    /// Fails if any referenced file cannot be read.
    pub fn resolve_file_refs(&self) -> Result<String> {
        let re = Regex::new(r"@([^\s]+)").unwrap();
        let mut result = String::with_capacity(self.body.len());
        let mut last_end = 0;
        for cap in re.captures_iter(&self.body) {
            let whole = cap.get(0).expect("group 0 always exists");
            let file_path = &cap[1];
            let content = std::fs::read_to_string(file_path)
                .map_err(|e| eyre!("Failed to read file '{}': {}", file_path, e))?;
            // Copy the text between the previous reference and this one,
            // then splice in the file contents.
            result.push_str(&self.body[last_end..whole.start()]);
            result.push_str(&content);
            last_end = whole.end();
        }
        result.push_str(&self.body[last_end..]);
        Ok(result)
    }
}
/// Parse a slash command from its raw file content.
///
/// A leading `---`-delimited YAML frontmatter block is optional; when present
/// its known fields are copied onto the returned command. Argument
/// placeholders (`$ARGUMENTS`, `$1`, `$2`, ...) in the body are substituted
/// from `args`.
///
/// # Arguments
/// * `content` - The full content of the slash command file (with optional frontmatter)
/// * `args` - Arguments to substitute ($ARGUMENTS, $1, $2, etc.)
pub fn parse_slash_command(content: &str, args: &[&str]) -> Result<SlashCommand> {
    // Frontmatter is signalled by a leading "---" (ignoring leading whitespace).
    let (metadata, body) = if content.trim_start().starts_with("---") {
        parse_with_frontmatter(content)?
    } else {
        (None, content.to_string())
    };
    // Substitute $ARGUMENTS and positional placeholders.
    let body = substitute_arguments(&body, args);
    // Move the metadata fields onto the command; absent metadata yields Nones.
    let (description, author, tags, version) = match metadata {
        Some(m) => (m.description, m.author, m.tags, m.version),
        None => (None, None, None, None),
    };
    Ok(SlashCommand {
        description,
        author,
        tags,
        version,
        body,
    })
}
/// Split `content` into YAML frontmatter metadata and the remaining body.
///
/// Frontmatter is delimited by a pair of `---` lines. Leading blank lines
/// before the opening delimiter are tolerated (the caller only checks
/// `trim_start()`, so they can occur). If no well-formed frontmatter block is
/// found, the entire content is returned as the body with no metadata.
///
/// # Errors
/// Fails if the frontmatter is present but is not valid YAML for
/// `SlashCommandMetadata`.
fn parse_with_frontmatter(content: &str) -> Result<(Option<SlashCommandMetadata>, String)> {
    let lines: Vec<&str> = content.lines().collect();
    // Locate the opening delimiter: the first non-blank line must be "---".
    // Previously the opening delimiter was assumed to be line 0, which broke
    // on content with leading blank lines.
    let start_idx = match lines.iter().position(|line| !line.trim().is_empty()) {
        Some(i) if lines[i].trim() == "---" => i,
        _ => return Ok((None, content.to_string())),
    };
    // Find the closing delimiter.
    let mut end_idx = None;
    for (i, line) in lines.iter().enumerate().skip(start_idx + 1) {
        if line.trim() == "---" {
            end_idx = Some(i);
            break;
        }
    }
    match end_idx {
        Some(idx) => {
            // Extract and parse the frontmatter YAML.
            let frontmatter_lines = &lines[(start_idx + 1)..idx];
            let frontmatter_str = frontmatter_lines.join("\n");
            let metadata: SlashCommandMetadata = serde_yaml::from_str(&frontmatter_str)
                .map_err(|e| eyre!("Failed to parse frontmatter YAML: {}", e))?;
            // Everything after the closing delimiter is the body.
            let body = lines[(idx + 1)..].join("\n");
            Ok((Some(metadata), body))
        }
        None => {
            // Malformed frontmatter (no closing delimiter): treat entire content as body.
            Ok((None, content.to_string()))
        }
    }
}
/// Substitute `$ARGUMENTS` and positional `$1`, `$2`, ... placeholders in `body`.
///
/// `$ARGUMENTS` expands to all arguments joined by a single space. Positional
/// placeholders are replaced highest-index first so that `$10` is not
/// clobbered by the replacement for `$1` (the previous ascending order mangled
/// two-digit placeholders). Placeholders with no matching argument are left
/// untouched.
fn substitute_arguments(body: &str, args: &[&str]) -> String {
    // Replace $ARGUMENTS with all args joined by space.
    let all_args = args.join(" ");
    let mut result = body.replace("$ARGUMENTS", &all_args);
    // Replace positional arguments in descending order: "$10" before "$1".
    for i in (1..=args.len()).rev() {
        let placeholder = format!("${}", i);
        result = result.replace(&placeholder, args[i - 1]);
    }
    result
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn substitute_arguments_works() {
        let body = "Args: $ARGUMENTS, First: $1, Second: $2";
        let result = substitute_arguments(body, &["hello", "world"]);
        assert!(result.contains("Args: hello world"));
        assert!(result.contains("First: hello"));
        assert!(result.contains("Second: world"));
    }

    #[test]
    fn substitute_arguments_empty() {
        let body = "Args: $ARGUMENTS, First: $1";
        let result = substitute_arguments(body, &[]);
        // $ARGUMENTS collapses to an empty string...
        assert!(result.contains("Args: ,"));
        // ...but unmatched positional placeholders stay as-is.
        assert!(result.contains("First: $1")); // Unchanged
    }

    #[test]
    fn parse_frontmatter_extracts_metadata() {
        // Frontmatter lines must start at column 0 inside the raw string.
        let content = r#"---
description: "Test"
author: "Me"
---
Body content
"#;
        let (metadata, body) = parse_with_frontmatter(content).unwrap();
        assert!(metadata.is_some());
        let m = metadata.unwrap();
        assert_eq!(m.description, Some("Test".to_string()));
        assert_eq!(m.author, Some("Me".to_string()));
        assert_eq!(body.trim(), "Body content");
    }
}

View File

@@ -1,109 +0,0 @@
//! Integration tests for tools-slash: frontmatter parsing, argument
//! substitution, and `@file` reference resolution.
use tools_slash::parse_slash_command;
use std::fs;
use tempfile::tempdir;

#[test]
fn slash_parse_frontmatter_and_args() {
    let content = r#"---
description: "Test command"
author: "Test Author"
---
This is the command body with $ARGUMENTS
First arg: $1
Second arg: $2
"#;
    let cmd = parse_slash_command(content, &["arg1", "arg2"]).unwrap();
    assert_eq!(cmd.description, Some("Test command".to_string()));
    assert_eq!(cmd.author, Some("Test Author".to_string()));
    assert!(cmd.body.contains("arg1 arg2")); // $ARGUMENTS replaced
    assert!(cmd.body.contains("First arg: arg1")); // $1 replaced
    assert!(cmd.body.contains("Second arg: arg2")); // $2 replaced
}

#[test]
fn slash_parse_no_frontmatter() {
    let content = "Simple command without frontmatter";
    let cmd = parse_slash_command(content, &[]).unwrap();
    assert_eq!(cmd.description, None);
    assert_eq!(cmd.author, None);
    assert_eq!(cmd.body.trim(), "Simple command without frontmatter");
}

#[test]
fn slash_file_refs() {
    let dir = tempdir().unwrap();
    let test_file = dir.path().join("test.txt");
    fs::write(&test_file, "File content here").unwrap();
    // The @path token should be replaced by the file's contents.
    let content = format!("Check this file: @{}", test_file.display());
    let cmd = parse_slash_command(&content, &[]).unwrap();
    let resolved = cmd.resolve_file_refs().unwrap();
    assert!(resolved.contains("File content here"));
    assert!(!resolved.contains(&format!("@{}", test_file.display())));
}

#[test]
fn slash_arguments_substitution() {
    let content = "All args: $ARGUMENTS\nFirst: $1\nSecond: $2\nThird: $3";
    let cmd = parse_slash_command(content, &["hello", "world"]).unwrap();
    assert!(cmd.body.contains("All args: hello world"));
    assert!(cmd.body.contains("First: hello"));
    assert!(cmd.body.contains("Second: world"));
    assert!(cmd.body.contains("Third: $3")); // No third arg, should remain
}

#[test]
fn slash_multiple_file_refs() {
    let dir = tempdir().unwrap();
    let file1 = dir.path().join("file1.txt");
    let file2 = dir.path().join("file2.txt");
    fs::write(&file1, "Content 1").unwrap();
    fs::write(&file2, "Content 2").unwrap();
    let content = format!("File 1: @{}\nFile 2: @{}", file1.display(), file2.display());
    let cmd = parse_slash_command(&content, &[]).unwrap();
    let resolved = cmd.resolve_file_refs().unwrap();
    assert!(resolved.contains("Content 1"));
    assert!(resolved.contains("Content 2"));
}

#[test]
fn slash_empty_args_leaves_placeholders() {
    let content = "Args: $ARGUMENTS, First: $1, Second: $2";
    let cmd = parse_slash_command(content, &[]).unwrap();
    // With no args, $ARGUMENTS becomes empty, but positional args remain
    assert!(cmd.body.contains("Args: ,"));
    assert!(cmd.body.contains("First: $1"));
    assert!(cmd.body.contains("Second: $2"));
}

#[test]
fn slash_complex_frontmatter() {
    // NOTE(review): `version: 1.0` is a YAML float while the metadata field is
    // Option<String>; confirm serde_yaml accepts the coercion, otherwise this
    // parse would fail with a type error.
    let content = r#"---
description: "Multi-line
description"
tags:
- test
- example
version: 1.0
---
Command body
"#;
    let cmd = parse_slash_command(content, &[]).unwrap();
    assert!(cmd.description.is_some());
    assert!(cmd.description.as_ref().unwrap().contains("Multi-line"));
}

71
docs/architecture.md Normal file
View File

@@ -0,0 +1,71 @@
# Owlen Architecture
This document provides a high-level overview of the Owlen architecture. Its purpose is to help developers understand how the different parts of the application fit together.
## Core Concepts
The architecture is designed to be modular and extensible, centered around a few key concepts:
- **Providers**: Connect to various LLM APIs (Ollama, OpenAI, etc.).
- **Session**: Manages the conversation history and state.
- **TUI**: The terminal user interface, built with `ratatui`.
- **Events**: A system for handling user input and other events.
## Component Interaction
A simplified diagram of how components interact:
```
[User Input] -> [Event Loop] -> [Session Controller] -> [Provider]
^ |
| v
[TUI Renderer] <------------------------------------ [API Response]
```
1. **User Input**: The user interacts with the TUI, generating events (e.g., key presses).
2. **Event Loop**: The main event loop in `owlen-tui` captures these events.
3. **Session Controller**: The event is processed, and if it's a prompt, the session controller sends a request to the current provider.
4. **Provider**: The provider formats the request for the specific LLM API and sends it.
5. **API Response**: The LLM API returns a response.
6. **TUI Renderer**: The response is processed, the session state is updated, and the TUI is re-rendered to display the new information.
## Crate Breakdown
- `owlen-core`: Defines the core traits and data structures, like `Provider` and `Session`.
- `owlen-tui`: Contains all the logic for the terminal user interface, including event handling and rendering.
- `owlen-cli`: The command-line entry point, responsible for parsing arguments and starting the TUI.
- `owlen-ollama` / `owlen-openai` / etc.: Implementations of the `Provider` trait for specific services.
## Session Management
The session management system is responsible for tracking the state of a conversation. The two main structs are:
- **`Conversation`**: Found in `owlen-core`, this struct holds the messages of a single conversation, the model being used, and other metadata. It is a simple data container.
- **`SessionController`**: This is the high-level controller that manages the active conversation. It handles:
- Storing and retrieving conversation history via the `ConversationManager`.
- Managing the context that is sent to the LLM provider.
- Switching between different models.
- Sending requests to the provider and handling the responses (both streaming and complete).
When a user sends a message, the `SessionController` adds the message to the current `Conversation`, sends the updated message list to the `Provider`, and then adds the provider's response to the `Conversation`.
## Event Flow
The event flow is managed by the `EventHandler` in `owlen-tui`. It operates in a loop, waiting for events and dispatching them to the active application (`ChatApp` or `CodeApp`).
1. **Event Source**: Events are primarily generated by `crossterm` from user keyboard input. Asynchronous events, like responses from a `Provider`, are also fed into the event system via a `tokio::mpsc` channel.
2. **`EventHandler::next()`**: The main application loop calls this method to wait for the next event.
3. **Event Enum**: Events are defined in the `owlen_tui::events::Event` enum. This includes `Key` events, `Tick` events (for UI updates), and `Message` events (for async provider data).
4. **Dispatch**: The application's `run` method matches on the `Event` type and calls the appropriate handler function (e.g., `dispatch_key_event`).
5. **State Update**: The handler function updates the application state based on the event. For example, a key press might change the `InputMode` or modify the text in the input buffer.
6. **Re-render**: After the state is updated, the UI is re-rendered to reflect the changes.
## TUI Rendering Pipeline
The TUI is rendered on each iteration of the main application loop in `owlen-tui`. The process is as follows:
1. **`tui.draw()`**: The main loop calls this method, passing the current application state.
2. **`Terminal::draw()`**: This method, from `ratatui`, takes a closure that receives a `Frame`.
3. **UI Composition**: Inside the closure, the UI is built by composing `ratatui` widgets. The root UI is defined in `owlen_tui::ui::render`, which builds the main layout and calls other functions to render specific components (like the chat panel, input box, etc.).
4. **State-Driven Rendering**: Each rendering function takes the current application state as an argument. It uses this state to decide what and how to render. For example, the border color of a panel might change if it is focused.
5. **Buffer and Diff**: `ratatui` does not draw directly to the terminal. Instead, it renders the widgets to an in-memory buffer. It then compares this buffer to the previous buffer and only sends the necessary changes to the terminal. This is highly efficient and prevents flickering.

118
docs/configuration.md Normal file
View File

@@ -0,0 +1,118 @@
# Owlen Configuration
Owlen uses a TOML file for configuration, allowing you to customize its behavior to your liking. This document details all the available options.
## File Location
By default, Owlen looks for its configuration file at `~/.config/owlen/config.toml`.
A default configuration file is created on the first run if one doesn't exist.
## Configuration Precedence
Configuration values are resolved in the following order:
1. **Defaults**: The application has hard-coded default values for all settings.
2. **Configuration File**: Any values set in `config.toml` will override the defaults.
3. **Command-Line Arguments / In-App Changes**: Any settings changed during runtime (e.g., via the `:theme` or `:model` commands) will override the configuration file for the current session. Some of these changes (like theme and model) are automatically saved back to the configuration file.
---
## General Settings (`[general]`)
These settings control the core behavior of the application.
- `default_provider` (string, default: `"ollama"`)
The name of the provider to use by default.
- `default_model` (string, optional, default: `"llama3.2:latest"`)
The default model to use for new conversations.
- `enable_streaming` (boolean, default: `true`)
Whether to stream responses from the provider by default.
- `project_context_file` (string, optional, default: `"OWLEN.md"`)
Path to a file whose content will be automatically injected as a system prompt. This is useful for providing project-specific context.
- `model_cache_ttl_secs` (integer, default: `60`)
Time-to-live in seconds for the cached list of available models.
## UI Settings (`[ui]`)
These settings customize the look and feel of the terminal interface.
- `theme` (string, default: `"default_dark"`)
The name of the theme to use. See the [Theming Guide](https://github.com/Owlibou/owlen/blob/main/themes/README.md) for available themes.
- `word_wrap` (boolean, default: `true`)
Whether to wrap long lines in the chat view.
- `max_history_lines` (integer, default: `2000`)
The maximum number of lines to keep in the scrollback buffer for the chat history.
- `show_role_labels` (boolean, default: `true`)
Whether to show the `user` and `bot` role labels next to messages.
- `wrap_column` (integer, default: `100`)
The column at which to wrap text if `word_wrap` is enabled.
## Storage Settings (`[storage]`)
These settings control how conversations are saved and loaded.
- `conversation_dir` (string, optional, default: platform-specific)
The directory where conversation sessions are saved. If not set, a default directory is used:
- **Linux**: `~/.local/share/owlen/sessions`
- **Windows**: `%APPDATA%\owlen\sessions`
- **macOS**: `~/Library/Application Support/owlen/sessions`
- `auto_save_sessions` (boolean, default: `true`)
Whether to automatically save the session when the application exits.
- `max_saved_sessions` (integer, default: `25`)
The maximum number of saved sessions to keep.
- `session_timeout_minutes` (integer, default: `120`)
The number of minutes of inactivity before a session is considered for auto-saving as a new session.
- `generate_descriptions` (boolean, default: `true`)
Whether to automatically generate a short summary of a conversation when saving it.
## Input Settings (`[input]`)
These settings control the behavior of the text input area.
- `multiline` (boolean, default: `true`)
Whether to allow multi-line input.
- `history_size` (integer, default: `100`)
The number of sent messages to keep in the input history (accessible with `Ctrl-Up/Down`).
- `tab_width` (integer, default: `4`)
The number of spaces to insert when the `Tab` key is pressed.
- `confirm_send` (boolean, default: `false`)
If true, requires an additional confirmation before sending a message.
## Provider Settings (`[providers]`)
This section contains a table for each provider you want to configure. The key is the provider name (e.g., `ollama`).
```toml
[providers.ollama]
provider_type = "ollama"
base_url = "http://localhost:11434"
# api_key = "..."
```
- `provider_type` (string, required)
The type of the provider. Currently, only `"ollama"` is built-in.
- `base_url` (string, optional)
The base URL of the provider's API.
- `api_key` (string, optional)
The API key to use for authentication, if required.
- `extra` (table, optional)
Any additional, provider-specific parameters can be added here.

42
docs/faq.md Normal file
View File

@@ -0,0 +1,42 @@
# Frequently Asked Questions (FAQ)
### What is the difference between `owlen` and `owlen-code`?
- `owlen` is the general-purpose chat client.
- `owlen-code` is an experimental client with a system prompt that is optimized for programming and code-related questions. In the future, it will include more code-specific features like file context and syntax highlighting.
### How do I use Owlen with a different terminal?
Owlen is designed to work with most modern terminals that support 256 colors and Unicode. If you experience rendering issues, you might try:
- **WezTerm**: Excellent cross-platform, GPU-accelerated terminal.
- **Alacritty**: Another fast, GPU-accelerated terminal.
- **Kitty**: A feature-rich terminal emulator.
If issues persist, please open an issue and let us know what terminal you are using.
### What is the setup for Windows?
The Windows build is currently experimental. However, you can install it from source using `cargo` if you have the Rust toolchain installed.
1. Install Rust from [rustup.rs](https://rustup.rs).
2. Install Git for Windows.
3. Clone the repository: `git clone https://github.com/Owlibou/owlen.git`
4. Install: `cd owlen && cargo install --path crates/owlen-cli`
Official binary releases for Windows are planned for the future.
### What is the setup for macOS?
Similar to Windows, the recommended installation method for macOS is to build from source using `cargo`.
1. Install the Xcode command-line tools: `xcode-select --install`
2. Install Rust from [rustup.rs](https://rustup.rs).
3. Clone the repository: `git clone https://github.com/Owlibou/owlen.git`
4. Install: `cd owlen && cargo install --path crates/owlen-cli`
Official binary releases for macOS are planned.
### I'm getting connection failures to Ollama.
Please see the [Troubleshooting Guide](troubleshooting.md#connection-failures-to-ollama) for help with this common issue.

34
docs/migration-guide.md Normal file
View File

@@ -0,0 +1,34 @@
# Migration Guide
This guide documents breaking changes between versions of Owlen and provides instructions on how to migrate your configuration or usage.
As Owlen is currently in its alpha phase (pre-v1.0), breaking changes may occur more frequently. We will do our best to document them here.
---
## Migrating from v0.1.x to v0.2.x (Example)
*This is a template for a future migration. No breaking changes have occurred yet.*
Version 0.2.0 introduces a new configuration structure for providers.
### Configuration File Changes
Previously, your `config.toml` might have looked like this:
```toml
# old config.toml (pre-v0.2.0)
ollama_base_url = "http://localhost:11434"
```
In v0.2.0, all provider settings are now nested under a `[providers]` table. You will need to update your `config.toml` to the new format:
```toml
# new config.toml (v0.2.0+)
[providers.ollama]
base_url = "http://localhost:11434"
```
### Action Required
Update your `~/.config/owlen/config.toml` to match the new structure. If you do not, Owlen will fall back to its default provider configuration.

View File

@@ -0,0 +1,75 @@
# Provider Implementation Guide
This guide explains how to implement a new provider for Owlen. Providers are the components that connect to different LLM APIs.
## The `Provider` Trait
The core of the provider system is the `Provider` trait, located in `owlen-core`. Any new provider must implement this trait.
Here is a simplified version of the trait:
```rust
use async_trait::async_trait;
use owlen_core::model::Model;
use owlen_core::session::Session;
#[async_trait]
pub trait Provider {
/// Returns the name of the provider.
fn name(&self) -> &str;
/// Sends the session to the provider and returns the response.
async fn chat(&self, session: &Session, model: &Model) -> Result<String, anyhow::Error>;
}
```
## Creating a New Crate
1. **Create a new crate** in the `crates/` directory. For example, `owlen-myprovider`.
2. **Add dependencies** to your new crate's `Cargo.toml`. You will need `owlen-core`, `async-trait`, `tokio`, and any crates required for interacting with the new API (e.g., `reqwest`).
3. **Add the new crate to the workspace** in the root `Cargo.toml`.
## Implementing the Trait
In your new crate's `lib.rs`, you will define a struct for your provider and implement the `Provider` trait for it.
```rust
use async_trait::async_trait;
use owlen_core::model::Model;
use owlen_core::provider::Provider;
use owlen_core::session::Session;
pub struct MyProvider;
#[async_trait]
impl Provider for MyProvider {
fn name(&self) -> &str {
"my-provider"
}
async fn chat(&self, session: &Session, model: &Model) -> Result<String, anyhow::Error> {
// 1. Get the conversation history from the session.
let history = session.get_messages();
// 2. Format the request for your provider's API.
// This might involve creating a JSON body with the messages.
// 3. Send the request to the API using a client like reqwest.
// 4. Parse the response from the API.
// 5. Return the content of the response as a String.
Ok("Hello from my provider!".to_string())
}
}
```
## Integrating with Owlen
Once your provider is implemented, you will need to integrate it into the main Owlen application.
1. **Add your provider crate** as a dependency to `owlen-cli`.
2. **In `owlen-cli`, modify the provider registration** to include your new provider. This will likely involve adding it to a list of available providers that the user can select from in the configuration.
This guide provides a basic outline. For more detailed examples, you can look at the existing provider implementations, such as `owlen-ollama`.

58
docs/testing.md Normal file
View File

@@ -0,0 +1,58 @@
# Testing Guide
This guide provides instructions on how to run existing tests and how to write new tests for Owlen.
## Running Tests
The entire test suite can be run from the root of the repository using the standard `cargo test` command.
```sh
# Run all tests in the workspace
cargo test --all
# Run tests for a specific crate
cargo test -p owlen-core
```
We use `cargo clippy` for linting and `cargo fmt` for formatting. Please run these before submitting a pull request.
```sh
cargo clippy --all -- -D warnings
cargo fmt --all -- --check
```
## Writing New Tests
Tests live either in a crate's `tests/` directory (integration tests) or in a `#[cfg(test)]` module at the bottom of the file being tested (unit tests). We follow standard Rust testing practices.
### Unit Tests
For testing specific functions or components in isolation, use unit tests. These should be placed in a `#[cfg(test)]` module in the same file as the code being tested.
```rust
// in src/my_module.rs
pub fn add(a: i32, b: i32) -> i32 {
a + b
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_add() {
assert_eq!(add(2, 2), 4);
}
}
```
### Integration Tests
For testing how different parts of the application work together, use integration tests. These should be placed in the `tests/` directory of the crate.
For example, to test the `SessionController`, you might create a mock `Provider` and simulate sending messages, as seen in the `SessionController` documentation example.
### TUI and UI Component Tests
Testing TUI components can be challenging. For UI logic in `owlen-core` (like `wrap_cursor`), we have detailed unit tests that manipulate the component's state and assert the results. For higher-level TUI components in `owlen-tui`, the focus is on testing the state management logic rather than the visual output.

40
docs/troubleshooting.md Normal file
View File

@@ -0,0 +1,40 @@
# Troubleshooting Guide
This guide is intended to help you with common issues you might encounter while using Owlen.
## Connection Failures to Ollama
If you are unable to connect to a local Ollama instance, here are a few things to check:
1. **Is Ollama running?** Make sure the Ollama service is active. You can usually check this with `ollama list`.
2. **Is the address correct?** By default, Owlen tries to connect to `http://localhost:11434`. If your Ollama instance is running on a different address or port, you will need to configure it in your `config.toml` file.
3. **Firewall issues:** Ensure that your firewall is not blocking the connection.
## Model Not Found Errors
If you get a "model not found" error, it means that the model you are trying to use is not available. For local providers like Ollama, you can use `ollama list` to see the models you have downloaded. Make sure the model name in your Owlen configuration matches one of the available models.
## Terminal Compatibility Issues
Owlen is built with `ratatui`, which supports most modern terminals. However, if you are experiencing rendering issues, please check the following:
- Your terminal supports Unicode.
- You are using a font that includes the characters being displayed.
- Try a different terminal emulator to see if the issue persists.
## Configuration File Problems
If Owlen is not behaving as you expect, there might be an issue with your configuration file.
- **Location:** The configuration file is typically located at `~/.config/owlen/config.toml`.
- **Syntax:** The configuration file is in TOML format. Make sure the syntax is correct.
- **Values:** Check that the values for your models, providers, and other settings are correct.
## Performance Tuning
If you are experiencing performance issues, you can try the following:
- **Reduce context size:** A smaller context size will result in faster responses from the LLM.
- **Use a less resource-intensive model:** Some models are faster but less capable than others.
If you are still having trouble, please [open an issue](https://github.com/Owlibou/owlen/issues) on our GitHub repository.

30
examples/basic_chat.rs Normal file
View File

@@ -0,0 +1,30 @@
// This example demonstrates a basic chat interaction without the TUI.
use owlen_core::model::Model;
use owlen_core::provider::Provider;
use owlen_core::session::Session;
use owlen_ollama::OllamaProvider; // Assuming you have an Ollama provider
#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    // This example requires a running local Ollama instance with the
    // model already pulled (e.g. `ollama pull llama2`).
    let ollama = OllamaProvider;
    let target = Model::new("llama2"); // Swap in any model you have locally.
    let mut chat = Session::new("basic-chat-session");

    println!("Starting basic chat with model: {}", target.name);

    // Record the user's turn, then echo it so the console shows the
    // full exchange in order.
    let prompt = "What is the capital of France?";
    chat.add_message("user", prompt);
    println!("User: {}", prompt);

    // Forward the whole session to the provider and wait for its answer.
    let reply = ollama.chat(&chat, &target).await?;
    chat.add_message("bot", &reply);
    println!("Bot: {}", reply);

    Ok(())
}

View File

@@ -0,0 +1,45 @@
// This example demonstrates how to implement a custom provider.
use async_trait::async_trait;
use owlen_core::model::Model;
use owlen_core::provider::Provider;
use owlen_core::session::Session;
// A minimal provider: a unit struct carrying no configuration of its own.
pub struct MyCustomProvider;

#[async_trait]
impl Provider for MyCustomProvider {
    /// Identifier reported wherever the active provider is listed.
    fn name(&self) -> &str {
        "custom-provider"
    }

    /// Return a canned answer that reports the session's message count.
    /// A real implementation would forward the session to an LLM API here.
    async fn chat(&self, session: &Session, model: &Model) -> Result<String, anyhow::Error> {
        println!(
            "Custom provider received chat request for model: {}",
            model.name
        );
        let total = session.get_messages().len();
        Ok(format!(
            "This is a custom response. You have {} messages in your session.",
            total
        ))
    }
}
#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    // Wire up the example provider, pick a model, and run one exchange.
    let custom = MyCustomProvider;
    let which_model = Model::new("custom-model");

    let mut convo = Session::new("custom-session");
    convo.add_message("user", "Hello, custom provider!");

    let answer = custom.chat(&convo, &which_model).await?;
    println!("Provider response: {}", answer);

    Ok(())
}

28
examples/custom_theme.rs Normal file
View File

@@ -0,0 +1,28 @@
// This example demonstrates how to create a custom theme programmatically.
use owlen_core::theme::Theme;
use ratatui::style::{Color, Style};
/// Build a one-off theme entirely in code. Any field not set explicitly
/// falls back to the `Theme` default via the struct-update syntax.
fn create_custom_theme() -> Theme {
    let base_style = Style::default().fg(Color::White).bg(Color::Black);
    Theme {
        name: String::from("My Custom Theme"),
        author: String::from("Your Name"),
        comment: String::from("A simple custom theme"),
        base: base_style,
        user_chat: Style::default().fg(Color::Green),
        bot_chat: Style::default().fg(Color::Cyan),
        error: Style::default().fg(Color::Red),
        info: Style::default().fg(Color::Yellow),
        border: Style::default().fg(Color::Gray),
        input: Style::default().fg(Color::White),
        ..Default::default()
    }
}
fn main() {
    // Construct the theme and print a few of its fields to show the result.
    let theme = create_custom_theme();
    println!("Created custom theme: {}", theme.name);
    println!("Author: {}", theme.author);
    println!("User chat color: {:?}", theme.user_chat.fg);
}

View File

@@ -0,0 +1,30 @@
// This example demonstrates how to use the session controller.
use owlen_core::session::Session;
fn main() {
    // Start a fresh session with a human-readable name.
    let mut convo = Session::new("my-session");
    println!("Created new session: {}", convo.name);

    // Record one user/bot exchange.
    convo.add_message("user", "Hello, Owlen!");
    convo.add_message("bot", "Hello, user! How can I help you today?");

    // Walk the stored transcript and print each entry.
    println!("\nMessages in session:");
    for entry in convo.get_messages() {
        println!(" {}: {}", entry.role, entry.content);
    }

    // Wipe the history and confirm it is empty afterwards.
    convo.clear_messages();
    println!("\nSession cleared.");
    let remaining = convo.get_messages();
    println!(
        "Messages in session after clear: {}",
        remaining.len()
    );
}

BIN
images/chat_view.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

BIN
images/help.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 103 KiB

BIN
images/layout.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

BIN
images/model_select.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

BIN
images/select_mode.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 51 KiB

89
themes/README.md Normal file
View File

@@ -0,0 +1,89 @@
# OWLEN Built-in Themes
This directory contains the built-in themes that are embedded into the OWLEN binary.
## Available Themes
- **default_dark** - High-contrast dark theme (default)
- **default_light** - Clean light theme
- **gruvbox** - Popular retro color scheme with warm tones
- **dracula** - Dark theme with vibrant purple and cyan colors
- **solarized** - Precision colors for optimal readability
- **midnight-ocean** - Deep blue oceanic theme
- **rose-pine** - Soho vibes with muted pastels
- **monokai** - Classic code editor theme
- **material-dark** - Google's Material Design dark variant
- **material-light** - Google's Material Design light variant
## Theme File Format
Each theme is defined in TOML format with the following structure:
```toml
name = "theme-name"
# Text colors
text = "#ffffff" # Main text color
placeholder = "#808080" # Placeholder/muted text
# Background colors
background = "#000000" # Main background
command_bar_background = "#111111"
status_background = "#111111"
# Border colors
focused_panel_border = "#ff00ff" # Active panel border
unfocused_panel_border = "#800080" # Inactive panel border
# Message role colors
user_message_role = "#00ffff" # User messages
assistant_message_role = "#ffff00" # Assistant messages
thinking_panel_title = "#ff00ff" # Thinking panel title
# Mode indicator colors (status bar)
mode_normal = "#00ffff"
mode_editing = "#00ff00"
mode_model_selection = "#ffff00"
mode_provider_selection = "#00ffff"
mode_help = "#ff00ff"
mode_visual = "#ff0080"
mode_command = "#ffff00"
# Selection and cursor
selection_bg = "#0000ff" # Selection background
selection_fg = "#ffffff" # Selection foreground
cursor = "#ff0080" # Cursor color
# Status colors
error = "#ff0000" # Error messages
info = "#00ff00" # Info/success messages
```
## Color Format
Colors can be specified in two formats:
1. **Hex RGB**: `#rrggbb` (e.g., `#ff0000` for red, `#ff8800` for orange)
2. **Named colors** (case-insensitive):
- **Basic**: `black`, `red`, `green`, `yellow`, `blue`, `magenta`, `cyan`, `white`
- **Gray variants**: `gray`, `grey`, `darkgray`, `darkgrey`
- **Light variants**: `lightred`, `lightgreen`, `lightyellow`, `lightblue`, `lightmagenta`, `lightcyan`
**Note**: For colors not in the named list (like orange, purple, brown), use hex RGB format.
OWLEN will display an error message on startup if a custom theme has invalid colors.
## Creating Custom Themes
To create your own theme:
1. Copy one of these files to `~/.config/owlen/themes/`
2. Rename and modify the colors
3. Set `theme = "your-theme-name"` in `~/.config/owlen/config.toml`
4. Or use `:theme your-theme-name` in OWLEN to switch
## Embedding in Binary
These theme files are embedded into the OWLEN binary at compile time using Rust's `include_str!()` macro. This ensures they're always available, even if the files are deleted from disk.
Custom themes placed in `~/.config/owlen/themes/` will override built-in themes with the same name.

23
themes/default_dark.toml Normal file
View File

@@ -0,0 +1,23 @@
# Built-in high-contrast dark theme (the default). Values are either named
# terminal colors or hex "#rrggbb" strings; see themes/README.md for the
# full list of recognized names and the meaning of each key.
name = "default_dark"
# Main text and background
text = "white"
background = "black"
# Panel borders: bright magenta marks the focused panel
focused_panel_border = "lightmagenta"
unfocused_panel_border = "#5f1487"
# Message role colors in the chat view
user_message_role = "lightblue"
assistant_message_role = "yellow"
thinking_panel_title = "lightmagenta"
# Bar backgrounds match the main background
command_bar_background = "black"
status_background = "black"
# Mode indicator colors shown in the status bar
mode_normal = "lightblue"
mode_editing = "lightgreen"
mode_model_selection = "lightyellow"
mode_provider_selection = "lightcyan"
mode_help = "lightmagenta"
mode_visual = "magenta"
mode_command = "yellow"
# Text selection and cursor
selection_bg = "lightblue"
selection_fg = "black"
cursor = "magenta"
# Muted/placeholder text
placeholder = "darkgray"
# Status message colors
error = "red"
info = "lightgreen"

Some files were not shown because too many files have changed in this diff Show More