6 Commits

Author SHA1 Message Date
282dcdce88 feat(config): separate Ollama into local/cloud providers, add OpenAI & Anthropic defaults, bump schema version to 1.6.0 2025-10-15 22:13:00 +02:00
b49f58bc16 feat(ollama): add cloud provider with API key handling and auth-aware health check
Introduce `OllamaCloudProvider` that resolves the API key from configuration or the `OLLAMA_CLOUD_API_KEY` environment variable, constructs provider metadata (including timeout as numeric), and maps auth errors to `ProviderStatus::RequiresSetup`. Export the new provider in the `ollama` module. Add shared HTTP error mapping utilities (`map_http_error`, `truncated_body`) and update local provider metadata to store timeout as a number.
2025-10-15 21:07:41 +02:00
cdc425ae93 feat(ollama): add local provider implementation and request timeout support
Introduce `OllamaLocalProvider` for communicating with a local Ollama daemon, including health checks, model listing, and stream generation. Export the provider in the Ollama module. Extend `OllamaClient` to accept an optional request timeout and apply it to the underlying HTTP client configuration.
2025-10-15 21:01:18 +02:00
3525cb3949 feat(provider): add Ollama client implementation in new providers crate
- Introduce `owlen-providers` crate with Cargo.toml and lib entry.
- Expose `OllamaClient` handling HTTP communication, health checks, model listing, and streaming generation.
- Implement request building, endpoint handling, and error mapping.
- Parse Ollama tags response and generation stream lines into core types.
- Add shared module re-exports for easy integration with the provider layer.
2025-10-15 20:54:52 +02:00
9d85420bf6 feat(provider): add ProviderManager to coordinate providers and cache health status
- Introduce `ProviderManager` for registering providers, routing generate calls, listing models, and refreshing health in parallel.
- Maintain a status cache to expose the last known health of each provider.
- Update `provider` module to re-export the new manager alongside existing types.
2025-10-15 20:37:36 +02:00
641c95131f feat(provider): add unified provider abstraction layer with ModelProvider trait and shared types 2025-10-15 20:27:30 +02:00
22 changed files with 1588 additions and 300 deletions

View File

@@ -4,6 +4,7 @@ members = [
"crates/owlen-core", "crates/owlen-core",
"crates/owlen-tui", "crates/owlen-tui",
"crates/owlen-cli", "crates/owlen-cli",
"crates/owlen-providers",
"crates/owlen-mcp-server", "crates/owlen-mcp-server",
"crates/owlen-mcp-llm-server", "crates/owlen-mcp-llm-server",
"crates/owlen-mcp-client", "crates/owlen-mcp-client",

29
config.toml Normal file
View File

@@ -0,0 +1,29 @@
[general]
default_provider = "ollama_local"
default_model = "llama3.2:latest"
[privacy]
encrypt_local_data = true
[providers.ollama_local]
enabled = true
provider_type = "ollama"
base_url = "http://localhost:11434"
[providers.ollama_cloud]
enabled = false
provider_type = "ollama_cloud"
base_url = "https://ollama.com"
api_key_env = "OLLAMA_CLOUD_API_KEY"
[providers.openai]
enabled = false
provider_type = "openai"
base_url = "https://api.openai.com/v1"
api_key_env = "OPENAI_API_KEY"
[providers.anthropic]
enabled = false
provider_type = "anthropic"
base_url = "https://api.anthropic.com/v1"
api_key_env = "ANTHROPIC_API_KEY"

View File

@@ -7,7 +7,8 @@ use clap::Subcommand;
use owlen_core::LlmProvider; use owlen_core::LlmProvider;
use owlen_core::ProviderConfig; use owlen_core::ProviderConfig;
use owlen_core::config::{ use owlen_core::config::{
self as core_config, Config, OLLAMA_CLOUD_BASE_URL, OLLAMA_CLOUD_ENDPOINT_KEY, OLLAMA_MODE_KEY, self as core_config, Config, OLLAMA_CLOUD_API_KEY_ENV, OLLAMA_CLOUD_BASE_URL,
OLLAMA_CLOUD_ENDPOINT_KEY, OLLAMA_MODE_KEY,
}; };
use owlen_core::credentials::{ApiCredentials, CredentialManager, OLLAMA_CLOUD_CREDENTIAL_ID}; use owlen_core::credentials::{ApiCredentials, CredentialManager, OLLAMA_CLOUD_CREDENTIAL_ID};
use owlen_core::encryption; use owlen_core::encryption;
@@ -17,6 +18,7 @@ use serde_json::Value;
const DEFAULT_CLOUD_ENDPOINT: &str = OLLAMA_CLOUD_BASE_URL; const DEFAULT_CLOUD_ENDPOINT: &str = OLLAMA_CLOUD_BASE_URL;
const CLOUD_ENDPOINT_KEY: &str = OLLAMA_CLOUD_ENDPOINT_KEY; const CLOUD_ENDPOINT_KEY: &str = OLLAMA_CLOUD_ENDPOINT_KEY;
const CLOUD_PROVIDER_KEY: &str = "ollama_cloud";
#[derive(Debug, Subcommand)] #[derive(Debug, Subcommand)]
pub enum CloudCommand { pub enum CloudCommand {
@@ -28,8 +30,8 @@ pub enum CloudCommand {
/// Override the cloud endpoint (default: https://ollama.com) /// Override the cloud endpoint (default: https://ollama.com)
#[arg(long)] #[arg(long)]
endpoint: Option<String>, endpoint: Option<String>,
/// Provider name to configure (default: ollama) /// Provider name to configure (default: ollama_cloud)
#[arg(long, default_value = "ollama")] #[arg(long, default_value = "ollama_cloud")]
provider: String, provider: String,
/// Overwrite the provider base URL with the cloud endpoint /// Overwrite the provider base URL with the cloud endpoint
#[arg(long)] #[arg(long)]
@@ -37,20 +39,20 @@ pub enum CloudCommand {
}, },
/// Check connectivity to Ollama Cloud /// Check connectivity to Ollama Cloud
Status { Status {
/// Provider name to check (default: ollama) /// Provider name to check (default: ollama_cloud)
#[arg(long, default_value = "ollama")] #[arg(long, default_value = "ollama_cloud")]
provider: String, provider: String,
}, },
/// List available cloud-hosted models /// List available cloud-hosted models
Models { Models {
/// Provider name to query (default: ollama) /// Provider name to query (default: ollama_cloud)
#[arg(long, default_value = "ollama")] #[arg(long, default_value = "ollama_cloud")]
provider: String, provider: String,
}, },
/// Remove stored Ollama Cloud credentials /// Remove stored Ollama Cloud credentials
Logout { Logout {
/// Provider name to clear (default: ollama) /// Provider name to clear (default: ollama_cloud)
#[arg(long, default_value = "ollama")] #[arg(long, default_value = "ollama_cloud")]
provider: String, provider: String,
}, },
} }
@@ -82,6 +84,7 @@ async fn setup(
let base_changed = { let base_changed = {
let entry = ensure_provider_entry(&mut config, &provider); let entry = ensure_provider_entry(&mut config, &provider);
entry.enabled = true;
configure_cloud_endpoint(entry, &endpoint, force_cloud_base_url) configure_cloud_endpoint(entry, &endpoint, force_cloud_base_url)
}; };
@@ -140,6 +143,7 @@ async fn status(provider: String) -> Result<()> {
let api_key = hydrate_api_key(&mut config, manager.as_ref()).await?; let api_key = hydrate_api_key(&mut config, manager.as_ref()).await?;
{ {
let entry = ensure_provider_entry(&mut config, &provider); let entry = ensure_provider_entry(&mut config, &provider);
entry.enabled = true;
configure_cloud_endpoint(entry, DEFAULT_CLOUD_ENDPOINT, false); configure_cloud_endpoint(entry, DEFAULT_CLOUD_ENDPOINT, false);
} }
@@ -190,6 +194,7 @@ async fn models(provider: String) -> Result<()> {
{ {
let entry = ensure_provider_entry(&mut config, &provider); let entry = ensure_provider_entry(&mut config, &provider);
entry.enabled = true;
configure_cloud_endpoint(entry, DEFAULT_CLOUD_ENDPOINT, false); configure_cloud_endpoint(entry, DEFAULT_CLOUD_ENDPOINT, false);
} }
@@ -245,8 +250,9 @@ async fn logout(provider: String) -> Result<()> {
.await?; .await?;
} }
if let Some(entry) = provider_entry_mut(&mut config) { if let Some(entry) = config.providers.get_mut(&provider) {
entry.api_key = None; entry.api_key = None;
entry.enabled = false;
} }
crate::config::save_config(&config)?; crate::config::save_config(&config)?;
@@ -255,28 +261,7 @@ async fn logout(provider: String) -> Result<()> {
} }
fn ensure_provider_entry<'a>(config: &'a mut Config, provider: &str) -> &'a mut ProviderConfig { fn ensure_provider_entry<'a>(config: &'a mut Config, provider: &str) -> &'a mut ProviderConfig {
if provider == "ollama" core_config::ensure_provider_config_mut(config, provider)
&& config.providers.contains_key("ollama-cloud")
&& !config.providers.contains_key("ollama")
{
if let Some(mut legacy) = config.providers.remove("ollama-cloud") {
legacy.provider_type = "ollama".to_string();
config.providers.insert("ollama".to_string(), legacy);
}
}
core_config::ensure_provider_config(config, provider);
let entry = config
.providers
.get_mut(provider)
.expect("provider entry must exist");
if entry.provider_type != "ollama" {
entry.provider_type = "ollama".to_string();
}
entry
} }
fn configure_cloud_endpoint(entry: &mut ProviderConfig, endpoint: &str, force: bool) -> bool { fn configure_cloud_endpoint(entry: &mut ProviderConfig, endpoint: &str, force: bool) -> bool {
@@ -287,6 +272,10 @@ fn configure_cloud_endpoint(entry: &mut ProviderConfig, endpoint: &str, force: b
Value::String(normalized.clone()), Value::String(normalized.clone()),
); );
if entry.api_key_env.is_none() {
entry.api_key_env = Some(OLLAMA_CLOUD_API_KEY_ENV.to_string());
}
if force if force
|| entry || entry
.base_url .base_url
@@ -298,10 +287,7 @@ fn configure_cloud_endpoint(entry: &mut ProviderConfig, endpoint: &str, force: b
} }
if force { if force {
entry.extra.insert( entry.enabled = true;
OLLAMA_MODE_KEY.to_string(),
Value::String("cloud".to_string()),
);
} }
entry.base_url != previous_base entry.base_url != previous_base
@@ -333,10 +319,11 @@ fn normalize_endpoint(endpoint: &str) -> String {
} }
fn canonical_provider_name(provider: &str) -> String { fn canonical_provider_name(provider: &str) -> String {
let normalized = provider.trim().replace('_', "-").to_ascii_lowercase(); let normalized = provider.trim().to_ascii_lowercase().replace('-', "_");
match normalized.as_str() { match normalized.as_str() {
"" => "ollama".to_string(), "" => CLOUD_PROVIDER_KEY.to_string(),
"ollama-cloud" => "ollama".to_string(), "ollama" => CLOUD_PROVIDER_KEY.to_string(),
"ollama_cloud" => CLOUD_PROVIDER_KEY.to_string(),
value => value.to_string(), value => value.to_string(),
} }
} }
@@ -362,21 +349,6 @@ fn set_env_if_missing(var: &str, value: &str) {
} }
} }
fn provider_entry_mut(config: &mut Config) -> Option<&mut ProviderConfig> {
if config.providers.contains_key("ollama") {
config.providers.get_mut("ollama")
} else {
config.providers.get_mut("ollama-cloud")
}
}
fn provider_entry(config: &Config) -> Option<&ProviderConfig> {
if let Some(entry) = config.providers.get("ollama") {
return Some(entry);
}
config.providers.get("ollama-cloud")
}
fn unlock_credential_manager( fn unlock_credential_manager(
config: &Config, config: &Config,
storage: Arc<StorageManager>, storage: Arc<StorageManager>,
@@ -463,14 +435,13 @@ async fn hydrate_api_key(
set_env_if_missing("OLLAMA_CLOUD_API_KEY", &key); set_env_if_missing("OLLAMA_CLOUD_API_KEY", &key);
} }
let Some(cfg) = provider_entry_mut(config) else { let cfg = core_config::ensure_provider_config_mut(config, CLOUD_PROVIDER_KEY);
return Ok(Some(key));
};
configure_cloud_endpoint(cfg, &credentials.endpoint, false); configure_cloud_endpoint(cfg, &credentials.endpoint, false);
return Ok(Some(key)); return Ok(Some(key));
} }
if let Some(key) = provider_entry(config) if let Some(key) = config
.provider(CLOUD_PROVIDER_KEY)
.and_then(|cfg| cfg.api_key.as_ref()) .and_then(|cfg| cfg.api_key.as_ref())
.map(|value| value.trim()) .map(|value| value.trim())
.filter(|value| !value.is_empty()) .filter(|value| !value.is_empty())
@@ -501,8 +472,8 @@ mod tests {
#[test] #[test]
fn canonicalises_provider_names() { fn canonicalises_provider_names() {
assert_eq!(canonical_provider_name("OLLAMA_CLOUD"), "ollama"); assert_eq!(canonical_provider_name("OLLAMA_CLOUD"), CLOUD_PROVIDER_KEY);
assert_eq!(canonical_provider_name(" ollama-cloud"), "ollama"); assert_eq!(canonical_provider_name(" ollama-cloud"), CLOUD_PROVIDER_KEY);
assert_eq!(canonical_provider_name(""), "ollama"); assert_eq!(canonical_provider_name(""), CLOUD_PROVIDER_KEY);
} }
} }

View File

@@ -123,7 +123,7 @@ fn build_local_provider(cfg: &Config) -> anyhow::Result<Arc<dyn Provider>> {
})?; })?;
match provider_cfg.provider_type.as_str() { match provider_cfg.provider_type.as_str() {
"ollama" | "ollama-cloud" => { "ollama" | "ollama_cloud" => {
let provider = OllamaProvider::from_config(provider_cfg, Some(&cfg.general))?; let provider = OllamaProvider::from_config(provider_cfg, Some(&cfg.general))?;
Ok(Arc::new(provider) as Arc<dyn Provider>) Ok(Arc::new(provider) as Arc<dyn Provider>)
} }
@@ -172,40 +172,16 @@ fn run_config_doctor() -> Result<()> {
changes.push("created configuration file from defaults".to_string()); changes.push("created configuration file from defaults".to_string());
} }
if !config if config.provider(&config.general.default_provider).is_none() {
.providers config.general.default_provider = "ollama_local".to_string();
.contains_key(&config.general.default_provider) changes.push("default provider missing; reset to 'ollama_local'".to_string());
{
config.general.default_provider = "ollama".to_string();
changes.push("default provider missing; reset to 'ollama'".to_string());
} }
if let Some(mut legacy) = config.providers.remove("ollama-cloud") { for key in ["ollama_local", "ollama_cloud", "openai", "anthropic"] {
legacy.provider_type = "ollama".to_string(); if !config.providers.contains_key(key) {
use std::collections::hash_map::Entry; core_config::ensure_provider_config_mut(&mut config, key);
match config.providers.entry("ollama".to_string()) { changes.push(format!("added default configuration for provider '{key}'"));
Entry::Occupied(mut existing) => {
let entry = existing.get_mut();
if entry.api_key.is_none() {
entry.api_key = legacy.api_key.take();
} }
if entry.base_url.is_none() && legacy.base_url.is_some() {
entry.base_url = legacy.base_url.take();
}
entry.extra.extend(legacy.extra);
}
Entry::Vacant(slot) => {
slot.insert(legacy);
}
}
changes.push(
"migrated legacy 'ollama-cloud' provider into unified 'ollama' entry".to_string(),
);
}
if !config.providers.contains_key("ollama") {
core_config::ensure_provider_config(&mut config, "ollama");
changes.push("added default ollama provider configuration".to_string());
} }
match config.mcp.mode { match config.mcp.mode {

View File

@@ -16,7 +16,7 @@ use std::time::Duration;
pub const DEFAULT_CONFIG_PATH: &str = "~/.config/owlen/config.toml"; pub const DEFAULT_CONFIG_PATH: &str = "~/.config/owlen/config.toml";
/// Current schema version written to `config.toml`. /// Current schema version written to `config.toml`.
pub const CONFIG_SCHEMA_VERSION: &str = "1.5.0"; pub const CONFIG_SCHEMA_VERSION: &str = "1.6.0";
/// Provider config key for forcing Ollama provider mode. /// Provider config key for forcing Ollama provider mode.
pub const OLLAMA_MODE_KEY: &str = "ollama_mode"; pub const OLLAMA_MODE_KEY: &str = "ollama_mode";
@@ -24,6 +24,18 @@ pub const OLLAMA_MODE_KEY: &str = "ollama_mode";
pub const OLLAMA_CLOUD_ENDPOINT_KEY: &str = "cloud_endpoint"; pub const OLLAMA_CLOUD_ENDPOINT_KEY: &str = "cloud_endpoint";
/// Canonical Ollama Cloud base URL. /// Canonical Ollama Cloud base URL.
pub const OLLAMA_CLOUD_BASE_URL: &str = "https://ollama.com"; pub const OLLAMA_CLOUD_BASE_URL: &str = "https://ollama.com";
/// Environment variable used for Ollama Cloud authentication.
pub const OLLAMA_CLOUD_API_KEY_ENV: &str = "OLLAMA_CLOUD_API_KEY";
/// Default base URL for local Ollama daemons.
pub const OLLAMA_LOCAL_BASE_URL: &str = "http://localhost:11434";
/// Default OpenAI API base URL.
pub const OPENAI_DEFAULT_BASE_URL: &str = "https://api.openai.com/v1";
/// Environment variable name used for OpenAI API keys.
pub const OPENAI_API_KEY_ENV: &str = "OPENAI_API_KEY";
/// Default Anthropic API base URL.
pub const ANTHROPIC_DEFAULT_BASE_URL: &str = "https://api.anthropic.com/v1";
/// Environment variable name used for Anthropic API keys.
pub const ANTHROPIC_API_KEY_ENV: &str = "ANTHROPIC_API_KEY";
/// Core configuration shared by all OWLEN clients /// Core configuration shared by all OWLEN clients
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -82,8 +94,7 @@ pub struct Config {
impl Default for Config { impl Default for Config {
fn default() -> Self { fn default() -> Self {
let mut providers = HashMap::new(); let providers = default_provider_configs();
providers.insert("ollama".to_string(), default_ollama_provider_config());
Self { Self {
schema_version: Self::default_schema_version(), schema_version: Self::default_schema_version(),
@@ -270,6 +281,8 @@ impl Config {
let content = fs::read_to_string(&path)?; let content = fs::read_to_string(&path)?;
let parsed: toml::Value = let parsed: toml::Value =
toml::from_str(&content).map_err(|e| crate::Error::Config(e.to_string()))?; toml::from_str(&content).map_err(|e| crate::Error::Config(e.to_string()))?;
let mut parsed = parsed;
migrate_legacy_provider_tables(&mut parsed);
let previous_version = parsed let previous_version = parsed
.get("schema_version") .get("schema_version")
.and_then(|value| value.as_str()) .and_then(|value| value.as_str())
@@ -326,12 +339,19 @@ impl Config {
/// Get provider configuration by provider name /// Get provider configuration by provider name
pub fn provider(&self, name: &str) -> Option<&ProviderConfig> { pub fn provider(&self, name: &str) -> Option<&ProviderConfig> {
self.providers.get(name) let key = normalize_provider_key(name);
self.providers.get(&key)
} }
/// Update or insert a provider configuration /// Update or insert a provider configuration
pub fn upsert_provider(&mut self, name: impl Into<String>, config: ProviderConfig) { pub fn upsert_provider(&mut self, name: impl Into<String>, config: ProviderConfig) {
self.providers.insert(name.into(), config); let raw = name.into();
let key = normalize_provider_key(&raw);
let mut config = config;
if config.provider_type.is_empty() {
config.provider_type = key.clone();
}
self.providers.insert(key, config);
} }
/// Resolve default model in order of priority: explicit default, first cached model, provider fallback /// Resolve default model in order of priority: explicit default, first cached model, provider fallback
@@ -353,11 +373,15 @@ impl Config {
} }
fn ensure_defaults(&mut self) { fn ensure_defaults(&mut self) {
if self.general.default_provider.is_empty() { if self.general.default_provider.is_empty() || self.general.default_provider == "ollama" {
self.general.default_provider = "ollama".to_string(); self.general.default_provider = "ollama_local".to_string();
}
let mut defaults = default_provider_configs();
for (name, default_cfg) in defaults.drain() {
self.providers.entry(name).or_insert(default_cfg);
} }
ensure_provider_config(self, "ollama");
if self.schema_version.is_empty() { if self.schema_version.is_empty() {
self.schema_version = Self::default_schema_version(); self.schema_version = Self::default_schema_version();
} }
@@ -561,6 +585,7 @@ impl Config {
self.validate_default_provider()?; self.validate_default_provider()?;
self.validate_mcp_settings()?; self.validate_mcp_settings()?;
self.validate_mcp_servers()?; self.validate_mcp_servers()?;
self.validate_providers()?;
Ok(()) Ok(())
} }
@@ -573,57 +598,92 @@ impl Config {
); );
} }
if let Some(legacy_cloud) = self.providers.remove("ollama_cloud") { self.migrate_provider_entries();
self.merge_legacy_ollama_provider(legacy_cloud); if self.general.default_provider == "ollama" {
self.general.default_provider = "ollama_local".to_string();
} }
self.ensure_defaults();
if let Some(legacy_cloud) = self.providers.remove("ollama-cloud") {
self.merge_legacy_ollama_provider(legacy_cloud);
}
if let Some(ollama) = self.providers.get_mut("ollama") {
let previous_mode = ollama
.extra
.get(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str())
.map(|value| value.to_ascii_lowercase());
ensure_ollama_mode_extra(ollama);
if previous_mode.as_deref().unwrap_or("auto") == "auto"
&& is_cloud_base_url(ollama.base_url.as_ref())
{
ollama.extra.insert(
OLLAMA_MODE_KEY.to_string(),
serde_json::Value::String("cloud".to_string()),
);
}
}
self.schema_version = CONFIG_SCHEMA_VERSION.to_string(); self.schema_version = CONFIG_SCHEMA_VERSION.to_string();
} }
fn merge_legacy_ollama_provider(&mut self, mut legacy_cloud: ProviderConfig) { fn migrate_provider_entries(&mut self) {
use std::collections::hash_map::Entry; let mut migrated = default_provider_configs();
let legacy_entries = std::mem::take(&mut self.providers);
legacy_cloud.provider_type = "ollama".to_string(); for (original_key, mut legacy) in legacy_entries {
if original_key == "ollama" {
Self::merge_legacy_ollama_provider(legacy, &mut migrated);
continue;
}
match self.providers.entry("ollama".to_string()) { let normalized = normalize_provider_key(&original_key);
Entry::Occupied(mut entry) => { let entry = migrated
let target = entry.get_mut(); .entry(normalized.clone())
if target.base_url.is_none() { .or_insert_with(|| ProviderConfig {
target.base_url = legacy_cloud.base_url.take(); enabled: true,
provider_type: normalized.clone(),
base_url: None,
api_key: None,
api_key_env: None,
extra: HashMap::new(),
});
if legacy.provider_type.is_empty() {
legacy.provider_type = normalized.clone();
} }
if target.api_key.is_none() {
target.api_key = legacy_cloud.api_key.take(); entry.merge_from(legacy);
if entry.provider_type.is_empty() {
entry.provider_type = normalized;
} }
if target.extra.is_empty() && !legacy_cloud.extra.is_empty() {
target.extra = legacy_cloud.extra;
} }
ensure_ollama_mode_extra(target);
self.providers = migrated;
}
fn merge_legacy_ollama_provider(
mut legacy: ProviderConfig,
targets: &mut HashMap<String, ProviderConfig>,
) {
let mode = legacy
.extra
.remove(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str().map(|s| s.trim().to_ascii_lowercase()));
let api_key_present = legacy
.api_key
.as_ref()
.map(|value| !value.trim().is_empty())
.unwrap_or(false);
let cloud_candidate =
matches!(mode.as_deref(), Some("cloud")) || is_cloud_base_url(legacy.base_url.as_ref());
let should_enable_cloud = cloud_candidate || api_key_present;
if matches!(mode.as_deref(), Some("local")) || !should_enable_cloud {
if let Some(local) = targets.get_mut("ollama_local") {
let mut copy = legacy.clone();
copy.api_key = None;
copy.api_key_env = None;
copy.enabled = true;
local.merge_from(copy);
local.enabled = true;
if local.base_url.is_none() {
local.base_url = Some(OLLAMA_LOCAL_BASE_URL.to_string());
}
}
}
if should_enable_cloud || matches!(mode.as_deref(), Some("cloud")) {
if let Some(cloud) = targets.get_mut("ollama_cloud") {
legacy.enabled = true;
cloud.merge_from(legacy);
cloud.enabled = true;
if cloud.base_url.is_none() {
cloud.base_url = Some(OLLAMA_CLOUD_BASE_URL.to_string());
}
if cloud.api_key_env.is_none() {
cloud.api_key_env = Some(OLLAMA_CLOUD_API_KEY_ENV.to_string());
} }
Entry::Vacant(entry) => {
let mut inserted = legacy_cloud;
ensure_ollama_mode_extra(&mut inserted);
entry.insert(inserted);
} }
} }
} }
@@ -693,40 +753,164 @@ impl Config {
Ok(()) Ok(())
} }
fn validate_providers(&self) -> Result<()> {
for (name, provider) in &self.providers {
if !provider.enabled {
continue;
}
match name.as_str() {
"ollama_local" => {
if is_blank(&provider.base_url) {
return Err(Error::Config(
"providers.ollama_local.base_url must be set when enabled".into(),
));
}
}
"ollama_cloud" => {
if is_blank(&provider.base_url) {
return Err(Error::Config(
"providers.ollama_cloud.base_url must be set when enabled".into(),
));
}
if is_blank(&provider.api_key) && is_blank(&provider.api_key_env) {
return Err(Error::Config(
"providers.ollama_cloud requires `api_key` or `api_key_env` when enabled"
.into(),
));
}
}
"openai" | "anthropic" => {
if is_blank(&provider.api_key) && is_blank(&provider.api_key_env) {
return Err(Error::Config(format!(
"providers.{name} requires `api_key` or `api_key_env` when enabled"
)));
}
}
_ => {}
}
}
Ok(())
}
} }
fn default_ollama_provider_config() -> ProviderConfig { fn default_provider_configs() -> HashMap<String, ProviderConfig> {
let mut config = ProviderConfig { let mut providers = HashMap::new();
provider_type: "ollama".to_string(), for name in ["ollama_local", "ollama_cloud", "openai", "anthropic"] {
base_url: Some("http://localhost:11434".to_string()), if let Some(config) = default_provider_config_for(name) {
providers.insert(name.to_string(), config);
}
}
providers
}
fn default_ollama_local_config() -> ProviderConfig {
ProviderConfig {
enabled: true,
provider_type: canonical_provider_type("ollama_local"),
base_url: Some(OLLAMA_LOCAL_BASE_URL.to_string()),
api_key: None, api_key: None,
api_key_env: None,
extra: HashMap::new(), extra: HashMap::new(),
}; }
ensure_ollama_mode_extra(&mut config);
config
} }
fn ensure_ollama_mode_extra(provider: &mut ProviderConfig) { fn default_ollama_cloud_config() -> ProviderConfig {
if provider.provider_type != "ollama" { let mut extra = HashMap::new();
extra.insert(
OLLAMA_CLOUD_ENDPOINT_KEY.to_string(),
serde_json::Value::String(OLLAMA_CLOUD_BASE_URL.to_string()),
);
ProviderConfig {
enabled: false,
provider_type: canonical_provider_type("ollama_cloud"),
base_url: Some(OLLAMA_CLOUD_BASE_URL.to_string()),
api_key: None,
api_key_env: Some(OLLAMA_CLOUD_API_KEY_ENV.to_string()),
extra,
}
}
fn default_openai_config() -> ProviderConfig {
ProviderConfig {
enabled: false,
provider_type: canonical_provider_type("openai"),
base_url: Some(OPENAI_DEFAULT_BASE_URL.to_string()),
api_key: None,
api_key_env: Some(OPENAI_API_KEY_ENV.to_string()),
extra: HashMap::new(),
}
}
fn default_anthropic_config() -> ProviderConfig {
ProviderConfig {
enabled: false,
provider_type: canonical_provider_type("anthropic"),
base_url: Some(ANTHROPIC_DEFAULT_BASE_URL.to_string()),
api_key: None,
api_key_env: Some(ANTHROPIC_API_KEY_ENV.to_string()),
extra: HashMap::new(),
}
}
fn default_provider_config_for(name: &str) -> Option<ProviderConfig> {
match name {
"ollama_local" => Some(default_ollama_local_config()),
"ollama_cloud" => Some(default_ollama_cloud_config()),
"openai" => Some(default_openai_config()),
"anthropic" => Some(default_anthropic_config()),
_ => None,
}
}
fn normalize_provider_key(name: &str) -> String {
let normalized = name.trim().to_ascii_lowercase();
match normalized.as_str() {
"ollama" | "ollama-local" => "ollama_local".to_string(),
"ollama_cloud" | "ollama-cloud" => "ollama_cloud".to_string(),
other => other.replace('-', "_"),
}
}
fn canonical_provider_type(key: &str) -> String {
match key {
"ollama_local" => "ollama".to_string(),
other => other.to_string(),
}
}
fn is_blank(value: &Option<String>) -> bool {
value.as_ref().map(|s| s.trim().is_empty()).unwrap_or(true)
}
fn migrate_legacy_provider_tables(document: &mut toml::Value) {
let Some(table) = document.as_table_mut() else {
return;
};
let mut legacy = Vec::new();
for key in ["ollama", "ollama_cloud", "ollama-cloud"] {
if let Some(entry) = table.remove(key) {
legacy.push((key.to_string(), entry));
}
}
if legacy.is_empty() {
return; return;
} }
let entry = provider let providers_entry = table
.extra .entry("providers".to_string())
.entry(OLLAMA_MODE_KEY.to_string()) .or_insert_with(|| toml::Value::Table(toml::map::Map::new()));
.or_insert_with(|| serde_json::Value::String("auto".to_string()));
if let Some(value) = entry.as_str() { if let Some(providers_table) = providers_entry.as_table_mut() {
let normalized = value.trim().to_ascii_lowercase(); for (key, value) in legacy {
if matches!(normalized.as_str(), "auto" | "local" | "cloud") { providers_table.insert(key, value);
if normalized != value {
*entry = serde_json::Value::String(normalized);
} }
} else {
*entry = serde_json::Value::String("auto".to_string());
}
} else {
*entry = serde_json::Value::String("auto".to_string());
} }
} }
@@ -1117,7 +1301,7 @@ impl GeneralSettings {
impl Default for GeneralSettings { impl Default for GeneralSettings {
fn default() -> Self { fn default() -> Self {
Self { Self {
default_provider: "ollama".to_string(), default_provider: "ollama_local".to_string(),
default_model: Some("llama3.2:latest".to_string()), default_model: Some("llama3.2:latest".to_string()),
enable_streaming: Self::default_streaming(), enable_streaming: Self::default_streaming(),
project_context_file: Some("OWLEN.md".to_string()), project_context_file: Some("OWLEN.md".to_string()),
@@ -1650,7 +1834,35 @@ impl Default for InputSettings {
/// Convenience accessor for an Ollama provider entry, creating a default if missing /// Convenience accessor for an Ollama provider entry, creating a default if missing
pub fn ensure_ollama_config(config: &mut Config) -> &ProviderConfig { pub fn ensure_ollama_config(config: &mut Config) -> &ProviderConfig {
ensure_provider_config(config, "ollama") ensure_provider_config(config, "ollama_local")
}
/// Ensure a provider configuration exists for the requested provider name and return a mutable reference.
pub fn ensure_provider_config_mut<'a>(
config: &'a mut Config,
provider_name: &str,
) -> &'a mut ProviderConfig {
let key = normalize_provider_key(provider_name);
let entry = config.providers.entry(key.clone()).or_insert_with(|| {
let mut default = default_provider_config_for(&key).unwrap_or_else(|| ProviderConfig {
enabled: true,
provider_type: canonical_provider_type(&key),
base_url: None,
api_key: None,
api_key_env: None,
extra: HashMap::new(),
});
if default.provider_type.is_empty() {
default.provider_type = canonical_provider_type(&key);
}
default
});
if entry.provider_type.is_empty() {
entry.provider_type = canonical_provider_type(&key);
}
entry
} }
/// Ensure a provider configuration exists for the requested provider name /// Ensure a provider configuration exists for the requested provider name
@@ -1658,35 +1870,8 @@ pub fn ensure_provider_config<'a>(
config: &'a mut Config, config: &'a mut Config,
provider_name: &str, provider_name: &str,
) -> &'a ProviderConfig { ) -> &'a ProviderConfig {
use std::collections::hash_map::Entry; let entry = ensure_provider_config_mut(config, provider_name);
&*entry
if matches!(provider_name, "ollama_cloud" | "ollama-cloud") {
return ensure_provider_config(config, "ollama");
}
match config.providers.entry(provider_name.to_string()) {
Entry::Occupied(mut entry) => {
ensure_ollama_mode_extra(entry.get_mut());
}
Entry::Vacant(entry) => {
let mut default = match provider_name {
"ollama" => default_ollama_provider_config(),
other => ProviderConfig {
provider_type: other.to_string(),
base_url: None,
api_key: None,
extra: HashMap::new(),
},
};
ensure_ollama_mode_extra(&mut default);
entry.insert(default);
}
}
config
.providers
.get(provider_name)
.expect("provider entry must exist")
} }
/// Calculate absolute timeout for session data based on configuration /// Calculate absolute timeout for session data based on configuration
@@ -1705,8 +1890,8 @@ mod tests {
} }
let mut config = Config::default(); let mut config = Config::default();
if let Some(ollama) = config.providers.get_mut("ollama") { if let Some(ollama_local) = config.providers.get_mut("ollama_local") {
ollama.api_key = Some("${OWLEN_TEST_API_KEY}".to_string()); ollama_local.api_key = Some("${OWLEN_TEST_API_KEY}".to_string());
} }
config config
@@ -1714,7 +1899,7 @@ mod tests {
.expect("environment expansion succeeded"); .expect("environment expansion succeeded");
assert_eq!( assert_eq!(
config.providers["ollama"].api_key.as_deref(), config.providers["ollama_local"].api_key.as_deref(),
Some("super-secret") Some("super-secret")
); );
@@ -1730,8 +1915,8 @@ mod tests {
} }
let mut config = Config::default(); let mut config = Config::default();
if let Some(ollama) = config.providers.get_mut("ollama") { if let Some(ollama_local) = config.providers.get_mut("ollama_local") {
ollama.api_key = Some("${OWLEN_TEST_MISSING}".to_string()); ollama_local.api_key = Some("${OWLEN_TEST_MISSING}".to_string());
} }
let error = config let error = config
@@ -1792,15 +1977,19 @@ mod tests {
#[test] #[test]
fn default_config_contains_local_provider() { fn default_config_contains_local_provider() {
let config = Config::default(); let config = Config::default();
assert!(config.providers.contains_key("ollama")); let local = config
let provider = config.providers.get("ollama").unwrap(); .providers
assert_eq!( .get("ollama_local")
provider .expect("default local provider");
.extra assert!(local.enabled);
.get(OLLAMA_MODE_KEY) assert_eq!(local.base_url.as_deref(), Some(OLLAMA_LOCAL_BASE_URL));
.and_then(|value| value.as_str()),
Some("auto") let cloud = config
); .providers
.get("ollama_cloud")
.expect("default cloud provider");
assert!(!cloud.enabled);
assert_eq!(cloud.api_key_env.as_deref(), Some(OLLAMA_CLOUD_API_KEY_ENV));
} }
#[test] #[test]
@@ -1808,16 +1997,10 @@ mod tests {
let mut config = Config::default(); let mut config = Config::default();
config.providers.clear(); config.providers.clear();
let cloud = ensure_provider_config(&mut config, "ollama-cloud"); let cloud = ensure_provider_config(&mut config, "ollama-cloud");
assert_eq!(cloud.provider_type, "ollama"); assert_eq!(cloud.provider_type, "ollama_cloud");
assert_eq!(cloud.base_url.as_deref(), Some("http://localhost:11434")); assert_eq!(cloud.base_url.as_deref(), Some(OLLAMA_CLOUD_BASE_URL));
assert_eq!( assert_eq!(cloud.api_key_env.as_deref(), Some(OLLAMA_CLOUD_API_KEY_ENV));
cloud assert!(config.providers.contains_key("ollama_cloud"));
.extra
.get(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str()),
Some("auto")
);
assert!(config.providers.contains_key("ollama"));
assert!(!config.providers.contains_key("ollama-cloud")); assert!(!config.providers.contains_key("ollama-cloud"));
} }
@@ -1828,48 +2011,100 @@ mod tests {
config.providers.insert( config.providers.insert(
"ollama_cloud".to_string(), "ollama_cloud".to_string(),
ProviderConfig { ProviderConfig {
enabled: true,
provider_type: "ollama_cloud".to_string(), provider_type: "ollama_cloud".to_string(),
base_url: Some("https://api.ollama.com".to_string()), base_url: Some("https://api.ollama.com".to_string()),
api_key: Some("secret".to_string()), api_key: Some("secret".to_string()),
api_key_env: None,
extra: HashMap::new(), extra: HashMap::new(),
}, },
); );
config.apply_schema_migrations("1.0.0"); config.apply_schema_migrations("1.0.0");
assert!(config.providers.get("ollama_cloud").is_none()); assert!(config.providers.get("ollama_cloud").is_some());
assert!(config.providers.get("ollama-cloud").is_none()); let cloud = config
let cloud = config.providers.get("ollama").expect("migrated config"); .providers
assert_eq!(cloud.provider_type, "ollama"); .get("ollama_cloud")
.expect("migrated config");
assert!(cloud.enabled);
assert_eq!(cloud.provider_type, "ollama_cloud");
assert_eq!(cloud.base_url.as_deref(), Some("https://api.ollama.com")); assert_eq!(cloud.base_url.as_deref(), Some("https://api.ollama.com"));
assert_eq!(cloud.api_key.as_deref(), Some("secret")); assert_eq!(cloud.api_key.as_deref(), Some("secret"));
assert_eq!(
cloud
.extra
.get(OLLAMA_MODE_KEY)
.and_then(|value| value.as_str()),
Some("auto")
);
} }
#[test] #[test]
fn migration_sets_cloud_mode_for_cloud_base() { fn migration_sets_cloud_mode_for_cloud_base() {
let mut config = Config::default(); let mut config = Config::default();
if let Some(ollama) = config.providers.get_mut("ollama") { if let Some(ollama) = config.providers.get_mut("ollama_local") {
ollama.base_url = Some(OLLAMA_CLOUD_BASE_URL.to_string()); ollama.base_url = Some(OLLAMA_CLOUD_BASE_URL.to_string());
ollama.extra.remove(OLLAMA_MODE_KEY);
} }
config.apply_schema_migrations("1.4.0"); config.apply_schema_migrations("1.4.0");
let provider = config.providers.get("ollama").expect("ollama provider"); let cloud = config
assert_eq!( .providers
provider .get("ollama_cloud")
.extra .expect("cloud provider created");
.get(OLLAMA_MODE_KEY) assert!(cloud.enabled);
.and_then(|value| value.as_str()), assert_eq!(cloud.base_url.as_deref(), Some(OLLAMA_CLOUD_BASE_URL));
Some("cloud") assert_eq!(cloud.api_key_env.as_deref(), Some(OLLAMA_CLOUD_API_KEY_ENV));
}
#[test]
fn migrate_legacy_monolithic_ollama_entry() {
let mut config = Config::default();
config.providers.clear();
config.providers.insert(
"ollama".to_string(),
ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(),
base_url: Some(OLLAMA_LOCAL_BASE_URL.to_string()),
api_key: None,
api_key_env: None,
extra: HashMap::new(),
},
); );
config.apply_schema_migrations("1.2.0");
let local = config
.providers
.get("ollama_local")
.expect("local provider migrated");
assert!(local.enabled);
assert_eq!(local.base_url.as_deref(), Some(OLLAMA_LOCAL_BASE_URL));
let cloud = config
.providers
.get("ollama_cloud")
.expect("cloud provider placeholder");
assert!(!cloud.enabled);
}
#[test]
fn migrate_legacy_provider_tables_moves_top_level_entries() {
let mut document: toml::Value = toml::from_str(
r#"
[ollama]
base_url = "http://localhost:11434"
[general]
default_provider = "ollama"
"#,
)
.expect("valid inline config");
migrate_legacy_provider_tables(&mut document);
let providers = document
.get("providers")
.and_then(|value| value.as_table())
.expect("providers table present");
assert!(providers.contains_key("ollama"));
assert!(providers["ollama"].get("base_url").is_some());
assert!(document.get("ollama").is_none());
} }
#[test] #[test]

View File

@@ -18,6 +18,7 @@ pub mod mcp;
pub mod mode; pub mod mode;
pub mod model; pub mod model;
pub mod oauth; pub mod oauth;
pub mod provider;
pub mod providers; pub mod providers;
pub mod router; pub mod router;
pub mod sandbox; pub mod sandbox;
@@ -50,6 +51,7 @@ pub use mcp::{
}; };
pub use mode::*; pub use mode::*;
pub use model::*; pub use model::*;
pub use provider::*;
pub use providers::*; pub use providers::*;
pub use router::*; pub use router::*;
pub use sandbox::*; pub use sandbox::*;

View File

@@ -144,17 +144,57 @@ where
/// Runtime configuration for a provider instance. /// Runtime configuration for a provider instance.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ProviderConfig { pub struct ProviderConfig {
/// Provider type identifier. /// Whether this provider should be activated.
#[serde(default = "ProviderConfig::default_enabled")]
pub enabled: bool,
/// Provider type identifier used to resolve implementations.
#[serde(default)]
pub provider_type: String, pub provider_type: String,
/// Base URL for API calls. /// Base URL for API calls.
#[serde(default)]
pub base_url: Option<String>, pub base_url: Option<String>,
/// API key or token material. /// API key or token material.
#[serde(default)]
pub api_key: Option<String>, pub api_key: Option<String>,
/// Environment variable holding the API key.
#[serde(default)]
pub api_key_env: Option<String>,
/// Additional provider-specific configuration. /// Additional provider-specific configuration.
#[serde(flatten)] #[serde(flatten)]
pub extra: HashMap<String, Value>, pub extra: HashMap<String, Value>,
} }
impl ProviderConfig {
const fn default_enabled() -> bool {
true
}
/// Merge the current configuration with overrides from `other`.
pub fn merge_from(&mut self, mut other: ProviderConfig) {
self.enabled = other.enabled;
if !other.provider_type.is_empty() {
self.provider_type = other.provider_type;
}
if let Some(base_url) = other.base_url.take() {
self.base_url = Some(base_url);
}
if let Some(api_key) = other.api_key.take() {
self.api_key = Some(api_key);
}
if let Some(api_key_env) = other.api_key_env.take() {
self.api_key_env = Some(api_key_env);
}
if !other.extra.is_empty() {
self.extra.extend(other.extra);
}
}
}
/// Static registry of providers available to the application. /// Static registry of providers available to the application.
pub struct ProviderRegistry { pub struct ProviderRegistry {
providers: HashMap<String, Arc<dyn Provider>>, providers: HashMap<String, Arc<dyn Provider>>,

View File

@@ -0,0 +1,227 @@
use std::collections::HashMap;
use std::sync::Arc;
use futures::stream::{FuturesUnordered, StreamExt};
use log::{debug, warn};
use tokio::sync::RwLock;
use crate::config::Config;
use crate::{Error, Result};
use super::{GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderStatus};
/// Model information annotated with the originating provider metadata.
#[derive(Debug, Clone)]
pub struct AnnotatedModelInfo {
    /// Key the provider was registered under in the manager.
    pub provider_id: String,
    /// Provider health observed while the model list was gathered.
    pub provider_status: ProviderStatus,
    /// The model description reported by the provider.
    pub model: ModelInfo,
}
/// Coordinates multiple [`ModelProvider`] implementations and tracks their
/// health state.
pub struct ProviderManager {
    /// Registered providers keyed by their metadata id.
    providers: RwLock<HashMap<String, Arc<dyn ModelProvider>>>,
    /// Last known status per provider id; may also contain ids seeded from
    /// the config that have not been registered yet.
    status_cache: RwLock<HashMap<String, ProviderStatus>>,
}
impl ProviderManager {
    /// Construct a new manager using the supplied configuration. Providers
    /// defined in the configuration start with a `RequiresSetup` status so
    /// that frontends can surface incomplete configuration to users.
    pub fn new(config: &Config) -> Self {
        let mut status_cache = HashMap::new();
        for provider_id in config.providers.keys() {
            status_cache.insert(provider_id.clone(), ProviderStatus::RequiresSetup);
        }
        Self {
            providers: RwLock::new(HashMap::new()),
            status_cache: RwLock::new(status_cache),
        }
    }

    /// Register a provider instance with the manager.
    ///
    /// The provider is keyed by its metadata `id`; a second registration
    /// under the same id replaces the first. The cached status is reset to
    /// `Unavailable` until a health check or successful call proves
    /// otherwise.
    pub async fn register_provider(&self, provider: Arc<dyn ModelProvider>) {
        let provider_id = provider.metadata().id.clone();
        debug!("registering provider {}", provider_id);
        self.providers
            .write()
            .await
            .insert(provider_id.clone(), provider);
        self.status_cache
            .write()
            .await
            .insert(provider_id, ProviderStatus::Unavailable);
    }

    /// Return a stream by routing the request to the designated provider.
    ///
    /// Side effect: the provider's cached status becomes `Available` or
    /// `Unavailable` depending on whether the call succeeded.
    pub async fn generate(
        &self,
        provider_id: &str,
        request: GenerateRequest,
    ) -> Result<GenerateStream> {
        // Clone the Arc out of the read guard so the map lock is not held
        // while the provider call below is awaited.
        let provider = {
            let guard = self.providers.read().await;
            guard.get(provider_id).cloned()
        }
        .ok_or_else(|| Error::Config(format!("provider '{provider_id}' not registered")))?;
        match provider.generate_stream(request).await {
            Ok(stream) => {
                self.status_cache
                    .write()
                    .await
                    .insert(provider_id.to_string(), ProviderStatus::Available);
                Ok(stream)
            }
            Err(err) => {
                self.status_cache
                    .write()
                    .await
                    .insert(provider_id.to_string(), ProviderStatus::Unavailable);
                Err(err)
            }
        }
    }

    /// List models across all providers, updating provider status along the way.
    ///
    /// Providers are queried concurrently. A provider must report
    /// `Available` from its health check before its models are listed, and
    /// a failed listing demotes it back to `Unavailable`. Per-provider
    /// failures are logged, not propagated.
    pub async fn list_all_models(&self) -> Result<Vec<AnnotatedModelInfo>> {
        // Snapshot (id, provider) pairs so the map lock is released before
        // any network calls are awaited.
        let providers: Vec<(String, Arc<dyn ModelProvider>)> = {
            let guard = self.providers.read().await;
            guard
                .iter()
                .map(|(id, provider)| (id.clone(), Arc::clone(provider)))
                .collect()
        };
        let mut tasks = FuturesUnordered::new();
        for (provider_id, provider) in providers {
            tasks.push(async move {
                let log_id = provider_id.clone();
                let mut status = ProviderStatus::Unavailable;
                let mut models = Vec::new();
                match provider.health_check().await {
                    Ok(health) => {
                        status = health;
                        if matches!(status, ProviderStatus::Available) {
                            match provider.list_models().await {
                                Ok(list) => {
                                    models = list;
                                }
                                Err(err) => {
                                    status = ProviderStatus::Unavailable;
                                    warn!("listing models failed for provider {}: {}", log_id, err);
                                }
                            }
                        }
                    }
                    Err(err) => {
                        warn!("health check failed for provider {}: {}", log_id, err);
                    }
                }
                (provider_id, status, models)
            });
        }
        let mut annotated = Vec::new();
        let mut status_updates = HashMap::new();
        // Collect results as they complete; completion order is not
        // deterministic, so the output ordering is unspecified.
        while let Some((provider_id, status, models)) = tasks.next().await {
            status_updates.insert(provider_id.clone(), status);
            for model in models {
                annotated.push(AnnotatedModelInfo {
                    provider_id: provider_id.clone(),
                    provider_status: status,
                    model,
                });
            }
        }
        // Apply every status update under a single write lock.
        {
            let mut guard = self.status_cache.write().await;
            for (provider_id, status) in status_updates {
                guard.insert(provider_id, status);
            }
        }
        Ok(annotated)
    }

    /// Refresh the health of all registered providers in parallel, returning
    /// the latest status snapshot.
    pub async fn refresh_health(&self) -> HashMap<String, ProviderStatus> {
        // Same snapshot-then-release locking pattern as `list_all_models`.
        let providers: Vec<(String, Arc<dyn ModelProvider>)> = {
            let guard = self.providers.read().await;
            guard
                .iter()
                .map(|(id, provider)| (id.clone(), Arc::clone(provider)))
                .collect()
        };
        let mut tasks = FuturesUnordered::new();
        for (provider_id, provider) in providers {
            tasks.push(async move {
                let status = match provider.health_check().await {
                    Ok(status) => status,
                    Err(err) => {
                        warn!("health check failed for provider {}: {}", provider_id, err);
                        ProviderStatus::Unavailable
                    }
                };
                (provider_id, status)
            });
        }
        let mut updates = HashMap::new();
        while let Some((provider_id, status)) = tasks.next().await {
            updates.insert(provider_id, status);
        }
        {
            let mut guard = self.status_cache.write().await;
            for (provider_id, status) in &updates {
                guard.insert(provider_id.clone(), *status);
            }
        }
        updates
    }

    /// Return the provider instance for an identifier.
    pub async fn get_provider(&self, provider_id: &str) -> Option<Arc<dyn ModelProvider>> {
        let guard = self.providers.read().await;
        guard.get(provider_id).cloned()
    }

    /// List the registered provider identifiers.
    pub async fn provider_ids(&self) -> Vec<String> {
        let guard = self.providers.read().await;
        guard.keys().cloned().collect()
    }

    /// Retrieve the last known status for a provider.
    pub async fn provider_status(&self, provider_id: &str) -> Option<ProviderStatus> {
        let guard = self.status_cache.read().await;
        guard.get(provider_id).copied()
    }

    /// Snapshot the currently cached statuses.
    pub async fn provider_statuses(&self) -> HashMap<String, ProviderStatus> {
        let guard = self.status_cache.read().await;
        guard.clone()
    }
}
impl Default for ProviderManager {
    /// An empty manager: no registered providers, no cached statuses.
    fn default() -> Self {
        Self {
            providers: RwLock::default(),
            status_cache: RwLock::default(),
        }
    }
}

View File

@@ -0,0 +1,36 @@
//! Unified provider abstraction layer.
//!
//! This module defines the async [`ModelProvider`] trait that all model
//! backends implement, together with a small suite of shared data structures
//! used for model discovery and streaming generation. The [`ProviderManager`]
//! orchestrates multiple providers and coordinates their health state.
mod manager;
mod types;
use std::pin::Pin;
use async_trait::async_trait;
use futures::Stream;
pub use self::{manager::*, types::*};
use crate::Result;
/// Convenience alias for the stream type yielded by [`ModelProvider::generate_stream`].
pub type GenerateStream = Pin<Box<dyn Stream<Item = Result<GenerateChunk>> + Send + 'static>>;

/// Abstraction over a model backend.
///
/// Implementations must be `Send + Sync`: the manager shares them behind
/// `Arc` and may call them concurrently.
#[async_trait]
pub trait ModelProvider: Send + Sync {
    /// Returns descriptive metadata about the provider.
    fn metadata(&self) -> &ProviderMetadata;

    /// Check the current health state for the provider.
    async fn health_check(&self) -> Result<ProviderStatus>;

    /// List all models available through the provider.
    async fn list_models(&self) -> Result<Vec<ModelInfo>>;

    /// Acquire a streaming response for a generation request.
    async fn generate_stream(&self, request: GenerateRequest) -> Result<GenerateStream>;
}

View File

@@ -0,0 +1,124 @@
//! Shared types used by the unified provider abstraction layer.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use serde_json::Value;
/// Categorises providers so the UI can distinguish between local and hosted
/// backends.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ProviderType {
    /// Backend running on the user's machine (e.g. a local daemon).
    Local,
    /// Hosted backend reached over the network.
    Cloud,
}
/// Represents the current availability state for a provider.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ProviderStatus {
    /// The provider responded successfully and can serve requests.
    Available,
    /// The provider could not be reached or returned an error.
    Unavailable,
    /// The provider needs configuration (e.g. missing or rejected API key).
    RequiresSetup,
}
/// Describes core metadata for a provider implementation.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ProviderMetadata {
    /// Stable identifier, also used as the registration key (e.g. "ollama_local").
    pub id: String,
    /// Human-readable display name.
    pub name: String,
    /// Whether the backend is local or hosted.
    pub provider_type: ProviderType,
    /// True when the provider needs credentials before it can be used.
    pub requires_auth: bool,
    /// Free-form extras (base URL, timeouts, ...); deserializes to an empty
    /// map when absent.
    #[serde(default)]
    pub metadata: HashMap<String, Value>,
}
impl ProviderMetadata {
    /// Build metadata for a provider with an empty auxiliary map.
    pub fn new(
        id: impl Into<String>,
        name: impl Into<String>,
        provider_type: ProviderType,
        requires_auth: bool,
    ) -> Self {
        let id = id.into();
        let name = name.into();
        Self {
            metadata: HashMap::new(),
            id,
            name,
            provider_type,
            requires_auth,
        }
    }
}
/// Information about a model that can be displayed to users.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ModelInfo {
    /// Model identifier as reported by the backend.
    pub name: String,
    /// On-disk size when the backend reports one.
    #[serde(default)]
    pub size_bytes: Option<u64>,
    /// Capability tags; empty when none are reported.
    #[serde(default)]
    pub capabilities: Vec<String>,
    /// Optional human-readable description.
    #[serde(default)]
    pub description: Option<String>,
    /// Metadata of the provider that exposes this model.
    pub provider: ProviderMetadata,
    /// Backend-specific extras (e.g. digest, modification time).
    #[serde(default)]
    pub metadata: HashMap<String, Value>,
}
/// Unified request for streaming text generation across providers.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GenerateRequest {
    /// Target model identifier.
    pub model: String,
    /// Optional prompt text.
    #[serde(default)]
    pub prompt: Option<String>,
    /// Prior context entries; providers may skip this when empty.
    #[serde(default)]
    pub context: Vec<String>,
    /// Backend-specific generation options.
    #[serde(default)]
    pub parameters: HashMap<String, Value>,
    /// Extra request metadata passed through to the backend.
    #[serde(default)]
    pub metadata: HashMap<String, Value>,
}
impl GenerateRequest {
/// Helper for building a request from the minimum required fields.
pub fn new(model: impl Into<String>) -> Self {
Self {
model: model.into(),
prompt: None,
context: Vec::new(),
parameters: HashMap::new(),
metadata: HashMap::new(),
}
}
}
/// Streamed chunk of generation output from a model.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GenerateChunk {
    /// Text payload; `None` for control/terminal chunks.
    #[serde(default)]
    pub text: Option<String>,
    /// True for the last chunk of a stream.
    #[serde(default)]
    pub is_final: bool,
    /// Chunk-level extras reported by the backend.
    #[serde(default)]
    pub metadata: HashMap<String, Value>,
}
impl GenerateChunk {
    /// Construct a non-final chunk carrying the given text payload.
    pub fn from_text(text: impl Into<String>) -> Self {
        Self {
            text: Some(text.into()),
            is_final: false,
            // Reuse the terminal constructor for the empty metadata map.
            ..Self::final_chunk()
        }
    }

    /// Construct the terminal marker chunk: no text, `is_final` set.
    pub fn final_chunk() -> Self {
        Self {
            text: None,
            is_final: true,
            metadata: HashMap::new(),
        }
    }
}

View File

@@ -1467,9 +1467,11 @@ mod tests {
#[test] #[test]
fn explicit_local_mode_overrides_api_key() { fn explicit_local_mode_overrides_api_key() {
let mut config = ProviderConfig { let mut config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(), provider_type: "ollama".to_string(),
base_url: Some("http://localhost:11434".to_string()), base_url: Some("http://localhost:11434".to_string()),
api_key: Some("secret-key".to_string()), api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(), extra: HashMap::new(),
}; };
config.extra.insert( config.extra.insert(
@@ -1486,9 +1488,11 @@ mod tests {
#[test] #[test]
fn auto_mode_prefers_explicit_local_base() { fn auto_mode_prefers_explicit_local_base() {
let config = ProviderConfig { let config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(), provider_type: "ollama".to_string(),
base_url: Some("http://localhost:11434".to_string()), base_url: Some("http://localhost:11434".to_string()),
api_key: Some("secret-key".to_string()), api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(), extra: HashMap::new(),
}; };
// simulate missing explicit mode; defaults to auto // simulate missing explicit mode; defaults to auto
@@ -1502,9 +1506,11 @@ mod tests {
#[test] #[test]
fn auto_mode_with_api_key_and_no_local_probe_switches_to_cloud() { fn auto_mode_with_api_key_and_no_local_probe_switches_to_cloud() {
let mut config = ProviderConfig { let mut config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(), provider_type: "ollama".to_string(),
base_url: None, base_url: None,
api_key: Some("secret-key".to_string()), api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(), extra: HashMap::new(),
}; };
config.extra.insert( config.extra.insert(
@@ -1580,9 +1586,11 @@ fn auto_mode_with_api_key_and_successful_probe_prefers_local() {
let _guard = ProbeOverrideGuard::set(Some(true)); let _guard = ProbeOverrideGuard::set(Some(true));
let mut config = ProviderConfig { let mut config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(), provider_type: "ollama".to_string(),
base_url: None, base_url: None,
api_key: Some("secret-key".to_string()), api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(), extra: HashMap::new(),
}; };
config.extra.insert( config.extra.insert(
@@ -1603,9 +1611,11 @@ fn auto_mode_with_api_key_and_failed_probe_prefers_cloud() {
let _guard = ProbeOverrideGuard::set(Some(false)); let _guard = ProbeOverrideGuard::set(Some(false));
let mut config = ProviderConfig { let mut config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(), provider_type: "ollama".to_string(),
base_url: None, base_url: None,
api_key: Some("secret-key".to_string()), api_key: Some("secret-key".to_string()),
api_key_env: None,
extra: HashMap::new(), extra: HashMap::new(),
}; };
config.extra.insert( config.extra.insert(
@@ -1622,9 +1632,11 @@ fn auto_mode_with_api_key_and_failed_probe_prefers_cloud() {
#[test] #[test]
fn annotate_scope_status_adds_capabilities_for_unavailable_scopes() { fn annotate_scope_status_adds_capabilities_for_unavailable_scopes() {
let config = ProviderConfig { let config = ProviderConfig {
enabled: true,
provider_type: "ollama".to_string(), provider_type: "ollama".to_string(),
base_url: Some("http://localhost:11434".to_string()), base_url: Some("http://localhost:11434".to_string()),
api_key: None, api_key: None,
api_key_env: None,
extra: HashMap::new(), extra: HashMap::new(),
}; };

View File

@@ -126,7 +126,7 @@ fn provider_from_config() -> Result<Arc<dyn Provider>, RpcError> {
})?; })?;
match provider_cfg.provider_type.as_str() { match provider_cfg.provider_type.as_str() {
"ollama" | "ollama-cloud" => { "ollama" | "ollama_cloud" => {
let provider = OllamaProvider::from_config(&provider_cfg, Some(&config.general)) let provider = OllamaProvider::from_config(&provider_cfg, Some(&config.general))
.map_err(|e| { .map_err(|e| {
RpcError::internal_error(format!( RpcError::internal_error(format!(
@@ -153,10 +153,12 @@ fn create_provider() -> Result<Arc<dyn Provider>, RpcError> {
} }
fn canonical_provider_name(name: &str) -> String { fn canonical_provider_name(name: &str) -> String {
if name.eq_ignore_ascii_case("ollama-cloud") { let normalized = name.trim().to_ascii_lowercase().replace('-', "_");
"ollama".to_string() match normalized.as_str() {
} else { "" => "ollama_local".to_string(),
name.to_string() "ollama" | "ollama_local" => "ollama_local".to_string(),
"ollama_cloud" => "ollama_cloud".to_string(),
other => other.to_string(),
} }
} }

View File

@@ -0,0 +1,20 @@
[package]
name = "owlen-providers"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Provider implementations for OWLEN"

[dependencies]
owlen-core = { path = "../owlen-core" }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
tokio-stream = { workspace = true }
# NOTE(review): the `package = "reqwest"` key was redundant (the dependency
# key is already `reqwest`) and has been dropped. Consider moving this pin
# into [workspace.dependencies] so the version stays in sync across crates.
reqwest = { version = "0.11", features = ["json", "stream"] }

View File

@@ -0,0 +1,3 @@
//! Provider implementations for OWLEN.
pub mod ollama;

View File

@@ -0,0 +1,108 @@
use std::{env, time::Duration};
use async_trait::async_trait;
use owlen_core::{
Error as CoreError, Result as CoreResult,
config::OLLAMA_CLOUD_BASE_URL,
provider::{
GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderMetadata,
ProviderStatus, ProviderType,
},
};
use serde_json::{Number, Value};
use super::OllamaClient;
/// Environment variable consulted when no API key is configured explicitly.
const API_KEY_ENV: &str = "OLLAMA_CLOUD_API_KEY";

/// ModelProvider implementation for the hosted Ollama Cloud service.
pub struct OllamaCloudProvider {
    /// Shared HTTP client, pre-configured with the resolved bearer API key.
    client: OllamaClient,
}
impl OllamaCloudProvider {
    /// Construct a new cloud provider. An API key must be supplied either
    /// directly or via the `OLLAMA_CLOUD_API_KEY` environment variable;
    /// construction fails with a config error when neither is present.
    pub fn new(
        base_url: Option<String>,
        api_key: Option<String>,
        request_timeout: Option<Duration>,
    ) -> CoreResult<Self> {
        let (api_key, key_source) = resolve_api_key(api_key)?;
        let base_url = base_url.unwrap_or_else(|| OLLAMA_CLOUD_BASE_URL.to_string());

        // Describe the provider; the hosted endpoint always requires auth.
        let mut metadata =
            ProviderMetadata::new("ollama_cloud", "Ollama (Cloud)", ProviderType::Cloud, true);
        let extra = &mut metadata.metadata;
        extra.insert("base_url".into(), Value::String(base_url.clone()));
        extra.insert(
            "api_key_source".into(),
            Value::String(key_source.to_string()),
        );
        extra.insert("api_key_env".into(), Value::String(API_KEY_ENV.to_string()));
        if let Some(timeout) = request_timeout {
            // Clamp to u64 so the millisecond count always fits in a JSON number.
            let timeout_ms = timeout.as_millis().min(u128::from(u64::MAX)) as u64;
            extra.insert(
                "request_timeout_ms".into(),
                Value::Number(Number::from(timeout_ms)),
            );
        }

        OllamaClient::new(&base_url, Some(api_key), metadata, request_timeout)
            .map(|client| Self { client })
    }
}
#[async_trait]
impl ModelProvider for OllamaCloudProvider {
    fn metadata(&self) -> &ProviderMetadata {
        self.client.metadata()
    }

    /// Health check that reports authentication failures as `RequiresSetup`
    /// (so frontends can prompt for a key) instead of surfacing an error.
    async fn health_check(&self) -> CoreResult<ProviderStatus> {
        let outcome = self.client.health_check().await;
        if let Err(CoreError::Auth(_)) = &outcome {
            return Ok(ProviderStatus::RequiresSetup);
        }
        outcome
    }

    async fn list_models(&self) -> CoreResult<Vec<ModelInfo>> {
        self.client.list_models().await
    }

    async fn generate_stream(&self, request: GenerateRequest) -> CoreResult<GenerateStream> {
        self.client.generate_stream(request).await
    }
}
/// Pick the cloud API key, preferring an explicit config value over the
/// `OLLAMA_CLOUD_API_KEY` environment variable. Returns the trimmed key
/// together with a label describing where it came from.
fn resolve_api_key(api_key: Option<String>) -> CoreResult<(String, &'static str)> {
    // Candidates in priority order; whitespace-only values are ignored.
    let candidates = [(api_key, "config"), (env::var(API_KEY_ENV).ok(), "env")];
    for (value, source) in candidates {
        let trimmed = value.as_deref().map(str::trim).unwrap_or("");
        if !trimmed.is_empty() {
            return Ok((trimmed.to_string(), source));
        }
    }
    Err(CoreError::Config(
        "Ollama Cloud API key not configured. Set OLLAMA_CLOUD_API_KEY or configure an API key."
            .into(),
    ))
}

View File

@@ -0,0 +1,80 @@
use std::time::Duration;

use async_trait::async_trait;
use owlen_core::config::OLLAMA_LOCAL_BASE_URL;
use owlen_core::provider::{
    GenerateRequest, GenerateStream, ModelInfo, ModelProvider, ProviderMetadata, ProviderStatus,
    ProviderType,
};
use owlen_core::{Error as CoreError, Result as CoreResult};
use serde_json::{Number, Value};
use tokio::time::timeout;
/// Fallback daemon address when the caller supplies no base URL.
const DEFAULT_BASE_URL: &str = "http://localhost:11434";
/// Default cap, in seconds, applied to a single health-check probe.
const DEFAULT_HEALTH_TIMEOUT_SECS: u64 = 5;

/// ModelProvider implementation for a local Ollama daemon.
pub struct OllamaLocalProvider {
    /// Shared HTTP client; constructed without an API key.
    client: OllamaClient,
    /// Upper bound applied to health-check probes.
    health_timeout: Duration,
}
impl OllamaLocalProvider {
/// Construct a new local provider using the shared [`OllamaClient`].
pub fn new(
base_url: Option<String>,
request_timeout: Option<Duration>,
health_timeout: Option<Duration>,
) -> CoreResult<Self> {
let base_url = base_url.unwrap_or_else(|| DEFAULT_BASE_URL.to_string());
let health_timeout =
health_timeout.unwrap_or_else(|| Duration::from_secs(DEFAULT_HEALTH_TIMEOUT_SECS));
let mut metadata =
ProviderMetadata::new("ollama_local", "Ollama (Local)", ProviderType::Local, false);
metadata
.metadata
.insert("base_url".into(), Value::String(base_url.clone()));
if let Some(timeout) = request_timeout {
let timeout_ms = timeout.as_millis().min(u128::from(u64::MAX)) as u64;
metadata.metadata.insert(
"request_timeout_ms".into(),
Value::Number(Number::from(timeout_ms)),
);
}
let client = OllamaClient::new(&base_url, None, metadata, request_timeout)?;
Ok(Self {
client,
health_timeout,
})
}
}
#[async_trait]
impl ModelProvider for OllamaLocalProvider {
    fn metadata(&self) -> &ProviderMetadata {
        self.client.metadata()
    }

    /// Probe the daemon, bounding the check with `health_timeout`.
    /// Connectivity problems — network errors, client timeouts, or the
    /// probe overrunning its deadline — are reported as `Unavailable`
    /// rather than surfaced as errors; any other error propagates.
    async fn health_check(&self) -> CoreResult<ProviderStatus> {
        let probe = timeout(self.health_timeout, self.client.health_check()).await;
        match probe {
            Err(_) => Ok(ProviderStatus::Unavailable),
            Ok(Ok(status)) => Ok(status),
            Ok(Err(CoreError::Network(_) | CoreError::Timeout(_))) => {
                Ok(ProviderStatus::Unavailable)
            }
            Ok(Err(err)) => Err(err),
        }
    }

    async fn list_models(&self) -> CoreResult<Vec<ModelInfo>> {
        self.client.list_models().await
    }

    async fn generate_stream(&self, request: GenerateRequest) -> CoreResult<GenerateStream> {
        self.client.generate_stream(request).await
    }
}

View File

@@ -0,0 +1,7 @@
//! Ollama provider implementations: a local-daemon backend, a hosted cloud
//! backend, and the HTTP client they share.
pub mod cloud;
pub mod local;
pub mod shared;

pub use cloud::OllamaCloudProvider;
pub use local::OllamaLocalProvider;
pub use shared::OllamaClient;

View File

@@ -0,0 +1,389 @@
use std::collections::HashMap;
use std::time::Duration;
use futures::StreamExt;
use owlen_core::provider::{
GenerateChunk, GenerateRequest, GenerateStream, ModelInfo, ProviderMetadata, ProviderStatus,
};
use owlen_core::{Error as CoreError, Result as CoreResult};
use reqwest::{Client, Method, StatusCode, Url};
use serde::Deserialize;
use serde_json::{Map as JsonMap, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
/// Default per-request timeout applied when the caller configures none.
const DEFAULT_TIMEOUT_SECS: u64 = 60;

/// Shared Ollama HTTP client used by both local and cloud providers.
#[derive(Clone)]
pub struct OllamaClient {
    // HTTP client; carries the configured request timeout.
    http: Client,
    // Root URL that endpoint paths are joined onto.
    base_url: Url,
    // Bearer token attached to every request when present (cloud mode).
    api_key: Option<String>,
    // Descriptive metadata reported via `metadata()`.
    provider_metadata: ProviderMetadata,
}
impl OllamaClient {
/// Create a new client with the given base URL and optional API key.
///
/// Fails with a config error when `base_url` does not parse as a URL, or
/// with a mapped transport error when the HTTP client cannot be built.
/// `request_timeout` defaults to 60 seconds when not provided.
pub fn new(
    base_url: impl AsRef<str>,
    api_key: Option<String>,
    provider_metadata: ProviderMetadata,
    request_timeout: Option<Duration>,
) -> CoreResult<Self> {
    let parsed = Url::parse(base_url.as_ref())
        .map_err(|err| CoreError::Config(format!("invalid base url: {}", err)))?;
    let effective_timeout = match request_timeout {
        Some(timeout) => timeout,
        None => Duration::from_secs(DEFAULT_TIMEOUT_SECS),
    };
    let http = Client::builder()
        .timeout(effective_timeout)
        .build()
        .map_err(map_reqwest_error)?;
    Ok(Self {
        http,
        base_url: parsed,
        api_key,
        provider_metadata,
    })
}
/// Provider metadata associated with this client.
///
/// This is the same metadata passed to `new`, including any base-URL or
/// timeout entries the constructing provider recorded in it.
pub fn metadata(&self) -> &ProviderMetadata {
    &self.provider_metadata
}
/// Perform a basic health check to determine provider availability.
///
/// Uses `GET api/tags` as a cheap liveness probe: any 2xx means available,
/// 401/403 maps to `RequiresSetup`, every other status is `Unavailable`.
/// Transport failures are returned as errors.
pub async fn health_check(&self) -> CoreResult<ProviderStatus> {
    let url = self.endpoint("api/tags")?;
    let response = self
        .request(Method::GET, url)
        .send()
        .await
        .map_err(map_reqwest_error)?;
    let status = response.status();
    if status.is_success() {
        Ok(ProviderStatus::Available)
    } else if status == StatusCode::UNAUTHORIZED || status == StatusCode::FORBIDDEN {
        Ok(ProviderStatus::RequiresSetup)
    } else {
        Ok(ProviderStatus::Unavailable)
    }
}
/// Fetch the available models from the Ollama API.
///
/// Non-success responses become errors via `map_http_error`; a successful
/// body is decoded as a tags payload and each entry converted into a
/// [`ModelInfo`] annotated with this client's provider metadata.
pub async fn list_models(&self) -> CoreResult<Vec<ModelInfo>> {
    let url = self.endpoint("api/tags")?;
    let response = self
        .request(Method::GET, url)
        .send()
        .await
        .map_err(map_reqwest_error)?;
    let status = response.status();
    // Read the body up front so error mapping can include server detail.
    let bytes = response.bytes().await.map_err(map_reqwest_error)?;
    if !status.is_success() {
        return Err(map_http_error("tags", status, &bytes));
    }
    let payload: TagsResponse =
        serde_json::from_slice(&bytes).map_err(CoreError::Serialization)?;
    let mut models = Vec::with_capacity(payload.models.len());
    for model in payload.models {
        models.push(self.parse_model_info(model));
    }
    Ok(models)
}
/// Request a streaming generation session from Ollama.
///
/// Sends `POST api/generate` (the body always sets `stream: true`) and
/// bridges the HTTP byte stream into a bounded channel of parsed
/// [`GenerateChunk`] items. The background task stops as soon as the
/// receiver is dropped or after forwarding the first error.
pub async fn generate_stream(&self, request: GenerateRequest) -> CoreResult<GenerateStream> {
    let url = self.endpoint("api/generate")?;
    let body = self.build_generate_body(request);
    let response = self
        .request(Method::POST, url)
        .json(&body)
        .send()
        .await
        .map_err(map_reqwest_error)?;
    let status = response.status();
    if !status.is_success() {
        // Drain the body so the error mapper can include server detail.
        let bytes = response.bytes().await.map_err(map_reqwest_error)?;
        return Err(map_http_error("generate", status, &bytes));
    }
    let stream = response.bytes_stream();
    let (tx, rx) = mpsc::channel::<CoreResult<GenerateChunk>>(32);
    tokio::spawn(async move {
        let mut stream = stream;
        // Accumulates partial lines across network chunks: the response is
        // newline-delimited JSON and a chunk boundary can split a line.
        let mut buffer: Vec<u8> = Vec::new();
        while let Some(chunk) = stream.next().await {
            match chunk {
                Ok(bytes) => {
                    buffer.extend_from_slice(&bytes);
                    // Emit every complete line currently buffered.
                    while let Some(pos) = buffer.iter().position(|byte| *byte == b'\n') {
                        let line_bytes: Vec<u8> = buffer.drain(..=pos).collect();
                        let line = String::from_utf8_lossy(&line_bytes).trim().to_string();
                        if line.is_empty() {
                            continue;
                        }
                        match parse_stream_line(&line) {
                            Ok(item) => {
                                // Send failure means the consumer hung up.
                                if tx.send(Ok(item)).await.is_err() {
                                    return;
                                }
                            }
                            Err(err) => {
                                let _ = tx.send(Err(err)).await;
                                return;
                            }
                        }
                    }
                }
                Err(err) => {
                    let _ = tx.send(Err(map_reqwest_error(err))).await;
                    return;
                }
            }
        }
        // Flush a trailing line that was not newline-terminated.
        if !buffer.is_empty() {
            let line = String::from_utf8_lossy(&buffer).trim().to_string();
            if !line.is_empty() {
                match parse_stream_line(&line) {
                    Ok(item) => {
                        let _ = tx.send(Ok(item)).await;
                    }
                    Err(err) => {
                        let _ = tx.send(Err(err)).await;
                    }
                }
            }
        }
    });
    let stream = ReceiverStream::new(rx);
    Ok(Box::pin(stream))
}
/// Start a request builder for `url`, attaching the bearer token when an
/// API key is configured (cloud mode); local daemons need no auth header.
fn request(&self, method: Method, url: Url) -> reqwest::RequestBuilder {
    let base = self.http.request(method, url);
    match &self.api_key {
        Some(key) => base.bearer_auth(key),
        None => base,
    }
}
/// Resolve `path` against the configured base URL.
///
/// NOTE(review): `Url::join` replaces the last path segment of a base URL
/// that lacks a trailing slash (e.g. ".../v1" + "api/tags" becomes
/// ".../api/tags"); this assumes `base_url` is normalised upstream — confirm.
fn endpoint(&self, path: &str) -> CoreResult<Url> {
    self.base_url.join(path).map_err(|err| {
        CoreError::Config(format!("invalid endpoint '{}': {}", path, err))
    })
}
/// Assemble the JSON payload for `POST api/generate`.
///
/// Always sets the model name and `stream: true`; the prompt, context,
/// options, and metadata keys are emitted only when the request carries
/// non-empty values for them.
fn build_generate_body(&self, request: GenerateRequest) -> Value {
    let GenerateRequest {
        model,
        prompt,
        context,
        parameters,
        metadata,
    } = request;
    let mut payload = JsonMap::new();
    payload.insert("model".into(), Value::String(model));
    // Streaming is mandatory for this client; responses arrive as NDJSON.
    payload.insert("stream".into(), Value::Bool(true));
    if let Some(text) = prompt {
        payload.insert("prompt".into(), Value::String(text));
    }
    if !context.is_empty() {
        payload.insert(
            "context".into(),
            Value::Array(context.into_iter().map(Value::String).collect()),
        );
    }
    if !parameters.is_empty() {
        // Ollama expects sampling parameters under the "options" key.
        payload.insert("options".into(), Value::Object(to_json_map(parameters)));
    }
    if !metadata.is_empty() {
        payload.insert("metadata".into(), Value::Object(to_json_map(metadata)));
    }
    Value::Object(payload)
}
fn parse_model_info(&self, model: OllamaModel) -> ModelInfo {
let mut metadata = HashMap::new();
if let Some(digest) = model.digest {
metadata.insert("digest".to_string(), Value::String(digest));
}
if let Some(modified) = model.modified_at {
metadata.insert("modified_at".to_string(), Value::String(modified));
}
if let Some(details) = model.details {
let mut details_map = JsonMap::new();
if let Some(format) = details.format {
details_map.insert("format".into(), Value::String(format));
}
if let Some(family) = details.family {
details_map.insert("family".into(), Value::String(family));
}
if let Some(parameter_size) = details.parameter_size {
details_map.insert("parameter_size".into(), Value::String(parameter_size));
}
if let Some(quantisation) = details.quantization_level {
details_map.insert("quantization_level".into(), Value::String(quantisation));
}
if !details_map.is_empty() {
metadata.insert("details".to_string(), Value::Object(details_map));
}
}
ModelInfo {
name: model.name,
size_bytes: model.size,
capabilities: Vec::new(),
description: None,
provider: self.provider_metadata.clone(),
metadata,
}
}
}
/// Wire format of the `GET api/tags` response body.
#[derive(Debug, Deserialize)]
struct TagsResponse {
    // Defaults to an empty list when the daemon omits the key.
    #[serde(default)]
    models: Vec<OllamaModel>,
}
/// One model entry from the tags response.
///
/// Every field except `name` is optional on the wire; `#[serde(default)]`
/// tolerates daemons that omit them.
#[derive(Debug, Deserialize)]
struct OllamaModel {
    name: String,
    // Model size — presumably bytes on disk; TODO confirm against the
    // Ollama API docs.
    #[serde(default)]
    size: Option<u64>,
    // Content digest of the model blob, when reported.
    #[serde(default)]
    digest: Option<String>,
    // Last-modified timestamp, kept as the raw string the daemon sends.
    #[serde(default)]
    modified_at: Option<String>,
    // Nested details block (format, family, quantization, …).
    #[serde(default)]
    details: Option<OllamaModelDetails>,
}
/// Optional per-model detail block nested in a tags entry; all fields may
/// be absent on the wire.
#[derive(Debug, Deserialize)]
struct OllamaModelDetails {
    // Model file format (e.g. as reported by the daemon).
    #[serde(default)]
    format: Option<String>,
    // Model family name.
    #[serde(default)]
    family: Option<String>,
    // Human-readable parameter count (e.g. "7B"-style strings).
    #[serde(default)]
    parameter_size: Option<String>,
    // Quantization level identifier.
    #[serde(default)]
    quantization_level: Option<String>,
}
fn to_json_map(source: HashMap<String, Value>) -> JsonMap<String, Value> {
source.into_iter().collect()
}
/// Copy a JSON object's entries into a metadata map, dropping the keys that
/// are surfaced as dedicated `GenerateChunk` fields (`response`, `done`).
/// Non-object values yield an empty map.
fn to_metadata_map(value: &Value) -> HashMap<String, Value> {
    match value {
        Value::Object(obj) => obj
            .iter()
            .filter(|(key, _)| !matches!(key.as_str(), "response" | "done"))
            .map(|(key, item)| (key.clone(), item.clone()))
            .collect(),
        _ => HashMap::new(),
    }
}
fn parse_stream_line(line: &str) -> CoreResult<GenerateChunk> {
let value: Value = serde_json::from_str(line).map_err(CoreError::Serialization)?;
if let Some(error) = value.get("error").and_then(Value::as_str) {
return Err(CoreError::Provider(anyhow::anyhow!(
"ollama generation error: {}",
error
)));
}
let mut chunk = GenerateChunk {
text: value
.get("response")
.and_then(Value::as_str)
.map(str::to_string),
is_final: value.get("done").and_then(Value::as_bool).unwrap_or(false),
metadata: to_metadata_map(&value),
};
if chunk.is_final && chunk.text.is_none() && chunk.metadata.is_empty() {
chunk
.metadata
.insert("status".into(), Value::String("done".into()));
}
Ok(chunk)
}
/// Translate a non-success HTTP response into a `CoreError`.
///
/// 401/403 become auth errors (so callers can prompt for credentials),
/// 429 becomes a rate-limit provider error, and everything else carries a
/// truncated snippet of the response body for diagnostics.
fn map_http_error(endpoint: &str, status: StatusCode, body: &[u8]) -> CoreError {
    if status == StatusCode::UNAUTHORIZED || status == StatusCode::FORBIDDEN {
        return CoreError::Auth(format!(
            "Ollama {} request unauthorized (status {})",
            endpoint, status
        ));
    }
    if status == StatusCode::TOO_MANY_REQUESTS {
        return CoreError::Provider(anyhow::anyhow!(
            "Ollama {} request rate limited (status {})",
            endpoint,
            status
        ));
    }
    CoreError::Provider(anyhow::anyhow!(
        "Ollama {} request failed: HTTP {} - {}",
        endpoint,
        status,
        truncated_body(body)
    ))
}
/// Render a response body as text, truncated to at most 512 characters.
///
/// Invalid UTF-8 is replaced lossily; when the text exceeds the limit, the
/// first 512 characters are returned followed by an ellipsis.
fn truncated_body(body: &[u8]) -> String {
    const MAX_CHARS: usize = 512;
    let text = String::from_utf8_lossy(body);
    let mut chars = text.chars();
    let mut snippet: String = chars.by_ref().take(MAX_CHARS).collect();
    // Only append the ellipsis when something was actually cut off.
    if chars.next().is_some() {
        snippet.push('…');
    }
    snippet
}
/// Classify a `reqwest` failure into the core error taxonomy: timeouts,
/// network/connection problems, or a generic provider error.
fn map_reqwest_error(err: reqwest::Error) -> CoreError {
    match err {
        e if e.is_timeout() => CoreError::Timeout(e.to_string()),
        e if e.is_connect() || e.is_request() => CoreError::Network(e.to_string()),
        e => CoreError::Provider(e.into()),
    }
}

View File

@@ -41,7 +41,9 @@ use crate::state::{
use crate::toast::{Toast, ToastLevel, ToastManager}; use crate::toast::{Toast, ToastLevel, ToastManager};
use crate::ui::format_tool_output; use crate::ui::format_tool_output;
use crate::{commands, highlight}; use crate::{commands, highlight};
use owlen_core::config::{OLLAMA_CLOUD_BASE_URL, OLLAMA_CLOUD_ENDPOINT_KEY, OLLAMA_MODE_KEY}; use owlen_core::config::{
OLLAMA_CLOUD_API_KEY_ENV, OLLAMA_CLOUD_BASE_URL, OLLAMA_CLOUD_ENDPOINT_KEY, OLLAMA_MODE_KEY,
};
use owlen_core::credentials::{ApiCredentials, OLLAMA_CLOUD_CREDENTIAL_ID}; use owlen_core::credentials::{ApiCredentials, OLLAMA_CLOUD_CREDENTIAL_ID};
// Agent executor moved to separate binary `owlen-agent`. The TUI no longer directly // Agent executor moved to separate binary `owlen-agent`. The TUI no longer directly
// imports `AgentExecutor` to avoid a circular dependency on `owlen-cli`. // imports `AgentExecutor` to avoid a circular dependency on `owlen-cli`.
@@ -500,7 +502,7 @@ impl ChatApp {
models: Vec::new(), models: Vec::new(),
provider_scope_status: HashMap::new(), provider_scope_status: HashMap::new(),
available_providers: Vec::new(), available_providers: Vec::new(),
selected_provider: "ollama".to_string(), // Default, will be updated in initialize_models selected_provider: "ollama_local".to_string(), // Default, will be updated in initialize_models
selected_provider_index: 0, selected_provider_index: 0,
selected_model_item: None, selected_model_item: None,
model_selector_items: Vec::new(), model_selector_items: Vec::new(),
@@ -4297,7 +4299,7 @@ impl ChatApp {
self.recompute_available_providers(); self.recompute_available_providers();
if self.available_providers.is_empty() { if self.available_providers.is_empty() {
self.available_providers.push("ollama".to_string()); self.available_providers.push("ollama_local".to_string());
} }
if !config_model_provider.is_empty() { if !config_model_provider.is_empty() {
@@ -7419,14 +7421,14 @@ impl ChatApp {
for (name, provider_cfg) in provider_entries { for (name, provider_cfg) in provider_entries {
let provider_type = provider_cfg.provider_type.to_ascii_lowercase(); let provider_type = provider_cfg.provider_type.to_ascii_lowercase();
if provider_type != "ollama" && provider_type != "ollama-cloud" { if provider_type != "ollama" && provider_type != "ollama_cloud" {
continue; continue;
} }
let canonical_name = if name.eq_ignore_ascii_case("ollama-cloud") { let canonical_name = match name.trim().to_ascii_lowercase().as_str() {
"ollama".to_string() "ollama" | "ollama_local" => "ollama_local".to_string(),
} else { "ollama-cloud" | "ollama_cloud" => "ollama_cloud".to_string(),
name.clone() other => other.to_string(),
}; };
// All providers communicate via MCP LLM server (Phase 10). // All providers communicate via MCP LLM server (Phase 10).
@@ -7599,7 +7601,10 @@ impl ChatApp {
for (idx, model) in entries { for (idx, model) in entries {
let canonical = model.id.to_string(); let canonical = model.id.to_string();
let is_cloud_id = model.id.ends_with("-cloud"); let is_cloud_id = model.id.ends_with("-cloud");
let priority = if matches!(provider_lower, "ollama" | "ollama-cloud") { let priority = if matches!(
provider_lower,
"ollama" | "ollama_local" | "ollama-cloud" | "ollama_cloud"
) {
match scope { match scope {
ModelScope::Local => { ModelScope::Local => {
if is_cloud_id { if is_cloud_id {
@@ -7651,7 +7656,7 @@ impl ChatApp {
} }
if providers.is_empty() { if providers.is_empty() {
providers.insert("ollama".to_string()); providers.insert("ollama_local".to_string());
} }
self.available_providers = providers.into_iter().collect(); self.available_providers = providers.into_iter().collect();
@@ -7693,7 +7698,7 @@ impl ChatApp {
let mut items = Vec::new(); let mut items = Vec::new();
if self.available_providers.is_empty() { if self.available_providers.is_empty() {
items.push(ModelSelectorItem::header("ollama", false)); items.push(ModelSelectorItem::header("ollama_local", false));
self.model_selector_items = items; self.model_selector_items = items;
return; return;
} }
@@ -8074,7 +8079,7 @@ impl ChatApp {
self.set_model_info_visible(false); self.set_model_info_visible(false);
self.recompute_available_providers(); self.recompute_available_providers();
if self.available_providers.is_empty() { if self.available_providers.is_empty() {
self.available_providers.push("ollama".to_string()); self.available_providers.push("ollama_local".to_string());
} }
self.rebuild_model_selector_items(); self.rebuild_model_selector_items();
self.selected_model_item = None; self.selected_model_item = None;
@@ -8093,7 +8098,7 @@ impl ChatApp {
self.recompute_available_providers(); self.recompute_available_providers();
if self.available_providers.is_empty() { if self.available_providers.is_empty() {
self.available_providers.push("ollama".to_string()); self.available_providers.push("ollama_local".to_string());
} }
if !config_model_provider.is_empty() { if !config_model_provider.is_empty() {
@@ -8248,8 +8253,10 @@ impl ChatApp {
let (existing_plain_api_key, normalized_endpoint_local, base_overridden_local) = let (existing_plain_api_key, normalized_endpoint_local, base_overridden_local) =
if let Some(entry) = config.providers.get_mut(&options.provider) { if let Some(entry) = config.providers.get_mut(&options.provider) {
let existing = entry.api_key.clone(); let existing = entry.api_key.clone();
if entry.provider_type != "ollama" { entry.enabled = true;
entry.provider_type = "ollama".to_string(); entry.provider_type = "ollama_cloud".to_string();
if entry.api_key_env.is_none() {
entry.api_key_env = Some(OLLAMA_CLOUD_API_KEY_ENV.to_string());
} }
let requested = options let requested = options
.endpoint .endpoint
@@ -8271,12 +8278,6 @@ impl ChatApp {
entry.base_url = Some(normalized_endpoint_local.clone()); entry.base_url = Some(normalized_endpoint_local.clone());
base_overridden_local = true; base_overridden_local = true;
} }
if options.force_cloud_base_url {
entry.extra.insert(
OLLAMA_MODE_KEY.to_string(),
Value::String("cloud".to_string()),
);
}
(existing, normalized_endpoint_local, base_overridden_local) (existing, normalized_endpoint_local, base_overridden_local)
} else { } else {
return Err(anyhow!("Provider '{}' is not configured", options.provider)); return Err(anyhow!("Provider '{}' is not configured", options.provider));
@@ -10666,7 +10667,7 @@ struct CloudSetupOptions {
impl CloudSetupOptions { impl CloudSetupOptions {
fn parse(args: &[&str]) -> Result<Self> { fn parse(args: &[&str]) -> Result<Self> {
let mut options = CloudSetupOptions { let mut options = CloudSetupOptions {
provider: "ollama".to_string(), provider: "ollama_cloud".to_string(),
endpoint: None, endpoint: None,
api_key: None, api_key: None,
force_cloud_base_url: false, force_cloud_base_url: false,
@@ -10712,7 +10713,7 @@ impl CloudSetupOptions {
} }
if options.provider.trim().is_empty() { if options.provider.trim().is_empty() {
options.provider = "ollama".to_string(); options.provider = "ollama_cloud".to_string();
} }
options.provider = canonical_provider_name(&options.provider); options.provider = canonical_provider_name(&options.provider);
@@ -10722,10 +10723,11 @@ impl CloudSetupOptions {
} }
fn canonical_provider_name(provider: &str) -> String { fn canonical_provider_name(provider: &str) -> String {
let normalized = provider.trim().replace('_', "-").to_ascii_lowercase(); let normalized = provider.trim().to_ascii_lowercase().replace('-', "_");
match normalized.as_str() { match normalized.as_str() {
"" => "ollama".to_string(), "" => "ollama_cloud".to_string(),
"ollama-cloud" => "ollama".to_string(), "ollama" => "ollama_cloud".to_string(),
"ollama_cloud" => "ollama_cloud".to_string(),
value => value.to_string(), value => value.to_string(),
} }
} }

View File

@@ -113,45 +113,67 @@ These settings control the behavior of the text input area.
## Provider Settings (`[providers]`) ## Provider Settings (`[providers]`)
This section contains a table for each provider you want to configure. Owlen ships with two entries pre-populated: `ollama` for a local daemon and `ollama-cloud` for the hosted API. You can switch between them by changing `general.default_provider`. This section contains a table for each provider you want to configure. Owlen now ships with four entries pre-populated: `ollama_local`, `ollama_cloud`, `openai`, and `anthropic`. Switch between them by updating `general.default_provider`.
```toml ```toml
[providers.ollama] [providers.ollama_local]
enabled = true
provider_type = "ollama" provider_type = "ollama"
base_url = "http://localhost:11434" base_url = "http://localhost:11434"
# api_key = "..."
[providers.ollama-cloud] [providers.ollama_cloud]
provider_type = "ollama-cloud" enabled = false
provider_type = "ollama_cloud"
base_url = "https://ollama.com" base_url = "https://ollama.com"
# api_key = "${OLLAMA_API_KEY}" api_key_env = "OLLAMA_CLOUD_API_KEY"
[providers.openai]
enabled = false
provider_type = "openai"
base_url = "https://api.openai.com/v1"
api_key_env = "OPENAI_API_KEY"
[providers.anthropic]
enabled = false
provider_type = "anthropic"
base_url = "https://api.anthropic.com/v1"
api_key_env = "ANTHROPIC_API_KEY"
``` ```
- `enabled` (boolean, default: `true`)
Whether the provider should be considered when refreshing models or issuing requests.
- `provider_type` (string, required) - `provider_type` (string, required)
The type of the provider. The built-in options are `"ollama"` (local daemon) and `"ollama-cloud"` (hosted service). Identifies which implementation to use. Local Ollama instances use `"ollama"`; the hosted service uses `"ollama_cloud"`. Third-party providers use their own identifiers (`"openai"`, `"anthropic"`, ...).
- `base_url` (string, optional) - `base_url` (string, optional)
The base URL of the provider's API. The base URL of the provider's API.
- `api_key` (string, optional) - `api_key` / `api_key_env` (string, optional)
The API key to use for authentication, if required. Authentication material. Prefer `api_key_env` to reference an environment variable so secrets remain outside of the config file.
**Note:** `ollama-cloud` now requires an API key; Owlen will refuse to start the provider without one and will hint at the missing configuration.
- `extra` (table, optional) - `extra` (table, optional)
Any additional, provider-specific parameters can be added here. Any additional, provider-specific parameters can be added here.
### Using Ollama Cloud ### Using Ollama Cloud
Owlen now ships a single unified `ollama` provider. When an API key is present, Owlen automatically routes traffic to [Ollama Cloud](https://docs.ollama.com/cloud); otherwise it talks to the local daemon. A minimal configuration looks like this: Owlen now separates the local daemon and the hosted API into two providers. Enable `ollama_cloud` once you have credentials, while keeping `ollama_local` available for on-device workloads. A minimal configuration looks like this:
```toml ```toml
[providers.ollama] [general]
provider_type = "ollama" default_provider = "ollama_local"
base_url = "http://localhost:11434" # ignored once an API key is supplied
api_key = "${OLLAMA_API_KEY}" [providers.ollama_local]
enabled = true
base_url = "http://localhost:11434"
[providers.ollama_cloud]
enabled = true
base_url = "https://ollama.com"
api_key_env = "OLLAMA_CLOUD_API_KEY"
``` ```
Requests target the same `/api/chat` endpoint documented by Ollama and automatically include the API key using a `Bearer` authorization header. If you prefer not to store the key in the config file, you can leave `api_key` unset and provide it via the `OLLAMA_API_KEY` (or `OLLAMA_CLOUD_API_KEY`) environment variable instead. You can also reference an environment variable inline (for example `api_key = "$OLLAMA_API_KEY"` or `api_key = "${OLLAMA_API_KEY}"`), which Owlen expands when the configuration is loaded. The base URL is normalised automatically—Owlen enforces HTTPS, trims trailing slashes, and accepts both `https://ollama.com` and `https://api.ollama.com` without rewriting the host. Requests target the same `/api/chat` endpoint documented by Ollama and automatically include the API key using a `Bearer` authorization header. If you prefer not to store the key in the config file, either rely on `api_key_env` or export the environment variable manually. Owlen normalises the base URL automatically—it enforces HTTPS, trims trailing slashes, and accepts both `https://ollama.com` and `https://api.ollama.com` without rewriting the host.
> **Tip:** If the official `ollama signin` flow fails on Linux v0.12.3, follow the [Linux Ollama sign-in workaround](#linux-ollama-sign-in-workaround-v0123) in the troubleshooting guide to copy keys from a working machine or register them manually. > **Tip:** If the official `ollama signin` flow fails on Linux v0.12.3, follow the [Linux Ollama sign-in workaround](#linux-ollama-sign-in-workaround-v0123) in the troubleshooting guide to copy keys from a working machine or register them manually.
@@ -166,4 +188,4 @@ owlen cloud models # List the hosted models your account can access
owlen cloud logout # Forget the stored API key owlen cloud logout # Forget the stored API key
``` ```
When `privacy.encrypt_local_data = true`, the API key is written to Owlen's encrypted credential vault instead of being persisted in plaintext. Subsequent invocations automatically load the key into the runtime environment so that the config file can remain redacted. If encryption is disabled, the key is stored under `[providers.ollama-cloud].api_key` as before. When `privacy.encrypt_local_data = true`, the API key is written to Owlen's encrypted credential vault instead of being persisted in plaintext. Subsequent invocations automatically load the key into the runtime environment so that the config file can remain redacted. If encryption is disabled, the key is stored under `[providers.ollama_cloud].api_key` as before.

View File

@@ -82,17 +82,19 @@ Ensure your provider configuration is correct. For Ollama:
```toml ```toml
[general] [general]
default_provider = "ollama" default_provider = "ollama_local"
default_model = "llama3.2:latest" # or your preferred model default_model = "llama3.2:latest" # or your preferred model
[providers.ollama] [providers.ollama_local]
enabled = true
provider_type = "ollama" provider_type = "ollama"
base_url = "http://localhost:11434" base_url = "http://localhost:11434"
[providers.ollama-cloud] [providers.ollama_cloud]
provider_type = "ollama-cloud" enabled = true # set to false if you do not use the hosted API
provider_type = "ollama_cloud"
base_url = "https://ollama.com" base_url = "https://ollama.com"
api_key = "$OLLAMA_API_KEY" # Optional: for Ollama Cloud api_key_env = "OLLAMA_CLOUD_API_KEY"
``` ```
#### Step 3: Understanding MCP Server Configuration #### Step 3: Understanding MCP Server Configuration
@@ -156,7 +158,7 @@ After updating your config:
**Solution**: **Solution**:
- Verify model availability on https://ollama.com/models - Verify model availability on https://ollama.com/models
- Remove the `-cloud` suffix from model names when using cloud provider - Remove the `-cloud` suffix from model names when using cloud provider
- Ensure `api_key` is set in `[providers.ollama-cloud]` config - Ensure `api_key`/`api_key_env` is set in `[providers.ollama_cloud]` config
### 0.1.9 Explicit Ollama Modes & Cloud Endpoint Storage ### 0.1.9 Explicit Ollama Modes & Cloud Endpoint Storage

View File

@@ -127,7 +127,7 @@ Create or edit `~/.config/owlen/config.toml`:
```toml ```toml
[general] [general]
default_provider = "ollama" default_provider = "ollama_local"
default_model = "llama3.2:latest" default_model = "llama3.2:latest"
[modes.chat] [modes.chat]