owlen/crates/owlen-core/src/session.rs

use crate::config::Config;
use crate::conversation::ConversationManager;
use crate::formatting::MessageFormatter;
use crate::input::InputBuffer;
use crate::model::ModelManager;
use crate::provider::{ChatStream, Provider};
use crate::types::{ChatParameters, ChatRequest, ChatResponse, Conversation, ModelInfo};
use crate::Result;
use std::sync::Arc;
use uuid::Uuid;

/// Outcome of submitting a chat request
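///
/// A sketch of dispatching on the outcome (ignored by doctests; `outcome` is
/// assumed to come from `SessionController::send_message`):
///
/// ```ignore
/// match outcome {
///     SessionOutcome::Complete(response) => {
///         // Non-streaming: the full reply is already in the conversation.
///         println!("{}", response.message.content);
///     }
///     SessionOutcome::Streaming { response_id, stream } => {
///         // Chunks arrive asynchronously; see
///         // `SessionController::apply_stream_chunk` for a consumption loop.
///         let _ = (response_id, stream);
///     }
/// }
/// ```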
pub enum SessionOutcome {
    /// Immediate response received (non-streaming)
    Complete(ChatResponse),
    /// Streaming response where chunks will arrive asynchronously
    Streaming {
        response_id: Uuid,
        stream: ChatStream,
    },
}

/// High-level controller encapsulating session state and provider interactions.
///
/// This is the main entry point for managing conversations and interacting with LLM providers.
///
/// # Example
///
/// ```
/// use std::sync::Arc;
/// use owlen_core::config::Config;
/// use owlen_core::provider::{Provider, ChatStream};
/// use owlen_core::session::{SessionController, SessionOutcome};
/// use owlen_core::types::{ChatRequest, ChatResponse, ChatParameters, Message, ModelInfo};
/// use owlen_core::Result;
///
/// // Mock provider for the example
/// struct MockProvider;
///
/// #[async_trait::async_trait]
/// impl Provider for MockProvider {
///     fn name(&self) -> &str { "mock" }
///     async fn list_models(&self) -> Result<Vec<ModelInfo>> { Ok(vec![]) }
///     async fn chat(&self, request: ChatRequest) -> Result<ChatResponse> {
///         Ok(ChatResponse {
///             model: request.model,
///             message: Message::assistant("Hello back!".to_string()),
///             ..Default::default()
///         })
///     }
///     async fn chat_stream(&self, _request: ChatRequest) -> Result<ChatStream> { unimplemented!() }
///     async fn health_check(&self) -> Result<()> { Ok(()) }
/// }
///
/// #[tokio::main]
/// async fn main() {
///     let provider = Arc::new(MockProvider);
///     let config = Config::default();
///     let mut session_controller = SessionController::new(provider, config);
///
///     // Send a message
///     let outcome = session_controller.send_message(
///         "Hello".to_string(),
///         ChatParameters { stream: false, ..Default::default() }
///     ).await.unwrap();
///
///     // Check the response
///     if let SessionOutcome::Complete(response) = outcome {
///         assert_eq!(response.message.content, "Hello back!");
///     }
///
///     // The conversation now contains both messages
///     let messages = session_controller.conversation().messages.clone();
///     assert_eq!(messages.len(), 2);
///     assert_eq!(messages[0].content, "Hello");
///     assert_eq!(messages[1].content, "Hello back!");
/// }
/// ```
pub struct SessionController {
    provider: Arc<dyn Provider>,
    conversation: ConversationManager,
    model_manager: ModelManager,
    input_buffer: InputBuffer,
    formatter: MessageFormatter,
    config: Config,
}

impl SessionController {
    /// Create a new controller with the given provider and configuration
    pub fn new(provider: Arc<dyn Provider>, config: Config) -> Self {
        let model = config
            .general
            .default_model
            .clone()
            .unwrap_or_else(|| "ollama/default".to_string());
        let conversation =
            ConversationManager::with_history_capacity(model, config.storage.max_saved_sessions);
        let formatter =
            MessageFormatter::new(config.ui.wrap_column as usize, config.ui.show_role_labels)
                .with_preserve_empty(config.ui.word_wrap);
        let input_buffer = InputBuffer::new(
            config.input.history_size,
            config.input.multiline,
            config.input.tab_width,
        );
        let model_manager = ModelManager::new(config.general.model_cache_ttl());

        Self {
            provider,
            conversation,
            model_manager,
            input_buffer,
            formatter,
            config,
        }
    }

    /// Access the active conversation
    pub fn conversation(&self) -> &Conversation {
        self.conversation.active()
    }

    /// Mutable access to the conversation manager
    pub fn conversation_mut(&mut self) -> &mut ConversationManager {
        &mut self.conversation
    }

    /// Access input buffer
    pub fn input_buffer(&self) -> &InputBuffer {
        &self.input_buffer
    }

    /// Mutable input buffer access
    pub fn input_buffer_mut(&mut self) -> &mut InputBuffer {
        &mut self.input_buffer
    }

    /// Formatter for rendering messages
    pub fn formatter(&self) -> &MessageFormatter {
        &self.formatter
    }

    /// Update the wrap width of the message formatter
    pub fn set_formatter_wrap_width(&mut self, width: usize) {
        self.formatter.set_wrap_width(width);
    }

    /// Access configuration
    pub fn config(&self) -> &Config {
        &self.config
    }

    /// Mutable configuration access
    pub fn config_mut(&mut self) -> &mut Config {
        &mut self.config
    }

    /// Currently selected model identifier
    pub fn selected_model(&self) -> &str {
        &self.conversation.active().model
    }

    /// Change the current model for upcoming requests and record it as the configured default
    pub fn set_model(&mut self, model: String) {
        self.conversation.set_model(model.clone());
        self.config.general.default_model = Some(model);
    }

    /// Retrieve cached models, refreshing from the provider as needed
    pub async fn models(&self, force_refresh: bool) -> Result<Vec<ModelInfo>> {
        self.model_manager
            .get_or_refresh(force_refresh, || async {
                self.provider.list_models().await
            })
            .await
    }

    /// Attempt to select the configured default model from the cached list,
    /// falling back to the first available model when no default is configured
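    ///
    /// A typical startup flow (a sketch; assumes an async context with a
    /// `controller: SessionController` in scope):
    ///
    /// ```ignore
    /// let models = controller.models(false).await?; // cached list, refreshed when stale
    /// controller.ensure_default_model(&models);
    /// println!("using model {}", controller.selected_model());
    /// ```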
    pub fn ensure_default_model(&mut self, models: &[ModelInfo]) {
        if let Some(default) = self.config.general.default_model.clone() {
            if models.iter().any(|m| m.id == default || m.name == default) {
                self.set_model(default);
            }
        } else if let Some(model) = models.first() {
            self.set_model(model.id.clone());
        }
    }

    /// Submit a user message; optionally stream the response
    pub async fn send_message(
        &mut self,
        content: String,
        mut parameters: ChatParameters,
    ) -> Result<SessionOutcome> {
        let streaming = parameters.stream || self.config.general.enable_streaming;
        parameters.stream = streaming;
        self.conversation.push_user_message(content);
        self.send_request_with_current_conversation(parameters)
            .await
    }

    /// Send a request using the current conversation without adding a new user message
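    ///
    /// Useful when the conversation already contains every message to send,
    /// e.g. when regenerating the last reply. A sketch (ignored by doctests;
    /// `controller` is assumed to be in scope):
    ///
    /// ```ignore
    /// let outcome = controller
    ///     .send_request_with_current_conversation(ChatParameters::default())
    ///     .await?;
    /// ```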
    pub async fn send_request_with_current_conversation(
        &mut self,
        mut parameters: ChatParameters,
    ) -> Result<SessionOutcome> {
        let streaming = parameters.stream || self.config.general.enable_streaming;
        parameters.stream = streaming;
        let request = ChatRequest {
            model: self.conversation.active().model.clone(),
            messages: self.conversation.active().messages.clone(),
            parameters,
        };

        if streaming {
            match self.provider.chat_stream(request).await {
                Ok(stream) => {
                    let response_id = self.conversation.start_streaming_response();
                    Ok(SessionOutcome::Streaming {
                        response_id,
                        stream,
                    })
                }
                Err(err) => {
                    self.conversation
                        .push_assistant_message(format!("Error starting stream: {}", err));
                    Err(err)
                }
            }
        } else {
            match self.provider.chat(request).await {
                Ok(response) => {
                    self.conversation.push_message(response.message.clone());
                    Ok(SessionOutcome::Complete(response))
                }
                Err(err) => {
                    self.conversation
                        .push_assistant_message(format!("Error: {}", err));
                    Err(err)
                }
            }
        }
    }

    /// Mark a streaming response message with placeholder content
    pub fn mark_stream_placeholder(&mut self, message_id: Uuid, text: &str) -> Result<()> {
        self.conversation
            .set_stream_placeholder(message_id, text.to_string())
    }

    /// Apply streaming chunk to the conversation
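    ///
    /// Chunks are appended to the message until one arrives with `is_final`
    /// set. A consumption-loop sketch (ignored by doctests; assumes
    /// `futures::StreamExt` for `.next()`, that the stream yields
    /// `Result<ChatResponse>` items, and that the `response_id` from the
    /// streaming outcome identifies the placeholder message):
    ///
    /// ```ignore
    /// use futures::StreamExt;
    ///
    /// if let SessionOutcome::Streaming { response_id, mut stream } = outcome {
    ///     // Show a placeholder while the first tokens are in flight.
    ///     controller.mark_stream_placeholder(response_id, "...")?;
    ///     while let Some(chunk) = stream.next().await {
    ///         controller.apply_stream_chunk(response_id, &chunk?)?;
    ///     }
    /// }
    /// ```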
    pub fn apply_stream_chunk(&mut self, message_id: Uuid, chunk: &ChatResponse) -> Result<()> {
        self.conversation
            .append_stream_chunk(message_id, &chunk.message.content, chunk.is_final)
    }

    /// Access conversation history
    pub fn history(&self) -> Vec<Conversation> {
        self.conversation.history().cloned().collect()
    }

    /// Start a new conversation, optionally targeting a specific model
    pub fn start_new_conversation(&mut self, model: Option<String>, name: Option<String>) {
        self.conversation.start_new(model, name);
    }

    /// Clear current conversation messages
    pub fn clear(&mut self) {
        self.conversation.clear();
    }

    /// Generate a short AI description for the current conversation
    pub async fn generate_conversation_description(&self) -> Result<String> {
        let conv = self.conversation.active();

        // If the conversation is empty or very short, return a simple description
        if conv.messages.is_empty() {
            return Ok("Empty conversation".to_string());
        }
        if conv.messages.len() == 1 {
            return Ok(Self::preview_description(conv));
        }

        // Build a summary prompt from a sample of the conversation
        let mut summary_messages = Vec::new();

        // Add a system message to guide the description
        summary_messages.push(crate::types::Message::system(
            "Summarize this conversation in 1-2 short sentences (max 100 characters). \
             Focus on the main topic or question being discussed. Be concise and descriptive."
                .to_string(),
        ));

        // Include the first message
        if let Some(first) = conv.messages.first() {
            summary_messages.push(first.clone());
        }

        // Include a middle message if the conversation is long enough
        if conv.messages.len() > 4 {
            if let Some(mid) = conv.messages.get(conv.messages.len() / 2) {
                summary_messages.push(mid.clone());
            }
        }

        // Include the last message
        if let Some(last) = conv.messages.last() {
            if conv.messages.len() > 1 {
                summary_messages.push(last.clone());
            }
        }

        // Create a summarization request
        let request = crate::types::ChatRequest {
            model: conv.model.clone(),
            messages: summary_messages,
            parameters: crate::types::ChatParameters {
                temperature: Some(0.3), // Lower temperature for more focused summaries
                max_tokens: Some(50),   // Keep it short
                stream: false,
                extra: std::collections::HashMap::new(),
            },
        };

        // Get the summary from the provider, falling back to a preview of the
        // first message if the request fails or returns nothing useful.
        match self.provider.chat(request).await {
            Ok(response) => {
                let description = response.message.content.trim().to_string();
                if description.is_empty() {
                    return Ok(Self::preview_description(conv));
                }
                // Truncate if too long (counting characters, not bytes)
                let truncated = if description.chars().count() > 100 {
                    format!("{}...", description.chars().take(97).collect::<String>())
                } else {
                    description
                };
                Ok(truncated)
            }
            Err(_e) => Ok(Self::preview_description(conv)),
        }
    }

    /// Fallback description: a preview of the first message, truncated to 50
    /// characters. Counts characters rather than bytes so multi-byte content
    /// is not flagged as truncated when it is not. Assumes at least one
    /// message is present.
    fn preview_description(conv: &Conversation) -> String {
        let first_msg = &conv.messages[0];
        let preview = first_msg.content.chars().take(50).collect::<String>();
        let ellipsis = if first_msg.content.chars().count() > 50 {
            "..."
        } else {
            ""
        };
        format!("{}{}", preview, ellipsis)
    }
}