Compare commits: f22259b863...main (2 commits)

Commits: 092c065809, e23a8d53d9

.gitignore (vendored), 8 lines changed
@@ -34,11 +34,10 @@ build/
 logs/
 *.log

-# Database files (now includes the specific dev database)
+# Database files
 *.sqlite
 *.sqlite3
 *.db
-owlynews.sqlite3*

 # Dependency directories
 node_modules/
@@ -59,3 +58,8 @@ htmlcov/
 !.yarn/releases
 !.yarn/sdks
 !.yarn/versions
+backend-rust/owlynews.sqlite3
+backend-rust/target
+/backend-rust/config.toml
+/backend-rust/owlynews.sqlite3-shm
+/backend-rust/owlynews.sqlite3-wal

backend-rust/.gitignore (vendored), 1 line changed
@@ -1 +0,0 @@
target/

backend-rust/Cargo.lock (generated), 2574 lines changed. File diff suppressed because it is too large.

backend-rust/Cargo.toml

@@ -1,16 +0,0 @@
[package]
name = "owly-news-summariser"
version = "0.1.0"
edition = "2024"

[dependencies]
anyhow = "1.0"
tokio = { version = "1", features = ["full"] }
axum = "0.8.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sqlx = { version = "0.8", features = ["runtime-tokio", "tls-native-tls", "sqlite", "macros", "migrate", "chrono", "json"] }
dotenv = "0.15"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
toml = "0.9.5"

@@ -1,171 +0,0 @@
# Owly News Summariser - Project Roadmap

This document outlines the strategic approach for transforming the project through three phases: Python-to-Rust backend migration, CLI application addition, and Vue-to-Dioxus frontend migration.

## Project Structure Strategy

### Current Phase: Axum API Setup
```
owly-news-summariser/
├── src/
│   ├── main.rs            # Entry point (will evolve)
│   ├── db.rs              # Database connection & SQLx setup
│   ├── api.rs             # API module declaration
│   ├── api/               # API-specific modules (no mod.rs needed)
│   │   ├── routes.rs      # Route definitions
│   │   ├── middleware.rs  # Custom middleware
│   │   └── handlers.rs    # Request handlers & business logic
│   ├── models.rs          # Models module declaration
│   ├── models/            # Data models & database entities
│   │   ├── user.rs
│   │   ├── article.rs
│   │   └── summary.rs
│   ├── services.rs        # Services module declaration
│   ├── services/          # Business logic layer
│   │   ├── news_service.rs
│   │   └── summary_service.rs
│   └── config.rs          # Configuration management
├── migrations/            # SQLx migrations (managed by SQLx CLI)
├── frontend/              # Keep existing Vue frontend for now
└── Cargo.toml
```

### Phase 2: Multi-Binary Structure (API + CLI)
```
owly-news-summariser/
├── src/
│   ├── lib.rs             # Shared library code
│   ├── bin/
│   │   ├── server.rs      # API server binary
│   │   └── cli.rs         # CLI application binary
│   ├── [same module structure as Phase 1]
├── migrations/
├── frontend/
└── Cargo.toml             # Updated for multiple binaries
```

### Phase 3: Full Rust Stack
```
owly-news-summariser/
├── src/
│   ├── [same structure as Phase 2]
├── migrations/
├── frontend-dioxus/       # New Dioxus frontend
├── frontend/              # Legacy Vue (to be removed)
└── Cargo.toml
```

## Step-by-Step Process

### Phase 1: Axum API Implementation

**Step 1: Core Infrastructure Setup**
- Set up database connection pooling with SQLx
- Create configuration management system (environment variables, config files; see the sketch below)
- Establish error handling patterns with `anyhow`
- Set up logging infrastructure
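
A minimal sketch of the environment-variable side of this, using the `dotenv` crate already listed in Cargo.toml. The variable names follow the project's `.env` file; the defaults and the `EnvSettings` type are illustrative assumptions, not the final configuration layer.

```rust
use anyhow::Result;

/// Illustrative environment-backed settings (names mirror the .env file).
#[derive(Debug)]
pub struct EnvSettings {
    pub ollama_host: String,
    pub cron_hours: f64,
    pub db_name: String,
}

impl EnvSettings {
    pub fn from_env() -> Result<Self> {
        // Load variables from a local .env file if present; ignore it if missing.
        dotenv::dotenv().ok();

        Ok(Self {
            ollama_host: std::env::var("OLLAMA_HOST")
                .unwrap_or_else(|_| "http://localhost:11434".to_string()),
            cron_hours: std::env::var("CRON_HOURS")
                .unwrap_or_else(|_| "1".to_string())
                .parse()?,
            db_name: std::env::var("DB_NAME")
                .unwrap_or_else(|_| "owlynews.sqlite3".to_string()),
        })
    }
}
```
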
**Step 2: Data Layer**
- Design your database schema and create SQLx migrations using `sqlx migrate add`
- Create Rust structs that mirror your Python backend's data models
- Implement database access layer with proper async patterns
- Use SQLx's compile-time checked queries (see the sketch below)
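
A minimal sketch of a compile-time checked query against the existing `news` table. It assumes `DATABASE_URL` (or `cargo sqlx prepare` offline data) is available at build time so the macro can verify the SQL; nullability overrides such as `column AS "column!"` may still be needed for some SQLite columns.

```rust
use sqlx::SqlitePool;

/// Mirrors a subset of the `news` table from the initial migration.
#[derive(Debug)]
pub struct NewsItem {
    pub title: String,
    pub summary: Option<String>,
    pub url: String,
    pub country: String,
}

/// The SQL is checked against the schema at compile time; a mismatch fails the build.
pub async fn latest_news(pool: &SqlitePool, limit: i64) -> Result<Vec<NewsItem>, sqlx::Error> {
    sqlx::query_as!(
        NewsItem,
        "SELECT title, summary, url, country FROM news ORDER BY published DESC LIMIT ?",
        limit
    )
    .fetch_all(pool)
    .await
}
```
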
**Step 3: API Layer Architecture**
- Create modular route structure (users, articles, summaries, etc.)
- Implement middleware for CORS, authentication, logging
- Set up request/response serialization with Serde (see the sketch below)
- Create proper error responses and status codes
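
A hedged sketch of a Serde-typed handler with an explicit status code, using the axum and sqlx versions from Cargo.toml. The `CreateFeed`/`FeedCreated` types and the feed-creation route are hypothetical; only the `feeds` table comes from the existing schema.

```rust
use axum::{extract::State, http::StatusCode, Json};
use serde::{Deserialize, Serialize};
use sqlx::SqlitePool;

/// Hypothetical request body for registering a new RSS feed.
#[derive(Deserialize)]
pub struct CreateFeed {
    pub country: Option<String>,
    pub url: String,
}

/// Hypothetical response body returned to the Vue frontend.
#[derive(Serialize)]
pub struct FeedCreated {
    pub id: i64,
    pub url: String,
}

/// Returns 201 on success so the frontend can distinguish creation from a plain 200.
pub async fn create_feed(
    State(pool): State<SqlitePool>,
    Json(body): Json<CreateFeed>,
) -> Result<(StatusCode, Json<FeedCreated>), StatusCode> {
    let result = sqlx::query("INSERT INTO feeds (country, url) VALUES (?, ?)")
        .bind(&body.country)
        .bind(&body.url)
        .execute(&pool)
        .await
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;

    Ok((
        StatusCode::CREATED,
        Json(FeedCreated {
            id: result.last_insert_rowid(),
            url: body.url,
        }),
    ))
}
```
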
**Step 4: Business Logic Migration**
- Port your Python backend logic to Rust services
- Maintain API compatibility with your existing Vue frontend
- Implement proper async patterns for external API calls
- Add comprehensive testing

**Step 5: Integration & Testing**
- Test API endpoints thoroughly
- Ensure Vue frontend works seamlessly with new Rust backend
- Performance testing and optimization
- Deploy and monitor

### Phase 2: CLI Application Addition

**Step 1: Restructure for Multiple Binaries**
- Move API code to `src/bin/server.rs`
- Create `src/bin/cli.rs` for CLI application
- Keep shared logic in `src/lib.rs`
- Update Cargo.toml to support multiple binaries (see the sketch below)
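
A sketch of what the updated manifest could look like for this step. The explicit `[lib]` and `[[bin]]` sections spell out what Cargo would otherwise auto-discover under `src/bin/`; names are illustrative.

```toml
[package]
name = "owly-news-summariser"
version = "0.1.0"
edition = "2024"

# Shared code lives in src/lib.rs and is used by both binaries.
[lib]
name = "owly_news_summariser"
path = "src/lib.rs"

[[bin]]
name = "server"
path = "src/bin/server.rs"

[[bin]]
name = "cli"
path = "src/bin/cli.rs"
```
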
**Step 2: CLI Architecture**
- Use clap for command-line argument parsing (see the sketch below)
- Reuse existing services and models from the API
- Create CLI-specific output formatting
- Implement batch processing capabilities
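
clap is not among the dependencies in the current Cargo.toml, so this assumes adding `clap = { version = "4", features = ["derive"] }`. The subcommand names are purely illustrative.

```rust
use clap::{Parser, Subcommand};

/// Hypothetical CLI surface for the summariser.
#[derive(Parser)]
#[command(name = "owly", about = "Owly News Summariser CLI")]
struct Cli {
    #[command(subcommand)]
    command: Command,
}

#[derive(Subcommand)]
enum Command {
    /// Fetch feeds and summarise new articles in one batch run
    Sync {
        /// Limit the number of articles processed in this run
        #[arg(long, default_value_t = 50)]
        limit: usize,
    },
    /// Print the most recent summaries to stdout
    List,
}

fn main() {
    let cli = Cli::parse();
    match cli.command {
        Command::Sync { limit } => println!("would sync up to {limit} articles"),
        Command::List => println!("would list recent summaries"),
    }
}
```
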
**Step 3: Shared Core Logic**
- Extract common functionality into library crates
- Ensure both API and CLI can use the same business logic
- Implement proper configuration management for both contexts

### Phase 3: Dioxus Frontend Migration

**Step 1: Parallel Development**
- Create new `frontend-dioxus/` directory
- Keep existing Vue frontend running during development
- Set up Dioxus project structure with proper routing

**Step 2: Component Architecture**
- Design reusable Dioxus components
- Implement state management (similar to Pinia in Vue)
- Create API client layer for communication with Rust backend (see the sketch below)
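
One possible shape for that client, assuming a `reqwest` dependency (with its `json` feature) is added to the frontend crate. The `/api/articles` path matches routes.rs below; the response type only mirrors the current stub payload and is a placeholder.

```rust
use serde::Deserialize;

/// Placeholder shape matching the stub `{"articles": []}` returned by the current handler.
#[derive(Deserialize, Debug)]
pub struct ArticlesResponse {
    pub articles: Vec<serde_json::Value>,
}

/// Thin API client the Dioxus components could share.
pub struct ApiClient {
    base_url: String,
    http: reqwest::Client,
}

impl ApiClient {
    pub fn new(base_url: impl Into<String>) -> Self {
        Self {
            base_url: base_url.into(),
            http: reqwest::Client::new(),
        }
    }

    pub async fn articles(&self) -> Result<ArticlesResponse, reqwest::Error> {
        self.http
            .get(format!("{}/api/articles", self.base_url))
            .send()
            .await?
            .error_for_status()?
            .json::<ArticlesResponse>()
            .await
    }
}
```
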
**Step 3: Feature Parity**
- Port Vue components to Dioxus incrementally
- Ensure UI/UX consistency
- Implement proper error handling and loading states

**Step 4: Final Migration**
- Switch production traffic to Dioxus frontend
- Remove Vue frontend after thorough testing
- Optimize bundle size and performance

## Key Strategic Considerations

### 1. Modern Rust Practices
- Use modern module structure without `mod.rs` files
- Leverage SQLx's built-in migration and connection management
- Follow Rust 2018+ edition conventions

### 2. Maintain Backward Compatibility
- Keep API contracts stable during Vue-to-Dioxus transition
- Use feature flags for gradual rollouts

### 3. Shared Code Architecture
- Design your core business logic to be framework-agnostic (see the sketch below)
- Use workspace structure for better code organization
- Consider extracting domain logic into separate crates
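
A small sketch of what "framework-agnostic" means in practice: the function below depends only on sqlx and anyhow, so API handlers, the CLI, and integration tests can all call it. The query itself is illustrative.

```rust
use anyhow::Result;
use sqlx::SqlitePool;

/// No axum, clap, or Dioxus types in the signature, so any entry point can reuse it.
pub async fn count_articles(pool: &SqlitePool) -> Result<i64> {
    let (count,): (i64,) = sqlx::query_as("SELECT COUNT(*) FROM news")
        .fetch_one(pool)
        .await?;
    Ok(count)
}
```
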
### 4. Testing Strategy
- Unit tests for business logic
- Integration tests for API endpoints
- End-to-end tests for the full stack
- CLI integration tests

### 5. Configuration Management
- Environment-based configuration
- Support for different deployment scenarios (API-only, CLI-only, full stack)
- Proper secrets management

### 6. Database Strategy
- Use SQLx migrations for schema evolution (`sqlx migrate add/run`)
- Leverage compile-time checked queries with SQLx macros
- Implement proper connection pooling and error handling
- Let SQLx handle what it does best; don't reinvent the wheel

## What SQLx Handles for You

- **Migrations**: Use `sqlx migrate add <name>` to create, `sqlx::migrate!()` macro to embed
- **Connection Pooling**: Built-in `SqlitePool` with configuration options (see the sketch below)
- **Query Safety**: Compile-time checked queries prevent SQL injection and typos
- **Type Safety**: Automatic Rust type mapping from database types
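
For the connection-pooling point, a sketch of configuring the pool explicitly via `SqlitePoolOptions` instead of `SqlitePool::connect_with` alone, as db.rs does further down. The sizing and timeout numbers are illustrative assumptions.

```rust
use std::str::FromStr;
use std::time::Duration;

use sqlx::sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions};
use sqlx::SqlitePool;

pub async fn pool_with_options(database_url: &str) -> Result<SqlitePool, sqlx::Error> {
    let connect = SqliteConnectOptions::from_str(database_url)?
        .create_if_missing(true)
        .journal_mode(SqliteJournalMode::Wal)
        .foreign_keys(true);

    SqlitePoolOptions::new()
        .max_connections(5)                      // illustrative sizing
        .acquire_timeout(Duration::from_secs(5)) // fail fast instead of hanging
        .connect_with(connect)
        .await
}
```
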
backend-rust/config.toml

@@ -1,3 +0,0 @@
[server]
host = '127.0.0.1'
port = 8090

@@ -1,31 +0,0 @@
# URL for the Ollama service
OLLAMA_HOST=http://localhost:11434

# Interval for scheduled news fetching in hours
CRON_HOURS=1

# Minimum interval for scheduled news fetching in hours
MIN_CRON_HOURS=0.5

# Cooldown period in minutes between manual syncs
SYNC_COOLDOWN_MINUTES=30

# LLM model to use for summarization
LLM_MODEL=qwen2:7b-instruct-q4_K_M
LLM_MODEL=phi3:3.8b-mini-128k-instruct-q4_0
LLM_MODEL=mistral-nemo:12b

# Timeout in seconds for LLM requests
LLM_TIMEOUT_SECONDS=180

# Timeout in seconds for Ollama API requests
OLLAMA_API_TIMEOUT_SECONDS=10

# Timeout in seconds for article fetching
ARTICLE_FETCH_TIMEOUT=30

# Maximum length of article content to process
MAX_ARTICLE_LENGTH=5000

# SQLite database connection string
DB_NAME=owlynews.sqlite3

@@ -1,5 +0,0 @@
DROP TABLE IF EXISTS meta;
DROP TABLE IF EXISTS settings;
DROP TABLE IF EXISTS feeds;
DROP INDEX IF EXISTS idx_news_published;
DROP TABLE IF EXISTS news;

@@ -1,38 +0,0 @@
-- Initial database schema for Owly News Summariser

-- News table to store articles
CREATE TABLE IF NOT EXISTS news
(
    id         INTEGER PRIMARY KEY AUTOINCREMENT,
    title      TEXT NOT NULL,
    summary    TEXT,
    url        TEXT NOT NULL,
    published  TIMESTAMP NOT NULL,
    country    TEXT NOT NULL,
    created_at INTEGER DEFAULT (strftime('%s', 'now'))
);

-- Index for faster queries on published date
CREATE INDEX IF NOT EXISTS idx_news_published ON news (published);

-- Feeds table to store RSS feed sources
CREATE TABLE IF NOT EXISTS feeds
(
    id      INTEGER PRIMARY KEY,
    country TEXT,
    url     TEXT UNIQUE NOT NULL
);

-- Settings table for application configuration
CREATE TABLE IF NOT EXISTS settings
(
    key TEXT PRIMARY KEY,
    val TEXT NOT NULL
);

-- Meta table for application metadata
CREATE TABLE IF NOT EXISTS meta
(
    key TEXT PRIMARY KEY,
    val TEXT NOT NULL
);

@@ -1,18 +0,0 @@
CREATE TABLE news_backup
(
    id         INTEGER PRIMARY KEY AUTOINCREMENT,
    title      TEXT NOT NULL,
    summary    TEXT,
    url        TEXT NOT NULL,
    published  TIMESTAMP NOT NULL,
    country    TEXT NOT NULL,
    created_at INTEGER DEFAULT (strftime('%s', 'now'))
);

INSERT INTO news_backup
SELECT id, title, summary, url, published, country, created_at
FROM news;
DROP TABLE news;
ALTER TABLE news_backup
    RENAME TO news;
CREATE INDEX IF NOT EXISTS idx_news_published ON news (published);

@@ -1,3 +0,0 @@
-- Add category field to news table
ALTER TABLE news
    ADD COLUMN category TEXT;

backend-rust/src/api.rs

@@ -1,3 +0,0 @@
pub mod handlers;
pub mod middleware;
pub mod routes;

backend-rust/src/api/handlers.rs

@@ -1,39 +0,0 @@
use axum::Json;
use axum::extract::State;
use serde_json::Value;
use sqlx::SqlitePool;

pub async fn get_articles(State(pool): State<SqlitePool>) -> Result<Json<Value>, AppError> {
    // TODO: Article logic
    Ok(Json(serde_json::json!({"articles": []})))
}

pub async fn get_summaries(State(pool): State<SqlitePool>) -> Result<Json<Value>, AppError> {
    // TODO: Summaries logic
    Ok(Json(serde_json::json!({"summaries": []})))
}

use axum::{
    http::StatusCode,
    response::{IntoResponse, Response},
};

pub struct AppError(anyhow::Error);

impl IntoResponse for AppError {
    fn into_response(self) -> Response {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Something went wrong: {}", self.0),
        )
            .into_response()
    }
}

impl<E> From<E> for AppError
where
    E: Into<anyhow::Error>, {
    fn from(err: E) -> Self {
        Self(err.into())
    }
}

backend-rust/src/api/routes.rs

@@ -1,11 +0,0 @@
use axum::Router;
use axum::routing::get;
use sqlx::SqlitePool;
use crate::api::handlers;

pub fn routes() -> Router<SqlitePool> {
    Router::new()
        .route("/articles", get(handlers::get_articles))
        .route("/summaries", get(handlers::get_summaries))
    // Add more routes as needed
}

backend-rust/src/config.rs

@@ -1,100 +0,0 @@
use serde::Deserialize;
use std::env;
use std::path::PathBuf;
use toml::Value;
use tracing::{error, info};

#[derive(Deserialize, Debug)]
pub struct AppSettings {
    pub config_path: String,
    pub db_path: String,
    pub migration_path: String,
    pub config: Config,
}

#[derive(Deserialize, Debug)]
pub struct Config {
    pub server: Server,
}

#[derive(Deserialize, Debug)]
pub struct Server {
    pub host: String,
    pub port: u16,
}

#[derive(Deserialize, Debug)]
struct ConfigFile {
    server: Server,
}

impl AppSettings {
    pub fn get_app_settings() -> Self {
        let config_file = Self::load_config_file().unwrap_or_else(|| {
            info!("Using default config values");
            ConfigFile {
                server: Server {
                    host: "127.0.0.1".to_string(),
                    port: 1337,
                },
            }
        });

        Self {
            config_path: Self::get_config_path(),
            db_path: Self::get_db_path(),
            migration_path: String::from("./migrations"),
            config: Config {
                server: config_file.server,
            },
        }
    }

    fn load_config_file() -> Option<ConfigFile> {
        let config_path = Self::get_config_path();
        let contents = std::fs::read_to_string(&config_path)
            .map_err(|e| error!("Failed to read config file: {}", e))
            .ok()?;

        toml::from_str(&contents)
            .map_err(|e| error!("Failed to parse TOML: {}", e))
            .ok()
    }

    fn get_db_path() -> String {
        if cfg!(debug_assertions) {
            // Development: Use backend-rust directory
            // TODO: Change later
            let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
            path.push("owlynews.sqlite3");
            path.to_str().unwrap().to_string()
        } else {
            // Production: Use standard Linux applications data directory
            "/var/lib/owly-news-summariser/owlynews.sqlite3".to_string()
        }
    }

    fn get_config_path() -> String {
        if cfg!(debug_assertions) {
            // Development: Use backend-rust directory
            // TODO: Change later
            let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
            path.push("config.toml");
            path.to_str().unwrap().to_string()
        } else {
            // Production: Use standard Linux applications data directory
            "$HOME/owly-news-summariser/config.toml".to_string()
        }
    }

    pub fn database_url(&self) -> String {
        format!("sqlite:{}", self.db_path)
    }

    pub fn ensure_db_directory(&self) -> Result<(), std::io::Error> {
        if let Some(parent) = std::path::Path::new(&self.db_path).parent() {
            std::fs::create_dir_all(parent)?;
        }
        Ok(())
    }
}

backend-rust/src/db.rs

@@ -1,31 +0,0 @@
use crate::config::{AppSettings};
use anyhow::Result;
use sqlx::migrate::Migrator;
use sqlx::sqlite::{SqliteConnectOptions};
use sqlx::{Pool, Sqlite, SqlitePool};
use std::str::FromStr;
use tracing::info;

pub const MIGRATOR: Migrator = sqlx::migrate!("./migrations");

pub async fn initialize_db(app_settings: &AppSettings) -> Result<Pool<Sqlite>> {
    app_settings.ensure_db_directory()?;

    let options = SqliteConnectOptions::from_str(&app_settings.database_url())?
        .create_if_missing(true)
        .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal)
        .foreign_keys(true);

    let pool = SqlitePool::connect_with(options).await?;

    MIGRATOR.run(&pool).await?;
    info!("Database migrations completed successfully");

    Ok(pool)
}

pub async fn create_pool(opts: SqliteConnectOptions) -> Result<SqlitePool> {
    let pool = SqlitePool::connect_with(opts).await?;

    Ok(pool)
}

backend-rust/src/main.rs

@@ -1,74 +0,0 @@
mod api;
mod config;
mod db;
mod models;
mod services;

use crate::config::{AppSettings};
use anyhow::Result;
use axum::Router;
use axum::routing::get;
use tokio::signal;
use tracing::{info};
use tracing_subscriber;

#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt()
        .with_target(false)
        .compact()
        .init();

    let app_settings = AppSettings::get_app_settings();

    let pool = db::initialize_db(&app_settings).await?;

    let app = create_app(pool);

    let listener =
        tokio::net::TcpListener::bind(format!("{}:{}", app_settings.config.server.host, app_settings.config.server.port)).await?;
    info!("Server starting on {}:{}", app_settings.config.server.host, app_settings.config.server.port);

    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await?;

    Ok(())
}

fn create_app(pool: sqlx::SqlitePool) -> Router {
    Router::new()
        .route("/health", get(health_check))
        .nest("/api", api::routes::routes())
        .with_state(pool)
}

async fn health_check() -> &'static str {
    "OK"
}

async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install CTRL+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install terminate handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    info!("Signal received, shutting down");
}

backend-rust/src/models.rs

@@ -1,3 +0,0 @@
mod article;
mod summary;
mod user;

backend-rust/src/services.rs

@@ -1,2 +0,0 @@
mod summary_service;
mod news_service;

@@ -12,7 +12,7 @@ LLM_MODEL = os.getenv("LLM_MODEL", "mistral-nemo:12b")
 LLM_TIMEOUT_SECONDS = int(os.getenv("LLM_TIMEOUT_SECONDS", 180))
 OLLAMA_API_TIMEOUT_SECONDS = int(os.getenv("OLLAMA_API_TIMEOUT_SECONDS", 10))
 ARTICLE_FETCH_TIMEOUT = int(os.getenv("ARTICLE_FETCH_TIMEOUT", 30))
-MAX_ARTICLE_LENGTH = int(os.getenv("MAX_ARTICLE_LENGTH", 5000))
+MAX_ARTICLE_LENGTH = int(os.getenv("MAX_ARTICLE_LENGTH", 10_000))

 frontend_path = os.path.join(
     os.path.dirname(os.path.dirname(os.path.dirname(__file__))),

@@ -209,7 +209,7 @@ class NewsFetcher:
     "format": "json",
     "options": {
         "num_gpu": 1,  # Force GPU usage
-        "num_ctx": 8192,  # Context size
+        "num_ctx": 128_000,  # Context size
     }
 }

@@ -14,6 +14,7 @@ SYNC_COOLDOWN_MINUTES=30
 LLM_MODEL=qwen2:7b-instruct-q4_K_M
 LLM_MODEL=phi3:3.8b-mini-128k-instruct-q4_0
 LLM_MODEL=mistral-nemo:12b
+LLM_MODEL=cnjack/mistral-samll-3.1:24b-it-q4_K_S

 # Timeout in seconds for LLM requests
 LLM_TIMEOUT_SECONDS=180