Compare commits

...

3 Commits

5 changed files with 30 additions and 6 deletions

5
.gitignore vendored
View File

@@ -58,3 +58,8 @@ htmlcov/
 !.yarn/releases
 !.yarn/sdks
 !.yarn/versions
+backend-rust/owlynews.sqlite3
+backend-rust/target
+/backend-rust/config.toml
+/backend-rust/owlynews.sqlite3-shm
+/backend-rust/owlynews.sqlite3-wal

View File

@@ -12,7 +12,7 @@ LLM_MODEL = os.getenv("LLM_MODEL", "mistral-nemo:12b")
 LLM_TIMEOUT_SECONDS = int(os.getenv("LLM_TIMEOUT_SECONDS", 180))
 OLLAMA_API_TIMEOUT_SECONDS = int(os.getenv("OLLAMA_API_TIMEOUT_SECONDS", 10))
 ARTICLE_FETCH_TIMEOUT = int(os.getenv("ARTICLE_FETCH_TIMEOUT", 30))
-MAX_ARTICLE_LENGTH = int(os.getenv("MAX_ARTICLE_LENGTH", 5000))
+MAX_ARTICLE_LENGTH = int(os.getenv("MAX_ARTICLE_LENGTH", 10_000))
 frontend_path = os.path.join(
     os.path.dirname(os.path.dirname(os.path.dirname(__file__))),

View File

@@ -15,6 +15,8 @@ import sqlite3
 import time
 from datetime import datetime, timedelta, timezone
 from typing import Any, Dict, List, Union
+import subprocess
+import threading
 # Third-party imports
 import httpx
@@ -62,6 +64,19 @@ scheduler.add_job(
 scheduler.start()
+def start_frontend_build():
+    try:
+        subprocess.Popen(
+            ["yarn", "build"],
+            cwd="../frontend",
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        print("Frontend build started successfully")
+    except Exception as e:
+        print(f"Failed to start frontend build: {e}")
# API endpoints
@app.get("/news", response_model=List[Dict[str, Any]])
async def get_news(
@@ -200,8 +215,6 @@ async def list_feeds(db: sqlite3.Cursor = Depends(get_db)):
)
@app.post("/feeds", response_model=SuccessResponse)
async def add_feed(
feed: FeedData,
@@ -332,7 +345,6 @@ async def manual_sync(db: sqlite3.Cursor = Depends(get_db_write)): # Note: chan
)
@app.get("/meta/last-sync", response_model=TimestampResponse)
async def get_last_sync(db: sqlite3.Cursor = Depends(get_db)):
"""
@@ -406,5 +418,11 @@ async def update_cron_schedule(
     return {"hours": hours}
 # Mount static frontend
 app.mount("/", StaticFiles(directory=frontend_path, html=True), name="static")
 if __name__ == "__main__":
+    threading.Thread(target=start_frontend_build, daemon=True).start()
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=8000)

View File

@@ -209,7 +209,7 @@ class NewsFetcher:
             "format": "json",
             "options": {
                 "num_gpu": 1, # Force GPU usage
-                "num_ctx": 8192, # Context size
+                "num_ctx": 128_000, # Context size
             }
         }

View File

@@ -14,6 +14,7 @@ SYNC_COOLDOWN_MINUTES=30
 LLM_MODEL=qwen2:7b-instruct-q4_K_M
 LLM_MODEL=phi3:3.8b-mini-128k-instruct-q4_0
 LLM_MODEL=mistral-nemo:12b
+LLM_MODEL=cnjack/mistral-samll-3.1:24b-it-q4_K_S
 # Timeout in seconds for LLM requests
 LLM_TIMEOUT_SECONDS=180