[refactor] optimize string formatting, clean up imports, and implement the Default trait for backends

2025-08-08 19:42:10 +02:00
parent cd076c5a91
commit e2504ec3c6
4 changed files with 30 additions and 22 deletions

View File

@@ -98,24 +98,36 @@ impl CpuBackend {
         CpuBackend
     }
 }
+impl Default for CpuBackend {
+    fn default() -> Self { Self::new() }
+}
 impl CudaBackend {
     /// Create a new CUDA backend instance.
     pub fn new() -> Self {
         CudaBackend
     }
 }
+impl Default for CudaBackend {
+    fn default() -> Self { Self::new() }
+}
 impl HipBackend {
     /// Create a new HIP backend instance.
     pub fn new() -> Self {
         HipBackend
     }
 }
+impl Default for HipBackend {
+    fn default() -> Self { Self::new() }
+}
 impl VulkanBackend {
     /// Create a new Vulkan backend instance.
     pub fn new() -> Self {
         VulkanBackend
     }
 }
+impl Default for VulkanBackend {
+    fn default() -> Self { Self::new() }
+}
 impl TranscribeBackend for CpuBackend {
     fn kind(&self) -> BackendKind {
@@ -311,7 +323,7 @@ pub(crate) fn transcribe_with_whisper_rs(
     }
     // Suppress stderr from whisper/ggml during model load and inference when quiet and not verbose.
-    let (ctx, mut state) = crate::with_suppressed_stderr(|| {
+    let (_ctx, mut state) = crate::with_suppressed_stderr(|| {
         let cparams = whisper_rs::WhisperContextParameters::default();
         let ctx = whisper_rs::WhisperContext::new_with_params(model_str, cparams)
             .with_context(|| format!("Failed to load Whisper model at {}", model.display()))?;
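The Default impls above delegate to the existing constructors, which is the pattern Clippy's new_without_default lint asks for: a type with a public, argument-free new() should also implement Default. A minimal sketch of the pattern, using a hypothetical MyBackend unit struct rather than the real backend types:

    // Hypothetical unit struct standing in for the backends above.
    struct MyBackend;

    impl MyBackend {
        /// Create a new backend instance.
        pub fn new() -> Self {
            MyBackend
        }
    }

    // Delegating to new() keeps a single construction path and lets
    // callers write MyBackend::default() or rely on Default::default().
    impl Default for MyBackend {
        fn default() -> Self {
            Self::new()
        }
    }

The ctx -> _ctx rename in the second hunk is a related warning fix: a leading underscore marks the binding as intentionally unused while still keeping the value alive until the end of its scope (unlike a bare _, which would drop it immediately).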

View File

@@ -1,8 +1,6 @@
 use std::fs::{File, create_dir_all};
 use std::io::{self, Read, Write};
 use std::path::{Path, PathBuf};
-#[cfg(unix)]
-use std::os::unix::io::AsRawFd;
 use anyhow::{Context, Result, anyhow};
 use clap::{Parser, Subcommand};
@@ -774,7 +772,6 @@ fn main() {
 mod tests {
     use super::*;
     use clap::CommandFactory;
-    use polyscribe::backend::*;
     use polyscribe::format_srt_time;
     use std::env as std_env;
     use std::fs;
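Both removals in this file are unused-import cleanups with no behavior change. The cfg-gated pair is the one to watch: a #[cfg(unix)] attribute on a use item compiles the import only on Unix targets, so attribute and import have to be deleted together, as done above. A small illustrative sketch of how such a gated trait import is normally paired with gated code (not taken from this codebase):

    // The trait import only exists on Unix; any code calling
    // as_raw_fd() must be gated the same way.
    #[cfg(unix)]
    use std::os::unix::io::AsRawFd;

    #[cfg(unix)]
    fn raw_fd_of(file: &std::fs::File) -> i32 {
        file.as_raw_fd()
    }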

View File

@@ -81,14 +81,14 @@ fn human_size(bytes: u64) -> String {
     } else if b >= KB {
         format!("{:.2} KiB", b / KB)
     } else {
-        format!("{} B", bytes)
+        format!("{bytes} B")
     }
 }
 fn to_hex_lower(bytes: &[u8]) -> String {
     let mut s = String::with_capacity(bytes.len() * 2);
     for b in bytes {
-        s.push_str(&format!("{:02x}", b));
+        s.push_str(&format!("{b:02x}"));
     }
     s
 }
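The format! rewrites in this file are all one mechanical change: since Rust 1.58, format strings can capture variables from the surrounding scope by name, and Clippy's uninlined_format_args lint suggests doing so. Only plain identifiers can be inlined, which is why the b / KB expression in the KiB branch above keeps its positional argument. A quick self-contained illustration (values are made up):

    fn main() {
        let bytes = 512u64;
        // Positional argument (older style):
        let old = format!("{} B", bytes);
        // Named capture (available since Rust 1.58):
        let new = format!("{bytes} B");
        assert_eq!(old, new);

        // Format specifiers such as zero-padded hex work with captures too:
        let b = 0x0fu8;
        assert_eq!(format!("{b:02x}"), "0f");
    }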
@@ -159,8 +159,7 @@ fn fill_meta_via_head(repo: &str, name: &str) -> (Option<u64>, Option<String>) {
         Err(_) => return (None, None),
     };
     let url = format!(
-        "https://huggingface.co/{}/resolve/main/ggml-{}.bin",
-        repo, name
+        "https://huggingface.co/{repo}/resolve/main/ggml-{name}.bin"
     );
     let resp = match head_client
         .head(url)
@@ -207,8 +206,7 @@ fn hf_fetch_repo_models(client: &Client, repo: &'static str) -> Result<Vec<ModelEntry>> {
     }
     // Prefer the tree endpoint for reliable size/hash metadata, then fall back to model metadata
     let tree_url = format!(
-        "https://huggingface.co/api/models/{}/tree/main?recursive=1",
-        repo
+        "https://huggingface.co/api/models/{repo}/tree/main?recursive=1"
     );
     let mut out: Vec<ModelEntry> = Vec::new();
@@ -249,7 +247,7 @@ fn hf_fetch_repo_models(client: &Client, repo: &'static str) -> Result<Vec<ModelEntry>> {
     }
     if out.is_empty() {
-        let url = format!("https://huggingface.co/api/models/{}", repo);
+        let url = format!("https://huggingface.co/api/models/{repo}");
         let resp = client
             .get(url)
             .send()
@@ -286,13 +284,13 @@ fn hf_fetch_repo_models(client: &Client, repo: &'static str) -> Result<Vec<ModelEntry>> {
     }
     // Fill missing metadata (size/hash) via HEAD request if necessary
-    if out.iter().any(|m| m.size == 0 || m.sha256.is_none()) {
-        if !(crate::is_no_interaction() && crate::verbose_level() < 2) {
-            ilog!(
-                "Fetching online data: completing metadata checks for models in {}...",
-                repo
-            );
-        }
-    }
+    if out.iter().any(|m| m.size == 0 || m.sha256.is_none())
+        && !(crate::is_no_interaction() && crate::verbose_level() < 2)
+    {
+        ilog!(
+            "Fetching online data: completing metadata checks for models in {}...",
+            repo
+        );
+    }
     for m in out.iter_mut() {
         if m.size == 0 || m.sha256.is_none() {
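This hunk is the collapsible_if rewrite Clippy suggests: when an outer if contains nothing but another if, the two conditions can be joined with &&. The result is behavior-preserving because && short-circuits exactly as the nesting did. Schematically, with placeholder conditions:

    fn demo(cond_a: bool, cond_b: bool) {
        // Before: the outer block only wraps the inner if.
        if cond_a {
            if cond_b {
                println!("both hold");
            }
        }
        // After: one condition; cond_b is still only evaluated
        // when cond_a is true, thanks to short-circuiting.
        if cond_a && cond_b {
            println!("both hold");
        }
    }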
@@ -374,8 +372,8 @@ fn format_model_list(models: &[ModelEntry]) -> String {
     for m in models.iter() {
         if m.base != current {
             current = m.base.clone();
-            out.push_str("\n");
-            out.push_str(&format!("{}:\n", current));
+            out.push('\n');
+            out.push_str(&format!("{current}:\n"));
         }
         // Format without hash and with aligned columns
         out.push_str(&format!(
@@ -406,7 +404,7 @@ fn prompt_select_models_two_stage(models: &[ModelEntry]) -> Result<Vec<ModelEntry>> {
     for m in models.iter() {
         if m.base != last {
             // models are sorted by base; avoid duplicates while preserving order
-            if bases.last().map(|b| b == &m.base).unwrap_or(false) == false {
+            if !bases.last().map(|b| b == &m.base).unwrap_or(false) {
                 bases.push(m.base.clone());
             }
             last = m.base.clone();
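The last hunk in this file replaces == false with !, which is what Clippy's bool_comparison lint flags. As an aside, the map(...).unwrap_or(false) chain also has tighter standard-library equivalents; a hypothetical reworking, not part of this commit:

    fn main() {
        let bases = vec![String::from("tiny")];
        let base = String::from("tiny");

        // As written in the diff (without the leading !):
        let dup = bases.last().map(|b| b == &base).unwrap_or(false);
        // Equivalent, folding the default into one call:
        let dup2 = bases.last().map_or(false, |b| b == &base);
        // Equivalent on Rust 1.70+:
        let dup3 = bases.last().is_some_and(|b| b == &base);
        assert!(dup && dup2 && dup3);
    }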

View File

@@ -7,6 +7,7 @@ use chrono::Local;
 use serde::Deserialize;
 #[derive(Deserialize)]
+#[allow(dead_code)]
 struct OutputEntry {
     id: u64,
     speaker: String,
@@ -257,7 +258,7 @@ fn cli_merge_and_separate_writes_both_kinds_of_outputs() {
 #[test]
 fn cli_set_speaker_names_merge_prompts_and_uses_names() {
     // Also validate that -q does not suppress prompts by running with -q
-    use std::io::{Read as _, Write as _};
+    use std::io::Write as _;
     use std::process::Stdio;
     let exe = env!("CARGO_BIN_EXE_polyscribe");
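The test file changes round out the cleanup: #[allow(dead_code)] on OutputEntry presumably silences "field is never read" warnings for fields the tests deserialize but never inspect, and Read as _ is dropped because only Write is still used. The `as _` form imports a trait anonymously, bringing its methods into scope without binding a name, which avoids name clashes and lets the compiler flag each trait import individually once it becomes unused. A minimal illustration:

    use std::io::Write as _;

    fn main() -> std::io::Result<()> {
        // write_all comes from the anonymously imported Write trait;
        // Vec<u8> implements Write, so this compiles without naming it.
        let mut buf: Vec<u8> = Vec::new();
        buf.write_all(b"hello")?;
        assert_eq!(buf, b"hello");
        Ok(())
    }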