diff --git a/src/backend.rs b/src/backend.rs
index a0146e4..c985a16 100644
--- a/src/backend.rs
+++ b/src/backend.rs
@@ -98,24 +98,36 @@ impl CpuBackend {
         CpuBackend
     }
 }
+impl Default for CpuBackend {
+    fn default() -> Self { Self::new() }
+}
 impl CudaBackend {
     /// Create a new CUDA backend instance.
     pub fn new() -> Self {
         CudaBackend
     }
 }
+impl Default for CudaBackend {
+    fn default() -> Self { Self::new() }
+}
 impl HipBackend {
     /// Create a new HIP backend instance.
    pub fn new() -> Self {
         HipBackend
     }
 }
+impl Default for HipBackend {
+    fn default() -> Self { Self::new() }
+}
 impl VulkanBackend {
     /// Create a new Vulkan backend instance.
     pub fn new() -> Self {
         VulkanBackend
     }
 }
+impl Default for VulkanBackend {
+    fn default() -> Self { Self::new() }
+}
 
 impl TranscribeBackend for CpuBackend {
     fn kind(&self) -> BackendKind {
@@ -311,7 +323,7 @@ pub(crate) fn transcribe_with_whisper_rs(
     }
 
     // Suppress stderr from whisper/ggml during model load and inference when quiet and not verbose.
-    let (ctx, mut state) = crate::with_suppressed_stderr(|| {
+    let (_ctx, mut state) = crate::with_suppressed_stderr(|| {
         let cparams = whisper_rs::WhisperContextParameters::default();
         let ctx = whisper_rs::WhisperContext::new_with_params(model_str, cparams)
             .with_context(|| format!("Failed to load Whisper model at {}", model.display()))?;
diff --git a/src/main.rs b/src/main.rs
index 496f47a..a799f9d 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,8 +1,6 @@
 use std::fs::{File, create_dir_all};
 use std::io::{self, Read, Write};
 use std::path::{Path, PathBuf};
-#[cfg(unix)]
-use std::os::unix::io::AsRawFd;
 
 use anyhow::{Context, Result, anyhow};
 use clap::{Parser, Subcommand};
@@ -774,7 +772,6 @@ fn main() {
 mod tests {
     use super::*;
     use clap::CommandFactory;
-    use polyscribe::backend::*;
     use polyscribe::format_srt_time;
     use std::env as std_env;
     use std::fs;
diff --git a/src/models.rs b/src/models.rs
index 61aa68e..b81416c 100644
--- a/src/models.rs
+++ b/src/models.rs
@@ -81,14 +81,14 @@ fn human_size(bytes: u64) -> String {
     } else if b >= KB {
         format!("{:.2} KiB", b / KB)
     } else {
-        format!("{} B", bytes)
+        format!("{bytes} B")
     }
 }
 
 fn to_hex_lower(bytes: &[u8]) -> String {
     let mut s = String::with_capacity(bytes.len() * 2);
     for b in bytes {
-        s.push_str(&format!("{:02x}", b));
+        s.push_str(&format!("{b:02x}"));
     }
     s
 }
@@ -159,8 +159,7 @@ fn fill_meta_via_head(repo: &str, name: &str) -> (Option<u64>, Option<String>) {
         Err(_) => return (None, None),
     };
     let url = format!(
-        "https://huggingface.co/{}/resolve/main/ggml-{}.bin",
-        repo, name
+        "https://huggingface.co/{repo}/resolve/main/ggml-{name}.bin"
     );
     let resp = match head_client
         .head(url)
@@ -207,8 +206,7 @@ fn hf_fetch_repo_models(client: &Client, repo: &'static str) -> Result<Vec<ModelEntry>> {
     let mut out: Vec<ModelEntry> = Vec::new();
@@ -249,7 +247,7 @@
     for m in models.iter() {
         if m.base != current {
             current = m.base.clone();
-            out.push_str("\n");
-            out.push_str(&format!("{}:\n", current));
+            out.push('\n');
+            out.push_str(&format!("{current}:\n"));
         }
         // Format without hash and with aligned columns
         out.push_str(&format!(
@@ -406,7 +404,7 @@ fn prompt_select_models_two_stage(models: &[ModelEntry]) -> Result