aha 0.2.5

aha is a model-inference library. It currently supports: Qwen(2.5VL/3/3VL/3.5/ASR/3Embedding/3Reranker), MiniCPM4, VoxCPM/1.5, DeepSeek-OCR/2, Hunyuan-OCR, PaddleOCR-VL/1.5, RMBG2.0, GLM(ASR-Nano-2512/OCR), Fun-ASR-Nano-2512, LFM(2/2.5/2VL/2.5VL)
Documentation
/// Configuration for the BigVGAN vocoder, deserialized with serde.
///
/// Field names must match the keys of the source config file exactly (no
/// `#[serde(rename)]` attributes are used), so this struct mirrors the
/// upstream BigVGAN `config.json` layout and carries training-only
/// hyperparameters (optimizer, discriminator, data-loader settings)
/// alongside the generator/audio parameters needed for inference.
///
/// NOTE(review): the per-field descriptions below are inferred from upstream
/// BigVGAN naming conventions, not from code visible in this file — verify
/// against the model loader before relying on them.
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct BigVGANConfig {
    // --- Generator architecture ---
    // Residual-block variant selector (presumably "1"/"2" as in upstream configs).
    pub resblock: String,
    // --- Training / optimizer hyperparameters (likely unused at inference) ---
    pub num_gpus: usize,
    pub batch_size: usize,
    pub learning_rate: f64,
    // Adam beta1 / beta2 — presumably; verify against the trainer.
    pub adam_b1: f64,
    pub adam_b2: f64,
    // Multiplicative learning-rate decay factor.
    pub lr_decay: f64,
    // RNG seed for reproducibility.
    pub seed: u32,
    // --- Generator upsampling stack ---
    // Upsampling factor of each transposed-conv stage.
    pub upsample_rates: Vec<usize>,
    // Kernel size per upsampling stage (parallel to `upsample_rates`).
    pub upsample_kernel_sizes: Vec<usize>,
    // Channel count entering the first upsampling stage.
    pub upsample_initial_channel: usize,
    // Kernel sizes of the residual blocks.
    pub resblock_kernel_sizes: Vec<usize>,
    // Dilation pattern per residual block (outer: block, inner: conv layer).
    pub resblock_dilation_sizes: Vec<Vec<usize>>,
    // Apply tanh to the final waveform output.
    pub use_tanh_at_final: bool,
    // Whether the final projection conv has a bias term.
    pub use_bias_at_final: bool,
    // Activation name (presumably "snake"/"snakebeta" — confirm accepted values).
    pub activation: String,
    // Parameterize the Snake activation in log scale.
    pub snake_logscale: bool,
    // --- Discriminator settings (training only) ---
    // Use the CQT-based discriminator instead of the multi-resolution one.
    pub use_cqtd_instead_of_mrd: bool,
    pub cqtd_filters: usize,
    pub cqtd_max_filters: usize,
    pub cqtd_filters_scale: usize,
    pub cqtd_dilations: Vec<usize>,
    pub cqtd_hop_lengths: Vec<usize>,
    pub cqtd_n_octaves: Vec<usize>,
    pub cqtd_bins_per_octaves: Vec<usize>,
    // Periods of the multi-period discriminator.
    pub mpd_reshapes: Vec<usize>,
    pub use_spectral_norm: bool,
    pub discriminator_channel_mult: usize,
    // --- Loss / training dynamics (training only) ---
    pub use_multiscale_melloss: bool,
    // Weight of the mel-spectrogram loss term.
    pub lambda_melloss: f64,
    pub clip_grad_norm: f64,
    // --- Audio / STFT parameters ---
    // Training segment length in samples — presumably; verify.
    pub segment_size: usize,
    // Number of mel filterbank bands.
    pub num_mels: usize,
    pub num_freq: usize,
    // FFT size.
    pub n_fft: usize,
    // Hop length between STFT frames, in samples.
    pub hop_size: usize,
    // Analysis window length, in samples.
    pub win_size: usize,
    // Audio sampling rate in Hz.
    pub sampling_rate: usize,
    // Mel filterbank low cutoff (Hz).
    pub fmin: usize,
    // Mel filterbank high cutoff (Hz); `None` presumably means Nyquist — confirm.
    pub fmax: Option<usize>,
    // Separate high cutoff used when computing the mel loss.
    pub fmax_for_loss: Option<usize>,
    // Whether input audio volume is normalized — confirm where this is applied.
    pub normalize_volume: bool,
    // Data-loader worker count (training only).
    pub num_workers: usize,
}