aha 0.2.5

aha is a model inference library that currently supports Qwen(2.5VL/3/3VL/3.5/ASR/3Embedding/3Reranker), MiniCPM4, VoxCPM/1.5, DeepSeek-OCR/2, Hunyuan-OCR, PaddleOCR-VL/1.5, RMBG2.0, GLM(ASR-Nano-2512/OCR), Fun-ASR-Nano-2512, and LFM(2/2.5/2VL/2.5VL).
Documentation
//! CLI exec module for direct model inference
//!
//! This module provides model-specific exec implementations for the `run` subcommand.
//! Each model has its own exec module that handles input/output parsing and model invocation.

pub mod all_minilm_l6_v2;
pub mod deepseek_ocr;
pub mod fun_asr_nano;
pub mod glm_asr_nano;
pub mod glm_ocr;
pub mod hunyuan_ocr;
pub mod lfm2;
pub mod lfm2vl;
pub mod minicpm4;
pub mod paddleocr_vl;
pub mod qwen2_5vl;
pub mod qwen3;
pub mod qwen3_5;
pub mod qwen3_asr;
pub mod qwen3_embedding;
pub mod qwen3_reranker;
pub mod qwen3vl;
pub mod rmbg2_0;
pub mod voxcpm;

use anyhow::Result;

/// Trait for model exec implementations.
///
/// Each model exec module implements this trait to provide
/// model-specific inference logic for CLI `run` commands.
///
/// Note that [`run`](ExecModel::run) is an associated function (no `self`),
/// so implementors are invoked as `SomeModel::run(...)` without needing an
/// instance of the type.
pub trait ExecModel {
    /// Run inference with the given input and output parameters.
    ///
    /// # Arguments
    /// * `input` - One or more input items; each entry may be raw text or a
    ///   file path (how the entries are interpreted is model-specific)
    /// * `output` - Optional output file path (if `None`, the implementation
    ///   auto-generates one)
    /// * `weight_path` - Path to the model weights
    ///
    /// # Errors
    /// Returns an [`anyhow::Error`] on failure; the specific failure modes
    /// (e.g. input parsing, weight loading, inference) are implementation-defined.
    fn run(input: &[String], output: Option<&str>, weight_path: &str) -> Result<()>;
}