aha 0.2.5

aha is a model inference library. It currently supports Qwen (2.5VL/3/3VL/3.5/ASR/3Embedding/3Reranker), MiniCPM4, VoxCPM/1.5, DeepSeek-OCR/2, Hunyuan-OCR, PaddleOCR-VL/1.5, RMBG2.0, GLM (ASR-Nano-2512/OCR), Fun-ASR-Nano-2512, and LFM (2/2.5/2VL/2.5VL).
The examples below are taken from the crate's LFM2 integration tests (test_lfm2) and show blocking and streaming chat generation.
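Model weights are loaded from a local directory: the tests resolve a cache root via aha::utils::get_default_save_dir() and append the Hugging Face-style org/repo name. A minimal sketch of that convention (model_dir is a hypothetical helper, not part of the crate):

```rust
use anyhow::Result;

// Hypothetical helper mirroring the path construction in the tests below.
fn model_dir(repo: &str) -> Result<String> {
    // get_default_save_dir() resolves the library's local weight directory.
    let save_dir = aha::utils::get_default_save_dir()
        .ok_or(anyhow::anyhow!("Failed to get save dir"))?;
    Ok(format!("{}/{}/", save_dir, repo))
}
// e.g. model_dir("LiquidAI/LFM2-1.2B") -> "<save_dir>/LiquidAI/LFM2-1.2B/"
```

The blocking test below builds its model path in exactly this way.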
```rust
use aha::{
    models::{GenerateModel, lfm2::generate::Lfm2GenerateModel},
    params::chat::ChatCompletionParameters,
};
use anyhow::Result;
use rocket::futures::StreamExt;
use std::{pin::pin, time::Instant};

#[test]
fn lfm2_generate() -> Result<()> {
    // test with cuda: RUST_BACKTRACE=1 cargo test -F cuda --test test_lfm2 lfm2_generate -r -- --nocapture

    let save_dir =
        aha::utils::get_default_save_dir().ok_or(anyhow::anyhow!("Failed to get save dir"))?;
    let model_path = format!("{}/LiquidAI/LFM2-1.2B/", save_dir);
    // let model_path = format!("{}/LiquidAI/LFM2.5-1.2B-Instruct/", save_dir);
    let message = r#"
    {
        "model": "lfm2",
        "messages": [
            {
                "role": "user",
                "content": "你是谁,你如何看待AI"
            }
        ]
    }
    "#;
    let mes: ChatCompletionParameters = serde_json::from_str(message)?;
    let i_start = Instant::now();
    let mut model = Lfm2GenerateModel::init(&model_path, None, None)?;
    let i_duration = i_start.elapsed();
    println!("Time elapsed in load model is: {:?}", i_duration);

    let res = model.generate(mes)?;
    println!("generate: \n {:?}", res);
    if let Some(usage) = &res.usage {
        println!("usage: \n {:?}", usage);
    }

    Ok(())
}
```
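A rough throughput number for the blocking call can be derived from the returned usage. A minimal sketch, assuming the timed span covers only the generate call itself (not model loading):

```rust
// Time only the generation, not model loading.
let g_start = Instant::now();
let res = model.generate(mes)?;
let g_duration = g_start.elapsed();

if let Some(usage) = &res.usage {
    // total_tokens divided by elapsed seconds gives tokens per second.
    let tps = usage.total_tokens as f64 / g_duration.as_secs_f64();
    println!("Tokens per second (TPS): {:.2}", tps);
}
```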

The streaming variant consumes tokens as they are produced:

```rust
use aha::{
    models::{GenerateModel, lfm2::generate::Lfm2GenerateModel},
    params::chat::ChatCompletionParameters,
};
use anyhow::Result;
use rocket::futures::StreamExt;
use std::{pin::pin, time::Instant};

#[tokio::test]
async fn lfm2_stream() -> Result<()> {
    // test with cuda: RUST_BACKTRACE=1 cargo test -F cuda --test test_lfm2 lfm2_stream -r -- --nocapture
    // test with cuda+flash-attn: RUST_BACKTRACE=1 cargo test -F cuda,flash-attn --test test_lfm2 lfm2_stream -r -- --nocapture

    let save_dir =
        aha::utils::get_default_save_dir().ok_or(anyhow::anyhow!("Failed to get save dir"))?;
    // let model_path = format!("{}/LiquidAI/LFM2-1.2B/", save_dir);
    let model_path = format!("{}/LiquidAI/LFM2.5-1.2B-Instruct/", save_dir);
    let message = r#"
    {
        "model": "lfm2",
        "messages": [
            {
                "role": "user",
                "content": "你如何看待AI"
            }
        ]
    }
    "#;
    let mes: ChatCompletionParameters = serde_json::from_str(message)?;
    let i_start = Instant::now();
    let mut model = Lfm2GenerateModel::init(&model_path, None, None)?;
    let i_duration = i_start.elapsed();
    println!("Time elapsed in load model is: {:?}", i_duration);

    let i_start = Instant::now();
    let mut stream = pin!(model.generate_stream(mes)?);
    while let Some(token) = stream.next().await {
        println!("generate: \n {:?}", token);
    }
    // Measure after the stream is drained so the timing covers the whole
    // generation, not just stream construction.
    let i_duration = i_start.elapsed();
    println!("Time elapsed in generate is: {:?}", i_duration);

    Ok(())
}
```
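On the streaming path, a rough throughput estimate can be taken by counting yielded items. A minimal sketch, assuming each item the stream yields corresponds to one decoded token (the exact item type is whatever generate_stream produces):

```rust
let start = Instant::now();
let mut stream = pin!(model.generate_stream(mes)?);
let mut n_items = 0usize;
while let Some(token) = stream.next().await {
    n_items += 1; // assumption: one stream item ≈ one decoded token
    println!("generate: \n {:?}", token);
}
let secs = start.elapsed().as_secs_f64();
println!("~{:.2} tokens/s ({} items in {:.2}s)", n_items as f64 / secs, n_items, secs);
```

As in the test, the stream is pinned with std::pin::pin! so that StreamExt::next can be called on it (next requires the stream to be Unpin).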