use aha::{
models::{GenerateModel, lfm2::generate::Lfm2GenerateModel},
params::chat::ChatCompletionParameters,
};
use anyhow::Result;
use rocket::futures::StreamExt;
use std::{pin::pin, time::Instant};
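/// Smoke test for blocking generation: loads the LFM2 weights from the
/// default save directory, runs one chat completion, and prints the
/// response plus token usage.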
#[test]
fn lfm2_generate() -> Result<()> {
    let save_dir = aha::utils::get_default_save_dir()
        .ok_or_else(|| anyhow::anyhow!("Failed to get save dir"))?;
    let model_path = format!("{}/LiquidAI/LFM2-1.2B/", save_dir);
    let message = r#"
    {
        "model": "lfm2",
        "messages": [
            {
                "role": "user",
                "content": "Who are you, and what do you think of AI?"
            }
        ]
    }
    "#;
    let mes: ChatCompletionParameters = serde_json::from_str(message)?;
    let i_start = Instant::now();
    let mut model = Lfm2GenerateModel::init(&model_path, None, None)?;
    let i_duration = i_start.elapsed();
    println!("Time elapsed in load model is: {:?}", i_duration);
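    // Run the full (non-streaming) completion and report the result.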
    let res = model.generate(mes)?;
    println!("generate: \n {:?}", res);
    if let Some(usage) = &res.usage {
        println!("usage: \n {:?}", usage);
    }
    Ok(())
}
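/// Streaming variant of the test above: tokens are consumed from an async
/// stream as they arrive, and generation time is measured across the whole
/// stream rather than per token.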
#[tokio::test]
async fn lfm2_stream() -> Result<()> {
    let save_dir = aha::utils::get_default_save_dir()
        .ok_or_else(|| anyhow::anyhow!("Failed to get save dir"))?;
    let model_path = format!("{}/LiquidAI/LFM2.5-1.2B-Instruct/", save_dir);
    let message = r#"
    {
        "model": "lfm2",
        "messages": [
            {
                "role": "user",
                "content": "What do you think of AI?"
            }
        ]
    }
    "#;
    let mes: ChatCompletionParameters = serde_json::from_str(message)?;
    let i_start = Instant::now();
    let mut model = Lfm2GenerateModel::init(&model_path, None, None)?;
    let i_duration = i_start.elapsed();
    println!("Time elapsed in load model is: {:?}", i_duration);
    let i_start = Instant::now();
    // `pin!` pins the stream on the stack so `StreamExt::next` can be called.
    let mut stream = pin!(model.generate_stream(mes)?);
    while let Some(token) = stream.next().await {
        println!("generate: \n {:?}", token);
    }
    // Take the measurement only after the stream is fully drained, so it
    // covers the entire generation rather than just stream construction.
    let i_duration = i_start.elapsed();
    println!("Time elapsed in generate is: {:?}", i_duration);
    Ok(())
}