use llm::{
    builder::{LLMBackend, LLMBuilder},
    chat::ChatMessage,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read the Hugging Face token from the environment, falling back to a placeholder value
    let api_key = std::env::var("HF_TOKEN").unwrap_or("hf_TESTKEY".into());
    // Configure the client: Hugging Face backend, API key, model, and generation settings
    let llm = LLMBuilder::new()
        .backend(LLMBackend::HuggingFace)
        .api_key(api_key)
        .model("moonshotai/Kimi-K2-Instruct-0905")
        .max_tokens(512)
        .temperature(0.7)
        .build()
        .expect("Failed to build LLM");
    // Build a short conversation: a user question, a prior assistant reply, and a follow-up
    let messages = vec![
        ChatMessage::user()
            .content("Tell me about quantum computing")
            .build(),
        ChatMessage::assistant()
            .content("Quantum computing is a type of computing that uses quantum phenomena...")
            .build(),
        ChatMessage::user().content("What are qubits?").build(),
    ];
    // Send the conversation and print the model's reply, or report the error
    match llm.chat(&messages).await {
        Ok(text) => println!("Chat response:\n{text}"),
        Err(e) => eprintln!("Chat error: {e}"),
    }
    Ok(())
}