use llm::{
builder::{LLMBackend, LLMBuilder},
chat::ChatMessage,
};
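
// Minimal chat example against a locally running Ollama server via the `llm` crate.
// Assumes an Ollama instance is reachable (default http://127.0.0.1:11434) and that the
// `llama3.2:latest` model has already been pulled, e.g. with `ollama pull llama3.2`.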
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read the server URL from the environment, falling back to the local default.
    let base_url =
        std::env::var("OLLAMA_URL").unwrap_or_else(|_| "http://127.0.0.1:11434".into());
    let llm = LLMBuilder::new()
        .backend(LLMBackend::Ollama)
        .base_url(base_url)
        .model("llama3.2:latest")
        .max_tokens(1000)
        .temperature(0.7)
        .build()
        .expect("Failed to build LLM (Ollama)");
    // Seed the request with a short multi-turn conversation history.
    let messages = vec![
        ChatMessage::user()
            .content("Hello, how do I run a local LLM in Rust?")
            .build(),
        ChatMessage::assistant()
            .content("One way is to use Ollama with a local model!")
            .build(),
        ChatMessage::user()
            .content("Tell me more about that")
            .build(),
    ];
    // Send the conversation and print the model's reply, or report the error.
    match llm.chat(&messages).await {
        Ok(text) => println!("Ollama chat response:\n{text}"),
        Err(e) => eprintln!("Chat error: {e}"),
    }
Ok(())
}