use std::fs;
use llm::{
builder::{LLMBackend, LLMBuilder}, chat::{ChatMessage, ImageMime}, };
/// Example: send a multimodal chat request to OpenAI via the `llm` crate.
///
/// Builds three user messages — a remote image URL, a local JPEG read from
/// disk, and a text prompt — then prints the model's reply (or the error).
///
/// # Errors
/// Returns an error if the client fails to build or the local image file
/// cannot be read. Chat-call failures are reported to stderr, not returned.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Fall back to a placeholder key so the example still builds a client
    // without the env var set; real requests need a valid OPENAI_API_KEY.
    // `unwrap_or_else` avoids allocating the default when the var is present.
    let api_key = std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "sk-TESTKEY".into());

    let llm = LLMBuilder::new()
        .backend(LLMBackend::OpenAI)
        .api_key(api_key)
        .model("gpt-4o")
        .max_tokens(1024)
        .temperature(0.7)
        // Propagate builder errors via `?` instead of panicking — `main`
        // already returns a Result, so callers of the binary get a clean exit.
        .build()?;

    // Read the local image bytes; include the path in the error message so a
    // missing file is diagnosable without a panic/backtrace.
    let content = fs::read("./examples/image001.jpg")
        .map_err(|e| format!("failed to read ./examples/image001.jpg: {e}"))?;

    let messages = vec![
        ChatMessage::user()
            .image_url("https://media.istockphoto.com/id/1443562748/fr/photo/mignon-chat-gingembre.jpg?s=612x612&w=0&k=20&c=ygNVVnqLk9V8BWu4VQ0D21u7-daIyHUoyKlCcx3K1E8=")
            .build(),
        ChatMessage::user().image(ImageMime::JPEG, content).build(),
        ChatMessage::user()
            .content("What is in this image (image 1 and 2)?")
            .build(),
    ];

    // Best-effort: report chat failures to stderr rather than aborting,
    // matching the original example's behavior.
    match llm.chat(&messages).await {
        Ok(text) => println!("Chat response:\n{text}"),
        Err(e) => eprintln!("Chat error: {e}"),
    }
    Ok(())
}