use qai_sdk::{
core::types::{Content, GenerateOptions, Message, Prompt, ProviderSettings, Role},
ollama::create_ollama,
LanguageModel,
Result,
};
/// Example: one-shot chat completion against a locally running Ollama model.
///
/// Builds a two-turn prompt (system + user), sends it through the SDK's
/// `LanguageModel::generate`, and prints the response text plus token usage.
#[tokio::main]
async fn main() -> Result<()> {
    // The model id appears in three places (chat handle, generate options,
    // and the status message); keep it in one constant so they can't drift.
    const MODEL_ID: &str = "llama3.2";

    // Connect to a local Ollama instance using the SDK defaults
    // (presumably the standard localhost endpoint — TODO confirm
    // against `ProviderSettings::default()`).
    let provider = create_ollama(ProviderSettings::default());
    let model = provider.chat(MODEL_ID);

    // System instruction followed by a single user request.
    let prompt = Prompt {
        messages: vec![
            Message {
                role: Role::System,
                content: vec![Content::Text {
                    text: "You are a helpful local AI assistant.".to_string(),
                }],
            },
            Message {
                role: Role::User,
                content: vec![Content::Text {
                    text: "Write a short haiku about open source models.".to_string(),
                }],
            },
        ],
    };

    println!("Sending request to local Ollama ({})...\n", MODEL_ID);

    // Mild temperature for some creative variation; everything else default.
    let options = GenerateOptions {
        model_id: MODEL_ID.to_string(),
        temperature: Some(0.7),
        ..Default::default()
    };

    let result = model.generate(prompt, options).await?;

    println!("Response:\n{}", result.text);
    println!(
        "\nTokens -> Prompt: {}, Completion: {}",
        result.usage.prompt_tokens, result.usage.completion_tokens
    );
    Ok(())
}