use futures::StreamExt;
use std::io::Write;

use qai_sdk::*;
use qai_sdk::LanguageModel;

#[tokio::main]
async fn main() -> Result<()> {
    // Load API keys from a .env file, if one is present.
    dotenvy::dotenv().ok();
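
    // One prompt, reused for every provider below.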
    let prompt = Prompt {
        messages: vec![Message {
            role: Role::User,
            content: vec![Content::Text {
                text: "Write a haiku about Rust programming.".to_string(),
            }],
        }],
    };
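
    // Generation options shared across providers; only `model_id` is overridden per call.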
    let options = GenerateOptions {
        model_id: "gpt-4o-mini".to_string(),
        max_tokens: Some(100),
        temperature: Some(0.9),
        top_p: None,
        stop_sequences: None,
        tools: None,
        response_format: None,
    };
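
    // OpenAI: handle every StreamPart variant explicitly to show the full event set.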
println!("=== OpenAI Streaming ===");
let api_key = std::env::var("OPENAI_API_KEY").unwrap_or_default();
let model = OpenAIModel::new(api_key);
let mut stream = model
.generate_stream(prompt.clone(), options.clone())
.await?;
    while let Some(part) = stream.next().await {
        match part {
            StreamPart::TextDelta { delta } => {
                print!("{}", delta);
                // Flush so each text fragment appears as soon as it arrives.
                std::io::stdout().flush().ok();
            }
            StreamPart::ToolCallDelta {
                index,
                id,
                name,
                arguments_delta,
            } => {
                println!(
                    "\n[Tool Call #{} id={:?} name={:?}]: {:?}",
                    index, id, name, arguments_delta
                );
            }
            StreamPart::Usage { usage } => {
                println!(
                    "\n📊 Usage: {} prompt + {} completion tokens",
                    usage.prompt_tokens, usage.completion_tokens
                );
            }
            StreamPart::Finish { finish_reason } => {
                println!("\n✅ Finished: {}", finish_reason);
            }
            StreamPart::Error { message } => {
                eprintln!("\n❌ Error: {}", message);
            }
        }
    }
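
    // Anthropic: same prompt, different model id; tool-call and error parts are ignored here.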
println!("\n\n=== Anthropic Streaming ===");
let api_key = std::env::var("ANTHROPIC_API_KEY").unwrap_or_default();
let model = AnthropicModel::new(api_key);
let mut opts = options.clone();
opts.model_id = "claude-3-haiku-20240307".to_string();
let mut stream = model.generate_stream(prompt.clone(), opts).await?;
    while let Some(part) = stream.next().await {
        match part {
            StreamPart::TextDelta { delta } => {
                print!("{}", delta);
                std::io::stdout().flush().ok();
            }
            StreamPart::Usage { usage } => {
                println!(
                    "\n📊 Usage: {} prompt + {} completion tokens",
                    usage.prompt_tokens, usage.completion_tokens
                );
            }
            StreamPart::Finish { finish_reason } => {
                println!("\n✅ Finished: {}", finish_reason);
            }
            _ => {}
        }
    }
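
    // Google Gemini: print only text deltas and the finish reason.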
println!("\n\n=== Google Gemini Streaming ===");
let api_key = std::env::var("GOOGLE_GENERATIVE_AI_API_KEY").unwrap_or_default();
let model = GoogleModel::new(api_key);
let mut opts = options.clone();
opts.model_id = "gemini-1.5-flash".to_string();
let mut stream = model.generate_stream(prompt.clone(), opts).await?;
    while let Some(part) = stream.next().await {
        match part {
            StreamPart::TextDelta { delta } => {
                print!("{}", delta);
                std::io::stdout().flush().ok();
            }
            StreamPart::Finish { finish_reason } => {
                println!("\n✅ Finished: {}", finish_reason);
            }
            _ => {}
        }
    }
    println!();
    Ok(())
}