use futures::StreamExt;
use openmodex::{ChatCompletionRequest, ChatMessage, Error, OpenModex};
use std::io::Write;

#[tokio::main]
async fn main() -> Result<(), Error> {
    // Build the client from environment configuration (e.g. the API key).
    let client = OpenModex::from_env()?;

    // Request a streaming chat completion so tokens arrive incrementally.
    let mut stream = client
        .chat()
        .completions()
        .create_stream(
            ChatCompletionRequest::new("gpt-4o")
                .message(ChatMessage::user(
                    "Tell me a short story about a robot learning to paint.",
                ))
                .temperature(0.9)
                .max_tokens(512),
        )
        .await?;

    print!("Assistant: ");
    std::io::stdout().flush().ok();

    // Print each content delta as it arrives, flushing stdout so partial
    // lines are visible immediately instead of waiting for a newline.
    while let Some(chunk) = stream.next().await {
        let chunk = chunk?;
        if let Some(choice) = chunk.choices.first() {
            if let Some(content) = &choice.delta.content {
                print!("{content}");
                std::io::stdout().flush().ok();
            }
            // A finish reason marks the end of the response; terminate the line.
            if choice.finish_reason.is_some() {
                println!();
            }
        }
    }

    Ok(())
}