use futures_util::StreamExt;
use std::io::{self, Write};
use chat_gpt_lib_rs::OpenAIClient;
use chat_gpt_lib_rs::api_resources::chat::{
    ChatMessage, ChatRole, CreateChatCompletionRequest, create_chat_completion_stream,
};
use chat_gpt_lib_rs::error::OpenAIError;

#[tokio::main]
async fn main() -> Result<(), OpenAIError> {
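    // Load environment variables (e.g. OPENAI_API_KEY) from a local .env file, if present.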
    dotenvy::dotenv().ok();
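
    // With `None`, the client falls back to the OPENAI_API_KEY environment variable.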
    let client = OpenAIClient::new(None)?;

    let request = CreateChatCompletionRequest {
        model: "gpt-3.5-turbo".into(),
        messages: vec![
            ChatMessage {
                role: ChatRole::System,
                content: "You are a cheerful and friendly assistant.".to_string(),
                name: None,
            },
            ChatMessage {
                role: ChatRole::User,
                content: "Could you write me a quick recipe for chocolate chip cookies?"
                    .to_string(),
                name: None,
            },
        ],
        max_tokens: Some(150),
        temperature: Some(0.7),
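        // Ask the API to stream tokens back incrementally instead of waiting
        // for the full completion.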
        stream: Some(true),
        ..Default::default()
    };

println!("Sending chat completion streaming request...");
let mut stream = create_chat_completion_stream(&client, &request).await?;
println!("\n\n\n*** Streaming completion: *** \n");
    let mut full_response = String::new();

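    // Each streamed chunk carries a `delta` holding the next fragment of the reply.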
    while let Some(chunk_result) = stream.next().await {
        match chunk_result {
            Ok(chunk) => {
                if let Some(choice) = chunk.choices.first() {
                    if let Some(content) = &choice.delta.content {
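                        // Print the fragment and flush stdout so the text appears
                        // immediately rather than waiting for a newline.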
print!("{}", content);
io::stdout().flush().unwrap();
full_response.push_str(content);
}
}
}
Err(e) => {
eprintln!("Stream error: {:?}", e);
}
}
}
println!("\n\nFull chat response:\n{}", full_response);
Ok(())
}