use chat_gpt_lib_rs::OpenAIClient;
use chat_gpt_lib_rs::api_resources::chat::{
ChatMessage, ChatRole, CreateChatCompletionRequest, create_chat_completion,
};
use chat_gpt_lib_rs::api_resources::models::Model;
use chat_gpt_lib_rs::error::OpenAIError;
/// Example: send a single chat completion request to the OpenAI API and
/// print the assistant's reply.
///
/// Loads environment variables (e.g. the API key) from a `.env` file when one
/// is present, issues the request, then prints each returned choice followed
/// by token-usage statistics if the API reported them.
#[tokio::main]
async fn main() -> Result<(), OpenAIError> {
    // Best-effort load of `.env`; a missing file is not an error.
    dotenvy::dotenv().ok();

    // `None` lets the client read the API key from the environment.
    let client = OpenAIClient::new(None)?;

    let system_message = ChatMessage {
        role: ChatRole::System,
        content: "You are a cheerful and friendly assistant.".to_string(),
        name: None,
    };
    let user_message = ChatMessage {
        role: ChatRole::User,
        content: "Could you write me a quick recipe for chocolate chip cookies?".to_string(),
        name: None,
    };

    let request = CreateChatCompletionRequest {
        model: Model::Gpt45Preview,
        messages: vec![system_message, user_message],
        max_tokens: Some(150),
        temperature: Some(0.7),
        ..Default::default()
    };

    println!("Sending chat completion request...");
    let response = create_chat_completion(&client, &request).await?;

    // The API may return several choices; print each one (typically just one).
    for (index, choice) in response.choices.iter().enumerate() {
        println!("\n== Chat Choice {} ==", index);
        println!("Assistant: {}", choice.message.content);
        if let Some(reason) = &choice.finish_reason {
            println!("Finish reason: {}", reason);
        }
    }

    // Token accounting is optional in the response payload.
    if let Some(usage) = &response.usage {
        println!(
            "\nUsage => prompt_tokens: {}, completion_tokens: {}, total_tokens: {}",
            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
        );
    }

    Ok(())
}