use api_openai::ClientApiAccessors;
use api_openai::
{
Client,
components ::
{
responses ::{ CreateResponseRequest, ResponseInput, ResponseObject },
output ::{ OutputItem, OutputContentPart },
common ::ModelIdsResponses,
},
};
#[ tokio::main ]
/// Single-turn chat example against the OpenAI Responses API.
///
/// Demonstrates the full request-response cycle : loading credentials,
/// building an environment and client, constructing a configured request,
/// sending it, and printing the AI's reply plus token-usage accounting.
///
/// # Errors
///
/// Returns any error from JSON serialization of the request or from the
/// API call itself. Credential / environment / client construction failures
/// are treated as unrecoverable setup bugs and panic via `expect`.
async fn main() -> Result< (), Box< dyn std::error::Error > >
{
println!( "OpenAI Basic Chat Example" );
println!( "{}", "=".repeat( 50 ) );
println!( "Demonstrating single-turn conversation with comprehensive configuration\n" );
// Credentials : environment variable first, then workspace secrets file.
let secret = api_openai::secret::Secret::load_with_fallbacks( "OPENAI_API_KEY" )
.expect( "Failed to load OPENAI_API_KEY. Please set environment variable or add to workspace secrets file." );
let env = api_openai::environment::OpenaiEnvironmentImpl::build(
secret,
None,
None,
api_openai ::environment::OpenAIRecommended::base_url().to_string(),
api_openai ::environment::OpenAIRecommended::realtime_base_url().to_string()
).expect( "Failed to create environment" );
let client = Client::build( env ).expect( "Failed to create client" );
// Single source of truth for the model id — used both in the request and
// in the status printout below, so the two can never disagree.
let model_name = "gpt-5.1-chat-latest";
let request = CreateResponseRequest::former()
.model( ModelIdsResponses::from( model_name.to_string() ) )
.input( ResponseInput::String( "Hello! Can you explain what artificial intelligence is in simple terms?".to_string() ) )
.temperature( 0.7 )
.max_output_tokens( 1024 )
.top_p( 0.95 )
.form();
// Show the exact JSON payload the client will send, for transparency.
println!( "=== Request JSON Payload ===" );
let json_payload = serde_json::to_string_pretty( &request )?;
println!( "{json_payload}" );
println!();
println!( "=== cURL Command Generation ===" );
println!( "Note : cURL command generation available through response client interface" );
println!();
println!( "Sending request to OpenAI API..." );
// Echo the request configuration (kept in sync with the builder above).
println!( "Model : {model_name}" );
println!( "Temperature : 0.7 (balanced creativity)" );
println!( "Max tokens : 1024" );
println!( "Top-p : 0.95 (nucleus sampling)" );
println!();
let response : ResponseObject = client.responses().create( request ).await?;
println!( "✅ Response received successfully!\n" );
println!( "=== AI Response ===" );
// The Responses API returns a list of output items; this example only
// inspects the first item and, within it, the first content part.
if let Some( OutputItem::Message( message_struct ) ) = response.output.first()
{
if let Some( OutputContentPart::Text { text, .. } ) = message_struct.content.first()
{
println!( "{text}" );
}
else
{
println!( "No text content found in response." );
}
}
else
{
println!( "No message output received in response." );
}
println!();
// Usage data is optional in the response; completion_tokens may also be
// absent independently of the other counters.
if let Some( usage ) = response.usage
{
println!( "=== Token Usage Information ===" );
println!( "Input tokens : {}", usage.prompt_tokens );
if let Some( completion_tokens ) = usage.completion_tokens
{
println!( "Output tokens : {completion_tokens}" );
}
println!( "Total tokens : {}", usage.total_tokens );
println!( "\n💡 Note : Token usage directly impacts API costs." );
println!( "Monitor usage to optimize both performance and expenses." );
}
else
{
println!( "No token usage information available." );
}
println!( "\n✨ Single-turn conversation completed successfully!" );
println!( "This demonstrates the fundamental OpenAI API request-response cycle." );
Ok( () )
}