use api_openai::ClientApiAccessors;
#[ allow( unused_imports ) ]
use api_openai::
{
client ::Client,
error ::OpenAIError,
api ::realtime::{ RealtimeClient, ws::WsSession },
components ::realtime_shared::
{
RealtimeSessionCreateRequest,
RealtimeClientEventInputAudioBufferAppend,
RealtimeClientEventInputAudioBufferClear,
RealtimeServerEvent,
},
components ::common::ModelIds,
};
use tracing_subscriber::{ EnvFilter, fmt }; use base64::{ engine::general_purpose::STANDARD as base64_engine, Engine as _ };
#[ tokio::main( flavor = "current_thread" ) ]
async fn main() -> Result< (), OpenAIError >
{
  // Example : create a realtime session, append a little audio to the server-side
  // input buffer, then clear the buffer and wait for the `input_audio_buffer.cleared`
  // confirmation event over the WebSocket.

  // Route `api_openai` trace-level logs through the subscriber so the
  // request/response flow of the realtime session is visible.
  fmt()
  .with_env_filter( EnvFilter::from_default_env().add_directive( "api_openai=trace".parse().unwrap() ) )
  .init();

  // Load credentials (e.g. the API key) from the local secret file, if present.
  dotenv ::from_filename( "./secret/-secret.sh" ).ok();

  tracing ::info!( "Initializing client..." );
  let client = Client::new();

  tracing ::info!( "Building realtime session request..." );
  let request = RealtimeSessionCreateRequest::former()
  .model( "gpt-4o-realtime-preview".to_string() )
  .input_audio_format( "pcm16" )
  .temperature( 0.7 )
  .form();

  tracing ::info!( "Sending request to OpenAI API to create session..." );
  let session = client.realtime().create( request ).await?;

  // The ephemeral client secret returned with the session authorizes the
  // WebSocket connection.
  tracing ::info!( "Creating Realtime WebSocket Session Client..." );
  let token = session.client_secret.value;
  let session_client = WsSession::connect( client.environment().clone(), Some( &token ) ).await?;

  // NOTE(review) : the session is configured for raw `pcm16`, but the file is sent
  // verbatim, RIFF/WAV header included. That is fine for exercising the append/clear
  // round-trip, but strip the header if valid audio payload ever matters here.
  // `include_bytes!` already yields `&[u8; N]`, so no extra borrow is needed.
  let dummy_audio_bytes = include_bytes!( "data/example.wav" );
  let audio_base64 = base64_engine.encode( dummy_audio_bytes );
  let iaba_append = RealtimeClientEventInputAudioBufferAppend::former()
  .audio( audio_base64 )
  .form();

  tracing ::info!( "Sending a preliminary input_audio_buffer.append event..." );
  session_client.input_audio_buffer_append( iaba_append ).await?;
  // Give the server a moment to register the appended audio before clearing it.
  tokio ::time::sleep( tokio::time::Duration::from_millis( 50 ) ).await;
  tracing ::info!( "Preliminary audio append sent." );

  // Tag the clear event with a client-chosen id so the confirmation can be correlated.
  let client_event_id = "clear-example-id";
  let iabc_clear = RealtimeClientEventInputAudioBufferClear::former()
  .event_id( client_event_id )
  .form();
  tracing ::info!( event_id = %client_event_id, "Sending input_audio_buffer.clear event..." );
  session_client.input_audio_buffer_clear( iabc_clear ).await?;

  tracing ::info!( "Waiting for input_audio_buffer.cleared confirmation..." );
  // Drain server events until the `input_audio_buffer.cleared` confirmation arrives.
  // The loop breaks with `true` on confirmation, `false` if the server closes the
  // stream first, and returns early on a read error.
  let confirmation_received = loop
  {
    match session_client.read_event().await
    {
      Ok( Some( RealtimeServerEvent::InputAudioBufferCleared( cleared_event ) ) ) =>
      {
        println!( "\n--- Clear Confirmation Received ---" );
        println!( "{cleared_event:?}" );
        println!( "Successfully received input_audio_buffer.cleared confirmation." );
        break true;
      }
      Ok( Some( event ) ) =>
      {
        // Any other server event is logged and skipped while we wait.
        println!( "\n--- Received Other Event (while waiting for clear confirmation) --- \n{event:?}" );
      }
      Ok( None ) =>
      {
        println!( "\nWebSocket connection closed by server." );
        break false;
      }
      Err( e ) =>
      {
        eprintln!( "\nError reading from WebSocket : {e:?}" );
        return Err( e );
      }
    }
  };

  if !confirmation_received
  {
    eprintln!( "Loop finished without receiving input_audio_buffer.cleared confirmation." );
    return Err( OpenAIError::WsInvalidMessage( "Did not receive expected clear confirmation".to_string() ) );
  }

  Ok( () )
}