// openai_struct/models/realtime_client_event_input_audio_buffer_append.rs
/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * OpenAPI spec version: 2.3.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */
10
/// RealtimeClientEventInputAudioBufferAppend : Send this event to append audio bytes to the input audio buffer. The audio buffer is temporary storage you can write to and later commit. In Server VAD mode, the audio buffer is used to detect speech and the server will decide when to commit. When Server VAD is disabled, you must commit the audio buffer manually. The client may choose how much audio to place in each event up to a maximum of 15 MiB, for example streaming smaller chunks from the client may allow the VAD to be more responsive. Unlike most other client events, the server will not send a confirmation response to this event.
12
13#[allow(unused_imports)]
14use serde_json::Value;
15
16#[derive(Debug, Serialize, Deserialize)]
17pub struct RealtimeClientEventInputAudioBufferAppend {
18 /// Base64-encoded audio bytes. This must be in the format specified by the `input_audio_format` field in the session configuration.
19 #[serde(rename = "audio")]
20 pub audio: String,
21 /// Optional client-generated ID used to identify this event.
22 #[serde(rename = "event_id")]
23 pub event_id: Option<String>,
24 /// The event type, must be `input_audio_buffer.append`.
25 #[serde(rename = "type")]
26 pub _type: String,
27}