/*
* OpenAI API
*
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
*
* The version of the OpenAPI document: 2.3.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
use serde::{Deserialize, Serialize};
/// RealtimeServerEventInputAudioBufferSpeechStarted : Sent by the server when in `server_vad` mode to indicate that speech has been detected in the audio buffer. This can happen any time audio is added to the buffer (unless speech is already detected). The client may want to use this event to interrupt audio playback or provide visual feedback to the user. The client should expect to receive an `input_audio_buffer.speech_stopped` event when speech stops. The `item_id` property is the ID of the user message item that will be created when speech stops and will also be included in the `input_audio_buffer.speech_stopped` event (unless the client manually commits the audio buffer during VAD activation).
// NOTE: serde `rename` attributes pin the wire-format keys; `bon::Builder` derives a
// typed builder, so field order here also fixes the generated builder's setter order.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, bon::Builder)]
pub struct RealtimeServerEventInputAudioBufferSpeechStarted {
/// The unique ID of the server event.
#[serde(rename = "event_id")]
pub event_id: String,
/// The event type, must be `input_audio_buffer.speech_started`.
#[serde(rename = "type")]
pub r#type: Type,
/// Milliseconds from the start of all audio written to the buffer during the session when speech was first detected. This will correspond to the beginning of audio sent to the model, and thus includes the `prefix_padding_ms` configured in the Session.
#[serde(rename = "audio_start_ms")]
pub audio_start_ms: i32,
/// The ID of the user message item that will be created when speech stops.
#[serde(rename = "item_id")]
pub item_id: String,
}
impl RealtimeServerEventInputAudioBufferSpeechStarted {
/// Sent by the server when in `server_vad` mode to indicate that speech has been detected in the audio buffer. This can happen any time audio is added to the buffer (unless speech is already detected). The client may want to use this event to interrupt audio playback or provide visual feedback to the user. The client should expect to receive a `input_audio_buffer.speech_stopped` event when speech stops. The `item_id` property is the ID of the user message item that will be created when speech stops and will also be included in the `input_audio_buffer.speech_stopped` event (unless the client manually commits the audio buffer during VAD activation).
pub fn new(
event_id: String,
r#type: Type,
audio_start_ms: i32,
item_id: String,
) -> RealtimeServerEventInputAudioBufferSpeechStarted {
RealtimeServerEventInputAudioBufferSpeechStarted {
event_id,
r#type,
audio_start_ms,
item_id,
}
}
}
/// The event type, must be `input_audio_buffer.speech_started`.
//
// The hand-written `impl Default for Type` was folded into the derive: since
// Rust 1.62, `#[derive(Default)]` on an enum with a `#[default]`-marked variant
// is the idiomatic, less error-prone equivalent.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Type {
    /// Serialized on the wire as `"input_audio_buffer.speech_started"`.
    #[default]
    #[serde(rename = "input_audio_buffer.speech_started")]
    InputAudioBufferSpeechStarted,
}
impl std::fmt::Display for RealtimeServerEventInputAudioBufferSpeechStarted {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match serde_json::to_string(self) {
Ok(s) => write!(f, "{}", s),
Err(_) => Err(std::fmt::Error),
}
}
}