// async_openai/types/realtime/session.rs
use serde::{Deserialize, Serialize};

use crate::types::{
    responses::{Prompt, ToolChoiceFunction, ToolChoiceMCP, ToolChoiceOptions},
    MCPTool,
};

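/// Configuration for input audio transcription; see the field docs below.
///
/// A minimal construction sketch. The module path is assumed for illustration,
/// which is why the doctest is marked `ignore`:
///
/// ```ignore
/// use async_openai::types::realtime::AudioTranscription;
///
/// let transcription = AudioTranscription {
///     language: Some("en".into()),
///     model: Some("gpt-4o-transcribe".into()),
///     prompt: Some("expect words related to technology".into()),
/// };
/// ```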
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct AudioTranscription {
    /// The language of the input audio. Supplying the input language in
    /// [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) format will improve accuracy and latency.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub language: Option<String>,
    /// The model to use for transcription. Current options are `whisper-1`,
    /// `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
    /// Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// An optional text to guide the model's style or continue a previous audio segment.
    /// For `whisper-1`, the [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
    /// For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the prompt is
    /// a free text string, for example "expect words related to technology".
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<String>,
}

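/// Configuration for turn detection: either server VAD or semantic VAD.
///
/// A construction sketch with the documented server VAD defaults spelled out
/// explicitly (module path assumed, hence `ignore`):
///
/// ```ignore
/// use async_openai::types::realtime::RealtimeTurnDetection;
///
/// let vad = RealtimeTurnDetection::ServerVAD {
///     create_response: Some(true),
///     idle_timeout_ms: None,
///     interrupt_response: Some(true),
///     prefix_padding_ms: Some(300),
///     silence_duration_ms: Some(500),
///     threshold: Some(0.5),
/// };
/// ```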
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum RealtimeTurnDetection {
    /// Server-side voice activity detection (VAD), which flips on when user speech is detected
    /// and off after a period of silence.
    #[serde(rename = "server_vad")]
    ServerVAD {
        /// Whether or not to automatically generate a response when a VAD stop event occurs.
        #[serde(skip_serializing_if = "Option::is_none")]
        create_response: Option<bool>,

        /// Optional timeout after which a model response will be triggered automatically.
        /// This is useful for situations in which a long pause from the user is unexpected,
        /// such as a phone call. The model will effectively prompt the user to continue the
        /// conversation based on the current context.
        ///
        /// The timeout value will be applied after the last model response's audio has finished
        /// playing, i.e. it's set to the `response.done` time plus audio playback duration.
        ///
        /// An `input_audio_buffer.timeout_triggered` event (plus events associated with the
        /// Response) will be emitted when the timeout is reached. Idle timeout is currently
        /// only supported for `server_vad` mode.
        #[serde(skip_serializing_if = "Option::is_none")]
        idle_timeout_ms: Option<u32>,

        /// Whether or not to automatically interrupt any ongoing response with output to
        /// the default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
        #[serde(skip_serializing_if = "Option::is_none")]
        interrupt_response: Option<bool>,

        /// Used only for `server_vad` mode. Amount of audio to include before the VAD detected
        /// speech (in milliseconds). Defaults to 300ms.
        #[serde(skip_serializing_if = "Option::is_none")]
        prefix_padding_ms: Option<u32>,

        /// Used only for `server_vad` mode. Duration of silence to detect speech stop
        /// (in milliseconds). Defaults to 500ms. With shorter values the model will respond
        /// more quickly, but may jump in on short pauses from the user.
        #[serde(skip_serializing_if = "Option::is_none")]
        silence_duration_ms: Option<u32>,

        /// Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0);
        /// defaults to 0.5. A higher threshold will require louder audio to activate
        /// the model, and thus might perform better in noisy environments.
        #[serde(skip_serializing_if = "Option::is_none")]
        threshold: Option<f32>,
    },

    /// Server-side semantic turn detection, which uses a model to determine when the user has
    /// finished speaking.
    #[serde(rename = "semantic_vad")]
    SemanticVAD {
        /// Whether or not to automatically generate a response when a VAD stop event occurs.
        #[serde(skip_serializing_if = "Option::is_none", default)]
        create_response: Option<bool>,

        /// Used only for `semantic_vad` mode. The eagerness of the model to respond.
        /// `low` will wait longer for the user to continue speaking, `high` will respond more
        /// quickly. `auto` is the default and is equivalent to `medium`. `low`, `medium`, and
        /// `high` have max timeouts of 8s, 4s, and 2s respectively.
        eagerness: String,

        /// Whether or not to automatically interrupt any ongoing response with output to
        /// the default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs.
        #[serde(skip_serializing_if = "Option::is_none", default)]
        interrupt_response: Option<bool>,
    },
}

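/// Maximum output tokens: either the literal string `inf` or an integer.
///
/// A serialization sketch (module path assumed, hence `ignore`). The
/// `#[serde(untagged)]` variant lets the same field accept both JSON shapes:
///
/// ```ignore
/// use async_openai::types::realtime::MaxOutputTokens;
///
/// assert_eq!(serde_json::to_string(&MaxOutputTokens::Inf).unwrap(), "\"inf\"");
/// assert_eq!(serde_json::to_string(&MaxOutputTokens::Num(4096)).unwrap(), "4096");
/// ```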
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum MaxOutputTokens {
    #[serde(rename = "inf")]
    Inf,
    #[serde(untagged)]
    Num(u16),
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct RealtimeFunctionTool {
    /// The name of the function.
    pub name: String,
    /// The description of the function, including guidance on when and how to call it,
    /// and guidance about what to tell the user when calling (if anything).
    pub description: String,
    /// Parameters of the function in JSON Schema.
    pub parameters: serde_json::Value,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum RealtimeTool {
    #[serde(rename = "function")]
    Function(RealtimeFunctionTool),
    /// Give the model access to additional tools via remote Model Context Protocol (MCP) servers.
    /// [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
    #[serde(rename = "mcp")]
    MCP(MCPTool),
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum FunctionType {
    Function,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolChoice {
    /// Use this option to force the model to call a specific function.
    Function(ToolChoiceFunction),
    /// Use this option to force the model to call a specific tool on a remote MCP server.
    Mcp(ToolChoiceMCP),

    #[serde(untagged)]
    Mode(ToolChoiceOptions),
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum RealtimeVoice {
    Alloy,
    Ash,
    Ballad,
    Coral,
    Echo,
    Sage,
    Shimmer,
    Verse,
    Marin,
    Cedar,
    #[serde(untagged)]
    Other(String),
}

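/// Audio formats supported by the Realtime API, tagged by a MIME-style `type`.
///
/// A serialization sketch (module path assumed, hence `ignore`):
///
/// ```ignore
/// use async_openai::types::realtime::RealtimeAudioFormats;
///
/// let pcm = RealtimeAudioFormats::PCMAudioFormat { rate: 24000 };
/// // Serializes as {"type":"audio/pcm","rate":24000}.
/// println!("{}", serde_json::to_string(&pcm).unwrap());
/// ```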
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum RealtimeAudioFormats {
    /// The PCM audio format. Only a 24kHz sample rate is supported.
    #[serde(rename = "audio/pcm")]
    PCMAudioFormat {
        /// The sample rate of the audio. Always 24000.
        rate: u32,
    },
    /// The G.711 μ-law format.
    #[serde(rename = "audio/pcmu")]
    PCMUAudioFormat,
    /// The G.711 A-law format.
    #[serde(rename = "audio/pcma")]
    PCMAAudioFormat,
}

#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct G711ULAWAudioFormat {
    pub sample_rate: u32,
    pub channels: u32,
}

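/// Configuration for session input audio: format, noise reduction, transcription,
/// and turn detection.
///
/// A construction sketch (module path assumed, hence `ignore`):
///
/// ```ignore
/// use async_openai::types::realtime::{
///     AudioInput, AudioTranscription, NoiseReductionType, RealtimeAudioFormats,
///     RealtimeTurnDetection,
/// };
///
/// let input = AudioInput {
///     format: RealtimeAudioFormats::PCMAudioFormat { rate: 24000 },
///     noise_reduction: Some(NoiseReductionType::NearField),
///     transcription: Some(AudioTranscription::default()),
///     turn_detection: RealtimeTurnDetection::SemanticVAD {
///         create_response: Some(true),
///         eagerness: "auto".into(),
///         interrupt_response: Some(true),
///     },
/// };
/// ```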
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct AudioInput {
    /// The format of the input audio.
    pub format: RealtimeAudioFormats,
    /// Configuration for input audio noise reduction. This can be set to null to turn off.
    /// Noise reduction filters audio added to the input audio buffer before it is sent to VAD
    /// and the model. Filtering the audio can improve VAD and turn detection accuracy
    /// (reducing false positives) and model performance by improving perception of the
    /// input audio.
    pub noise_reduction: Option<NoiseReductionType>,
    /// Configuration for input audio transcription. Defaults to off, and can be set back to
    /// `null` to turn off once enabled.
    /// Input audio transcription is not native to the model, since the model consumes audio directly.
    /// Transcription runs asynchronously through [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
    /// and should be treated as guidance of input audio content rather than precisely what the
    /// model heard. The client can optionally set the language and prompt for transcription;
    /// these offer additional guidance to the transcription service.
    pub transcription: Option<AudioTranscription>,

    /// Configuration for turn detection, either server VAD or semantic VAD. This can
    /// be set to null to turn off, in which case the client must manually trigger model response.
    ///
    /// Server VAD means that the model will detect the start and end of speech
    /// based on audio volume and respond at the end of user speech.
    ///
    /// Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD)
    /// to semantically estimate whether the user has finished speaking, then dynamically sets
    /// a timeout based on this probability. For example, if user audio trails off with "uhhm",
    /// the model will score a low probability of turn end and wait longer for the user to
    /// continue speaking. This can be useful for more natural conversations, but may have
    /// higher latency.
    pub turn_detection: RealtimeTurnDetection,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct AudioOutput {
    /// The format of the output audio.
    pub format: RealtimeAudioFormats,
    /// The speed of the model's spoken response as a multiple of the original speed.
    /// 1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed.
    /// This value can only be changed in between model turns, not while a response
    /// is in progress.
    ///
    /// This parameter is a post-processing adjustment to the audio after it is generated;
    /// it's also possible to prompt the model to speak faster or slower.
    pub speed: f32,
    /// The voice the model uses to respond. Voice cannot be changed during the session once
    /// the model has responded with audio at least once. Current voice options are
    /// `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`.
    /// We recommend `marin` and `cedar` for best quality.
    pub voice: RealtimeVoice,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Audio {
    pub input: AudioInput,
    pub output: AudioOutput,
}

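/// Tracing configuration: the literal string `auto` or a full configuration object.
///
/// A sketch (module path assumed, hence `ignore`):
///
/// ```ignore
/// use async_openai::types::realtime::{Tracing, TracingConfiguration};
///
/// // Serializes as the JSON string "auto".
/// let auto = Tracing::Auto;
/// // Serializes as an object via the untagged variant.
/// let custom = Tracing::Configuration(TracingConfiguration {
///     group_id: "customer-123".into(),
///     metadata: serde_json::json!({"env": "dev"}),
///     workflow_name: "support-call".into(),
/// });
/// ```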
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum Tracing {
    /// Enables tracing and sets default values for tracing configuration options. Always `auto`.
    Auto,

    #[serde(untagged)]
    Configuration(TracingConfiguration),
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TracingConfiguration {
    /// The group id to attach to this trace to enable filtering and grouping in the Traces Dashboard.
    pub group_id: String,
    /// The arbitrary metadata to attach to this trace to enable filtering in the Traces Dashboard.
    pub metadata: serde_json::Value,
    /// The name of the workflow to attach to this trace. This is used to name the trace in the Traces Dashboard.
    pub workflow_name: String,
}

/// The truncation strategy to use for the session.
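///
/// A sketch of the retention-ratio strategy (module path and the
/// `"retention_ratio"` tag string assumed, hence `ignore`):
///
/// ```ignore
/// use async_openai::types::realtime::{RealtimeTruncation, RetentionRatioTruncation};
///
/// // Keep 80% of post-instruction tokens whenever truncation kicks in.
/// let truncation = RealtimeTruncation::RetentionRatio(RetentionRatioTruncation {
///     retention_ratio: 0.8,
///     r#type: "retention_ratio".into(),
///     token_limits: None,
/// });
/// ```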
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
pub enum RealtimeTruncation {
    /// `auto` is the default truncation strategy.
    Auto,
    /// `disabled` will disable truncation and emit errors when the conversation exceeds the input
    /// token limit.
    Disabled,

    /// Retain a fraction of the conversation tokens when the conversation exceeds the input token
    /// limit. This allows you to amortize truncations across multiple turns, which can help improve
    /// cached token usage.
    #[serde(untagged)]
    RetentionRatio(RetentionRatioTruncation),
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct RetentionRatioTruncation {
    /// Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the conversation
    /// exceeds the input token limit. Setting this to 0.8 means that messages will be dropped
    /// until 80% of the maximum allowed tokens are used. This helps reduce the frequency of
    /// truncations and improve cache rates.
    pub retention_ratio: f32,

    /// Use retention ratio truncation.
    pub r#type: String,

    /// Optional custom token limits for this truncation strategy. If not provided, the model's
    /// default token limits will be used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub token_limits: Option<TokenLimits>,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TokenLimits {
    /// Maximum tokens allowed in the conversation after instructions (which include tool
    /// definitions). For example, setting this to 5,000 would mean that truncation would occur
    /// when the conversation exceeds 5,000 tokens after instructions. This cannot be higher
    /// than the model's context window size minus the maximum output tokens.
    pub post_instructions: u32,
}

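/// A session is either a full realtime session or a transcription-only session,
/// discriminated by the `type` tag.
///
/// A deserialization sketch (module path assumed, hence `ignore`):
///
/// ```ignore
/// use async_openai::types::realtime::Session;
///
/// let json = r#"{"type": "realtime", "model": "gpt-realtime"}"#;
/// let session: Session = serde_json::from_str(json).unwrap();
/// assert!(matches!(session, Session::RealtimeSession(_)));
/// ```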
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum Session {
    /// The type of session to create. Always `realtime` for the Realtime API.
    #[serde(rename = "realtime")]
    RealtimeSession(RealtimeSession),
    /// The type of session to create. Always `transcription` for transcription sessions.
    #[serde(rename = "transcription")]
    RealtimeTranscriptionSession(RealtimeTranscriptionSession),
}

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum RealtimeSessionConfiguration {
    #[serde(rename = "realtime")]
    Realtime(RealtimeSession),
}

impl Default for RealtimeSessionConfiguration {
    fn default() -> Self {
        Self::Realtime(RealtimeSession::default())
    }
}

/// Realtime session object configuration.
/// openapi spec type: RealtimeSessionCreateRequestGA
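///
/// Since all fields are optional, a construction sketch with struct-update
/// syntax (module path assumed, hence `ignore`):
///
/// ```ignore
/// use async_openai::types::realtime::{MaxOutputTokens, RealtimeSession};
///
/// let session = RealtimeSession {
///     model: Some("gpt-realtime".into()),
///     instructions: Some("Be extremely succinct.".into()),
///     output_modalities: Some(vec!["audio".into()]),
///     max_output_tokens: Some(MaxOutputTokens::Num(1024)),
///     ..Default::default()
/// };
/// ```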
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct RealtimeSession {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub audio: Option<Audio>,

    /// Additional fields to include in server outputs.
    ///
    /// `item.input_audio_transcription.logprobs`: Include logprobs for input audio transcription.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<String>>,

    /// The default system instructions (i.e. system message) prepended to model calls.
    /// This field allows the client to guide the model on desired responses.
    /// The model can be instructed on response content and format
    /// (e.g. "be extremely succinct", "act friendly", "here are examples of good responses")
    /// and on audio behavior (e.g. "talk quickly", "inject emotion into your voice",
    /// "laugh frequently"). The instructions are not guaranteed to be followed by the model, but
    /// they provide guidance to the model on the desired behavior.
    ///
    /// Note that the server sets default instructions which will be used if this field is not set
    /// and are visible in the `session.created` event at the start of the session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub instructions: Option<String>,

    /// Maximum number of output tokens for a single assistant response,
    /// inclusive of tool calls. Provide an integer between 1 and 4096 to limit output tokens,
    /// or `inf` for the maximum available tokens for a given model. Defaults to `inf`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<MaxOutputTokens>,

    /// The Realtime model used for this session.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,

    /// The set of modalities the model can respond with. It defaults to
    /// `["audio"]`, indicating that the model will respond with audio plus a transcript. `["text"]`
    /// can be used to make the model respond with text only. It is not possible to request both
    /// `text` and `audio` at the same time.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub output_modalities: Option<Vec<String>>,

    /// Reference to a prompt template and its variables.
    /// [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<Prompt>,

    /// How the model chooses tools. Provide one of the string modes or force a specific
    /// function/MCP tool.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoice>,

    /// Tools available to the model.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<RealtimeTool>>,

    /// The Realtime API can write session traces to the [Traces Dashboard](https://platform.openai.com/logs?api=traces).
    /// Set to null to disable tracing. Once tracing is enabled for a session, the configuration cannot be modified.
    ///
    /// `auto` will create a trace for the session with default values for the workflow name,
    /// group id, and metadata.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tracing: Option<Tracing>,

    /// When the number of tokens in a conversation exceeds the model's input token limit,
    /// the conversation will be truncated, meaning messages (starting from the oldest) will not
    /// be included in the model's context. A 32k context model with 4,096 max output tokens can
    /// only include 28,224 tokens in the context before truncation occurs. Clients can configure
    /// truncation behavior to truncate with a lower max token limit, which is an effective way to
    /// control token usage and cost. Truncation will reduce the number of cached tokens on the next
    /// turn (busting the cache), since messages are dropped from the beginning of the context.
    /// However, clients can also configure truncation to retain messages up to a fraction of the
    /// maximum context size, which will reduce the need for future truncations and thus improve
    /// the cache rate. Truncation can be disabled entirely, which means the server will never
    /// truncate but would instead return an error if the conversation exceeds the model's input
    /// token limit.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub truncation: Option<RealtimeTruncation>,
}

/// Type of noise reduction. `near_field` is for close-talking microphones such as
/// headphones, `far_field` is for far-field microphones such as laptop or conference
/// room microphones.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum NoiseReductionType {
    NearField,
    FarField,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TranscriptionAudio {
    pub input: AudioInput,
}

/// Realtime transcription session object configuration.
/// openapi spec type: RealtimeTranscriptionSessionCreateRequestGA
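///
/// A construction sketch (module path assumed, hence `ignore`):
///
/// ```ignore
/// use async_openai::types::realtime::{
///     AudioInput, AudioTranscription, RealtimeAudioFormats, RealtimeTranscriptionSession,
///     RealtimeTurnDetection, TranscriptionAudio,
/// };
///
/// let session = RealtimeTranscriptionSession {
///     audio: TranscriptionAudio {
///         input: AudioInput {
///             format: RealtimeAudioFormats::PCMAudioFormat { rate: 24000 },
///             noise_reduction: None,
///             transcription: Some(AudioTranscription::default()),
///             turn_detection: RealtimeTurnDetection::ServerVAD {
///                 create_response: Some(false),
///                 idle_timeout_ms: None,
///                 interrupt_response: Some(false),
///                 prefix_padding_ms: None,
///                 silence_duration_ms: None,
///                 threshold: None,
///             },
///         },
///     },
///     include: None,
/// };
/// ```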
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct RealtimeTranscriptionSession {
    /// Configuration for input and output audio.
    pub audio: TranscriptionAudio,

    /// Additional fields to include in server outputs.
    ///
    /// `item.input_audio_transcription.logprobs`: Include logprobs for input audio transcription.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<Vec<String>>,
}