openai_struct/models/realtime_response_usage.rs
/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * OpenAPI spec version: 2.3.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

/// RealtimeResponseUsage : Usage statistics for the Response; these correspond to billing. A Realtime API session maintains a conversation context and appends new Items to the Conversation, so output from previous turns (text and audio tokens) becomes input for later turns.

#[allow(unused_imports)]
use serde_json::Value;

#[derive(Debug, Serialize, Deserialize)]
pub struct RealtimeResponseUsage {
    /// Details about the input tokens used in the Response.
    #[serde(rename = "input_token_details")]
    pub input_token_details: Option<crate::models::RealtimeResponseUsageInputTokenDetails>,
    /// The number of input tokens used in the Response, including text and audio tokens.
    #[serde(rename = "input_tokens")]
    pub input_tokens: Option<i32>,
    /// Details about the output tokens used in the Response.
    #[serde(rename = "output_token_details")]
    pub output_token_details: Option<crate::models::RealtimeResponseUsageOutputTokenDetails>,
    /// The number of output tokens sent in the Response, including text and audio tokens.
    #[serde(rename = "output_tokens")]
    pub output_tokens: Option<i32>,
    /// The total number of tokens in the Response including input and output text and audio tokens.
    #[serde(rename = "total_tokens")]
    pub total_tokens: Option<i32>,
}
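
// The test below is a minimal sketch, not part of the generated model: it assumes only the
// `serde_json` crate already used above and shows how a usage object (for example, one carried
// on a `response.done` server event) deserializes into `RealtimeResponseUsage`. The token
// counts in the payload are made up for illustration.
#[cfg(test)]
mod tests {
    use super::RealtimeResponseUsage;
    use serde_json::from_str;

    #[test]
    fn deserializes_usage_payload() {
        // Hypothetical usage payload; only the top-level counters are present.
        let json = r#"{
            "input_tokens": 120,
            "output_tokens": 48,
            "total_tokens": 168
        }"#;

        let usage: RealtimeResponseUsage =
            from_str(json).expect("usage payload should deserialize");

        assert_eq!(usage.input_tokens, Some(120));
        assert_eq!(usage.output_tokens, Some(48));
        assert_eq!(usage.total_tokens, Some(168));
        // Optional detail objects that are absent from the payload come back as None.
        assert!(usage.input_token_details.is_none());
        assert!(usage.output_token_details.is_none());
    }
}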