openai_tools/common/usage.rs

1use serde::{Deserialize, Serialize};
2use std::collections::HashMap;
3
/// Per-category breakdown of output (completion) tokens in an API response.
///
/// Every field is `Option` because the API only reports these counts for
/// some endpoints/models; absent fields deserialize as `None`.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct CompletionTokenDetails {
    // Tokens attributed to model reasoning, when reported.
    pub reasoning_tokens: Option<usize>,
    // Tokens attributed to audio output, when reported.
    pub audio_tokens: Option<usize>,
    // Predicted-output tokens accepted into the completion, when reported.
    pub accepted_prediction_tokens: Option<usize>,
    // Predicted-output tokens that were rejected, when reported.
    pub rejected_prediction_tokens: Option<usize>,
}
11
/// Per-category breakdown of input (prompt) tokens in an API response.
///
/// Fields are `Option` because the API only reports them for some
/// endpoints/models; absent fields deserialize as `None`.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct PromptTokenDetails {
    // Prompt tokens served from the provider's cache, when reported.
    pub cached_tokens: Option<usize>,
    // Prompt tokens attributed to audio input, when reported.
    pub audio_tokens: Option<usize>,
}
17
18/// Token usage statistics for OpenAI API requests.
19///
20/// This structure contains detailed information about token consumption during
21/// API requests, including both input (prompt) and output (completion) tokens.
22/// Different fields may be populated depending on the specific API endpoint
23/// and model used.
24///
/// Token usage statistics for OpenAI API requests.
///
/// This structure contains detailed information about token consumption during
/// API requests, including both input (prompt) and output (completion) tokens.
/// Different fields may be populated depending on the specific API endpoint
/// and model used.
///
/// NOTE(review): the struct carries both the `input_tokens`/`output_tokens`
/// naming and the `prompt_tokens`/`completion_tokens` naming — presumably to
/// cover two endpoint families that report usage under different keys; any
/// given response is expected to fill only one of the two sets. Confirm
/// against the calling code.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Usage {
    // Total input tokens ("input_tokens" response key), when reported.
    pub input_tokens: Option<usize>,
    // Free-form category -> count map for input tokens, when reported.
    pub input_tokens_details: Option<HashMap<String, usize>>,
    // Total output tokens ("output_tokens" response key), when reported.
    pub output_tokens: Option<usize>,
    // Free-form category -> count map for output tokens, when reported.
    pub output_tokens_details: Option<HashMap<String, usize>>,
    // Total prompt tokens ("prompt_tokens" response key), when reported.
    pub prompt_tokens: Option<usize>,
    // Structured prompt-token breakdown, when reported.
    pub prompt_tokens_details: Option<PromptTokenDetails>,
    // Total completion tokens ("completion_tokens" response key), when reported.
    pub completion_tokens: Option<usize>,
    // Grand total of input + output tokens, when reported.
    pub total_tokens: Option<usize>,
    // Structured completion-token breakdown, when reported.
    pub completion_tokens_details: Option<CompletionTokenDetails>,
}
36}