model_gateway_rs/model/openai.rs

use serde::{Deserialize, Serialize};

use crate::model::llm::{ChatMessage, LlmOutput};

/// A single choice in a chat completion response.
#[derive(Debug, Deserialize)]
pub struct ChatChoice {
    pub index: u32,
    pub message: ChatMessage,
    pub finish_reason: Option<String>,
}

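/// Token counts reported by the API for a completion.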
#[derive(Debug, Deserialize)]
pub struct ChatUsage {
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
}

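/// Request body for a chat completion call to an OpenAI-compatible API.
///
/// Optional fields are skipped during serialization when they are `None`,
/// so the JSON only carries keys the caller actually set. A minimal sketch
/// (marked `ignore` since `serde_json` may not be a dev-dependency here):
///
/// ```ignore
/// let req = OpenAiChatRequest {
///     model: "gpt-4o-mini".to_string(), // illustrative model name
///     messages: vec![],                 // real calls pass ChatMessages
///     stream: None,
///     temperature: None,
/// };
/// let json = serde_json::to_string(&req).unwrap();
/// // `stream` and `temperature` were `None`, so they are omitted entirely.
/// assert!(!json.contains("temperature"));
/// ```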
#[derive(Debug, Clone, Serialize)]
pub struct OpenAiChatRequest {
    pub model: String,
    pub messages: Vec<ChatMessage>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,
}

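/// Response body returned by the chat completions endpoint.
///
/// `usage` stays optional because not every OpenAI-compatible backend
/// reports token accounting.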
#[derive(Debug, Deserialize)]
pub struct OpenAiChatResponse {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub model: String,
    pub choices: Vec<ChatChoice>,
    pub usage: Option<ChatUsage>,
}

impl OpenAiChatResponse {
    /// Returns the first choice's message, if the response has any choices.
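    ///
    /// ```ignore
    /// // Sketch: `resp` is assumed to be a deserialized `OpenAiChatResponse`.
    /// if let Some(message) = resp.first_message() {
    ///     // hand the message to the gateway caller
    /// }
    /// ```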
    pub fn first_message(&self) -> Option<ChatMessage> {
        self.choices.first().map(|choice| choice.message.clone())
    }
}

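/// Flattens the provider-specific response into the gateway's
/// provider-agnostic `LlmOutput`: the first choice's message plus the
/// total token count whenever `usage` was reported.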
impl From<OpenAiChatResponse> for LlmOutput {
    fn from(response: OpenAiChatResponse) -> Self {
        let message = response.first_message();
        let usage = response.usage.map(|u| u.total_tokens);
        LlmOutput { message, usage }
    }
}