simple_llm_client/openai/models.rs

use serde::{Deserialize, Serialize};

/// Request body for the chat completions endpoint.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ChatCompletionRequest {
    pub model: String,
    pub messages: Vec<ChatMessage>,
    pub stream: bool,
    // Skip unset optional parameters instead of sending JSON nulls.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,
}

/// A single chat message; `role` is typically "system", "user", or "assistant".
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ChatMessage {
    pub role: String,
    pub content: String,
}

/// Response body for a chat completion. The same shape covers streaming,
/// where each chunk deserializes into one of these.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ChatCompletionResponse {
    pub id: String,
    pub model: String,
    pub created: u64,
    pub usage: Option<Usage>,
    pub choices: Vec<ChatChoice>,
    pub object: Option<String>,
}

/// Token accounting reported by the API.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Usage {
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
}

/// One completion choice. Non-streaming responses populate `message`;
/// streaming chunks populate `delta` with incremental content.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ChatChoice {
    pub index: Option<u32>,
    pub delta: Option<ChatMessage>,
    pub message: Option<ChatMessage>,
    pub finish_reason: Option<String>,
    pub citations: Option<Vec<Citation>>,
}

/// Source citation attached to a choice by some OpenAI-compatible providers;
/// not part of the core OpenAI schema. `start` and `end` presumably delimit
/// the cited span within the content.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Citation {
    pub start: i32,
    pub end: i32,
    pub url: String,
    pub text: String,
}
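
// --- Illustrative usage sketch (not part of the original file) ---
// A minimal round-trip check of the models above, assuming `serde_json` is
// available as a dev-dependency. The sample payload and model name are
// hypothetical; they only mirror the field names declared by these structs.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn request_omits_unset_optional_fields() {
        let req = ChatCompletionRequest {
            model: "gpt-4o-mini".to_string(),
            messages: vec![ChatMessage {
                role: "user".to_string(),
                content: "Hello!".to_string(),
            }],
            stream: false,
            temperature: None,
            max_tokens: Some(64),
        };
        let json = serde_json::to_string(&req).unwrap();
        // `temperature` is skipped entirely rather than serialized as null.
        assert!(!json.contains("temperature"));
        assert!(json.contains("\"max_tokens\":64"));
    }

    #[test]
    fn response_parses_non_streaming_choice() {
        // Hypothetical payload shaped like a non-streaming completion.
        let body = r#"{
            "id": "chatcmpl-123",
            "model": "gpt-4o-mini",
            "created": 1700000000,
            "object": "chat.completion",
            "usage": {"prompt_tokens": 9, "completion_tokens": 3, "total_tokens": 12},
            "choices": [{
                "index": 0,
                "message": {"role": "assistant", "content": "Hi!"},
                "finish_reason": "stop"
            }]
        }"#;
        let resp: ChatCompletionResponse = serde_json::from_str(body).unwrap();
        let choice = &resp.choices[0];
        // `delta` and `citations` are absent from the payload, so both are None.
        assert!(choice.delta.is_none());
        assert_eq!(choice.message.as_ref().unwrap().content, "Hi!");
    }
}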