//! `openai_api_rs/v1/model.rs` — response types for the model-listing endpoint.

use serde::{Deserialize, Serialize};
3#[derive(Debug, Deserialize, Serialize)]
4pub struct ModelsResponse {
5    pub object: Option<String>,
6    pub data: Vec<ModelResponse>,
7}
9#[derive(Debug, Deserialize, Serialize)]
10pub struct ModelResponse {
11    pub id: Option<String>,
12    pub name: Option<String>,
13    pub created: Option<i64>,
14    pub description: Option<String>,
15    pub architecture: Option<Architecture>,
16    pub top_provider: Option<TopProvider>,
17    pub pricing: Option<Pricing>,
18    pub canonical_slug: Option<String>,
19    pub context_length: Option<i64>,
20    pub hugging_face_id: Option<String>,
21    pub per_request_limits: Option<serde_json::Value>,
22    pub supported_parameters: Option<Vec<String>>,
23    pub object: Option<String>,
24    pub owned_by: Option<String>,
25}
27#[derive(Debug, Deserialize, Serialize)]
28pub struct Architecture {
29    pub input_modalities: Option<Vec<String>>,
30    pub output_modalities: Option<Vec<String>>,
31    pub tokenizer: Option<String>,
32    pub instruct_type: Option<String>,
33}
35#[derive(Debug, Deserialize, Serialize)]
36pub struct TopProvider {
37    pub is_moderated: Option<bool>,
38    pub context_length: Option<i64>,
39    pub max_completion_tokens: Option<i64>,
40}
42#[derive(Debug, Deserialize, Serialize)]
43pub struct Pricing {
44    pub prompt: Option<String>,
45    pub completion: Option<String>,
46    pub image: Option<String>,
47    pub request: Option<String>,
48    pub web_search: Option<String>,
49    pub internal_reasoning: Option<String>,
50    pub input_cache_read: Option<String>,
51    pub input_cache_write: Option<String>,
52}