// openai_gpt_client/models.rs

use std::fmt::Display;

use crate::model_variants::ModelId;
/// One model entry as returned by the OpenAI `GET /v1/models` endpoint.
#[derive(Debug, serde::Deserialize)]
pub struct Model {
    // Model identifier, parsed into the crate's strongly-typed `ModelId`.
    pub id: ModelId,
    // Object type discriminator from the API — presumably always "model"; confirm against the API reference.
    pub object: String,
    // Creation time — presumably a Unix timestamp in seconds; not verifiable from this file.
    pub created: i64,
    // Organization that owns the model.
    pub owned_by: String,
    // Permission records attached to this model.
    pub permission: Vec<ModelPermission>,
    // Root model this model derives from.
    pub root: String,
    // Parent model, when this model is a fine-tune/derivative; `None` otherwise.
    pub parent: Option<String>,
}
16impl Display for Model {
17 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
18 write!(f, "Model: {}", self.id)
19 }
20}
/// Permission record attached to a [`Model`], describing what the caller is allowed to do with it.
#[derive(Debug, serde::Deserialize)]
pub struct ModelPermission {
    // Permission record identifier.
    pub id: String,
    // Object type discriminator from the API — presumably "model_permission"; confirm against the API reference.
    pub object: String,
    // Creation time — presumably a Unix timestamp in seconds; not verifiable from this file.
    pub created: i64,
    // Capability flags as reported by the API; semantics are defined by OpenAI, not visible here.
    pub allow_create_engine: bool,
    pub allow_sampling: bool,
    pub allow_logprobs: bool,
    pub allow_search_indices: bool,
    pub allow_view: bool,
    pub allow_fine_tuning: bool,
    // Organization this permission applies to.
    pub organization: String,
    // Optional group scope; `None` when the permission is not group-scoped.
    pub group: Option<String>,
    pub is_blocking: bool,
}
38pub async fn get_models() -> Result<Vec<Model>, reqwest::Error> {
39 let client = reqwest::Client::new();
40 let response = client
41 .get("https://api.openai.com/v1/models")
42 .send()
43 .await?
44 .json::<Vec<Model>>()
45 .await?;
46 Ok(response)
47}