crate::ix!();

/// Body of a GPT API request.
#[derive(Debug, Serialize, Deserialize)]
pub struct GptRequestBody {

    /// Model used for the request.
    #[serde(with = "model_type")]
    model: GptModelType,

    /// Messages included in the request.
    messages: Vec<GptMessage>,

    /// Maximum number of tokens the model may generate in its response.
    max_tokens: u32,
}

impl GptRequestBody {

    /// Default token budget for text-only requests.
    pub fn default_max_tokens() -> u32 {
        1024
    }

    /// Default token budget for requests that include an image.
    pub fn default_max_tokens_given_image(_image_b64: &str) -> u32 {
        //TODO: is this the right value?
        2048
    }

    /// Build a text-only request from a system message and a user message,
    /// using the default token budget.
    pub fn new_basic(system_message: &str, user_message: &str) -> Self {
        Self {
            model: GptModelType::Gpt4Turbo,
            messages: vec![
                GptMessage::system_message(system_message),
                GptMessage::user_message(user_message),
            ],
            max_tokens: Self::default_max_tokens(),
        }
    }

    /// Build a request that attaches a base64-encoded image to the user
    /// message, using the image-specific token budget.
    pub fn new_with_image(system_message: &str, user_message: &str, image_b64: &str) -> Self {
        Self {
            model: GptModelType::Gpt4o,
            messages: vec![
                GptMessage::system_message(system_message),
                GptMessage::user_message_with_image(user_message, image_b64),
            ],
            max_tokens: Self::default_max_tokens_given_image(image_b64),
        }
    }
}
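
// A minimal sketch of how these constructors are expected to behave. It only
// exercises code defined in this file (plus the `GptMessage` constructors it
// already calls); the exact JSON emitted by the `model_type` serializer is
// deliberately not asserted here. The test names and the placeholder base64
// payload are illustrative, not part of the original module.
#[cfg(test)]
mod gpt_request_body_tests {
    use super::*;

    #[test]
    fn basic_request_uses_default_token_budget() {
        let body = GptRequestBody::new_basic("You are a helpful assistant.", "Hello!");
        assert_eq!(body.max_tokens, GptRequestBody::default_max_tokens());
        assert_eq!(body.messages.len(), 2);
    }

    #[test]
    fn image_request_uses_image_specific_token_budget() {
        // Placeholder base64 payload for illustration only.
        let image_b64 = "aGVsbG8=";
        let body = GptRequestBody::new_with_image("system", "describe this image", image_b64);
        assert_eq!(body.max_tokens, GptRequestBody::default_max_tokens_given_image(image_b64));
        assert_eq!(body.messages.len(), 2);
    }
}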