gpt_batch_scribe/gpt_request_body.rs

crate::ix!();

/// Body details of the API request.
#[derive(Debug, Serialize, Deserialize)]
pub struct GptRequestBody {

    /// Model used for the request.
    #[serde(with = "model_type")]
    model: GptModelType,

    /// Array of messages exchanged in the request.
    messages: Vec<GptMessage>,

    /// Maximum number of tokens to be used by the model.
    max_tokens: u32,
}

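// For reference, a serialized body looks roughly like the sketch below. This assumes the
// `model_type` serde module writes the model as its API string name and that `GptMessage`
// serializes to the usual `{"role": ..., "content": ...}` chat shape; both are assumptions
// about code defined elsewhere in the crate, not guarantees made by this file.
//
// {
//   "model": "gpt-4o",
//   "messages": [
//     { "role": "system", "content": "..." },
//     { "role": "user",   "content": "..." }
//   ],
//   "max_tokens": 8192
// }
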
impl GptRequestBody {

    /// Default token budget for text-only requests.
    pub fn default_max_tokens() -> u32 {
        // Was 1024.
        8192
    }

    /// Default token budget for requests that attach an image.
    pub fn default_max_tokens_given_image(_image_b64: &str) -> u32 {
        // TODO: confirm this is the right value.
        2048
    }

    /// Builds a text-only request from a system message and a user message.
    pub fn new_basic(model: GptModelType, system_message: &str, user_message: &str) -> Self {
        Self {
            model,
            messages: vec![
                GptMessage::system_message(system_message),
                GptMessage::user_message(user_message),
            ],
            max_tokens: Self::default_max_tokens(),
        }
    }

    /// Builds a request whose user message carries a base64-encoded image.
    pub fn new_with_image(model: GptModelType, system_message: &str, user_message: &str, image_b64: &str) -> Self {
        Self {
            model,
            messages: vec![
                GptMessage::system_message(system_message),
                GptMessage::user_message_with_image(user_message, image_b64),
            ],
            max_tokens: Self::default_max_tokens_given_image(image_b64),
        }
    }
}
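
// A minimal usage sketch, shown as a test for illustration only. It assumes `GptModelType`
// exposes a `Gpt4o` variant (substitute whatever the enum actually defines) and that
// `serde_json` is available as a dev-dependency; neither is guaranteed by this file.
#[cfg(test)]
mod request_body_usage {
    use super::*;

    #[test]
    fn builds_and_serializes_a_basic_request() {
        let body = GptRequestBody::new_basic(
            GptModelType::Gpt4o, // assumed variant name
            "You are a terse assistant.",
            "Summarize the Rust ownership model in one sentence.",
        );

        // Constructors fall back to the text-only token budget.
        assert_eq!(body.max_tokens, GptRequestBody::default_max_tokens());

        // Serialization goes through the `Serialize` derive and the `model_type` module above.
        let json = serde_json::to_string(&body).expect("request body should serialize");
        assert!(json.contains("max_tokens"));
    }
}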