/// Create an LLM completion request from the supplied prompts and common parameters, then call the model.
pub async fn call(system: &str, user: &[&str], temperature: f32, is_json: bool, is_chat: bool) -> Result<LlmReturn, Box<dyn std::error::Error + Send>> {
    let model: String = env::var("GPT_VERSION").expect("GPT_VERSION not found in environment variables");
    let mut messages = Vec::new();

    // An optional system prompt always comes first.
    if !system.is_empty() {
        messages.push(GptMessage { role: "system".into(), content: system.into() });
    }

    // In chat mode the slice is an alternating transcript: even indices are
    // user turns, odd indices are earlier model replies. OpenAI-style chat
    // APIs name the model's role "assistant".
    user.iter()
        .enumerate()
        .for_each(|(i, c)| {
            let role = if !is_chat || i % 2 == 0 { "user" } else { "assistant" };
            messages.push(GptMessage { role: role.into(), content: c.to_string() });
        });
    let completion = GptCompletion {
        model,
        messages,
        temperature,
        response_format: ResponseFormat::new(is_json),
    };

    call_gpt_completion(&completion).await
}
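
// A minimal usage sketch of `call`, assuming `GPT_VERSION` is set in the
// environment and the `Gpt*` types above are in scope; the prompt strings
// and parameter values are illustrative, not part of any fixed API.
#[allow(dead_code)]
async fn call_usage_example() -> Result<(), Box<dyn std::error::Error + Send>> {
    // Single-turn: is_chat = false, so every entry is sent as a user turn.
    let _answer = call(
        "You are a concise assistant.",
        &["Explain what a vector clock is in one sentence."],
        0.2,   // temperature
        false, // is_json: expect a plain-text response
        false, // is_chat: no alternating history
    )
    .await?;

    // Chat replay: is_chat = true, so entries alternate user / assistant.
    let _follow_up = call(
        "", // no system prompt
        &["What is 2 + 2?", "4.", "Now double it."],
        0.0,
        false,
        true,
    )
    .await?;

    Ok(())
}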