use anyhow::{bail, Result};
use maplit::hashmap;
use crate::{config, openai, profile};
use crate::model::Model;
/// Raw mustache source for the system instruction prompt, embedded at compile
/// time from `resources/prompt.md`.
// NOTE(review): the render sites supply a `max_length` variable; presumably
// the template references `{{max_length}}` — confirm against prompt.md.
const INSTRUCTION_TEMPLATE: &str = include_str!("../resources/prompt.md");
fn get_instruction_template() -> Result<String> {
profile!("Generate instruction template");
let max_length = config::APP.max_commit_length.unwrap_or(72).to_string();
let template = mustache::compile_str(INSTRUCTION_TEMPLATE)
.map_err(|e| anyhow::anyhow!("Template compilation error: {}", e))?
.render_to_string(&hashmap! {
"max_length" => max_length
})
.map_err(|e| anyhow::anyhow!("Template rendering error: {}", e))?;
Ok(template)
}
/// Counts how many tokens the rendered instruction template consumes under
/// the given model's tokenizer.
///
/// # Errors
/// Propagates template-rendering failures and tokenizer errors.
pub fn get_instruction_token_count(model: &Model) -> Result<usize> {
  profile!("Calculate instruction tokens");
  model.count_tokens(&get_instruction_template()?)
}
pub fn create_commit_request(diff: String, max_tokens: usize, model: Model) -> Result<openai::Request> {
profile!("Prepare OpenAI request");
let max_length = config::APP.max_commit_length.unwrap_or(72).to_string();
let instruction_template = mustache::compile_str(INSTRUCTION_TEMPLATE)
.map_err(|e| anyhow::anyhow!("Template compilation error: {}", e))?
.render_to_string(&hashmap! {
"max_length" => max_length
})
.map_err(|e| anyhow::anyhow!("Template rendering error: {}", e))?;
Ok(openai::Request {
system: instruction_template,
prompt: diff,
max_tokens: max_tokens.try_into().unwrap_or(u16::MAX),
model
})
}
/// Generates a commit message for `patch` by sending a prepared request to
/// the OpenAI API.
///
/// # Errors
/// Fails when `remaining_tokens` is zero, when the request cannot be built,
/// or when the API call itself fails.
pub async fn generate(patch: String, remaining_tokens: usize, model: Model) -> Result<openai::Response> {
  profile!("Generate commit message");

  // Guard: a zero token budget can never produce a message.
  if remaining_tokens == 0 {
    bail!("Maximum token count must be greater than zero")
  }

  openai::call(create_commit_request(patch, remaining_tokens, model)?).await
}
/// Number of prompt tokens consumed by the instruction template alone
/// (i.e. the fixed overhead before any diff content is added).
///
/// Thin public wrapper; delegates to [`get_instruction_token_count`].
pub fn token_used(model: &Model) -> Result<usize> {
get_instruction_token_count(model)
}