1use anyhow::{anyhow, bail, Result};
2use maplit::hashmap;
3use mustache;
4use async_openai::Client;
5
6use crate::{config, debug_output, openai, profile};
7use crate::model::Model;
8use crate::config::App as Settings;
9use crate::multi_step_integration::{generate_commit_message_local, generate_commit_message_multi_step};
10
11const INSTRUCTION_TEMPLATE: &str = include_str!("../resources/prompt.md");
13
14fn get_instruction_template() -> Result<String> {
17 profile!("Generate instruction template");
18 let max_length = config::APP.max_commit_length.unwrap_or(72).to_string();
19 let template = mustache::compile_str(INSTRUCTION_TEMPLATE)
20 .map_err(|e| anyhow!("Template compilation error: {}", e))?
21 .render_to_string(&hashmap! {
22 "max_length" => max_length
23 })
24 .map_err(|e| anyhow!("Template rendering error: {}", e))?;
25 Ok(template)
26}
27
28fn create_commit_request(diff: String, max_tokens: usize, model: Model) -> Result<openai::Request> {
38 profile!("Prepare OpenAI request");
39 let template = get_instruction_template()?;
40 Ok(openai::Request {
41 system: template,
42 prompt: diff,
43 max_tokens: max_tokens.try_into().unwrap_or(u16::MAX),
44 model
45 })
46}
47
48pub async fn generate(patch: String, remaining_tokens: usize, model: Model, settings: Option<&Settings>) -> Result<openai::Response> {
65 profile!("Generate commit message");
66
67 if remaining_tokens == 0 {
68 bail!("Maximum token count must be greater than zero")
69 }
70
71 let max_length = settings
73 .and_then(|s| s.max_commit_length)
74 .or(config::APP.max_commit_length);
75
76 if let Some(custom_settings) = settings {
78 if let Some(api_key) = &custom_settings.openai_api_key {
79 if !api_key.is_empty() && api_key != "<PLACE HOLDER FOR YOUR API KEY>" {
80 let config = openai::create_openai_config(custom_settings)?;
81 let client = Client::with_config(config);
82 let model_str = model.to_string();
83
84 match generate_commit_message_multi_step(&client, &model_str, &patch, max_length).await {
85 Ok(message) => return Ok(openai::Response { response: message }),
86 Err(e) => {
87 log::warn!("Multi-step generation with custom settings failed: {e}");
88 if let Some(session) = debug_output::debug_session() {
89 session.set_multi_step_error(e.to_string());
90 }
91 }
92 }
93 }
94 }
95 } else {
96 if let Ok(api_key) = std::env::var("OPENAI_API_KEY") {
98 if !api_key.is_empty() {
99 let client = Client::new();
100 let model_str = model.to_string();
101
102 match generate_commit_message_multi_step(&client, &model_str, &patch, max_length).await {
103 Ok(message) => return Ok(openai::Response { response: message }),
104 Err(e) => {
105 log::warn!("Multi-step generation failed: {e}");
106 if let Some(session) = debug_output::debug_session() {
107 session.set_multi_step_error(e.to_string());
108 }
109 }
110 }
111 }
112 }
113 }
114
115 match generate_commit_message_local(&patch, max_length) {
117 Ok(message) => return Ok(openai::Response { response: message }),
118 Err(e) => {
119 log::warn!("Local multi-step generation failed: {e}");
120 }
121 }
122
123 if let Some(session) = debug_output::debug_session() {
125 session.set_single_step_success(true);
126 }
127
128 let request = create_commit_request(patch, remaining_tokens, model)?;
130
131 match settings {
133 Some(custom_settings) => {
134 let config = openai::create_openai_config(custom_settings)?;
136 openai::call_with_config(request, config).await
137 }
138 None => {
139 openai::call(request).await
141 }
142 }
143}
144
/// Number of prompt tokens consumed by the rendered instruction template
/// for `model`. Thin alias for [`get_instruction_token_count`].
pub fn token_used(model: &Model) -> Result<usize> {
  get_instruction_token_count(model)
}
148
149pub fn get_instruction_token_count(model: &Model) -> Result<usize> {
157 profile!("Calculate instruction tokens");
158 let template = get_instruction_template()?;
159 model.count_tokens(&template)
160}