ai/commit.rs

use anyhow::{anyhow, bail, Result};
use maplit::hashmap;
use async_openai::Client;

use crate::{config, debug_output, openai, profile};
use crate::model::Model;
use crate::config::App as Settings;
use crate::multi_step_integration::{generate_commit_message_local, generate_commit_message_multi_step};

/// The instruction template included at compile time
const INSTRUCTION_TEMPLATE: &str = include_str!("../resources/prompt.md");

/// Returns the instruction template for the AI model.
/// This template guides the model in generating appropriate commit messages.
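///
/// # Example
///
/// A minimal sketch of the substitution this performs; the template string
/// below is hypothetical, the real one lives in `resources/prompt.md`:
///
/// ```no_run
/// use maplit::hashmap;
///
/// let rendered = mustache::compile_str("Limit the subject to {{max_length}} characters.")
///   .unwrap()
///   .render_to_string(&hashmap! { "max_length" => "72".to_string() })
///   .unwrap();
/// assert_eq!(rendered, "Limit the subject to 72 characters.");
/// ```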
fn get_instruction_template() -> Result<String> {
  profile!("Generate instruction template");
  let max_length = config::APP.max_commit_length.unwrap_or(72).to_string();
  let template = mustache::compile_str(INSTRUCTION_TEMPLATE)
    .map_err(|e| anyhow!("Template compilation error: {}", e))?
    .render_to_string(&hashmap! {
      "max_length" => max_length
    })
    .map_err(|e| anyhow!("Template rendering error: {}", e))?;
  Ok(template)
}

/// Creates an OpenAI request for commit message generation.
///
/// # Arguments
/// * `diff` - The git diff to generate a commit message for
/// * `max_tokens` - Maximum number of tokens allowed for the response
/// * `model` - The AI model to use for generation
///
/// # Returns
/// * `Result<openai::Request>` - The prepared request
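///
/// # Example
///
/// A sketch of how a request might be assembled (the diff literal and the
/// 1024-token budget are illustrative; `model` stands in for a variant
/// defined in `crate::model`):
///
/// ```ignore
/// let request = create_commit_request("diff --git a/foo b/foo".to_string(), 1024, model)?;
/// assert_eq!(request.max_tokens, 1024);
/// ```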
fn create_commit_request(diff: String, max_tokens: usize, model: Model) -> Result<openai::Request> {
  profile!("Prepare OpenAI request");
  let template = get_instruction_template()?;
  Ok(openai::Request {
    system: template,
    prompt: diff,
    // Clamp to u16::MAX when the requested budget does not fit in the field
    max_tokens: max_tokens.try_into().unwrap_or(u16::MAX),
    model
  })
}

/// Generates a commit message using the AI model.
/// Uses the multi-step approach by default, falling back to single-step generation.
///
/// # Arguments
/// * `patch` - The git diff to generate a commit message for
/// * `remaining_tokens` - Maximum number of tokens allowed for the response
/// * `model` - The AI model to use for generation
/// * `settings` - Optional application settings to customize the request
///
/// # Returns
/// * `Result<openai::Response>` - The generated commit message or an error
///
/// # Errors
/// Returns an error if:
/// - `remaining_tokens` is 0
/// - the OpenAI API call fails
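///
/// # Example
///
/// A usage sketch, assuming an async caller and that `OPENAI_API_KEY` is set
/// in the environment (the diff and token budget are illustrative):
///
/// ```ignore
/// let patch = String::from("diff --git a/src/main.rs b/src/main.rs\n...");
/// let response = generate(patch, 4096, model, None).await?;
/// println!("{}", response.response);
/// ```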
pub async fn generate(patch: String, remaining_tokens: usize, model: Model, settings: Option<&Settings>) -> Result<openai::Response> {
  profile!("Generate commit message");

  if remaining_tokens == 0 {
    bail!("Maximum token count must be greater than zero")
  }

  // Try the multi-step approach first
  let max_length = settings
    .and_then(|s| s.max_commit_length)
    .or(config::APP.max_commit_length);

  // Use custom settings if provided
  if let Some(custom_settings) = settings {
    if let Some(api_key) = &custom_settings.openai_api_key {
      if !api_key.is_empty() && api_key != "<PLACE HOLDER FOR YOUR API KEY>" {
        let config = openai::create_openai_config(custom_settings)?;
        let client = Client::with_config(config);
        let model_str = model.to_string();

        match generate_commit_message_multi_step(&client, &model_str, &patch, max_length).await {
          Ok(message) => return Ok(openai::Response { response: message }),
          Err(e) => {
            log::warn!("Multi-step generation with custom settings failed: {e}");
            if let Some(session) = debug_output::debug_session() {
              session.set_multi_step_error(e.to_string());
            }
          }
        }
      }
    }
  } else {
    // No custom settings: fall back to the OPENAI_API_KEY environment variable
    if let Ok(api_key) = std::env::var("OPENAI_API_KEY") {
      if !api_key.is_empty() {
        let client = Client::new();
        let model_str = model.to_string();

        match generate_commit_message_multi_step(&client, &model_str, &patch, max_length).await {
          Ok(message) => return Ok(openai::Response { response: message }),
          Err(e) => {
            log::warn!("Multi-step generation failed: {e}");
            if let Some(session) = debug_output::debug_session() {
              session.set_multi_step_error(e.to_string());
            }
          }
        }
      }
    }
  }

  // Try local multi-step generation
  match generate_commit_message_local(&patch, max_length) {
    Ok(message) => return Ok(openai::Response { response: message }),
    Err(e) => {
      log::warn!("Local multi-step generation failed: {e}");
    }
  }

  // Mark that we're using the single-step fallback
  if let Some(session) = debug_output::debug_session() {
    session.set_single_step_success(true);
  }

  // Fall back to the original single-step approach
  let request = create_commit_request(patch, remaining_tokens, model)?;

  // Use custom settings if provided, otherwise the global config
  match settings {
    Some(custom_settings) => {
      // Create a client with custom settings
      let config = openai::create_openai_config(custom_settings)?;
      openai::call_with_config(request, config).await
    }
    None => {
      // Use the default global config
      openai::call(request).await
    }
  }
}

/// Returns the token count of the instruction template for the given model.
/// Thin wrapper around [`get_instruction_token_count`].
pub fn token_used(model: &Model) -> Result<usize> {
  get_instruction_token_count(model)
}

/// Calculates the number of tokens used by the instruction template.
///
/// # Arguments
/// * `model` - The AI model to use for token counting
///
/// # Returns
/// * `Result<usize>` - The number of tokens used or an error
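///
/// # Example
///
/// A sketch of using the count to budget a request; `context_window` is a
/// hypothetical per-model limit, not something this crate defines:
///
/// ```ignore
/// let used = get_instruction_token_count(&model)?;
/// let remaining = context_window.saturating_sub(used);
/// ```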
pub fn get_instruction_token_count(model: &Model) -> Result<usize> {
  profile!("Calculate instruction tokens");
  let template = get_instruction_template()?;
  model.count_tokens(&template)
}