// git_iris/commit/service.rs

1use anyhow::Result;
2use std::path::Path;
3use std::sync::Arc;
4use tokio::sync::{RwLock, mpsc};
5
6use super::prompt::{create_system_prompt, create_user_prompt, process_commit_message};
7use crate::config::Config;
8use crate::context::{CommitContext, GeneratedMessage, GeneratedReview};
9use crate::git::{CommitResult, GitRepo};
10use crate::instruction_presets::{PresetType, get_instruction_preset_library};
11use crate::llm;
12use crate::log_debug;
13use crate::token_optimizer::TokenOptimizer;
14
/// Service for handling Git commit operations with AI assistance
pub struct IrisCommitService {
    /// Application configuration (providers, presets, custom instructions)
    config: Config,
    /// Shared handle to the underlying Git repository
    repo: Arc<GitRepo>,
    /// Name of the LLM provider used for message/review generation
    provider_name: String,
    /// Whether generated commit messages keep their gitmoji
    use_gitmoji: bool,
    /// Whether pre-/post-commit hooks are executed around commits
    verify: bool,
    /// Lazily populated cache of repository context, guarded for async access
    cached_context: Arc<RwLock<Option<CommitContext>>>,
}
24
25impl IrisCommitService {
26    /// Create a new `IrisCommitService` instance
27    ///
28    /// # Arguments
29    ///
30    /// * `config` - The configuration for the service
31    /// * `repo_path` - The path to the Git repository
32    /// * `provider_name` - The name of the LLM provider to use
33    /// * `use_gitmoji` - Whether to use Gitmoji in commit messages
34    /// * `verify` - Whether to verify commits
35    ///
36    /// # Returns
37    ///
38    /// A Result containing the new `IrisCommitService` instance or an error
39    pub fn new(
40        config: Config,
41        repo_path: &Path,
42        provider_name: &str,
43        use_gitmoji: bool,
44        verify: bool,
45    ) -> Result<Self> {
46        Ok(Self {
47            config,
48            repo: Arc::new(GitRepo::new(repo_path)?),
49            provider_name: provider_name.to_string(),
50            use_gitmoji,
51            verify,
52            cached_context: Arc::new(RwLock::new(None)),
53        })
54    }
55
56    /// Check the environment for necessary prerequisites
57    pub fn check_environment(&self) -> Result<()> {
58        self.config.check_environment()
59    }
60
61    /// Get Git information for the current repository
62    pub async fn get_git_info(&self) -> Result<CommitContext> {
63        {
64            let cached_context = self.cached_context.read().await;
65            if let Some(context) = &*cached_context {
66                return Ok(context.clone());
67            }
68        }
69
70        let context = self.repo.get_git_info(&self.config).await?;
71
72        {
73            let mut cached_context = self.cached_context.write().await;
74            *cached_context = Some(context.clone());
75        }
76        Ok(context)
77    }
78
79    /// Private helper method to handle common token optimization logic
80    ///
81    /// # Arguments
82    ///
83    /// * `config_clone` - Configuration with preset and instructions
84    /// * `system_prompt` - The system prompt to use
85    /// * `context` - The commit context
86    /// * `create_user_prompt_fn` - A function that creates a user prompt from a context
87    ///
88    /// # Returns
89    ///
90    /// A tuple containing the optimized context and final user prompt
91    fn optimize_prompt<F>(
92        &self,
93        config_clone: &Config,
94        system_prompt: &str,
95        mut context: CommitContext,
96        create_user_prompt_fn: F,
97    ) -> (CommitContext, String)
98    where
99        F: Fn(&CommitContext) -> String,
100    {
101        // Get the token limit for the provider from config or default value
102        let token_limit = config_clone
103            .providers
104            .get(&self.provider_name)
105            .and_then(|p| p.token_limit)
106            .unwrap_or({
107                match self.provider_name.as_str() {
108                    "openai" => 16_000,          // Default for OpenAI
109                    "anthropic" => 100_000,      // Anthropic Claude has large context
110                    "google" | "groq" => 32_000, // Default for Google and Groq
111                    _ => 8_000,                  // Conservative default for other providers
112                }
113            });
114
115        // Create a token optimizer to count tokens
116        let optimizer = TokenOptimizer::new(token_limit);
117        let system_tokens = optimizer.count_tokens(system_prompt);
118
119        log_debug!("Token limit: {}", token_limit);
120        log_debug!("System prompt tokens: {}", system_tokens);
121
122        // Reserve tokens for system prompt and some buffer for formatting
123        // 1000 token buffer provides headroom for model responses and formatting
124        let context_token_limit = token_limit.saturating_sub(system_tokens + 1000);
125        log_debug!("Available tokens for context: {}", context_token_limit);
126
127        // Count tokens before optimization
128        let user_prompt_before = create_user_prompt_fn(&context);
129        let total_tokens_before = system_tokens + optimizer.count_tokens(&user_prompt_before);
130        log_debug!("Total tokens before optimization: {}", total_tokens_before);
131
132        // Optimize the context with remaining token budget
133        context.optimize(context_token_limit);
134
135        let user_prompt = create_user_prompt_fn(&context);
136        let user_tokens = optimizer.count_tokens(&user_prompt);
137        let total_tokens = system_tokens + user_tokens;
138
139        log_debug!("User prompt tokens after optimization: {}", user_tokens);
140        log_debug!("Total tokens after optimization: {}", total_tokens);
141
142        // If we're still over the limit, truncate the user prompt directly
143        // 100 token safety buffer ensures we stay under the limit
144        let final_user_prompt = if total_tokens > token_limit {
145            log_debug!(
146                "Total tokens {} still exceeds limit {}, truncating user prompt",
147                total_tokens,
148                token_limit
149            );
150            let max_user_tokens = token_limit.saturating_sub(system_tokens + 100);
151            optimizer.truncate_string(&user_prompt, max_user_tokens)
152        } else {
153            user_prompt
154        };
155
156        let final_tokens = system_tokens + optimizer.count_tokens(&final_user_prompt);
157        log_debug!(
158            "Final total tokens after potential truncation: {}",
159            final_tokens
160        );
161
162        (context, final_user_prompt)
163    }
164
165    /// Generate a commit message using AI
166    ///
167    /// # Arguments
168    ///
169    /// * `preset` - The instruction preset to use
170    /// * `instructions` - Custom instructions for the AI
171    ///
172    /// # Returns
173    ///
174    /// A Result containing the generated commit message or an error
175    pub async fn generate_message(
176        &self,
177        preset: &str,
178        instructions: &str,
179    ) -> anyhow::Result<GeneratedMessage> {
180        let mut config_clone = self.config.clone();
181
182        // Check if the preset exists and is valid for commits
183        if preset.is_empty() {
184            config_clone.instruction_preset = "default".to_string();
185        } else {
186            let library = get_instruction_preset_library();
187            if let Some(preset_info) = library.get_preset(preset) {
188                if preset_info.preset_type == PresetType::Review {
189                    log_debug!(
190                        "Warning: Preset '{}' is review-specific, not ideal for commits",
191                        preset
192                    );
193                }
194                config_clone.instruction_preset = preset.to_string();
195            } else {
196                log_debug!("Preset '{}' not found, using default", preset);
197                config_clone.instruction_preset = "default".to_string();
198            }
199        }
200
201        config_clone.instructions = instructions.to_string();
202
203        let context = self.get_git_info().await?;
204
205        // Create system prompt
206        let system_prompt = create_system_prompt(&config_clone)?;
207
208        // Use the shared optimization logic
209        let (_, final_user_prompt) =
210            self.optimize_prompt(&config_clone, &system_prompt, context, create_user_prompt);
211
212        let mut generated_message = llm::get_message::<GeneratedMessage>(
213            &config_clone,
214            &self.provider_name,
215            &system_prompt,
216            &final_user_prompt,
217        )
218        .await?;
219
220        // Apply gitmoji setting
221        if !self.use_gitmoji {
222            generated_message.emoji = None;
223        }
224
225        Ok(generated_message)
226    }
227
228    /// Generate a code review using AI
229    ///
230    /// # Arguments
231    ///
232    /// * `preset` - The instruction preset to use
233    /// * `instructions` - Custom instructions for the AI
234    ///
235    /// # Returns
236    ///
237    /// A Result containing the generated code review or an error
238    pub async fn generate_review(
239        &self,
240        preset: &str,
241        instructions: &str,
242    ) -> anyhow::Result<GeneratedReview> {
243        let mut config_clone = self.config.clone();
244
245        // Check if the preset exists and is valid for reviews
246        if preset.is_empty() {
247            config_clone.instruction_preset = "default".to_string();
248        } else {
249            let library = get_instruction_preset_library();
250            if let Some(preset_info) = library.get_preset(preset) {
251                if preset_info.preset_type == PresetType::Commit {
252                    log_debug!(
253                        "Warning: Preset '{}' is commit-specific, not ideal for reviews",
254                        preset
255                    );
256                }
257                config_clone.instruction_preset = preset.to_string();
258            } else {
259                log_debug!("Preset '{}' not found, using default", preset);
260                config_clone.instruction_preset = "default".to_string();
261            }
262        }
263
264        config_clone.instructions = instructions.to_string();
265
266        let context = self.get_git_info().await?;
267
268        // Create system prompt
269        let system_prompt = super::prompt::create_review_system_prompt(&config_clone)?;
270
271        // Use the shared optimization logic
272        let (_, final_user_prompt) = self.optimize_prompt(
273            &config_clone,
274            &system_prompt,
275            context,
276            super::prompt::create_review_user_prompt,
277        );
278
279        llm::get_message::<GeneratedReview>(
280            &config_clone,
281            &self.provider_name,
282            &system_prompt,
283            &final_user_prompt,
284        )
285        .await
286    }
287
288    /// Performs a commit with the given message.
289    ///
290    /// # Arguments
291    ///
292    /// * `message` - The commit message.
293    ///
294    /// # Returns
295    ///
296    /// A Result containing the `CommitResult` or an error.
297    pub fn perform_commit(&self, message: &str) -> Result<CommitResult> {
298        let processed_message = process_commit_message(message.to_string(), self.use_gitmoji);
299        log_debug!("Performing commit with message: {}", processed_message);
300
301        if !self.verify {
302            log_debug!("Skipping pre-commit hook (verify=false)");
303            return self.repo.commit(&processed_message);
304        }
305
306        // Execute pre-commit hook
307        log_debug!("Executing pre-commit hook");
308        if let Err(e) = self.repo.execute_hook("pre-commit") {
309            log_debug!("Pre-commit hook failed: {}", e);
310            return Err(e);
311        }
312        log_debug!("Pre-commit hook executed successfully");
313
314        // Perform the commit
315        match self.repo.commit(&processed_message) {
316            Ok(result) => {
317                // Execute post-commit hook
318                log_debug!("Executing post-commit hook");
319                if let Err(e) = self.repo.execute_hook("post-commit") {
320                    log_debug!("Post-commit hook failed: {}", e);
321                    // We don't fail the commit if post-commit hook fails
322                }
323                log_debug!("Commit performed successfully");
324                Ok(result)
325            }
326            Err(e) => {
327                log_debug!("Commit failed: {}", e);
328                Err(e)
329            }
330        }
331    }
332
333    /// Execute the pre-commit hook if verification is enabled
334    pub fn pre_commit(&self) -> Result<()> {
335        if self.verify {
336            self.repo.execute_hook("pre-commit")
337        } else {
338            Ok(())
339        }
340    }
341
342    /// Create a channel for message generation
343    pub fn create_message_channel(
344        &self,
345    ) -> (
346        mpsc::Sender<Result<GeneratedMessage>>,
347        mpsc::Receiver<Result<GeneratedMessage>>,
348    ) {
349        mpsc::channel(1)
350    }
351}