git_iris/commit/service.rs

1use anyhow::Result;
2use std::path::Path;
3use std::sync::Arc;
4use tokio::sync::{RwLock, mpsc};
5
6use super::prompt::{create_system_prompt, create_user_prompt, process_commit_message};
7use super::review::GeneratedReview;
8use super::types::GeneratedMessage;
9use crate::config::Config;
10use crate::context::CommitContext;
11use crate::git::{CommitResult, GitRepo};
12use crate::instruction_presets::{PresetType, get_instruction_preset_library};
13use crate::llm;
14use crate::log_debug;
15use crate::token_optimizer::TokenOptimizer;
16
/// Service for handling Git commit operations with AI assistance
pub struct IrisCommitService {
    // Application configuration; cloned and customized per generation request
    config: Config,
    // Handle to the Git repository this service operates on
    repo: Arc<GitRepo>,
    // Name of the LLM provider (e.g. "openai", "anthropic") used for lookups
    // in `config.providers` and passed to `llm::get_message`
    provider_name: String,
    // Whether generated commit messages should keep their emoji
    use_gitmoji: bool,
    // Whether to run pre-/post-commit hooks around commits
    verify: bool,
    // Lazily populated cache of the repository context, filled on first
    // `get_git_info` call and shared across awaits via an async RwLock
    cached_context: Arc<RwLock<Option<CommitContext>>>,
}
26
27impl IrisCommitService {
28    /// Create a new `IrisCommitService` instance
29    ///
30    /// # Arguments
31    ///
32    /// * `config` - The configuration for the service
33    /// * `repo_path` - The path to the Git repository
34    /// * `provider_name` - The name of the LLM provider to use
35    /// * `use_gitmoji` - Whether to use Gitmoji in commit messages
36    /// * `verify` - Whether to verify commits
37    ///
38    /// # Returns
39    ///
40    /// A Result containing the new `IrisCommitService` instance or an error
41    pub fn new(
42        config: Config,
43        repo_path: &Path,
44        provider_name: &str,
45        use_gitmoji: bool,
46        verify: bool,
47    ) -> Result<Self> {
48        Ok(Self {
49            config,
50            repo: Arc::new(GitRepo::new(repo_path)?),
51            provider_name: provider_name.to_string(),
52            use_gitmoji,
53            verify,
54            cached_context: Arc::new(RwLock::new(None)),
55        })
56    }
57
58    /// Check the environment for necessary prerequisites
59    pub fn check_environment(&self) -> Result<()> {
60        self.config.check_environment()
61    }
62
63    /// Get Git information for the current repository
64    pub async fn get_git_info(&self) -> Result<CommitContext> {
65        {
66            let cached_context = self.cached_context.read().await;
67            if let Some(context) = &*cached_context {
68                return Ok(context.clone());
69            }
70        }
71
72        let context = self.repo.get_git_info(&self.config).await?;
73
74        {
75            let mut cached_context = self.cached_context.write().await;
76            *cached_context = Some(context.clone());
77        }
78        Ok(context)
79    }
80
81    /// Private helper method to handle common token optimization logic
82    ///
83    /// # Arguments
84    ///
85    /// * `config_clone` - Configuration with preset and instructions
86    /// * `system_prompt` - The system prompt to use
87    /// * `context` - The commit context
88    /// * `create_user_prompt_fn` - A function that creates a user prompt from a context
89    ///
90    /// # Returns
91    ///
92    /// A tuple containing the optimized context and final user prompt
93    fn optimize_prompt<F>(
94        &self,
95        config_clone: &Config,
96        system_prompt: &str,
97        mut context: CommitContext,
98        create_user_prompt_fn: F,
99    ) -> (CommitContext, String)
100    where
101        F: Fn(&CommitContext) -> String,
102    {
103        // Get the token limit for the provider from config or default value
104        let token_limit = config_clone
105            .providers
106            .get(&self.provider_name)
107            .and_then(|p| p.token_limit)
108            .unwrap_or({
109                match self.provider_name.as_str() {
110                    "openai" => 16_000,          // Default for OpenAI
111                    "anthropic" => 100_000,      // Anthropic Claude has large context
112                    "google" | "groq" => 32_000, // Default for Google and Groq
113                    _ => 8_000,                  // Conservative default for other providers
114                }
115            });
116
117        // Create a token optimizer to count tokens
118        let optimizer = TokenOptimizer::new(token_limit);
119        let system_tokens = optimizer.count_tokens(system_prompt);
120
121        log_debug!("Token limit: {}", token_limit);
122        log_debug!("System prompt tokens: {}", system_tokens);
123
124        // Reserve tokens for system prompt and some buffer for formatting
125        // 1000 token buffer provides headroom for model responses and formatting
126        let context_token_limit = token_limit.saturating_sub(system_tokens + 1000);
127        log_debug!("Available tokens for context: {}", context_token_limit);
128
129        // Count tokens before optimization
130        let user_prompt_before = create_user_prompt_fn(&context);
131        let total_tokens_before = system_tokens + optimizer.count_tokens(&user_prompt_before);
132        log_debug!("Total tokens before optimization: {}", total_tokens_before);
133
134        // Optimize the context with remaining token budget
135        context.optimize(context_token_limit);
136
137        let user_prompt = create_user_prompt_fn(&context);
138        let user_tokens = optimizer.count_tokens(&user_prompt);
139        let total_tokens = system_tokens + user_tokens;
140
141        log_debug!("User prompt tokens after optimization: {}", user_tokens);
142        log_debug!("Total tokens after optimization: {}", total_tokens);
143
144        // If we're still over the limit, truncate the user prompt directly
145        // 100 token safety buffer ensures we stay under the limit
146        let final_user_prompt = if total_tokens > token_limit {
147            log_debug!(
148                "Total tokens {} still exceeds limit {}, truncating user prompt",
149                total_tokens,
150                token_limit
151            );
152            let max_user_tokens = token_limit.saturating_sub(system_tokens + 100);
153            optimizer.truncate_string(&user_prompt, max_user_tokens)
154        } else {
155            user_prompt
156        };
157
158        let final_tokens = system_tokens + optimizer.count_tokens(&final_user_prompt);
159        log_debug!(
160            "Final total tokens after potential truncation: {}",
161            final_tokens
162        );
163
164        (context, final_user_prompt)
165    }
166
167    /// Generate a commit message using AI
168    ///
169    /// # Arguments
170    ///
171    /// * `preset` - The instruction preset to use
172    /// * `instructions` - Custom instructions for the AI
173    ///
174    /// # Returns
175    ///
176    /// A Result containing the generated commit message or an error
177    pub async fn generate_message(
178        &self,
179        preset: &str,
180        instructions: &str,
181    ) -> anyhow::Result<GeneratedMessage> {
182        let mut config_clone = self.config.clone();
183
184        // Check if the preset exists and is valid for commits
185        if preset.is_empty() {
186            config_clone.instruction_preset = "default".to_string();
187        } else {
188            let library = get_instruction_preset_library();
189            if let Some(preset_info) = library.get_preset(preset) {
190                if preset_info.preset_type == PresetType::Review {
191                    log_debug!(
192                        "Warning: Preset '{}' is review-specific, not ideal for commits",
193                        preset
194                    );
195                }
196                config_clone.instruction_preset = preset.to_string();
197            } else {
198                log_debug!("Preset '{}' not found, using default", preset);
199                config_clone.instruction_preset = "default".to_string();
200            }
201        }
202
203        config_clone.instructions = instructions.to_string();
204
205        let context = self.get_git_info().await?;
206
207        // Create system prompt
208        let system_prompt = create_system_prompt(&config_clone)?;
209
210        // Use the shared optimization logic
211        let (_, final_user_prompt) =
212            self.optimize_prompt(&config_clone, &system_prompt, context, create_user_prompt);
213
214        let mut generated_message = llm::get_message::<GeneratedMessage>(
215            &config_clone,
216            &self.provider_name,
217            &system_prompt,
218            &final_user_prompt,
219        )
220        .await?;
221
222        // Apply gitmoji setting
223        if !self.use_gitmoji {
224            generated_message.emoji = None;
225        }
226
227        Ok(generated_message)
228    }
229
230    /// Generate a code review using AI
231    ///
232    /// # Arguments
233    ///
234    /// * `preset` - The instruction preset to use
235    /// * `instructions` - Custom instructions for the AI
236    ///
237    /// # Returns
238    ///
239    /// A Result containing the generated code review or an error
240    pub async fn generate_review(
241        &self,
242        preset: &str,
243        instructions: &str,
244    ) -> anyhow::Result<GeneratedReview> {
245        let mut config_clone = self.config.clone();
246
247        // Check if the preset exists and is valid for reviews
248        if preset.is_empty() {
249            config_clone.instruction_preset = "default".to_string();
250        } else {
251            let library = get_instruction_preset_library();
252            if let Some(preset_info) = library.get_preset(preset) {
253                if preset_info.preset_type == PresetType::Commit {
254                    log_debug!(
255                        "Warning: Preset '{}' is commit-specific, not ideal for reviews",
256                        preset
257                    );
258                }
259                config_clone.instruction_preset = preset.to_string();
260            } else {
261                log_debug!("Preset '{}' not found, using default", preset);
262                config_clone.instruction_preset = "default".to_string();
263            }
264        }
265
266        config_clone.instructions = instructions.to_string();
267
268        let context = self.get_git_info().await?;
269
270        // Create system prompt
271        let system_prompt = super::prompt::create_review_system_prompt(&config_clone)?;
272
273        // Use the shared optimization logic
274        let (_, final_user_prompt) = self.optimize_prompt(
275            &config_clone,
276            &system_prompt,
277            context,
278            super::prompt::create_review_user_prompt,
279        );
280
281        llm::get_message::<GeneratedReview>(
282            &config_clone,
283            &self.provider_name,
284            &system_prompt,
285            &final_user_prompt,
286        )
287        .await
288    }
289
290    /// Performs a commit with the given message.
291    ///
292    /// # Arguments
293    ///
294    /// * `message` - The commit message.
295    ///
296    /// # Returns
297    ///
298    /// A Result containing the `CommitResult` or an error.
299    pub fn perform_commit(&self, message: &str) -> Result<CommitResult> {
300        let processed_message = process_commit_message(message.to_string(), self.use_gitmoji);
301        log_debug!("Performing commit with message: {}", processed_message);
302
303        if !self.verify {
304            log_debug!("Skipping pre-commit hook (verify=false)");
305            return self.repo.commit(&processed_message);
306        }
307
308        // Execute pre-commit hook
309        log_debug!("Executing pre-commit hook");
310        if let Err(e) = self.repo.execute_hook("pre-commit") {
311            log_debug!("Pre-commit hook failed: {}", e);
312            return Err(e);
313        }
314        log_debug!("Pre-commit hook executed successfully");
315
316        // Perform the commit
317        match self.repo.commit(&processed_message) {
318            Ok(result) => {
319                // Execute post-commit hook
320                log_debug!("Executing post-commit hook");
321                if let Err(e) = self.repo.execute_hook("post-commit") {
322                    log_debug!("Post-commit hook failed: {}", e);
323                    // We don't fail the commit if post-commit hook fails
324                }
325                log_debug!("Commit performed successfully");
326                Ok(result)
327            }
328            Err(e) => {
329                log_debug!("Commit failed: {}", e);
330                Err(e)
331            }
332        }
333    }
334
335    /// Execute the pre-commit hook if verification is enabled
336    pub fn pre_commit(&self) -> Result<()> {
337        if self.verify {
338            self.repo.execute_hook("pre-commit")
339        } else {
340            Ok(())
341        }
342    }
343
344    /// Create a channel for message generation
345    pub fn create_message_channel(
346        &self,
347    ) -> (
348        mpsc::Sender<Result<GeneratedMessage>>,
349        mpsc::Receiver<Result<GeneratedMessage>>,
350    ) {
351        mpsc::channel(1)
352    }
353}