// git_iris/commit/service.rs
use anyhow::Result;
2use std::path::Path;
3use std::sync::Arc;
4use tokio::sync::{RwLock, mpsc};
5
6use super::prompt::{create_system_prompt, create_user_prompt, process_commit_message};
7use super::review::GeneratedReview;
8use super::types::GeneratedMessage;
9use crate::config::Config;
10use crate::context::CommitContext;
11use crate::git::{CommitResult, GitRepo};
12use crate::instruction_presets::{PresetType, get_instruction_preset_library};
13use crate::llm;
14use crate::log_debug;
15use crate::token_optimizer::TokenOptimizer;
16
/// Service driving the git-iris commit workflow: gathers repository context,
/// generates commit messages and reviews via an LLM provider, and performs
/// the actual commit (optionally running git hooks).
pub struct IrisCommitService {
    // Full application configuration (providers, presets, instructions, ...).
    config: Config,
    // Shared handle to the underlying git repository.
    repo: Arc<GitRepo>,
    // Name of the LLM provider to use (e.g. "openai", "anthropic").
    provider_name: String,
    // Whether generated commit messages keep their gitmoji.
    use_gitmoji: bool,
    // Whether pre-/post-commit hooks are executed when committing.
    verify: bool,
    // Lazily-populated cache of the repository context, shared across tasks.
    cached_context: Arc<RwLock<Option<CommitContext>>>,
}
26
27impl IrisCommitService {
28 pub fn new(
42 config: Config,
43 repo_path: &Path,
44 provider_name: &str,
45 use_gitmoji: bool,
46 verify: bool,
47 ) -> Result<Self> {
48 Ok(Self {
49 config,
50 repo: Arc::new(GitRepo::new(repo_path)?),
51 provider_name: provider_name.to_string(),
52 use_gitmoji,
53 verify,
54 cached_context: Arc::new(RwLock::new(None)),
55 })
56 }
57
    /// Verify that the environment required by the current configuration is
    /// usable (delegates entirely to `Config::check_environment`).
    pub fn check_environment(&self) -> Result<()> {
        self.config.check_environment()
    }
62
63 pub async fn get_git_info(&self) -> Result<CommitContext> {
65 {
66 let cached_context = self.cached_context.read().await;
67 if let Some(context) = &*cached_context {
68 return Ok(context.clone());
69 }
70 }
71
72 let context = self.repo.get_git_info(&self.config).await?;
73
74 {
75 let mut cached_context = self.cached_context.write().await;
76 *cached_context = Some(context.clone());
77 }
78 Ok(context)
79 }
80
81 fn optimize_prompt<F>(
94 &self,
95 config_clone: &Config,
96 system_prompt: &str,
97 mut context: CommitContext,
98 create_user_prompt_fn: F,
99 ) -> (CommitContext, String)
100 where
101 F: Fn(&CommitContext) -> String,
102 {
103 let token_limit = config_clone
105 .providers
106 .get(&self.provider_name)
107 .and_then(|p| p.token_limit)
108 .unwrap_or({
109 match self.provider_name.as_str() {
110 "openai" => 16_000, "anthropic" => 100_000, "google" | "groq" => 32_000, _ => 8_000, }
115 });
116
117 let optimizer = TokenOptimizer::new(token_limit);
119 let system_tokens = optimizer.count_tokens(system_prompt);
120
121 log_debug!("Token limit: {}", token_limit);
122 log_debug!("System prompt tokens: {}", system_tokens);
123
124 let context_token_limit = token_limit.saturating_sub(system_tokens + 1000);
127 log_debug!("Available tokens for context: {}", context_token_limit);
128
129 let user_prompt_before = create_user_prompt_fn(&context);
131 let total_tokens_before = system_tokens + optimizer.count_tokens(&user_prompt_before);
132 log_debug!("Total tokens before optimization: {}", total_tokens_before);
133
134 context.optimize(context_token_limit);
136
137 let user_prompt = create_user_prompt_fn(&context);
138 let user_tokens = optimizer.count_tokens(&user_prompt);
139 let total_tokens = system_tokens + user_tokens;
140
141 log_debug!("User prompt tokens after optimization: {}", user_tokens);
142 log_debug!("Total tokens after optimization: {}", total_tokens);
143
144 let final_user_prompt = if total_tokens > token_limit {
147 log_debug!(
148 "Total tokens {} still exceeds limit {}, truncating user prompt",
149 total_tokens,
150 token_limit
151 );
152 let max_user_tokens = token_limit.saturating_sub(system_tokens + 100);
153 optimizer.truncate_string(&user_prompt, max_user_tokens)
154 } else {
155 user_prompt
156 };
157
158 let final_tokens = system_tokens + optimizer.count_tokens(&final_user_prompt);
159 log_debug!(
160 "Final total tokens after potential truncation: {}",
161 final_tokens
162 );
163
164 (context, final_user_prompt)
165 }
166
167 pub async fn generate_message(
178 &self,
179 preset: &str,
180 instructions: &str,
181 ) -> anyhow::Result<GeneratedMessage> {
182 let mut config_clone = self.config.clone();
183
184 if preset.is_empty() {
186 config_clone.instruction_preset = "default".to_string();
187 } else {
188 let library = get_instruction_preset_library();
189 if let Some(preset_info) = library.get_preset(preset) {
190 if preset_info.preset_type == PresetType::Review {
191 log_debug!(
192 "Warning: Preset '{}' is review-specific, not ideal for commits",
193 preset
194 );
195 }
196 config_clone.instruction_preset = preset.to_string();
197 } else {
198 log_debug!("Preset '{}' not found, using default", preset);
199 config_clone.instruction_preset = "default".to_string();
200 }
201 }
202
203 config_clone.instructions = instructions.to_string();
204
205 let context = self.get_git_info().await?;
206
207 let system_prompt = create_system_prompt(&config_clone)?;
209
210 let (_, final_user_prompt) =
212 self.optimize_prompt(&config_clone, &system_prompt, context, create_user_prompt);
213
214 let mut generated_message = llm::get_message::<GeneratedMessage>(
215 &config_clone,
216 &self.provider_name,
217 &system_prompt,
218 &final_user_prompt,
219 )
220 .await?;
221
222 if !self.use_gitmoji {
224 generated_message.emoji = None;
225 }
226
227 Ok(generated_message)
228 }
229
230 pub async fn generate_review(
241 &self,
242 preset: &str,
243 instructions: &str,
244 ) -> anyhow::Result<GeneratedReview> {
245 let mut config_clone = self.config.clone();
246
247 if preset.is_empty() {
249 config_clone.instruction_preset = "default".to_string();
250 } else {
251 let library = get_instruction_preset_library();
252 if let Some(preset_info) = library.get_preset(preset) {
253 if preset_info.preset_type == PresetType::Commit {
254 log_debug!(
255 "Warning: Preset '{}' is commit-specific, not ideal for reviews",
256 preset
257 );
258 }
259 config_clone.instruction_preset = preset.to_string();
260 } else {
261 log_debug!("Preset '{}' not found, using default", preset);
262 config_clone.instruction_preset = "default".to_string();
263 }
264 }
265
266 config_clone.instructions = instructions.to_string();
267
268 let context = self.get_git_info().await?;
269
270 let system_prompt = super::prompt::create_review_system_prompt(&config_clone)?;
272
273 let (_, final_user_prompt) = self.optimize_prompt(
275 &config_clone,
276 &system_prompt,
277 context,
278 super::prompt::create_review_user_prompt,
279 );
280
281 llm::get_message::<GeneratedReview>(
282 &config_clone,
283 &self.provider_name,
284 &system_prompt,
285 &final_user_prompt,
286 )
287 .await
288 }
289
290 pub fn perform_commit(&self, message: &str) -> Result<CommitResult> {
300 let processed_message = process_commit_message(message.to_string(), self.use_gitmoji);
301 log_debug!("Performing commit with message: {}", processed_message);
302
303 if !self.verify {
304 log_debug!("Skipping pre-commit hook (verify=false)");
305 return self.repo.commit(&processed_message);
306 }
307
308 log_debug!("Executing pre-commit hook");
310 if let Err(e) = self.repo.execute_hook("pre-commit") {
311 log_debug!("Pre-commit hook failed: {}", e);
312 return Err(e);
313 }
314 log_debug!("Pre-commit hook executed successfully");
315
316 match self.repo.commit(&processed_message) {
318 Ok(result) => {
319 log_debug!("Executing post-commit hook");
321 if let Err(e) = self.repo.execute_hook("post-commit") {
322 log_debug!("Post-commit hook failed: {}", e);
323 }
325 log_debug!("Commit performed successfully");
326 Ok(result)
327 }
328 Err(e) => {
329 log_debug!("Commit failed: {}", e);
330 Err(e)
331 }
332 }
333 }
334
335 pub fn pre_commit(&self) -> Result<()> {
337 if self.verify {
338 self.repo.execute_hook("pre-commit")
339 } else {
340 Ok(())
341 }
342 }
343
344 pub fn create_message_channel(
346 &self,
347 ) -> (
348 mpsc::Sender<Result<GeneratedMessage>>,
349 mpsc::Receiver<Result<GeneratedMessage>>,
350 ) {
351 mpsc::channel(1)
352 }
353}