// omni_dev/claude/client.rs
1//! Claude client for commit message improvement.
2
3use anyhow::{Context, Result};
4use tracing::debug;
5
6use crate::claude::token_budget::{self, TokenBudget};
7use crate::claude::{ai::bedrock::BedrockAiClient, ai::claude::ClaudeAiClient};
8use crate::claude::{ai::AiClient, error::ClaudeError, prompts};
9use crate::data::{
10    amendments::AmendmentFile, context::CommitContext, DiffDetail, RepositoryView,
11    RepositoryViewForAI,
12};
13
/// Multiplier for YAML re-serialization overhead when calculating excess chars.
///
/// Accounts for indentation changes, literal block markers, and other
/// formatting differences when YAML is re-serialized after diff truncation.
/// The 10% cushion is a heuristic, not a measured bound.
const YAML_OVERHEAD_FACTOR: f64 = 1.10;
19
/// Result of fitting a prompt within the model's token budget.
///
/// Produced by `build_prompt_fitting_budget` after progressive diff
/// reduction (Full → Truncated → StatOnly → FileListOnly).
struct PromptWithBudget {
    /// The user prompt (serialized from a possibly-reduced view).
    user_prompt: String,
    /// The level of diff detail that was used.
    #[allow(dead_code)] // Retained for future diagnostics; set by all budget-fitting levels
    diff_detail: DiffDetail,
}
28
/// Claude client for commit message improvement.
///
/// Wraps any [`AiClient`] implementation (Anthropic API, Bedrock, or an
/// OpenAI-compatible backend) behind a single interface that handles
/// prompt budgeting, request dispatch, and response parsing.
pub struct ClaudeClient {
    /// AI client implementation.
    ai_client: Box<dyn AiClient>,
}
34
35impl ClaudeClient {
36    /// Creates a new Claude client with the provided AI client implementation.
37    pub fn new(ai_client: Box<dyn AiClient>) -> Self {
38        Self { ai_client }
39    }
40
41    /// Returns metadata about the AI client.
42    pub fn get_ai_client_metadata(&self) -> crate::claude::ai::AiClientMetadata {
43        self.ai_client.get_metadata()
44    }
45
46    /// Validates that the prompt fits within the model's token budget.
47    ///
48    /// Estimates token counts and logs utilization before each AI request.
49    /// Returns an error if the prompt exceeds available input tokens.
50    fn validate_prompt_budget(&self, system_prompt: &str, user_prompt: &str) -> Result<()> {
51        let metadata = self.ai_client.get_metadata();
52        let budget = TokenBudget::from_metadata(&metadata);
53        let estimate = budget.validate_prompt(system_prompt, user_prompt)?;
54
55        debug!(
56            model = %metadata.model,
57            estimated_tokens = estimate.estimated_tokens,
58            available_tokens = estimate.available_tokens,
59            utilization_pct = format!("{:.1}%", estimate.utilization_pct),
60            "Token budget check passed"
61        );
62
63        Ok(())
64    }
65
    /// Builds a user prompt that fits within the model's token budget,
    /// progressively reducing diff detail if necessary.
    ///
    /// Tries levels in order: Full → Truncated → StatOnly → FileListOnly.
    /// Logs a warning at each reduction level so the user knows the AI
    /// received less context.
    ///
    /// `build_user_prompt` wraps the serialized YAML view into the final
    /// user prompt; it is re-invoked at every reduction level because each
    /// level re-serializes a different view.
    ///
    /// Returns an error if serialization fails, or if even the
    /// file-list-only view exceeds the available input tokens.
    fn build_prompt_fitting_budget(
        &self,
        ai_view: RepositoryViewForAI,
        system_prompt: &str,
        build_user_prompt: impl Fn(&str) -> String,
    ) -> Result<PromptWithBudget> {
        let metadata = self.ai_client.get_metadata();
        let budget = TokenBudget::from_metadata(&metadata);

        // Level 1: Full diff
        let yaml = crate::data::to_yaml(&ai_view)
            .context("Failed to serialize repository view to YAML")?;
        let user_prompt = build_user_prompt(&yaml);

        // NOTE(review): `if let Ok(..)` treats *any* validation failure as
        // "over budget" and falls through to the next level; a non-budget
        // error would only surface via `?` at level 4 — confirm intended.
        if let Ok(estimate) = budget.validate_prompt(system_prompt, &user_prompt) {
            debug!(
                model = %metadata.model,
                estimated_tokens = estimate.estimated_tokens,
                available_tokens = estimate.available_tokens,
                utilization_pct = format!("{:.1}%", estimate.utilization_pct),
                diff_detail = %DiffDetail::Full,
                "Token budget check passed"
            );
            return Ok(PromptWithBudget {
                user_prompt,
                diff_detail: DiffDetail::Full,
            });
        }

        // Level 2: Truncated diff — calculate excess and trim
        // Convert the token overshoot into a character count, padded by
        // YAML_OVERHEAD_FACTOR to cover re-serialization overhead.
        let system_tokens = token_budget::estimate_tokens(system_prompt);
        let user_tokens = token_budget::estimate_tokens(&user_prompt);
        let excess_tokens =
            (system_tokens + user_tokens).saturating_sub(budget.available_input_tokens());
        let excess_chars = (token_budget::tokens_to_chars(excess_tokens) as f64
            * YAML_OVERHEAD_FACTOR)
            .ceil() as usize;

        let mut truncated_view = ai_view.clone();
        truncated_view.truncate_diffs(excess_chars);

        let yaml = crate::data::to_yaml(&truncated_view)
            .context("Failed to serialize truncated view to YAML")?;
        let user_prompt = build_user_prompt(&yaml);

        if let Ok(estimate) = budget.validate_prompt(system_prompt, &user_prompt) {
            debug!(
                model = %metadata.model,
                estimated_tokens = estimate.estimated_tokens,
                available_tokens = estimate.available_tokens,
                utilization_pct = format!("{:.1}%", estimate.utilization_pct),
                diff_detail = %DiffDetail::Truncated,
                "Token budget check passed after diff truncation"
            );
            tracing::warn!(
                "Diff content truncated to fit model context window ({})",
                metadata.model
            );
            return Ok(PromptWithBudget {
                user_prompt,
                diff_detail: DiffDetail::Truncated,
            });
        }

        // Level 3: Stat-only — replace diff content with stat summary
        let mut stat_view = ai_view.clone();
        stat_view.replace_diffs_with_stat();

        let yaml = crate::data::to_yaml(&stat_view)
            .context("Failed to serialize stat-only view to YAML")?;
        let user_prompt = build_user_prompt(&yaml);

        if let Ok(estimate) = budget.validate_prompt(system_prompt, &user_prompt) {
            debug!(
                model = %metadata.model,
                estimated_tokens = estimate.estimated_tokens,
                available_tokens = estimate.available_tokens,
                utilization_pct = format!("{:.1}%", estimate.utilization_pct),
                diff_detail = %DiffDetail::StatOnly,
                "Token budget check passed with stat-only diff"
            );
            tracing::warn!(
                "Full diff replaced with stat summary to fit model context window ({})",
                metadata.model
            );
            return Ok(PromptWithBudget {
                user_prompt,
                diff_detail: DiffDetail::StatOnly,
            });
        }

        // Level 4: File-list-only — remove all diff content
        // `ai_view` is moved here (last use), so no clone is needed.
        let mut minimal_view = ai_view;
        minimal_view.remove_diffs();

        let yaml = crate::data::to_yaml(&minimal_view)
            .context("Failed to serialize minimal view to YAML")?;
        let user_prompt = build_user_prompt(&yaml);

        // Final level: propagate the validation error if even this fails.
        let estimate = budget.validate_prompt(system_prompt, &user_prompt)?;
        debug!(
            model = %metadata.model,
            estimated_tokens = estimate.estimated_tokens,
            available_tokens = estimate.available_tokens,
            utilization_pct = format!("{:.1}%", estimate.utilization_pct),
            diff_detail = %DiffDetail::FileListOnly,
            "Token budget check passed with file-list-only"
        );
        tracing::warn!(
            "All diff content removed to fit model context window — only file list available ({})",
            metadata.model
        );
        Ok(PromptWithBudget {
            user_prompt,
            diff_detail: DiffDetail::FileListOnly,
        })
    }
189
190    /// Sends a raw prompt to the AI client and returns the text response.
191    pub async fn send_message(&self, system_prompt: &str, user_prompt: &str) -> Result<String> {
192        self.validate_prompt_budget(system_prompt, user_prompt)?;
193        self.ai_client
194            .send_request(system_prompt, user_prompt)
195            .await
196    }
197
198    /// Creates a new Claude client with API key from environment variables.
199    pub fn from_env(model: String) -> Result<Self> {
200        // Try to get API key from environment variables
201        let api_key = std::env::var("CLAUDE_API_KEY")
202            .or_else(|_| std::env::var("ANTHROPIC_API_KEY"))
203            .map_err(|_| ClaudeError::ApiKeyNotFound)?;
204
205        let ai_client = ClaudeAiClient::new(model, api_key, None);
206        Ok(Self::new(Box::new(ai_client)))
207    }
208
209    /// Generates commit message amendments from repository view.
210    pub async fn generate_amendments(&self, repo_view: &RepositoryView) -> Result<AmendmentFile> {
211        self.generate_amendments_with_options(repo_view, false)
212            .await
213    }
214
215    /// Generates commit message amendments from repository view with options.
216    ///
217    /// If `fresh` is true, ignores existing commit messages and generates new ones
218    /// based solely on the diff content.
219    pub async fn generate_amendments_with_options(
220        &self,
221        repo_view: &RepositoryView,
222        fresh: bool,
223    ) -> Result<AmendmentFile> {
224        // Convert to AI-enhanced view with diff content
225        let ai_repo_view =
226            RepositoryViewForAI::from_repository_view_with_options(repo_view.clone(), fresh)
227                .context("Failed to enhance repository view with diff content")?;
228
229        // Build prompt with progressive diff reduction if needed
230        let fitted =
231            self.build_prompt_fitting_budget(ai_repo_view, prompts::SYSTEM_PROMPT, |yaml| {
232                prompts::generate_user_prompt(yaml)
233            })?;
234
235        // Send request using AI client
236        let content = self
237            .ai_client
238            .send_request(prompts::SYSTEM_PROMPT, &fitted.user_prompt)
239            .await?;
240
241        // Parse YAML response to AmendmentFile
242        self.parse_amendment_response(&content)
243    }
244
245    /// Generates contextual commit message amendments with enhanced intelligence.
246    pub async fn generate_contextual_amendments(
247        &self,
248        repo_view: &RepositoryView,
249        context: &CommitContext,
250    ) -> Result<AmendmentFile> {
251        self.generate_contextual_amendments_with_options(repo_view, context, false)
252            .await
253    }
254
255    /// Generates contextual commit message amendments with options.
256    ///
257    /// If `fresh` is true, ignores existing commit messages and generates new ones
258    /// based solely on the diff content.
259    pub async fn generate_contextual_amendments_with_options(
260        &self,
261        repo_view: &RepositoryView,
262        context: &CommitContext,
263        fresh: bool,
264    ) -> Result<AmendmentFile> {
265        // Convert to AI-enhanced view with diff content
266        let ai_repo_view =
267            RepositoryViewForAI::from_repository_view_with_options(repo_view.clone(), fresh)
268                .context("Failed to enhance repository view with diff content")?;
269
270        // Generate contextual prompts using intelligence
271        let prompt_style = self.ai_client.get_metadata().prompt_style();
272        let system_prompt =
273            prompts::generate_contextual_system_prompt_for_provider(context, prompt_style);
274
275        // Debug logging to troubleshoot custom commit type issue
276        match &context.project.commit_guidelines {
277            Some(guidelines) => {
278                debug!(length = guidelines.len(), "Project commit guidelines found");
279                debug!(guidelines = %guidelines, "Commit guidelines content");
280            }
281            None => {
282                debug!("No project commit guidelines found");
283            }
284        }
285
286        // Build prompt with progressive diff reduction if needed
287        let fitted = self.build_prompt_fitting_budget(ai_repo_view, &system_prompt, |yaml| {
288            prompts::generate_contextual_user_prompt(yaml, context)
289        })?;
290
291        // Send request using AI client
292        let content = self
293            .ai_client
294            .send_request(&system_prompt, &fitted.user_prompt)
295            .await?;
296
297        // Parse YAML response to AmendmentFile
298        self.parse_amendment_response(&content)
299    }
300
301    /// Parses Claude's YAML response into an AmendmentFile.
302    fn parse_amendment_response(&self, content: &str) -> Result<AmendmentFile> {
303        // Extract YAML from potential markdown wrapper
304        let yaml_content = self.extract_yaml_from_response(content);
305
306        // Try to parse YAML using our hybrid YAML parser
307        let amendment_file: AmendmentFile = crate::data::from_yaml(&yaml_content).map_err(|e| {
308            debug!(
309                error = %e,
310                content_length = content.len(),
311                yaml_length = yaml_content.len(),
312                "YAML parsing failed"
313            );
314            debug!(content = %content, "Raw Claude response");
315            debug!(yaml = %yaml_content, "Extracted YAML content");
316
317            // Try to provide more helpful error messages for common issues
318            if yaml_content.lines().any(|line| line.contains('\t')) {
319                ClaudeError::AmendmentParsingFailed("YAML parsing error: Found tab characters. YAML requires spaces for indentation.".to_string())
320            } else if yaml_content.lines().any(|line| line.trim().starts_with('-') && !line.trim().starts_with("- ")) {
321                ClaudeError::AmendmentParsingFailed("YAML parsing error: List items must have a space after the dash (- item).".to_string())
322            } else {
323                ClaudeError::AmendmentParsingFailed(format!("YAML parsing error: {}", e))
324            }
325        })?;
326
327        // Validate the parsed amendments
328        amendment_file
329            .validate()
330            .map_err(|e| ClaudeError::AmendmentParsingFailed(format!("Validation error: {}", e)))?;
331
332        Ok(amendment_file)
333    }
334
335    /// Generates AI-powered PR content (title + description) from repository view and template.
336    pub async fn generate_pr_content(
337        &self,
338        repo_view: &RepositoryView,
339        pr_template: &str,
340    ) -> Result<crate::cli::git::PrContent> {
341        // Convert to AI-enhanced view with diff content
342        let ai_repo_view = RepositoryViewForAI::from_repository_view(repo_view.clone())
343            .context("Failed to enhance repository view with diff content")?;
344
345        // Build prompt with progressive diff reduction if needed
346        let fitted = self.build_prompt_fitting_budget(
347            ai_repo_view,
348            prompts::PR_GENERATION_SYSTEM_PROMPT,
349            |yaml| prompts::generate_pr_description_prompt(yaml, pr_template),
350        )?;
351
352        // Send request using AI client
353        let content = self
354            .ai_client
355            .send_request(prompts::PR_GENERATION_SYSTEM_PROMPT, &fitted.user_prompt)
356            .await?;
357
358        // The AI response should be treated as YAML directly
359        let yaml_content = content.trim();
360
361        // Parse the YAML response using our hybrid YAML parser
362        let pr_content: crate::cli::git::PrContent = crate::data::from_yaml(yaml_content).context(
363            "Failed to parse AI response as YAML. AI may have returned malformed output.",
364        )?;
365
366        Ok(pr_content)
367    }
368
369    /// Generates AI-powered PR content with project context (title + description).
370    pub async fn generate_pr_content_with_context(
371        &self,
372        repo_view: &RepositoryView,
373        pr_template: &str,
374        context: &crate::data::context::CommitContext,
375    ) -> Result<crate::cli::git::PrContent> {
376        // Convert to AI-enhanced view with diff content
377        let ai_repo_view = RepositoryViewForAI::from_repository_view(repo_view.clone())
378            .context("Failed to enhance repository view with diff content")?;
379
380        // Generate contextual prompts for PR description with provider-specific handling
381        let prompt_style = self.ai_client.get_metadata().prompt_style();
382        let system_prompt =
383            prompts::generate_pr_system_prompt_with_context_for_provider(context, prompt_style);
384
385        // Build prompt with progressive diff reduction if needed
386        let fitted = self.build_prompt_fitting_budget(ai_repo_view, &system_prompt, |yaml| {
387            prompts::generate_pr_description_prompt_with_context(yaml, pr_template, context)
388        })?;
389
390        // Send request using AI client
391        let content = self
392            .ai_client
393            .send_request(&system_prompt, &fitted.user_prompt)
394            .await?;
395
396        // The AI response should be treated as YAML directly
397        let yaml_content = content.trim();
398
399        debug!(
400            content_length = content.len(),
401            yaml_content_length = yaml_content.len(),
402            yaml_content = %yaml_content,
403            "Extracted YAML content from AI response"
404        );
405
406        // Parse the YAML response using our hybrid YAML parser
407        let pr_content: crate::cli::git::PrContent = crate::data::from_yaml(yaml_content).context(
408            "Failed to parse AI response as YAML. AI may have returned malformed output.",
409        )?;
410
411        debug!(
412            parsed_title = %pr_content.title,
413            parsed_description_length = pr_content.description.len(),
414            parsed_description_preview = %pr_content.description.lines().take(3).collect::<Vec<_>>().join("\\n"),
415            "Successfully parsed PR content from YAML"
416        );
417
418        Ok(pr_content)
419    }
420
421    /// Checks commit messages against guidelines and returns a report.
422    ///
423    /// Validates commit messages against project guidelines or defaults,
424    /// returning a structured report with issues and suggestions.
425    pub async fn check_commits(
426        &self,
427        repo_view: &RepositoryView,
428        guidelines: Option<&str>,
429        include_suggestions: bool,
430    ) -> Result<crate::data::check::CheckReport> {
431        self.check_commits_with_scopes(repo_view, guidelines, &[], include_suggestions)
432            .await
433    }
434
435    /// Checks commit messages against guidelines with valid scopes and returns a report.
436    ///
437    /// Validates commit messages against project guidelines or defaults,
438    /// using the provided valid scopes for scope validation.
439    pub async fn check_commits_with_scopes(
440        &self,
441        repo_view: &RepositoryView,
442        guidelines: Option<&str>,
443        valid_scopes: &[crate::data::context::ScopeDefinition],
444        include_suggestions: bool,
445    ) -> Result<crate::data::check::CheckReport> {
446        self.check_commits_with_retry(repo_view, guidelines, valid_scopes, include_suggestions, 2)
447            .await
448    }
449
    /// Checks commit messages with retry logic for parse failures.
    ///
    /// The prompt is built once (with progressive diff reduction); only the
    /// AI request and response parsing are retried. `max_retries` counts
    /// retries *after* the first attempt, so the loop runs at most
    /// `max_retries + 1` times.
    async fn check_commits_with_retry(
        &self,
        repo_view: &RepositoryView,
        guidelines: Option<&str>,
        valid_scopes: &[crate::data::context::ScopeDefinition],
        include_suggestions: bool,
        max_retries: u32,
    ) -> Result<crate::data::check::CheckReport> {
        // Convert to AI-enhanced view with diff content
        let mut ai_repo_view = RepositoryViewForAI::from_repository_view(repo_view.clone())
            .context("Failed to enhance repository view with diff content")?;

        // Run deterministic pre-validation checks before sending to AI
        for commit in &mut ai_repo_view.commits {
            commit.run_pre_validation_checks();
        }

        // Generate system prompt with scopes
        let system_prompt =
            prompts::generate_check_system_prompt_with_scopes(guidelines, valid_scopes);

        // Build prompt with progressive diff reduction if needed
        let fitted = self.build_prompt_fitting_budget(ai_repo_view, &system_prompt, |yaml| {
            prompts::generate_check_user_prompt(yaml, include_suggestions)
        })?;

        let mut last_error = None;

        for attempt in 0..=max_retries {
            // Send request using AI client
            match self
                .ai_client
                .send_request(&system_prompt, &fitted.user_prompt)
                .await
            {
                Ok(content) => match self.parse_check_response(&content, repo_view) {
                    Ok(report) => return Ok(report),
                    Err(e) => {
                        // Warn on stderr only when another attempt remains.
                        if attempt < max_retries {
                            eprintln!(
                                "warning: failed to parse AI response (attempt {}), retrying...",
                                attempt + 1
                            );
                            debug!(error = %e, attempt = attempt + 1, "Check response parse failed, retrying");
                        }
                        last_error = Some(e);
                    }
                },
                Err(e) => {
                    // Warn on stderr only when another attempt remains.
                    if attempt < max_retries {
                        eprintln!(
                            "warning: AI request failed (attempt {}), retrying...",
                            attempt + 1
                        );
                        debug!(error = %e, attempt = attempt + 1, "AI request failed, retrying");
                    }
                    last_error = Some(e);
                }
            }
        }

        // The loop always runs at least once, so `last_error` is Some here;
        // the fallback message is purely defensive.
        Err(last_error.unwrap_or_else(|| anyhow::anyhow!("Check failed after retries")))
    }
514
515    /// Parses the check response from AI.
516    fn parse_check_response(
517        &self,
518        content: &str,
519        repo_view: &RepositoryView,
520    ) -> Result<crate::data::check::CheckReport> {
521        use crate::data::check::{
522            AiCheckResponse, CheckReport, CommitCheckResult as CheckResultType,
523        };
524
525        // Extract YAML from potential markdown wrapper
526        let yaml_content = self.extract_yaml_from_check_response(content);
527
528        // Parse YAML response
529        let ai_response: AiCheckResponse = crate::data::from_yaml(&yaml_content).map_err(|e| {
530            debug!(
531                error = %e,
532                content_length = content.len(),
533                yaml_length = yaml_content.len(),
534                "Check YAML parsing failed"
535            );
536            debug!(content = %content, "Raw AI response");
537            debug!(yaml = %yaml_content, "Extracted YAML content");
538            ClaudeError::AmendmentParsingFailed(format!("Check response parsing error: {}", e))
539        })?;
540
541        // Create a map of commit hashes to original messages for lookup
542        let commit_messages: std::collections::HashMap<&str, &str> = repo_view
543            .commits
544            .iter()
545            .map(|c| (c.hash.as_str(), c.original_message.as_str()))
546            .collect();
547
548        // Convert AI response to CheckReport
549        let results: Vec<CheckResultType> = ai_response
550            .checks
551            .into_iter()
552            .map(|check| {
553                let mut result: CheckResultType = check.into();
554                // Fill in the original message from repo_view
555                if let Some(msg) = commit_messages.get(result.hash.as_str()) {
556                    result.message = msg.lines().next().unwrap_or("").to_string();
557                } else {
558                    // Try to find by prefix
559                    for (hash, msg) in &commit_messages {
560                        if hash.starts_with(&result.hash) || result.hash.starts_with(*hash) {
561                            result.message = msg.lines().next().unwrap_or("").to_string();
562                            break;
563                        }
564                    }
565                }
566                result
567            })
568            .collect();
569
570        Ok(CheckReport::new(results))
571    }
572
573    /// Extracts YAML content from check response, handling markdown wrappers.
574    fn extract_yaml_from_check_response(&self, content: &str) -> String {
575        let content = content.trim();
576
577        // If content already starts with "checks:", it's pure YAML - return as-is
578        if content.starts_with("checks:") {
579            return content.to_string();
580        }
581
582        // Try to extract from ```yaml blocks first
583        if let Some(yaml_start) = content.find("```yaml") {
584            if let Some(yaml_content) = content[yaml_start + 7..].split("```").next() {
585                return yaml_content.trim().to_string();
586            }
587        }
588
589        // Try to extract from generic ``` blocks
590        if let Some(code_start) = content.find("```") {
591            if let Some(code_content) = content[code_start + 3..].split("```").next() {
592                let potential_yaml = code_content.trim();
593                // Check if it looks like YAML (starts with expected structure)
594                if potential_yaml.starts_with("checks:") {
595                    return potential_yaml.to_string();
596                }
597            }
598        }
599
600        // If no markdown blocks found or extraction failed, return trimmed content
601        content.to_string()
602    }
603
604    /// Refines individually-generated amendments for cross-commit coherence.
605    ///
606    /// Sends commit summaries and proposed messages to the AI for a second pass
607    /// that normalizes scopes, detects rename chains, and removes redundancy.
608    pub async fn refine_amendments_coherence(
609        &self,
610        items: &[(crate::data::amendments::Amendment, String)],
611    ) -> Result<AmendmentFile> {
612        let system_prompt = prompts::AMENDMENT_COHERENCE_SYSTEM_PROMPT;
613        let user_prompt = prompts::generate_amendment_coherence_user_prompt(items);
614
615        self.validate_prompt_budget(system_prompt, &user_prompt)?;
616
617        let content = self
618            .ai_client
619            .send_request(system_prompt, &user_prompt)
620            .await?;
621
622        self.parse_amendment_response(&content)
623    }
624
625    /// Refines individually-generated check results for cross-commit coherence.
626    ///
627    /// Sends commit summaries and check outcomes to the AI for a second pass
628    /// that ensures consistent severity, detects cross-commit issues, and
629    /// normalizes scope validation.
630    pub async fn refine_checks_coherence(
631        &self,
632        items: &[(crate::data::check::CommitCheckResult, String)],
633        repo_view: &RepositoryView,
634    ) -> Result<crate::data::check::CheckReport> {
635        let system_prompt = prompts::CHECK_COHERENCE_SYSTEM_PROMPT;
636        let user_prompt = prompts::generate_check_coherence_user_prompt(items);
637
638        self.validate_prompt_budget(system_prompt, &user_prompt)?;
639
640        let content = self
641            .ai_client
642            .send_request(system_prompt, &user_prompt)
643            .await?;
644
645        self.parse_check_response(&content, repo_view)
646    }
647
648    /// Extracts YAML content from Claude response, handling markdown wrappers.
649    fn extract_yaml_from_response(&self, content: &str) -> String {
650        let content = content.trim();
651
652        // If content already starts with "amendments:", it's pure YAML - return as-is
653        if content.starts_with("amendments:") {
654            return content.to_string();
655        }
656
657        // Try to extract from ```yaml blocks first
658        if let Some(yaml_start) = content.find("```yaml") {
659            if let Some(yaml_content) = content[yaml_start + 7..].split("```").next() {
660                return yaml_content.trim().to_string();
661            }
662        }
663
664        // Try to extract from generic ``` blocks
665        if let Some(code_start) = content.find("```") {
666            if let Some(code_content) = content[code_start + 3..].split("```").next() {
667                let potential_yaml = code_content.trim();
668                // Check if it looks like YAML (starts with expected structure)
669                if potential_yaml.starts_with("amendments:") {
670                    return potential_yaml.to_string();
671                }
672            }
673        }
674
675        // If no markdown blocks found or extraction failed, return trimmed content
676        content.to_string()
677    }
678}
679
680/// Validates a beta header against the model registry.
681fn validate_beta_header(model: &str, beta_header: &Option<(String, String)>) -> Result<()> {
682    if let Some((ref key, ref value)) = beta_header {
683        let registry = crate::claude::model_config::get_model_registry();
684        let supported = registry.get_beta_headers(model);
685        if !supported
686            .iter()
687            .any(|bh| bh.key == *key && bh.value == *value)
688        {
689            let available: Vec<String> = supported
690                .iter()
691                .map(|bh| format!("{}:{}", bh.key, bh.value))
692                .collect();
693            if available.is_empty() {
694                anyhow::bail!("Model '{}' does not support any beta headers", model);
695            } else {
696                anyhow::bail!(
697                    "Beta header '{}:{}' is not supported for model '{}'. Supported: {}",
698                    key,
699                    value,
700                    model,
701                    available.join(", ")
702                );
703            }
704        }
705    }
706    Ok(())
707}
708
709/// Creates a default Claude client using environment variables and settings.
710pub fn create_default_claude_client(
711    model: Option<String>,
712    beta_header: Option<(String, String)>,
713) -> Result<ClaudeClient> {
714    use crate::claude::ai::openai::OpenAiAiClient;
715    use crate::utils::settings::{get_env_var, get_env_vars};
716
717    // Check if we should use OpenAI-compatible API (OpenAI or Ollama)
718    let use_openai = get_env_var("USE_OPENAI")
719        .map(|val| val == "true")
720        .unwrap_or(false);
721
722    let use_ollama = get_env_var("USE_OLLAMA")
723        .map(|val| val == "true")
724        .unwrap_or(false);
725
726    // Check if we should use Bedrock
727    let use_bedrock = get_env_var("CLAUDE_CODE_USE_BEDROCK")
728        .map(|val| val == "true")
729        .unwrap_or(false);
730
731    debug!(
732        use_openai = use_openai,
733        use_ollama = use_ollama,
734        use_bedrock = use_bedrock,
735        "Client selection flags"
736    );
737
738    // Handle Ollama configuration
739    if use_ollama {
740        let ollama_model = model
741            .or_else(|| get_env_var("OLLAMA_MODEL").ok())
742            .unwrap_or_else(|| "llama2".to_string());
743        validate_beta_header(&ollama_model, &beta_header)?;
744        let base_url = get_env_var("OLLAMA_BASE_URL").ok();
745        let ai_client = OpenAiAiClient::new_ollama(ollama_model, base_url, beta_header);
746        return Ok(ClaudeClient::new(Box::new(ai_client)));
747    }
748
749    // Handle OpenAI configuration
750    if use_openai {
751        debug!("Creating OpenAI client");
752        let openai_model = model
753            .or_else(|| get_env_var("OPENAI_MODEL").ok())
754            .unwrap_or_else(|| "gpt-5".to_string());
755        debug!(openai_model = %openai_model, "Selected OpenAI model");
756        validate_beta_header(&openai_model, &beta_header)?;
757
758        let api_key = get_env_vars(&["OPENAI_API_KEY", "OPENAI_AUTH_TOKEN"]).map_err(|e| {
759            debug!(error = ?e, "Failed to get OpenAI API key");
760            ClaudeError::ApiKeyNotFound
761        })?;
762        debug!("OpenAI API key found");
763
764        let ai_client = OpenAiAiClient::new_openai(openai_model, api_key, beta_header);
765        debug!("OpenAI client created successfully");
766        return Ok(ClaudeClient::new(Box::new(ai_client)));
767    }
768
769    // For Claude clients, try to get model from env vars or use default
770    let claude_model = model
771        .or_else(|| get_env_var("ANTHROPIC_MODEL").ok())
772        .unwrap_or_else(|| "claude-opus-4-1-20250805".to_string());
773    validate_beta_header(&claude_model, &beta_header)?;
774
775    if use_bedrock {
776        // Use Bedrock AI client
777        let auth_token =
778            get_env_var("ANTHROPIC_AUTH_TOKEN").map_err(|_| ClaudeError::ApiKeyNotFound)?;
779
780        let base_url =
781            get_env_var("ANTHROPIC_BEDROCK_BASE_URL").map_err(|_| ClaudeError::ApiKeyNotFound)?;
782
783        let ai_client = BedrockAiClient::new(claude_model, auth_token, base_url, beta_header);
784        return Ok(ClaudeClient::new(Box::new(ai_client)));
785    }
786
787    // Default: use standard Claude AI client
788    debug!("Falling back to Claude client");
789    let api_key = get_env_vars(&[
790        "CLAUDE_API_KEY",
791        "ANTHROPIC_API_KEY",
792        "ANTHROPIC_AUTH_TOKEN",
793    ])
794    .map_err(|_| ClaudeError::ApiKeyNotFound)?;
795
796    let ai_client = ClaudeAiClient::new(claude_model, api_key, beta_header);
797    debug!("Claude client created successfully");
798    Ok(ClaudeClient::new(Box::new(ai_client)))
799}