// aptu_core/ai/provider.rs

1// SPDX-License-Identifier: Apache-2.0
2
3//! AI provider trait and shared implementations.
4//!
5//! Defines the `AiProvider` trait that all AI providers must implement,
6//! along with default implementations for shared logic like prompt building,
7//! request sending, and response parsing.
8
9use anyhow::{Context, Result};
10use async_trait::async_trait;
11use reqwest::Client;
12use secrecy::SecretString;
13use tracing::{debug, instrument};
14
15use super::AiResponse;
16use super::types::{
17    ChatCompletionRequest, ChatCompletionResponse, ChatMessage, IssueDetails, ResponseFormat,
18    TriageResponse,
19};
20use crate::history::AiStats;
21
22/// Parses JSON response from AI provider, detecting truncated responses.
23///
24/// If the JSON parsing fails with an EOF error (indicating the response was cut off),
25/// returns a `TruncatedResponse` error that can be retried. Other JSON errors are
26/// wrapped as `InvalidAIResponse`.
27///
28/// # Arguments
29///
30/// * `text` - The JSON text to parse
31/// * `provider` - The name of the AI provider (for error context)
32///
33/// # Returns
34///
35/// Parsed value of type T, or an error if parsing fails
36fn parse_ai_json<T: serde::de::DeserializeOwned>(text: &str, provider: &str) -> Result<T> {
37    match serde_json::from_str::<T>(text) {
38        Ok(value) => Ok(value),
39        Err(e) => {
40            // Check if this is an EOF error (truncated response)
41            if e.is_eof() {
42                Err(anyhow::anyhow!(
43                    crate::error::AptuError::TruncatedResponse {
44                        provider: provider.to_string(),
45                    }
46                ))
47            } else {
48                Err(anyhow::anyhow!(crate::error::AptuError::InvalidAIResponse(
49                    e
50                )))
51            }
52        }
53    }
54}
55
/// Maximum length for issue body to stay within token limits.
/// Compared against `String::len()`, so this is a byte count, not characters.
pub const MAX_BODY_LENGTH: usize = 4000;

/// Maximum number of comments to include in the prompt.
pub const MAX_COMMENTS: usize = 5;

/// Maximum number of files to include in PR review prompt.
pub const MAX_FILES: usize = 20;

/// Maximum total diff size (in characters) for PR review prompt.
pub const MAX_TOTAL_DIFF_SIZE: usize = 50_000;

/// Maximum number of labels to include in the prompt.
pub const MAX_LABELS: usize = 30;

/// Maximum number of milestones to include in the prompt.
pub const MAX_MILESTONES: usize = 10;
73
74/// AI provider trait for issue triage and creation.
75///
76/// Defines the interface that all AI providers must implement.
77/// Default implementations are provided for shared logic.
78#[async_trait]
79pub trait AiProvider: Send + Sync {
    /// Returns the name of the provider (e.g., "gemini", "openrouter").
    fn name(&self) -> &str;

    /// Returns the API URL for this provider.
    fn api_url(&self) -> &str;

    /// Returns the environment variable name for the API key
    /// (surfaced in error messages, e.g. on HTTP 401).
    fn api_key_env(&self) -> &str;

    /// Returns the HTTP client for making requests.
    fn http_client(&self) -> &Client;

    /// Returns the API key for authentication (sent as a bearer token).
    fn api_key(&self) -> &SecretString;

    /// Returns the model name.
    fn model(&self) -> &str;

    /// Returns the maximum tokens for API responses.
    fn max_tokens(&self) -> u32;

    /// Returns the temperature for API requests.
    fn temperature(&self) -> f32;
103
104    /// Returns the maximum retry attempts for rate-limited requests.
105    ///
106    /// Default implementation returns 3. Providers can override
107    /// to use a different retry limit.
108    fn max_attempts(&self) -> u32 {
109        3
110    }
111
    /// Returns the circuit breaker for this provider (optional).
    ///
    /// Default implementation returns None (no circuit breaking). Providers
    /// can override to provide circuit breaker functionality; when present,
    /// `send_and_parse` consults it before sending and records successes.
    fn circuit_breaker(&self) -> Option<&super::CircuitBreaker> {
        None
    }
119
120    /// Builds HTTP headers for API requests.
121    ///
122    /// Default implementation includes Authorization and Content-Type headers.
123    /// Providers can override to add custom headers.
124    fn build_headers(&self) -> reqwest::header::HeaderMap {
125        let mut headers = reqwest::header::HeaderMap::new();
126        if let Ok(val) = "application/json".parse() {
127            headers.insert("Content-Type", val);
128        }
129        headers
130    }
131
    /// Validates the model configuration.
    ///
    /// Default implementation does nothing. Providers can override
    /// to enforce constraints (e.g., free tier validation).
    ///
    /// # Errors
    ///
    /// The default never fails; overrides return an error when the
    /// configured model violates a provider-specific constraint.
    fn validate_model(&self) -> Result<()> {
        Ok(())
    }
139
    /// Sends a chat completion request to the provider's API (HTTP-only, no retry).
    ///
    /// Default implementation handles HTTP headers, error responses (401, 429).
    /// Does not include retry logic - use `send_and_parse()` for retry behavior.
    ///
    /// # Errors
    ///
    /// Fails on network/send errors, HTTP 401 (bad key), HTTP 429
    /// (`AptuError::RateLimited`, retryable), other non-success statuses,
    /// and response bodies that do not deserialize as `ChatCompletionResponse`.
    #[instrument(skip(self, request), fields(provider = self.name(), model = self.model()))]
    async fn send_request_inner(
        &self,
        request: &ChatCompletionRequest,
    ) -> Result<ChatCompletionResponse> {
        use secrecy::ExposeSecret;
        use tracing::warn;

        use crate::error::AptuError;

        let mut req = self.http_client().post(self.api_url());

        // Add Authorization header (bearer token; the secret is only exposed here)
        req = req.header(
            "Authorization",
            format!("Bearer {}", self.api_key().expose_secret()),
        );

        // Add custom headers from provider (Content-Type plus any provider extras)
        for (key, value) in &self.build_headers() {
            req = req.header(key.clone(), value.clone());
        }

        let response = req
            .json(request)
            .send()
            .await
            .context(format!("Failed to send request to {} API", self.name()))?;

        // Check for HTTP errors before attempting to deserialize the body
        let status = response.status();
        if !status.is_success() {
            if status.as_u16() == 401 {
                anyhow::bail!(
                    "Invalid {} API key. Check your {} environment variable.",
                    self.name(),
                    self.api_key_env()
                );
            } else if status.as_u16() == 429 {
                warn!("Rate limited by {} API", self.name());
                // Parse Retry-After header (seconds), default to 0 if not present
                // NOTE(review): per RFC 9110, Retry-After may also be an HTTP-date;
                // such values fail the u64 parse and fall back to 0 here — confirm
                // whether any targeted provider sends date-form values.
                let retry_after = response
                    .headers()
                    .get("Retry-After")
                    .and_then(|h| h.to_str().ok())
                    .and_then(|s| s.parse::<u64>().ok())
                    .unwrap_or(0);
                debug!(retry_after, "Parsed Retry-After header");
                // Typed error so `send_and_parse` can honor `retry_after`.
                return Err(AptuError::RateLimited {
                    provider: self.name().to_string(),
                    retry_after,
                }
                .into());
            }
            // Other non-success statuses: surface the raw body for diagnostics.
            let error_body = response.text().await.unwrap_or_default();
            anyhow::bail!(
                "{} API error (HTTP {}): {}",
                self.name(),
                status.as_u16(),
                error_body
            );
        }

        // Parse response
        let completion: ChatCompletionResponse = response
            .json()
            .await
            .context(format!("Failed to parse {} API response", self.name()))?;

        Ok(completion)
    }
215
    /// Sends a chat completion request and parses the response with retry logic.
    ///
    /// This method wraps both HTTP request and JSON parsing in a single retry loop,
    /// allowing truncated responses to be retried. Includes circuit breaker handling.
    ///
    /// # Arguments
    ///
    /// * `request` - The chat completion request to send
    ///
    /// # Returns
    ///
    /// A tuple of (parsed response, stats) extracted from the API response
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - The circuit breaker is open (`AptuError::CircuitOpen`)
    /// - API request fails (network, timeout, rate limit)
    /// - Response cannot be parsed as valid JSON (including truncated responses)
    #[instrument(skip(self, request), fields(provider = self.name(), model = self.model()))]
    async fn send_and_parse<T: serde::de::DeserializeOwned + Send>(
        &self,
        request: &ChatCompletionRequest,
    ) -> Result<(T, AiStats)> {
        use tracing::{info, warn};

        use crate::error::AptuError;
        use crate::retry::{extract_retry_after, is_retryable_anyhow};

        // Check circuit breaker before attempting request; fail fast while open.
        if let Some(cb) = self.circuit_breaker()
            && cb.is_open()
        {
            return Err(AptuError::CircuitOpen.into());
        }

        // Start timing (outside retry loop to measure total time including retries)
        let start = std::time::Instant::now();

        // Custom retry loop that respects retry_after from RateLimited errors
        let mut attempt: u32 = 0;
        let max_attempts: u32 = self.max_attempts();

        // Helper function to avoid closure-in-expression clippy warning.
        // One full attempt = HTTP call + content extraction + JSON parse;
        // keeping the parse inside the loop lets truncated responses retry.
        #[allow(clippy::items_after_statements)]
        async fn try_request<T: serde::de::DeserializeOwned>(
            provider: &(impl AiProvider + ?Sized),
            request: &ChatCompletionRequest,
        ) -> Result<(T, ChatCompletionResponse)> {
            // Send HTTP request
            let completion = provider.send_request_inner(request).await?;

            // Extract message content (only the first choice is used)
            let content = completion
                .choices
                .first()
                .map(|c| c.message.content.clone())
                .context("No response from AI model")?;

            debug!(response_length = content.len(), "Received AI response");

            // Parse JSON response (inside retry loop, so truncated responses are retried)
            let parsed: T = parse_ai_json(&content, provider.name())?;

            Ok((parsed, completion))
        }

        let (parsed, completion): (T, ChatCompletionResponse) = loop {
            attempt += 1;

            let result = try_request(self, request).await;

            match result {
                Ok(success) => break success,
                Err(err) => {
                    // Check if error is retryable; give up once attempts are exhausted.
                    if !is_retryable_anyhow(&err) || attempt >= max_attempts {
                        return Err(err);
                    }

                    // Extract retry_after if present, otherwise use exponential backoff
                    let delay = if let Some(retry_after_duration) = extract_retry_after(&err) {
                        debug!(
                            retry_after_secs = retry_after_duration.as_secs(),
                            "Using Retry-After value from rate limit error"
                        );
                        retry_after_duration
                    } else {
                        // Use exponential backoff with jitter: 1s, 2s, 4s + 0-500ms
                        let backoff_secs = 2_u64.pow(attempt.saturating_sub(1));
                        let jitter_ms = fastrand::u64(0..500);
                        std::time::Duration::from_millis(backoff_secs * 1000 + jitter_ms)
                    };

                    let error_msg = err.to_string();
                    warn!(
                        error = %error_msg,
                        delay_secs = delay.as_secs(),
                        attempt,
                        max_attempts,
                        "Retrying after error"
                    );

                    // Drop err before await to avoid holding non-Send value across await
                    drop(err);
                    tokio::time::sleep(delay).await;
                }
            }
        };

        // Record success in circuit breaker.
        // NOTE(review): no matching failure-recording call is visible on the
        // error return paths above — confirm failures are recorded elsewhere.
        if let Some(cb) = self.circuit_breaker() {
            cb.record_success();
        }

        // Calculate duration (total time including any retries)
        #[allow(clippy::cast_possible_truncation)]
        let duration_ms = start.elapsed().as_millis() as u64;

        // Build AI stats from usage info (trust API's cost field)
        let (input_tokens, output_tokens, cost_usd) = if let Some(usage) = completion.usage {
            (usage.prompt_tokens, usage.completion_tokens, usage.cost)
        } else {
            // If no usage info, default to 0
            debug!("No usage information in API response");
            (0, 0, None)
        };

        let ai_stats = AiStats {
            provider: self.name().to_string(),
            model: self.model().to_string(),
            input_tokens,
            output_tokens,
            duration_ms,
            cost_usd,
            fallback_provider: None,
        };

        // Emit structured metrics
        info!(
            duration_ms,
            input_tokens,
            output_tokens,
            cost_usd = ?cost_usd,
            model = %self.model(),
            "AI request completed"
        );

        Ok((parsed, ai_stats))
    }
365
366    /// Analyzes a GitHub issue using the provider's API.
367    ///
368    /// Returns a structured triage response with summary, labels, questions, duplicates, and usage stats.
369    ///
370    /// # Arguments
371    ///
372    /// * `issue` - Issue details to analyze
373    ///
374    /// # Errors
375    ///
376    /// Returns an error if:
377    /// - API request fails (network, timeout, rate limit)
378    /// - Response cannot be parsed as valid JSON
379    #[instrument(skip(self, issue), fields(issue_number = issue.number, repo = %format!("{}/{}", issue.owner, issue.repo)))]
380    async fn analyze_issue(&self, issue: &IssueDetails) -> Result<AiResponse> {
381        debug!(model = %self.model(), "Calling {} API", self.name());
382
383        // Build request
384        let system_content = if let Some(override_prompt) =
385            super::context::load_system_prompt_override("triage_system").await
386        {
387            override_prompt
388        } else {
389            Self::build_system_prompt(None)
390        };
391
392        let request = ChatCompletionRequest {
393            model: self.model().to_string(),
394            messages: vec![
395                ChatMessage {
396                    role: "system".to_string(),
397                    content: system_content,
398                },
399                ChatMessage {
400                    role: "user".to_string(),
401                    content: Self::build_user_prompt(issue),
402                },
403            ],
404            response_format: Some(ResponseFormat {
405                format_type: "json_object".to_string(),
406                json_schema: None,
407            }),
408            max_tokens: Some(self.max_tokens()),
409            temperature: Some(self.temperature()),
410        };
411
412        // Send request and parse JSON with retry logic
413        let (triage, ai_stats) = self.send_and_parse::<TriageResponse>(&request).await?;
414
415        debug!(
416            input_tokens = ai_stats.input_tokens,
417            output_tokens = ai_stats.output_tokens,
418            duration_ms = ai_stats.duration_ms,
419            cost_usd = ?ai_stats.cost_usd,
420            "AI analysis complete"
421        );
422
423        Ok(AiResponse {
424            triage,
425            stats: ai_stats,
426        })
427    }
428
429    /// Creates a formatted GitHub issue using the provider's API.
430    ///
431    /// Takes raw issue title and body, formats them using AI (conventional commit style,
432    /// structured body), and returns the formatted content with suggested labels.
433    ///
434    /// # Arguments
435    ///
436    /// * `title` - Raw issue title from user
437    /// * `body` - Raw issue body/description from user
438    /// * `repo` - Repository name for context (owner/repo format)
439    ///
440    /// # Errors
441    ///
442    /// Returns an error if:
443    /// - API request fails (network, timeout, rate limit)
444    /// - Response cannot be parsed as valid JSON
445    #[instrument(skip(self), fields(repo = %repo))]
446    async fn create_issue(
447        &self,
448        title: &str,
449        body: &str,
450        repo: &str,
451    ) -> Result<(super::types::CreateIssueResponse, AiStats)> {
452        debug!(model = %self.model(), "Calling {} API for issue creation", self.name());
453
454        // Build request
455        let system_content = if let Some(override_prompt) =
456            super::context::load_system_prompt_override("create_system").await
457        {
458            override_prompt
459        } else {
460            Self::build_create_system_prompt(None)
461        };
462
463        let request = ChatCompletionRequest {
464            model: self.model().to_string(),
465            messages: vec![
466                ChatMessage {
467                    role: "system".to_string(),
468                    content: system_content,
469                },
470                ChatMessage {
471                    role: "user".to_string(),
472                    content: Self::build_create_user_prompt(title, body, repo),
473                },
474            ],
475            response_format: Some(ResponseFormat {
476                format_type: "json_object".to_string(),
477                json_schema: None,
478            }),
479            max_tokens: Some(self.max_tokens()),
480            temperature: Some(self.temperature()),
481        };
482
483        // Send request and parse JSON with retry logic
484        let (create_response, ai_stats) = self
485            .send_and_parse::<super::types::CreateIssueResponse>(&request)
486            .await?;
487
488        debug!(
489            title_len = create_response.formatted_title.len(),
490            body_len = create_response.formatted_body.len(),
491            labels = create_response.suggested_labels.len(),
492            input_tokens = ai_stats.input_tokens,
493            output_tokens = ai_stats.output_tokens,
494            duration_ms = ai_stats.duration_ms,
495            "Issue formatting complete with stats"
496        );
497
498        Ok((create_response, ai_stats))
499    }
500
    /// Builds the system prompt for issue triage.
    ///
    /// Combines a fixed role preamble, optional custom guidance, the required
    /// JSON response schema, and detailed per-field guidelines with examples.
    #[must_use]
    fn build_system_prompt(custom_guidance: Option<&str>) -> String {
        let context = super::context::load_custom_guidance(custom_guidance);
        // JSON schema the model must emit. NOTE(review): this should mirror
        // `TriageResponse` (the type `analyze_issue` parses into) — confirm
        // they stay in sync when fields change.
        let schema = "{\n  \"summary\": \"A 2-3 sentence summary of what the issue is about and its impact\",\n  \"suggested_labels\": [\"label1\", \"label2\"],\n  \"clarifying_questions\": [\"question1\", \"question2\"],\n  \"potential_duplicates\": [\"#123\", \"#456\"],\n  \"related_issues\": [\n    {\n      \"number\": 789,\n      \"title\": \"Related issue title\",\n      \"reason\": \"Brief explanation of why this is related\"\n    }\n  ],\n  \"status_note\": \"Optional note about issue status (e.g., claimed, in-progress)\",\n  \"contributor_guidance\": {\n    \"beginner_friendly\": true,\n    \"reasoning\": \"1-2 sentence explanation of beginner-friendliness assessment\"\n  },\n  \"implementation_approach\": \"Optional suggestions for implementation based on repository structure\",\n  \"suggested_milestone\": \"Optional milestone title for the issue\",\n  \"complexity\": {\n    \"level\": \"low|medium|high\",\n    \"estimated_loc\": 150,\n    \"affected_areas\": [\"crates/aptu-core/src/ai/types.rs\"],\n    \"recommendation\": \"Optional decomposition recommendation for high-complexity issues\"\n  }\n}";
        // Per-field instructions plus two worked examples (happy path + vague
        // report). Backslash continuations keep the literal free of indentation.
        let guidelines = "Reason through each step before producing output.\n\n\
Guidelines:\n\
- summary: Concise explanation of the problem/request and why it matters\n\
- suggested_labels: Prefer labels from the Available Labels list provided. Choose from: bug, enhancement, documentation, question, duplicate, invalid, wontfix. If a more specific label exists in the repository, use it instead of generic ones.\n\
- clarifying_questions: Only include if the issue lacks critical information. Leave empty array if issue is clear. Skip questions already answered in comments.\n\
- potential_duplicates: Only include if you detect likely duplicates from the context. Leave empty array if none. A duplicate is an issue that describes the exact same problem.\n\
- related_issues: Include issues from the search results that are contextually related but NOT duplicates. Provide brief reasoning for each. Leave empty array if none are relevant.\n\
- status_note: Detect if someone has claimed the issue or is working on it. Look for patterns like \"I'd like to work on this\", \"I'll submit a PR\", \"working on this\", or \"@user I've assigned you\". If claimed, set status_note to a brief description (e.g., \"Issue claimed by @username\"). If not claimed, leave as null or empty string.\n\
- contributor_guidance: Assess whether the issue is suitable for beginners. Consider: scope (small, well-defined), file count (few files to modify), required knowledge (no deep expertise needed), clarity (clear problem statement). Set beginner_friendly to true if all factors are favorable. Provide 1-2 sentence reasoning explaining the assessment.\n\
- implementation_approach: Based on the repository structure provided, suggest specific files or modules to modify. Reference the file paths from the repository structure. Be concrete and actionable. Leave as null or empty string if no specific guidance can be provided.\n\
- suggested_milestone: If applicable, suggest a milestone title from the Available Milestones list. Only include if a milestone is clearly relevant to the issue. Leave as null or empty string if no milestone is appropriate.\n\
- complexity: Always populate this field. Set `level` to low/medium/high based on estimated implementation scope: low = small, self-contained change (1-2 files, <100 LOC); medium = moderate change (3-5 files, 100-300 LOC); high = large change (5+ files, 300+ LOC or deep domain knowledge). Populate `affected_areas` with likely file paths from the repository structure. For high complexity, set `recommendation` to a concrete suggestion (e.g. 'Decompose into 3 sub-issues: CLI parsing, AI prompt update, GitHub API integration').\n\
\n\
Be helpful, concise, and actionable. Focus on what a maintainer needs to know.\n\
\n\
## Examples\n\
\n\
### Example 1 (happy path)\n\
Input: Issue titled \"Add dark mode support\" with body describing a UI theme toggle request.\n\
Output:\n\
```json\n\
{\n\
  \"summary\": \"User requests dark mode support with a toggle in settings.\",\n\
  \"suggested_labels\": [\"enhancement\", \"ui\"],\n\
  \"clarifying_questions\": [\"Which components should be themed first?\"],\n\
  \"potential_duplicates\": [],\n\
  \"related_issues\": [],\n\
  \"status_note\": \"Ready for design discussion\",\n\
  \"contributor_guidance\": {\n\
    \"beginner_friendly\": false,\n\
    \"reasoning\": \"Requires understanding of the theme system and CSS. Could span multiple files.\"\n\
  },\n\
  \"implementation_approach\": \"Extend the existing ThemeProvider with a dark variant and persist preference to localStorage.\",\n\
  \"suggested_milestone\": \"v2.0\",\n\
  \"complexity\": {\n\
    \"level\": \"medium\",\n\
    \"estimated_loc\": 120,\n\
    \"affected_areas\": [\"src/theme/ThemeProvider.tsx\", \"src/components/Settings.tsx\"],\n\
    \"recommendation\": null\n\
  }\n\
}\n\
```\n\
\n\
### Example 2 (edge case - vague report)\n\
Input: Issue titled \"it broken\" with empty body.\n\
Output:\n\
```json\n\
{\n\
  \"summary\": \"Vague report with no reproduction steps or context.\",\n\
  \"suggested_labels\": [\"needs-info\"],\n\
  \"clarifying_questions\": [\"What is broken?\", \"Steps to reproduce?\", \"Expected vs actual behavior?\"],\n\
  \"potential_duplicates\": [],\n\
  \"related_issues\": [],\n\
  \"status_note\": \"Blocked on clarification\",\n\
  \"contributor_guidance\": {\n\
    \"beginner_friendly\": false,\n\
    \"reasoning\": \"Issue is too vague to assess or action without clarification.\"\n\
  },\n\
  \"implementation_approach\": \"\",\n\
  \"suggested_milestone\": null,\n\
  \"complexity\": {\n\
    \"level\": \"low\",\n\
    \"estimated_loc\": null,\n\
    \"affected_areas\": [],\n\
    \"recommendation\": null\n\
  }\n\
}\n\
```";
        format!(
            "You are a senior OSS maintainer. Your mission is to produce structured triage output that helps maintainers prioritize and route incoming issues.\n\n{context}\n\nYour response MUST be valid JSON with this exact schema:\n{schema}\n\n{guidelines}"
        )
    }
578
579    /// Builds the user prompt containing the issue details.
580    #[must_use]
581    fn build_user_prompt(issue: &IssueDetails) -> String {
582        use std::fmt::Write;
583
584        let mut prompt = String::new();
585
586        prompt.push_str("<issue_content>\n");
587        let _ = writeln!(prompt, "Title: {}\n", issue.title);
588
589        // Truncate body if too long
590        let body = if issue.body.len() > MAX_BODY_LENGTH {
591            format!(
592                "{}...\n[Body truncated - original length: {} chars]",
593                &issue.body[..MAX_BODY_LENGTH],
594                issue.body.len()
595            )
596        } else if issue.body.is_empty() {
597            "[No description provided]".to_string()
598        } else {
599            issue.body.clone()
600        };
601        let _ = writeln!(prompt, "Body:\n{body}\n");
602
603        // Include existing labels
604        if !issue.labels.is_empty() {
605            let _ = writeln!(prompt, "Existing Labels: {}\n", issue.labels.join(", "));
606        }
607
608        // Include recent comments (limited)
609        if !issue.comments.is_empty() {
610            prompt.push_str("Recent Comments:\n");
611            for comment in issue.comments.iter().take(MAX_COMMENTS) {
612                let comment_body = if comment.body.len() > 500 {
613                    format!("{}...", &comment.body[..500])
614                } else {
615                    comment.body.clone()
616                };
617                let _ = writeln!(prompt, "- @{}: {}", comment.author, comment_body);
618            }
619            prompt.push('\n');
620        }
621
622        // Include related issues from search (for context)
623        if !issue.repo_context.is_empty() {
624            prompt.push_str("Related Issues in Repository (for context):\n");
625            for related in issue.repo_context.iter().take(10) {
626                let _ = writeln!(
627                    prompt,
628                    "- #{} [{}] {}",
629                    related.number, related.state, related.title
630                );
631            }
632            prompt.push('\n');
633        }
634
635        // Include repository structure (source files)
636        if !issue.repo_tree.is_empty() {
637            prompt.push_str("Repository Structure (source files):\n");
638            for path in issue.repo_tree.iter().take(20) {
639                let _ = writeln!(prompt, "- {path}");
640            }
641            prompt.push('\n');
642        }
643
644        // Include available labels
645        if !issue.available_labels.is_empty() {
646            prompt.push_str("Available Labels:\n");
647            for label in issue.available_labels.iter().take(MAX_LABELS) {
648                let description = if label.description.is_empty() {
649                    String::new()
650                } else {
651                    format!(" - {}", label.description)
652                };
653                let _ = writeln!(
654                    prompt,
655                    "- {} (color: #{}){}",
656                    label.name, label.color, description
657                );
658            }
659            prompt.push('\n');
660        }
661
662        // Include available milestones
663        if !issue.available_milestones.is_empty() {
664            prompt.push_str("Available Milestones:\n");
665            for milestone in issue.available_milestones.iter().take(MAX_MILESTONES) {
666                let description = if milestone.description.is_empty() {
667                    String::new()
668                } else {
669                    format!(" - {}", milestone.description)
670                };
671                let _ = writeln!(prompt, "- {}{}", milestone.title, description);
672            }
673            prompt.push('\n');
674        }
675
676        prompt.push_str("</issue_content>");
677
678        prompt
679    }
680
    /// Builds the system prompt for issue creation/formatting.
    ///
    /// Combines a fixed role preamble, optional custom guidance, the required
    /// JSON response schema, formatting guidelines, and two worked examples.
    /// Literal `{{`/`}}` in the format string render as single braces in the
    /// emitted JSON schema and examples.
    #[must_use]
    fn build_create_system_prompt(custom_guidance: Option<&str>) -> String {
        let context = super::context::load_custom_guidance(custom_guidance);
        format!(
            "You are a senior developer advocate. Your mission is to produce a well-structured, professional GitHub issue from raw user input.\n\n\
{context}\n\n\
Your response MUST be valid JSON with this exact schema:\n\
{{\n  \"formatted_title\": \"Well-formatted issue title following conventional commit style\",\n  \"formatted_body\": \"Professionally formatted issue body with clear sections\",\n  \"suggested_labels\": [\"label1\", \"label2\"]\n}}\n\n\
Reason through each step before producing output.\n\n\
Guidelines:\n\
- formatted_title: Use conventional commit style (e.g., \"feat: add search functionality\", \"fix: resolve memory leak in parser\"). Keep it concise (under 72 characters). No period at the end.\n\
- formatted_body: Structure the body with clear sections:\n  * Start with a brief 1-2 sentence summary if not already present\n  * Use markdown formatting with headers (## Summary, ## Details, ## Steps to Reproduce, ## Expected Behavior, ## Actual Behavior, ## Context, etc.)\n  * Keep sentences clear and concise\n  * Use bullet points for lists\n  * Improve grammar and clarity\n  * Add relevant context if missing\n\
- suggested_labels: Suggest up to 3 relevant GitHub labels. Common ones: bug, enhancement, documentation, question, duplicate, invalid, wontfix. Choose based on the issue content.\n\n\
Be professional but friendly. Maintain the user's intent while improving clarity and structure.\n\n\
## Examples\n\n\
### Example 1 (happy path)\n\
Input: Title \"app crashes\", Body \"when i click login it crashes on android\"\n\
Output:\n\
```json\n\
{{\n  \"formatted_title\": \"fix(auth): app crashes on login on Android\",\n  \"formatted_body\": \"## Description\\nThe app crashes when tapping the login button on Android.\\n\\n## Steps to Reproduce\\n1. Open the app on Android\\n2. Tap the login button\\n\\n## Expected Behavior\\nUser is authenticated and redirected to the home screen.\\n\\n## Actual Behavior\\nApp crashes immediately.\",\n  \"suggested_labels\": [\"bug\", \"android\", \"auth\"]\n}}\n\
```\n\n\
### Example 2 (edge case - already well-formatted)\n\
Input: Title \"feat(api): add pagination to /users endpoint\", Body already has sections.\n\
Output:\n\
```json\n\
{{\n  \"formatted_title\": \"feat(api): add pagination to /users endpoint\",\n  \"formatted_body\": \"## Description\\nAdd cursor-based pagination to the /users endpoint to support large datasets.\\n\\n## Motivation\\nThe endpoint currently returns all users at once, causing timeouts for large datasets.\",\n  \"suggested_labels\": [\"enhancement\", \"api\"]\n}}\n\
```"
        )
    }
711
712    /// Builds the user prompt for issue creation/formatting.
713    #[must_use]
714    fn build_create_user_prompt(title: &str, body: &str, _repo: &str) -> String {
715        format!("Please format this GitHub issue:\n\nTitle: {title}\n\nBody:\n{body}")
716    }
717
718    /// Reviews a pull request using the provider's API.
719    ///
720    /// Analyzes PR metadata and file diffs to provide structured review feedback.
721    ///
722    /// # Arguments
723    ///
724    /// * `pr` - Pull request details including files and diffs
725    ///
726    /// # Errors
727    ///
728    /// Returns an error if:
729    /// - API request fails (network, timeout, rate limit)
730    /// - Response cannot be parsed as valid JSON
731    #[instrument(skip(self, pr), fields(pr_number = pr.number, repo = %format!("{}/{}", pr.owner, pr.repo)))]
732    async fn review_pr(
733        &self,
734        pr: &super::types::PrDetails,
735    ) -> Result<(super::types::PrReviewResponse, AiStats)> {
736        debug!(model = %self.model(), "Calling {} API for PR review", self.name());
737
738        // Build request
739        let system_content = if let Some(override_prompt) =
740            super::context::load_system_prompt_override("pr_review_system").await
741        {
742            override_prompt
743        } else {
744            Self::build_pr_review_system_prompt(None)
745        };
746
747        let request = ChatCompletionRequest {
748            model: self.model().to_string(),
749            messages: vec![
750                ChatMessage {
751                    role: "system".to_string(),
752                    content: system_content,
753                },
754                ChatMessage {
755                    role: "user".to_string(),
756                    content: Self::build_pr_review_user_prompt(pr),
757                },
758            ],
759            response_format: Some(ResponseFormat {
760                format_type: "json_object".to_string(),
761                json_schema: None,
762            }),
763            max_tokens: Some(self.max_tokens()),
764            temperature: Some(self.temperature()),
765        };
766
767        // Send request and parse JSON with retry logic
768        let (review, ai_stats) = self
769            .send_and_parse::<super::types::PrReviewResponse>(&request)
770            .await?;
771
772        debug!(
773            verdict = %review.verdict,
774            input_tokens = ai_stats.input_tokens,
775            output_tokens = ai_stats.output_tokens,
776            duration_ms = ai_stats.duration_ms,
777            "PR review complete with stats"
778        );
779
780        Ok((review, ai_stats))
781    }
782
783    /// Suggests labels for a pull request using the provider's API.
784    ///
785    /// Analyzes PR title, body, and file paths to suggest relevant labels.
786    ///
787    /// # Arguments
788    ///
789    /// * `title` - Pull request title
790    /// * `body` - Pull request description
791    /// * `file_paths` - List of file paths changed in the PR
792    ///
793    /// # Errors
794    ///
795    /// Returns an error if:
796    /// - API request fails (network, timeout, rate limit)
797    /// - Response cannot be parsed as valid JSON
798    #[instrument(skip(self), fields(title = %title))]
799    async fn suggest_pr_labels(
800        &self,
801        title: &str,
802        body: &str,
803        file_paths: &[String],
804    ) -> Result<(Vec<String>, AiStats)> {
805        debug!(model = %self.model(), "Calling {} API for PR label suggestion", self.name());
806
807        // Build request
808        let system_content = if let Some(override_prompt) =
809            super::context::load_system_prompt_override("pr_label_system").await
810        {
811            override_prompt
812        } else {
813            Self::build_pr_label_system_prompt(None)
814        };
815
816        let request = ChatCompletionRequest {
817            model: self.model().to_string(),
818            messages: vec![
819                ChatMessage {
820                    role: "system".to_string(),
821                    content: system_content,
822                },
823                ChatMessage {
824                    role: "user".to_string(),
825                    content: Self::build_pr_label_user_prompt(title, body, file_paths),
826                },
827            ],
828            response_format: Some(ResponseFormat {
829                format_type: "json_object".to_string(),
830                json_schema: None,
831            }),
832            max_tokens: Some(self.max_tokens()),
833            temperature: Some(self.temperature()),
834        };
835
836        // Send request and parse JSON with retry logic
837        let (response, ai_stats) = self
838            .send_and_parse::<super::types::PrLabelResponse>(&request)
839            .await?;
840
841        debug!(
842            label_count = response.suggested_labels.len(),
843            input_tokens = ai_stats.input_tokens,
844            output_tokens = ai_stats.output_tokens,
845            duration_ms = ai_stats.duration_ms,
846            "PR label suggestion complete with stats"
847        );
848
849        Ok((response.suggested_labels, ai_stats))
850    }
851
852    /// Builds the system prompt for PR review.
853    #[must_use]
854    fn build_pr_review_system_prompt(custom_guidance: Option<&str>) -> String {
855        let context = super::context::load_custom_guidance(custom_guidance);
856        format!(
857            "You are a senior software engineer. Your mission is to produce structured, actionable review feedback on a pull request.\n\n\
858{context}\n\n\
859Your response MUST be valid JSON with this exact schema:\n\
860{{\n  \"summary\": \"A 2-3 sentence summary of what the PR does and its impact\",\n  \"verdict\": \"approve|request_changes|comment\",\n  \"strengths\": [\"strength1\", \"strength2\"],\n  \"concerns\": [\"concern1\", \"concern2\"],\n  \"comments\": [\n    {{\n      \"file\": \"path/to/file.rs\",\n      \"line\": 42,\n      \"comment\": \"Specific feedback about this line\",\n      \"severity\": \"info|suggestion|warning|issue\",\n      \"suggested_code\": null\n    }}\n  ],\n  \"suggestions\": [\"suggestion1\", \"suggestion2\"],\n  \"disclaimer\": null\n}}\n\n\
861Reason through each step before producing output.\n\n\
862Guidelines:\n\
863- summary: Concise explanation of the changes and their purpose\n\
864- verdict: Use \"approve\" for good PRs, \"request_changes\" for blocking issues, \"comment\" for feedback without blocking\n\
865- strengths: What the PR does well (good patterns, clear code, etc.)\n\
866- concerns: Potential issues or risks (bugs, performance, security, maintainability)\n\
867- comments: Specific line-level feedback. Use severity:\n  - \"info\": Informational, no action needed\n  - \"suggestion\": Optional improvement\n  - \"warning\": Should consider changing\n  - \"issue\": Should be fixed before merge\n  - \"suggested_code\": Optional. Provide replacement lines for a one-click GitHub suggestion block when you have a small, safe, directly applicable fix (1-10 lines). Omit diff markers (+/-). Leave null for refactors, multi-file changes, or uncertain fixes.\n\
868- suggestions: General improvements that are not blocking\n\
869- disclaimer: Optional field. If the PR involves platform versions (iOS, Android, Node, Rust, Python, Java, etc.), include a disclaimer explaining that platform version validation may be inaccurate due to knowledge cutoffs. Otherwise, set to null.\n\n\
870IMPORTANT - Platform Version Exclusions:\n\
871DO NOT validate or flag platform versions (iOS, Android, Node, Rust, Python, Java, simulator availability, package versions, framework versions) as concerns or issues. These may be newer than your knowledge cutoff and flagging them creates false positives. If the PR involves platform versions, include a disclaimer field explaining that platform version validation was skipped due to knowledge cutoff limitations. Focus your review on code logic, patterns, and structure instead.\n\n\
872Focus on:\n\
8731. Correctness: Does the code do what it claims?\n\
8742. Security: Any potential vulnerabilities?\n\
8753. Performance: Any obvious inefficiencies?\n\
8764. Maintainability: Is the code clear and well-structured?\n\
8775. Testing: Are changes adequately tested?\n\n\
878Be constructive and specific. Explain why something is an issue and how to fix it.\n\n\
879## Examples\n\n\
880### Example 1 (happy path)\n\
881Input: PR adds a retry helper with tests.\n\
882Output:\n\
883```json\n\
884{{\n  \"summary\": \"Adds an exponential-backoff retry helper with unit tests.\",\n  \"verdict\": \"approve\",\n  \"strengths\": [\"Well-tested with happy and error paths\", \"Follows existing error handling patterns\"],\n  \"concerns\": [],\n  \"comments\": [],\n  \"suggestions\": [\"Consider adding a jitter parameter to reduce thundering-herd effects.\"],\n  \"disclaimer\": null\n}}\n\
885```\n\n\
886### Example 2 (edge case - missing error handling)\n\
887Input: PR adds a file parser that uses unwrap().\n\
888Output:\n\
889```json\n\
890{{\n  \"summary\": \"Adds a CSV parser but uses unwrap() on file reads.\",\n  \"verdict\": \"request_changes\",\n  \"strengths\": [\"Covers the happy path\"],\n  \"concerns\": [\"unwrap() on file open will panic on missing files\"],\n  \"comments\": [{{\"file\": \"src/parser.rs\", \"line\": 42, \"severity\": \"high\", \"comment\": \"Replace unwrap() with proper error propagation using ?\", \"suggested_code\": \"        let file = File::open(path)?;\\n\"}}],\n  \"suggestions\": [\"Return Result<_, io::Error> from parse_file instead of panicking.\"],\n  \"disclaimer\": null\n}}\n\
891```"
892        )
893    }
894
895    /// Builds the user prompt for PR review.
896    #[must_use]
897    fn build_pr_review_user_prompt(pr: &super::types::PrDetails) -> String {
898        use std::fmt::Write;
899
900        let mut prompt = String::new();
901
902        prompt.push_str("<pull_request>\n");
903        let _ = writeln!(prompt, "Title: {}\n", pr.title);
904        let _ = writeln!(prompt, "Branch: {} -> {}\n", pr.head_branch, pr.base_branch);
905
906        // PR description
907        let body = if pr.body.is_empty() {
908            "[No description provided]".to_string()
909        } else if pr.body.len() > MAX_BODY_LENGTH {
910            format!(
911                "{}...\n[Description truncated - original length: {} chars]",
912                &pr.body[..MAX_BODY_LENGTH],
913                pr.body.len()
914            )
915        } else {
916            pr.body.clone()
917        };
918        let _ = writeln!(prompt, "Description:\n{body}\n");
919
920        // File changes with limits
921        prompt.push_str("Files Changed:\n");
922        let mut total_diff_size = 0;
923        let mut files_included = 0;
924        let mut files_skipped = 0;
925
926        for file in &pr.files {
927            // Check file count limit
928            if files_included >= MAX_FILES {
929                files_skipped += 1;
930                continue;
931            }
932
933            let _ = writeln!(
934                prompt,
935                "- {} ({}) +{} -{}\n",
936                file.filename, file.status, file.additions, file.deletions
937            );
938
939            // Include patch if available (truncate large patches)
940            if let Some(patch) = &file.patch {
941                const MAX_PATCH_LENGTH: usize = 2000;
942                let patch_content = if patch.len() > MAX_PATCH_LENGTH {
943                    format!(
944                        "{}...\n[Patch truncated - original length: {} chars]",
945                        &patch[..MAX_PATCH_LENGTH],
946                        patch.len()
947                    )
948                } else {
949                    patch.clone()
950                };
951
952                // Check if adding this patch would exceed total diff size limit
953                let patch_size = patch_content.len();
954                if total_diff_size + patch_size > MAX_TOTAL_DIFF_SIZE {
955                    let _ = writeln!(
956                        prompt,
957                        "```diff\n[Patch omitted - total diff size limit reached]\n```\n"
958                    );
959                    files_skipped += 1;
960                    continue;
961                }
962
963                let _ = writeln!(prompt, "```diff\n{patch_content}\n```\n");
964                total_diff_size += patch_size;
965            }
966
967            files_included += 1;
968        }
969
970        // Add truncation message if files were skipped
971        if files_skipped > 0 {
972            let _ = writeln!(
973                prompt,
974                "\n[{files_skipped} files omitted due to size limits (MAX_FILES={MAX_FILES}, MAX_TOTAL_DIFF_SIZE={MAX_TOTAL_DIFF_SIZE})]"
975            );
976        }
977
978        prompt.push_str("</pull_request>");
979
980        prompt
981    }
982
    /// Builds the system prompt for PR label suggestion.
    ///
    /// Optional `custom_guidance` is resolved via
    /// `super::context::load_custom_guidance` and interpolated into the
    /// `{context}` slot of the template. The prompt demands a JSON object
    /// containing a single `suggested_labels` array of 1-3 label strings.
    #[must_use]
    fn build_pr_label_system_prompt(custom_guidance: Option<&str>) -> String {
        let context = super::context::load_custom_guidance(custom_guidance);
        // `{{` / `}}` below are format!-escaped literal braces in the JSON
        // schema and examples; only {context} is interpolated.
        format!(
            r#"You are a senior open-source maintainer. Your mission is to suggest the most relevant labels for a pull request based on its content.

{context}

Your response MUST be valid JSON with this exact schema:
{{
  "suggested_labels": ["label1", "label2", "label3"]
}}

Response format: json_object

Reason through each step before producing output.

Guidelines:
- suggested_labels: Suggest 1-3 relevant GitHub labels based on the PR content. Common labels include: bug, enhancement, documentation, feature, refactor, performance, security, testing, ci, dependencies. Choose labels that best describe the type of change.
- Focus on the PR title, description, and file paths to determine appropriate labels.
- Prefer specific labels over generic ones when possible.
- Only suggest labels that are commonly used in GitHub repositories.

Be concise and practical.

## Examples

### Example 1 (happy path)
Input: PR adds OAuth2 login flow with tests.
Output:
```json
{{"suggested_labels": ["feature", "auth", "security"]}}
```

### Example 2 (edge case - documentation only PR)
Input: PR fixes typos in README.
Output:
```json
{{"suggested_labels": ["documentation"]}}
```"#
        )
    }
1026
1027    /// Builds the user prompt for PR label suggestion.
1028    #[must_use]
1029    fn build_pr_label_user_prompt(title: &str, body: &str, file_paths: &[String]) -> String {
1030        use std::fmt::Write;
1031
1032        let mut prompt = String::new();
1033
1034        prompt.push_str("<pull_request>\n");
1035        let _ = writeln!(prompt, "Title: {title}\n");
1036
1037        // PR description
1038        let body_content = if body.is_empty() {
1039            "[No description provided]".to_string()
1040        } else if body.len() > MAX_BODY_LENGTH {
1041            format!(
1042                "{}...\n[Description truncated - original length: {} chars]",
1043                &body[..MAX_BODY_LENGTH],
1044                body.len()
1045            )
1046        } else {
1047            body.to_string()
1048        };
1049        let _ = writeln!(prompt, "Description:\n{body_content}\n");
1050
1051        // File paths
1052        if !file_paths.is_empty() {
1053            prompt.push_str("Files Changed:\n");
1054            for path in file_paths.iter().take(20) {
1055                let _ = writeln!(prompt, "- {path}");
1056            }
1057            if file_paths.len() > 20 {
1058                let _ = writeln!(prompt, "- ... and {} more files", file_paths.len() - 20);
1059            }
1060            prompt.push('\n');
1061        }
1062
1063        prompt.push_str("</pull_request>");
1064
1065        prompt
1066    }
1067
1068    /// Generate release notes from PR summaries.
1069    ///
1070    /// # Arguments
1071    ///
1072    /// * `prs` - List of PR summaries to synthesize
1073    /// * `version` - Version being released
1074    ///
1075    /// # Returns
1076    ///
1077    /// Structured release notes with theme, highlights, and categorized changes.
1078    #[instrument(skip(self, prs))]
1079    async fn generate_release_notes(
1080        &self,
1081        prs: Vec<super::types::PrSummary>,
1082        version: &str,
1083    ) -> Result<(super::types::ReleaseNotesResponse, AiStats)> {
1084        let prompt = Self::build_release_notes_prompt(&prs, version);
1085        let request = ChatCompletionRequest {
1086            model: self.model().to_string(),
1087            messages: vec![ChatMessage {
1088                role: "user".to_string(),
1089                content: prompt,
1090            }],
1091            response_format: Some(ResponseFormat {
1092                format_type: "json_object".to_string(),
1093                json_schema: None,
1094            }),
1095            temperature: Some(0.7),
1096            max_tokens: Some(self.max_tokens()),
1097        };
1098
1099        let (parsed, ai_stats) = self
1100            .send_and_parse::<super::types::ReleaseNotesResponse>(&request)
1101            .await?;
1102
1103        debug!(
1104            input_tokens = ai_stats.input_tokens,
1105            output_tokens = ai_stats.output_tokens,
1106            duration_ms = ai_stats.duration_ms,
1107            "Release notes generation complete with stats"
1108        );
1109
1110        Ok((parsed, ai_stats))
1111    }
1112
1113    /// Build the user prompt for release notes generation.
1114    #[must_use]
1115    fn build_release_notes_prompt(prs: &[super::types::PrSummary], version: &str) -> String {
1116        let pr_list = prs
1117            .iter()
1118            .map(|pr| {
1119                format!(
1120                    "- #{}: {} (by @{})\n  {}",
1121                    pr.number,
1122                    pr.title,
1123                    pr.author,
1124                    pr.body.lines().next().unwrap_or("")
1125                )
1126            })
1127            .collect::<Vec<_>>()
1128            .join("\n");
1129
1130        format!(
1131            r#"Generate release notes for version {version} based on these merged PRs:
1132
1133{pr_list}
1134
1135Create a curated release notes document with:
11361. A theme/title that captures the essence of this release
11372. A 1-2 sentence narrative about the release
11383. 3-5 highlighted features
11394. Categorized changes: Features, Fixes, Improvements, Documentation, Maintenance
11405. List of contributors
1141
1142Follow these conventions:
1143- No emojis
1144- Bold feature names with dash separator
1145- Include PR numbers in parentheses
1146- Group by user impact, not just commit type
1147- Filter CI/deps under Maintenance
1148
1149Your response MUST be valid JSON with this exact schema:
1150{{
1151  "theme": "Release theme title",
1152  "narrative": "1-2 sentence summary",
1153  "highlights": ["highlight1", "highlight2"],
1154  "features": ["feature1", "feature2"],
1155  "fixes": ["fix1", "fix2"],
1156  "improvements": ["improvement1"],
1157  "documentation": ["doc change1"],
1158  "maintenance": ["maintenance1"],
1159  "contributors": ["@author1", "@author2"]
1160}}"#
1161        )
1162    }
1163}
1164
1165#[cfg(test)]
1166mod tests {
1167    use super::*;
1168
    // Minimal provider stub: supplies only the accessor methods so the
    // trait's default prompt-building helpers can be exercised without any
    // network access.
    struct TestProvider;

    impl AiProvider for TestProvider {
        fn name(&self) -> &'static str {
            "test"
        }

        fn api_url(&self) -> &'static str {
            "https://test.example.com"
        }

        fn api_key_env(&self) -> &'static str {
            "TEST_API_KEY"
        }

        // Never invoked by these unit tests; panics if a test accidentally
        // reaches code that would perform a real request.
        fn http_client(&self) -> &Client {
            unimplemented!()
        }

        // Never invoked by these unit tests (see http_client above).
        fn api_key(&self) -> &SecretString {
            unimplemented!()
        }

        fn model(&self) -> &'static str {
            "test-model"
        }

        fn max_tokens(&self) -> u32 {
            2048
        }

        fn temperature(&self) -> f32 {
            0.3
        }
    }
1204
    // The triage system prompt must mention every field of the JSON schema it
    // asks the model to produce.
    #[test]
    fn test_build_system_prompt_contains_json_schema() {
        let prompt = TestProvider::build_system_prompt(None);
        assert!(prompt.contains("summary"));
        assert!(prompt.contains("suggested_labels"));
        assert!(prompt.contains("clarifying_questions"));
        assert!(prompt.contains("potential_duplicates"));
        assert!(prompt.contains("status_note"));
    }

    // The issue user prompt wraps its content in <issue_content> delimiters
    // and includes the title, body, and existing labels.
    #[test]
    fn test_build_user_prompt_with_delimiters() {
        let issue = IssueDetails::builder()
            .owner("test".to_string())
            .repo("repo".to_string())
            .number(1)
            .title("Test issue".to_string())
            .body("This is the body".to_string())
            .labels(vec!["bug".to_string()])
            .comments(vec![])
            .url("https://github.com/test/repo/issues/1".to_string())
            .build();

        let prompt = TestProvider::build_user_prompt(&issue);
        assert!(prompt.starts_with("<issue_content>"));
        assert!(prompt.ends_with("</issue_content>"));
        assert!(prompt.contains("Title: Test issue"));
        assert!(prompt.contains("This is the body"));
        assert!(prompt.contains("Existing Labels: bug"));
    }

    // A body longer than MAX_BODY_LENGTH (4000) gets a truncation marker that
    // reports the original length.
    #[test]
    fn test_build_user_prompt_truncates_long_body() {
        let long_body = "x".repeat(5000);
        let issue = IssueDetails::builder()
            .owner("test".to_string())
            .repo("repo".to_string())
            .number(1)
            .title("Test".to_string())
            .body(long_body)
            .labels(vec![])
            .comments(vec![])
            .url("https://github.com/test/repo/issues/1".to_string())
            .build();

        let prompt = TestProvider::build_user_prompt(&issue);
        assert!(prompt.contains("[Body truncated"));
        assert!(prompt.contains("5000 chars"));
    }

    // An empty body is replaced by an explicit placeholder.
    #[test]
    fn test_build_user_prompt_empty_body() {
        let issue = IssueDetails::builder()
            .owner("test".to_string())
            .repo("repo".to_string())
            .number(1)
            .title("Test".to_string())
            .body(String::new())
            .labels(vec![])
            .comments(vec![])
            .url("https://github.com/test/repo/issues/1".to_string())
            .build();

        let prompt = TestProvider::build_user_prompt(&issue);
        assert!(prompt.contains("[No description provided]"));
    }

    // The issue-creation system prompt must mention every field of the JSON
    // schema it asks the model to produce.
    #[test]
    fn test_build_create_system_prompt_contains_json_schema() {
        let prompt = TestProvider::build_create_system_prompt(None);
        assert!(prompt.contains("formatted_title"));
        assert!(prompt.contains("formatted_body"));
        assert!(prompt.contains("suggested_labels"));
    }
1279
    // With 25 files and MAX_FILES = 20, the prompt reports the 5 skipped
    // files and names the limit.
    #[test]
    fn test_build_pr_review_user_prompt_respects_file_limit() {
        use super::super::types::{PrDetails, PrFile};

        let mut files = Vec::new();
        for i in 0..25 {
            files.push(PrFile {
                filename: format!("file{i}.rs"),
                status: "modified".to_string(),
                additions: 10,
                deletions: 5,
                patch: Some(format!("patch content {i}")),
            });
        }

        let pr = PrDetails {
            owner: "test".to_string(),
            repo: "repo".to_string(),
            number: 1,
            title: "Test PR".to_string(),
            body: "Description".to_string(),
            head_branch: "feature".to_string(),
            base_branch: "main".to_string(),
            url: "https://github.com/test/repo/pull/1".to_string(),
            files,
            labels: vec![],
            head_sha: String::new(),
        };

        let prompt = TestProvider::build_pr_review_user_prompt(&pr);
        assert!(prompt.contains("files omitted due to size limits"));
        assert!(prompt.contains("MAX_FILES=20"));
    }

    // Once the combined patch size exceeds MAX_TOTAL_DIFF_SIZE, later patches
    // are omitted even though their files are still listed.
    #[test]
    fn test_build_pr_review_user_prompt_respects_diff_size_limit() {
        use super::super::types::{PrDetails, PrFile};

        // Create patches that will exceed the limit when combined
        // Each patch is ~30KB, so two will exceed 50KB limit
        let patch1 = "x".repeat(30_000);
        let patch2 = "y".repeat(30_000);

        let files = vec![
            PrFile {
                filename: "file1.rs".to_string(),
                status: "modified".to_string(),
                additions: 100,
                deletions: 50,
                patch: Some(patch1),
            },
            PrFile {
                filename: "file2.rs".to_string(),
                status: "modified".to_string(),
                additions: 100,
                deletions: 50,
                patch: Some(patch2),
            },
        ];

        let pr = PrDetails {
            owner: "test".to_string(),
            repo: "repo".to_string(),
            number: 1,
            title: "Test PR".to_string(),
            body: "Description".to_string(),
            head_branch: "feature".to_string(),
            base_branch: "main".to_string(),
            url: "https://github.com/test/repo/pull/1".to_string(),
            files,
            labels: vec![],
            head_sha: String::new(),
        };

        let prompt = TestProvider::build_pr_review_user_prompt(&pr);
        // Both files should be listed
        assert!(prompt.contains("file1.rs"));
        assert!(prompt.contains("file2.rs"));
        // The second patch should be limited - verify the prompt doesn't contain both full patches
        // by checking that the total size is less than what two full 30KB patches would be
        assert!(prompt.len() < 65_000);
    }

    // Files without a patch (e.g. binary or newly added without diff data)
    // still appear in the file list and do not trigger the skip message.
    #[test]
    fn test_build_pr_review_user_prompt_with_no_patches() {
        use super::super::types::{PrDetails, PrFile};

        let files = vec![PrFile {
            filename: "file1.rs".to_string(),
            status: "added".to_string(),
            additions: 10,
            deletions: 0,
            patch: None,
        }];

        let pr = PrDetails {
            owner: "test".to_string(),
            repo: "repo".to_string(),
            number: 1,
            title: "Test PR".to_string(),
            body: "Description".to_string(),
            head_branch: "feature".to_string(),
            base_branch: "main".to_string(),
            url: "https://github.com/test/repo/pull/1".to_string(),
            files,
            labels: vec![],
            head_sha: String::new(),
        };

        let prompt = TestProvider::build_pr_review_user_prompt(&pr);
        assert!(prompt.contains("file1.rs"));
        assert!(prompt.contains("added"));
        assert!(!prompt.contains("files omitted"));
    }
1394
    // The PR-label system prompt must mention the schema field, the response
    // format, and common example labels.
    #[test]
    fn test_build_pr_label_system_prompt_contains_json_schema() {
        let prompt = TestProvider::build_pr_label_system_prompt(None);
        assert!(prompt.contains("suggested_labels"));
        assert!(prompt.contains("json_object"));
        assert!(prompt.contains("bug"));
        assert!(prompt.contains("enhancement"));
    }

    // Title, body, and all file paths appear inside <pull_request> delimiters.
    #[test]
    fn test_build_pr_label_user_prompt_with_title_and_body() {
        let title = "feat: add new feature";
        let body = "This PR adds a new feature";
        let files = vec!["src/main.rs".to_string(), "tests/test.rs".to_string()];

        let prompt = TestProvider::build_pr_label_user_prompt(title, body, &files);
        assert!(prompt.starts_with("<pull_request>"));
        assert!(prompt.ends_with("</pull_request>"));
        assert!(prompt.contains("feat: add new feature"));
        assert!(prompt.contains("This PR adds a new feature"));
        assert!(prompt.contains("src/main.rs"));
        assert!(prompt.contains("tests/test.rs"));
    }

    // An empty body is replaced by an explicit placeholder.
    #[test]
    fn test_build_pr_label_user_prompt_empty_body() {
        let title = "fix: bug fix";
        let body = "";
        let files = vec!["src/lib.rs".to_string()];

        let prompt = TestProvider::build_pr_label_user_prompt(title, body, &files);
        assert!(prompt.contains("[No description provided]"));
        assert!(prompt.contains("src/lib.rs"));
    }

    // A body longer than MAX_BODY_LENGTH (4000) gets a truncation marker that
    // reports the original length.
    #[test]
    fn test_build_pr_label_user_prompt_truncates_long_body() {
        let title = "test";
        let long_body = "x".repeat(5000);
        let files = vec![];

        let prompt = TestProvider::build_pr_label_user_prompt(title, &long_body, &files);
        assert!(prompt.contains("[Description truncated"));
        assert!(prompt.contains("5000 chars"));
    }

    // Only the first 20 file paths are listed; the remainder is summarized.
    #[test]
    fn test_build_pr_label_user_prompt_respects_file_limit() {
        let title = "test";
        let body = "test";
        let mut files = Vec::new();
        for i in 0..25 {
            files.push(format!("file{i}.rs"));
        }

        let prompt = TestProvider::build_pr_label_user_prompt(title, body, &files);
        assert!(prompt.contains("file0.rs"));
        assert!(prompt.contains("file19.rs"));
        assert!(!prompt.contains("file20.rs"));
        assert!(prompt.contains("... and 5 more files"));
    }

    // With no files, the "Files Changed" section is omitted entirely.
    #[test]
    fn test_build_pr_label_user_prompt_empty_files() {
        let title = "test";
        let body = "test";
        let files: Vec<String> = vec![];

        let prompt = TestProvider::build_pr_label_user_prompt(title, body, &files);
        assert!(prompt.contains("Title: test"));
        assert!(prompt.contains("Description:\ntest"));
        assert!(!prompt.contains("Files Changed:"));
    }
1468
    // Well-formed JSON deserializes into the target type.
    #[test]
    fn test_parse_ai_json_with_valid_json() {
        #[derive(serde::Deserialize)]
        struct TestResponse {
            message: String,
        }

        let json = r#"{"message": "hello"}"#;
        let result: Result<TestResponse> = parse_ai_json(json, "test-provider");
        assert!(result.is_ok());
        let response = result.unwrap();
        assert_eq!(response.message, "hello");
    }

    // JSON cut off mid-value maps to the retryable TruncatedResponse error,
    // with the provider name in the message.
    #[test]
    fn test_parse_ai_json_with_truncated_json() {
        #[derive(Debug, serde::Deserialize)]
        struct TestResponse {
            message: String,
        }

        let json = r#"{"message": "hello"#;
        let result: Result<TestResponse> = parse_ai_json(json, "test-provider");
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(
            err.to_string()
                .contains("Truncated response from test-provider")
        );
    }

    // Malformed (but not truncated) JSON maps to InvalidAIResponse instead.
    #[test]
    fn test_parse_ai_json_with_malformed_json() {
        #[derive(Debug, serde::Deserialize)]
        struct TestResponse {
            message: String,
        }

        let json = r#"{"message": invalid}"#;
        let result: Result<TestResponse> = parse_ai_json(json, "test-provider");
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("Invalid JSON response from AI"));
    }
1513
1514    #[test]
1515    fn test_build_system_prompt_has_senior_persona() {
1516        let prompt = TestProvider::build_system_prompt(None);
1517        assert!(
1518            prompt.contains("You are a senior"),
1519            "prompt should have senior persona"
1520        );
1521        assert!(
1522            prompt.contains("Your mission is"),
1523            "prompt should have mission statement"
1524        );
1525    }
1526
1527    #[test]
1528    fn test_build_system_prompt_has_cot_directive() {
1529        let prompt = TestProvider::build_system_prompt(None);
1530        assert!(prompt.contains("Reason through each step before producing output."));
1531    }
1532
1533    #[test]
1534    fn test_build_system_prompt_has_examples_section() {
1535        let prompt = TestProvider::build_system_prompt(None);
1536        assert!(prompt.contains("## Examples"));
1537    }
1538
1539    #[test]
1540    fn test_build_create_system_prompt_has_senior_persona() {
1541        let prompt = TestProvider::build_create_system_prompt(None);
1542        assert!(
1543            prompt.contains("You are a senior"),
1544            "prompt should have senior persona"
1545        );
1546        assert!(
1547            prompt.contains("Your mission is"),
1548            "prompt should have mission statement"
1549        );
1550    }
1551
1552    #[test]
1553    fn test_build_pr_review_system_prompt_has_senior_persona() {
1554        let prompt = TestProvider::build_pr_review_system_prompt(None);
1555        assert!(
1556            prompt.contains("You are a senior"),
1557            "prompt should have senior persona"
1558        );
1559        assert!(
1560            prompt.contains("Your mission is"),
1561            "prompt should have mission statement"
1562        );
1563    }
1564
1565    #[test]
1566    fn test_build_pr_label_system_prompt_has_senior_persona() {
1567        let prompt = TestProvider::build_pr_label_system_prompt(None);
1568        assert!(
1569            prompt.contains("You are a senior"),
1570            "prompt should have senior persona"
1571        );
1572        assert!(
1573            prompt.contains("Your mission is"),
1574            "prompt should have mission statement"
1575        );
1576    }
1577
1578    #[tokio::test]
1579    async fn test_load_system_prompt_override_returns_none_when_absent() {
1580        let result =
1581            super::super::context::load_system_prompt_override("__nonexistent_test_override__")
1582                .await;
1583        assert!(result.is_none());
1584    }
1585
    #[tokio::test]
    async fn test_load_system_prompt_override_returns_content_when_present() {
        // NOTE(review): despite its name, this test never calls
        // `load_system_prompt_override` — it writes a temp file with std::fs
        // and reads it back with tokio::fs, so it only exercises the
        // filesystem round-trip, not the override-loading logic. Consider
        // pointing `load_system_prompt_override` at the tempdir (if its
        // lookup path is injectable) so the function itself is under test.
        use std::io::Write;
        let dir = tempfile::tempdir().expect("create tempdir");
        let file_path = dir.path().join("test_override.md");
        let mut f = std::fs::File::create(&file_path).expect("create file");
        writeln!(f, "Custom override content").expect("write file");
        // Close the file before reading so the write is flushed to disk.
        drop(f);

        // writeln! appends '\n', hence the trailing newline in the expectation.
        let content = tokio::fs::read_to_string(&file_path).await.ok();
        assert_eq!(content.as_deref(), Some("Custom override content\n"));
    }
1598}