/// Dispatch refactor modes that bypass project-wide processing.
///
/// Returns `Ok(Some(()))` when a special mode fully handled the run, or
/// `Ok(None)` when the caller should fall through to normal processing
/// (project-wide mode, or a bug report that is not a Markdown file).
async fn handle_special_modes(context: &RefactorContext) -> Result<Option<()>> {
    let output = &context.config.output;
    // Reduce the mode to an optional single file to refactor directly;
    // GitHub issues run through their own pipeline and return early.
    let target = match &context.config.mode {
        RefactorMode::SingleFile(file_path) => Some(file_path.clone()),
        RefactorMode::BugReport(bug_path) => {
            // Only Markdown bug reports are handled as a direct single-file run.
            if bug_path.extension().and_then(|ext| ext.to_str()) == Some("md") {
                Some(bug_path.clone())
            } else {
                None
            }
        }
        RefactorMode::GitHubIssue(url) => {
            process_github_issue(url, context).await?;
            return Ok(Some(()));
        }
        RefactorMode::ProjectWide => None,
    };
    match target {
        Some(path) => {
            handle_single_file_refactor(path, output.format, output.dry_run, output.max_iterations)
                .await?;
            Ok(Some(()))
        }
        None => Ok(None),
    }
}
/// Drive the GitHub-issue refactor flow: parse the issue URL, fetch the
/// issue content, extract mentioned files, and refactor each one.
///
/// # Errors
/// Propagates failures from URL parsing, issue fetching, file extraction,
/// or any individual single-file refactor.
async fn process_github_issue(url: &str, context: &RefactorContext) -> Result<()> {
    eprintln!("🔗 GitHub issue mode: {url}");
    let issue_ref = parse_github_issue_url(url)?;
    eprintln!(
        "📋 Processing issue #{} from {}/{}",
        issue_ref.issue_number, issue_ref.owner, issue_ref.repo
    );
    let issue = fetch_github_issue_content(&issue_ref).await?;
    eprintln!("📄 Issue title: {}", issue.title);
    let files = extract_target_files_from_issue(&issue, &context.config.project_path)?;
    eprintln!("🎯 Target files identified: {}", files.len());
    let output = &context.config.output;
    for target in files {
        eprintln!("🔍 Analyzing file: {}", target.display());
        handle_single_file_refactor(target, output.format, output.dry_run, output.max_iterations)
            .await?;
    }
    Ok(())
}
/// Parse a GitHub issue URL of the form
/// `https://github.com/owner/repo/issues/number` into its components.
///
/// Trailing query strings or fragments on the issue number (e.g.
/// `.../issues/42#issuecomment-1` or `.../issues/42?foo=bar`) are ignored.
///
/// # Errors
/// Returns an error if the URL does not match the expected shape or the
/// issue number is not a valid integer.
fn parse_github_issue_url(url: &str) -> Result<GitHubIssueRef> {
    let url_parts: Vec<&str> = url.split('/').collect();
    // Expected split: ["https:", "", "github.com", owner, repo, "issues", number, ...]
    if url_parts.len() < 7 || url_parts[2] != "github.com" || url_parts[5] != "issues" {
        return Err(anyhow::anyhow!("Invalid GitHub issue URL format. Expected: https://github.com/owner/repo/issues/number"));
    }
    let owner = url_parts[3].to_string();
    let repo = url_parts[4].to_string();
    // Strip any fragment or query appended to the number segment so that
    // copy-pasted links like ".../issues/42#issuecomment-1" still parse.
    let number_segment = url_parts[6]
        .split(|c| c == '?' || c == '#')
        .next()
        .unwrap_or(url_parts[6]);
    let issue_number = number_segment
        .parse::<u64>()
        .context("Issue number must be a valid integer")?;
    Ok(GitHubIssueRef {
        owner,
        repo,
        issue_number,
    })
}
/// Fetch the referenced issue from GitHub (requires the `http-client` feature).
///
/// # Errors
/// Fails if the GitHub client cannot be constructed or the network fetch fails.
#[cfg(feature = "http-client")]
async fn fetch_github_issue_content(issue_ref: &GitHubIssueRef) -> Result<GitHubIssueContent> {
    use crate::services::github_integration::GitHubClient;
    let client = GitHubClient::new()?;
    // Reconstruct the canonical issue URL from the parsed reference.
    let issue_url = format!(
        "https://github.com/{}/{}/issues/{}",
        issue_ref.owner, issue_ref.repo, issue_ref.issue_number
    );
    let issue = client
        .fetch_issue(&issue_url)
        .await
        .context("Failed to fetch GitHub issue")?;
    Ok(GitHubIssueContent {
        // `issue` is owned here (its `body` is moved below), so move the
        // title instead of cloning it.
        title: issue.title,
        body: issue.body.unwrap_or_default(),
        number: issue_ref.issue_number,
    })
}
/// Stub used when built without HTTP support: always fails with guidance
/// on how to enable issue fetching.
#[cfg(not(feature = "http-client"))]
async fn fetch_github_issue_content(_issue_ref: &GitHubIssueRef) -> Result<GitHubIssueContent> {
    Err(anyhow::anyhow!(
        "GitHub issue fetching requires the http-client feature"
    ))
}
/// Extract candidate Rust source paths mentioned in a GitHub issue.
///
/// Scans the issue title and body for `.rs` paths (plain or backtick-quoted).
/// Relative paths are resolved against `project_path`; absolute paths are
/// kept as-is. When the issue names no files, falls back to the project's
/// conventional entry points (`src/main.rs`, `src/lib.rs`, and the `server/`
/// equivalents) that exist on disk.
///
/// # Errors
/// Returns an error if one of the search patterns fails to compile.
fn extract_target_files_from_issue(
    issue_content: &GitHubIssueContent,
    project_path: &Path,
) -> Result<Vec<PathBuf>> {
    let mut target_files = Vec::new();
    // First-seen order is preserved in `target_files`; the set gives O(1)
    // dedup instead of a linear `Vec::contains` scan per match.
    let mut seen = std::collections::HashSet::new();
    let file_patterns = [
        r"src/[a-zA-Z0-9_/]+\.rs", r"[a-zA-Z0-9_/]+\.rs", r"`[^`]+\.rs`", r"server/src/[a-zA-Z0-9_/]+\.rs", ];
    let full_content = format!("{}\n{}", issue_content.title, issue_content.body);
    for pattern in &file_patterns {
        // with_context: only build the error message on the failure path.
        let re = regex::Regex::new(pattern)
            .with_context(|| format!("Invalid regex pattern: {pattern}"))?;
        for capture in re.find_iter(&full_content) {
            // Backtick-quoted matches keep their delimiters; strip them.
            let file_path_str = capture.as_str().trim_matches('`');
            let full_path = if file_path_str.starts_with('/') {
                PathBuf::from(file_path_str)
            } else {
                project_path.join(file_path_str)
            };
            if seen.insert(full_path.clone()) {
                target_files.push(full_path);
            }
        }
    }
    if target_files.is_empty() {
        eprintln!("⚠️ No specific files mentioned in issue, analyzing main source files");
        // Conventional entry points for single-crate and server layouts.
        let main_candidates = [
            project_path.join("src/main.rs"),
            project_path.join("src/lib.rs"),
            project_path.join("server/src/main.rs"),
            project_path.join("server/src/lib.rs"),
        ];
        for candidate in &main_candidates {
            if candidate.exists() {
                target_files.push(candidate.clone());
            }
        }
    }
    Ok(target_files)
}