1use anyhow::{anyhow, bail, Result};
17use memvid_core::{Reranker, RerankerConfig, RerankerDocument, RerankerResult};
18use reqwest::blocking::Client;
19use serde::{Deserialize, Serialize};
20use std::sync::atomic::{AtomicBool, Ordering};
21use std::time::Duration;
22use tracing::{debug, info, warn};
23
/// OpenAI Chat Completions endpoint used for LLM-based relevance scoring.
const OPENAI_CHAT_URL: &str = "https://api.openai.com/v1/chat/completions";

/// Model used when the caller does not supply an override.
const DEFAULT_RERANK_MODEL: &str = "gpt-4o-mini";

/// Per-request HTTP timeout for the blocking client.
const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);

/// Upper bound on documents embedded in a single scoring prompt; larger
/// candidate sets are split into chunks of this size.
const MAX_DOCS_PER_PROMPT: usize = 20;
35
/// Request body for the OpenAI Chat Completions API; field names mirror
/// the OpenAI wire format.
#[derive(Debug, Serialize)]
struct ChatRequest<'a> {
    // Model identifier, e.g. "gpt-4o-mini".
    model: &'a str,
    // Conversation messages (system instruction + user prompt).
    messages: Vec<ChatMessage<'a>>,
    // Sampling temperature; 0.0 is used here for deterministic scoring.
    temperature: f32,
    // Cap on tokens in the completion.
    max_tokens: usize,
}
44
/// A single chat message ("system" or "user" role) within a [`ChatRequest`].
#[derive(Debug, Serialize)]
struct ChatMessage<'a> {
    role: &'a str,
    content: &'a str,
}
50
51#[derive(Debug, Deserialize)]
53struct ChatResponse {
54 choices: Vec<ChatChoice>,
55 usage: ChatUsage,
56}
57
/// One completion choice inside a [`ChatResponse`].
#[derive(Debug, Deserialize)]
struct ChatChoice {
    message: ChatMessageResponse,
}
62
/// Assistant message body within a [`ChatChoice`].
#[derive(Debug, Deserialize)]
struct ChatMessageResponse {
    content: String,
}
67
/// Token usage block returned by OpenAI; only `total_tokens` is read
/// (for debug logging), the rest are kept for completeness.
#[derive(Debug, Deserialize)]
struct ChatUsage {
    #[allow(dead_code)]
    prompt_tokens: usize,
    #[allow(dead_code)]
    completion_tokens: usize,
    total_tokens: usize,
}
76
77#[derive(Debug, Deserialize)]
79struct OpenAIErrorResponse {
80 error: OpenAIError,
81}
82
/// Error detail within an [`OpenAIErrorResponse`].
#[derive(Debug, Deserialize)]
struct OpenAIError {
    message: String,
    // Wire field is "type", which is a Rust keyword, hence the rename.
    #[serde(rename = "type")]
    error_type: String,
}
89
/// One per-document relevance score parsed from the model's JSON reply.
#[derive(Debug, Deserialize)]
struct RelevanceScore {
    // Document id echoed back by the model.
    id: u64,
    // Relevance on the prompt's 0.0–1.0 scale, as judged by the model.
    score: f32,
}
96
/// LLM-based reranker that scores document relevance for a query via the
/// OpenAI Chat Completions API.
#[derive(Clone)]
pub struct OpenAIReranker {
    // Bearer token for the API; intentionally excluded from Debug output.
    api_key: String,
    // Chat model used for scoring.
    model: String,
    // Candidate cap, minimum score threshold, and top-k settings.
    config: RerankerConfig,
    // Blocking HTTP client built with REQUEST_TIMEOUT.
    client: Client,
    // Set after a successful `init` smoke test; Arc so clones share state.
    ready: std::sync::Arc<AtomicBool>,
}
108
109impl std::fmt::Debug for OpenAIReranker {
110 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
111 f.debug_struct("OpenAIReranker")
112 .field("model", &self.model)
113 .field("max_candidates", &self.config.max_candidates)
114 .field("ready", &self.ready.load(Ordering::Relaxed))
115 .finish()
116 }
117}
118
119impl OpenAIReranker {
120 pub fn new(api_key: String, model: Option<String>, config: RerankerConfig) -> Result<Self> {
127 if api_key.is_empty() {
128 bail!("OpenAI API key cannot be empty");
129 }
130
131 let client = crate::http::blocking_client(REQUEST_TIMEOUT)
132 .map_err(|e| anyhow!("Failed to create HTTP client: {}", e))?;
133
134 Ok(Self {
135 api_key,
136 model: model.unwrap_or_else(|| DEFAULT_RERANK_MODEL.to_string()),
137 config,
138 client,
139 ready: std::sync::Arc::new(AtomicBool::new(false)),
140 })
141 }
142
143 pub fn from_env() -> Result<Self> {
145 let api_key = std::env::var("OPENAI_API_KEY")
146 .map_err(|_| anyhow!("OPENAI_API_KEY environment variable not set"))?;
147
148 let model = std::env::var("OPENAI_RERANK_MODEL").ok();
149
150 Self::new(api_key, model, RerankerConfig::default())
151 }
152
153 pub fn high_precision(api_key: String) -> Result<Self> {
155 Self::new(
156 api_key,
157 Some("gpt-4o".to_string()),
158 RerankerConfig::high_precision(),
159 )
160 }
161
162 pub fn high_recall(api_key: String) -> Result<Self> {
164 Self::new(api_key, None, RerankerConfig::high_recall())
165 }
166
167 fn build_prompt(&self, query: &str, documents: &[&RerankerDocument]) -> String {
169 let mut prompt = format!(
170 r#"You are a relevance scoring assistant. Given a query and a list of documents, score each document's relevance to the query on a scale of 0.0 to 1.0.
171
172Query: "{}"
173
174Documents:
175"#,
176 query
177 );
178
179 for (idx, doc) in documents.iter().enumerate() {
180 let preview = if doc.text.len() > 500 {
181 format!("{}...", &doc.text[..500])
182 } else {
183 doc.text.clone()
184 };
185 prompt.push_str(&format!("\n[{}] ID={}: {}\n", idx + 1, doc.id, preview));
186 }
187
188 prompt.push_str(
189 r#"
190Return a JSON array of objects with "id" and "score" fields for each document.
191Score based on semantic relevance, not just keyword matching.
192Consider:
193- Direct answers to the query
194- Related context that helps answer the query
195- Factual relevance even if wording differs
196
197Output format (JSON only, no explanation):
198[{"id": 123, "score": 0.95}, {"id": 456, "score": 0.72}, ...]
199"#,
200 );
201
202 prompt
203 }
204
205 fn parse_scores(&self, response: &str) -> Result<Vec<RelevanceScore>> {
207 let json_start = response
209 .find('[')
210 .ok_or_else(|| anyhow!("No JSON array found"))?;
211 let json_end = response
212 .rfind(']')
213 .ok_or_else(|| anyhow!("No JSON array end found"))?;
214
215 let json_str = &response[json_start..=json_end];
216 let scores: Vec<RelevanceScore> = serde_json::from_str(json_str)
217 .map_err(|e| anyhow!("Failed to parse scores: {} from: {}", e, json_str))?;
218
219 Ok(scores)
220 }
221
222 fn call_openai(&self, prompt: &str) -> Result<String> {
224 let messages = vec![
225 ChatMessage {
226 role: "system",
227 content: "You are a document relevance scoring assistant. Output only valid JSON.",
228 },
229 ChatMessage {
230 role: "user",
231 content: prompt,
232 },
233 ];
234
235 let request = ChatRequest {
236 model: &self.model,
237 messages,
238 temperature: 0.0,
239 max_tokens: 1024,
240 };
241
242 let response = self
243 .client
244 .post(OPENAI_CHAT_URL)
245 .header("Authorization", format!("Bearer {}", self.api_key))
246 .header("Content-Type", "application/json")
247 .json(&request)
248 .send()
249 .map_err(|e| anyhow!("OpenAI API request failed: {}", e))?;
250
251 let status = response.status();
252 let body = response
253 .text()
254 .map_err(|e| anyhow!("Failed to read response body: {}", e))?;
255
256 if !status.is_success() {
257 if let Ok(error_response) = serde_json::from_str::<OpenAIErrorResponse>(&body) {
258 bail!(
259 "OpenAI API error ({}): {}",
260 error_response.error.error_type,
261 error_response.error.message
262 );
263 }
264 bail!("OpenAI API request failed with status {}: {}", status, body);
265 }
266
267 let chat_response: ChatResponse = serde_json::from_str(&body)
268 .map_err(|e| anyhow!("Failed to parse OpenAI response: {}", e))?;
269
270 let content = chat_response
271 .choices
272 .first()
273 .map(|c| c.message.content.clone())
274 .ok_or_else(|| anyhow!("No response content"))?;
275
276 debug!(
277 "OpenAI rerank: {} tokens used, model={}",
278 chat_response.usage.total_tokens, self.model
279 );
280
281 Ok(content)
282 }
283
284 fn rerank_with_retry(
286 &self,
287 query: &str,
288 documents: &[&RerankerDocument],
289 max_retries: usize,
290 ) -> Result<Vec<RelevanceScore>> {
291 let prompt = self.build_prompt(query, documents);
292 let mut last_error = None;
293
294 for attempt in 0..max_retries {
295 match self.call_openai(&prompt) {
296 Ok(response) => match self.parse_scores(&response) {
297 Ok(scores) => return Ok(scores),
298 Err(e) => {
299 warn!("Failed to parse scores (attempt {}): {}", attempt + 1, e);
300 last_error = Some(e);
301 }
302 },
303 Err(e) => {
304 let error_str = e.to_string();
305 if error_str.contains("rate_limit") || error_str.contains("429") {
306 let backoff = Duration::from_millis(1000 * (1 << attempt));
307 warn!(
308 "Rate limited, retrying in {:?} (attempt {}/{})",
309 backoff,
310 attempt + 1,
311 max_retries
312 );
313 std::thread::sleep(backoff);
314 last_error = Some(e);
315 continue;
316 }
317 return Err(e);
318 }
319 }
320 }
321
322 Err(last_error.unwrap_or_else(|| anyhow!("Failed after {} retries", max_retries)))
323 }
324}
325
326impl Reranker for OpenAIReranker {
327 fn kind(&self) -> &'static str {
328 "openai"
329 }
330
331 fn rerank(
332 &self,
333 query: &str,
334 documents: &[RerankerDocument],
335 top_k: usize,
336 ) -> memvid_core::Result<Vec<RerankerResult>> {
337 if documents.is_empty() {
338 return Ok(Vec::new());
339 }
340
341 let max_candidates = self.config.max_candidates.min(documents.len());
343 let candidates: Vec<&RerankerDocument> = documents.iter().take(max_candidates).collect();
344
345 let mut all_scores: Vec<RelevanceScore> = Vec::new();
347
348 for chunk in candidates.chunks(MAX_DOCS_PER_PROMPT) {
349 let scores = self.rerank_with_retry(query, chunk, 3).map_err(|e| {
350 memvid_core::MemvidError::RerankFailed {
351 reason: e.to_string().into_boxed_str(),
352 }
353 })?;
354 all_scores.extend(scores);
355 }
356
357 let mut results: Vec<RerankerResult> = all_scores
359 .into_iter()
360 .filter_map(|score| {
361 let original_rank = documents.iter().position(|d| d.id == score.id)?;
362 if score.score < self.config.min_score {
363 return None;
364 }
365 Some(RerankerResult {
366 id: score.id,
367 score: score.score,
368 original_rank: original_rank + 1,
369 new_rank: 0, })
371 })
372 .collect();
373
374 results.sort_by(|a, b| {
376 b.score
377 .partial_cmp(&a.score)
378 .unwrap_or(std::cmp::Ordering::Equal)
379 });
380
381 let top_k = top_k.min(self.config.top_k);
383 for (idx, result) in results.iter_mut().enumerate() {
384 result.new_rank = idx + 1;
385 }
386
387 Ok(results.into_iter().take(top_k).collect())
388 }
389
390 fn is_ready(&self) -> bool {
391 self.ready.load(Ordering::Relaxed)
392 }
393
394 fn init(&mut self) -> memvid_core::Result<()> {
395 info!("Initializing OpenAI reranker with model: {}", self.model);
396
397 let test_docs = vec![RerankerDocument::new(0, "Test document")];
399 let _ = self
400 .rerank_with_retry("test query", &[&test_docs[0]], 1)
401 .map_err(|e| memvid_core::MemvidError::RerankFailed {
402 reason: format!("Failed to initialize reranker: {}", e).into_boxed_str(),
403 })?;
404
405 info!("OpenAI reranker initialized successfully");
406 self.ready.store(true, Ordering::Relaxed);
407 Ok(())
408 }
409}
410
411pub fn try_openai_reranker() -> Option<OpenAIReranker> {
413 match OpenAIReranker::from_env() {
414 Ok(reranker) => {
415 info!("OpenAI reranker available");
416 Some(reranker)
417 }
418 Err(e) => {
419 debug!("OpenAI reranker not available: {}", e);
420 None
421 }
422 }
423}
424
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a reranker with a dummy key for offline prompt/parse tests.
    fn test_reranker() -> OpenAIReranker {
        OpenAIReranker::new("test-key".to_string(), None, RerankerConfig::default()).unwrap()
    }

    #[test]
    fn test_empty_api_key() {
        // An empty key must be rejected at construction time.
        assert!(OpenAIReranker::new(String::new(), None, RerankerConfig::default()).is_err());
    }

    #[test]
    fn test_build_prompt() {
        let reranker = test_reranker();

        let docs = vec![
            RerankerDocument::new(1, "First document about Rust"),
            RerankerDocument::new(2, "Second document about Python"),
        ];
        let doc_refs: Vec<&RerankerDocument> = docs.iter().collect();

        let prompt = reranker.build_prompt("What is Rust?", &doc_refs);

        // The query and every document (with its id) must appear verbatim.
        for needle in [
            "What is Rust?",
            "ID=1",
            "ID=2",
            "First document",
            "Second document",
        ] {
            assert!(prompt.contains(needle));
        }
    }

    #[test]
    fn test_parse_scores() {
        let reranker = test_reranker();

        // The parser must skip leading prose and read only the JSON array.
        let response = r#"Here are the scores:
[{"id": 1, "score": 0.95}, {"id": 2, "score": 0.42}]"#;

        let scores = reranker.parse_scores(response).unwrap();

        assert_eq!(scores.len(), 2);
        assert_eq!(scores[0].id, 1);
        assert!((scores[0].score - 0.95).abs() < 0.01);
        assert_eq!(scores[1].id, 2);
        assert!((scores[1].score - 0.42).abs() < 0.01);
    }

    /// Hits the real API; run with `cargo test -- --ignored` and a valid key.
    #[test]
    #[ignore]
    fn test_real_rerank() {
        let reranker = OpenAIReranker::from_env().expect("OPENAI_API_KEY must be set");

        let docs = vec![
            RerankerDocument::new(
                1,
                "Rust is a systems programming language focused on safety.",
            ),
            RerankerDocument::new(2, "Python is great for data science and machine learning."),
            RerankerDocument::new(3, "Rust provides memory safety without garbage collection."),
        ];

        let results = reranker.rerank("What makes Rust safe?", &docs, 2).unwrap();
        assert!(!results.is_empty());
        assert!(results[0].id == 1 || results[0].id == 3);
    }
}
490}