1use std::collections::HashMap;
2use std::path::{Path, PathBuf};
3
4use md5::{Digest, Md5};
5use serde::{Deserialize, Serialize};
6
/// One indexed unit of code: a single symbol (function, struct, class, …)
/// extracted from a source file, stored together with its pre-tokenized text.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CodeChunk {
    /// Source file path, relative to the indexed root when possible.
    pub file_path: String,
    /// Detected symbol name (or the file path for whole-file fallback chunks).
    pub symbol_name: String,
    /// Coarse classification of the symbol.
    pub kind: ChunkKind,
    /// 1-based first line of the chunk in the source file.
    pub start_line: usize,
    /// 1-based last line of the chunk in the source file.
    pub end_line: usize,
    /// Raw chunk text (truncated to 50 lines for whole-file chunks).
    pub content: String,
    /// Tokens extracted from the chunk, including camelCase sub-words.
    pub tokens: Vec<String>,
    /// Cached `tokens.len()`; used as the BM25 document length.
    pub token_count: usize,
}
18
/// Coarse classification of an indexed symbol.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ChunkKind {
    /// Free function (`fn`, `def`, `func`, `function`, …).
    Function,
    /// Struct, enum, trait, or interface definition.
    Struct,
    /// Rust `impl` block.
    Impl,
    /// Whole-file fallback chunk when no symbol was detected.
    Module,
    /// Class definition.
    Class,
    /// Method — NOTE(review): not produced by `detect_symbol` in this file;
    /// presumably reserved for other producers.
    Method,
    /// Anything else.
    Other,
}
29
/// Serializable BM25 inverted index over [`CodeChunk`]s.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BM25Index {
    /// All indexed chunks; postings refer to positions in this vector.
    pub chunks: Vec<CodeChunk>,
    /// Lowercased term -> postings of (chunk index, term weight).
    pub inverted: HashMap<String, Vec<(usize, f64)>>,
    /// Average chunk token count, used for BM25 length normalization.
    pub avg_doc_len: f64,
    /// Total number of indexed chunks.
    pub doc_count: usize,
    /// Lowercased term -> number of distinct chunks containing it.
    pub doc_freqs: HashMap<String, usize>,
}
38
/// One hit returned by [`BM25Index::search`], flattened for display.
#[derive(Debug, Clone)]
pub struct SearchResult {
    /// Position of the matching chunk in `BM25Index::chunks`.
    pub chunk_idx: usize,
    /// Accumulated BM25 score over all matching query terms.
    pub score: f64,
    /// Source file path of the matching chunk.
    pub file_path: String,
    /// Symbol name of the matching chunk.
    pub symbol_name: String,
    /// Symbol classification of the matching chunk.
    pub kind: ChunkKind,
    /// 1-based first line of the chunk.
    pub start_line: usize,
    /// 1-based last line of the chunk.
    pub end_line: usize,
    /// First few lines of the chunk content, for preview.
    pub snippet: String,
}
50
/// BM25 term-frequency saturation parameter (k1); standard default.
const BM25_K1: f64 = 1.2;
/// BM25 document-length normalization strength (b); standard default.
const BM25_B: f64 = 0.75;
53
impl Default for BM25Index {
    /// An empty index; equivalent to [`BM25Index::new`].
    fn default() -> Self {
        Self::new()
    }
}
59
60impl BM25Index {
61 pub fn new() -> Self {
62 Self {
63 chunks: Vec::new(),
64 inverted: HashMap::new(),
65 avg_doc_len: 0.0,
66 doc_count: 0,
67 doc_freqs: HashMap::new(),
68 }
69 }
70
71 pub fn build_from_directory(root: &Path) -> Self {
72 let mut index = Self::new();
73 let walker = ignore::WalkBuilder::new(root)
74 .hidden(true)
75 .git_ignore(true)
76 .max_depth(Some(10))
77 .build();
78
79 let mut file_count = 0usize;
80 for entry in walker.flatten() {
81 if file_count >= 2000 {
82 break;
83 }
84 let path = entry.path();
85 if !path.is_file() {
86 continue;
87 }
88 if !is_code_file(path) {
89 continue;
90 }
91 if let Ok(content) = std::fs::read_to_string(path) {
92 let rel = path
93 .strip_prefix(root)
94 .unwrap_or(path)
95 .to_string_lossy()
96 .to_string();
97 let chunks = extract_chunks(&rel, &content);
98 for chunk in chunks {
99 index.add_chunk(chunk);
100 }
101 file_count += 1;
102 }
103 }
104
105 index.finalize();
106 index
107 }
108
109 fn add_chunk(&mut self, chunk: CodeChunk) {
110 let idx = self.chunks.len();
111
112 for token in &chunk.tokens {
113 let lower = token.to_lowercase();
114 self.inverted.entry(lower).or_default().push((idx, 1.0));
115 }
116
117 self.chunks.push(chunk);
118 }
119
120 fn finalize(&mut self) {
121 self.doc_count = self.chunks.len();
122 if self.doc_count == 0 {
123 return;
124 }
125
126 let total_len: usize = self.chunks.iter().map(|c| c.token_count).sum();
127 self.avg_doc_len = total_len as f64 / self.doc_count as f64;
128
129 self.doc_freqs.clear();
130 for (term, postings) in &self.inverted {
131 let unique_docs: std::collections::HashSet<usize> =
132 postings.iter().map(|(idx, _)| *idx).collect();
133 self.doc_freqs.insert(term.clone(), unique_docs.len());
134 }
135 }
136
137 pub fn search(&self, query: &str, top_k: usize) -> Vec<SearchResult> {
138 let query_tokens = tokenize(query);
139 if query_tokens.is_empty() || self.doc_count == 0 {
140 return Vec::new();
141 }
142
143 let mut scores: HashMap<usize, f64> = HashMap::new();
144
145 for token in &query_tokens {
146 let lower = token.to_lowercase();
147 let df = *self.doc_freqs.get(&lower).unwrap_or(&0) as f64;
148 if df == 0.0 {
149 continue;
150 }
151
152 let idf = ((self.doc_count as f64 - df + 0.5) / (df + 0.5) + 1.0).ln();
153
154 if let Some(postings) = self.inverted.get(&lower) {
155 let mut doc_tfs: HashMap<usize, f64> = HashMap::new();
156 for (idx, weight) in postings {
157 *doc_tfs.entry(*idx).or_insert(0.0) += weight;
158 }
159
160 for (doc_idx, tf) in &doc_tfs {
161 let doc_len = self.chunks[*doc_idx].token_count as f64;
162 let norm_len = doc_len / self.avg_doc_len.max(1.0);
163 let bm25 = idf * (tf * (BM25_K1 + 1.0))
164 / (tf + BM25_K1 * (1.0 - BM25_B + BM25_B * norm_len));
165
166 *scores.entry(*doc_idx).or_insert(0.0) += bm25;
167 }
168 }
169 }
170
171 let mut results: Vec<SearchResult> = scores
172 .into_iter()
173 .map(|(idx, score)| {
174 let chunk = &self.chunks[idx];
175 let snippet = chunk.content.lines().take(5).collect::<Vec<_>>().join("\n");
176 SearchResult {
177 chunk_idx: idx,
178 score,
179 file_path: chunk.file_path.clone(),
180 symbol_name: chunk.symbol_name.clone(),
181 kind: chunk.kind.clone(),
182 start_line: chunk.start_line,
183 end_line: chunk.end_line,
184 snippet,
185 }
186 })
187 .collect();
188
189 results.sort_by(|a, b| {
190 b.score
191 .partial_cmp(&a.score)
192 .unwrap_or(std::cmp::Ordering::Equal)
193 });
194 results.truncate(top_k);
195 results
196 }
197
198 pub fn save(&self, root: &Path) -> std::io::Result<()> {
199 let dir = index_dir(root);
200 std::fs::create_dir_all(&dir)?;
201 let data = serde_json::to_string(self).map_err(std::io::Error::other)?;
202 std::fs::write(dir.join("bm25_index.json"), data)?;
203 Ok(())
204 }
205
206 pub fn load(root: &Path) -> Option<Self> {
207 let path = index_dir(root).join("bm25_index.json");
208 let data = std::fs::read_to_string(path).ok()?;
209 serde_json::from_str(&data).ok()
210 }
211
212 pub fn load_or_build(root: &Path) -> Self {
213 Self::load(root).unwrap_or_else(|| {
214 let built = Self::build_from_directory(root);
215 let _ = built.save(root);
216 built
217 })
218 }
219}
220
221fn index_dir(root: &Path) -> PathBuf {
222 let mut hasher = Md5::new();
223 hasher.update(root.to_string_lossy().as_bytes());
224 let hash = format!("{:x}", hasher.finalize());
225 dirs::home_dir()
226 .unwrap_or_else(|| PathBuf::from("."))
227 .join(".lean-ctx")
228 .join("vectors")
229 .join(hash)
230}
231
/// Returns true when `path` has a file extension this indexer knows how to
/// chunk. Files without an extension are rejected.
fn is_code_file(path: &Path) -> bool {
    const CODE_EXTENSIONS: &[&str] = &[
        "rs", "ts", "tsx", "js", "jsx", "py", "go", "java", "c", "cpp", "h",
        "hpp", "rb", "cs", "kt", "swift", "php", "scala", "ex", "exs", "zig",
        "lua", "dart", "vue", "svelte",
    ];
    match path.extension().and_then(|e| e.to_str()) {
        Some(ext) => CODE_EXTENSIONS.contains(&ext),
        None => false,
    }
}
262
/// Splits `text` into identifier-like tokens: runs of alphanumerics and
/// underscores, at least two bytes long, then expanded with camelCase
/// sub-words via `split_camel_case_tokens`.
fn tokenize(text: &str) -> Vec<String> {
    let mut raw = Vec::new();
    let mut word = String::new();

    for ch in text.chars() {
        if ch.is_alphanumeric() || ch == '_' {
            word.push(ch);
            continue;
        }
        // Separator hit: keep the pending word only if it is long enough.
        if word.len() >= 2 {
            raw.push(std::mem::take(&mut word));
        } else {
            word.clear();
        }
    }
    if word.len() >= 2 {
        raw.push(word);
    }

    split_camel_case_tokens(&raw)
}

/// Returns every input token unchanged, followed by its camelCase pieces
/// of at least two bytes (e.g. "HTTPServer" -> "HTTP", "Server").
/// Tokens without an internal case boundary contribute nothing extra.
fn split_camel_case_tokens(tokens: &[String]) -> Vec<String> {
    let mut out = Vec::with_capacity(tokens.len());

    for token in tokens {
        out.push(token.clone());
        let chars: Vec<char> = token.chars().collect();
        let mut seg_start = 0usize;

        for i in 1..chars.len() {
            // A boundary is an uppercase char NOT followed by another
            // uppercase char (so acronym runs like "HTTP" stay together).
            let boundary = chars[i].is_uppercase()
                && chars.get(i + 1).map_or(true, |c| !c.is_uppercase());
            if boundary {
                let piece: String = chars[seg_start..i].iter().collect();
                if piece.len() >= 2 {
                    out.push(piece);
                }
                seg_start = i;
            }
        }

        // Emit the trailing segment only when at least one split happened.
        if seg_start > 0 {
            let piece: String = chars[seg_start..].iter().collect();
            if piece.len() >= 2 {
                out.push(piece);
            }
        }
    }

    out
}
308
309fn extract_chunks(file_path: &str, content: &str) -> Vec<CodeChunk> {
310 let lines: Vec<&str> = content.lines().collect();
311 if lines.is_empty() {
312 return Vec::new();
313 }
314
315 let mut chunks = Vec::new();
316 let mut i = 0;
317
318 while i < lines.len() {
319 let trimmed = lines[i].trim();
320
321 if let Some((name, kind)) = detect_symbol(trimmed) {
322 let start = i;
323 let end = find_block_end(&lines, i);
324 let block: String = lines[start..=end.min(lines.len() - 1)].to_vec().join("\n");
325 let tokens = tokenize(&block);
326 let token_count = tokens.len();
327
328 chunks.push(CodeChunk {
329 file_path: file_path.to_string(),
330 symbol_name: name,
331 kind,
332 start_line: start + 1,
333 end_line: end + 1,
334 content: block,
335 tokens,
336 token_count,
337 });
338
339 i = end + 1;
340 } else {
341 i += 1;
342 }
343 }
344
345 if chunks.is_empty() && !content.is_empty() {
346 let tokens = tokenize(content);
347 let token_count = tokens.len();
348 let snippet = lines
349 .iter()
350 .take(50)
351 .copied()
352 .collect::<Vec<_>>()
353 .join("\n");
354 chunks.push(CodeChunk {
355 file_path: file_path.to_string(),
356 symbol_name: file_path.to_string(),
357 kind: ChunkKind::Module,
358 start_line: 1,
359 end_line: lines.len(),
360 content: snippet,
361 tokens,
362 token_count,
363 });
364 }
365
366 chunks
367}
368
369fn detect_symbol(line: &str) -> Option<(String, ChunkKind)> {
370 let trimmed = line.trim();
371
372 let patterns: &[(&str, ChunkKind)] = &[
373 ("pub async fn ", ChunkKind::Function),
374 ("async fn ", ChunkKind::Function),
375 ("pub fn ", ChunkKind::Function),
376 ("fn ", ChunkKind::Function),
377 ("pub struct ", ChunkKind::Struct),
378 ("struct ", ChunkKind::Struct),
379 ("pub enum ", ChunkKind::Struct),
380 ("enum ", ChunkKind::Struct),
381 ("impl ", ChunkKind::Impl),
382 ("pub trait ", ChunkKind::Struct),
383 ("trait ", ChunkKind::Struct),
384 ("export function ", ChunkKind::Function),
385 ("export async function ", ChunkKind::Function),
386 ("export default function ", ChunkKind::Function),
387 ("function ", ChunkKind::Function),
388 ("async function ", ChunkKind::Function),
389 ("export class ", ChunkKind::Class),
390 ("class ", ChunkKind::Class),
391 ("export interface ", ChunkKind::Struct),
392 ("interface ", ChunkKind::Struct),
393 ("def ", ChunkKind::Function),
394 ("async def ", ChunkKind::Function),
395 ("class ", ChunkKind::Class),
396 ("func ", ChunkKind::Function),
397 ];
398
399 for (prefix, kind) in patterns {
400 if let Some(rest) = trimmed.strip_prefix(prefix) {
401 let name: String = rest
402 .chars()
403 .take_while(|c| c.is_alphanumeric() || *c == '_' || *c == '<')
404 .take_while(|c| *c != '<')
405 .collect();
406 if !name.is_empty() {
407 return Some((name, kind.clone()));
408 }
409 }
410 }
411
412 None
413}
414
/// Heuristically finds the index of the last line of the block starting at
/// `lines[start]`.
///
/// Brace-delimited languages: tracks `{`/`}` nesting and returns the line
/// that closes the first `{`. Parentheses are deliberately NOT counted —
/// previously a header like `fn f(x: i32) {` terminated the block at its
/// own `)`, collapsing every chunk to its signature line.
///
/// Brace-less languages (e.g. Python): the block ends just before the
/// first later line that is non-empty and not indented. (The old check
/// tested `starts_with(' ')` on an already-trimmed string, so it fired on
/// every line after `start + 2`.)
///
/// Falls back to 50 lines past `start` when no terminator is found.
/// Braces inside strings or comments can fool this scan; it is a
/// best-effort heuristic, not a parser.
fn find_block_end(lines: &[&str], start: usize) -> usize {
    let mut depth = 0i32;
    let mut found_open = false;

    for (i, line) in lines.iter().enumerate().skip(start) {
        for ch in line.chars() {
            match ch {
                '{' => {
                    depth += 1;
                    found_open = true;
                }
                // Ignore stray closers seen before the first opener.
                '}' if depth > 0 => {
                    depth -= 1;
                    if depth == 0 {
                        return i;
                    }
                }
                _ => {}
            }
        }

        if !found_open && i > start {
            // No brace seen yet: treat the first dedented, non-empty raw
            // line as the start of the next top-level construct.
            let raw = lines[i];
            let dedented = !raw.trim().is_empty()
                && !raw.starts_with(' ')
                && !raw.starts_with('\t');
            if dedented {
                return i - 1;
            }
        }
    }

    (start + 50).min(lines.len().saturating_sub(1))
}
452
453pub fn format_search_results(results: &[SearchResult], compact: bool) -> String {
454 if results.is_empty() {
455 return "No results found.".to_string();
456 }
457
458 let mut out = String::new();
459 for (i, r) in results.iter().enumerate() {
460 if compact {
461 out.push_str(&format!(
462 "{}. {:.2} {}:{}-{} {:?} {}\n",
463 i + 1,
464 r.score,
465 r.file_path,
466 r.start_line,
467 r.end_line,
468 r.kind,
469 r.symbol_name,
470 ));
471 } else {
472 out.push_str(&format!(
473 "\n--- Result {} (score: {:.2}) ---\n{} :: {} [{:?}] (L{}-{})\n{}\n",
474 i + 1,
475 r.score,
476 r.file_path,
477 r.symbol_name,
478 r.kind,
479 r.start_line,
480 r.end_line,
481 r.snippet,
482 ));
483 }
484 }
485 out
486}
487
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn tokenize_splits_code() {
        let tokens = tokenize("fn calculate_total(items: Vec<Item>) -> f64");
        assert!(tokens.contains(&"calculate_total".to_string()));
        assert!(tokens.contains(&"items".to_string()));
        assert!(tokens.contains(&"Vec".to_string()));
    }

    #[test]
    fn camel_case_splitting() {
        let tokens = split_camel_case_tokens(&["calculateTotal".to_string()]);
        assert!(tokens.contains(&"calculateTotal".to_string()));
        assert!(tokens.contains(&"calculate".to_string()));
        assert!(tokens.contains(&"Total".to_string()));
    }

    #[test]
    fn detect_rust_function() {
        let (name, kind) =
            detect_symbol("pub fn process_request(req: Request) -> Response {").unwrap();
        assert_eq!(name, "process_request");
        assert_eq!(kind, ChunkKind::Function);
    }

    #[test]
    fn bm25_search_finds_relevant() {
        // Build chunks through a helper so `token_count` always equals
        // `tokens.len()` (it was previously hard-coded and out of sync
        // with the actual token list).
        fn chunk(file: &str, name: &str, content: &str) -> CodeChunk {
            let tokens = tokenize(content);
            let token_count = tokens.len();
            CodeChunk {
                file_path: file.into(),
                symbol_name: name.into(),
                kind: ChunkKind::Function,
                start_line: 1,
                end_line: 10,
                content: content.into(),
                tokens,
                token_count,
            }
        }

        let mut index = BM25Index::new();
        index.add_chunk(chunk(
            "auth.rs",
            "validate_token",
            "fn validate_token(token: &str) -> bool { check_jwt_expiry(token) }",
        ));
        index.add_chunk(chunk(
            "db.rs",
            "connect_database",
            "fn connect_database(url: &str) -> Pool { create_pool(url) }",
        ));
        index.finalize();

        let results = index.search("jwt token validation", 5);
        assert!(!results.is_empty());
        assert_eq!(results[0].symbol_name, "validate_token");
    }
}