1use std::collections::HashMap;
2use std::path::{Path, PathBuf};
3
4use md5::{Digest, Md5};
5use serde::{Deserialize, Serialize};
6
/// A single indexed unit of code: one detected symbol (function, struct,
/// class, ...) or, as a fallback, a whole file — plus its pre-tokenized text.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CodeChunk {
    /// Source file path, relative to the indexed root when possible.
    pub file_path: String,
    /// Detected symbol name; for whole-file chunks this is the file path.
    pub symbol_name: String,
    /// Coarse classification of the chunk.
    pub kind: ChunkKind,
    /// 1-based first line of the chunk in the source file.
    pub start_line: usize,
    /// 1-based last line (inclusive).
    pub end_line: usize,
    /// Raw chunk text (whole-file chunks keep only the first 50 lines).
    pub content: String,
    /// Tokens extracted from the chunk (camelCase split into parts).
    pub tokens: Vec<String>,
    /// Cached `tokens.len()`, used for BM25 document-length normalization.
    pub token_count: usize,
}
18
/// Coarse classification of what a [`CodeChunk`] contains.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ChunkKind {
    /// A free function (Rust `fn`, JS `function`, Python `def`, Go `func`).
    Function,
    /// A data/type definition (`struct`, `enum`, `trait`, `interface`).
    Struct,
    /// A Rust `impl` block.
    Impl,
    /// A whole file indexed as a single fallback chunk.
    Module,
    /// A `class` definition (JS/TS/Python).
    Class,
    /// NOTE(review): never produced by `detect_symbol` in this file —
    /// presumably assigned by another chunker; confirm before removing.
    Method,
    /// NOTE(review): also not produced by `detect_symbol` here.
    Other,
}
29
/// A serializable BM25 inverted index over [`CodeChunk`]s.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BM25Index {
    /// All indexed chunks; posting entries refer to positions in this vector.
    pub chunks: Vec<CodeChunk>,
    /// Lowercased term -> list of (chunk index, term weight) postings.
    pub inverted: HashMap<String, Vec<(usize, f64)>>,
    /// Average token count per chunk (denominator for length normalization).
    pub avg_doc_len: f64,
    /// Total number of indexed chunks.
    pub doc_count: usize,
    /// Lowercased term -> number of distinct chunks containing it.
    pub doc_freqs: HashMap<String, usize>,
}
38
/// One search hit: a reference to an indexed chunk plus its BM25 score and a
/// short text preview.
#[derive(Debug, Clone)]
pub struct SearchResult {
    /// Index of the matched chunk in `BM25Index::chunks`.
    pub chunk_idx: usize,
    /// Accumulated BM25 score over all query terms (higher is better).
    pub score: f64,
    /// Copied from the matched chunk.
    pub file_path: String,
    /// Copied from the matched chunk.
    pub symbol_name: String,
    /// Copied from the matched chunk.
    pub kind: ChunkKind,
    /// 1-based first line of the matched chunk.
    pub start_line: usize,
    /// 1-based last line (inclusive).
    pub end_line: usize,
    /// First 5 lines of the chunk's content.
    pub snippet: String,
}
50
/// BM25 `k1`: term-frequency saturation strength (standard default).
const BM25_K1: f64 = 1.2;
/// BM25 `b`: document-length normalization strength (standard default).
const BM25_B: f64 = 0.75;
53
impl Default for BM25Index {
    /// Equivalent to [`BM25Index::new`]: an empty, un-finalized index.
    fn default() -> Self {
        Self::new()
    }
}
59
60impl BM25Index {
61 pub fn new() -> Self {
62 Self {
63 chunks: Vec::new(),
64 inverted: HashMap::new(),
65 avg_doc_len: 0.0,
66 doc_count: 0,
67 doc_freqs: HashMap::new(),
68 }
69 }
70
71 pub fn build_from_directory(root: &Path) -> Self {
72 let mut index = Self::new();
73 let walker = ignore::WalkBuilder::new(root)
74 .hidden(true)
75 .git_ignore(true)
76 .max_depth(Some(10))
77 .build();
78
79 let mut file_count = 0usize;
80 for entry in walker.flatten() {
81 if file_count >= 2000 {
82 break;
83 }
84 let path = entry.path();
85 if !path.is_file() {
86 continue;
87 }
88 if !is_code_file(path) {
89 continue;
90 }
91 if let Ok(content) = std::fs::read_to_string(path) {
92 let rel = path
93 .strip_prefix(root)
94 .unwrap_or(path)
95 .to_string_lossy()
96 .to_string();
97 let chunks = extract_chunks(&rel, &content);
98 for chunk in chunks {
99 index.add_chunk(chunk);
100 }
101 file_count += 1;
102 }
103 }
104
105 index.finalize();
106 index
107 }
108
109 fn add_chunk(&mut self, chunk: CodeChunk) {
110 let idx = self.chunks.len();
111
112 for token in &chunk.tokens {
113 let lower = token.to_lowercase();
114 self.inverted.entry(lower).or_default().push((idx, 1.0));
115 }
116
117 self.chunks.push(chunk);
118 }
119
120 fn finalize(&mut self) {
121 self.doc_count = self.chunks.len();
122 if self.doc_count == 0 {
123 return;
124 }
125
126 let total_len: usize = self.chunks.iter().map(|c| c.token_count).sum();
127 self.avg_doc_len = total_len as f64 / self.doc_count as f64;
128
129 self.doc_freqs.clear();
130 for (term, postings) in &self.inverted {
131 let unique_docs: std::collections::HashSet<usize> =
132 postings.iter().map(|(idx, _)| *idx).collect();
133 self.doc_freqs.insert(term.clone(), unique_docs.len());
134 }
135 }
136
137 pub fn search(&self, query: &str, top_k: usize) -> Vec<SearchResult> {
138 let query_tokens = tokenize(query);
139 if query_tokens.is_empty() || self.doc_count == 0 {
140 return Vec::new();
141 }
142
143 let mut scores: HashMap<usize, f64> = HashMap::new();
144
145 for token in &query_tokens {
146 let lower = token.to_lowercase();
147 let df = *self.doc_freqs.get(&lower).unwrap_or(&0) as f64;
148 if df == 0.0 {
149 continue;
150 }
151
152 let idf = ((self.doc_count as f64 - df + 0.5) / (df + 0.5) + 1.0).ln();
153
154 if let Some(postings) = self.inverted.get(&lower) {
155 let mut doc_tfs: HashMap<usize, f64> = HashMap::new();
156 for (idx, weight) in postings {
157 *doc_tfs.entry(*idx).or_insert(0.0) += weight;
158 }
159
160 for (doc_idx, tf) in &doc_tfs {
161 let doc_len = self.chunks[*doc_idx].token_count as f64;
162 let norm_len = doc_len / self.avg_doc_len.max(1.0);
163 let bm25 = idf * (tf * (BM25_K1 + 1.0))
164 / (tf + BM25_K1 * (1.0 - BM25_B + BM25_B * norm_len));
165
166 *scores.entry(*doc_idx).or_insert(0.0) += bm25;
167 }
168 }
169 }
170
171 let mut results: Vec<SearchResult> = scores
172 .into_iter()
173 .map(|(idx, score)| {
174 let chunk = &self.chunks[idx];
175 let snippet = chunk.content.lines().take(5).collect::<Vec<_>>().join("\n");
176 SearchResult {
177 chunk_idx: idx,
178 score,
179 file_path: chunk.file_path.clone(),
180 symbol_name: chunk.symbol_name.clone(),
181 kind: chunk.kind.clone(),
182 start_line: chunk.start_line,
183 end_line: chunk.end_line,
184 snippet,
185 }
186 })
187 .collect();
188
189 results.sort_by(|a, b| {
190 b.score
191 .partial_cmp(&a.score)
192 .unwrap_or(std::cmp::Ordering::Equal)
193 });
194 results.truncate(top_k);
195 results
196 }
197
198 pub fn save(&self, root: &Path) -> std::io::Result<()> {
199 let dir = index_dir(root);
200 std::fs::create_dir_all(&dir)?;
201 let data = serde_json::to_string(self).map_err(std::io::Error::other)?;
202 std::fs::write(dir.join("bm25_index.json"), data)?;
203 Ok(())
204 }
205
206 pub fn load(root: &Path) -> Option<Self> {
207 let path = index_dir(root).join("bm25_index.json");
208 let data = std::fs::read_to_string(path).ok()?;
209 serde_json::from_str(&data).ok()
210 }
211}
212
213fn index_dir(root: &Path) -> PathBuf {
214 let mut hasher = Md5::new();
215 hasher.update(root.to_string_lossy().as_bytes());
216 let hash = format!("{:x}", hasher.finalize());
217 dirs::home_dir()
218 .unwrap_or_else(|| PathBuf::from("."))
219 .join(".lean-ctx")
220 .join("vectors")
221 .join(hash)
222}
223
/// Returns `true` when `path` has a file extension belonging to one of the
/// source-code languages the indexer understands. Files without an extension
/// (or with a non-UTF-8 one) are rejected.
fn is_code_file(path: &Path) -> bool {
    const CODE_EXTENSIONS: &[&str] = &[
        "rs", "ts", "tsx", "js", "jsx", "py", "go", "java", "c", "cpp", "h",
        "hpp", "rb", "cs", "kt", "swift", "php", "scala", "ex", "exs", "zig",
        "lua", "dart", "vue", "svelte",
    ];

    path.extension()
        .and_then(|ext| ext.to_str())
        .is_some_and(|ext| CODE_EXTENSIONS.contains(&ext))
}
254
/// Breaks arbitrary text into search tokens.
///
/// A token is a maximal run of alphanumeric characters and underscores at
/// least two bytes long; every other character is a separator. camelCase
/// tokens are additionally expanded into their parts
/// (see [`split_camel_case_tokens`]).
fn tokenize(text: &str) -> Vec<String> {
    let raw: Vec<String> = text
        .split(|c: char| !(c.is_alphanumeric() || c == '_'))
        .filter(|word| word.len() >= 2)
        .map(str::to_string)
        .collect();

    split_camel_case_tokens(&raw)
}

/// Expands camelCase/PascalCase tokens into their components while keeping
/// each original token.
///
/// A new segment begins at every uppercase letter that is not followed by
/// another uppercase letter, so acronym runs stay together
/// ("HTTPServer" -> "HTTP", "Server"). Segments shorter than two bytes are
/// dropped; tokens without a case boundary pass through unchanged.
fn split_camel_case_tokens(tokens: &[String]) -> Vec<String> {
    let mut out = Vec::with_capacity(tokens.len());

    for token in tokens {
        // Always keep the full token so exact matches still work.
        out.push(token.clone());

        let chars: Vec<char> = token.chars().collect();
        let mut seg_start = 0usize;

        for i in 1..chars.len() {
            let next_is_upper = chars.get(i + 1).is_some_and(|c| c.is_uppercase());
            if chars[i].is_uppercase() && !next_is_upper {
                let segment: String = chars[seg_start..i].iter().collect();
                if segment.len() >= 2 {
                    out.push(segment);
                }
                seg_start = i;
            }
        }

        // Emit the trailing segment only when the token was actually split.
        if seg_start > 0 {
            let tail: String = chars[seg_start..].iter().collect();
            if tail.len() >= 2 {
                out.push(tail);
            }
        }
    }

    out
}
300
301fn extract_chunks(file_path: &str, content: &str) -> Vec<CodeChunk> {
302 let lines: Vec<&str> = content.lines().collect();
303 if lines.is_empty() {
304 return Vec::new();
305 }
306
307 let mut chunks = Vec::new();
308 let mut i = 0;
309
310 while i < lines.len() {
311 let trimmed = lines[i].trim();
312
313 if let Some((name, kind)) = detect_symbol(trimmed) {
314 let start = i;
315 let end = find_block_end(&lines, i);
316 let block: String = lines[start..=end.min(lines.len() - 1)].to_vec().join("\n");
317 let tokens = tokenize(&block);
318 let token_count = tokens.len();
319
320 chunks.push(CodeChunk {
321 file_path: file_path.to_string(),
322 symbol_name: name,
323 kind,
324 start_line: start + 1,
325 end_line: end + 1,
326 content: block,
327 tokens,
328 token_count,
329 });
330
331 i = end + 1;
332 } else {
333 i += 1;
334 }
335 }
336
337 if chunks.is_empty() && !content.is_empty() {
338 let tokens = tokenize(content);
339 let token_count = tokens.len();
340 let snippet = lines
341 .iter()
342 .take(50)
343 .copied()
344 .collect::<Vec<_>>()
345 .join("\n");
346 chunks.push(CodeChunk {
347 file_path: file_path.to_string(),
348 symbol_name: file_path.to_string(),
349 kind: ChunkKind::Module,
350 start_line: 1,
351 end_line: lines.len(),
352 content: snippet,
353 tokens,
354 token_count,
355 });
356 }
357
358 chunks
359}
360
361fn detect_symbol(line: &str) -> Option<(String, ChunkKind)> {
362 let trimmed = line.trim();
363
364 let patterns: &[(&str, ChunkKind)] = &[
365 ("pub async fn ", ChunkKind::Function),
366 ("async fn ", ChunkKind::Function),
367 ("pub fn ", ChunkKind::Function),
368 ("fn ", ChunkKind::Function),
369 ("pub struct ", ChunkKind::Struct),
370 ("struct ", ChunkKind::Struct),
371 ("pub enum ", ChunkKind::Struct),
372 ("enum ", ChunkKind::Struct),
373 ("impl ", ChunkKind::Impl),
374 ("pub trait ", ChunkKind::Struct),
375 ("trait ", ChunkKind::Struct),
376 ("export function ", ChunkKind::Function),
377 ("export async function ", ChunkKind::Function),
378 ("export default function ", ChunkKind::Function),
379 ("function ", ChunkKind::Function),
380 ("async function ", ChunkKind::Function),
381 ("export class ", ChunkKind::Class),
382 ("class ", ChunkKind::Class),
383 ("export interface ", ChunkKind::Struct),
384 ("interface ", ChunkKind::Struct),
385 ("def ", ChunkKind::Function),
386 ("async def ", ChunkKind::Function),
387 ("class ", ChunkKind::Class),
388 ("func ", ChunkKind::Function),
389 ];
390
391 for (prefix, kind) in patterns {
392 if let Some(rest) = trimmed.strip_prefix(prefix) {
393 let name: String = rest
394 .chars()
395 .take_while(|c| c.is_alphanumeric() || *c == '_' || *c == '<')
396 .take_while(|c| *c != '<')
397 .collect();
398 if !name.is_empty() {
399 return Some((name, kind.clone()));
400 }
401 }
402 }
403
404 None
405}
406
/// Returns the index (into `lines`) of the last line of the block starting at
/// `start`.
///
/// Brace languages: tracks `{`/`}` nesting and ends the block on the line
/// where the first opened brace is balanced. Parentheses are deliberately NOT
/// counted — the previous version counted them, so a signature like
/// `fn f(x) {` balanced to zero on its own parens and truncated nearly every
/// chunk to its signature line.
///
/// Brace-less languages (e.g. Python): falls back to an indentation
/// heuristic — the block ends just before the first blank or non-indented
/// line after `start + 2`. The previous version tested the *trimmed* line
/// for leading whitespace (always false), so the heuristic fired on the
/// first line it examined; the raw line is inspected now.
///
/// If nothing terminates the block, it is capped at 50 lines (or the end of
/// the file, whichever comes first).
fn find_block_end(lines: &[&str], start: usize) -> usize {
    let mut depth = 0i32;
    let mut found_open = false;

    for (i, line) in lines.iter().enumerate().skip(start) {
        for ch in line.chars() {
            match ch {
                '{' => {
                    depth += 1;
                    found_open = true;
                }
                '}' if depth > 0 => {
                    depth -= 1;
                    if depth == 0 {
                        return i;
                    }
                }
                _ => {}
            }
        }

        // Indentation fallback for blocks that never open a brace.
        if !found_open && i > start + 2 {
            let raw = lines[i];
            let trimmed = raw.trim();
            if trimmed.is_empty() || (!raw.starts_with(' ') && !raw.starts_with('\t')) {
                return i.saturating_sub(1);
            }
        }
    }

    // Ran off the end of the input: cap the block length.
    (start + 50).min(lines.len().saturating_sub(1))
}
444
445pub fn format_search_results(results: &[SearchResult], compact: bool) -> String {
446 if results.is_empty() {
447 return "No results found.".to_string();
448 }
449
450 let mut out = String::new();
451 for (i, r) in results.iter().enumerate() {
452 if compact {
453 out.push_str(&format!(
454 "{}. {:.2} {}:{}-{} {:?} {}\n",
455 i + 1,
456 r.score,
457 r.file_path,
458 r.start_line,
459 r.end_line,
460 r.kind,
461 r.symbol_name,
462 ));
463 } else {
464 out.push_str(&format!(
465 "\n--- Result {} (score: {:.2}) ---\n{} :: {} [{:?}] (L{}-{})\n{}\n",
466 i + 1,
467 r.score,
468 r.file_path,
469 r.symbol_name,
470 r.kind,
471 r.start_line,
472 r.end_line,
473 r.snippet,
474 ));
475 }
476 }
477 out
478}
479
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn tokenize_splits_code() {
        let tokens = tokenize("fn calculate_total(items: Vec<Item>) -> f64");
        assert!(tokens.contains(&"calculate_total".to_string()));
        assert!(tokens.contains(&"items".to_string()));
        assert!(tokens.contains(&"Vec".to_string()));
    }

    #[test]
    fn camel_case_splitting() {
        let tokens = split_camel_case_tokens(&["calculateTotal".to_string()]);
        // The full token is kept alongside its parts.
        assert!(tokens.contains(&"calculateTotal".to_string()));
        assert!(tokens.contains(&"calculate".to_string()));
        assert!(tokens.contains(&"Total".to_string()));
    }

    #[test]
    fn detect_rust_function() {
        let (name, kind) =
            detect_symbol("pub fn process_request(req: Request) -> Response {").unwrap();
        assert_eq!(name, "process_request");
        assert_eq!(kind, ChunkKind::Function);
    }

    /// Builds a test chunk whose `token_count` is guaranteed to match its
    /// tokens. (The original fixture hard-coded a count of 8 for a 7-token
    /// chunk, silently skewing BM25 length normalization.)
    fn chunk(file: &str, symbol: &str, content: &str, token_src: &str, end_line: usize) -> CodeChunk {
        let tokens = tokenize(token_src);
        let token_count = tokens.len();
        CodeChunk {
            file_path: file.into(),
            symbol_name: symbol.into(),
            kind: ChunkKind::Function,
            start_line: 1,
            end_line,
            content: content.into(),
            tokens,
            token_count,
        }
    }

    #[test]
    fn bm25_search_finds_relevant() {
        let mut index = BM25Index::new();
        index.add_chunk(chunk(
            "auth.rs",
            "validate_token",
            "fn validate_token(token: &str) -> bool { check_jwt_expiry(token) }",
            "fn validate_token token str bool check_jwt_expiry token",
            10,
        ));
        index.add_chunk(chunk(
            "db.rs",
            "connect_database",
            "fn connect_database(url: &str) -> Pool { create_pool(url) }",
            "fn connect_database url str Pool create_pool url",
            5,
        ));
        index.finalize();

        let results = index.search("jwt token validation", 5);
        assert!(!results.is_empty());
        assert_eq!(results[0].symbol_name, "validate_token");
    }
}