// ripvec_core/embed.rs
1//! Parallel batch embedding pipeline with streaming backpressure.
2//!
3//! Two pipeline modes:
4//!
5//! - **Batch mode** (< `STREAMING_THRESHOLD` files): walk, chunk all, tokenize
6//!   all, sort by length, embed. Simple and optimal for small corpora.
7//!
8//! - **Streaming mode** (>= `STREAMING_THRESHOLD` files): three-stage pipeline
9//!   with bounded channels. Chunks flow through: rayon chunk workers ->
10//!   tokenize+batch collector -> GPU embed consumer. The GPU starts after the
11//!   first `batch_size` encodings are ready (~50ms), not after all chunks are
12//!   done. Backpressure prevents unbounded memory growth.
13//!
14//! # Batch inference
15//!
16//! Instead of one forward pass per chunk, chunks are grouped into batches
17//! of configurable size (default 32). Each batch is tokenized, padded to
18//! the longest sequence, and run as a single forward pass with shape
19//! `[batch_size, max_seq_len]`. This amortizes per-call overhead and enables
20//! SIMD across the batch dimension.
21//!
22//! # Parallelism
23//!
24//! On CPU, each rayon thread gets its own backend clone (cheap — most
25//! backends use `Arc`'d weights internally). On GPU, batches run sequentially
26//! from Rust while the device parallelizes internally.
27
28use std::path::Path;
29use std::sync::atomic::{AtomicUsize, Ordering};
30use std::time::Instant;
31
32use rayon::prelude::*;
33use tracing::{debug, info_span, instrument, warn};
34
35use crate::backend::{EmbedBackend, Encoding};
36use crate::chunk::{ChunkConfig, CodeChunk};
37
/// Default batch size for embedding inference.
///
/// Used by [`SearchConfig::default`]. Larger batches amortize per-call
/// overhead but pad every sequence in the batch to the longest one.
pub const DEFAULT_BATCH_SIZE: usize = 32;

/// File count threshold for switching from batch to streaming pipeline.
///
/// Below this, the batch path (chunk all -> tokenize all -> sort -> embed)
/// is simpler and allows global sort-by-length optimization. Above this,
/// streaming eliminates GPU idle time during chunking/tokenization.
const STREAMING_THRESHOLD: usize = 1000;

/// Number of batch-sized buffers in the embed channel for backpressure.
///
/// Keeps memory bounded: at most `RING_SIZE * batch_size` encodings in flight.
/// Matches the ring-buffer depth documented on [`EmbedBackend`].
const RING_SIZE: usize = 4;
53
/// Runtime configuration for the search pipeline.
///
/// All tuning parameters that were previously compile-time constants are
/// gathered here so they can be set from CLI arguments without recompiling.
#[derive(Debug, Clone)]
pub struct SearchConfig {
    /// Chunks per inference call. Larger values amortize call overhead
    /// but consume more memory. Default: [`DEFAULT_BATCH_SIZE`] (32).
    pub batch_size: usize,
    /// Maximum tokens fed to the model per chunk. `0` means no limit.
    /// Capping tokens controls inference cost for minified or dense source.
    /// BERT attention cost scales linearly with token count, and CLS pooling
    /// means the first token's representation carries most semantic weight.
    /// NOTE(review): `SearchConfig::default()` sets this to `0` (unlimited),
    /// not 128 as previously documented here; callers commonly cap at 128
    /// (measured ~7.7× faster than 512 with minimal quality loss).
    pub max_tokens: usize,
    /// Chunking parameters forwarded to the chunking phase.
    pub chunk: ChunkConfig,
    /// Force all files to be chunked as plain text (sliding windows only).
    /// When `false` (default), files with recognized extensions use tree-sitter
    /// semantic chunking, and unrecognized extensions fall back to sliding windows.
    pub text_mode: bool,
    /// MRL cascade pre-filter dimension.
    ///
    /// When set, [`SearchIndex`](crate::index::SearchIndex) stores a truncated
    /// and L2-re-normalized copy of the embedding matrix at this dimension for
    /// fast two-phase cascade search. `None` (default) disables cascade search.
    pub cascade_dim: Option<usize>,
    /// Optional file type filter (e.g. "rust", "python", "js").
    ///
    /// When set, only files matching this type (using ripgrep's built-in type
    /// database) are collected during the walk phase.
    pub file_type: Option<String>,
    /// Search mode: hybrid (default), semantic, or keyword.
    pub mode: crate::hybrid::SearchMode,
}
89
90impl Default for SearchConfig {
91    fn default() -> Self {
92        Self {
93            batch_size: DEFAULT_BATCH_SIZE,
94            max_tokens: 0,
95            chunk: ChunkConfig::default(),
96            text_mode: false,
97            cascade_dim: None,
98            file_type: None,
99            mode: crate::hybrid::SearchMode::Hybrid,
100        }
101    }
102}
103
/// A search result pairing a code chunk with its similarity score.
#[derive(Debug, Clone)]
pub struct SearchResult {
    /// The matched code chunk.
    pub chunk: CodeChunk,
    /// Cosine similarity to the query (nominally 0.0 to 1.0 for
    /// L2-normalized embeddings — TODO confirm vectors are normalized).
    pub similarity: f32,
}
112
113/// Walk, chunk, and embed all files in a directory.
114///
115/// Returns the chunks and their corresponding embedding vectors.
116/// This is the building block for both one-shot search and interactive mode.
117/// The caller handles query embedding and ranking.
118///
119/// Accepts multiple backends for hybrid scheduling — chunks are distributed
120/// across all backends via work-stealing (see `embed_distributed`).
121///
122/// Automatically selects between two pipeline modes:
123/// - **Batch** (< `STREAMING_THRESHOLD` files): chunk all, tokenize all, sort
124///   by length, embed. Optimal for small corpora.
125/// - **Streaming** (>= `STREAMING_THRESHOLD` files): three-stage pipeline with
126///   bounded channels. GPU starts after the first batch is ready, not after all
127///   chunks are done. Eliminates GPU idle time during chunking/tokenization.
128///
129/// # Errors
130///
131/// Returns an error if file walking, chunking, or embedding fails.
132#[instrument(skip_all, fields(root = %root.display(), batch_size = cfg.batch_size))]
133pub fn embed_all(
134    root: &Path,
135    backends: &[&dyn EmbedBackend],
136    tokenizer: &tokenizers::Tokenizer,
137    cfg: &SearchConfig,
138    profiler: &crate::profile::Profiler,
139) -> crate::Result<(Vec<CodeChunk>, Vec<Vec<f32>>)> {
140    if backends.is_empty() {
141        return Err(crate::Error::Other(anyhow::anyhow!(
142            "no embedding backends provided"
143        )));
144    }
145
146    // Phase 1: Collect files (respects .gitignore, filters by extension)
147    let files = {
148        let _span = info_span!("walk").entered();
149        let guard = profiler.phase("walk");
150        let files = crate::walk::collect_files(root, cfg.file_type.as_deref());
151        guard.set_detail(format!("{} files", files.len()));
152        files
153    };
154
155    if files.len() >= STREAMING_THRESHOLD {
156        // Compute total source bytes for byte-based progress (known after walk).
157        let total_bytes: u64 = files
158            .iter()
159            .filter_map(|p| p.metadata().ok())
160            .map(|m| m.len())
161            .sum();
162        embed_all_streaming(&files, total_bytes, backends, tokenizer, cfg, profiler)
163    } else {
164        embed_all_batch(&files, backends, tokenizer, cfg, profiler)
165    }
166}
167
168/// Batch pipeline: chunk all -> tokenize all -> sort by length -> embed.
169///
170/// Optimal for small corpora where the global sort-by-length optimization
171/// matters more than eliminating GPU idle time.
172fn embed_all_batch(
173    files: &[std::path::PathBuf],
174    backends: &[&dyn EmbedBackend],
175    tokenizer: &tokenizers::Tokenizer,
176    cfg: &SearchConfig,
177    profiler: &crate::profile::Profiler,
178) -> crate::Result<(Vec<CodeChunk>, Vec<Vec<f32>>)> {
179    // Phase 2: Chunk all files in parallel.
180    let chunks: Vec<CodeChunk> = {
181        let _span = info_span!("chunk", file_count = files.len()).entered();
182        let chunk_start = Instant::now();
183        let text_mode = cfg.text_mode;
184        let result: Vec<CodeChunk> = files
185            .par_iter()
186            .flat_map(|path| {
187                let Some(source) = read_source(path) else {
188                    return vec![];
189                };
190                let chunks = if text_mode {
191                    crate::chunk::chunk_text(path, &source, &cfg.chunk)
192                } else {
193                    let ext = path.extension().and_then(|e| e.to_str()).unwrap_or("");
194                    match crate::languages::config_for_extension(ext) {
195                        Some(lang_config) => {
196                            crate::chunk::chunk_file(path, &source, &lang_config, &cfg.chunk)
197                        }
198                        None => crate::chunk::chunk_text(path, &source, &cfg.chunk),
199                    }
200                };
201                profiler.chunk_thread_report(chunks.len());
202                chunks
203            })
204            .collect();
205        profiler.chunk_summary(result.len(), files.len(), chunk_start.elapsed());
206        result
207    };
208
209    // Phase 3: Pre-tokenize all chunks in parallel (CPU-bound, all rayon threads)
210    let bs = cfg.batch_size.max(1);
211    let max_tokens_cfg = cfg.max_tokens;
212    let model_max = backends[0].max_tokens();
213    let _span = info_span!("embed_chunks", chunk_count = chunks.len(), batch_size = bs).entered();
214    profiler.embed_begin(chunks.len());
215
216    let all_encodings: Vec<Option<Encoding>> = chunks
217        .par_iter()
218        .map(|chunk| {
219            tokenize(
220                &chunk.enriched_content,
221                tokenizer,
222                max_tokens_cfg,
223                model_max,
224            )
225            .inspect_err(|e| {
226                warn!(file = %chunk.file_path, err = %e, "tokenization failed, skipping chunk");
227            })
228            .ok()
229        })
230        .collect();
231
232    // Sort chunks and their encodings together by descending token count.
233    // This groups similar-length sequences into the same batch, minimizing
234    // padding waste (short chunks no longer get padded to a long neighbour).
235    let mut paired: Vec<(CodeChunk, Option<Encoding>)> =
236        chunks.into_iter().zip(all_encodings).collect();
237    paired.sort_by(|a, b| {
238        let len_a = a.1.as_ref().map_or(0, |e| e.input_ids.len());
239        let len_b = b.1.as_ref().map_or(0, |e| e.input_ids.len());
240        len_b.cmp(&len_a) // descending — longest first
241    });
242    let (chunks, sorted_encodings): (Vec<CodeChunk>, Vec<Option<Encoding>>) =
243        paired.into_iter().unzip();
244
245    // Phase 4: Distribute pre-tokenized batches across all backends
246    let embeddings = embed_distributed(&sorted_encodings, backends, bs, profiler)?;
247    profiler.embed_done();
248
249    // Filter out chunks whose tokenization failed (empty embedding vectors).
250    let (chunks, embeddings): (Vec<_>, Vec<_>) = chunks
251        .into_iter()
252        .zip(embeddings)
253        .filter(|(_, emb)| !emb.is_empty())
254        .unzip();
255
256    Ok((chunks, embeddings))
257}
258
259/// Streaming pipeline: chunk -> tokenize -> batch -> embed with backpressure.
260///
261/// Three concurrent stages connected by bounded channels:
262///
263/// 1. **Chunk producers** (rayon pool, in a scoped thread): read + parse files,
264///    send chunks to channel.
265/// 2. **Tokenize + batch collector** (scoped thread): tokenize chunks, sort
266///    within batch windows, send full batches to the embed channel.
267/// 3. **Embed consumer** (main thread): calls `embed_distributed` on each
268///    batch, collects results.
269///
270/// The bounded channels provide natural backpressure: if the GPU falls behind,
271/// the tokenize stage blocks, which blocks chunk producers via the chunk channel.
272/// If chunking is fast and the GPU is slow, at most
273/// `8 * batch_size + RING_SIZE * batch_size` items are in memory.
274///
275/// Uses `std::thread::scope` so all threads can borrow the caller's stack
276/// (`tokenizer`, `backends`, `profiler`) without `'static` bounds.
277#[expect(
278    clippy::too_many_lines,
279    reason = "streaming pipeline has inherent complexity in thread coordination"
280)]
281fn embed_all_streaming(
282    files: &[std::path::PathBuf],
283    total_bytes: u64,
284    backends: &[&dyn EmbedBackend],
285    tokenizer: &tokenizers::Tokenizer,
286    cfg: &SearchConfig,
287    profiler: &crate::profile::Profiler,
288) -> crate::Result<(Vec<CodeChunk>, Vec<Vec<f32>>)> {
289    use crossbeam_channel::bounded;
290
291    let bs = cfg.batch_size.max(1);
292    let max_tokens_cfg = cfg.max_tokens;
293    let model_max = backends[0].max_tokens();
294    let file_count = files.len();
295    let text_mode = cfg.text_mode;
296    let chunk_config = cfg.chunk.clone();
297
298    // Bounded channel from chunk producers -> tokenize+batch stage.
299    // Factor of 8 gives enough buffering for rayon parallelism without
300    // unbounded growth (at most ~8 batches worth of chunks in flight).
301    let (chunk_tx, chunk_rx) = bounded::<CodeChunk>(bs * 8);
302
303    // Bounded channel from tokenize+batch stage -> embed consumer.
304    // RING_SIZE batches in flight provides enough pipeline depth for GPU
305    // to stay busy while the next batch is being tokenized.
306    let (batch_tx, batch_rx) = bounded::<Vec<(Encoding, CodeChunk)>>(RING_SIZE);
307
308    // Shared counters for profiling across the streaming pipeline.
309    let total_chunks_produced = AtomicUsize::new(0);
310    let bytes_chunked = AtomicUsize::new(0);
311    let chunk_start = Instant::now();
312
313    // All stages run inside std::thread::scope so they can borrow from the
314    // caller's stack (tokenizer, backends, profiler, files, etc.).
315    std::thread::scope(|scope| {
316        // --- Stage 1: Chunk producers (rayon inside a scoped thread) ---
317        //
318        // Spawns a scoped thread that drives rayon's par_iter. Each file is
319        // chunked independently and chunks are sent into the bounded channel.
320        // If the channel is full, rayon workers block, providing backpressure.
321        scope.spawn(|| {
322            let _span = info_span!("chunk_stream", file_count).entered();
323            files.par_iter().for_each(|path| {
324                let Some(source) = read_source(path) else {
325                    return;
326                };
327                let chunks = if text_mode {
328                    crate::chunk::chunk_text(path, &source, &chunk_config)
329                } else {
330                    let ext = path.extension().and_then(|e| e.to_str()).unwrap_or("");
331                    match crate::languages::config_for_extension(ext) {
332                        Some(lang_config) => {
333                            crate::chunk::chunk_file(path, &source, &lang_config, &chunk_config)
334                        }
335                        None => crate::chunk::chunk_text(path, &source, &chunk_config),
336                    }
337                };
338                let n = chunks.len();
339                let file_bytes = source.len();
340                for chunk in chunks {
341                    // Channel disconnected means downstream errored; stop.
342                    if chunk_tx.send(chunk).is_err() {
343                        return;
344                    }
345                }
346                profiler.chunk_thread_report(n);
347                total_chunks_produced.fetch_add(n, Ordering::Relaxed);
348                bytes_chunked.fetch_add(file_bytes, Ordering::Relaxed);
349            });
350            // chunk_tx is dropped here, closing the channel — but the borrow
351            // of chunk_tx lives until the scoped thread ends. We need to
352            // explicitly drop it so the tokenize stage sees the channel close.
353            drop(chunk_tx);
354        });
355
356        // --- Stage 2: Tokenize + batch collector (scoped thread) ---
357        //
358        // Receives individual chunks, tokenizes each (HuggingFace tokenizer
359        // is Send + Sync), and accumulates into batch-sized buffers. Within
360        // each buffer, entries are sorted by descending token count — the same
361        // padding-reduction optimization as the batch path, applied locally.
362        let tokenize_handle = scope.spawn(move || -> crate::Result<()> {
363            let _span = info_span!("tokenize_stream").entered();
364            let mut buffer: Vec<(Encoding, CodeChunk)> = Vec::with_capacity(bs);
365
366            for chunk in &chunk_rx {
367                match tokenize(
368                    &chunk.enriched_content,
369                    tokenizer,
370                    max_tokens_cfg,
371                    model_max,
372                ) {
373                    Ok(encoding) => {
374                        buffer.push((encoding, chunk));
375                        if buffer.len() >= bs {
376                            // Sort within batch by descending token count.
377                            buffer.sort_by(|a, b| b.0.input_ids.len().cmp(&a.0.input_ids.len()));
378                            let batch = std::mem::replace(&mut buffer, Vec::with_capacity(bs));
379                            if batch_tx.send(batch).is_err() {
380                                // Embed consumer dropped; stop tokenizing.
381                                return Ok(());
382                            }
383                        }
384                    }
385                    Err(e) => {
386                        warn!(
387                            file = %chunk.file_path, err = %e,
388                            "tokenization failed, skipping chunk"
389                        );
390                    }
391                }
392            }
393
394            // Flush remaining partial batch.
395            if !buffer.is_empty() {
396                buffer.sort_by(|a, b| b.0.input_ids.len().cmp(&a.0.input_ids.len()));
397                let _ = batch_tx.send(buffer);
398            }
399            // batch_tx drops here, closing the embed channel.
400
401            Ok(())
402        });
403
404        // --- Stage 3: Embed consumer (main thread within scope) ---
405        //
406        // Receives sorted batches, embeds via the backend(s), collects results.
407        // Profiler is driven from here since this thread owns the reference.
408        let _span = info_span!("embed_stream").entered();
409
410        // Total isn't known upfront in streaming mode; start at 0 and update.
411        profiler.embed_begin(0);
412
413        let mut all_chunks: Vec<CodeChunk> = Vec::new();
414        let mut all_embeddings: Vec<Vec<f32>> = Vec::new();
415        let mut embed_error: Option<crate::Error> = None;
416
417        let mut cumulative_done: usize = 0;
418        for batch in &batch_rx {
419            let batch_len = batch.len();
420            let (encodings, chunks): (Vec<Encoding>, Vec<CodeChunk>) = batch.into_iter().unzip();
421
422            // Wrap as Option<Encoding> for embed_distributed compatibility.
423            let opt_encodings: Vec<Option<Encoding>> = encodings.into_iter().map(Some).collect();
424
425            // Pass noop profiler to embed_distributed — its internal done counter
426            // resets per call (0→batch_size), which corrupts our global progress.
427            let noop = crate::profile::Profiler::noop();
428            match embed_distributed(&opt_encodings, backends, bs, &noop) {
429                Ok(batch_embeddings) => {
430                    cumulative_done += batch_len;
431                    // Byte-based progress: total_bytes known from walk, bytes_chunked
432                    // tracks how much source data has been processed through the pipeline.
433                    let processed = bytes_chunked.load(Ordering::Relaxed) as u64;
434                    profiler.embed_tick_bytes(cumulative_done, processed, total_bytes);
435
436                    for (chunk, emb) in chunks.into_iter().zip(batch_embeddings) {
437                        if !emb.is_empty() {
438                            all_chunks.push(chunk);
439                            all_embeddings.push(emb);
440                        }
441                    }
442                }
443                Err(e) => {
444                    embed_error = Some(e);
445                    // break exits the for loop; batch_rx drops naturally after.
446                    break;
447                }
448            }
449        }
450
451        // Report chunk summary now that all stages have completed (or errored).
452        let final_total = total_chunks_produced.load(Ordering::Relaxed);
453        profiler.chunk_summary(final_total, file_count, chunk_start.elapsed());
454        // Set the final total so embed_done shows the correct summary.
455        profiler.embed_begin_update_total(cumulative_done);
456        profiler.embed_tick(cumulative_done);
457        profiler.embed_done();
458
459        // Wait for tokenize thread and check for errors.
460        let tokenize_result = tokenize_handle.join();
461
462        // Error priority: embed > tokenize > thread panic.
463        if let Some(e) = embed_error {
464            return Err(e);
465        }
466        match tokenize_result {
467            Ok(Ok(())) => {}
468            Ok(Err(e)) => return Err(e),
469            Err(_) => {
470                return Err(crate::Error::Other(anyhow::anyhow!(
471                    "tokenize thread panicked"
472                )));
473            }
474        }
475
476        Ok((all_chunks, all_embeddings))
477    })
478}
479
480/// Search a directory for code chunks semantically similar to a query.
481///
482/// Walks the directory, chunks all supported files, embeds everything
483/// in parallel batches, and returns the top-k results ranked by similarity.
484///
485/// Accepts multiple backends for hybrid scheduling — the first backend
486/// (`backends[0]`) is used for query embedding.
487///
488/// All tuning parameters (batch size, token limit, chunk sizing) are
489/// controlled via [`SearchConfig`].
490///
491/// # Errors
492///
493/// Returns an error if the query cannot be tokenized or embedded.
494///
495/// # Panics
496///
497/// Panics if a per-thread backend clone fails during parallel embedding
498/// (should not happen if the backend loaded successfully).
499#[instrument(skip_all, fields(root = %root.display(), top_k, batch_size = cfg.batch_size))]
500pub fn search(
501    root: &Path,
502    query: &str,
503    backends: &[&dyn EmbedBackend],
504    tokenizer: &tokenizers::Tokenizer,
505    top_k: usize,
506    cfg: &SearchConfig,
507    profiler: &crate::profile::Profiler,
508) -> crate::Result<Vec<SearchResult>> {
509    if backends.is_empty() {
510        return Err(crate::Error::Other(anyhow::anyhow!(
511            "no embedding backends provided"
512        )));
513    }
514
515    // Phases 1, 2, 3, 4: walk, chunk, pre-tokenize, embed all files
516    let (chunks, embeddings) = embed_all(root, backends, tokenizer, cfg, profiler)?;
517
518    let t_query_start = std::time::Instant::now();
519
520    // Phase 5: Build hybrid index (semantic + BM25)
521    let hybrid = {
522        let _span = info_span!("build_hybrid_index").entered();
523        let _guard = profiler.phase("build_hybrid_index");
524        crate::hybrid::HybridIndex::new(chunks, &embeddings, cfg.cascade_dim)?
525    };
526
527    let mode = cfg.mode;
528    let effective_top_k = if top_k > 0 { top_k } else { usize::MAX };
529
530    // Phase 6: Embed query (skip for keyword-only mode)
531    let query_embedding = if mode == crate::hybrid::SearchMode::Keyword {
532        // Keyword mode: no embedding needed, use zero vector
533        let dim = hybrid.semantic.hidden_dim;
534        vec![0.0f32; dim]
535    } else {
536        let _span = info_span!("embed_query").entered();
537        let _guard = profiler.phase("embed_query");
538        let t_tok = std::time::Instant::now();
539        let enc = tokenize(query, tokenizer, cfg.max_tokens, backends[0].max_tokens())?;
540        let tok_ms = t_tok.elapsed().as_secs_f64() * 1000.0;
541        let t_emb = std::time::Instant::now();
542        let mut results = backends[0].embed_batch(&[enc])?;
543        let emb_ms = t_emb.elapsed().as_secs_f64() * 1000.0;
544        eprintln!(
545            "[search] query: tokenize={tok_ms:.1}ms embed={emb_ms:.1}ms total_since_embed_all={:.1}ms",
546            t_query_start.elapsed().as_secs_f64() * 1000.0
547        );
548        results.pop().ok_or_else(|| {
549            crate::Error::Other(anyhow::anyhow!("backend returned no embedding for query"))
550        })?
551    };
552
553    // Phase 7: Hybrid/semantic/keyword ranking
554    let ranked = {
555        let _span = info_span!("rank", chunk_count = hybrid.chunks().len()).entered();
556        let guard = profiler.phase("rank");
557        let threshold = 0.0; // all modes use 0.0; SearchIndex::rank applies its own
558        let results = hybrid.search(&query_embedding, query, effective_top_k, threshold, mode);
559        guard.set_detail(format!(
560            "{mode} top {} from {}",
561            effective_top_k.min(results.len()),
562            hybrid.chunks().len()
563        ));
564        results
565    };
566
567    let results: Vec<SearchResult> = ranked
568        .into_iter()
569        .map(|(idx, score)| SearchResult {
570            chunk: hybrid.chunks()[idx].clone(),
571            similarity: score,
572        })
573        .collect();
574
575    Ok(results)
576}
577
/// Shared state for [`embed_distributed`] workers.
struct DistributedState<'a> {
    /// Pre-tokenized inputs; `None` marks a chunk whose tokenization failed
    /// (workers emit an empty embedding for it to keep indices aligned).
    tokenized: &'a [Option<Encoding>],
    /// Next unclaimed index into `tokenized`; workers `fetch_add` to claim
    /// a contiguous batch (work-stealing).
    cursor: std::sync::atomic::AtomicUsize,
    /// Set on the first backend error so all workers exit early.
    error_flag: std::sync::atomic::AtomicBool,
    /// The first error observed; later errors are discarded.
    first_error: std::sync::Mutex<Option<crate::Error>>,
    /// Total items processed across all workers, for progress ticks.
    done_counter: std::sync::atomic::AtomicUsize,
    /// Base claim size per grab; GPU workers claim 4× this.
    batch_size: usize,
    /// Progress sink for `embed_tick` updates.
    profiler: &'a crate::profile::Profiler,
}
588
589impl DistributedState<'_> {
590    /// Worker loop: claim batches from the shared cursor, embed, collect results.
591    fn run_worker(&self, backend: &dyn EmbedBackend) -> Vec<(usize, Vec<f32>)> {
592        use std::sync::atomic::Ordering;
593
594        let n = self.tokenized.len();
595        // GPU backends grab larger batches to amortize per-call overhead.
596        // MLX's lazy eval graph optimizer benefits from large matrices.
597        // Metal sub-batches internally via MAX_BATCH to limit padding waste.
598        let grab_size = if backend.is_gpu() {
599            self.batch_size * 4
600        } else {
601            self.batch_size
602        };
603        let mut results = Vec::new();
604
605        loop {
606            if self.error_flag.load(Ordering::Relaxed) {
607                break;
608            }
609
610            let start = self.cursor.fetch_add(grab_size, Ordering::Relaxed);
611            if start >= n {
612                break;
613            }
614            let end = (start + grab_size).min(n);
615            let batch = &self.tokenized[start..end];
616
617            // Separate valid encodings from Nones, tracking which indices succeeded
618            let mut valid = Vec::with_capacity(batch.len());
619            let mut valid_indices = Vec::with_capacity(batch.len());
620            for (i, enc) in batch.iter().enumerate() {
621                if let Some(e) = enc {
622                    // TODO(perf): cloning 3 Vecs per chunk; consider making
623                    // `EmbedBackend::embed_batch` accept `&[&Encoding]` to avoid this.
624                    valid.push(e.clone());
625                    valid_indices.push(start + i);
626                } else {
627                    results.push((start + i, vec![]));
628                }
629            }
630
631            if valid.is_empty() {
632                let done =
633                    self.done_counter.fetch_add(batch.len(), Ordering::Relaxed) + batch.len();
634                self.profiler.embed_tick(done);
635                continue;
636            }
637
638            match backend.embed_batch(&valid) {
639                Ok(batch_embeddings) => {
640                    for (idx, emb) in valid_indices.into_iter().zip(batch_embeddings) {
641                        results.push((idx, emb));
642                    }
643                    let done =
644                        self.done_counter.fetch_add(batch.len(), Ordering::Relaxed) + batch.len();
645                    self.profiler.embed_tick(done);
646                }
647                Err(e) => {
648                    self.error_flag.store(true, Ordering::Relaxed);
649                    if let Ok(mut guard) = self.first_error.lock()
650                        && guard.is_none()
651                    {
652                        *guard = Some(e);
653                    }
654                    break;
655                }
656            }
657        }
658
659        results
660    }
661}
662
663/// Distribute pre-tokenized chunks across multiple backends using work-stealing.
664///
665/// Each backend gets a dedicated worker thread. Workers compete on a shared
666/// `AtomicUsize` cursor to claim batches of chunks. GPU backends grab larger
667/// batches (`batch_size * 4`), CPU backends grab smaller ones (`batch_size`).
668/// Results are written by original chunk index — no merge step needed.
669///
670/// When `backends` has a single entry, no extra threads are spawned.
671///
672/// # Errors
673///
674/// Returns the first error from any backend. Other workers exit early
675/// when an error is detected.
676#[expect(
677    unsafe_code,
678    reason = "BLAS thread count must be set via env vars before spawning workers"
679)]
680pub(crate) fn embed_distributed(
681    tokenized: &[Option<Encoding>],
682    backends: &[&dyn EmbedBackend],
683    batch_size: usize,
684    profiler: &crate::profile::Profiler,
685) -> crate::Result<Vec<Vec<f32>>> {
686    let n = tokenized.len();
687    let state = DistributedState {
688        tokenized,
689        cursor: std::sync::atomic::AtomicUsize::new(0),
690        error_flag: std::sync::atomic::AtomicBool::new(false),
691        first_error: std::sync::Mutex::new(None),
692        done_counter: std::sync::atomic::AtomicUsize::new(0),
693        batch_size: batch_size.max(1),
694        profiler,
695    };
696
697    // Collect (index, embedding) pairs from all workers
698    let all_pairs: Vec<(usize, Vec<f32>)> =
699        if backends.len() == 1 && backends[0].supports_clone() && !backends[0].is_gpu() {
700            // Single cloneable CPU backend: spawn N workers with single-threaded BLAS.
701            //
702            // BLAS libraries (OpenBLAS, MKL) internally spawn threads for each matmul.
703            // For small matrices ([1,384]×[384,384]), this thread overhead dominates —
704            // profiling shows 80% of time in sched_yield (thread contention).
705            //
706            // Instead: force BLAS to single-thread per worker, parallelize across
707            // independent BERT inferences. Each worker gets its own cloned backend.
708            // Force BLAS libraries to single-threaded mode.
709            // We parallelize across independent BERT inferences instead.
710            // env vars don't always work (OpenBLAS may ignore after init),
711            // so also call the runtime API directly.
712            unsafe {
713                std::env::set_var("OPENBLAS_NUM_THREADS", "1");
714                std::env::set_var("MKL_NUM_THREADS", "1");
715                std::env::set_var("VECLIB_MAXIMUM_THREADS", "1"); // macOS Accelerate
716
717                // Direct FFI to set BLAS thread count — works even after init
718                #[cfg(all(not(target_os = "macos"), feature = "cpu"))]
719                {
720                    unsafe extern "C" {
721                        fn openblas_set_num_threads(num: std::ffi::c_int);
722                    }
723                    openblas_set_num_threads(1);
724                }
725            }
726
727            let num_workers = rayon::current_num_threads().max(1);
728            std::thread::scope(|s| {
729                let handles: Vec<_> = (0..num_workers)
730                    .map(|_| {
731                        s.spawn(|| {
732                            // Per-thread: force single-threaded BLAS (thread-local setting).
733                            // On macOS 15+ this calls BLASSetThreading; on Linux openblas_set_num_threads.
734                            #[cfg(any(feature = "cpu", feature = "cpu-accelerate"))]
735                            crate::backend::driver::cpu::force_single_threaded_blas();
736                            let cloned = backends[0].clone_backend();
737                            state.run_worker(cloned.as_ref())
738                        })
739                    })
740                    .collect();
741                let mut all = Vec::new();
742                for handle in handles {
743                    if let Ok(pairs) = handle.join() {
744                        all.extend(pairs);
745                    }
746                }
747                all
748            })
749        } else if backends.len() == 1 {
750            // Single non-cloneable backend (GPU or CPU ModernBERT): run on the calling thread.
751            // GPU backends handle parallelism internally; CPU uses BLAS internal
752            // multi-threading (Accelerate/OpenBLAS) for intra-GEMM parallelism.
753            state.run_worker(backends[0])
754        } else {
755            // Multiple backends: one thread per backend via std::thread::scope
756            std::thread::scope(|s| {
757                let handles: Vec<_> = backends
758                    .iter()
759                    .map(|&backend| {
760                        s.spawn(|| {
761                            // CPU backends that support cloning get a thread-local copy
762                            if backend.supports_clone() {
763                                let cloned = backend.clone_backend();
764                                state.run_worker(cloned.as_ref())
765                            } else {
766                                state.run_worker(backend)
767                            }
768                        })
769                    })
770                    .collect();
771
772                let mut all = Vec::new();
773                for handle in handles {
774                    if let Ok(pairs) = handle.join() {
775                        all.extend(pairs);
776                    } else {
777                        warn!("worker thread panicked");
778                        state
779                            .error_flag
780                            .store(true, std::sync::atomic::Ordering::Relaxed);
781                    }
782                }
783                all
784            })
785        };
786
787    // Check for errors before assembling results
788    if let Some(err) = state.first_error.into_inner().ok().flatten() {
789        return Err(err);
790    }
791
792    // Scatter results into output vec by original index
793    let mut embeddings: Vec<Vec<f32>> = vec![vec![]; n];
794    for (idx, emb) in all_pairs {
795        embeddings[idx] = emb;
796    }
797
798    Ok(embeddings)
799}
800
801/// Read a source file into a `String`, skipping binary files.
802///
803/// Reads the file as raw bytes first, checks for NUL bytes in the first 8 KB
804/// to detect binary files, then converts to UTF-8. Returns `None` (with a
805/// debug log) when the file cannot be read, is binary, or is not valid UTF-8.
806pub(crate) fn read_source(path: &Path) -> Option<String> {
807    let bytes = match std::fs::read(path) {
808        Ok(b) => b,
809        Err(e) => {
810            debug!(path = %path.display(), err = %e, "skipping file: read failed");
811            return None;
812        }
813    };
814
815    // Skip binary files: NUL byte anywhere in the first 8 KB is a reliable signal.
816    if memchr::memchr(0, &bytes[..bytes.len().min(8192)]).is_some() {
817        debug!(path = %path.display(), "skipping binary file");
818        return None;
819    }
820
821    match std::str::from_utf8(&bytes) {
822        Ok(s) => Some(s.to_string()),
823        Err(e) => {
824            debug!(path = %path.display(), err = %e, "skipping file: not valid UTF-8");
825            None
826        }
827    }
828}
829
830/// Tokenize text into an [`Encoding`] ready for model inference.
831///
832/// Delegates to [`crate::tokenize::tokenize_query`] for the core encoding,
833/// then applies an additional `max_tokens` truncation when non-zero.
834/// CLS pooling means the first token's representation carries most semantic
835/// weight, so truncation has minimal quality impact.
836fn tokenize(
837    text: &str,
838    tokenizer: &tokenizers::Tokenizer,
839    max_tokens: usize,
840    model_max_tokens: usize,
841) -> crate::Result<Encoding> {
842    let mut enc = crate::tokenize::tokenize_query(text, tokenizer, model_max_tokens)?;
843    if max_tokens > 0 {
844        let len = enc.input_ids.len().min(max_tokens);
845        enc.input_ids.truncate(len);
846        enc.attention_mask.truncate(len);
847        enc.token_type_ids.truncate(len);
848    }
849    Ok(enc)
850}
851
852/// Normalize similarity scores to `[0,1]` and apply a `PageRank` structural boost.
853///
854/// Each result's similarity is min-max normalized, then a weighted `PageRank`
855/// score is added: `final = normalized + alpha * pagerank`. This promotes
856/// architecturally important files (many dependents) in search results.
857///
858/// Called from the MCP search handler which has access to the `RepoGraph`,
859/// rather than from [`search`] directly.
860pub fn apply_structural_boost<S: ::std::hash::BuildHasher>(
861    results: &mut [SearchResult],
862    file_ranks: &std::collections::HashMap<String, f32, S>,
863    alpha: f32,
864) {
865    if results.is_empty() || alpha == 0.0 {
866        return;
867    }
868
869    let min = results
870        .iter()
871        .map(|r| r.similarity)
872        .fold(f32::INFINITY, f32::min);
873    let max = results
874        .iter()
875        .map(|r| r.similarity)
876        .fold(f32::NEG_INFINITY, f32::max);
877    let range = (max - min).max(1e-12);
878
879    for r in results.iter_mut() {
880        let normalized = (r.similarity - min) / range;
881        let pr = file_ranks.get(&r.chunk.file_path).copied().unwrap_or(0.0);
882        r.similarity = normalized + alpha * pr;
883    }
884}
885
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[cfg(feature = "cpu")]
    #[ignore = "loads model + embeds full source tree; run with `cargo test -- --ignored`"]
    fn search_with_backend_trait() {
        let backend = crate::backend::load_backend(
            crate::backend::BackendKind::Cpu,
            "BAAI/bge-small-en-v1.5",
            crate::backend::DeviceHint::Cpu,
        )
        .unwrap();
        let tokenizer = crate::tokenize::load_tokenizer("BAAI/bge-small-en-v1.5").unwrap();
        let cfg = SearchConfig::default();
        let profiler = crate::profile::Profiler::noop();
        let dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR")).join("src");
        let results = search(
            &dir,
            "embedding model",
            &[backend.as_ref()],
            &tokenizer,
            1,
            &cfg,
            &profiler,
        );
        assert!(results.is_ok());
        assert!(!results.unwrap().is_empty());
    }

    #[test]
    #[cfg(feature = "cpu")]
    // `load_backend` pulls model weights, same as the other model-loading
    // tests in this module — keep it opt-in via `--ignored` for consistency.
    #[ignore = "loads model; run with `cargo test -- --ignored`"]
    fn embed_distributed_produces_correct_count() {
        let backend = crate::backend::load_backend(
            crate::backend::BackendKind::Cpu,
            "BAAI/bge-small-en-v1.5",
            crate::backend::DeviceHint::Cpu,
        )
        .unwrap();
        let tokenizer = crate::tokenize::load_tokenizer("BAAI/bge-small-en-v1.5").unwrap();
        let profiler = crate::profile::Profiler::noop();

        // Tokenize a few strings
        let texts = ["fn hello() {}", "class Foo:", "func main() {}"];
        let encoded: Vec<Option<Encoding>> = texts
            .iter()
            .map(|t| super::tokenize(t, &tokenizer, 0, 512).ok())
            .collect();

        let results =
            super::embed_distributed(&encoded, &[backend.as_ref()], 32, &profiler).unwrap();

        assert_eq!(results.len(), 3);
        // All should be 384-dim (bge-small hidden size)
        for (i, emb) in results.iter().enumerate() {
            assert_eq!(emb.len(), 384, "embedding {i} should be 384-dim");
        }
    }

    /// Truncate an embedding to `dims` dimensions and L2-normalize.
    fn truncate_and_normalize(emb: &[f32], dims: usize) -> Vec<f32> {
        let trunc = &emb[..dims];
        let norm: f32 = trunc.iter().map(|x| x * x).sum::<f32>().sqrt().max(1e-12);
        trunc.iter().map(|x| x / norm).collect()
    }

    /// Rank corpus embeddings against a query, return top-K chunk indices.
    fn rank_topk(query: &[f32], corpus: &[Vec<f32>], k: usize) -> Vec<usize> {
        let mut scored: Vec<(usize, f32)> = corpus
            .iter()
            .enumerate()
            .map(|(i, emb)| {
                let dot: f32 = query.iter().zip(emb).map(|(a, b)| a * b).sum();
                (i, dot)
            })
            .collect();
        scored.sort_unstable_by(|a, b| b.1.total_cmp(&a.1));
        scored.into_iter().take(k).map(|(i, _)| i).collect()
    }

    /// MRL retrieval recall test: does truncated search retrieve the same results?
    ///
    /// Embeds the ripvec codebase at full dimension, then tests whether
    /// truncating to fewer dimensions retrieves the same top-10 results.
    /// This is the real MRL quality test — per-vector cosine is trivially 1.0
    /// but retrieval recall can degrade if the first N dims don't preserve
    /// relative ordering between different vectors.
    #[test]
    #[ignore = "loads model + embeds; run with --nocapture"]
    #[expect(
        clippy::cast_precision_loss,
        reason = "top_k and overlap are small counts"
    )]
    fn mrl_retrieval_recall() {
        let model = "BAAI/bge-small-en-v1.5";
        let backends = crate::backend::detect_backends(model).unwrap();
        let tokenizer = crate::tokenize::load_tokenizer(model).unwrap();
        let cfg = SearchConfig::default();
        let profiler = crate::profile::Profiler::noop();

        // Embed the ripvec source tree
        let root = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
            .parent()
            .unwrap()
            .parent()
            .unwrap();
        eprintln!("Embedding {}", root.display());
        let backend_refs: Vec<&dyn crate::backend::EmbedBackend> =
            backends.iter().map(std::convert::AsRef::as_ref).collect();
        let (chunks, embeddings) =
            embed_all(root, &backend_refs, &tokenizer, &cfg, &profiler).unwrap();
        let full_dim = embeddings[0].len();
        eprintln!(
            "Corpus: {} chunks, {full_dim}-dim embeddings\n",
            chunks.len()
        );

        // Test queries spanning different semantic intents
        let queries = [
            "error handling in the embedding pipeline",
            "tree-sitter chunking and AST parsing",
            "Metal GPU kernel dispatch",
            "file watcher for incremental reindex",
            "cosine similarity ranking",
        ];

        let top_k = 10;
        let mrl_dims: Vec<usize> = [32, 64, 128, 192, 256, full_dim]
            .into_iter()
            .filter(|&d| d <= full_dim)
            .collect();

        eprintln!("=== MRL Retrieval Recall@{top_k} (vs full {full_dim}-dim) ===\n");

        for query in &queries {
            // Embed query at full dim
            let enc = tokenize(query, &tokenizer, 0, backends[0].max_tokens()).unwrap();
            let query_emb = backends[0].embed_batch(&[enc]).unwrap().pop().unwrap();

            // Full-dim reference ranking
            let ref_topk = rank_topk(&query_emb, &embeddings, top_k);

            eprintln!("Query: \"{query}\"");
            eprintln!(
                "  Full-dim top-1: {} ({})",
                chunks[ref_topk[0]].name, chunks[ref_topk[0]].file_path
            );

            for &dims in &mrl_dims {
                // Truncate corpus and query
                let trunc_corpus: Vec<Vec<f32>> = embeddings
                    .iter()
                    .map(|e| truncate_and_normalize(e, dims))
                    .collect();
                let trunc_query = truncate_and_normalize(&query_emb, dims);

                let trunc_topk = rank_topk(&trunc_query, &trunc_corpus, top_k);

                // Recall@K: how many of the full-dim top-K appear in truncated top-K
                let overlap = ref_topk.iter().filter(|i| trunc_topk.contains(i)).count();
                let recall = overlap as f32 / top_k as f32;
                let marker = if dims == full_dim {
                    " (ref)"
                } else if recall >= 0.8 {
                    " ***"
                } else {
                    ""
                };
                eprintln!(
                    "  dims={dims:>3}: Recall@{top_k}={recall:.1} ({overlap}/{top_k}){marker}"
                );
            }
            eprintln!();
        }
    }

    /// Build a minimal `SearchResult` fixture for the structural-boost tests.
    fn make_result(file_path: &str, similarity: f32) -> SearchResult {
        SearchResult {
            chunk: CodeChunk {
                file_path: file_path.to_string(),
                name: "test".to_string(),
                kind: "function".to_string(),
                start_line: 1,
                end_line: 10,
                enriched_content: String::new(),
                content: String::new(),
            },
            similarity,
        }
    }

    #[test]
    fn structural_boost_normalizes_and_applies() {
        let mut results = vec![
            make_result("src/a.rs", 0.8),
            make_result("src/b.rs", 0.4),
            make_result("src/c.rs", 0.6),
        ];
        let mut ranks = std::collections::HashMap::new();
        ranks.insert("src/a.rs".to_string(), 0.5);
        ranks.insert("src/b.rs".to_string(), 1.0);
        ranks.insert("src/c.rs".to_string(), 0.0);

        apply_structural_boost(&mut results, &ranks, 0.2);

        // a: normalized=(0.8-0.4)/0.4=1.0, boost=0.2*0.5=0.1 => 1.1
        assert!((results[0].similarity - 1.1).abs() < 1e-6);
        // b: normalized=(0.4-0.4)/0.4=0.0, boost=0.2*1.0=0.2 => 0.2
        assert!((results[1].similarity - 0.2).abs() < 1e-6);
        // c: normalized=(0.6-0.4)/0.4=0.5, boost=0.2*0.0=0.0 => 0.5
        assert!((results[2].similarity - 0.5).abs() < 1e-6);
    }

    #[test]
    fn structural_boost_noop_on_empty() {
        let mut results: Vec<SearchResult> = vec![];
        let ranks = std::collections::HashMap::new();
        apply_structural_boost(&mut results, &ranks, 0.2);
        assert!(results.is_empty());
    }

    #[test]
    fn structural_boost_noop_on_zero_alpha() {
        let mut results = vec![make_result("src/a.rs", 0.8)];
        let mut ranks = std::collections::HashMap::new();
        ranks.insert("src/a.rs".to_string(), 1.0);
        apply_structural_boost(&mut results, &ranks, 0.0);
        // Should be unchanged
        assert!((results[0].similarity - 0.8).abs() < 1e-6);
    }
}