// ripvec_core/backend/generic.rs
//! Generic backend that pairs a [`Driver`] with a [`ModelArch`].
//!
//! [`GenericBackend`] implements [`EmbedBackend`] by delegating to the
//! architecture's `forward()` method, which composes driver primitives into
//! the full inference pipeline. This decouples weight loading from the
//! backend interface — any `(Driver, ModelArch)` pair can serve as an
//! embedding backend.
//!
//! The `_mmap` field keeps the memory-mapped safetensors file alive as long
//! as the backend exists, since Metal zero-copy buffers reference its pages.

use super::arch::ModelArch;
use super::driver::Driver;
use super::{EmbedBackend, Encoding};
15
/// Generic backend that pairs a [`Driver`] with a [`ModelArch`].
///
/// Implements [`EmbedBackend`] by calling `arch.forward(driver, encodings)`.
/// The driver provides hardware-specific compute primitives; the architecture
/// orchestrates them into a full forward pass.
///
/// # Lifetime invariant
///
/// `_mmap` **must** be declared after `arch` so it is dropped last (Rust drops
/// struct fields in declaration order, so the last-declared field is destroyed
/// last). The architecture's weight tensors reference pages in the
/// memory-mapped file via zero-copy Metal buffers; dropping the mmap first
/// would invalidate them.
pub struct GenericBackend<D: Driver, A: ModelArch<D>> {
    /// Hardware compute driver (Metal, CUDA, CPU).
    driver: D,
    /// Model architecture with loaded weights.
    arch: A,
    /// Maximum token count the model supports.
    max_tokens: usize,
    /// Whether this backend runs on a GPU.
    is_gpu: bool,
    /// Maximum encodings per forward pass. Larger batches saturate GPU SMs better
    /// but use more memory. Default: 32 (Metal-tuned). CUDA can handle 128+.
    max_batch: usize,
    /// Keeps the memory-mapped safetensors file alive.
    ///
    /// Must outlive the weight tensors in `arch` — declared last for correct
    /// drop order (see the struct-level lifetime invariant).
    _mmap: memmap2::Mmap,
}
45
46impl<D: Driver, A: ModelArch<D>> GenericBackend<D, A> {
47    /// Create a new generic backend from a driver, architecture, and mmap.
48    ///
49    /// The `mmap` must be the memory-mapped safetensors file whose pages back
50    /// the weight tensors stored in `arch`.
51    /// Create a new generic backend.
52    ///
53    /// For GPU backends, runs a warm-up forward pass to prime the buffer pool.
54    /// This is skipped for large models (max_tokens > 1024) where the warm-up
55    /// cost exceeds the benefit.
56    /// Create a new generic backend.
57    ///
58    /// `max_batch` controls how many encodings are sent in each forward pass.
59    /// Metal: 32 (optimal for M2 Max AMX). CUDA: 128+ (needs more work to
60    /// saturate 128 SMs on RTX 4090).
61    pub fn new(driver: D, arch: A, max_tokens: usize, is_gpu: bool, mmap: memmap2::Mmap) -> Self {
62        Self::with_max_batch(driver, arch, max_tokens, is_gpu, mmap, 32)
63    }
64
65    /// Create with explicit max batch size.
66    pub fn with_max_batch(
67        driver: D,
68        arch: A,
69        max_tokens: usize,
70        is_gpu: bool,
71        mmap: memmap2::Mmap,
72        max_batch: usize,
73    ) -> Self {
74        let backend = Self {
75            driver,
76            arch,
77            max_tokens,
78            is_gpu,
79            max_batch,
80            _mmap: mmap,
81        };
82        // Warm up buffer pool: run a dummy forward to pre-allocate Metal buffers.
83        // Without this, the first real batch pays 160-330 fresh newBufferWithLength
84        // calls. The warm-up fills the pool; subsequent batches with similar
85        // dimensions get exact-match hits (within 8× tolerance).
86        //
87        // Small models (BGE-small, 12L): batch=32 × seq=512, ~80ms.
88        // Large models (ModernBERT, 22L): batch=32 × seq=64, ~300ms.
89        //   (Smaller seq keeps cost down; 8× pool tolerance covers seq up to 512.)
90        if is_gpu && max_tokens <= 1024 {
91            let seq = if max_tokens <= 1024 {
92                512.min(max_tokens)
93            } else {
94                64
95            };
96            let mut dummy = Vec::with_capacity(32);
97            for _ in 0..32 {
98                let ids: Vec<i64> = (0..seq as i64).collect();
99                dummy.push(Encoding {
100                    input_ids: ids,
101                    attention_mask: vec![1; seq],
102                    token_type_ids: vec![0; seq],
103                });
104            }
105            let _ = backend.arch.forward(&backend.driver, &dummy);
106        }
107        backend
108    }
109}
110
111impl<D, A> EmbedBackend for GenericBackend<D, A>
112where
113    D: Driver + Send + Sync + 'static,
114    A: ModelArch<D> + Send + Sync + 'static,
115{
116    fn embed_batch(&self, encodings: &[Encoding]) -> crate::Result<Vec<Vec<f32>>> {
117        let max_batch = self.max_batch;
118        if encodings.len() <= max_batch {
119            return self.arch.forward(&self.driver, encodings);
120        }
121        let mut all = Vec::with_capacity(encodings.len());
122        for chunk in encodings.chunks(max_batch) {
123            let mut results = self.arch.forward(&self.driver, chunk)?;
124            all.append(&mut results);
125        }
126        Ok(all)
127    }
128
129    fn supports_clone(&self) -> bool {
130        false
131    }
132
133    fn clone_backend(&self) -> Box<dyn EmbedBackend> {
134        panic!("GenericBackend does not support cloning")
135    }
136
137    fn is_gpu(&self) -> bool {
138        self.is_gpu
139    }
140
141    fn max_tokens(&self) -> usize {
142        self.max_tokens
143    }
144}