// hermes_core/segment/vector_data.rs
1//! Vector index data structures shared between builder and reader
2
3use std::io;
4use std::mem::size_of;
5
6use serde::{Deserialize, Serialize};
7
8use crate::directories::{AsyncFileRead, LazyFileSlice, OwnedBytes};
9use crate::dsl::DenseVectorQuantization;
10use crate::segment::format::{DOC_ID_ENTRY_SIZE, FLAT_BINARY_HEADER_SIZE, FLAT_BINARY_MAGIC};
11use crate::structures::simd::{batch_f32_to_f16, batch_f32_to_u8, f16_to_f32, u8_to_f32};
12
13/// Dequantize raw bytes to f32 based on storage quantization.
14///
15/// `raw` is the quantized byte slice, `out` receives the f32 values.
16/// `num_floats` is the number of f32 values to produce (= num_vectors × dim).
17/// Data-first file layout guarantees alignment for f32/f16 access.
18#[inline]
19pub fn dequantize_raw(
20    raw: &[u8],
21    quant: DenseVectorQuantization,
22    num_floats: usize,
23    out: &mut [f32],
24) {
25    debug_assert!(out.len() >= num_floats);
26    match quant {
27        DenseVectorQuantization::F32 => {
28            debug_assert!(
29                (raw.as_ptr() as usize).is_multiple_of(std::mem::align_of::<f32>()),
30                "f32 vector data not 4-byte aligned"
31            );
32            out[..num_floats].copy_from_slice(unsafe {
33                std::slice::from_raw_parts(raw.as_ptr() as *const f32, num_floats)
34            });
35        }
36        DenseVectorQuantization::F16 => {
37            debug_assert!(
38                (raw.as_ptr() as usize).is_multiple_of(std::mem::align_of::<u16>()),
39                "f16 vector data not 2-byte aligned"
40            );
41            let f16_slice =
42                unsafe { std::slice::from_raw_parts(raw.as_ptr() as *const u16, num_floats) };
43            for (i, &h) in f16_slice.iter().enumerate() {
44                out[i] = f16_to_f32(h);
45            }
46        }
47        DenseVectorQuantization::UInt8 => {
48            for (i, &b) in raw.iter().enumerate().take(num_floats) {
49                out[i] = u8_to_f32(b);
50            }
51        }
52    }
53}
54
/// Flat vector binary format helpers for writing.
///
/// Binary format v3:
/// ```text
/// [magic(u32)][dim(u32)][num_vectors(u32)][quant_type(u8)][padding(3)]
/// [vectors: N×dim×element_size]
/// [doc_ids: N×(u32+u16)]
/// ```
///
/// `element_size` is determined by `quant_type`: f32=4, f16=2, uint8=1.
/// All multi-byte header fields are little-endian (see `write_binary_header`).
/// This is a stateless namespace type: writing goes through the associated
/// functions below; reading is handled by [`LazyFlatVectorData`], which loads
/// only doc_ids into memory and accesses vector data lazily via mmap-backed
/// range reads.
pub struct FlatVectorData;
68
69impl FlatVectorData {
70    /// Write the binary header to a writer.
71    pub fn write_binary_header(
72        dim: usize,
73        num_vectors: usize,
74        quant: DenseVectorQuantization,
75        writer: &mut dyn std::io::Write,
76    ) -> std::io::Result<()> {
77        writer.write_all(&FLAT_BINARY_MAGIC.to_le_bytes())?;
78        writer.write_all(&(dim as u32).to_le_bytes())?;
79        writer.write_all(&(num_vectors as u32).to_le_bytes())?;
80        writer.write_all(&[quant.tag(), 0, 0, 0])?; // quant_type + 3 bytes padding
81        Ok(())
82    }
83
84    /// Compute the serialized size without actually serializing.
85    pub fn serialized_binary_size(
86        dim: usize,
87        num_vectors: usize,
88        quant: DenseVectorQuantization,
89    ) -> usize {
90        FLAT_BINARY_HEADER_SIZE
91            + num_vectors * dim * quant.element_size()
92            + num_vectors * DOC_ID_ENTRY_SIZE
93    }
94
95    /// Stream from flat f32 storage to a writer, quantizing on write.
96    ///
97    /// `flat_vectors` is contiguous storage of dim*n f32 floats.
98    /// Vectors are quantized to the specified format before writing.
99    pub fn serialize_binary_from_flat_streaming(
100        dim: usize,
101        flat_vectors: &[f32],
102        doc_ids: &[(u32, u16)],
103        quant: DenseVectorQuantization,
104        writer: &mut dyn std::io::Write,
105    ) -> std::io::Result<()> {
106        let num_vectors = doc_ids.len();
107        Self::write_binary_header(dim, num_vectors, quant, writer)?;
108
109        match quant {
110            DenseVectorQuantization::F32 => {
111                let bytes: &[u8] = unsafe {
112                    std::slice::from_raw_parts(
113                        flat_vectors.as_ptr() as *const u8,
114                        std::mem::size_of_val(flat_vectors),
115                    )
116                };
117                writer.write_all(bytes)?;
118            }
119            DenseVectorQuantization::F16 => {
120                let mut buf = vec![0u16; dim];
121                for v in flat_vectors.chunks_exact(dim) {
122                    batch_f32_to_f16(v, &mut buf);
123                    let bytes: &[u8] =
124                        unsafe { std::slice::from_raw_parts(buf.as_ptr() as *const u8, dim * 2) };
125                    writer.write_all(bytes)?;
126                }
127            }
128            DenseVectorQuantization::UInt8 => {
129                let mut buf = vec![0u8; dim];
130                for v in flat_vectors.chunks_exact(dim) {
131                    batch_f32_to_u8(v, &mut buf);
132                    writer.write_all(&buf)?;
133                }
134            }
135        }
136
137        for &(doc_id, ordinal) in doc_ids {
138            writer.write_all(&doc_id.to_le_bytes())?;
139            writer.write_all(&ordinal.to_le_bytes())?;
140        }
141
142        Ok(())
143    }
144
145    /// Write raw pre-quantized vector bytes to a writer (for merger streaming).
146    ///
147    /// `raw_bytes` is already in the target quantized format.
148    pub fn write_raw_vector_bytes(
149        raw_bytes: &[u8],
150        writer: &mut dyn std::io::Write,
151    ) -> std::io::Result<()> {
152        writer.write_all(raw_bytes)
153    }
154}
155
/// Lazy flat vector data — doc_ids in memory, vectors accessed via range reads.
///
/// Only the doc_id index (~6 bytes/vector) is loaded into memory.
/// Vector data stays on disk and is accessed via mmap-backed range reads.
/// Element size depends on quantization: f32=4, f16=2, uint8=1 bytes/dim.
///
/// Used for:
/// - Brute-force search (batched scoring with native-precision SIMD)
/// - Reranking (read individual vectors by doc_id via binary search)
/// - doc() hydration (dequantize to f32 for stored documents)
/// - Merge streaming (chunked raw vector bytes + doc_id iteration)
#[derive(Debug, Clone)]
pub struct LazyFlatVectorData {
    /// Vector dimension
    pub dim: usize,
    /// Total number of vectors
    pub num_vectors: usize,
    /// Storage quantization type
    pub quantization: DenseVectorQuantization,
    /// In-memory doc_id index: (doc_id, ordinal) per vector; sorted by
    /// (doc_id, ordinal) — see `flat_indexes_for_doc`
    pub doc_ids: Vec<(u32, u16)>,
    /// Lazy handle to this field's flat data region in the .vectors file
    handle: LazyFileSlice,
    /// Byte offset within handle where raw vector data starts (after header)
    vectors_offset: u64,
    /// Bytes per vector element (cached from quantization.element_size())
    element_size: usize,
}
184
185impl LazyFlatVectorData {
186    /// Open from a lazy file slice pointing to the flat binary data region.
187    ///
188    /// Reads header (16 bytes) + doc_ids (~6 bytes/vector) into memory.
189    /// Vector data stays lazy on disk.
190    pub async fn open(handle: LazyFileSlice) -> io::Result<Self> {
191        // Read header: magic(4) + dim(4) + num_vectors(4) + quant_type(1) + pad(3) = 16 bytes
192        let header = handle
193            .read_bytes_range(0..FLAT_BINARY_HEADER_SIZE as u64)
194            .await?;
195        let hdr = header.as_slice();
196
197        let magic = u32::from_le_bytes([hdr[0], hdr[1], hdr[2], hdr[3]]);
198        if magic != FLAT_BINARY_MAGIC {
199            return Err(io::Error::new(
200                io::ErrorKind::InvalidData,
201                "Invalid FlatVectorData binary magic",
202            ));
203        }
204
205        let dim = u32::from_le_bytes([hdr[4], hdr[5], hdr[6], hdr[7]]) as usize;
206        let num_vectors = u32::from_le_bytes([hdr[8], hdr[9], hdr[10], hdr[11]]) as usize;
207        let quantization = DenseVectorQuantization::from_tag(hdr[12]).ok_or_else(|| {
208            io::Error::new(
209                io::ErrorKind::InvalidData,
210                format!("Unknown quantization tag: {}", hdr[12]),
211            )
212        })?;
213        let element_size = quantization.element_size();
214
215        // Read doc_ids section (small: 6 bytes per vector)
216        let vectors_byte_len = num_vectors * dim * element_size;
217        let doc_ids_start = (FLAT_BINARY_HEADER_SIZE + vectors_byte_len) as u64;
218        let doc_ids_byte_len = (num_vectors * DOC_ID_ENTRY_SIZE) as u64;
219
220        let doc_ids_bytes = handle
221            .read_bytes_range(doc_ids_start..doc_ids_start + doc_ids_byte_len)
222            .await?;
223        let d = doc_ids_bytes.as_slice();
224
225        let mut doc_ids = Vec::with_capacity(num_vectors);
226        for i in 0..num_vectors {
227            let off = i * DOC_ID_ENTRY_SIZE;
228            let doc_id = u32::from_le_bytes([d[off], d[off + 1], d[off + 2], d[off + 3]]);
229            let ordinal = u16::from_le_bytes([d[off + 4], d[off + 5]]);
230            doc_ids.push((doc_id, ordinal));
231        }
232
233        Ok(Self {
234            dim,
235            num_vectors,
236            quantization,
237            doc_ids,
238            handle,
239            vectors_offset: FLAT_BINARY_HEADER_SIZE as u64,
240            element_size,
241        })
242    }
243
244    /// Read a single vector by index, dequantized to f32.
245    ///
246    /// `out` must have length >= `self.dim`. Returns `Ok(())` on success.
247    /// Used for ANN training and doc() hydration where f32 is needed.
248    pub async fn read_vector_into(&self, idx: usize, out: &mut [f32]) -> io::Result<()> {
249        debug_assert!(out.len() >= self.dim);
250        let vec_byte_len = self.dim * self.element_size;
251        let byte_offset = self.vectors_offset + (idx * vec_byte_len) as u64;
252        let bytes = self
253            .handle
254            .read_bytes_range(byte_offset..byte_offset + vec_byte_len as u64)
255            .await?;
256        let raw = bytes.as_slice();
257
258        dequantize_raw(raw, self.quantization, self.dim, out);
259        Ok(())
260    }
261
262    /// Read a single vector by index, dequantized to f32 (allocates a new Vec<f32>).
263    pub async fn get_vector(&self, idx: usize) -> io::Result<Vec<f32>> {
264        let mut vector = vec![0f32; self.dim];
265        self.read_vector_into(idx, &mut vector).await?;
266        Ok(vector)
267    }
268
269    /// Read a single vector's raw bytes (no dequantization) into a caller-provided buffer.
270    ///
271    /// `out` must have length >= `self.vector_byte_size()`.
272    /// Used for native-precision reranking where raw quantized bytes are scored directly.
273    pub async fn read_vector_raw_into(&self, idx: usize, out: &mut [u8]) -> io::Result<()> {
274        let vbs = self.vector_byte_size();
275        debug_assert!(out.len() >= vbs);
276        let byte_offset = self.vectors_offset + (idx * vbs) as u64;
277        let bytes = self
278            .handle
279            .read_bytes_range(byte_offset..byte_offset + vbs as u64)
280            .await?;
281        out[..vbs].copy_from_slice(bytes.as_slice());
282        Ok(())
283    }
284
285    /// Read a contiguous batch of raw quantized bytes by index range.
286    ///
287    /// Returns raw bytes for vectors `[start_idx..start_idx+count)`.
288    /// Bytes are in native quantized format — pass to `batch_cosine_scores_f16/u8`
289    /// or `batch_cosine_scores` (for f32) for scoring.
290    pub async fn read_vectors_batch(
291        &self,
292        start_idx: usize,
293        count: usize,
294    ) -> io::Result<OwnedBytes> {
295        debug_assert!(start_idx + count <= self.num_vectors);
296        let vec_byte_len = self.dim * self.element_size;
297        let byte_offset = self.vectors_offset + (start_idx * vec_byte_len) as u64;
298        let byte_len = (count * vec_byte_len) as u64;
299        self.handle
300            .read_bytes_range(byte_offset..byte_offset + byte_len)
301            .await
302    }
303
304    /// Find flat indexes for a given doc_id via binary search on sorted doc_ids.
305    ///
306    /// doc_ids are sorted by (doc_id, ordinal) — segment builder adds docs
307    /// sequentially. Returns a slice of (doc_id, ordinal) entries; the position
308    /// of each entry in `self.doc_ids` is its flat vector index.
309    ///
310    /// Returns `(start_index, slice)` where start_index is the position in doc_ids.
311    pub fn flat_indexes_for_doc(&self, doc_id: u32) -> (usize, &[(u32, u16)]) {
312        let start = self.doc_ids.partition_point(|&(id, _)| id < doc_id);
313        let end = start + self.doc_ids[start..].partition_point(|&(id, _)| id == doc_id);
314        (start, &self.doc_ids[start..end])
315    }
316
317    /// Get doc_id and ordinal at index (from in-memory index).
318    #[inline]
319    pub fn get_doc_id(&self, idx: usize) -> (u32, u16) {
320        self.doc_ids[idx]
321    }
322
323    /// Bytes per vector in storage.
324    #[inline]
325    pub fn vector_byte_size(&self) -> usize {
326        self.dim * self.element_size
327    }
328
329    /// Total byte length of raw vector data (for chunked merger streaming).
330    pub fn vector_bytes_len(&self) -> u64 {
331        (self.num_vectors as u64) * (self.vector_byte_size() as u64)
332    }
333
334    /// Byte offset where vector data starts (for direct handle access in merger).
335    pub fn vectors_byte_offset(&self) -> u64 {
336        self.vectors_offset
337    }
338
339    /// Access the underlying lazy file handle (for chunked byte-range reads in merger).
340    pub fn handle(&self) -> &LazyFileSlice {
341        &self.handle
342    }
343
344    /// Estimated memory usage (only doc_ids are in memory).
345    pub fn estimated_memory_bytes(&self) -> usize {
346        self.doc_ids.capacity() * size_of::<(u32, u16)>() + size_of::<Self>()
347    }
348}
349
/// IVF-RaBitQ index data (codebook + cluster assignments)
///
/// Centroids are stored at the index level (`field_X_centroids.bin`),
/// not duplicated per segment. Serialized/deserialized as a unit via
/// bincode (see `to_bytes` / `from_bytes`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IVFRaBitQIndexData {
    /// Cluster assignments and inverted lists for the segment
    pub index: crate::structures::IVFRaBitQIndex,
    /// RaBitQ quantization codebook used to encode residuals
    pub codebook: crate::structures::RaBitQCodebook,
}
359
360impl IVFRaBitQIndexData {
361    pub fn to_bytes(&self) -> std::io::Result<Vec<u8>> {
362        bincode::serde::encode_to_vec(self, bincode::config::standard())
363            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
364    }
365
366    pub fn from_bytes(data: &[u8]) -> std::io::Result<Self> {
367        bincode::serde::decode_from_slice(data, bincode::config::standard())
368            .map(|(v, _)| v)
369            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
370    }
371}
372
/// ScaNN index data (codebook + cluster assignments)
///
/// Centroids are stored at the index level (`field_X_centroids.bin`),
/// not duplicated per segment. Serialized/deserialized as a unit via
/// bincode (see `to_bytes` / `from_bytes`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScaNNIndexData {
    /// IVF-PQ cluster assignments and inverted lists for the segment
    pub index: crate::structures::IVFPQIndex,
    /// Product-quantization codebook used to encode vectors
    pub codebook: crate::structures::PQCodebook,
}
382
383impl ScaNNIndexData {
384    pub fn to_bytes(&self) -> std::io::Result<Vec<u8>> {
385        bincode::serde::encode_to_vec(self, bincode::config::standard())
386            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
387    }
388
389    pub fn from_bytes(data: &[u8]) -> std::io::Result<Self> {
390        bincode::serde::decode_from_slice(data, bincode::config::standard())
391            .map(|(v, _)| v)
392            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
393    }
394}