hermes_core/segment/builder/mod.rs

//! Streaming segment builder with optimized memory usage
//!
//! Key optimizations:
//! - **String interning**: Terms are interned using `lasso` to avoid repeated allocations
//! - **hashbrown HashMap**: O(1) average insertion instead of BTreeMap's O(log n)
//! - **Streaming document store**: Documents written to disk immediately
//! - **Zero-copy store build**: Pre-serialized doc bytes passed directly to compressor
//! - **Parallel posting serialization**: Rayon parallel sort + serialize
//! - **Inline posting fast path**: Small terms skip PostingList/BlockPostingList entirely
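//!
//! A minimal usage sketch (`schema`, `config`, `dir`, `segment_id`, and `docs`
//! are caller-supplied placeholders; error handling elided):
//!
//! ```ignore
//! let mut builder = SegmentBuilder::new(schema, config)?;
//! for doc in docs {
//!     builder.add_document(doc)?;
//! }
//! let meta = builder.build(&dir, segment_id, None).await?;
//! ```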

pub(crate) mod bmp;
mod config;
mod dense;
#[cfg(feature = "diagnostics")]
mod diagnostics;
mod postings;
pub(crate) mod simhash;
mod sparse;
mod store;

pub use config::{MemoryBreakdown, SegmentBuilderConfig, SegmentBuilderStats};

use std::fs::{File, OpenOptions};
use std::io::{BufWriter, Write};
use std::mem::size_of;
use std::path::PathBuf;

use hashbrown::HashMap;
use lasso::{Rodeo, Spur};
use rustc_hash::FxHashMap;

use super::types::{FieldStats, SegmentFiles, SegmentId, SegmentMeta};
use std::sync::Arc;

use crate::directories::{Directory, DirectoryWriter};
use crate::dsl::{Document, Field, FieldType, FieldValue, Schema};
use crate::tokenizer::BoxedTokenizer;
use crate::{DocId, Result};

use dense::DenseVectorBuilder;
use postings::{CompactPosting, PositionPostingListBuilder, PostingListBuilder, TermKey};
use sparse::SparseVectorBuilder;

/// Size of the document store buffer before writing to disk
const STORE_BUFFER_SIZE: usize = 16 * 1024 * 1024; // 16MB

/// Memory overhead per new term in the inverted index:
/// HashMap entry control byte + padding + TermKey + PostingListBuilder + Vec header
const NEW_TERM_OVERHEAD: usize = size_of::<TermKey>() + size_of::<PostingListBuilder>() + 24;

/// Memory overhead per newly interned string: Spur + arena pointers (2 × usize)
const INTERN_OVERHEAD: usize = size_of::<Spur>() + 2 * size_of::<usize>();

/// Memory overhead per new term in the position index
const NEW_POS_TERM_OVERHEAD: usize =
    size_of::<TermKey>() + size_of::<PositionPostingListBuilder>() + 24;
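
// These constants feed the incrementally tracked `estimated_memory` counter
// (see `index_text_field`): a newly interned term adds its byte length plus
// INTERN_OVERHEAD; a term new to the inverted index adds NEW_TERM_OVERHEAD plus
// one CompactPosting; a term new to the position index adds NEW_POS_TERM_OVERHEAD
// on top of its position bytes.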

/// Segment builder with optimized memory usage
///
/// Features:
/// - Streams documents to disk immediately (no in-memory document storage)
/// - Uses string interning for terms (reduced allocations)
/// - Uses hashbrown HashMap (faster than BTreeMap)
pub struct SegmentBuilder {
    schema: Arc<Schema>,
    config: SegmentBuilderConfig,
    tokenizers: FxHashMap<Field, BoxedTokenizer>,

    /// String interner for terms - O(1) lookup and deduplication
    term_interner: Rodeo,

    /// Inverted index: term key -> posting list
    inverted_index: HashMap<TermKey, PostingListBuilder>,

    /// Streaming document store writer
    store_file: BufWriter<File>,
    store_path: PathBuf,

    /// Document count
    next_doc_id: DocId,

    /// Per-field statistics for BM25F
    field_stats: FxHashMap<u32, FieldStats>,

    /// Per-document field lengths stored compactly
    /// Uses a flat Vec instead of Vec<HashMap> for better cache locality
    /// Layout: [doc0_field0_len, doc0_field1_len, ..., doc1_field0_len, ...]
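    /// Example: with num_indexed_fields = 2, the length of the slot-1 field for
    /// doc 3 lives at index 3 * 2 + 1 = 7 (see `add_document`: `base_idx + slot`).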
    doc_field_lengths: Vec<u32>,
    num_indexed_fields: usize,
    field_to_slot: FxHashMap<u32, usize>,

    /// Reusable buffer for per-document term frequency aggregation
    /// Avoids allocating a new hashmap for each document
    local_tf_buffer: FxHashMap<Spur, u32>,

    /// Reusable buffer for per-document position tracking (when positions enabled)
    /// Avoids allocating a new hashmap for each text field per document
    local_positions: FxHashMap<Spur, Vec<u32>>,

    /// Reusable buffer for tokenization to avoid per-token String allocations
    token_buffer: String,

    /// Reusable buffer for numeric field term encoding (avoids format!() alloc per call)
    numeric_buffer: String,

    /// Dense vector storage per field: field -> (doc_ids, vectors)
    /// Vectors are stored as flat f32 arrays for efficient RaBitQ indexing
    dense_vectors: FxHashMap<u32, DenseVectorBuilder>,

    /// Sparse vector storage per field: field -> SparseVectorBuilder
    /// Uses proper BlockSparsePostingList with configurable quantization
    sparse_vectors: FxHashMap<u32, SparseVectorBuilder>,

    /// Position index for fields with positions enabled
    /// term key -> position posting list
    position_index: HashMap<TermKey, PositionPostingListBuilder>,

    /// Fields that have position tracking enabled, with their mode
    position_enabled_fields: FxHashMap<u32, Option<crate::dsl::PositionMode>>,

    /// Current element ordinal for multi-valued fields (reset per document)
    current_element_ordinal: FxHashMap<u32, u32>,

    /// Incrementally tracked memory estimate (avoids expensive stats() calls)
    estimated_memory: usize,

    /// Reusable buffer for document serialization (avoids per-document allocation)
    doc_serialize_buffer: Vec<u8>,

    /// Fast-field columnar writers per field_id (only for fields with fast=true)
    fast_fields: FxHashMap<u32, crate::structures::fast_field::FastFieldWriter>,

    /// Per-ordinal SimHash for BMP block clustering: field_id → (doc_id, ordinal) → simhash
    ordinal_simhashes: FxHashMap<u32, FxHashMap<(DocId, u16), u64>>,
}

impl SegmentBuilder {
    /// Create a new segment builder
    pub fn new(schema: Arc<Schema>, config: SegmentBuilderConfig) -> Result<Self> {
        let segment_id = uuid::Uuid::new_v4();
        let store_path = config
            .temp_dir
            .join(format!("hermes_store_{}.tmp", segment_id));

        let store_file = BufWriter::with_capacity(
            STORE_BUFFER_SIZE,
            OpenOptions::new()
                .create(true)
                .write(true)
                .truncate(true)
                .open(&store_path)?,
        );

        // Count indexed fields, track positions, and auto-configure tokenizers
        let registry = crate::tokenizer::TokenizerRegistry::new();
        let mut num_indexed_fields = 0;
        let mut field_to_slot = FxHashMap::default();
        let mut position_enabled_fields = FxHashMap::default();
        let mut tokenizers = FxHashMap::default();
        for (field, entry) in schema.fields() {
            if entry.indexed && matches!(entry.field_type, FieldType::Text) {
                field_to_slot.insert(field.0, num_indexed_fields);
                num_indexed_fields += 1;
                if entry.positions.is_some() {
                    position_enabled_fields.insert(field.0, entry.positions);
                }
                if let Some(ref tok_name) = entry.tokenizer
                    && let Some(tokenizer) = registry.get(tok_name)
                {
                    tokenizers.insert(field, tokenizer);
                }
            }
        }

        // Initialize fast-field writers for fields with fast=true
        use crate::structures::fast_field::{FastFieldColumnType, FastFieldWriter};
        let mut fast_fields = FxHashMap::default();
        for (field, entry) in schema.fields() {
            if entry.fast {
                let writer = if entry.multi {
                    match entry.field_type {
                        FieldType::U64 => {
                            FastFieldWriter::new_numeric_multi(FastFieldColumnType::U64)
                        }
                        FieldType::I64 => {
                            FastFieldWriter::new_numeric_multi(FastFieldColumnType::I64)
                        }
                        FieldType::F64 => {
                            FastFieldWriter::new_numeric_multi(FastFieldColumnType::F64)
                        }
                        FieldType::Text => FastFieldWriter::new_text_multi(),
                        _ => continue,
                    }
                } else {
                    match entry.field_type {
                        FieldType::U64 => FastFieldWriter::new_numeric(FastFieldColumnType::U64),
                        FieldType::I64 => FastFieldWriter::new_numeric(FastFieldColumnType::I64),
                        FieldType::F64 => FastFieldWriter::new_numeric(FastFieldColumnType::F64),
                        FieldType::Text => FastFieldWriter::new_text(),
                        _ => continue,
                    }
                };
                fast_fields.insert(field.0, writer);
            }
        }

        Ok(Self {
            schema,
            tokenizers,
            term_interner: Rodeo::new(),
            inverted_index: HashMap::with_capacity(config.posting_map_capacity),
            store_file,
            store_path,
            next_doc_id: 0,
            field_stats: FxHashMap::default(),
            doc_field_lengths: Vec::new(),
            num_indexed_fields,
            field_to_slot,
            local_tf_buffer: FxHashMap::default(),
            local_positions: FxHashMap::default(),
            token_buffer: String::with_capacity(64),
            numeric_buffer: String::with_capacity(32),
            config,
            dense_vectors: FxHashMap::default(),
            sparse_vectors: FxHashMap::default(),
            position_index: HashMap::new(),
            position_enabled_fields,
            current_element_ordinal: FxHashMap::default(),
            estimated_memory: 0,
            doc_serialize_buffer: Vec::with_capacity(256),
            fast_fields,
            ordinal_simhashes: FxHashMap::default(),
        })
    }

    pub fn set_tokenizer(&mut self, field: Field, tokenizer: BoxedTokenizer) {
        self.tokenizers.insert(field, tokenizer);
    }

    /// Get the current element ordinal for a field and increment it.
    /// Used for multi-valued fields (text, dense_vector, sparse_vector).
    fn next_element_ordinal(&mut self, field_id: u32) -> u32 {
        let ordinal = *self.current_element_ordinal.get(&field_id).unwrap_or(&0);
        *self.current_element_ordinal.entry(field_id).or_insert(0) += 1;
        ordinal
    }

    pub fn num_docs(&self) -> u32 {
        self.next_doc_id
    }

    /// Fast O(1) memory estimate - updated incrementally during indexing
    #[inline]
    pub fn estimated_memory_bytes(&self) -> usize {
        self.estimated_memory
    }

    /// Count total unique sparse dimensions across all fields
    pub fn sparse_dim_count(&self) -> usize {
        self.sparse_vectors.values().map(|b| b.postings.len()).sum()
    }

    /// Get current statistics for debugging performance (expensive - iterates all data)
    pub fn stats(&self) -> SegmentBuilderStats {
        use std::mem::size_of;

        let postings_in_memory: usize =
            self.inverted_index.values().map(|p| p.postings.len()).sum();

        // Size constants computed from actual types
        let compact_posting_size = size_of::<CompactPosting>();
        let vec_overhead = size_of::<Vec<u8>>(); // Vec header: ptr + len + cap = 24 bytes on 64-bit
        let term_key_size = size_of::<TermKey>();
        let posting_builder_size = size_of::<PostingListBuilder>();
        let spur_size = size_of::<lasso::Spur>();
        let sparse_entry_size = size_of::<(DocId, u16, f32)>();

        // hashbrown HashMap entry overhead: key + value + 1 byte control + padding
        // Measured: ~(key_size + value_size + 8) per entry on average
        let hashmap_entry_base_overhead = 8usize;

        // FxHashMap uses same layout as hashbrown
        let fxhashmap_entry_overhead = hashmap_entry_base_overhead;

        // Postings memory
        let postings_bytes: usize = self
            .inverted_index
            .values()
            .map(|p| p.postings.capacity() * compact_posting_size + vec_overhead)
            .sum();

        // Inverted index overhead
        let index_overhead_bytes = self.inverted_index.len()
            * (term_key_size + posting_builder_size + hashmap_entry_base_overhead);

        // Term interner: Rodeo stores strings + metadata
        // Rodeo internal: string bytes + Spur + arena overhead (~2 pointers per string)
        let interner_arena_overhead = 2 * size_of::<usize>();
        let avg_term_len = 8; // Estimated average term length
        let interner_bytes =
            self.term_interner.len() * (avg_term_len + spur_size + interner_arena_overhead);

        // Doc field lengths
        let field_lengths_bytes =
            self.doc_field_lengths.capacity() * size_of::<u32>() + vec_overhead;

        // Dense vectors
        let mut dense_vectors_bytes: usize = 0;
        let mut dense_vector_count: usize = 0;
        let doc_id_ordinal_size = size_of::<(DocId, u16)>();
        for b in self.dense_vectors.values() {
            dense_vectors_bytes += b.vectors.capacity() * size_of::<f32>()
                + b.doc_ids.capacity() * doc_id_ordinal_size
                + 2 * vec_overhead; // Two Vecs
            dense_vector_count += b.doc_ids.len();
        }

        // Local buffers
        let local_tf_entry_size = spur_size + size_of::<u32>() + fxhashmap_entry_overhead;
        let local_tf_buffer_bytes = self.local_tf_buffer.capacity() * local_tf_entry_size;

        // Sparse vectors
        let mut sparse_vectors_bytes: usize = 0;
        for builder in self.sparse_vectors.values() {
            for postings in builder.postings.values() {
                sparse_vectors_bytes += postings.capacity() * sparse_entry_size + vec_overhead;
            }
            // Inner FxHashMap overhead: u32 key + Vec value ptr + overhead
            let inner_entry_size = size_of::<u32>() + vec_overhead + fxhashmap_entry_overhead;
            sparse_vectors_bytes += builder.postings.len() * inner_entry_size;
        }
        // Outer FxHashMap overhead
        let outer_sparse_entry_size =
            size_of::<u32>() + size_of::<SparseVectorBuilder>() + fxhashmap_entry_overhead;
        sparse_vectors_bytes += self.sparse_vectors.len() * outer_sparse_entry_size;

        // Position index
        let mut position_index_bytes: usize = 0;
        for pos_builder in self.position_index.values() {
            for (_, positions) in &pos_builder.postings {
                position_index_bytes += positions.capacity() * size_of::<u32>() + vec_overhead;
            }
            // Vec<(DocId, Vec<u32>)> entry size
            let pos_entry_size = size_of::<DocId>() + vec_overhead;
            position_index_bytes += pos_builder.postings.capacity() * pos_entry_size;
        }
        // HashMap overhead for position_index
        let pos_index_entry_size =
            term_key_size + size_of::<PositionPostingListBuilder>() + hashmap_entry_base_overhead;
        position_index_bytes += self.position_index.len() * pos_index_entry_size;

        let estimated_memory_bytes = postings_bytes
            + index_overhead_bytes
            + interner_bytes
            + field_lengths_bytes
            + dense_vectors_bytes
            + local_tf_buffer_bytes
            + sparse_vectors_bytes
            + position_index_bytes;

        let memory_breakdown = MemoryBreakdown {
            postings_bytes,
            index_overhead_bytes,
            interner_bytes,
            field_lengths_bytes,
            dense_vectors_bytes,
            dense_vector_count,
            sparse_vectors_bytes,
            position_index_bytes,
        };

        SegmentBuilderStats {
            num_docs: self.next_doc_id,
            unique_terms: self.inverted_index.len(),
            postings_in_memory,
            interned_strings: self.term_interner.len(),
            doc_field_lengths_size: self.doc_field_lengths.len(),
            estimated_memory_bytes,
            memory_breakdown,
        }
    }

    /// Add a document - streams to disk immediately
    pub fn add_document(&mut self, doc: Document) -> Result<DocId> {
        let doc_id = self.next_doc_id;
        self.next_doc_id += 1;

        // Initialize field lengths for this document
        let base_idx = self.doc_field_lengths.len();
        self.doc_field_lengths
            .resize(base_idx + self.num_indexed_fields, 0);
        self.estimated_memory += self.num_indexed_fields * std::mem::size_of::<u32>();

        // Reset element ordinals for this document (for multi-valued fields)
        self.current_element_ordinal.clear();

        for (field, value) in doc.field_values() {
            let Some(entry) = self.schema.get_field_entry(*field) else {
                continue;
            };

            // Dense vectors are written to .vectors when indexed || stored
            // Other field types require indexed or fast
            if !matches!(&entry.field_type, FieldType::DenseVector) && !entry.indexed && !entry.fast
            {
                continue;
            }

            match (&entry.field_type, value) {
                (FieldType::Text, FieldValue::Text(text)) => {
                    if entry.indexed {
                        let element_ordinal = self.next_element_ordinal(field.0);
                        let token_count =
                            self.index_text_field(*field, doc_id, text, element_ordinal)?;

                        let stats = self.field_stats.entry(field.0).or_default();
                        stats.total_tokens += token_count as u64;
                        if element_ordinal == 0 {
                            stats.doc_count += 1;
                        }

                        if let Some(&slot) = self.field_to_slot.get(&field.0) {
                            self.doc_field_lengths[base_idx + slot] = token_count;
                        }
                    }

                    // Fast-field: store raw text for text ordinal column
                    if let Some(ff) = self.fast_fields.get_mut(&field.0) {
                        ff.add_text(doc_id, text);
                    }
                }
                (FieldType::U64, FieldValue::U64(v)) => {
                    if entry.indexed {
                        self.index_numeric_field(*field, doc_id, *v)?;
                    }
                    if let Some(ff) = self.fast_fields.get_mut(&field.0) {
                        ff.add_u64(doc_id, *v);
                    }
                }
                (FieldType::I64, FieldValue::I64(v)) => {
                    if entry.indexed {
                        self.index_numeric_field(*field, doc_id, *v as u64)?;
                    }
                    if let Some(ff) = self.fast_fields.get_mut(&field.0) {
                        ff.add_i64(doc_id, *v);
                    }
                }
                (FieldType::F64, FieldValue::F64(v)) => {
                    if entry.indexed {
                        self.index_numeric_field(*field, doc_id, v.to_bits())?;
                    }
                    if let Some(ff) = self.fast_fields.get_mut(&field.0) {
                        ff.add_f64(doc_id, *v);
                    }
                }
                (FieldType::DenseVector, FieldValue::DenseVector(vec))
                    if entry.indexed || entry.stored =>
                {
                    let ordinal = self.next_element_ordinal(field.0);
                    self.index_dense_vector_field(*field, doc_id, ordinal as u16, vec)?;
                }
                (FieldType::SparseVector, FieldValue::SparseVector(entries)) => {
                    let has_simhash = entry.simhash;
                    // Extract config before mutable borrows below
                    let sparse_cfg = entry.sparse_vector_config.as_ref();
                    let wt = sparse_cfg.map(|c| c.weight_threshold).unwrap_or(0.0);
                    let mw = sparse_cfg.and_then(|c| c.max_weight).unwrap_or(5.0);
                    let ordinal = self.next_element_ordinal(field.0);
                    self.index_sparse_vector_field(*field, doc_id, ordinal as u16, entries)?;
                    // V12: compute simhash for every ordinal (not just ordinal 0)
                    // Uses quantized u8 impacts (matching reorder_bmp_blob) so that
                    // build-time and reorder-time SimHash are identical.
                    if has_simhash {
                        let h = simhash::simhash_from_sparse_vector(entries, wt, mw);
                        let is_new_field = !self.ordinal_simhashes.contains_key(&field.0);
                        self.ordinal_simhashes
                            .entry(field.0)
                            .or_default()
                            .insert((doc_id, ordinal as u16), h);
                        // Memory: (doc_id, ordinal) key (6B) + u64 hash (8B) + hashmap overhead (~8B)
                        self.estimated_memory += size_of::<(DocId, u16)>() + size_of::<u64>() + 8;
                        if is_new_field {
                            self.estimated_memory +=
                                size_of::<u32>() + size_of::<FxHashMap<(DocId, u16), u64>>() + 8;
                        }
                    }
                }
                _ => {}
            }
        }

        // Stream document to disk immediately
        self.write_document_to_store(&doc)?;

        Ok(doc_id)
    }

    /// Index a text field using interned terms
    ///
    /// Uses a custom tokenizer when set for the field (via `set_tokenizer`),
    /// otherwise falls back to an inline zero-allocation path (split_whitespace
    /// + lowercase + strip non-alphanumeric).
    ///
    /// If position recording is enabled for this field, also records token positions
    /// encoded as (element_ordinal << 20) | token_position.
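    ///
    /// For example, with `PositionMode::Full` and (hypothetical) element_ordinal = 2,
    /// token_position = 5, the encoded position is (2 << 20) | 5 = 2_097_157.
    /// `Ordinal` mode keeps only `element_ordinal << 20`; `TokenPosition` keeps the
    /// raw token position.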
    fn index_text_field(
        &mut self,
        field: Field,
        doc_id: DocId,
        text: &str,
        element_ordinal: u32,
    ) -> Result<u32> {
        use crate::dsl::PositionMode;

        let field_id = field.0;
        let position_mode = self
            .position_enabled_fields
            .get(&field_id)
            .copied()
            .flatten();

        // Phase 1: Aggregate term frequencies within this document
        // Also collect positions if enabled
        // Reuse buffers to avoid allocations
        self.local_tf_buffer.clear();
        // Clear position Vecs in-place (keeps allocated capacity for reuse)
        for v in self.local_positions.values_mut() {
            v.clear();
        }

        let mut token_position = 0u32;

        // Tokenize: use custom tokenizer if set, else inline zero-alloc path.
        // The owned Vec<Token> is computed first so the immutable borrow of
        // self.tokenizers ends before we mutate other fields.
        let custom_tokens = self.tokenizers.get(&field).map(|t| t.tokenize(text));

        if let Some(tokens) = custom_tokens {
            // Custom tokenizer path
            for token in &tokens {
                let term_spur = if let Some(spur) = self.term_interner.get(&token.text) {
                    spur
                } else {
                    let spur = self.term_interner.get_or_intern(&token.text);
                    self.estimated_memory += token.text.len() + INTERN_OVERHEAD;
                    spur
                };
                *self.local_tf_buffer.entry(term_spur).or_insert(0) += 1;

                if let Some(mode) = position_mode {
                    let encoded_pos = match mode {
                        PositionMode::Ordinal => element_ordinal << 20,
                        PositionMode::TokenPosition => token.position,
                        PositionMode::Full => (element_ordinal << 20) | token.position,
                    };
                    self.local_positions
                        .entry(term_spur)
                        .or_default()
                        .push(encoded_pos);
                }
            }
            token_position = tokens.len() as u32;
        } else {
            // Inline zero-allocation path: split_whitespace + lowercase + strip non-alphanumeric
            for word in text.split_whitespace() {
                self.token_buffer.clear();
                for c in word.chars() {
                    if c.is_alphanumeric() {
                        for lc in c.to_lowercase() {
                            self.token_buffer.push(lc);
                        }
                    }
                }

                if self.token_buffer.is_empty() {
                    continue;
                }

                let term_spur = if let Some(spur) = self.term_interner.get(&self.token_buffer) {
                    spur
                } else {
                    let spur = self.term_interner.get_or_intern(&self.token_buffer);
                    self.estimated_memory += self.token_buffer.len() + INTERN_OVERHEAD;
                    spur
                };
                *self.local_tf_buffer.entry(term_spur).or_insert(0) += 1;

                if let Some(mode) = position_mode {
                    let encoded_pos = match mode {
                        PositionMode::Ordinal => element_ordinal << 20,
                        PositionMode::TokenPosition => token_position,
                        PositionMode::Full => (element_ordinal << 20) | token_position,
                    };
                    self.local_positions
                        .entry(term_spur)
                        .or_default()
                        .push(encoded_pos);
                }

                token_position += 1;
            }
        }

        // Phase 2: Insert aggregated terms into inverted index
        // Now we only do one inverted_index lookup per unique term in doc
        for (&term_spur, &tf) in &self.local_tf_buffer {
            let term_key = TermKey {
                field: field_id,
                term: term_spur,
            };

            match self.inverted_index.entry(term_key) {
                hashbrown::hash_map::Entry::Occupied(mut o) => {
                    o.get_mut().add(doc_id, tf);
                    self.estimated_memory += size_of::<CompactPosting>();
                }
                hashbrown::hash_map::Entry::Vacant(v) => {
                    let mut posting = PostingListBuilder::new();
                    posting.add(doc_id, tf);
                    v.insert(posting);
                    self.estimated_memory += size_of::<CompactPosting>() + NEW_TERM_OVERHEAD;
                }
            }

            if position_mode.is_some()
                && let Some(positions) = self.local_positions.get(&term_spur)
            {
                match self.position_index.entry(term_key) {
                    hashbrown::hash_map::Entry::Occupied(mut o) => {
                        for &pos in positions {
                            o.get_mut().add_position(doc_id, pos);
                        }
                        self.estimated_memory += positions.len() * size_of::<u32>();
                    }
                    hashbrown::hash_map::Entry::Vacant(v) => {
                        let mut pos_posting = PositionPostingListBuilder::new();
                        for &pos in positions {
                            pos_posting.add_position(doc_id, pos);
                        }
                        self.estimated_memory +=
                            positions.len() * size_of::<u32>() + NEW_POS_TERM_OVERHEAD;
                        v.insert(pos_posting);
                    }
                }
            }
        }

        Ok(token_position)
    }

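    /// Index a numeric value as a synthetic exact-match term of the form `__num_{value}`.
    ///
    /// The value is already normalized to u64 bits by the caller (`add_document`):
    /// I64 values are cast and F64 values use `to_bits()`. Term frequency is always 1.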
    fn index_numeric_field(&mut self, field: Field, doc_id: DocId, value: u64) -> Result<()> {
        use std::fmt::Write;

        self.numeric_buffer.clear();
        write!(self.numeric_buffer, "__num_{}", value).unwrap();
        let term_spur = if let Some(spur) = self.term_interner.get(&self.numeric_buffer) {
            spur
        } else {
            let spur = self.term_interner.get_or_intern(&self.numeric_buffer);
            self.estimated_memory += self.numeric_buffer.len() + INTERN_OVERHEAD;
            spur
        };

        let term_key = TermKey {
            field: field.0,
            term: term_spur,
        };

        match self.inverted_index.entry(term_key) {
            hashbrown::hash_map::Entry::Occupied(mut o) => {
                o.get_mut().add(doc_id, 1);
                self.estimated_memory += size_of::<CompactPosting>();
            }
            hashbrown::hash_map::Entry::Vacant(v) => {
                let mut posting = PostingListBuilder::new();
                posting.add(doc_id, 1);
                v.insert(posting);
                self.estimated_memory += size_of::<CompactPosting>() + NEW_TERM_OVERHEAD;
            }
        }

        Ok(())
    }

    /// Index a dense vector field with ordinal tracking
    fn index_dense_vector_field(
        &mut self,
        field: Field,
        doc_id: DocId,
        ordinal: u16,
        vector: &[f32],
    ) -> Result<()> {
        let dim = vector.len();

        let builder = self
            .dense_vectors
            .entry(field.0)
            .or_insert_with(|| DenseVectorBuilder::new(dim));

        // Verify dimension consistency
        if builder.dim != dim && builder.len() > 0 {
            return Err(crate::Error::Schema(format!(
                "Dense vector dimension mismatch: expected {}, got {}",
                builder.dim, dim
            )));
        }

        builder.add(doc_id, ordinal, vector);

        self.estimated_memory += std::mem::size_of_val(vector) + size_of::<(DocId, u16)>();

        Ok(())
    }

    /// Index a sparse vector field using dedicated sparse posting lists
    ///
    /// Collects (doc_id, ordinal, weight) postings per dimension. During commit, these are
    /// converted to BlockSparsePostingList with proper quantization from SparseVectorConfig.
    ///
    /// Weights below the configured `weight_threshold` are not indexed.
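    ///
    /// For example, with a (hypothetical) threshold of 0.1, entries `[(3, 0.05), (7, 0.9)]`
    /// index only dimension 7, since |0.05| < 0.1.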
    fn index_sparse_vector_field(
        &mut self,
        field: Field,
        doc_id: DocId,
        ordinal: u16,
        entries: &[(u32, f32)],
    ) -> Result<()> {
        // Get weight threshold from field config (default 0.0 = no filtering)
        let weight_threshold = self
            .schema
            .get_field_entry(field)
            .and_then(|entry| entry.sparse_vector_config.as_ref())
            .map(|config| config.weight_threshold)
            .unwrap_or(0.0);

        let builder = self
            .sparse_vectors
            .entry(field.0)
            .or_insert_with(SparseVectorBuilder::new);

        builder.inc_vector_count();

        for &(dim_id, weight) in entries {
            // Skip weights below threshold
            if weight.abs() < weight_threshold {
                continue;
            }

            let is_new_dim = !builder.postings.contains_key(&dim_id);
            builder.add(dim_id, doc_id, ordinal, weight);
            self.estimated_memory += size_of::<(DocId, u16, f32)>();
            if is_new_dim {
                // HashMap entry overhead + Vec header
                self.estimated_memory += size_of::<u32>() + size_of::<Vec<(DocId, u16, f32)>>() + 8; // 8 = hashmap control byte + padding
            }
        }

        Ok(())
    }

    /// Write document to streaming store (reuses internal buffer to avoid per-doc allocation)
    fn write_document_to_store(&mut self, doc: &Document) -> Result<()> {
        use byteorder::{LittleEndian, WriteBytesExt};

        super::store::serialize_document_into(doc, &self.schema, &mut self.doc_serialize_buffer)?;

        self.store_file
            .write_u32::<LittleEndian>(self.doc_serialize_buffer.len() as u32)?;
        self.store_file.write_all(&self.doc_serialize_buffer)?;

        Ok(())
    }

    /// Build the final segment
    ///
    /// Streams all data directly to disk via StreamingWriter to avoid buffering
    /// entire serialized outputs in memory. Each phase consumes and drops its
    /// source data before the next phase begins.
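    ///
    /// Output files (see `SegmentFiles`): positions (if any), term_dict, postings,
    /// store, and, when present, vectors, sparse, and fast, plus the meta file.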
    pub async fn build<D: Directory + DirectoryWriter>(
        mut self,
        dir: &D,
        segment_id: SegmentId,
        trained: Option<&super::TrainedVectorStructures>,
    ) -> Result<SegmentMeta> {
        // Flush any buffered data
        self.store_file.flush()?;

        let files = SegmentFiles::new(segment_id.0);

        // Phase 1: Stream positions directly to disk (consumes position_index)
        let position_index = std::mem::take(&mut self.position_index);
        let position_offsets = if !position_index.is_empty() {
            let mut pos_writer = dir.streaming_writer(&files.positions).await?;
            let offsets = postings::build_positions_streaming(
                position_index,
                &self.term_interner,
                &mut *pos_writer,
            )?;
            pos_writer.finish()?;
            offsets
        } else {
            FxHashMap::default()
        };

        // Phase 2: parallel build of postings, store, dense vectors, sparse vectors, and fast fields.
        // These are fully independent: different source data, different output files.
        let inverted_index = std::mem::take(&mut self.inverted_index);
        let term_interner = std::mem::replace(&mut self.term_interner, Rodeo::new());
        let store_path = self.store_path.clone();
        let num_compression_threads = self.config.num_compression_threads;
        let compression_level = self.config.compression_level;
        let dense_vectors = std::mem::take(&mut self.dense_vectors);
        let mut sparse_vectors = std::mem::take(&mut self.sparse_vectors);
        let ordinal_simhashes = std::mem::take(&mut self.ordinal_simhashes);
        let schema = &self.schema;

        // Pre-create all streaming writers (async) before entering sync rayon scope
        // Wrapped in OffsetWriter to track bytes written per phase.
        let mut term_dict_writer =
            super::OffsetWriter::new(dir.streaming_writer(&files.term_dict).await?);
        let mut postings_writer =
            super::OffsetWriter::new(dir.streaming_writer(&files.postings).await?);
        let mut store_writer = super::OffsetWriter::new(dir.streaming_writer(&files.store).await?);
        let mut vectors_writer = if !dense_vectors.is_empty() {
            Some(super::OffsetWriter::new(
                dir.streaming_writer(&files.vectors).await?,
            ))
        } else {
            None
        };
        let mut sparse_writer = if !sparse_vectors.is_empty() {
            Some(super::OffsetWriter::new(
                dir.streaming_writer(&files.sparse).await?,
            ))
        } else {
            None
        };
        let mut fast_fields = std::mem::take(&mut self.fast_fields);
        let num_docs = self.next_doc_id;
        let mut fast_writer = if !fast_fields.is_empty() {
            Some(super::OffsetWriter::new(
                dir.streaming_writer(&files.fast).await?,
            ))
        } else {
            None
        };

        let ((postings_result, store_result), ((vectors_result, sparse_result), fast_result)) =
            rayon::join(
                || {
                    rayon::join(
                        || {
                            postings::build_postings_streaming(
                                inverted_index,
                                term_interner,
                                &position_offsets,
                                &mut term_dict_writer,
                                &mut postings_writer,
                            )
                        },
                        || {
                            store::build_store_streaming(
                                &store_path,
                                num_compression_threads,
                                compression_level,
                                &mut store_writer,
                                num_docs,
                            )
                        },
                    )
                },
                || {
                    rayon::join(
                        || {
                            rayon::join(
                                || -> Result<()> {
                                    if let Some(ref mut w) = vectors_writer {
                                        dense::build_vectors_streaming(
                                            dense_vectors,
                                            schema,
                                            trained,
                                            w,
                                        )?;
                                    }
                                    Ok(())
                                },
                                || -> Result<()> {
                                    if let Some(ref mut w) = sparse_writer {
                                        sparse::build_sparse_streaming(
                                            &mut sparse_vectors,
                                            schema,
                                            &ordinal_simhashes,
                                            w,
                                        )?;
                                    }
                                    Ok(())
                                },
                            )
                        },
                        || -> Result<()> {
                            if let Some(ref mut w) = fast_writer {
                                build_fast_fields_streaming(&mut fast_fields, num_docs, w)?;
                            }
                            Ok(())
                        },
                    )
                },
            );
        postings_result?;
        store_result?;
        vectors_result?;
        sparse_result?;
        fast_result?;

        let term_dict_bytes = term_dict_writer.offset() as usize;
        let postings_bytes = postings_writer.offset() as usize;
        let store_bytes = store_writer.offset() as usize;
        let vectors_bytes = vectors_writer.as_ref().map_or(0, |w| w.offset() as usize);
        let sparse_bytes = sparse_writer.as_ref().map_or(0, |w| w.offset() as usize);
        let fast_bytes = fast_writer.as_ref().map_or(0, |w| w.offset() as usize);

        term_dict_writer.finish()?;
        postings_writer.finish()?;
        store_writer.finish()?;
        if let Some(w) = vectors_writer {
            w.finish()?;
        }
        if let Some(w) = sparse_writer {
            w.finish()?;
        }
        if let Some(w) = fast_writer {
            w.finish()?;
        }
        drop(position_offsets);
        drop(sparse_vectors);

        log::info!(
            "[segment_build] {} docs: term_dict={}, postings={}, store={}, vectors={}, sparse={}, fast={}",
            num_docs,
            super::format_bytes(term_dict_bytes),
            super::format_bytes(postings_bytes),
            super::format_bytes(store_bytes),
            super::format_bytes(vectors_bytes),
            super::format_bytes(sparse_bytes),
            super::format_bytes(fast_bytes),
        );

        let meta = SegmentMeta {
            id: segment_id.0,
            num_docs: self.next_doc_id,
            field_stats: self.field_stats.clone(),
        };

        dir.write(&files.meta, &meta.serialize()?).await?;

        // Cleanup temp files
        let _ = std::fs::remove_file(&self.store_path);

        Ok(meta)
    }
}

/// Serialize all fast-field columns to a `.fast` file.
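///
/// Layout, as written below: each field's column data in ascending field-id order,
/// followed by the TOC entries and footer written by `write_fast_field_toc_and_footer`.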
fn build_fast_fields_streaming(
    fast_fields: &mut FxHashMap<u32, crate::structures::fast_field::FastFieldWriter>,
    num_docs: u32,
    writer: &mut dyn Write,
) -> Result<()> {
    use crate::structures::fast_field::{FastFieldTocEntry, write_fast_field_toc_and_footer};

    if fast_fields.is_empty() {
        return Ok(());
    }

    // Sort fields by id for deterministic output
    let mut field_ids: Vec<u32> = fast_fields.keys().copied().collect();
    field_ids.sort_unstable();

    let mut toc_entries: Vec<FastFieldTocEntry> = Vec::with_capacity(field_ids.len());
    let mut current_offset = 0u64;

    for &field_id in &field_ids {
        let ff = fast_fields.get_mut(&field_id).unwrap();
        ff.pad_to(num_docs);

        let (mut toc, bytes_written) = ff.serialize(writer, current_offset)?;
        toc.field_id = field_id;
        current_offset += bytes_written;
        toc_entries.push(toc);
    }

    // Write TOC + footer
    let toc_offset = current_offset;
    write_fast_field_toc_and_footer(writer, toc_offset, &toc_entries)?;

    Ok(())
}

impl Drop for SegmentBuilder {
    fn drop(&mut self) {
        // Cleanup temp files on drop
        let _ = std::fs::remove_file(&self.store_path);
    }
}