mod dense_vectors;
mod sparse_vectors;

use std::cmp::Ordering;
use std::collections::BinaryHeap;
use std::io::Write;
use std::sync::Arc;

use rustc_hash::FxHashMap;

use super::reader::SegmentReader;
use super::store::StoreMerger;
use super::types::{FieldStats, SegmentFiles, SegmentId, SegmentMeta};
use crate::Result;
use crate::directories::{Directory, DirectoryWriter, StreamingWriter};
use crate::dsl::Schema;
use crate::structures::{
    BlockPostingList, PositionPostingList, PostingList, SSTableWriter, TERMINATED, TermInfo,
};

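/// A [`Write`] adapter over a boxed [`StreamingWriter`] that counts the
/// number of bytes written so far.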
pub(crate) struct OffsetWriter {
    /// The wrapped streaming writer that receives all bytes.
    inner: Box<dyn StreamingWriter>,
    /// Number of bytes written so far.
    offset: u64,
}

impl OffsetWriter {
    fn new(inner: Box<dyn StreamingWriter>) -> Self {
        Self { inner, offset: 0 }
    }

    /// Returns the number of bytes written so far.
    fn offset(&self) -> u64 {
        self.offset
    }

    /// Flushes and finalizes the underlying writer.
    fn finish(self) -> std::io::Result<()> {
        self.inner.finish()
    }
}

impl Write for OffsetWriter {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let n = self.inner.write(buf)?;
        self.offset += n as u64;
        Ok(n)
    }

    fn flush(&mut self) -> std::io::Result<()> {
        self.inner.flush()
    }
}

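/// Formats a byte count as a human-readable string (B, KB, MB, GB).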
fn format_bytes(bytes: usize) -> String {
    if bytes >= 1024 * 1024 * 1024 {
        format!("{:.2} GB", bytes as f64 / (1024.0 * 1024.0 * 1024.0))
    } else if bytes >= 1024 * 1024 {
        format!("{:.2} MB", bytes as f64 / (1024.0 * 1024.0))
    } else if bytes >= 1024 {
        format!("{:.2} KB", bytes as f64 / 1024.0)
    } else {
        format!("{} B", bytes)
    }
}

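/// Returns, for each segment, the doc id offset to add when rebasing its
/// documents: the running sum of `num_docs` over all preceding segments.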
fn doc_offsets(segments: &[SegmentReader]) -> Vec<u32> {
    let mut offsets = Vec::with_capacity(segments.len());
    let mut acc = 0u32;
    for seg in segments {
        offsets.push(acc);
        acc += seg.num_docs();
    }
    offsets
}

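/// Statistics collected during a segment merge.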
#[derive(Debug, Clone, Default)]
pub struct MergeStats {
    /// Number of distinct terms processed.
    pub terms_processed: usize,
    /// Peak memory used by the in-memory term result buffer.
    pub peak_memory_bytes: usize,
    /// Bytes written to the term dictionary.
    pub term_dict_bytes: usize,
    /// Bytes written to the postings file.
    pub postings_bytes: usize,
    /// Bytes written to the document store.
    pub store_bytes: usize,
    /// Bytes written to the dense vectors file.
    pub vectors_bytes: usize,
    /// Bytes written to the sparse vectors file.
    pub sparse_bytes: usize,
}

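/// Heap entry for the k-way merge over per-segment term dictionaries,
/// ordered so that the smallest term key surfaces first (see `Ord` below).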
struct MergeEntry {
    key: Vec<u8>,
    term_info: TermInfo,
    segment_idx: usize,
    doc_offset: u32,
}

impl PartialEq for MergeEntry {
    fn eq(&self, other: &Self) -> bool {
        self.key == other.key
    }
}

impl Eq for MergeEntry {}

impl PartialOrd for MergeEntry {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for MergeEntry {
    fn cmp(&self, other: &Self) -> Ordering {
        // Reversed comparison: `BinaryHeap` is a max-heap, so ordering by
        // `other.key` first turns it into a min-heap over term keys.
        other.key.cmp(&self.key)
    }
}

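// Re-export of the trained vector structures accepted by `SegmentMerger::merge`.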
pub use super::types::TrainedVectorStructures;

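/// Merges multiple segments into a single new segment: postings, term
/// dictionary, positions, document store, and vector data.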
pub struct SegmentMerger {
    schema: Arc<Schema>,
}

impl SegmentMerger {
    pub fn new(schema: Arc<Schema>) -> Self {
        Self { schema }
    }

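    /// Merges `segments` into a new segment identified by `new_segment_id`.
    ///
    /// Streams the merged term dictionary, postings, positions, document
    /// store, and dense/sparse vector data into the new segment's files,
    /// then writes the merged metadata. Passing trained vector structures
    /// enables the ANN merge path for dense vectors. Returns the new
    /// segment's metadata together with merge statistics.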
    pub async fn merge<D: Directory + DirectoryWriter>(
        &self,
        dir: &D,
        segments: &[SegmentReader],
        new_segment_id: SegmentId,
        trained: Option<&TrainedVectorStructures>,
    ) -> Result<(SegmentMeta, MergeStats)> {
        let mut stats = MergeStats::default();
        let files = SegmentFiles::new(new_segment_id.0);

        let mut postings_writer = OffsetWriter::new(dir.streaming_writer(&files.postings).await?);
        let mut positions_writer =
            OffsetWriter::new(dir.streaming_writer(&files.positions).await?);
        let mut term_dict_writer =
            OffsetWriter::new(dir.streaming_writer(&files.term_dict).await?);

        let terms_processed = self
            .merge_postings(
                segments,
                &mut term_dict_writer,
                &mut postings_writer,
                &mut positions_writer,
                &mut stats,
            )
            .await?;
        stats.terms_processed = terms_processed;
        stats.postings_bytes = postings_writer.offset() as usize;
        stats.term_dict_bytes = term_dict_writer.offset() as usize;
        let positions_bytes = positions_writer.offset();

        postings_writer.finish()?;
        term_dict_writer.finish()?;
        if positions_bytes > 0 {
            positions_writer.finish()?;
        } else {
            // No positions were written: drop the writer and remove the empty file.
            drop(positions_writer);
            let _ = dir.delete(&files.positions).await;
        }

        {
            // Merge the document stores of all segments.
            let mut store_writer = OffsetWriter::new(dir.streaming_writer(&files.store).await?);
            {
                let mut store_merger = StoreMerger::new(&mut store_writer);
                for segment in segments {
                    if segment.store_has_dict() {
                        store_merger
                            .append_store_recompressing(segment.store())
                            .await
                            .map_err(crate::Error::Io)?;
                    } else {
                        let raw_blocks = segment.store_raw_blocks();
                        let data_slice = segment.store_data_slice();
                        store_merger.append_store(data_slice, &raw_blocks).await?;
                    }
                }
                store_merger.finish()?;
            }
            stats.store_bytes = store_writer.offset() as usize;
            store_writer.finish()?;
        }

        // Merge dense and sparse vector data.
        let vectors_bytes = self
            .merge_dense_vectors(dir, segments, &files, trained)
            .await?;
        stats.vectors_bytes = vectors_bytes;

        let sparse_bytes = self.merge_sparse_vectors(dir, segments, &files).await?;
        stats.sparse_bytes = sparse_bytes;

        // Accumulate per-field statistics across all segments.
        let mut merged_field_stats: FxHashMap<u32, FieldStats> = FxHashMap::default();
        for segment in segments {
            for (&field_id, field_stats) in &segment.meta().field_stats {
                let entry = merged_field_stats.entry(field_id).or_default();
                entry.total_tokens += field_stats.total_tokens;
                entry.doc_count += field_stats.doc_count;
            }
        }

        let total_docs: u32 = segments.iter().map(|s| s.num_docs()).sum();
        let meta = SegmentMeta {
            id: new_segment_id.0,
            num_docs: total_docs,
            field_stats: merged_field_stats,
        };

        dir.write(&files.meta, &meta.serialize()?).await?;

        let label = if trained.is_some() {
            "ANN merge"
        } else {
            "Merge"
        };
        log::info!(
            "{} complete: {} docs, {} terms, term_dict={}, postings={}, store={}, vectors={}, sparse={}",
            label,
            total_docs,
            stats.terms_processed,
            format_bytes(stats.term_dict_bytes),
            format_bytes(stats.postings_bytes),
            format_bytes(stats.store_bytes),
            format_bytes(stats.vectors_bytes),
            format_bytes(stats.sparse_bytes),
        );

        Ok((meta, stats))
    }

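    /// Performs a k-way merge of the per-segment term dictionaries.
    ///
    /// A min-heap over term keys drives the merge: for each distinct term,
    /// the posting lists of every segment containing it are merged (with
    /// doc ids rebased by each segment's doc offset) and the resulting
    /// `TermInfo` entries are written to the new SSTable term dictionary.
    /// Returns the number of distinct terms processed.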
    async fn merge_postings(
        &self,
        segments: &[SegmentReader],
        term_dict: &mut OffsetWriter,
        postings_out: &mut OffsetWriter,
        positions_out: &mut OffsetWriter,
        stats: &mut MergeStats,
    ) -> Result<usize> {
        let doc_offs = doc_offsets(segments);

        for (i, segment) in segments.iter().enumerate() {
            log::debug!("Prefetching term dict for segment {} ...", i);
            segment.prefetch_term_dict().await?;
        }

        let mut iterators: Vec<_> = segments.iter().map(|s| s.term_dict_iter()).collect();

        let mut heap: BinaryHeap<MergeEntry> = BinaryHeap::new();
        // Seed the heap with the first term from every segment.
        for (seg_idx, iter) in iterators.iter_mut().enumerate() {
            if let Some((key, term_info)) = iter.next().await.map_err(crate::Error::from)? {
                heap.push(MergeEntry {
                    key,
                    term_info,
                    segment_idx: seg_idx,
                    doc_offset: doc_offs[seg_idx],
                });
            }
        }

        // Merged entries are buffered and written to the SSTable once the
        // k-way merge completes.
        let mut term_results: Vec<(Vec<u8>, TermInfo)> = Vec::new();
        let mut terms_processed = 0usize;
        let mut serialize_buf: Vec<u8> = Vec::new();

        while let Some(first) = heap.pop() {
            let current_key = first.key.clone();

            let mut sources: Vec<(usize, TermInfo, u32)> =
                vec![(first.segment_idx, first.term_info, first.doc_offset)];

            // Refill the heap from the segment the popped entry came from.
            if let Some((key, term_info)) = iterators[first.segment_idx]
                .next()
                .await
                .map_err(crate::Error::from)?
            {
                heap.push(MergeEntry {
                    key,
                    term_info,
                    segment_idx: first.segment_idx,
                    doc_offset: doc_offs[first.segment_idx],
                });
            }

            // Collect every other segment that holds the same term.
            while let Some(entry) = heap.peek() {
                if entry.key != current_key {
                    break;
                }
                let entry = heap.pop().unwrap();
                sources.push((entry.segment_idx, entry.term_info, entry.doc_offset));

                if let Some((key, term_info)) = iterators[entry.segment_idx]
                    .next()
                    .await
                    .map_err(crate::Error::from)?
                {
                    heap.push(MergeEntry {
                        key,
                        term_info,
                        segment_idx: entry.segment_idx,
                        doc_offset: doc_offs[entry.segment_idx],
                    });
                }
            }

            let term_info = self
                .merge_term(
                    segments,
                    &sources,
                    postings_out,
                    positions_out,
                    &mut serialize_buf,
                )
                .await?;

            term_results.push((current_key, term_info));
            terms_processed += 1;

            if terms_processed.is_multiple_of(100_000) {
                log::debug!("Merge progress: {} terms processed", terms_processed);
            }
        }

        // Approximate buffer footprint: tuple storage only, excluding the
        // heap-allocated key bytes.
        let results_mem = term_results.capacity() * std::mem::size_of::<(Vec<u8>, TermInfo)>();
        stats.peak_memory_bytes = stats.peak_memory_bytes.max(results_mem);

        log::info!(
            "[merge] complete: terms={}, segments={}, term_buffer={:.2} MB, postings={}, positions={}",
            terms_processed,
            segments.len(),
            results_mem as f64 / (1024.0 * 1024.0),
            format_bytes(postings_out.offset() as usize),
            format_bytes(positions_out.offset() as usize),
        );

        let mut writer = SSTableWriter::<TermInfo>::new(term_dict);
        // Keys come out of the heap in ascending order, as the SSTable requires.
        for (key, term_info) in term_results {
            writer.insert(&key, &term_info)?;
        }
        writer.finish()?;

        Ok(terms_processed)
    }

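    /// Merges all posting lists for a single term into the output writers.
    ///
    /// When every source is an external block posting list, the blocks are
    /// concatenated without re-encoding; otherwise postings are decoded,
    /// rebased, and re-encoded (small results may be inlined into the
    /// returned `TermInfo`). Position lists are concatenated when present.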
    async fn merge_term(
        &self,
        segments: &[SegmentReader],
        sources: &[(usize, TermInfo, u32)],
        postings_out: &mut OffsetWriter,
        positions_out: &mut OffsetWriter,
        buf: &mut Vec<u8>,
    ) -> Result<TermInfo> {
        let mut sorted: Vec<_> = sources.to_vec();
        sorted.sort_by_key(|(_, _, off)| *off);

        let any_positions = sorted.iter().any(|(_, ti, _)| ti.position_info().is_some());
        let all_external = sorted.iter().all(|(_, ti, _)| ti.external_info().is_some());

        let (posting_offset, posting_len, doc_count) = if all_external && sorted.len() > 1 {
            // Fast path: every source already has an external block posting
            // list, so the blocks can be concatenated without re-encoding.
            let mut block_sources = Vec::with_capacity(sorted.len());
            for (seg_idx, ti, doc_off) in &sorted {
                let (off, len) = ti.external_info().unwrap();
                let bytes = segments[*seg_idx].read_postings(off, len).await?;
                let bpl = BlockPostingList::deserialize(&mut bytes.as_slice())?;
                block_sources.push((bpl, *doc_off));
            }
            let merged = BlockPostingList::concatenate_blocks(&block_sources)?;
            let offset = postings_out.offset();
            buf.clear();
            merged.serialize(buf)?;
            postings_out.write_all(buf)?;
            (offset, buf.len() as u32, merged.doc_count())
        } else {
            // Slow path: decode each source, rebase doc ids, and re-encode.
            let mut merged = PostingList::new();
            for (seg_idx, ti, doc_off) in &sorted {
                if let Some((ids, tfs)) = ti.decode_inline() {
                    for (id, tf) in ids.into_iter().zip(tfs) {
                        merged.add(id + doc_off, tf);
                    }
                } else {
                    let (off, len) = ti.external_info().unwrap();
                    let bytes = segments[*seg_idx].read_postings(off, len).await?;
                    let bpl = BlockPostingList::deserialize(&mut bytes.as_slice())?;
                    let mut it = bpl.iterator();
                    while it.doc() != TERMINATED {
                        merged.add(it.doc() + doc_off, it.term_freq());
                        it.advance();
                    }
                }
            }
            if !any_positions {
                // Small posting lists can be inlined directly into the TermInfo.
                let ids: Vec<u32> = merged.iter().map(|p| p.doc_id).collect();
                let tfs: Vec<u32> = merged.iter().map(|p| p.term_freq).collect();
                if let Some(inline) = TermInfo::try_inline(&ids, &tfs) {
                    return Ok(inline);
                }
            }
            let offset = postings_out.offset();
            let block = BlockPostingList::from_posting_list(&merged)?;
            buf.clear();
            block.serialize(buf)?;
            postings_out.write_all(buf)?;
            (offset, buf.len() as u32, merged.doc_count())
        };

        if any_positions {
            // Concatenate the position lists from every segment that has them.
            let mut pos_sources = Vec::new();
            for (seg_idx, ti, doc_off) in &sorted {
                if let Some((pos_off, pos_len)) = ti.position_info()
                    && let Some(bytes) = segments[*seg_idx]
                        .read_position_bytes(pos_off, pos_len)
                        .await?
                {
                    let pl = PositionPostingList::deserialize(&mut bytes.as_slice())
                        .map_err(crate::Error::Io)?;
                    pos_sources.push((pl, *doc_off));
                }
            }
            if !pos_sources.is_empty() {
                let merged = PositionPostingList::concatenate_blocks(&pos_sources)
                    .map_err(crate::Error::Io)?;
                let offset = positions_out.offset();
                buf.clear();
                merged.serialize(buf).map_err(crate::Error::Io)?;
                positions_out.write_all(buf)?;
                return Ok(TermInfo::external_with_positions(
                    posting_offset,
                    posting_len,
                    doc_count,
                    offset,
                    buf.len() as u32,
                ));
            }
        }

        Ok(TermInfo::external(posting_offset, posting_len, doc_count))
    }
}

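/// Deletes all files belonging to `segment_id`, ignoring missing files.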
pub async fn delete_segment<D: Directory + DirectoryWriter>(
    dir: &D,
    segment_id: SegmentId,
) -> Result<()> {
    let files = SegmentFiles::new(segment_id.0);
    let _ = dir.delete(&files.term_dict).await;
    let _ = dir.delete(&files.postings).await;
    let _ = dir.delete(&files.store).await;
    let _ = dir.delete(&files.meta).await;
    let _ = dir.delete(&files.vectors).await;
    let _ = dir.delete(&files.sparse).await;
    let _ = dir.delete(&files.positions).await;
    Ok(())
}