use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::{self, Cursor, Read, Write};

use super::config::WeightQuantization;
use crate::DocId;
use crate::structures::postings::TERMINATED;
use crate::structures::simd;
pub const BLOCK_SIZE: usize = 128;

/// Fixed-size header stored at the front of each serialized block.
#[derive(Debug, Clone, Copy)]
pub struct BlockHeader {
    pub count: u16,
    pub doc_id_bits: u8,
    pub ordinal_bits: u8,
    pub weight_quant: WeightQuantization,
    pub first_doc_id: DocId,
    pub max_weight: f32,
}

impl BlockHeader {
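    /// Serialized size in bytes: count (2) + bit widths (2) + quant (1)
    /// + padding (3) + first_doc_id (4) + max_weight (4).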
    pub const SIZE: usize = 16;

    pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
        w.write_u16::<LittleEndian>(self.count)?;
        w.write_u8(self.doc_id_bits)?;
        w.write_u8(self.ordinal_bits)?;
        w.write_u8(self.weight_quant as u8)?;
        w.write_u8(0)?; // padding
        w.write_u16::<LittleEndian>(0)?; // padding
        w.write_u32::<LittleEndian>(self.first_doc_id)?;
        w.write_f32::<LittleEndian>(self.max_weight)?;
        Ok(())
    }

    pub fn read<R: Read>(r: &mut R) -> io::Result<Self> {
        let count = r.read_u16::<LittleEndian>()?;
        let doc_id_bits = r.read_u8()?;
        let ordinal_bits = r.read_u8()?;
        let weight_quant_byte = r.read_u8()?;
        let _ = r.read_u8()?; // padding
        let _ = r.read_u16::<LittleEndian>()?; // padding
        let first_doc_id = r.read_u32::<LittleEndian>()?;
        let max_weight = r.read_f32::<LittleEndian>()?;

        let weight_quant = WeightQuantization::from_u8(weight_quant_byte)
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Invalid weight quant"))?;

        Ok(Self {
            count,
            doc_id_bits,
            ordinal_bits,
            weight_quant,
            first_doc_id,
            max_weight,
        })
    }
}

/// One compressed block: bit-packed doc-id deltas, bit-packed ordinals,
/// and quantized weights, each in its own byte buffer.
#[derive(Debug, Clone)]
pub struct SparseBlock {
    pub header: BlockHeader,
    pub doc_ids_data: Vec<u8>,
    pub ordinals_data: Vec<u8>,
    pub weights_data: Vec<u8>,
}

impl SparseBlock {
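    /// Builds a block from up to [`BLOCK_SIZE`] postings sorted by doc id.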
    pub fn from_postings(
        postings: &[(DocId, u16, f32)],
        weight_quant: WeightQuantization,
    ) -> io::Result<Self> {
        assert!(!postings.is_empty() && postings.len() <= BLOCK_SIZE);

        let count = postings.len();
        let first_doc_id = postings[0].0;

        // Delta-encode doc ids; the first entry is implicit (stored in the
        // header as first_doc_id), so its delta is forced to zero.
        let mut deltas = Vec::with_capacity(count);
        let mut prev = first_doc_id;
        for &(doc_id, _, _) in postings {
            deltas.push(doc_id.saturating_sub(prev));
            prev = doc_id;
        }
        deltas[0] = 0;

        let doc_id_bits = find_optimal_bit_width(&deltas[1..]);
        let ordinals: Vec<u16> = postings.iter().map(|(_, o, _)| *o).collect();
        let max_ordinal = ordinals.iter().copied().max().unwrap_or(0);
        let ordinal_bits = if max_ordinal == 0 {
            0
        } else {
            bits_needed_u16(max_ordinal)
        };

        let weights: Vec<f32> = postings.iter().map(|(_, _, w)| *w).collect();
        let max_weight = weights.iter().copied().fold(0.0f32, f32::max);

        let doc_ids_data = pack_bit_array(&deltas[1..], doc_id_bits);
        let ordinals_data = if ordinal_bits > 0 {
            pack_bit_array_u16(&ordinals, ordinal_bits)
        } else {
            Vec::new()
        };
        let weights_data = encode_weights(&weights, weight_quant)?;

        Ok(Self {
            header: BlockHeader {
                count: count as u16,
                doc_id_bits,
                ordinal_bits,
                weight_quant,
                first_doc_id,
                max_weight,
            },
            doc_ids_data,
            ordinals_data,
            weights_data,
        })
    }

    /// Reconstructs absolute doc ids by cumulatively summing the packed deltas.
    pub fn decode_doc_ids(&self) -> Vec<DocId> {
        let count = self.header.count as usize;
        let mut doc_ids = Vec::with_capacity(count);
        doc_ids.push(self.header.first_doc_id);

        if count > 1 {
            let deltas = unpack_bit_array(&self.doc_ids_data, self.header.doc_id_bits, count - 1);
            let mut prev = self.header.first_doc_id;
            for delta in deltas {
                prev += delta;
                doc_ids.push(prev);
            }
        }
        doc_ids
    }

    pub fn decode_ordinals(&self) -> Vec<u16> {
        let count = self.header.count as usize;
        if self.header.ordinal_bits == 0 {
            vec![0u16; count]
        } else {
            unpack_bit_array_u16(&self.ordinals_data, self.header.ordinal_bits, count)
        }
    }

    pub fn decode_weights(&self) -> Vec<f32> {
        decode_weights(
            &self.weights_data,
            self.header.weight_quant,
            self.header.count as usize,
        )
    }

    pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
        self.header.write(w)?;
        w.write_u16::<LittleEndian>(self.doc_ids_data.len() as u16)?;
        w.write_u16::<LittleEndian>(self.ordinals_data.len() as u16)?;
        w.write_u16::<LittleEndian>(self.weights_data.len() as u16)?;
        w.write_u16::<LittleEndian>(0)?; // padding
        w.write_all(&self.doc_ids_data)?;
        w.write_all(&self.ordinals_data)?;
        w.write_all(&self.weights_data)?;
        Ok(())
    }

    pub fn read<R: Read>(r: &mut R) -> io::Result<Self> {
        let header = BlockHeader::read(r)?;
        let doc_ids_len = r.read_u16::<LittleEndian>()? as usize;
        let ordinals_len = r.read_u16::<LittleEndian>()? as usize;
        let weights_len = r.read_u16::<LittleEndian>()? as usize;
        let _ = r.read_u16::<LittleEndian>()?; // padding

        let mut doc_ids_data = vec![0u8; doc_ids_len];
        r.read_exact(&mut doc_ids_data)?;
        let mut ordinals_data = vec![0u8; ordinals_len];
        r.read_exact(&mut ordinals_data)?;
        let mut weights_data = vec![0u8; weights_len];
        r.read_exact(&mut weights_data)?;

        Ok(Self {
            header,
            doc_ids_data,
            ordinals_data,
            weights_data,
        })
    }

    /// Returns a copy of the block with `first_doc_id` shifted by
    /// `doc_offset`. The packed deltas are relative, so the data buffers
    /// can be reused unchanged.
    pub fn with_doc_offset(&self, doc_offset: u32) -> Self {
        Self {
            header: BlockHeader {
                first_doc_id: self.header.first_doc_id + doc_offset,
                ..self.header
            },
            doc_ids_data: self.doc_ids_data.clone(),
            ordinals_data: self.ordinals_data.clone(),
            weights_data: self.weights_data.clone(),
        }
    }
}
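
/// A posting list stored as a sequence of independently decodable blocks.
///
/// A minimal usage sketch (paths assume this module's re-exports):
///
/// ```ignore
/// let postings = vec![(1u32, 0u16, 0.5f32), (7, 0, 1.25)];
/// let list = BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32)?;
/// let mut it = list.iterator();
/// assert_eq!(it.doc(), 1);
/// assert_eq!(it.seek(7), 7);
/// ```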
#[derive(Debug, Clone)]
pub struct BlockSparsePostingList {
    pub doc_count: u32,
    pub blocks: Vec<SparseBlock>,
}

impl BlockSparsePostingList {
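    /// Builds a list from postings sorted by doc id. Repeated doc ids are
    /// allowed (multi-valued fields); `doc_count` counts unique documents.
    /// The block size is clamped to a minimum of 16.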
    pub fn from_postings_with_block_size(
        postings: &[(DocId, u16, f32)],
        weight_quant: WeightQuantization,
        block_size: usize,
    ) -> io::Result<Self> {
        if postings.is_empty() {
            return Ok(Self {
                doc_count: 0,
                blocks: Vec::new(),
            });
        }

        let block_size = block_size.max(16);
        let mut blocks = Vec::new();
        for chunk in postings.chunks(block_size) {
            blocks.push(SparseBlock::from_postings(chunk, weight_quant)?);
        }

        // Count unique documents; multi-valued postings repeat a doc id
        // with different ordinals.
        let mut unique_docs = 1u32;
        for i in 1..postings.len() {
            if postings[i].0 != postings[i - 1].0 {
                unique_docs += 1;
            }
        }

        Ok(Self {
            doc_count: unique_docs,
            blocks,
        })
    }

    pub fn from_postings(
        postings: &[(DocId, u16, f32)],
        weight_quant: WeightQuantization,
    ) -> io::Result<Self> {
        Self::from_postings_with_block_size(postings, weight_quant, BLOCK_SIZE)
    }

    pub fn doc_count(&self) -> u32 {
        self.doc_count
    }

    pub fn num_blocks(&self) -> usize {
        self.blocks.len()
    }

    pub fn global_max_weight(&self) -> f32 {
        self.blocks
            .iter()
            .map(|b| b.header.max_weight)
            .fold(0.0f32, f32::max)
    }

    pub fn block_max_weight(&self, block_idx: usize) -> Option<f32> {
        self.blocks.get(block_idx).map(|b| b.header.max_weight)
    }

    /// Approximate in-memory footprint of the list in bytes.
    pub fn size_bytes(&self) -> usize {
        use std::mem::size_of;

        let header_size = size_of::<u32>() * 2;
        let blocks_size: usize = self
            .blocks
            .iter()
            .map(|b| {
                size_of::<BlockHeader>()
                    + b.doc_ids_data.len()
                    + b.ordinals_data.len()
                    + b.weights_data.len()
            })
            .sum();
        header_size + blocks_size
    }

    pub fn iterator(&self) -> BlockSparsePostingIterator<'_> {
        BlockSparsePostingIterator::new(self)
    }

    pub fn serialize<W: Write>(&self, w: &mut W) -> io::Result<()> {
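        // Layout: doc_count (u32) | global_max_weight (f32) | num_blocks (u32),
        // then one skip entry per block, then the concatenated block bytes.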
        use super::SparseSkipEntry;

        w.write_u32::<LittleEndian>(self.doc_count)?;
        w.write_f32::<LittleEndian>(self.global_max_weight())?;
        w.write_u32::<LittleEndian>(self.blocks.len() as u32)?;

        // Serialize each block up front so the skip entries can record
        // byte offsets and lengths.
        let mut block_bytes: Vec<Vec<u8>> = Vec::with_capacity(self.blocks.len());
        for block in &self.blocks {
            let mut buf = Vec::new();
            block.write(&mut buf)?;
            block_bytes.push(buf);
        }

        // Skip entries; offsets are relative to the start of the block data.
        let mut offset = 0u32;
        for (block, bytes) in self.blocks.iter().zip(block_bytes.iter()) {
            let doc_ids = block.decode_doc_ids();
            let first_doc = doc_ids.first().copied().unwrap_or(0);
            let last_doc = doc_ids.last().copied().unwrap_or(0);
            let length = bytes.len() as u32;

            let entry =
                SparseSkipEntry::new(first_doc, last_doc, offset, length, block.header.max_weight);
            entry.write(w)?;
            offset += length;
        }

        for bytes in block_bytes {
            w.write_all(&bytes)?;
        }

        Ok(())
    }

    pub fn deserialize<R: Read>(r: &mut R) -> io::Result<Self> {
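        // The skip entries only matter for random access; this sequential
        // reader consumes and discards them.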
        use super::SparseSkipEntry;

        let doc_count = r.read_u32::<LittleEndian>()?;
        let _global_max_weight = r.read_f32::<LittleEndian>()?;
        let num_blocks = r.read_u32::<LittleEndian>()? as usize;

        for _ in 0..num_blocks {
            let _ = SparseSkipEntry::read(r)?;
        }

        let mut blocks = Vec::with_capacity(num_blocks);
        for _ in 0..num_blocks {
            blocks.push(SparseBlock::read(r)?);
        }
        Ok(Self { doc_count, blocks })
    }

    /// Reads only the list header and skip entries, returning
    /// `(doc_count, global_max_weight, skip entries, header size in bytes)`.
    pub fn deserialize_header<R: Read>(
        r: &mut R,
    ) -> io::Result<(u32, f32, Vec<super::SparseSkipEntry>, usize)> {
        use super::SparseSkipEntry;

        let doc_count = r.read_u32::<LittleEndian>()?;
        let global_max_weight = r.read_f32::<LittleEndian>()?;
        let num_blocks = r.read_u32::<LittleEndian>()? as usize;

        let mut entries = Vec::with_capacity(num_blocks);
        for _ in 0..num_blocks {
            entries.push(SparseSkipEntry::read(r)?);
        }

        let header_size = 4 + 4 + 4 + num_blocks * SparseSkipEntry::SIZE;

        Ok((doc_count, global_max_weight, entries, header_size))
    }

    /// Decodes every posting in the list as `(doc_id, ordinal, weight)`.
    pub fn decode_all(&self) -> Vec<(DocId, u16, f32)> {
        let mut result = Vec::with_capacity(self.doc_count as usize);
        for block in &self.blocks {
            let doc_ids = block.decode_doc_ids();
            let ordinals = block.decode_ordinals();
            let weights = block.decode_weights();
            for i in 0..block.header.count as usize {
                result.push((doc_ids[i], ordinals[i], weights[i]));
            }
        }
        result
    }

    pub fn merge_with_offsets(lists: &[(&BlockSparsePostingList, u32)]) -> Self {
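        // Blocks are reused wholesale: only each header's first_doc_id is
        // rebased by the segment's doc offset, so no block is re-encoded.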
        if lists.is_empty() {
            return Self {
                doc_count: 0,
                blocks: Vec::new(),
            };
        }

        let total_blocks: usize = lists.iter().map(|(pl, _)| pl.blocks.len()).sum();
        let total_docs: u32 = lists.iter().map(|(pl, _)| pl.doc_count).sum();

        let mut merged_blocks = Vec::with_capacity(total_blocks);

        for (posting_list, doc_offset) in lists {
            for block in &posting_list.blocks {
                merged_blocks.push(block.with_doc_offset(*doc_offset));
            }
        }

        Self {
            doc_count: total_docs,
            blocks: merged_blocks,
        }
    }

    fn find_block(&self, target: DocId) -> Option<usize> {
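        // Binary search for the last block whose first_doc_id <= target;
        // if target precedes every block, start at block 0.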
        if self.blocks.is_empty() {
            return None;
        }
        let idx = self
            .blocks
            .partition_point(|b| b.header.first_doc_id <= target);
        if idx == 0 {
            Some(0)
        } else {
            Some(idx - 1)
        }
    }
}

/// Streaming iterator over a [`BlockSparsePostingList`] that decodes one
/// block at a time.
pub struct BlockSparsePostingIterator<'a> {
    posting_list: &'a BlockSparsePostingList,
    block_idx: usize,
    in_block_idx: usize,
    current_doc_ids: Vec<DocId>,
    current_weights: Vec<f32>,
    exhausted: bool,
}

impl<'a> BlockSparsePostingIterator<'a> {
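    /// Creates an iterator positioned on the first posting, or already
    /// exhausted if the list has no blocks.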
    fn new(posting_list: &'a BlockSparsePostingList) -> Self {
        let mut iter = Self {
            posting_list,
            block_idx: 0,
            in_block_idx: 0,
            current_doc_ids: Vec::new(),
            current_weights: Vec::new(),
            exhausted: posting_list.blocks.is_empty(),
        };
        if !iter.exhausted {
            iter.load_block(0);
        }
        iter
    }

    fn load_block(&mut self, block_idx: usize) {
        if let Some(block) = self.posting_list.blocks.get(block_idx) {
            self.current_doc_ids = block.decode_doc_ids();
            self.current_weights = block.decode_weights();
            self.block_idx = block_idx;
            self.in_block_idx = 0;
        }
    }

    pub fn doc(&self) -> DocId {
        if self.exhausted {
            TERMINATED
        } else {
            self.current_doc_ids
                .get(self.in_block_idx)
                .copied()
                .unwrap_or(TERMINATED)
        }
    }

    pub fn weight(&self) -> f32 {
        self.current_weights
            .get(self.in_block_idx)
            .copied()
            .unwrap_or(0.0)
    }

    /// Ordinal of the current posting; the block's ordinals are decoded
    /// on each call rather than cached.
    pub fn ordinal(&self) -> u16 {
        if let Some(block) = self.posting_list.blocks.get(self.block_idx) {
            let ordinals = block.decode_ordinals();
            ordinals.get(self.in_block_idx).copied().unwrap_or(0)
        } else {
            0
        }
    }

    pub fn advance(&mut self) -> DocId {
        if self.exhausted {
            return TERMINATED;
        }
        self.in_block_idx += 1;
        if self.in_block_idx >= self.current_doc_ids.len() {
            self.block_idx += 1;
            if self.block_idx >= self.posting_list.blocks.len() {
                self.exhausted = true;
            } else {
                self.load_block(self.block_idx);
            }
        }
        self.doc()
    }

    pub fn seek(&mut self, target: DocId) -> DocId {
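        // Fast path: if the target lies in the already-decoded block, scan
        // forward linearly. Otherwise binary-search for the right block and
        // scan within it.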
        if self.exhausted {
            return TERMINATED;
        }
        if self.doc() >= target {
            return self.doc();
        }

        if let Some(&last_doc) = self.current_doc_ids.last()
            && last_doc >= target
        {
            while !self.exhausted && self.doc() < target {
                self.in_block_idx += 1;
                if self.in_block_idx >= self.current_doc_ids.len() {
                    self.block_idx += 1;
                    if self.block_idx >= self.posting_list.blocks.len() {
                        self.exhausted = true;
                    } else {
                        self.load_block(self.block_idx);
                    }
                }
            }
            return self.doc();
        }

        if let Some(block_idx) = self.posting_list.find_block(target) {
            self.load_block(block_idx);
            while self.in_block_idx < self.current_doc_ids.len()
                && self.current_doc_ids[self.in_block_idx] < target
            {
                self.in_block_idx += 1;
            }
            if self.in_block_idx >= self.current_doc_ids.len() {
                self.block_idx += 1;
                if self.block_idx >= self.posting_list.blocks.len() {
                    self.exhausted = true;
                } else {
                    self.load_block(self.block_idx);
                }
            }
        } else {
            self.exhausted = true;
        }
        self.doc()
    }

    /// Advances directly to the first posting of the next block.
    pub fn skip_to_next_block(&mut self) -> DocId {
        if self.exhausted {
            return TERMINATED;
        }
        let next = self.block_idx + 1;
        if next >= self.posting_list.blocks.len() {
            self.exhausted = true;
            return TERMINATED;
        }
        self.load_block(next);
        self.doc()
    }

    pub fn is_exhausted(&self) -> bool {
        self.exhausted
    }

    pub fn current_block_max_weight(&self) -> f32 {
        self.posting_list
            .blocks
            .get(self.block_idx)
            .map(|b| b.header.max_weight)
            .unwrap_or(0.0)
    }

    pub fn current_block_max_contribution(&self, query_weight: f32) -> f32 {
        query_weight * self.current_block_max_weight()
    }
}

fn find_optimal_bit_width(values: &[u32]) -> u8 {
    if values.is_empty() {
        return 0;
    }
    let max_val = values.iter().copied().max().unwrap_or(0);
    simd::bits_needed(max_val)
}

fn bits_needed_u16(val: u16) -> u8 {
    if val == 0 {
        0
    } else {
        16 - val.leading_zeros() as u8
    }
}

fn pack_bit_array(values: &[u32], bits: u8) -> Vec<u8> {
    if bits == 0 || values.is_empty() {
        return Vec::new();
    }
    let total_bytes = (values.len() * bits as usize).div_ceil(8);
    let mut result = vec![0u8; total_bytes];
    // Compute the mask in u64 so that bits == 32 does not overflow the shift.
    let mask = ((1u64 << bits) - 1) as u32;
    let mut bit_pos = 0usize;
    for &val in values {
        pack_value(&mut result, bit_pos, val & mask, bits);
        bit_pos += bits as usize;
    }
    result
}

fn pack_bit_array_u16(values: &[u16], bits: u8) -> Vec<u8> {
    if bits == 0 || values.is_empty() {
        return Vec::new();
    }
    let total_bytes = (values.len() * bits as usize).div_ceil(8);
    let mut result = vec![0u8; total_bytes];
    let mut bit_pos = 0usize;
    for &val in values {
        pack_value(
            &mut result,
            bit_pos,
            (val as u32) & ((1u32 << bits) - 1),
            bits,
        );
        bit_pos += bits as usize;
    }
    result
}

#[inline]
fn pack_value(data: &mut [u8], bit_pos: usize, val: u32, bits: u8) {
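    // Writes the `bits` low-order bits of `val` at bit offset `bit_pos`,
    // LSB-first, spilling across byte boundaries as needed.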
    let mut remaining = bits as usize;
    let mut val = val;
    let mut byte = bit_pos / 8;
    let mut offset = bit_pos % 8;
    while remaining > 0 {
        let space = 8 - offset;
        let to_write = remaining.min(space);
        let mask = (1u32 << to_write) - 1;
        data[byte] |= ((val & mask) as u8) << offset;
        val >>= to_write;
        remaining -= to_write;
        byte += 1;
        offset = 0;
    }
}

fn unpack_bit_array(data: &[u8], bits: u8, count: usize) -> Vec<u32> {
    if bits == 0 || count == 0 {
        return vec![0; count];
    }
    let mut result = Vec::with_capacity(count);
    let mut bit_pos = 0usize;
    for _ in 0..count {
        result.push(unpack_value(data, bit_pos, bits));
        bit_pos += bits as usize;
    }
    result
}

fn unpack_bit_array_u16(data: &[u8], bits: u8, count: usize) -> Vec<u16> {
    if bits == 0 || count == 0 {
        return vec![0; count];
    }
    let mut result = Vec::with_capacity(count);
    let mut bit_pos = 0usize;
    for _ in 0..count {
        result.push(unpack_value(data, bit_pos, bits) as u16);
        bit_pos += bits as usize;
    }
    result
}

#[inline]
fn unpack_value(data: &[u8], bit_pos: usize, bits: u8) -> u32 {
    let mut val = 0u32;
    let mut remaining = bits as usize;
    let mut byte = bit_pos / 8;
    let mut offset = bit_pos % 8;
    let mut shift = 0;
    while remaining > 0 {
        let space = 8 - offset;
        let to_read = remaining.min(space);
        // Compute the mask in u16 so that to_read == 8 does not overflow
        // the u8 shift.
        let mask = ((1u16 << to_read) - 1) as u8;
        val |= (((data.get(byte).copied().unwrap_or(0) >> offset) & mask) as u32) << shift;
        remaining -= to_read;
        shift += to_read;
        byte += 1;
        offset = 0;
    }
    val
}

fn encode_weights(weights: &[f32], quant: WeightQuantization) -> io::Result<Vec<u8>> {
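    // Float32/Float16 store raw values; UInt8/UInt4 store a per-block
    // `scale` and `min` (two f32s) followed by the quantized values, with
    // UInt4 packing two values per byte (low nibble first).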
    let mut data = Vec::new();
    match quant {
        WeightQuantization::Float32 => {
            for &w in weights {
                data.write_f32::<LittleEndian>(w)?;
            }
        }
        WeightQuantization::Float16 => {
            use half::f16;
            for &w in weights {
                data.write_u16::<LittleEndian>(f16::from_f32(w).to_bits())?;
            }
        }
        WeightQuantization::UInt8 => {
            let min = weights.iter().copied().fold(f32::INFINITY, f32::min);
            let max = weights.iter().copied().fold(f32::NEG_INFINITY, f32::max);
            let range = max - min;
            let scale = if range < f32::EPSILON {
                1.0
            } else {
                range / 255.0
            };
            data.write_f32::<LittleEndian>(scale)?;
            data.write_f32::<LittleEndian>(min)?;
            for &w in weights {
                data.write_u8(((w - min) / scale).round() as u8)?;
            }
        }
        WeightQuantization::UInt4 => {
            let min = weights.iter().copied().fold(f32::INFINITY, f32::min);
            let max = weights.iter().copied().fold(f32::NEG_INFINITY, f32::max);
            let range = max - min;
            let scale = if range < f32::EPSILON {
                1.0
            } else {
                range / 15.0
            };
            data.write_f32::<LittleEndian>(scale)?;
            data.write_f32::<LittleEndian>(min)?;
            let mut i = 0;
            while i < weights.len() {
                let q1 = ((weights[i] - min) / scale).round() as u8 & 0x0F;
                let q2 = if i + 1 < weights.len() {
                    ((weights[i + 1] - min) / scale).round() as u8 & 0x0F
                } else {
                    0
                };
                data.write_u8((q2 << 4) | q1)?;
                i += 2;
            }
        }
    }
    Ok(data)
}

fn decode_weights(data: &[u8], quant: WeightQuantization, count: usize) -> Vec<f32> {
    let mut cursor = Cursor::new(data);
    let mut weights = Vec::with_capacity(count);
    match quant {
        WeightQuantization::Float32 => {
            for _ in 0..count {
                weights.push(cursor.read_f32::<LittleEndian>().unwrap_or(0.0));
            }
        }
        WeightQuantization::Float16 => {
            use half::f16;
            for _ in 0..count {
                let bits = cursor.read_u16::<LittleEndian>().unwrap_or(0);
                weights.push(f16::from_bits(bits).to_f32());
            }
        }
        WeightQuantization::UInt8 => {
            let scale = cursor.read_f32::<LittleEndian>().unwrap_or(1.0);
            let min = cursor.read_f32::<LittleEndian>().unwrap_or(0.0);
            for _ in 0..count {
                let q = cursor.read_u8().unwrap_or(0);
                weights.push(q as f32 * scale + min);
            }
        }
        WeightQuantization::UInt4 => {
            let scale = cursor.read_f32::<LittleEndian>().unwrap_or(1.0);
            let min = cursor.read_f32::<LittleEndian>().unwrap_or(0.0);
            let mut i = 0;
            while i < count {
                let byte = cursor.read_u8().unwrap_or(0);
                weights.push((byte & 0x0F) as f32 * scale + min);
                i += 1;
                if i < count {
                    weights.push((byte >> 4) as f32 * scale + min);
                    i += 1;
                }
            }
        }
    }
    weights
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_block_roundtrip() {
        let postings = vec![
            (10u32, 0u16, 1.5f32),
            (15, 0, 2.0),
            (20, 1, 0.5),
            (100, 0, 3.0),
        ];
        let block = SparseBlock::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert_eq!(block.decode_doc_ids(), vec![10, 15, 20, 100]);
        assert_eq!(block.decode_ordinals(), vec![0, 0, 1, 0]);
        let weights = block.decode_weights();
        assert!((weights[0] - 1.5).abs() < 0.01);
    }

    #[test]
    fn test_posting_list() {
        let postings: Vec<(DocId, u16, f32)> =
            (0..300).map(|i| (i * 2, 0, i as f32 * 0.1)).collect();
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert_eq!(list.doc_count(), 300);
        assert_eq!(list.num_blocks(), 3);

        let mut iter = list.iterator();
        assert_eq!(iter.doc(), 0);
        iter.advance();
        assert_eq!(iter.doc(), 2);
    }

    #[test]
    fn test_serialization() {
        let postings = vec![(1u32, 0u16, 0.5f32), (10, 1, 1.5), (100, 0, 2.5)];
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::UInt8).unwrap();

        let mut buf = Vec::new();
        list.serialize(&mut buf).unwrap();
        let list2 = BlockSparsePostingList::deserialize(&mut Cursor::new(&buf)).unwrap();

        assert_eq!(list.doc_count(), list2.doc_count());
    }

    #[test]
    fn test_seek() {
        let postings: Vec<(DocId, u16, f32)> = (0..500).map(|i| (i * 3, 0, i as f32)).collect();
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        let mut iter = list.iterator();
        assert_eq!(iter.seek(300), 300);
        assert_eq!(iter.seek(301), 303);
        assert_eq!(iter.seek(2000), TERMINATED);
    }

    #[test]
    fn test_merge_with_offsets() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 1.0), (5, 0, 2.0), (10, 1, 3.0)];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 4.0), (3, 1, 5.0), (7, 0, 6.0)];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        assert_eq!(merged.doc_count(), 6);

        let decoded = merged.decode_all();
        assert_eq!(decoded.len(), 6);

        // Segment 1 doc ids are unchanged.
        assert_eq!(decoded[0].0, 0);
        assert_eq!(decoded[1].0, 5);
        assert_eq!(decoded[2].0, 10);

        // Segment 2 doc ids are shifted by the offset of 100.
        assert_eq!(decoded[3].0, 100);
        assert_eq!(decoded[4].0, 103);
        assert_eq!(decoded[5].0, 107);

        // Weights and ordinals survive the merge.
        assert!((decoded[0].2 - 1.0).abs() < 0.01);
        assert!((decoded[3].2 - 4.0).abs() < 0.01);

        assert_eq!(decoded[2].1, 1);
        assert_eq!(decoded[4].1, 1);
    }

    #[test]
    fn test_merge_with_offsets_multi_block() {
        let postings1: Vec<(DocId, u16, f32)> = (0..200).map(|i| (i * 2, 0, i as f32)).collect();
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();
        assert!(list1.num_blocks() > 1, "Should have multiple blocks");

        let postings2: Vec<(DocId, u16, f32)> = (0..150).map(|i| (i * 3, 1, i as f32)).collect();
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 1000)]);

        assert_eq!(merged.doc_count(), 350);
        assert_eq!(merged.num_blocks(), list1.num_blocks() + list2.num_blocks());

        let mut iter = merged.iterator();

        assert_eq!(iter.doc(), 0);

        let doc = iter.seek(1000);
        assert_eq!(doc, 1000);
        iter.advance();
        assert_eq!(iter.doc(), 1003);
    }

    #[test]
    fn test_merge_with_offsets_serialize_roundtrip() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 1.0), (5, 0, 2.0), (10, 1, 3.0)];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 4.0), (3, 1, 5.0), (7, 0, 6.0)];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();

        let mut cursor = std::io::Cursor::new(&bytes);
        let loaded = BlockSparsePostingList::deserialize(&mut cursor).unwrap();

        let decoded = loaded.decode_all();
        assert_eq!(decoded.len(), 6);

        assert_eq!(decoded[0].0, 0);
        assert_eq!(decoded[1].0, 5);
        assert_eq!(decoded[2].0, 10);

        assert_eq!(decoded[3].0, 100, "First doc of seg2 should be 0+100=100");
        assert_eq!(decoded[4].0, 103, "Second doc of seg2 should be 3+100=103");
        assert_eq!(decoded[5].0, 107, "Third doc of seg2 should be 7+100=107");

        let mut iter = loaded.iterator();
        assert_eq!(iter.doc(), 0);
        iter.advance();
        assert_eq!(iter.doc(), 5);
        iter.advance();
        assert_eq!(iter.doc(), 10);
        iter.advance();
        assert_eq!(iter.doc(), 100);
        iter.advance();
        assert_eq!(iter.doc(), 103);
        iter.advance();
        assert_eq!(iter.doc(), 107);
    }

    #[test]
    fn test_merge_seek_after_roundtrip() {
        let postings1: Vec<(DocId, u16, f32)> = (0..200).map(|i| (i * 2, 0, 1.0)).collect();
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = (0..150).map(|i| (i * 3, 0, 2.0)).collect();
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 1000)]);

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();
        let loaded =
            BlockSparsePostingList::deserialize(&mut std::io::Cursor::new(&bytes)).unwrap();

        let mut iter = loaded.iterator();

        let doc = iter.seek(100);
        assert_eq!(doc, 100, "Seek to 100 in segment 1");

        let doc = iter.seek(1000);
        assert_eq!(doc, 1000, "Seek to 1000 (first doc of segment 2)");

        let doc = iter.seek(1050);
        assert!(
            doc >= 1050,
            "Seek to 1050 should find doc >= 1050, got {}",
            doc
        );

        let doc = iter.seek(500);
        assert!(
            doc >= 1050,
            "Seek backwards should not go back, got {}",
            doc
        );

        let mut iter2 = loaded.iterator();

        let mut count = 0;
        let mut prev_doc = 0;
        while iter2.doc() != super::TERMINATED {
            let current = iter2.doc();
            if count > 0 {
                assert!(
                    current > prev_doc,
                    "Docs should be monotonically increasing: {} vs {}",
                    prev_doc,
                    current
                );
            }
            prev_doc = current;
            iter2.advance();
            count += 1;
        }
        assert_eq!(count, 350, "Should have 350 total docs");
    }

    #[test]
    fn test_doc_count_multi_value() {
        // Three unique documents, each with one or more ordinals.
        let postings: Vec<(DocId, u16, f32)> = vec![
            (0, 0, 1.0),
            (0, 1, 1.5),
            (0, 2, 2.0),
            (5, 0, 3.0),
            (5, 1, 3.5),
            (10, 0, 4.0),
        ];
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert_eq!(list.doc_count(), 3);

        let decoded = list.decode_all();
        assert_eq!(decoded.len(), 6);
    }

    #[test]
    fn test_zero_copy_merge_patches_first_doc_id() {
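        // Simulates a zero-copy segment merge: concatenate the serialized
        // bytes of two lists, rewrite the skip entries, and patch each
        // block header's first_doc_id in place instead of re-encoding.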
        use crate::structures::SparseSkipEntry;

        let postings1: Vec<(DocId, u16, f32)> = (0..200).map(|i| (i * 2, 0, i as f32)).collect();
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();
        assert!(list1.num_blocks() > 1);

        let postings2: Vec<(DocId, u16, f32)> = (0..150).map(|i| (i * 3, 1, i as f32)).collect();
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let mut bytes1 = Vec::new();
        list1.serialize(&mut bytes1).unwrap();
        let mut bytes2 = Vec::new();
        list2.serialize(&mut bytes2).unwrap();

        // Splits a serialized list into (doc_count, global_max, skip entries, block data).
        fn parse_raw(data: &[u8]) -> (u32, f32, Vec<SparseSkipEntry>, &[u8]) {
            let doc_count = u32::from_le_bytes(data[0..4].try_into().unwrap());
            let global_max = f32::from_le_bytes(data[4..8].try_into().unwrap());
            let num_blocks = u32::from_le_bytes(data[8..12].try_into().unwrap()) as usize;
            let mut pos = 12;
            let mut skip = Vec::new();
            for _ in 0..num_blocks {
                let first_doc = u32::from_le_bytes(data[pos..pos + 4].try_into().unwrap());
                let last_doc = u32::from_le_bytes(data[pos + 4..pos + 8].try_into().unwrap());
                let offset = u32::from_le_bytes(data[pos + 8..pos + 12].try_into().unwrap());
                let length = u32::from_le_bytes(data[pos + 12..pos + 16].try_into().unwrap());
                let max_w = f32::from_le_bytes(data[pos + 16..pos + 20].try_into().unwrap());
                skip.push(SparseSkipEntry::new(
                    first_doc, last_doc, offset, length, max_w,
                ));
                pos += 20;
            }
            (doc_count, global_max, skip, &data[pos..])
        }

        let (dc1, gm1, skip1, raw1) = parse_raw(&bytes1);
        let (dc2, gm2, skip2, raw2) = parse_raw(&bytes2);

        let doc_offset: u32 = 1000;
        let total_docs = dc1 + dc2;
        let global_max = gm1.max(gm2);
        let total_blocks = (skip1.len() + skip2.len()) as u32;

        let mut output = Vec::new();
        output.extend_from_slice(&total_docs.to_le_bytes());
        output.extend_from_slice(&global_max.to_le_bytes());
        output.extend_from_slice(&total_blocks.to_le_bytes());

        // Rewrite skip entries: segment 2 doc ids and block offsets shift.
        let mut block_data_offset = 0u32;
        for entry in &skip1 {
            let adjusted = SparseSkipEntry::new(
                entry.first_doc,
                entry.last_doc,
                block_data_offset + entry.offset,
                entry.length,
                entry.max_weight,
            );
            adjusted.write(&mut output).unwrap();
        }
        if let Some(last) = skip1.last() {
            block_data_offset += last.offset + last.length;
        }
        for entry in &skip2 {
            let adjusted = SparseSkipEntry::new(
                entry.first_doc + doc_offset,
                entry.last_doc + doc_offset,
                block_data_offset + entry.offset,
                entry.length,
                entry.max_weight,
            );
            adjusted.write(&mut output).unwrap();
        }

        output.extend_from_slice(raw1);

        // Patch first_doc_id (at byte 8 of each block header) in segment 2.
        const FIRST_DOC_ID_OFFSET: usize = 8;
        let mut buf2 = raw2.to_vec();
        for entry in &skip2 {
            let off = entry.offset as usize + FIRST_DOC_ID_OFFSET;
            if off + 4 <= buf2.len() {
                let old = u32::from_le_bytes(buf2[off..off + 4].try_into().unwrap());
                let patched = (old + doc_offset).to_le_bytes();
                buf2[off..off + 4].copy_from_slice(&patched);
            }
        }
        output.extend_from_slice(&buf2);

        let loaded = BlockSparsePostingList::deserialize(&mut Cursor::new(&output)).unwrap();
        assert_eq!(loaded.doc_count(), 350);

        let mut iter = loaded.iterator();

        assert_eq!(iter.doc(), 0);
        let doc = iter.seek(100);
        assert_eq!(doc, 100);
        let doc = iter.seek(398);
        assert_eq!(doc, 398);

        let doc = iter.seek(1000);
        assert_eq!(doc, 1000, "First doc of segment 2 should be 1000");
        iter.advance();
        assert_eq!(iter.doc(), 1003, "Second doc of segment 2 should be 1003");
        let doc = iter.seek(1447);
        assert_eq!(doc, 1447, "Last doc of segment 2 should be 1447");

        iter.advance();
        assert_eq!(iter.doc(), super::TERMINATED);

        // The zero-copy result must match a reference merge exactly.
        let reference =
            BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, doc_offset)]);
        let mut ref_iter = reference.iterator();
        let mut zc_iter = loaded.iterator();
        while ref_iter.doc() != super::TERMINATED {
            assert_eq!(
                ref_iter.doc(),
                zc_iter.doc(),
                "Zero-copy and reference merge should produce identical doc_ids"
            );
            assert!(
                (ref_iter.weight() - zc_iter.weight()).abs() < 0.01,
                "Weights should match: {} vs {}",
                ref_iter.weight(),
                zc_iter.weight()
            );
            ref_iter.advance();
            zc_iter.advance();
        }
        assert_eq!(zc_iter.doc(), super::TERMINATED);
    }

    #[test]
    fn test_doc_count_single_value() {
        let postings: Vec<(DocId, u16, f32)> =
            vec![(0, 0, 1.0), (5, 0, 2.0), (10, 0, 3.0), (15, 0, 4.0)];
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert_eq!(list.doc_count(), 4);
    }

    #[test]
    fn test_doc_count_multi_value_serialization_roundtrip() {
        let postings: Vec<(DocId, u16, f32)> =
            vec![(0, 0, 1.0), (0, 1, 1.5), (5, 0, 2.0), (5, 1, 2.5)];
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();
        assert_eq!(list.doc_count(), 2);

        let mut buf = Vec::new();
        list.serialize(&mut buf).unwrap();
        let loaded = BlockSparsePostingList::deserialize(&mut Cursor::new(&buf)).unwrap();
        assert_eq!(loaded.doc_count(), 2);
    }

    #[test]
    fn test_merge_preserves_weights_and_ordinals() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 1.5), (5, 1, 2.5), (10, 2, 3.5)];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 4.5), (3, 1, 5.5), (7, 3, 6.5)];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();
        let loaded =
            BlockSparsePostingList::deserialize(&mut std::io::Cursor::new(&bytes)).unwrap();

        let mut iter = loaded.iterator();

        assert_eq!(iter.doc(), 0);
        assert!(
            (iter.weight() - 1.5).abs() < 0.01,
            "Weight should be 1.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 0);

        iter.advance();
        assert_eq!(iter.doc(), 5);
        assert!(
            (iter.weight() - 2.5).abs() < 0.01,
            "Weight should be 2.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 1);

        iter.advance();
        assert_eq!(iter.doc(), 10);
        assert!(
            (iter.weight() - 3.5).abs() < 0.01,
            "Weight should be 3.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 2);

        iter.advance();
        assert_eq!(iter.doc(), 100);
        assert!(
            (iter.weight() - 4.5).abs() < 0.01,
            "Weight should be 4.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 0);

        iter.advance();
        assert_eq!(iter.doc(), 103);
        assert!(
            (iter.weight() - 5.5).abs() < 0.01,
            "Weight should be 5.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 1);

        iter.advance();
        assert_eq!(iter.doc(), 107);
        assert!(
            (iter.weight() - 6.5).abs() < 0.01,
            "Weight should be 6.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 3);

        iter.advance();
        assert_eq!(iter.doc(), super::TERMINATED);
    }

    #[test]
    fn test_merge_global_max_weight() {
        let postings1: Vec<(DocId, u16, f32)> = vec![
            (0, 0, 3.0),
            (1, 0, 7.0), // max weight across both lists
            (2, 0, 2.0),
        ];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![
            (0, 0, 5.0),
            (1, 0, 4.0),
            (2, 0, 6.0), // max weight in list2
        ];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        assert!((list1.global_max_weight() - 7.0).abs() < 0.01);
        assert!((list2.global_max_weight() - 6.0).abs() < 0.01);

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        assert!(
            (merged.global_max_weight() - 7.0).abs() < 0.01,
            "Global max should be 7.0, got {}",
            merged.global_max_weight()
        );

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();
        let loaded =
            BlockSparsePostingList::deserialize(&mut std::io::Cursor::new(&bytes)).unwrap();

        assert!(
            (loaded.global_max_weight() - 7.0).abs() < 0.01,
            "After roundtrip, global max should still be 7.0, got {}",
            loaded.global_max_weight()
        );
    }

    #[test]
    fn test_scoring_simulation_after_merge() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 0.5), (5, 0, 0.8)];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 0.6), (3, 0, 0.9)];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();
        let loaded =
            BlockSparsePostingList::deserialize(&mut std::io::Cursor::new(&bytes)).unwrap();

        // Score each posting as query_weight * stored weight.
        let query_weight = 2.0f32;
        let mut iter = loaded.iterator();

        assert_eq!(iter.doc(), 0);
        let score = query_weight * iter.weight();
        assert!(
            (score - 1.0).abs() < 0.01,
            "Doc 0 score should be 1.0, got {}",
            score
        );

        iter.advance();
        assert_eq!(iter.doc(), 5);
        let score = query_weight * iter.weight();
        assert!(
            (score - 1.6).abs() < 0.01,
            "Doc 5 score should be 1.6, got {}",
            score
        );

        iter.advance();
        assert_eq!(iter.doc(), 100);
        let score = query_weight * iter.weight();
        assert!(
            (score - 1.2).abs() < 0.01,
            "Doc 100 score should be 1.2, got {}",
            score
        );

        iter.advance();
        assert_eq!(iter.doc(), 103);
        let score = query_weight * iter.weight();
        assert!(
            (score - 1.8).abs() < 0.01,
            "Doc 103 score should be 1.8, got {}",
            score
        );
    }
}