use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::{self, Cursor, Read, Write};

use super::config::WeightQuantization;
use crate::DocId;
use crate::structures::postings::TERMINATED;
use crate::structures::simd;

/// Number of postings per block; the final block of a list may hold fewer.
pub const BLOCK_SIZE: usize = 128;

/// Fixed-size header describing one encoded block.
#[derive(Debug, Clone, Copy)]
pub struct BlockHeader {
    pub count: u16,
    pub doc_id_bits: u8,
    pub ordinal_bits: u8,
    pub weight_quant: WeightQuantization,
    pub first_doc_id: DocId,
    pub max_weight: f32,
}

impl BlockHeader {
    /// Serialized size in bytes: the fields below plus 3 bytes of padding.
    pub const SIZE: usize = 16;

    pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
        w.write_u16::<LittleEndian>(self.count)?;
        w.write_u8(self.doc_id_bits)?;
        w.write_u8(self.ordinal_bits)?;
        w.write_u8(self.weight_quant as u8)?;
        // Padding to keep the header at a fixed SIZE bytes.
        w.write_u8(0)?;
        w.write_u16::<LittleEndian>(0)?;
        w.write_u32::<LittleEndian>(self.first_doc_id)?;
        w.write_f32::<LittleEndian>(self.max_weight)?;
        Ok(())
    }

    pub fn read<R: Read>(r: &mut R) -> io::Result<Self> {
        let count = r.read_u16::<LittleEndian>()?;
        let doc_id_bits = r.read_u8()?;
        let ordinal_bits = r.read_u8()?;
        let weight_quant_byte = r.read_u8()?;
        // Skip padding.
        let _ = r.read_u8()?;
        let _ = r.read_u16::<LittleEndian>()?;
        let first_doc_id = r.read_u32::<LittleEndian>()?;
        let max_weight = r.read_f32::<LittleEndian>()?;

        let weight_quant = WeightQuantization::from_u8(weight_quant_byte)
            .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Invalid weight quant"))?;

        Ok(Self {
            count,
            doc_id_bits,
            ordinal_bits,
            weight_quant,
            first_doc_id,
            max_weight,
        })
    }
}

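/// One compressed block of postings. Doc ids are stored as bit-packed deltas
/// against `header.first_doc_id`, ordinals as a bit-packed array, and weights
/// in the block's quantization format.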
#[derive(Debug, Clone)]
pub struct SparseBlock {
    pub header: BlockHeader,
    pub doc_ids_data: Vec<u8>,
    pub ordinals_data: Vec<u8>,
    pub weights_data: Vec<u8>,
}

impl SparseBlock {
    pub fn from_postings(
        postings: &[(DocId, u16, f32)],
        weight_quant: WeightQuantization,
    ) -> io::Result<Self> {
        assert!(!postings.is_empty() && postings.len() <= BLOCK_SIZE);

        let count = postings.len();
        let first_doc_id = postings[0].0;

        // Delta-encode doc ids. The first id lives in the header, so only
        // deltas[1..] are packed; deltas[0] is already zero, the assignment
        // below just makes that explicit.
        let mut deltas = Vec::with_capacity(count);
        let mut prev = first_doc_id;
        for &(doc_id, _, _) in postings {
            deltas.push(doc_id.saturating_sub(prev));
            prev = doc_id;
        }
        deltas[0] = 0;

        let doc_id_bits = find_optimal_bit_width(&deltas[1..]);
        let ordinals: Vec<u16> = postings.iter().map(|(_, o, _)| *o).collect();
        let max_ordinal = ordinals.iter().copied().max().unwrap_or(0);
        let ordinal_bits = if max_ordinal == 0 {
            0
        } else {
            bits_needed_u16(max_ordinal)
        };

        let weights: Vec<f32> = postings.iter().map(|(_, _, w)| *w).collect();
        let max_weight = weights.iter().copied().fold(0.0f32, f32::max);

        let doc_ids_data = pack_bit_array(&deltas[1..], doc_id_bits);
        let ordinals_data = if ordinal_bits > 0 {
            pack_bit_array_u16(&ordinals, ordinal_bits)
        } else {
            Vec::new()
        };
        let weights_data = encode_weights(&weights, weight_quant)?;

        Ok(Self {
            header: BlockHeader {
                count: count as u16,
                doc_id_bits,
                ordinal_bits,
                weight_quant,
                first_doc_id,
                max_weight,
            },
            doc_ids_data,
            ordinals_data,
            weights_data,
        })
    }

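    /// Decodes the block's absolute doc ids by prefix-summing the packed deltas.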
    pub fn decode_doc_ids(&self) -> Vec<DocId> {
        let count = self.header.count as usize;
        let mut doc_ids = Vec::with_capacity(count);
        doc_ids.push(self.header.first_doc_id);

        if count > 1 {
            let deltas = unpack_bit_array(&self.doc_ids_data, self.header.doc_id_bits, count - 1);
            let mut prev = self.header.first_doc_id;
            for delta in deltas {
                prev += delta;
                doc_ids.push(prev);
            }
        }
        doc_ids
    }

    pub fn decode_ordinals(&self) -> Vec<u16> {
        let count = self.header.count as usize;
        if self.header.ordinal_bits == 0 {
            vec![0u16; count]
        } else {
            unpack_bit_array_u16(&self.ordinals_data, self.header.ordinal_bits, count)
        }
    }

    pub fn decode_weights(&self) -> Vec<f32> {
        decode_weights(
            &self.weights_data,
            self.header.weight_quant,
            self.header.count as usize,
        )
    }

    pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
        self.header.write(w)?;
        w.write_u16::<LittleEndian>(self.doc_ids_data.len() as u16)?;
        w.write_u16::<LittleEndian>(self.ordinals_data.len() as u16)?;
        w.write_u16::<LittleEndian>(self.weights_data.len() as u16)?;
        w.write_u16::<LittleEndian>(0)?;
        w.write_all(&self.doc_ids_data)?;
        w.write_all(&self.ordinals_data)?;
        w.write_all(&self.weights_data)?;
        Ok(())
    }

    pub fn read<R: Read>(r: &mut R) -> io::Result<Self> {
        let header = BlockHeader::read(r)?;
        let doc_ids_len = r.read_u16::<LittleEndian>()? as usize;
        let ordinals_len = r.read_u16::<LittleEndian>()? as usize;
        let weights_len = r.read_u16::<LittleEndian>()? as usize;
        let _ = r.read_u16::<LittleEndian>()?;

        let mut doc_ids_data = vec![0u8; doc_ids_len];
        r.read_exact(&mut doc_ids_data)?;
        let mut ordinals_data = vec![0u8; ordinals_len];
        r.read_exact(&mut ordinals_data)?;
        let mut weights_data = vec![0u8; weights_len];
        r.read_exact(&mut weights_data)?;

        Ok(Self {
            header,
            doc_ids_data,
            ordinals_data,
            weights_data,
        })
    }

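    /// Returns a copy of this block with `first_doc_id` rebased by
    /// `doc_offset`. The packed payloads are reused verbatim, since deltas
    /// within a block are unaffected by a uniform shift.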
    pub fn with_doc_offset(&self, doc_offset: u32) -> Self {
        Self {
            header: BlockHeader {
                first_doc_id: self.header.first_doc_id + doc_offset,
                ..self.header
            },
            doc_ids_data: self.doc_ids_data.clone(),
            ordinals_data: self.ordinals_data.clone(),
            weights_data: self.weights_data.clone(),
        }
    }
}

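/// A sparse posting list stored as a sequence of compressed blocks. Per-block
/// `max_weight` metadata supports block-level upper bounds during scoring.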
#[derive(Debug, Clone)]
pub struct BlockSparsePostingList {
    pub doc_count: u32,
    pub blocks: Vec<SparseBlock>,
}

impl BlockSparsePostingList {
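    /// Builds a posting list from postings sorted by doc id, chunking them
    /// into blocks of `block_size` (clamped below to 16).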
    pub fn from_postings_with_block_size(
        postings: &[(DocId, u16, f32)],
        weight_quant: WeightQuantization,
        block_size: usize,
    ) -> io::Result<Self> {
        if postings.is_empty() {
            return Ok(Self {
                doc_count: 0,
                blocks: Vec::new(),
            });
        }

        // Guard against degenerate block sizes.
        let block_size = block_size.max(16);
        let mut blocks = Vec::new();
        for chunk in postings.chunks(block_size) {
            blocks.push(SparseBlock::from_postings(chunk, weight_quant)?);
        }

        Ok(Self {
            doc_count: postings.len() as u32,
            blocks,
        })
    }

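    /// Builds a posting list with the default `BLOCK_SIZE`.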
    pub fn from_postings(
        postings: &[(DocId, u16, f32)],
        weight_quant: WeightQuantization,
    ) -> io::Result<Self> {
        Self::from_postings_with_block_size(postings, weight_quant, BLOCK_SIZE)
    }

    pub fn doc_count(&self) -> u32 {
        self.doc_count
    }

    pub fn num_blocks(&self) -> usize {
        self.blocks.len()
    }

    pub fn global_max_weight(&self) -> f32 {
        self.blocks
            .iter()
            .map(|b| b.header.max_weight)
            .fold(0.0f32, f32::max)
    }

    pub fn block_max_weight(&self, block_idx: usize) -> Option<f32> {
        self.blocks.get(block_idx).map(|b| b.header.max_weight)
    }

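    /// Approximate size in bytes (struct-level block headers plus payload
    /// arrays; the per-block length prefixes written by `SparseBlock::write`
    /// are not counted).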
    pub fn size_bytes(&self) -> usize {
        use std::mem::size_of;

        // doc_count + block count.
        let header_size = size_of::<u32>() * 2;
        let blocks_size: usize = self
            .blocks
            .iter()
            .map(|b| {
                size_of::<BlockHeader>()
                    + b.doc_ids_data.len()
                    + b.ordinals_data.len()
                    + b.weights_data.len()
            })
            .sum();
        header_size + blocks_size
    }

    pub fn iterator(&self) -> BlockSparsePostingIterator<'_> {
        BlockSparsePostingIterator::new(self)
    }

    pub fn serialize<W: Write>(&self, w: &mut W) -> io::Result<()> {
        w.write_u32::<LittleEndian>(self.doc_count)?;
        w.write_u32::<LittleEndian>(self.blocks.len() as u32)?;
        for block in &self.blocks {
            block.write(w)?;
        }
        Ok(())
    }

    pub fn deserialize<R: Read>(r: &mut R) -> io::Result<Self> {
        let doc_count = r.read_u32::<LittleEndian>()?;
        let num_blocks = r.read_u32::<LittleEndian>()? as usize;
        let mut blocks = Vec::with_capacity(num_blocks);
        for _ in 0..num_blocks {
            blocks.push(SparseBlock::read(r)?);
        }
        Ok(Self { doc_count, blocks })
    }

    pub fn decode_all(&self) -> Vec<(DocId, u16, f32)> {
        let mut result = Vec::with_capacity(self.doc_count as usize);
        for block in &self.blocks {
            let doc_ids = block.decode_doc_ids();
            let ordinals = block.decode_ordinals();
            let weights = block.decode_weights();
            for i in 0..block.header.count as usize {
                result.push((doc_ids[i], ordinals[i], weights[i]));
            }
        }
        result
    }

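    /// Merges posting lists from multiple segments by concatenating their
    /// blocks, rebasing each list's doc ids by its paired offset. Block
    /// payloads are reused without re-encoding; callers must supply offsets
    /// that keep doc ids strictly increasing across the inputs.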
    pub fn merge_with_offsets(lists: &[(&BlockSparsePostingList, u32)]) -> Self {
        if lists.is_empty() {
            return Self {
                doc_count: 0,
                blocks: Vec::new(),
            };
        }

        let total_blocks: usize = lists.iter().map(|(pl, _)| pl.blocks.len()).sum();
        let total_docs: u32 = lists.iter().map(|(pl, _)| pl.doc_count).sum();

        let mut merged_blocks = Vec::with_capacity(total_blocks);

        for (posting_list, doc_offset) in lists {
            for block in &posting_list.blocks {
                merged_blocks.push(block.with_doc_offset(*doc_offset));
            }
        }

        Self {
            doc_count: total_docs,
            blocks: merged_blocks,
        }
    }

    /// Binary search for the first block whose last doc id is >= `target`.
    /// Candidate blocks are decoded to learn their last doc id.
    fn find_block(&self, target: DocId) -> Option<usize> {
        let mut lo = 0;
        let mut hi = self.blocks.len();
        while lo < hi {
            let mid = lo + (hi - lo) / 2;
            let block = &self.blocks[mid];
            let doc_ids = block.decode_doc_ids();
            let last_doc = doc_ids.last().copied().unwrap_or(block.header.first_doc_id);
            if last_doc < target {
                lo = mid + 1;
            } else {
                hi = mid;
            }
        }
        if lo < self.blocks.len() {
            Some(lo)
        } else {
            None
        }
    }
}

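/// Cursor over a `BlockSparsePostingList`. Decodes one block at a time and
/// exposes the block-max weight of the current block for score-bound pruning.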
pub struct BlockSparsePostingIterator<'a> {
    posting_list: &'a BlockSparsePostingList,
    block_idx: usize,
    in_block_idx: usize,
    current_doc_ids: Vec<DocId>,
    current_weights: Vec<f32>,
    exhausted: bool,
}

impl<'a> BlockSparsePostingIterator<'a> {
    fn new(posting_list: &'a BlockSparsePostingList) -> Self {
        let mut iter = Self {
            posting_list,
            block_idx: 0,
            in_block_idx: 0,
            current_doc_ids: Vec::new(),
            current_weights: Vec::new(),
            exhausted: posting_list.blocks.is_empty(),
        };
        if !iter.exhausted {
            iter.load_block(0);
        }
        iter
    }

    fn load_block(&mut self, block_idx: usize) {
        if let Some(block) = self.posting_list.blocks.get(block_idx) {
            self.current_doc_ids = block.decode_doc_ids();
            self.current_weights = block.decode_weights();
            self.block_idx = block_idx;
            self.in_block_idx = 0;
        }
    }

    pub fn doc(&self) -> DocId {
        if self.exhausted {
            TERMINATED
        } else {
            self.current_doc_ids
                .get(self.in_block_idx)
                .copied()
                .unwrap_or(TERMINATED)
        }
    }

    pub fn weight(&self) -> f32 {
        self.current_weights
            .get(self.in_block_idx)
            .copied()
            .unwrap_or(0.0)
    }

    pub fn ordinal(&self) -> u16 {
        // Ordinals are not cached per block; this re-decodes them on each call.
        if let Some(block) = self.posting_list.blocks.get(self.block_idx) {
            let ordinals = block.decode_ordinals();
            ordinals.get(self.in_block_idx).copied().unwrap_or(0)
        } else {
            0
        }
    }

    pub fn advance(&mut self) -> DocId {
        if self.exhausted {
            return TERMINATED;
        }
        self.in_block_idx += 1;
        if self.in_block_idx >= self.current_doc_ids.len() {
            self.block_idx += 1;
            if self.block_idx >= self.posting_list.blocks.len() {
                self.exhausted = true;
            } else {
                self.load_block(self.block_idx);
            }
        }
        self.doc()
    }

    pub fn seek(&mut self, target: DocId) -> DocId {
        if self.exhausted {
            return TERMINATED;
        }
        if self.doc() >= target {
            return self.doc();
        }

        // Fast path: the target lies within the currently loaded block.
        if let Some(&last_doc) = self.current_doc_ids.last()
            && last_doc >= target
        {
            while !self.exhausted && self.doc() < target {
                self.in_block_idx += 1;
                if self.in_block_idx >= self.current_doc_ids.len() {
                    self.block_idx += 1;
                    if self.block_idx >= self.posting_list.blocks.len() {
                        self.exhausted = true;
                    } else {
                        self.load_block(self.block_idx);
                    }
                }
            }
            return self.doc();
        }

        // Otherwise binary-search for the block that may contain the target.
        if let Some(block_idx) = self.posting_list.find_block(target) {
            self.load_block(block_idx);
            while self.in_block_idx < self.current_doc_ids.len()
                && self.current_doc_ids[self.in_block_idx] < target
            {
                self.in_block_idx += 1;
            }
            if self.in_block_idx >= self.current_doc_ids.len() {
                self.block_idx += 1;
                if self.block_idx >= self.posting_list.blocks.len() {
                    self.exhausted = true;
                } else {
                    self.load_block(self.block_idx);
                }
            }
        } else {
            self.exhausted = true;
        }
        self.doc()
    }

    pub fn is_exhausted(&self) -> bool {
        self.exhausted
    }

    pub fn current_block_max_weight(&self) -> f32 {
        self.posting_list
            .blocks
            .get(self.block_idx)
            .map(|b| b.header.max_weight)
            .unwrap_or(0.0)
    }

    pub fn current_block_max_contribution(&self, query_weight: f32) -> f32 {
        query_weight * self.current_block_max_weight()
    }
}

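/// Minimum bit width able to represent every value in `values` (0 if empty).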
fn find_optimal_bit_width(values: &[u32]) -> u8 {
    if values.is_empty() {
        return 0;
    }
    let max_val = values.iter().copied().max().unwrap_or(0);
    simd::bits_needed(max_val)
}

fn bits_needed_u16(val: u16) -> u8 {
    if val == 0 {
        0
    } else {
        16 - val.leading_zeros() as u8
    }
}

fn pack_bit_array(values: &[u32], bits: u8) -> Vec<u8> {
    if bits == 0 || values.is_empty() {
        return Vec::new();
    }
    // Compute the mask in u64 so that bits == 32 does not overflow the shift.
    let mask = ((1u64 << bits) - 1) as u32;
    let total_bytes = (values.len() * bits as usize).div_ceil(8);
    let mut result = vec![0u8; total_bytes];
    let mut bit_pos = 0usize;
    for &val in values {
        pack_value(&mut result, bit_pos, val & mask, bits);
        bit_pos += bits as usize;
    }
    result
}

fn pack_bit_array_u16(values: &[u16], bits: u8) -> Vec<u8> {
    if bits == 0 || values.is_empty() {
        return Vec::new();
    }
    let mask = (1u32 << bits) - 1; // bits <= 16 here, so this cannot overflow
    let total_bytes = (values.len() * bits as usize).div_ceil(8);
    let mut result = vec![0u8; total_bytes];
    let mut bit_pos = 0usize;
    for &val in values {
        pack_value(&mut result, bit_pos, (val as u32) & mask, bits);
        bit_pos += bits as usize;
    }
    result
}

#[inline]
fn pack_value(data: &mut [u8], bit_pos: usize, val: u32, bits: u8) {
    let mut remaining = bits as usize;
    let mut val = val;
    let mut byte = bit_pos / 8;
    let mut offset = bit_pos % 8;
    while remaining > 0 {
        // Write as many bits as fit into the current byte.
        let space = 8 - offset;
        let to_write = remaining.min(space);
        let mask = (1u32 << to_write) - 1;
        data[byte] |= ((val & mask) as u8) << offset;
        val >>= to_write;
        remaining -= to_write;
        byte += 1;
        offset = 0;
    }
}

fn unpack_bit_array(data: &[u8], bits: u8, count: usize) -> Vec<u32> {
    if bits == 0 || count == 0 {
        return vec![0; count];
    }
    let mut result = Vec::with_capacity(count);
    let mut bit_pos = 0usize;
    for _ in 0..count {
        result.push(unpack_value(data, bit_pos, bits));
        bit_pos += bits as usize;
    }
    result
}

fn unpack_bit_array_u16(data: &[u8], bits: u8, count: usize) -> Vec<u16> {
    if bits == 0 || count == 0 {
        return vec![0; count];
    }
    let mut result = Vec::with_capacity(count);
    let mut bit_pos = 0usize;
    for _ in 0..count {
        result.push(unpack_value(data, bit_pos, bits) as u16);
        bit_pos += bits as usize;
    }
    result
}

#[inline]
fn unpack_value(data: &[u8], bit_pos: usize, bits: u8) -> u32 {
    let mut val = 0u32;
    let mut remaining = bits as usize;
    let mut byte = bit_pos / 8;
    let mut offset = bit_pos % 8;
    let mut shift = 0;
    while remaining > 0 {
        let space = 8 - offset;
        let to_read = remaining.min(space);
        // Build the mask in u32: `to_read` can reach 8 for byte-aligned reads,
        // and `1u8 << 8` would overflow.
        let mask = (1u32 << to_read) - 1;
        val |= (((data.get(byte).copied().unwrap_or(0) as u32) >> offset) & mask) << shift;
        remaining -= to_read;
        shift += to_read;
        byte += 1;
        offset = 0;
    }
    val
}

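/// Encodes `weights` per the quantization mode. The UInt8 and UInt4 payloads
/// are prefixed with the f32 `scale` and `min` needed for dequantization.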
fn encode_weights(weights: &[f32], quant: WeightQuantization) -> io::Result<Vec<u8>> {
    let mut data = Vec::new();
    match quant {
        WeightQuantization::Float32 => {
            for &w in weights {
                data.write_f32::<LittleEndian>(w)?;
            }
        }
        WeightQuantization::Float16 => {
            use half::f16;
            for &w in weights {
                data.write_u16::<LittleEndian>(f16::from_f32(w).to_bits())?;
            }
        }
        WeightQuantization::UInt8 => {
            let min = weights.iter().copied().fold(f32::INFINITY, f32::min);
            let max = weights.iter().copied().fold(f32::NEG_INFINITY, f32::max);
            let range = max - min;
            let scale = if range < f32::EPSILON {
                1.0
            } else {
                range / 255.0
            };
            data.write_f32::<LittleEndian>(scale)?;
            data.write_f32::<LittleEndian>(min)?;
            for &w in weights {
                data.write_u8(((w - min) / scale).round() as u8)?;
            }
        }
        WeightQuantization::UInt4 => {
            let min = weights.iter().copied().fold(f32::INFINITY, f32::min);
            let max = weights.iter().copied().fold(f32::NEG_INFINITY, f32::max);
            let range = max - min;
            let scale = if range < f32::EPSILON {
                1.0
            } else {
                range / 15.0
            };
            data.write_f32::<LittleEndian>(scale)?;
            data.write_f32::<LittleEndian>(min)?;
            // Pack two 4-bit values per byte, low nibble first.
            let mut i = 0;
            while i < weights.len() {
                let q1 = ((weights[i] - min) / scale).round() as u8 & 0x0F;
                let q2 = if i + 1 < weights.len() {
                    ((weights[i + 1] - min) / scale).round() as u8 & 0x0F
                } else {
                    0
                };
                data.write_u8((q2 << 4) | q1)?;
                i += 2;
            }
        }
    }
    Ok(data)
}

fn decode_weights(data: &[u8], quant: WeightQuantization, count: usize) -> Vec<f32> {
    let mut cursor = Cursor::new(data);
    let mut weights = Vec::with_capacity(count);
    match quant {
        WeightQuantization::Float32 => {
            for _ in 0..count {
                weights.push(cursor.read_f32::<LittleEndian>().unwrap_or(0.0));
            }
        }
        WeightQuantization::Float16 => {
            use half::f16;
            for _ in 0..count {
                let bits = cursor.read_u16::<LittleEndian>().unwrap_or(0);
                weights.push(f16::from_bits(bits).to_f32());
            }
        }
        WeightQuantization::UInt8 => {
            let scale = cursor.read_f32::<LittleEndian>().unwrap_or(1.0);
            let min = cursor.read_f32::<LittleEndian>().unwrap_or(0.0);
            for _ in 0..count {
                let q = cursor.read_u8().unwrap_or(0);
                weights.push(q as f32 * scale + min);
            }
        }
        WeightQuantization::UInt4 => {
            let scale = cursor.read_f32::<LittleEndian>().unwrap_or(1.0);
            let min = cursor.read_f32::<LittleEndian>().unwrap_or(0.0);
            // Unpack two 4-bit values per byte, low nibble first.
            let mut i = 0;
            while i < count {
                let byte = cursor.read_u8().unwrap_or(0);
                weights.push((byte & 0x0F) as f32 * scale + min);
                i += 1;
                if i < count {
                    weights.push((byte >> 4) as f32 * scale + min);
                    i += 1;
                }
            }
        }
    }
    weights
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_block_roundtrip() {
        let postings = vec![
            (10u32, 0u16, 1.5f32),
            (15, 0, 2.0),
            (20, 1, 0.5),
            (100, 0, 3.0),
        ];
        let block = SparseBlock::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert_eq!(block.decode_doc_ids(), vec![10, 15, 20, 100]);
        assert_eq!(block.decode_ordinals(), vec![0, 0, 1, 0]);
        let weights = block.decode_weights();
        assert!((weights[0] - 1.5).abs() < 0.01);
    }

    #[test]
    fn test_posting_list() {
        let postings: Vec<(DocId, u16, f32)> =
            (0..300).map(|i| (i * 2, 0, i as f32 * 0.1)).collect();
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert_eq!(list.doc_count(), 300);
        assert_eq!(list.num_blocks(), 3);

        let mut iter = list.iterator();
        assert_eq!(iter.doc(), 0);
        iter.advance();
        assert_eq!(iter.doc(), 2);
    }

    #[test]
    fn test_serialization() {
        let postings = vec![(1u32, 0u16, 0.5f32), (10, 1, 1.5), (100, 0, 2.5)];
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::UInt8).unwrap();

        let mut buf = Vec::new();
        list.serialize(&mut buf).unwrap();
        let list2 = BlockSparsePostingList::deserialize(&mut Cursor::new(&buf)).unwrap();

        assert_eq!(list.doc_count(), list2.doc_count());
    }

    #[test]
    fn test_seek() {
        let postings: Vec<(DocId, u16, f32)> = (0..500).map(|i| (i * 3, 0, i as f32)).collect();
        let list =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        let mut iter = list.iterator();
        assert_eq!(iter.seek(300), 300);
        assert_eq!(iter.seek(301), 303);
        assert_eq!(iter.seek(2000), TERMINATED);
    }

    #[test]
    fn test_merge_with_offsets() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 1.0), (5, 0, 2.0), (10, 1, 3.0)];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 4.0), (3, 1, 5.0), (7, 0, 6.0)];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        assert_eq!(merged.doc_count(), 6);

        let decoded = merged.decode_all();
        assert_eq!(decoded.len(), 6);

        // First segment: doc ids unchanged.
        assert_eq!(decoded[0].0, 0);
        assert_eq!(decoded[1].0, 5);
        assert_eq!(decoded[2].0, 10);

        // Second segment: doc ids rebased by +100.
        assert_eq!(decoded[3].0, 100);
        assert_eq!(decoded[4].0, 103);
        assert_eq!(decoded[5].0, 107);

        // Weights and ordinals survive the merge.
        assert!((decoded[0].2 - 1.0).abs() < 0.01);
        assert!((decoded[3].2 - 4.0).abs() < 0.01);

        assert_eq!(decoded[2].1, 1);
        assert_eq!(decoded[4].1, 1);
    }

    #[test]
    fn test_merge_with_offsets_multi_block() {
        let postings1: Vec<(DocId, u16, f32)> = (0..200).map(|i| (i * 2, 0, i as f32)).collect();
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();
        assert!(list1.num_blocks() > 1, "Should have multiple blocks");

        let postings2: Vec<(DocId, u16, f32)> = (0..150).map(|i| (i * 3, 1, i as f32)).collect();
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 1000)]);

        assert_eq!(merged.doc_count(), 350);
        assert_eq!(merged.num_blocks(), list1.num_blocks() + list2.num_blocks());

        let mut iter = merged.iterator();

        assert_eq!(iter.doc(), 0);

        // Seek across the segment boundary into the rebased second list.
        let doc = iter.seek(1000);
        assert_eq!(doc, 1000);
        iter.advance();
        assert_eq!(iter.doc(), 1003);
    }

    #[test]
    fn test_merge_with_offsets_serialize_roundtrip() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 1.0), (5, 0, 2.0), (10, 1, 3.0)];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 4.0), (3, 1, 5.0), (7, 0, 6.0)];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();

        let mut cursor = std::io::Cursor::new(&bytes);
        let loaded = BlockSparsePostingList::deserialize(&mut cursor).unwrap();

        let decoded = loaded.decode_all();
        assert_eq!(decoded.len(), 6);

        assert_eq!(decoded[0].0, 0);
        assert_eq!(decoded[1].0, 5);
        assert_eq!(decoded[2].0, 10);

        assert_eq!(decoded[3].0, 100, "First doc of seg2 should be 0+100=100");
        assert_eq!(decoded[4].0, 103, "Second doc of seg2 should be 3+100=103");
        assert_eq!(decoded[5].0, 107, "Third doc of seg2 should be 7+100=107");

        let mut iter = loaded.iterator();
        assert_eq!(iter.doc(), 0);
        iter.advance();
        assert_eq!(iter.doc(), 5);
        iter.advance();
        assert_eq!(iter.doc(), 10);
        iter.advance();
        assert_eq!(iter.doc(), 100);
        iter.advance();
        assert_eq!(iter.doc(), 103);
        iter.advance();
        assert_eq!(iter.doc(), 107);
    }

    #[test]
    fn test_merge_seek_after_roundtrip() {
        let postings1: Vec<(DocId, u16, f32)> = (0..200).map(|i| (i * 2, 0, 1.0)).collect();
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = (0..150).map(|i| (i * 3, 0, 2.0)).collect();
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 1000)]);

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();
        let loaded =
            BlockSparsePostingList::deserialize(&mut std::io::Cursor::new(&bytes)).unwrap();

        let mut iter = loaded.iterator();

        let doc = iter.seek(100);
        assert_eq!(doc, 100, "Seek to 100 in segment 1");

        let doc = iter.seek(1000);
        assert_eq!(doc, 1000, "Seek to 1000 (first doc of segment 2)");

        let doc = iter.seek(1050);
        assert!(
            doc >= 1050,
            "Seek to 1050 should find doc >= 1050, got {}",
            doc
        );

        // Seeking backwards must not move the cursor backwards.
        let doc = iter.seek(500);
        assert!(
            doc >= 1050,
            "Seek backwards should not go back, got {}",
            doc
        );

        // A fresh iterator should walk all 350 docs in strictly increasing order.
        let mut iter2 = loaded.iterator();

        let mut count = 0;
        let mut prev_doc = 0;
        while iter2.doc() != super::TERMINATED {
            let current = iter2.doc();
            if count > 0 {
                assert!(
                    current > prev_doc,
                    "Docs should be monotonically increasing: {} vs {}",
                    prev_doc,
                    current
                );
            }
            prev_doc = current;
            iter2.advance();
            count += 1;
        }
        assert_eq!(count, 350, "Should have 350 total docs");
    }

    #[test]
    fn test_merge_preserves_weights_and_ordinals() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 1.5), (5, 1, 2.5), (10, 2, 3.5)];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 4.5), (3, 1, 5.5), (7, 3, 6.5)];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();
        let loaded =
            BlockSparsePostingList::deserialize(&mut std::io::Cursor::new(&bytes)).unwrap();

        let mut iter = loaded.iterator();

        assert_eq!(iter.doc(), 0);
        assert!(
            (iter.weight() - 1.5).abs() < 0.01,
            "Weight should be 1.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 0);

        iter.advance();
        assert_eq!(iter.doc(), 5);
        assert!(
            (iter.weight() - 2.5).abs() < 0.01,
            "Weight should be 2.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 1);

        iter.advance();
        assert_eq!(iter.doc(), 10);
        assert!(
            (iter.weight() - 3.5).abs() < 0.01,
            "Weight should be 3.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 2);

        // Crossing into the second (rebased) segment.
        iter.advance();
        assert_eq!(iter.doc(), 100);
        assert!(
            (iter.weight() - 4.5).abs() < 0.01,
            "Weight should be 4.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 0);

        iter.advance();
        assert_eq!(iter.doc(), 103);
        assert!(
            (iter.weight() - 5.5).abs() < 0.01,
            "Weight should be 5.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 1);

        iter.advance();
        assert_eq!(iter.doc(), 107);
        assert!(
            (iter.weight() - 6.5).abs() < 0.01,
            "Weight should be 6.5, got {}",
            iter.weight()
        );
        assert_eq!(iter.ordinal(), 3);

        iter.advance();
        assert_eq!(iter.doc(), super::TERMINATED);
    }

    #[test]
    fn test_merge_global_max_weight() {
        let postings1: Vec<(DocId, u16, f32)> = vec![
            (0, 0, 3.0),
            (1, 0, 7.0), // max of list1
            (2, 0, 2.0),
        ];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![
            (0, 0, 5.0),
            (1, 0, 4.0),
            (2, 0, 6.0), // max of list2
        ];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        assert!((list1.global_max_weight() - 7.0).abs() < 0.01);
        assert!((list2.global_max_weight() - 6.0).abs() < 0.01);

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        assert!(
            (merged.global_max_weight() - 7.0).abs() < 0.01,
            "Global max should be 7.0, got {}",
            merged.global_max_weight()
        );

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();
        let loaded =
            BlockSparsePostingList::deserialize(&mut std::io::Cursor::new(&bytes)).unwrap();

        assert!(
            (loaded.global_max_weight() - 7.0).abs() < 0.01,
            "After roundtrip, global max should still be 7.0, got {}",
            loaded.global_max_weight()
        );
    }

    #[test]
    fn test_scoring_simulation_after_merge() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 0.5), (5, 0, 0.8)];
        let list1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();

        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 0.6), (3, 0, 0.9)];
        let list2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        let merged = BlockSparsePostingList::merge_with_offsets(&[(&list1, 0), (&list2, 100)]);

        let mut bytes = Vec::new();
        merged.serialize(&mut bytes).unwrap();
        let loaded =
            BlockSparsePostingList::deserialize(&mut std::io::Cursor::new(&bytes)).unwrap();

        // Score each posting as query_weight * document weight.
        let query_weight = 2.0f32;
        let mut iter = loaded.iterator();

        assert_eq!(iter.doc(), 0);
        let score = query_weight * iter.weight();
        assert!(
            (score - 1.0).abs() < 0.01,
            "Doc 0 score should be 1.0, got {}",
            score
        );

        iter.advance();
        assert_eq!(iter.doc(), 5);
        let score = query_weight * iter.weight();
        assert!(
            (score - 1.6).abs() < 0.01,
            "Doc 5 score should be 1.6, got {}",
            score
        );

        iter.advance();
        assert_eq!(iter.doc(), 100);
        let score = query_weight * iter.weight();
        assert!(
            (score - 1.2).abs() < 0.01,
            "Doc 100 score should be 1.2, got {}",
            score
        );

        iter.advance();
        assert_eq!(iter.doc(), 103);
        let score = query_weight * iter.weight();
        assert!(
            (score - 1.8).abs() < 0.01,
            "Doc 103 score should be 1.8, got {}",
            score
        );
    }
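
    // The two tests below are additions to the original suite: they pin down
    // helper-level invariants the block codec relies on. The constants used
    // (widths 1..=20, the multiplicative hash) are arbitrary test choices.

    #[test]
    fn test_bit_pack_roundtrip_various_widths() {
        // pack/unpack must round-trip at every width, including the
        // byte-aligned 8- and 16-bit cases.
        for bits in 1u8..=20 {
            let mask = ((1u64 << bits) - 1) as u32;
            let values: Vec<u32> = (0..100u32)
                .map(|i| i.wrapping_mul(2_654_435_761) & mask)
                .collect();
            let packed = pack_bit_array(&values, bits);
            let unpacked = unpack_bit_array(&packed, bits, values.len());
            assert_eq!(values, unpacked, "round-trip failed at {} bits", bits);
        }
    }

    #[test]
    fn test_quantized_weight_error_bound() {
        // Dequantized UInt8/UInt4 weights should land within half a
        // quantization step of the originals.
        let weights: Vec<f32> = (0..16).map(|i| i as f32 * 0.5).collect();
        let range = 7.5f32; // max - min of the input above
        for (quant, steps) in [
            (WeightQuantization::UInt8, 255.0f32),
            (WeightQuantization::UInt4, 15.0f32),
        ] {
            let data = encode_weights(&weights, quant).unwrap();
            let decoded = decode_weights(&data, quant, weights.len());
            let half_step = range / steps / 2.0;
            for (w, d) in weights.iter().zip(&decoded) {
                assert!((w - d).abs() <= half_step + 1e-6);
            }
        }
    }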
}