1mod block;
27mod config;
28mod partitioner;
29
30pub use block::{BlockSparsePostingIterator, BlockSparsePostingList, SparseBlock};
31pub use config::{
32 IndexSize, QueryWeighting, SparseEntry, SparseQueryConfig, SparseVector, SparseVectorConfig,
33 WeightQuantization,
34};
35pub use partitioner::optimal_partition;
36
37use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
38use std::io::{self, Read, Write};
39
40use super::posting_common::{read_vint, write_vint};
41use crate::DocId;
42
/// A single posting: one document paired with its weight for a sparse dimension.
#[derive(Debug, Clone, Copy)]
pub struct SparsePosting {
    /// Document identifier this posting refers to.
    pub doc_id: DocId,
    /// Impact weight of the dimension for this document.
    pub weight: f32,
}
49
/// Number of postings per block in block-based sparse posting lists.
pub const SPARSE_BLOCK_SIZE: usize = 128;
52
/// Skip-list entry describing one encoded block of a sparse posting list.
///
/// Records the doc-id range the block covers, where its bytes live, and the
/// block's maximum weight (used for block-max contribution bounds).
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct SparseSkipEntry {
    /// First doc id contained in the block.
    pub first_doc: DocId,
    /// Last doc id contained in the block.
    pub last_doc: DocId,
    /// Byte offset of the block within the posting data.
    pub offset: u64,
    /// Length of the encoded block in bytes.
    pub length: u32,
    /// Maximum posting weight inside the block.
    pub max_weight: f32,
}
70
71impl SparseSkipEntry {
72 pub const SIZE: usize = 24; pub fn new(
76 first_doc: DocId,
77 last_doc: DocId,
78 offset: u64,
79 length: u32,
80 max_weight: f32,
81 ) -> Self {
82 Self {
83 first_doc,
84 last_doc,
85 offset,
86 length,
87 max_weight,
88 }
89 }
90
91 #[inline]
96 pub fn block_max_contribution(&self, query_weight: f32) -> f32 {
97 query_weight * self.max_weight
98 }
99
100 #[inline]
102 pub fn from_bytes(b: &[u8]) -> Self {
103 Self {
104 first_doc: u32::from_le_bytes(b[0..4].try_into().unwrap()),
105 last_doc: u32::from_le_bytes(b[4..8].try_into().unwrap()),
106 offset: u64::from_le_bytes(b[8..16].try_into().unwrap()),
107 length: u32::from_le_bytes(b[16..20].try_into().unwrap()),
108 max_weight: f32::from_le_bytes(b[20..24].try_into().unwrap()),
109 }
110 }
111
112 #[inline]
114 pub fn write_to_vec(&self, buf: &mut Vec<u8>) {
115 buf.extend_from_slice(&self.first_doc.to_le_bytes());
116 buf.extend_from_slice(&self.last_doc.to_le_bytes());
117 buf.extend_from_slice(&self.offset.to_le_bytes());
118 buf.extend_from_slice(&self.length.to_le_bytes());
119 buf.extend_from_slice(&self.max_weight.to_le_bytes());
120 }
121
122 #[inline]
124 pub fn read_at(skip_bytes: &[u8], idx: usize) -> Self {
125 let off = idx * Self::SIZE;
126 Self::from_bytes(&skip_bytes[off..off + Self::SIZE])
127 }
128
129 pub fn write<W: Write + ?Sized>(&self, writer: &mut W) -> io::Result<()> {
131 writer.write_u32::<LittleEndian>(self.first_doc)?;
132 writer.write_u32::<LittleEndian>(self.last_doc)?;
133 writer.write_u64::<LittleEndian>(self.offset)?;
134 writer.write_u32::<LittleEndian>(self.length)?;
135 writer.write_f32::<LittleEndian>(self.max_weight)?;
136 Ok(())
137 }
138
139 pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
141 let first_doc = reader.read_u32::<LittleEndian>()?;
142 let last_doc = reader.read_u32::<LittleEndian>()?;
143 let offset = reader.read_u64::<LittleEndian>()?;
144 let length = reader.read_u32::<LittleEndian>()?;
145 let max_weight = reader.read_f32::<LittleEndian>()?;
146 Ok(Self {
147 first_doc,
148 last_doc,
149 offset,
150 length,
151 max_weight,
152 })
153 }
154}
155
/// Ordered collection of [`SparseSkipEntry`]s plus the maximum weight across
/// all blocks, enabling list-level and block-level score bounds.
#[derive(Debug, Clone, Default)]
pub struct SparseSkipList {
    // One entry per encoded block, in doc-id order.
    entries: Vec<SparseSkipEntry>,
    // Maximum `max_weight` over all pushed entries (0.0 when empty).
    global_max_weight: f32,
}
163
164impl SparseSkipList {
165 pub fn new() -> Self {
166 Self::default()
167 }
168
169 pub fn push(
171 &mut self,
172 first_doc: DocId,
173 last_doc: DocId,
174 offset: u64,
175 length: u32,
176 max_weight: f32,
177 ) {
178 self.global_max_weight = self.global_max_weight.max(max_weight);
179 self.entries.push(SparseSkipEntry::new(
180 first_doc, last_doc, offset, length, max_weight,
181 ));
182 }
183
184 pub fn len(&self) -> usize {
186 self.entries.len()
187 }
188
189 pub fn is_empty(&self) -> bool {
190 self.entries.is_empty()
191 }
192
193 pub fn get(&self, index: usize) -> Option<&SparseSkipEntry> {
195 self.entries.get(index)
196 }
197
198 pub fn global_max_weight(&self) -> f32 {
200 self.global_max_weight
201 }
202
203 pub fn find_block(&self, target: DocId) -> Option<usize> {
205 if self.entries.is_empty() {
206 return None;
207 }
208 let idx = self.entries.partition_point(|e| e.last_doc < target);
210 if idx < self.entries.len() {
211 Some(idx)
212 } else {
213 None
214 }
215 }
216
217 pub fn iter(&self) -> impl Iterator<Item = &SparseSkipEntry> {
219 self.entries.iter()
220 }
221
222 pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
224 writer.write_u32::<LittleEndian>(self.entries.len() as u32)?;
225 writer.write_f32::<LittleEndian>(self.global_max_weight)?;
226 for entry in &self.entries {
227 entry.write(writer)?;
228 }
229 Ok(())
230 }
231
232 pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
234 let count = reader.read_u32::<LittleEndian>()? as usize;
235 let global_max_weight = reader.read_f32::<LittleEndian>()?;
236 let mut entries = Vec::with_capacity(count);
237 for _ in 0..count {
238 entries.push(SparseSkipEntry::read(reader)?);
239 }
240 Ok(Self {
241 entries,
242 global_max_weight,
243 })
244 }
245}
246
/// Compressed posting list for one sparse dimension: vint-encoded doc-id
/// deltas followed by a weight section encoded per `quantization`.
#[derive(Debug, Clone)]
pub struct SparsePostingList {
    // How the weight section of `data` is encoded.
    quantization: WeightQuantization,
    // Dequantization scale (1.0 for float encodings).
    scale: f32,
    // Dequantization offset, i.e. the minimum weight (0.0 for float encodings).
    min_val: f32,
    // Number of postings in the list.
    doc_count: u32,
    // Encoded bytes: all doc-id deltas first, then all weights.
    data: Vec<u8>,
}
265
impl SparsePostingList {
    /// Encodes `postings` with the requested weight quantization.
    ///
    /// Doc ids must be sorted in strictly increasing order: the delta
    /// computation below underflows (panicking in debug builds) otherwise.
    pub fn from_postings(
        postings: &[(DocId, f32)],
        quantization: WeightQuantization,
    ) -> io::Result<Self> {
        if postings.is_empty() {
            // Empty list: identity dequantization parameters, no data.
            return Ok(Self {
                quantization,
                scale: 1.0,
                min_val: 0.0,
                doc_count: 0,
                data: Vec::new(),
            });
        }

        let weights: Vec<f32> = postings.iter().map(|(_, w)| *w).collect();
        let min_val = weights.iter().cloned().fold(f32::INFINITY, f32::min);
        let max_val = weights.iter().cloned().fold(f32::NEG_INFINITY, f32::max);

        // Affine dequantization parameters: weight ~= q * scale + adjusted_min.
        let (scale, adjusted_min) = match quantization {
            // Float encodings store values verbatim; identity mapping.
            WeightQuantization::Float32 | WeightQuantization::Float16 => (1.0, 0.0),
            WeightQuantization::UInt8 => {
                let range = max_val - min_val;
                if range < f32::EPSILON {
                    // All weights (near-)identical: keep scale 1.0 so code 0
                    // dequantizes back to min_val without a denormal scale.
                    (1.0, min_val)
                } else {
                    // 8-bit codes 0..=255 span [min_val, max_val].
                    (range / 255.0, min_val)
                }
            }
            WeightQuantization::UInt4 => {
                let range = max_val - min_val;
                if range < f32::EPSILON {
                    (1.0, min_val)
                } else {
                    // 4-bit codes 0..=15 span [min_val, max_val].
                    (range / 15.0, min_val)
                }
            }
        };

        let mut data = Vec::new();

        // Doc-id section: vint-encoded gaps against the previous doc id.
        let mut prev_doc_id = 0u32;
        for (doc_id, _) in postings {
            let delta = doc_id - prev_doc_id;
            write_vint(&mut data, delta as u64)?;
            prev_doc_id = *doc_id;
        }

        // Weight section, appended directly after the delta section.
        match quantization {
            WeightQuantization::Float32 => {
                for (_, weight) in postings {
                    data.write_f32::<LittleEndian>(*weight)?;
                }
            }
            WeightQuantization::Float16 => {
                use half::slice::HalfFloatSliceExt;
                // Bulk-convert f32 -> f16, then store the raw 16-bit patterns.
                let weights: Vec<f32> = postings.iter().map(|(_, w)| *w).collect();
                let mut f16_slice: Vec<half::f16> = vec![half::f16::ZERO; weights.len()];
                f16_slice.convert_from_f32_slice(&weights);
                for h in f16_slice {
                    data.write_u16::<LittleEndian>(h.to_bits())?;
                }
            }
            WeightQuantization::UInt8 => {
                for (_, weight) in postings {
                    let quantized = ((*weight - adjusted_min) / scale).round() as u8;
                    data.write_u8(quantized)?;
                }
            }
            WeightQuantization::UInt4 => {
                // Pack two 4-bit codes per byte: element i in the low nibble,
                // element i + 1 in the high nibble (zero-padded for an odd tail).
                let mut i = 0;
                while i < postings.len() {
                    let q1 = ((postings[i].1 - adjusted_min) / scale).round() as u8 & 0x0F;
                    let q2 = if i + 1 < postings.len() {
                        ((postings[i + 1].1 - adjusted_min) / scale).round() as u8 & 0x0F
                    } else {
                        0
                    };
                    data.write_u8((q2 << 4) | q1)?;
                    i += 2;
                }
            }
        }

        Ok(Self {
            quantization,
            scale,
            min_val: adjusted_min,
            doc_count: postings.len() as u32,
            data,
        })
    }

    /// Writes the list as: quantization tag (u8), scale (f32), min_val (f32),
    /// doc count (u32), data length (u32), raw data bytes.
    pub fn serialize<W: Write>(&self, writer: &mut W) -> io::Result<()> {
        writer.write_u8(self.quantization as u8)?;
        writer.write_f32::<LittleEndian>(self.scale)?;
        writer.write_f32::<LittleEndian>(self.min_val)?;
        writer.write_u32::<LittleEndian>(self.doc_count)?;
        writer.write_u32::<LittleEndian>(self.data.len() as u32)?;
        writer.write_all(&self.data)?;
        Ok(())
    }

    /// Reads a list serialized by [`Self::serialize`].
    ///
    /// # Errors
    /// Returns `InvalidData` for an unknown quantization tag, or any I/O error.
    pub fn deserialize<R: Read>(reader: &mut R) -> io::Result<Self> {
        let quant_byte = reader.read_u8()?;
        let quantization = WeightQuantization::from_u8(quant_byte).ok_or_else(|| {
            io::Error::new(io::ErrorKind::InvalidData, "Invalid quantization type")
        })?;
        let scale = reader.read_f32::<LittleEndian>()?;
        let min_val = reader.read_f32::<LittleEndian>()?;
        let doc_count = reader.read_u32::<LittleEndian>()?;
        // NOTE(review): `data_len` comes straight from the stream; a corrupt
        // value up to u32::MAX is allocated here before reading — consider
        // capping or validating against the source size.
        let data_len = reader.read_u32::<LittleEndian>()? as usize;
        let mut data = vec![0u8; data_len];
        reader.read_exact(&mut data)?;

        Ok(Self {
            quantization,
            scale,
            min_val,
            doc_count,
            data,
        })
    }

    /// Number of postings in the list.
    pub fn doc_count(&self) -> u32 {
        self.doc_count
    }

    /// Weight encoding used by this list.
    pub fn quantization(&self) -> WeightQuantization {
        self.quantization
    }

    /// Returns a streaming iterator positioned on the first posting.
    pub fn iterator(&self) -> SparsePostingIterator<'_> {
        SparsePostingIterator::new(self)
    }

    /// Decodes the entire list into `(doc_id, weight)` pairs.
    pub fn decode_all(&self) -> io::Result<Vec<(DocId, f32)>> {
        let mut result = Vec::with_capacity(self.doc_count as usize);
        let mut iter = self.iterator();

        while !iter.exhausted {
            result.push((iter.doc_id, iter.weight));
            iter.advance();
        }

        Ok(result)
    }
}
426
/// Streaming decoder over a [`SparsePostingList`].
pub struct SparsePostingIterator<'a> {
    // The list being decoded.
    posting_list: &'a SparsePostingList,
    // Byte offset of the next doc-id vint within `posting_list.data`.
    doc_id_offset: usize,
    // Byte offset where the weight section begins in `posting_list.data`.
    weight_offset: usize,
    // Zero-based index of the current posting.
    index: usize,
    // Doc id of the current posting.
    doc_id: DocId,
    // Decoded weight of the current posting.
    weight: f32,
    // True once iteration has moved past the last posting.
    exhausted: bool,
}
443
impl<'a> SparsePostingIterator<'a> {
    /// Builds an iterator positioned on the first posting (if any).
    fn new(posting_list: &'a SparsePostingList) -> Self {
        let mut iter = Self {
            posting_list,
            doc_id_offset: 0,
            weight_offset: 0,
            index: 0,
            doc_id: 0,
            weight: 0.0,
            exhausted: posting_list.doc_count == 0,
        };

        if !iter.exhausted {
            // Weights are stored after all doc-id vints; locate that boundary
            // once up front, then decode the first posting.
            iter.weight_offset = iter.calculate_weight_offset();
            iter.load_current();
        }

        iter
    }

    /// Scans past the `doc_count` doc-id vints to find the byte offset where
    /// the weight section begins. O(doc_count), run once per iterator.
    fn calculate_weight_offset(&self) -> usize {
        let mut offset = 0;
        let mut reader = &self.posting_list.data[..];

        for _ in 0..self.posting_list.doc_count {
            if read_vint(&mut reader).is_ok() {
                // `reader` shrinks as vints are consumed; bytes consumed so
                // far = total length minus what remains.
                offset = self.posting_list.data.len() - reader.len();
            }
        }

        offset
    }

    /// Decodes the posting at `self.index` into `doc_id` / `weight`.
    /// Marks the iterator exhausted when the index runs past `doc_count`.
    fn load_current(&mut self) {
        if self.index >= self.posting_list.doc_count as usize {
            self.exhausted = true;
            return;
        }

        // Decode the next doc-id delta and advance the vint cursor.
        let mut reader = &self.posting_list.data[self.doc_id_offset..];
        if let Ok(delta) = read_vint(&mut reader) {
            self.doc_id = self.doc_id.wrapping_add(delta as u32);
            self.doc_id_offset = self.posting_list.data.len() - reader.len();
        }

        let weight_idx = self.index;
        let pl = self.posting_list;

        // Decode the weight for this index. Out-of-range offsets (truncated
        // or corrupt data) fall back to 0.0 rather than panicking.
        self.weight = match pl.quantization {
            WeightQuantization::Float32 => {
                let offset = self.weight_offset + weight_idx * 4;
                if offset + 4 <= pl.data.len() {
                    let bytes = &pl.data[offset..offset + 4];
                    f32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]])
                } else {
                    0.0
                }
            }
            WeightQuantization::Float16 => {
                let offset = self.weight_offset + weight_idx * 2;
                if offset + 2 <= pl.data.len() {
                    let bits = u16::from_le_bytes([pl.data[offset], pl.data[offset + 1]]);
                    half::f16::from_bits(bits).to_f32()
                } else {
                    0.0
                }
            }
            WeightQuantization::UInt8 => {
                let offset = self.weight_offset + weight_idx;
                if offset < pl.data.len() {
                    let quantized = pl.data[offset];
                    // Invert the affine quantization: q * scale + min.
                    quantized as f32 * pl.scale + pl.min_val
                } else {
                    0.0
                }
            }
            WeightQuantization::UInt4 => {
                // Two codes per byte: even indices in the low nibble, odd in
                // the high nibble (matches the encoder's packing).
                let byte_offset = self.weight_offset + weight_idx / 2;
                if byte_offset < pl.data.len() {
                    let byte = pl.data[byte_offset];
                    let quantized = if weight_idx.is_multiple_of(2) {
                        byte & 0x0F
                    } else {
                        (byte >> 4) & 0x0F
                    };
                    quantized as f32 * pl.scale + pl.min_val
                } else {
                    0.0
                }
            }
        };
    }

    /// Current doc id, or `TERMINATED` once exhausted.
    pub fn doc(&self) -> DocId {
        if self.exhausted {
            super::TERMINATED
        } else {
            self.doc_id
        }
    }

    /// Current weight, or 0.0 once exhausted.
    pub fn weight(&self) -> f32 {
        if self.exhausted { 0.0 } else { self.weight }
    }

    /// Moves to the next posting; returns its doc id, or `TERMINATED` when
    /// the list is exhausted.
    pub fn advance(&mut self) -> DocId {
        if self.exhausted {
            return super::TERMINATED;
        }

        self.index += 1;
        if self.index >= self.posting_list.doc_count as usize {
            self.exhausted = true;
            return super::TERMINATED;
        }

        self.load_current();
        self.doc_id
    }

    /// Advances to the first posting with doc id >= `target` (linear scan;
    /// this list type has no skip structure) and returns [`Self::doc`].
    pub fn seek(&mut self, target: DocId) -> DocId {
        while !self.exhausted && self.doc_id < target {
            self.advance();
        }
        self.doc()
    }
}
579
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_sparse_vector_dot_product() {
        let v1 = SparseVector::from_entries(&[0, 2, 5], &[1.0, 2.0, 3.0]);
        let v2 = SparseVector::from_entries(&[1, 2, 5], &[1.0, 4.0, 2.0]);

        // Shared dims are 2 and 5: 2*4 + 3*2 = 14.
        assert!((v1.dot(&v2) - 14.0).abs() < 1e-6);
    }

    #[test]
    fn test_sparse_posting_list_float32() {
        let postings = vec![(0, 1.5), (5, 2.3), (10, 0.8), (100, 3.15)];
        let pl = SparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert_eq!(pl.doc_count(), 4);

        // Float32 is lossless; doc ids and weights round-trip exactly.
        let mut iter = pl.iterator();
        assert_eq!(iter.doc(), 0);
        assert!((iter.weight() - 1.5).abs() < 1e-6);

        iter.advance();
        assert_eq!(iter.doc(), 5);
        assert!((iter.weight() - 2.3).abs() < 1e-6);

        iter.advance();
        assert_eq!(iter.doc(), 10);

        iter.advance();
        assert_eq!(iter.doc(), 100);
        assert!((iter.weight() - 3.15).abs() < 1e-6);

        // Past the last posting the iterator reports TERMINATED.
        iter.advance();
        assert_eq!(iter.doc(), super::super::TERMINATED);
    }

    #[test]
    fn test_sparse_posting_list_uint8() {
        let postings = vec![(0, 0.0), (5, 0.5), (10, 1.0)];
        let pl = SparsePostingList::from_postings(&postings, WeightQuantization::UInt8).unwrap();

        let decoded = pl.decode_all().unwrap();
        assert_eq!(decoded.len(), 3);

        // Quantization is lossy, but ordering of weights must survive.
        assert!(decoded[0].1 < decoded[1].1);
        assert!(decoded[1].1 < decoded[2].1);
    }

    #[test]
    fn test_block_sparse_posting_list() {
        // 300 postings forces more than one 128-entry block.
        let postings: Vec<(DocId, u16, f32)> =
            (0..300).map(|i| (i * 2, 0, (i as f32) * 0.1)).collect();

        let pl =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert_eq!(pl.doc_count(), 300);
        assert!(pl.num_blocks() >= 2);

        // Full scan must reproduce every (doc, weight) pair across blocks.
        let mut iter = pl.iterator();
        for (expected_doc, _, expected_weight) in &postings {
            assert_eq!(iter.doc(), *expected_doc);
            assert!((iter.weight() - expected_weight).abs() < 1e-6);
            iter.advance();
        }
        assert_eq!(iter.doc(), super::super::TERMINATED);
    }

    #[test]
    fn test_block_sparse_seek() {
        let postings: Vec<(DocId, u16, f32)> = (0..500).map(|i| (i * 3, 0, i as f32)).collect();

        let pl =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        let mut iter = pl.iterator();

        // Exact hit: 300 = 100 * 3 is present.
        assert_eq!(iter.seek(300), 300);

        // Miss lands on the next present doc id (303).
        assert_eq!(iter.seek(301), 303);

        // Beyond the last doc (3 * 499 = 1497) -> TERMINATED.
        assert_eq!(iter.seek(2000), super::super::TERMINATED);
    }

    #[test]
    fn test_serialization_roundtrip() {
        let postings: Vec<(DocId, u16, f32)> = vec![(0, 0, 1.0), (10, 0, 2.0), (100, 0, 3.0)];

        for quant in [
            WeightQuantization::Float32,
            WeightQuantization::Float16,
            WeightQuantization::UInt8,
        ] {
            let pl = BlockSparsePostingList::from_postings(&postings, quant).unwrap();

            let (block_data, skip_entries) = pl.serialize().unwrap();
            let pl2 =
                BlockSparsePostingList::from_parts(pl.doc_count(), &block_data, &skip_entries)
                    .unwrap();

            assert_eq!(pl.doc_count(), pl2.doc_count());

            // Iterate both in lockstep; loose weight tolerance covers the
            // lossy quantization modes.
            let mut iter1 = pl.iterator();
            let mut iter2 = pl2.iterator();

            while iter1.doc() != super::super::TERMINATED {
                assert_eq!(iter1.doc(), iter2.doc());
                assert!((iter1.weight() - iter2.weight()).abs() < 0.1);
                iter1.advance();
                iter2.advance();
            }
        }
    }

    #[test]
    fn test_concatenate() {
        let postings1: Vec<(DocId, u16, f32)> = vec![(0, 0, 1.0), (5, 1, 2.0)];
        let postings2: Vec<(DocId, u16, f32)> = vec![(0, 0, 3.0), (10, 1, 4.0)];

        let pl1 =
            BlockSparsePostingList::from_postings(&postings1, WeightQuantization::Float32).unwrap();
        let pl2 =
            BlockSparsePostingList::from_postings(&postings2, WeightQuantization::Float32).unwrap();

        // Emulate a segment merge: shift the second list's doc ids by 100.
        let mut all: Vec<(DocId, u16, f32)> = pl1.decode_all();
        for (doc_id, ord, w) in pl2.decode_all() {
            all.push((doc_id + 100, ord, w));
        }
        let merged =
            BlockSparsePostingList::from_postings(&all, WeightQuantization::Float32).unwrap();

        assert_eq!(merged.doc_count(), 4);

        let decoded = merged.decode_all();
        assert_eq!(decoded[0], (0, 0, 1.0));
        assert_eq!(decoded[1], (5, 1, 2.0));
        assert_eq!(decoded[2], (100, 0, 3.0));
        assert_eq!(decoded[3], (110, 1, 4.0));
    }

    #[test]
    fn test_sparse_vector_config() {
        // Default: u32 indices + f32 weights = 8 bytes per entry.
        let default = SparseVectorConfig::default();
        assert_eq!(default.index_size, IndexSize::U32);
        assert_eq!(default.weight_quantization, WeightQuantization::Float32);
        assert_eq!(default.bytes_per_entry(), 8.0);

        // SPLADE preset: u16 indices + u8 weights = 3 bytes per entry.
        let splade = SparseVectorConfig::splade();
        assert_eq!(splade.index_size, IndexSize::U16);
        assert_eq!(splade.weight_quantization, WeightQuantization::UInt8);
        assert_eq!(splade.bytes_per_entry(), 3.0);
        assert_eq!(splade.weight_threshold, 0.01);
        assert_eq!(splade.pruning, Some(0.1));
        assert!(splade.query_config.is_some());
        let query_cfg = splade.query_config.as_ref().unwrap();
        assert_eq!(query_cfg.heap_factor, 0.8);
        assert_eq!(query_cfg.max_query_dims, Some(20));

        // Compact preset: u16 indices + packed u4 weights = 2.5 bytes.
        let compact = SparseVectorConfig::compact();
        assert_eq!(compact.index_size, IndexSize::U16);
        assert_eq!(compact.weight_quantization, WeightQuantization::UInt4);
        assert_eq!(compact.bytes_per_entry(), 2.5);

        let conservative = SparseVectorConfig::conservative();
        assert_eq!(conservative.index_size, IndexSize::U32);
        assert_eq!(
            conservative.weight_quantization,
            WeightQuantization::Float16
        );
        assert_eq!(conservative.weight_threshold, 0.005);
        assert_eq!(conservative.pruning, None);

        // Byte round-trip preserves the size/quantization pair.
        let byte = splade.to_byte();
        let restored = SparseVectorConfig::from_byte(byte).unwrap();
        assert_eq!(restored.index_size, splade.index_size);
        assert_eq!(restored.weight_quantization, splade.weight_quantization);
    }

    #[test]
    fn test_index_size() {
        assert_eq!(IndexSize::U16.bytes(), 2);
        assert_eq!(IndexSize::U32.bytes(), 4);
        assert_eq!(IndexSize::U16.max_value(), 65535);
        assert_eq!(IndexSize::U32.max_value(), u32::MAX);
    }

    #[test]
    fn test_block_max_weight() {
        // Weight of doc i is i * 0.1, so with 128-doc blocks the per-block
        // maxima are 12.7 (doc 127), 25.5 (doc 255), 29.9 (doc 299).
        let postings: Vec<(DocId, u16, f32)> = (0..300)
            .map(|i| (i as DocId, 0, (i as f32) * 0.1))
            .collect();

        let pl =
            BlockSparsePostingList::from_postings(&postings, WeightQuantization::Float32).unwrap();

        assert!((pl.global_max_weight() - 29.9).abs() < 0.01);
        assert!(pl.num_blocks() >= 3);

        let block0_max = pl.block_max_weight(0).unwrap();
        assert!((block0_max - 12.7).abs() < 0.01);

        let block1_max = pl.block_max_weight(1).unwrap();
        assert!((block1_max - 25.5).abs() < 0.01);

        let block2_max = pl.block_max_weight(2).unwrap();
        assert!((block2_max - 29.9).abs() < 0.01);

        // Block-max contribution = query weight * block max weight.
        let query_weight = 2.0;
        let mut iter = pl.iterator();
        assert!((iter.current_block_max_weight() - 12.7).abs() < 0.01);
        assert!((iter.current_block_max_contribution(query_weight) - 25.4).abs() < 0.1);

        // Seeking into the second block updates the current block max.
        iter.seek(128);
        assert!((iter.current_block_max_weight() - 25.5).abs() < 0.01);
    }

    #[test]
    fn test_sparse_skip_list_serialization() {
        let mut skip_list = SparseSkipList::new();
        skip_list.push(0, 127, 0, 50, 12.7);
        skip_list.push(128, 255, 100, 60, 25.5);
        skip_list.push(256, 299, 200, 40, 29.9);

        assert_eq!(skip_list.len(), 3);
        assert!((skip_list.global_max_weight() - 29.9).abs() < 0.01);

        // Write to a buffer, then read it back.
        let mut buffer = Vec::new();
        skip_list.write(&mut buffer).unwrap();

        let restored = SparseSkipList::read(&mut buffer.as_slice()).unwrap();

        assert_eq!(restored.len(), 3);
        assert!((restored.global_max_weight() - 29.9).abs() < 0.01);

        // Spot-check individual entries survive the round trip.
        let e0 = restored.get(0).unwrap();
        assert_eq!(e0.first_doc, 0);
        assert_eq!(e0.last_doc, 127);
        assert!((e0.max_weight - 12.7).abs() < 0.01);

        let e1 = restored.get(1).unwrap();
        assert_eq!(e1.first_doc, 128);
        assert!((e1.max_weight - 25.5).abs() < 0.01);
    }
}
845}