lsm_tree/segment/file_offsets.rs

// Copyright (c) 2024-present, fjall-rs
// This source code is licensed under both the Apache 2.0 and MIT License
// (found in the LICENSE-* files in the repository)

use super::block::offset::BlockOffset;
use crate::coding::{Decode, DecodeError, Encode, EncodeError};
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use std::io::{Read, Write};

/// Offsets of the individual blocks inside a segment file.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct FileOffsets {
    /// Offset of the metadata block
    pub metadata_ptr: BlockOffset,

    /// Offset of the index block(s)
    pub index_block_ptr: BlockOffset,

    /// Offset of the top-level index
    pub tli_ptr: BlockOffset,

    /// Offset of the bloom filter
    pub bloom_ptr: BlockOffset,

    // TODO: #46 https://github.com/fjall-rs/lsm-tree/issues/46
    pub range_filter_ptr: BlockOffset,

    // TODO: #2 https://github.com/fjall-rs/lsm-tree/issues/2
    pub range_tombstones_ptr: BlockOffset,

    // TODO: prefix filter for l0, l1?
    pub pfx_ptr: BlockOffset,
}

impl FileOffsets {
    /// Returns the on-disk size in bytes (seven 8-byte pointers)
    #[must_use]
    pub const fn serialized_len() -> usize {
        7 * std::mem::size_of::<u64>()
    }
}

impl Encode for FileOffsets {
    fn encode_into<W: Write>(&self, writer: &mut W) -> Result<(), EncodeError> {
        // The write order defines the on-disk layout and must match
        // the read order in `decode_from`
        writer.write_u64::<BigEndian>(*self.metadata_ptr)?;
        writer.write_u64::<BigEndian>(*self.index_block_ptr)?;
        writer.write_u64::<BigEndian>(*self.tli_ptr)?;
        writer.write_u64::<BigEndian>(*self.bloom_ptr)?;
        writer.write_u64::<BigEndian>(*self.range_filter_ptr)?;
        writer.write_u64::<BigEndian>(*self.range_tombstones_ptr)?;
        writer.write_u64::<BigEndian>(*self.pfx_ptr)?;
        Ok(())
    }
}

impl Decode for FileOffsets {
    fn decode_from<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
        // Read back in the exact order written by `encode_into`
        let metadata_ptr = reader.read_u64::<BigEndian>()?;
        let index_block_ptr = reader.read_u64::<BigEndian>()?;
        let tli_ptr = reader.read_u64::<BigEndian>()?;
        let bloom_ptr = reader.read_u64::<BigEndian>()?;
        let range_filter_ptr = reader.read_u64::<BigEndian>()?;
        let range_tombstones_ptr = reader.read_u64::<BigEndian>()?;
        let pfx_ptr = reader.read_u64::<BigEndian>()?;

        Ok(Self {
            metadata_ptr: BlockOffset(metadata_ptr),
            index_block_ptr: BlockOffset(index_block_ptr),
            tli_ptr: BlockOffset(tli_ptr),
            bloom_ptr: BlockOffset(bloom_ptr),
            range_filter_ptr: BlockOffset(range_filter_ptr),
            range_tombstones_ptr: BlockOffset(range_tombstones_ptr),
            pfx_ptr: BlockOffset(pfx_ptr),
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;
    use test_log::test;

    #[test]
    fn file_offsets_roundtrip() -> crate::Result<()> {
        let before = FileOffsets {
            bloom_ptr: BlockOffset(15),
            index_block_ptr: BlockOffset(14),
            metadata_ptr: BlockOffset(17),
            pfx_ptr: BlockOffset(18),
            range_filter_ptr: BlockOffset(13),
            range_tombstones_ptr: BlockOffset(5),
            tli_ptr: BlockOffset(4),
        };

        let buf = before.encode_into_vec();

        let mut cursor = Cursor::new(buf);
        let after = FileOffsets::decode_from(&mut cursor)?;

        assert_eq!(after, before);

        Ok(())
    }
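
    // Sketch of a failure case: decoding from a truncated buffer should
    // return an error rather than panic, assuming `DecodeError` converts
    // from `std::io::Error` (as the `?` calls in `decode_from` imply).
    #[test]
    fn file_offsets_decode_truncated() {
        let buf = FileOffsets::default().encode_into_vec();

        // Drop the last byte so the final `read_u64` hits an unexpected EOF
        let mut cursor = Cursor::new(&buf[..buf.len() - 1]);
        assert!(FileOffsets::decode_from(&mut cursor).is_err());
    }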

    #[test]
    fn file_offsets_serialized_len() {
        let buf = FileOffsets::default().encode_into_vec();
        assert_eq!(FileOffsets::serialized_len(), buf.len());
    }
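
    // Sketch of trailer-style usage, assuming the offsets are appended at the
    // end of a segment file as a fixed-size trailer: because `serialized_len()`
    // is constant, a reader can decode them by seeking that many bytes back
    // from the end of the file.
    #[test]
    fn file_offsets_trailer_sketch() -> crate::Result<()> {
        let offsets = FileOffsets {
            metadata_ptr: BlockOffset(1),
            ..Default::default()
        };

        // Pretend this buffer already holds some block data, then append the trailer
        let mut file = vec![0u8; 100];
        file.extend(offsets.encode_into_vec());

        let trailer = &file[file.len() - FileOffsets::serialized_len()..];
        let decoded = FileOffsets::decode_from(&mut Cursor::new(trailer))?;
        assert_eq!(offsets, decoded);

        Ok(())
    }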
}