use crate::{read_u64, safe_write, write_u64, Address, GrowFailed, Memory, Storable};
use std::borrow::Cow;
use std::fmt;
use std::marker::PhantomData;

#[cfg(test)]
mod tests;

/// Magic bytes identifying the index memory.
pub const INDEX_MAGIC: &[u8; 3] = b"GLI";
/// Magic bytes identifying the data memory.
pub const DATA_MAGIC: &[u8; 3] = b"GLD";

/// The current layout version.
const LAYOUT_VERSION: u8 = 1;

/// The size of the V1 header in bytes: 3 magic bytes plus 1 version byte.
const HEADER_V1_SIZE: u64 = 4;

/// Bytes reserved after the header for future extensions.
const RESERVED_HEADER_SIZE: u64 = 28;

/// The offset at which the payload starts in both memories.
const HEADER_OFFSET: u64 = HEADER_V1_SIZE + RESERVED_HEADER_SIZE;

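// Memory layout (derived from the constants and accessors below):
//
// Both memories start with a 4-byte header (3 magic bytes + 1 version byte)
// followed by 28 reserved bytes, so the payload begins at HEADER_OFFSET.
//
// Index memory, after the header:
//   - u64: the number of entries N
//   - N x u64: the cumulative end offset of each entry within the data section
//
// Data memory, after the header:
//   - the raw bytes of all entries, concatenated in insertion order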
struct HeaderV1 {
    magic: [u8; 3],
    version: u8,
}

/// Errors that can occur when initializing a log from existing memories.
#[derive(Debug, PartialEq, Eq)]
pub enum InitError {
    IncompatibleDataVersion {
        last_supported_version: u8,
        decoded_version: u8,
    },
    IncompatibleIndexVersion {
        last_supported_version: u8,
        decoded_version: u8,
    },
    InvalidIndex,
}

impl fmt::Display for InitError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            InitError::IncompatibleDataVersion {
                last_supported_version,
                decoded_version,
            } => write!(
                f,
                "Incompatible data version: last supported version is {}, but decoded version is {}",
                last_supported_version, decoded_version
            ),
            InitError::IncompatibleIndexVersion {
                last_supported_version,
                decoded_version,
            } => write!(
                f,
                "Incompatible index version: last supported version is {}, but decoded version is {}",
                last_supported_version, decoded_version
            ),
            InitError::InvalidIndex => write!(f, "Invalid index"),
        }
    }
}

/// Errors that can occur when appending an entry to the log.
#[derive(Debug, PartialEq, Eq)]
pub enum WriteError {
    GrowFailed { current_size: u64, delta: u64 },
}

impl From<GrowFailed> for WriteError {
    fn from(
        GrowFailed {
            current_size,
            delta,
        }: GrowFailed,
    ) -> Self {
        Self::GrowFailed {
            current_size,
            delta,
        }
    }
}

/// Error returned when the requested entry does not exist.
#[derive(Debug, PartialEq, Eq)]
pub struct NoSuchEntry;

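/// Append-only list of variable-sized entries stored in stable memory.
///
/// The log keeps its state in two memories: an index memory that holds the
/// number of entries and the cumulative end offset of every entry, and a
/// data memory that holds the raw entry bytes back to back.
///
/// A minimal usage sketch; the `VectorMemory` in-memory backend and the
/// `ic_stable_structures` crate name are assumptions here, so adjust the
/// imports to your setup:
///
/// ```ignore
/// use ic_stable_structures::{log::Log, VectorMemory};
///
/// // Create an empty log backed by two in-memory buffers.
/// let log = Log::new(VectorMemory::default(), VectorMemory::default());
///
/// // Append an entry and read it back by index.
/// let idx = log.append(&b"hello".to_vec()).expect("append failed");
/// assert_eq!(log.get(idx), Some(b"hello".to_vec()));
/// ```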
pub struct Log<T: Storable, INDEX: Memory, DATA: Memory> {
    index_memory: INDEX,
    data_memory: DATA,
    _marker: PhantomData<T>,
}

impl<T: Storable, INDEX: Memory, DATA: Memory> Log<T, INDEX, DATA> {
    /// Creates a new empty log, overwriting any previous contents of the
    /// given memories with fresh headers and a zero entry count.
    pub fn new(index_memory: INDEX, data_memory: DATA) -> Self {
        let log = Self {
            index_memory,
            data_memory,
            _marker: PhantomData,
        };
        Self::write_header(
            &log.index_memory,
            &HeaderV1 {
                magic: *INDEX_MAGIC,
                version: LAYOUT_VERSION,
            },
        );
        Self::write_header(
            &log.data_memory,
            &HeaderV1 {
                magic: *DATA_MAGIC,
                version: LAYOUT_VERSION,
            },
        );
        // The log is initially empty.
        write_u64(&log.index_memory, Address::from(HEADER_OFFSET), 0);
        log
    }

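    /// Initializes a log from the given memories.
    ///
    /// If the data memory is empty or does not carry the expected magic
    /// bytes, a fresh log is created as in `new`. Otherwise the existing
    /// headers are validated: a mismatched layout version or an index memory
    /// with unexpected magic bytes results in an error.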
    pub fn init(index_memory: INDEX, data_memory: DATA) -> Result<Self, InitError> {
        if data_memory.size() == 0 {
            return Ok(Self::new(index_memory, data_memory));
        }
        let data_header = Self::read_header(&data_memory);
        if &data_header.magic != DATA_MAGIC {
            return Ok(Self::new(index_memory, data_memory));
        }

        if data_header.version != LAYOUT_VERSION {
            return Err(InitError::IncompatibleDataVersion {
                last_supported_version: LAYOUT_VERSION,
                decoded_version: data_header.version,
            });
        }

        let index_header = Self::read_header(&index_memory);
        if &index_header.magic != INDEX_MAGIC {
            return Err(InitError::InvalidIndex);
        }

        if index_header.version != LAYOUT_VERSION {
            return Err(InitError::IncompatibleIndexVersion {
                last_supported_version: LAYOUT_VERSION,
                decoded_version: index_header.version,
            });
        }

        #[cfg(debug_assertions)]
        {
            assert_eq!(Ok(()), Self::validate_v1_index(&index_memory));
        }

        Ok(Self {
            index_memory,
            data_memory,
            _marker: PhantomData,
        })
    }

    /// Writes the header (magic bytes and version) at the start of the memory,
    /// growing the memory by one page if it is empty.
    fn write_header(memory: &impl Memory, header: &HeaderV1) {
        if memory.size() < 1 {
            assert!(
                memory.grow(1) != -1,
                "failed to allocate the first memory page"
            );
        }
        memory.write(0, &header.magic);
        memory.write(3, &[header.version]);
    }

    /// Reads the header (magic bytes and version) from the start of the memory.
    fn read_header(memory: &impl Memory) -> HeaderV1 {
        let mut magic = [0u8; 3];
        let mut version = [0u8; 1];
        memory.read(0, &mut magic);
        memory.read(3, &mut version);
        HeaderV1 {
            magic,
            version: version[0],
        }
    }

    /// Checks that the index entry offsets are stored in non-decreasing order
    /// (each offset must be at least as large as the previous one).
    #[cfg(debug_assertions)]
    fn validate_v1_index(memory: &INDEX) -> Result<(), String> {
        let num_entries = read_u64(memory, Address::from(HEADER_OFFSET));

        if num_entries == 0 {
            return Ok(());
        }

        let mut prev_entry = read_u64(memory, Address::from(HEADER_OFFSET + 8));
        for i in 1..num_entries {
            let entry = read_u64(memory, Address::from(HEADER_OFFSET + 8 + i * 8));
            if entry < prev_entry {
                return Err(format!("invalid entry I[{i}]: {entry} < {prev_entry}"));
            }
            prev_entry = entry;
        }
        Ok(())
    }

    /// Returns the underlying index and data memories, consuming the log.
    pub fn into_memories(self) -> (INDEX, DATA) {
        (self.index_memory, self.data_memory)
    }

    /// Returns true if the log does not contain any entries.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns the number of index memory bytes in use (header included).
    pub fn index_size_bytes(&self) -> u64 {
        let num_entries = read_u64(&self.index_memory, Address::from(HEADER_OFFSET));
        self.index_entry_offset(num_entries).get()
    }

    /// Returns the number of data memory bytes in use (header included).
    pub fn data_size_bytes(&self) -> u64 {
        self.log_size_bytes() + HEADER_OFFSET
    }

    /// Returns the total size in bytes of all entries in the log.
    pub fn log_size_bytes(&self) -> u64 {
        let num_entries = self.len();
        if num_entries == 0 {
            0
        } else {
            read_u64(&self.index_memory, self.index_entry_offset(num_entries - 1))
        }
    }

    /// Returns the number of entries in the log.
    pub fn len(&self) -> u64 {
        read_u64(&self.index_memory, Address::from(HEADER_OFFSET))
    }

    /// Returns the entry at the specified index, or `None` if the index is
    /// out of bounds.
    pub fn get(&self, idx: u64) -> Option<T> {
        let mut buf = vec![];
        self.read_entry(idx, &mut buf).ok()?;
        Some(T::from_bytes(Cow::Owned(buf)))
    }

    /// Returns an iterator over the entries of the log.
    pub fn iter(&self) -> Iter<'_, T, INDEX, DATA> {
        Iter {
            log: self,
            buf: vec![],
            pos: 0,
        }
    }

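    /// Reads the contents of the entry with the specified index into the
    /// provided buffer, resizing the buffer to the entry length.
    ///
    /// Returns [NoSuchEntry] if the index is out of bounds.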
    pub fn read_entry(&self, idx: u64, buf: &mut Vec<u8>) -> Result<(), NoSuchEntry> {
        let (offset, len) = self.entry_meta(idx).ok_or(NoSuchEntry)?;
        buf.resize(len, 0);
        self.data_memory.read(HEADER_OFFSET + offset, buf);
        Ok(())
    }

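    /// Appends a new entry to the log and returns its index.
    ///
    /// The entry bytes are written to the data memory, the new cumulative
    /// offset is recorded in the index memory, and the entry count is
    /// incremented last. Returns [WriteError::GrowFailed] if either memory
    /// cannot be grown to fit the new entry.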
    pub fn append(&self, item: &T) -> Result<u64, WriteError> {
        let idx = self.len();
        let data_offset = if idx == 0 {
            0
        } else {
            read_u64(&self.index_memory, self.index_entry_offset(idx - 1))
        };

        let bytes = item.to_bytes();
        let new_offset = data_offset
            .checked_add(bytes.len() as u64)
            .expect("address overflow");

        let entry_offset = HEADER_OFFSET
            .checked_add(data_offset)
            .expect("address overflow");

        debug_assert!(new_offset >= data_offset);

        // Write the entry bytes to the data memory.
        safe_write(&self.data_memory, entry_offset, &bytes[..])?;
        // Record the end offset of the new entry in the index memory.
        safe_write(
            &self.index_memory,
            self.index_entry_offset(idx).get(),
            &new_offset.to_le_bytes(),
        )?;
        // Increment the entry count only after both writes succeeded.
        write_u64(&self.index_memory, Address::from(HEADER_OFFSET), idx + 1);

        debug_assert_eq!(self.get(idx).unwrap().to_bytes(), bytes);

        Ok(idx)
    }

    /// Returns the offset and the length in bytes of the entry with the
    /// specified index, or `None` if the index is out of bounds.
    fn entry_meta(&self, idx: u64) -> Option<(u64, usize)> {
        if self.len() <= idx {
            return None;
        }

        if idx == 0 {
            Some((
                0,
                read_u64(&self.index_memory, self.index_entry_offset(0)) as usize,
            ))
        } else {
            let offset = read_u64(&self.index_memory, self.index_entry_offset(idx - 1));
            let next = read_u64(&self.index_memory, self.index_entry_offset(idx));

            debug_assert!(offset <= next);

            Some((offset, (next - offset) as usize))
        }
    }

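    /// Returns the address of the index slot for entry `idx`: the header and
    /// reserved bytes (HEADER_OFFSET), plus 8 bytes for the entry count, plus
    /// 8 bytes for each preceding entry.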
    fn index_entry_offset(&self, idx: u64) -> Address {
        Address::from(
            HEADER_OFFSET
                + std::mem::size_of::<u64>() as u64
                + idx * (std::mem::size_of::<u64>() as u64),
        )
    }
}

/// Iterator over the entries of a [Log].
pub struct Iter<'a, T, I, D>
where
    T: Storable,
    I: Memory,
    D: Memory,
{
    log: &'a Log<T, I, D>,
    buf: Vec<u8>,
    pos: u64,
}

impl<T, I, D> Iterator for Iter<'_, T, I, D>
where
    T: Storable,
    I: Memory,
    D: Memory,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        match self.log.read_entry(self.pos, &mut self.buf) {
            Ok(()) => {
                self.pos = self.pos.saturating_add(1);
                Some(T::from_bytes(Cow::Borrowed(&self.buf)))
            }
            Err(NoSuchEntry) => None,
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.log.len().saturating_sub(self.pos) as usize, None)
    }

    fn count(self) -> usize {
        let n = self.log.len().saturating_sub(self.pos);
        if n > usize::MAX as u64 {
            panic!("The number of items in the log {n} does not fit into usize");
        }
        n as usize
    }

    fn nth(&mut self, n: usize) -> Option<T> {
        self.pos = self.pos.saturating_add(n as u64);
        self.next()
    }
}