1use std::collections::HashMap;
2use std::sync::{Arc, Mutex};
3
/// Thread-safe, sparse byte store addressed by a (db number, byte offset) pair.
///
/// Offsets that were never written read back as `0`. Cloning a `DataStore`
/// yields a handle to the *same* underlying map (shared via `Arc`).
#[derive(Clone, Default)]
pub struct DataStore {
    // Sparse map from (db, offset) -> stored byte, behind Arc<Mutex<..>>
    // so clones of the store observe each other's writes.
    inner: Arc<Mutex<HashMap<(u16, u32), u8>>>,
}

impl DataStore {
    /// Creates an empty store.
    pub fn new() -> Self {
        Self::default()
    }

    /// Reads `count` bytes from `db` starting at `start`.
    ///
    /// Unwritten offsets are returned as `0`. The end of the range saturates
    /// at `u32::MAX`, so a read reaching past the address space is truncated
    /// rather than wrapping around.
    pub fn read_bytes(&self, db: u16, start: u32, count: u32) -> Vec<u8> {
        let guard = self.inner.lock().unwrap();
        let stop = start.saturating_add(count);
        let mut bytes = Vec::with_capacity((stop - start) as usize);
        for offset in start..stop {
            bytes.push(guard.get(&(db, offset)).copied().unwrap_or(0));
        }
        bytes
    }

    /// Writes `data` into `db` starting at `start`.
    ///
    /// Bytes whose offset would overflow `u32` are silently dropped,
    /// mirroring the truncation behavior of `read_bytes`.
    pub fn write_bytes(&self, db: u16, start: u32, data: &[u8]) {
        let mut guard = self.inner.lock().unwrap();
        for (i, &byte) in data.iter().enumerate() {
            // Skip (rather than wrap) any byte that falls past u32::MAX.
            let Some(offset) = start.checked_add(i as u32) else {
                continue;
            };
            guard.insert((db, offset), byte);
        }
    }
}
31
#[cfg(test)]
mod tests {
    use super::*;

    // Reading offsets that were never written must yield zeros.
    #[test]
    fn read_unset_returns_zeros() {
        let store = DataStore::new();
        assert_eq!(store.read_bytes(1, 0, 4), [0u8; 4]);
    }

    // A write followed by a read of the same range returns the same bytes.
    #[test]
    fn write_then_read_roundtrip() {
        let store = DataStore::new();
        let payload = [0xDE, 0xAD, 0xBE, 0xEF];
        store.write_bytes(1, 0, &payload);
        assert_eq!(store.read_bytes(1, 0, 4), payload);
    }

    // A read of a sub-range of a written span returns just that slice.
    #[test]
    fn partial_read_within_written_range() {
        let store = DataStore::new();
        store.write_bytes(1, 0, &[0x01, 0x02, 0x03, 0x04]);
        assert_eq!(store.read_bytes(1, 1, 2), [0x02, 0x03]);
    }

    // Writes at the same offset in different dbs must not collide.
    #[test]
    fn write_to_different_dbs_isolated() {
        let store = DataStore::new();
        for (db, byte) in [(1u16, 0xAAu8), (2, 0xBB)] {
            store.write_bytes(db, 0, &[byte]);
        }
        assert_eq!(store.read_bytes(1, 0, 1), [0xAA]);
        assert_eq!(store.read_bytes(2, 0, 1), [0xBB]);
    }
}