// wasi_dbms_memory/lib.rs
// Rust guideline compliant 2026-03-30

//! WASI file-backed [`MemoryProvider`] implementation for `wasm-dbms`.
//!
//! This crate provides [`WasiMemoryProvider`], a persistent storage backend
//! that uses a single flat file on the filesystem. It enables `wasm-dbms`
//! to run on any WASI-compliant runtime (Wasmer, Wasmtime, WasmEdge, etc.)
//! with durable data persistence.
//!
//! The backing file is byte-for-byte equivalent to IC stable memory:
//! a contiguous sequence of 64 KiB pages, zero-filled on allocation.

use std::fs::{File, OpenOptions};
use std::io::{Read as _, Seek as _, Write as _};
use std::path::{Path, PathBuf};

use wasm_dbms_api::memory::{MemoryError, MemoryResult};
use wasm_dbms_memory::MemoryProvider;

/// Size of a single memory page in bytes (64 KiB).
const PAGE_SIZE: u64 = 65536;
22
/// File-backed [`MemoryProvider`] for WASI runtimes.
///
/// Persists database pages as a single flat file on the filesystem.
/// Each page is 64 KiB, matching the WASM memory page size. The file
/// layout is byte-for-byte equivalent to IC stable memory, enabling
/// portable database snapshots.
///
/// Single-writer access is the caller's responsibility. WASM is
/// single-threaded by default and WASI lock support varies across runtimes.
///
/// # Examples
///
/// ```no_run
/// use wasi_dbms_memory::WasiMemoryProvider;
/// use wasm_dbms_memory::MemoryProvider;
///
/// let mut provider = WasiMemoryProvider::new("./data/mydb.bin").unwrap();
/// provider.grow(1).unwrap(); // allocate 1 page (64 KiB)
///
/// let data = b"hello";
/// provider.write(0, data).unwrap();
///
/// let mut buf = vec![0u8; 5];
/// provider.read(0, &mut buf).unwrap();
/// assert_eq!(&buf, data);
/// ```
#[derive(Debug)]
pub struct WasiMemoryProvider {
    /// Open read/write handle to the backing file.
    file: File,
    /// Filesystem location of the backing file, kept for [`Self::path`].
    path: PathBuf,
    /// Number of currently allocated 64 KiB pages (file size / PAGE_SIZE).
    pages: u64,
}
55
56impl WasiMemoryProvider {
57    /// Opens or creates a file-backed memory provider at `path`.
58    ///
59    /// If the file exists, the page count is inferred from the file size.
60    /// If the file does not exist, it is created empty (0 pages).
61    ///
62    /// # Errors
63    ///
64    /// Returns [`MemoryError::ProviderError`] if:
65    /// - The file cannot be opened or created.
66    /// - The existing file size is not a multiple of the page size (64 KiB).
67    pub fn new(path: impl AsRef<Path>) -> MemoryResult<Self> {
68        let path = path.as_ref();
69
70        let file = OpenOptions::new()
71            .read(true)
72            .write(true)
73            .create(true)
74            .truncate(false)
75            .open(path)
76            .map_err(|e| MemoryError::ProviderError(e.to_string()))?;
77
78        let file_size = file
79            .metadata()
80            .map_err(|e| MemoryError::ProviderError(e.to_string()))?
81            .len();
82
83        // reject files whose size isn't page-aligned
84        if file_size % PAGE_SIZE != 0 {
85            return Err(MemoryError::ProviderError(format!(
86                "file size {file_size} is not a multiple of page size {PAGE_SIZE}"
87            )));
88        }
89
90        let pages = file_size / PAGE_SIZE;
91
92        Ok(Self {
93            file,
94            path: path.to_path_buf(),
95            pages,
96        })
97    }
98
99    /// Returns the path to the backing file.
100    pub fn path(&self) -> &Path {
101        &self.path
102    }
103
104    /// Seeks the file handle to `offset`.
105    fn seek_to(&mut self, offset: u64) -> MemoryResult<()> {
106        self.file
107            .seek(std::io::SeekFrom::Start(offset))
108            .map_err(|e| MemoryError::ProviderError(e.to_string()))?;
109        Ok(())
110    }
111}
112
113impl TryFrom<&Path> for WasiMemoryProvider {
114    type Error = MemoryError;
115
116    fn try_from(path: &Path) -> Result<Self, Self::Error> {
117        Self::new(path)
118    }
119}
120
121impl TryFrom<PathBuf> for WasiMemoryProvider {
122    type Error = MemoryError;
123
124    fn try_from(path: PathBuf) -> Result<Self, Self::Error> {
125        Self::new(path.as_path())
126    }
127}
128
129impl MemoryProvider for WasiMemoryProvider {
130    const PAGE_SIZE: u64 = PAGE_SIZE;
131
132    fn size(&self) -> u64 {
133        self.pages * Self::PAGE_SIZE
134    }
135
136    fn pages(&self) -> u64 {
137        self.pages
138    }
139
140    fn grow(&mut self, new_pages: u64) -> MemoryResult<u64> {
141        let previous_pages = self.pages;
142        let new_size = self.size() + new_pages * Self::PAGE_SIZE;
143
144        // extend with zeros via set_len
145        self.file
146            .set_len(new_size)
147            .map_err(|e| MemoryError::ProviderError(e.to_string()))?;
148
149        self.pages += new_pages;
150        Ok(previous_pages)
151    }
152
153    fn read(&mut self, offset: u64, buf: &mut [u8]) -> MemoryResult<()> {
154        if offset + buf.len() as u64 > self.size() {
155            return Err(MemoryError::OutOfBounds);
156        }
157
158        self.seek_to(offset)?;
159        self.file
160            .read_exact(buf)
161            .map_err(|e| MemoryError::ProviderError(e.to_string()))
162    }
163
164    fn write(&mut self, offset: u64, buf: &[u8]) -> MemoryResult<()> {
165        if offset + buf.len() as u64 > self.size() {
166            return Err(MemoryError::OutOfBounds);
167        }
168
169        self.seek_to(offset)?;
170        self.file
171            .write_all(buf)
172            .map_err(|e| MemoryError::ProviderError(e.to_string()))
173    }
174}
175
#[cfg(test)]
mod tests {

    use std::sync::atomic::{AtomicU64, Ordering};

    use super::*;

    /// Atomic counter to generate unique temp file paths across tests.
    static COUNTER: AtomicU64 = AtomicU64::new(0);

    /// RAII guard owning a unique temporary database file path.
    ///
    /// Removes the file on drop, so cleanup happens even when a test
    /// assertion panics. (The previous free-function `cleanup(&path)` was
    /// skipped whenever an assert failed, leaking temp files across runs;
    /// its doc comment also wrongly claimed to return the parent directory.)
    struct TempDb(PathBuf);

    impl TempDb {
        /// Reserves a unique path under the OS temp dir; the file itself
        /// is not created here.
        fn new() -> Self {
            let id = COUNTER.fetch_add(1, Ordering::Relaxed);
            let dir = std::env::temp_dir().join("wasi_dbms_tests");
            std::fs::create_dir_all(&dir).unwrap();
            Self(dir.join(format!("test_{id}.db")))
        }

        /// Path to the (possibly not-yet-created) database file.
        fn path(&self) -> &Path {
            &self.0
        }
    }

    impl Drop for TempDb {
        fn drop(&mut self) {
            // Best-effort cleanup; ignore errors (e.g. file never created).
            let _ = std::fs::remove_file(&self.0);
        }
    }

    #[test]
    fn test_should_create_new_empty_file() {
        let db = TempDb::new();
        let provider = WasiMemoryProvider::new(db.path()).unwrap();
        assert_eq!(provider.pages(), 0);
        assert_eq!(provider.size(), 0);
        assert!(db.path().exists());
    }

    #[test]
    fn test_should_open_existing_file_with_correct_page_count() {
        let db = TempDb::new();

        // pre-create a file with exactly 2 pages
        {
            let mut f = File::create(db.path()).unwrap();
            f.write_all(&vec![0u8; PAGE_SIZE as usize * 2]).unwrap();
        }

        let provider = WasiMemoryProvider::new(db.path()).unwrap();
        assert_eq!(provider.pages(), 2);
        assert_eq!(provider.size(), PAGE_SIZE * 2);
    }

    #[test]
    fn test_should_reject_non_page_aligned_file() {
        let db = TempDb::new();

        {
            let mut f = File::create(db.path()).unwrap();
            f.write_all(&vec![0u8; PAGE_SIZE as usize + 100]).unwrap();
        }

        let result = WasiMemoryProvider::new(db.path());
        assert!(result.is_err());
        assert!(matches!(
            result.err().unwrap(),
            MemoryError::ProviderError(_)
        ));
    }

    #[test]
    fn test_should_grow_memory() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        assert_eq!(provider.pages(), 0);

        let previous = provider.grow(2).unwrap();
        assert_eq!(previous, 0);
        assert_eq!(provider.pages(), 2);
        assert_eq!(provider.size(), PAGE_SIZE * 2);

        let previous = provider.grow(1).unwrap();
        assert_eq!(previous, 2);
        assert_eq!(provider.pages(), 3);
        assert_eq!(provider.size(), PAGE_SIZE * 3);
    }

    #[test]
    fn test_should_read_and_write() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        let data = vec![1, 2, 3, 4, 5];
        provider.write(0, &data).unwrap();

        let mut buf = vec![0u8; 5];
        provider.read(0, &mut buf).unwrap();
        assert_eq!(buf, data);
    }

    #[test]
    fn test_should_write_at_arbitrary_offset() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        let data = vec![0xAA, 0xBB, 0xCC];
        provider.write(100, &data).unwrap();

        // verify zeroed region before
        let mut before = vec![0xFFu8; 100];
        provider.read(0, &mut before).unwrap();
        assert!(before.iter().all(|&b| b == 0));

        // verify written data
        let mut buf = vec![0u8; 3];
        provider.read(100, &mut buf).unwrap();
        assert_eq!(buf, data);
    }

    #[test]
    fn test_should_overwrite_existing_data() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        provider.write(0, &[1, 2, 3]).unwrap();
        provider.write(0, &[4, 5, 6]).unwrap();

        let mut buf = vec![0u8; 3];
        provider.read(0, &mut buf).unwrap();
        assert_eq!(buf, vec![4, 5, 6]);
    }

    #[test]
    fn test_should_read_and_write_across_page_boundary() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(2).unwrap();

        // write data spanning two pages
        let offset = PAGE_SIZE - 3;
        let data = vec![0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE];
        provider.write(offset, &data).unwrap();

        let mut buf = vec![0u8; 6];
        provider.read(offset, &mut buf).unwrap();
        assert_eq!(buf, data);
    }

    #[test]
    fn test_should_not_read_out_of_bounds() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        let mut buf = vec![0u8; 10];
        let result = provider.read(PAGE_SIZE - 5, &mut buf);
        assert!(result.is_err());
        assert!(matches!(result.err().unwrap(), MemoryError::OutOfBounds));
    }

    #[test]
    fn test_should_not_write_out_of_bounds() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        let data = vec![1, 2, 3, 4, 5];
        let result = provider.write(PAGE_SIZE - 3, &data);
        assert!(result.is_err());
        assert!(matches!(result.err().unwrap(), MemoryError::OutOfBounds));
    }

    #[test]
    fn test_should_not_read_from_empty_provider() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();

        let mut buf = vec![0u8; 1];
        let result = provider.read(0, &mut buf);
        assert!(result.is_err());
        assert!(matches!(result.err().unwrap(), MemoryError::OutOfBounds));
    }

    #[test]
    fn test_should_not_write_to_empty_provider() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();

        let result = provider.write(0, &[1]);
        assert!(result.is_err());
        assert!(matches!(result.err().unwrap(), MemoryError::OutOfBounds));
    }

    #[test]
    fn test_should_persist_data_across_reopen() {
        let db = TempDb::new();

        // write data
        {
            let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
            provider.grow(1).unwrap();
            provider.write(10, &[42, 43, 44]).unwrap();
        }

        // reopen and verify
        {
            let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
            assert_eq!(provider.pages(), 1);

            let mut buf = vec![0u8; 3];
            provider.read(10, &mut buf).unwrap();
            assert_eq!(buf, vec![42, 43, 44]);
        }
    }

    #[test]
    fn test_should_grow_zero_pages() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();

        let previous = provider.grow(0).unwrap();
        assert_eq!(previous, 0);
        assert_eq!(provider.pages(), 0);
        assert_eq!(provider.size(), 0);
    }

    #[test]
    fn test_should_read_and_write_exact_page_boundary() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        // write at the last byte of the page
        provider.write(PAGE_SIZE - 1, &[0xFF]).unwrap();

        let mut buf = vec![0u8; 1];
        provider.read(PAGE_SIZE - 1, &mut buf).unwrap();
        assert_eq!(buf, vec![0xFF]);
    }

    #[test]
    fn test_should_read_and_write_empty_buffer() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        // empty reads/writes should succeed
        provider.write(0, &[]).unwrap();
        let mut buf = vec![];
        provider.read(0, &mut buf).unwrap();
    }

    #[test]
    fn test_should_return_path() {
        let db = TempDb::new();
        let provider = WasiMemoryProvider::new(db.path()).unwrap();
        assert_eq!(provider.path(), db.path());
    }

    #[test]
    fn test_should_convert_from_path_ref() {
        let db = TempDb::new();
        let provider = WasiMemoryProvider::try_from(db.path()).unwrap();
        assert_eq!(provider.pages(), 0);
    }

    #[test]
    fn test_should_convert_from_pathbuf() {
        let db = TempDb::new();
        let provider = WasiMemoryProvider::try_from(db.path().to_path_buf()).unwrap();
        assert_eq!(provider.pages(), 0);
    }

    #[test]
    fn test_should_write_full_page() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        let data = vec![0xAB; PAGE_SIZE as usize];
        provider.write(0, &data).unwrap();

        let mut buf = vec![0u8; PAGE_SIZE as usize];
        provider.read(0, &mut buf).unwrap();
        assert_eq!(buf, data);
    }

    #[test]
    fn test_should_grow_preserves_existing_data() {
        let db = TempDb::new();
        let mut provider = WasiMemoryProvider::new(db.path()).unwrap();
        provider.grow(1).unwrap();

        let data = vec![1, 2, 3, 4, 5];
        provider.write(0, &data).unwrap();

        provider.grow(1).unwrap();

        // original data intact
        let mut buf = vec![0u8; 5];
        provider.read(0, &mut buf).unwrap();
        assert_eq!(buf, data);

        // new page is zeroed
        let mut new_page = vec![0xFFu8; PAGE_SIZE as usize];
        provider.read(PAGE_SIZE, &mut new_page).unwrap();
        assert!(new_page.iter().all(|&b| b == 0));
    }
}
505}