moltendb_core/engine/storage/mod.rs

// ─── storage/mod.rs ──────────────────────────────────────────────────────────
// This is the root module for all storage backends. It does three things:
//
//   1. Declares and conditionally exposes the concrete backend modules
//      (disk, encrypted, wasm) based on the compile target.
//
//   2. Defines the StorageBackend trait — the single interface that the rest
//      of the engine uses to read/write data. Any type that implements this
//      trait can be used as a storage backend, whether it writes to a disk
//      file, an encrypted file, or a browser OPFS file.
//
//   3. Provides the startup replay functions (stream_into_state and
//      apply_entry) that rebuild the in-memory database state from the
//      persistent log on server/worker startup.
//
// The StorageBackend trait is the key abstraction that makes MoltenDB's
// "same engine, different storage" design possible. The engine (mod.rs,
// operations.rs, handlers.rs) never imports a concrete storage type — it
// only ever holds an Arc<dyn StorageBackend>. This means you can swap the
// storage backend without changing any engine code.
// ─────────────────────────────────────────────────────────────────────────────
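//
// A minimal wiring sketch (illustrative only; the constructor calls below are
// placeholders, not the crate's real signatures):
//
//     use std::sync::Arc;
//
//     let storage: Arc<dyn StorageBackend> = if use_encryption {
//         Arc::new(EncryptedStorage::new(/* inner backend, key */))
//     } else {
//         Arc::new(AsyncDiskStorage::new(/* log path */))
//     };
//     // From here on, the engine only sees Arc<dyn StorageBackend>;
//     // swapping the backend is a one-line change at this call site.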

// ── Conditional module declarations ──────────────────────────────────────────
// These cfg attributes mean "only compile this when NOT targeting wasm32".
// On native (server) builds we get disk.rs and tiered.rs; encrypted.rs has no
// cfg gate and is compiled on every target. On WASM (browser) builds we get
// wasm.rs. This gating keeps browser-incompatible code (file I/O, Tokio
// tasks) out of the WASM binary.

#[cfg(not(target_arch = "wasm32"))]
mod disk;
mod encrypted;
// tiered.rs provides MmapLogReader (memory-mapped cold log reads) and
// TieredStorage (hot + cold two-tier backend for large-scale deployments).
#[cfg(not(target_arch = "wasm32"))]
mod tiered;
// Re-export the concrete types so callers can write `storage::AsyncDiskStorage`
// instead of `storage::disk::AsyncDiskStorage`.
#[cfg(not(target_arch = "wasm32"))]
pub use disk::{AsyncDiskStorage, SyncDiskStorage};
pub use encrypted::EncryptedStorage;
// Re-export TieredStorage so engine/mod.rs and main.rs can use it directly.
#[cfg(not(target_arch = "wasm32"))]
pub use tiered::TieredStorage;

// On WASM builds, expose the browser-side OPFS storage.
#[cfg(target_arch = "wasm32")]
pub mod wasm;
#[cfg(target_arch = "wasm32")]
pub use wasm::OpfsStorage;

// ── Shared imports ────────────────────────────────────────────────────────────
// These are used by both the trait definition and the replay functions below.
use crate::engine::types::{DbError, LogEntry};
// serde_json::Value is a dynamically-typed JSON value (object, array, string,
// number, bool, or null). All document data is stored as Value. It is only
// imported directly here under the "schema" feature, which keeps compiled
// JSON Schema validators keyed by collection.
#[cfg(feature = "schema")]
use serde_json::Value;
use std::ops::ControlFlow;
// DashMap is a concurrent hash map — like HashMap but safe to read/write from
// multiple threads simultaneously without a global lock.
// DashSet is the set equivalent.
use dashmap::{DashMap, DashSet};

// ─── StorageBackend trait ─────────────────────────────────────────────────────
//
// This is the core abstraction of the storage layer. Any type that implements
// the four required methods (write_entry, read_log, compact, read_at) can
// serve as a MoltenDB storage backend; get_size and stream_log_into have
// default implementations that may be overridden.
//
// The trait requires Send + Sync because the backend is stored inside an
// Arc<dyn StorageBackend> and shared across multiple Tokio tasks/threads.
//   • Send  = the type can be moved to another thread
//   • Sync  = the type can be referenced from multiple threads simultaneously
// ─────────────────────────────────────────────────────────────────────────────
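//
// A sharing sketch (assumes a Tokio runtime, as on native builds; `storage`
// and `entry` are illustrative locals):
//
//     let s: Arc<dyn StorageBackend> = Arc::clone(&storage);
//     tokio::spawn(async move {
//         // Send lets the cloned Arc move into this task; Sync lets many
//         // tasks call the &self methods concurrently.
//         let _ = s.write_entry(&entry); // entry: LogEntry
//     });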

/// The core storage abstraction. Implement this trait to add a new storage backend.
///
/// `LogEntry` is the atomic unit of data in MoltenDB: the write path operates
/// on whole entries, never raw bytes. Only `read_at` returns raw bytes, and
/// those are the serialized form of an entry.
pub trait StorageBackend: Send + Sync {
    /// Append a single log entry to the persistent store.
    ///
    /// This is called on every insert, update, delete, and index creation.
    /// Implementations may buffer writes (async) or flush immediately (sync).
    fn write_entry(&self, entry: &LogEntry) -> Result<(), DbError>;

    /// Read all log entries from persistent storage into a Vec.
    ///
    /// Called on startup to rebuild the in-memory state, and by EncryptedStorage,
    /// which must decrypt entries before they can be streamed into state.
    /// For large databases, prefer `stream_log_into`, which avoids holding the
    /// full log in RAM.
    fn read_log(&self) -> Result<Vec<LogEntry>, DbError>;

    /// Compact the log by writing only the current state (removing dead entries).
    ///
    /// `entries` is the complete current state of the database — every live
    /// document as a single INSERT entry. The implementation should atomically
    /// replace the existing log with this minimal set.
    fn compact(&self, entries: Vec<LogEntry>) -> Result<(), DbError>;

    /// Read exactly `length` bytes starting at `offset` from the log.
    ///
    /// This is used to fetch "Cold" documents from the append-only log without
    /// loading the entire file into memory.
    fn read_at(&self, offset: u64, length: u32) -> Result<Vec<u8>, DbError>;

    /// Return the current size of the persistent log file in bytes.
    ///
    /// Used by the WASM worker to implement size-based auto-compaction — the JS
    /// side calls `get_size` after every INSERT batch and compacts if the file
    /// exceeds the configured threshold (default: 5 MB).
    ///
    /// The default implementation returns 0 (no size information available).
    /// `OpfsStorage` overrides this with a real `FileSystemSyncAccessHandle.getSize()` call.
    /// Native disk backends don't need this — they use OS-level file metadata instead.
    #[allow(dead_code)]
    fn get_size(&self) -> Result<u64, DbError> {
        Ok(0)
    }

    /// Stream log entries into state one at a time, without loading the full
    /// log into RAM. Implementations may load a binary snapshot first and only
    /// replay the delta lines written after the snapshot.
    ///
    /// The default implementation falls back to `read_log()` for backwards
    /// compatibility (used by WASM/EncryptedStorage, which don't have snapshots).
    ///
    /// Returns the total number of entries processed.
    fn stream_log_into(&self, f: &mut dyn FnMut(LogEntry, u32) -> ControlFlow<(), ()>) -> Result<u64, DbError> {
        // Default: load everything into a Vec, then iterate.
        // Concrete implementations (AsyncDiskStorage, SyncDiskStorage) override
        // this with a more efficient snapshot + streaming approach.
        let entries = self.read_log()?;
        let mut count = 0u64;
        for entry in entries {
            // The default re-serializes each entry to recover its byte length.
            // Overriding implementations report the real on-disk length instead.
            let json = serde_json::to_vec(&entry).unwrap_or_default();
            let length = json.len() as u32;
            if let ControlFlow::Break(_) = f(entry, length) {
                return Ok(count);
            }
            count += 1;
        }
        Ok(count)
    }
}
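
// For a sense of what implementing the trait takes, here is a minimal
// in-memory backend sketch of the kind a test suite might use. It is
// illustrative only (not part of the crate): error handling is simplified
// (serialization failures are ignored, mirroring the trait's default
// stream_log_into), and the byte buffer imitates the on-disk layout of one
// JSON line per entry so read_at offsets line up with replay pointers.
//
//     use std::sync::Mutex;
//
//     struct MemStorage {
//         buf: Mutex<Vec<u8>>, // the whole "log file": newline-framed JSON
//     }
//
//     impl StorageBackend for MemStorage {
//         fn write_entry(&self, entry: &LogEntry) -> Result<(), DbError> {
//             let mut buf = self.buf.lock().unwrap();
//             buf.extend_from_slice(&serde_json::to_vec(entry).unwrap_or_default());
//             buf.push(b'\n'); // the +1 newline framing that replay assumes
//             Ok(())
//         }
//
//         fn read_log(&self) -> Result<Vec<LogEntry>, DbError> {
//             let buf = self.buf.lock().unwrap();
//             Ok(buf
//                 .split(|&b| b == b'\n')
//                 .filter_map(|line| serde_json::from_slice(line).ok()) // skips empty/bad lines
//                 .collect())
//         }
//
//         fn compact(&self, entries: Vec<LogEntry>) -> Result<(), DbError> {
//             let mut buf = self.buf.lock().unwrap();
//             buf.clear(); // drop the old log, rewrite only the live entries
//             for e in &entries {
//                 buf.extend_from_slice(&serde_json::to_vec(e).unwrap_or_default());
//                 buf.push(b'\n');
//             }
//             Ok(())
//         }
//
//         fn read_at(&self, offset: u64, length: u32) -> Result<Vec<u8>, DbError> {
//             let buf = self.buf.lock().unwrap();
//             let start = offset as usize;
//             Ok(buf
//                 .get(start..start + length as usize)
//                 .map(<[u8]>::to_vec)
//                 .unwrap_or_default())
//         }
//     }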

// ─── Startup replay ───────────────────────────────────────────────────────────
//
// When the server starts (or the WASM worker initialises), we need to rebuild
// the in-memory state from the persistent log. These functions handle that.
//
// The process is:
//   1. Call storage.stream_log_into() — this either loads a binary snapshot
//      + delta (fast path) or streams the full log line-by-line (slow path).
//   2. For each LogEntry, call apply_entry() to update the in-memory DashMaps.
//   3. After all entries are applied, the in-memory state matches the log.
// ─────────────────────────────────────────────────────────────────────────────
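//
// A typical startup call looks roughly like this (illustrative sketch; shown
// for a build without the "schema" feature, which adds a fourth `schemas`
// argument):
//
//     let state = DashMap::new();
//     let indexes = DashMap::new();
//     let n = stream_into_state(storage.as_ref(), &state, &indexes)?;
//     tracing::info!("replayed {n} log entries");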

/// Drive startup by streaming all log entries from storage into the in-memory
/// state and index maps. Uses snapshot + delta replay when available.
///
/// `state`   — the main data store: collection name → (key → document state)
/// `indexes` — the index store: "collection:field" → (field value → set of keys)
///
/// Returns the total number of log entries processed.
pub fn stream_into_state(
    storage: &dyn StorageBackend,
    state: &DashMap<String, DashMap<String, crate::engine::types::DocumentState>>,
    indexes: &DashMap<String, DashMap<String, DashSet<String>>>,
    #[cfg(feature = "schema")] schemas: &DashMap<String, std::sync::Arc<(Value, jsonschema::Validator)>>,
) -> Result<u64, DbError> {
    let mut count = 0u64;
    let mut offset = 0u64;
    let mut tx_buffer: Vec<(LogEntry, crate::engine::types::RecordPointer)> = Vec::new();
    let mut active_tx: Option<String> = None;

    // stream_log_into calls our closure once per LogEntry, providing the
    // LogEntry and its raw byte length in the log file.
    storage.stream_log_into(&mut |entry, length| {
        let pointer = crate::engine::types::RecordPointer {
            offset,
            length,
        };

        match entry.cmd.as_str() {
            "TX_BEGIN" => {
                active_tx = Some(entry.key.clone());
                tx_buffer.clear();
            }
            "TX_COMMIT" => {
                if active_tx.as_ref() == Some(&entry.key) {
                    // Flush the buffered transaction into the DashMaps.
                    for (e, p) in tx_buffer.drain(..) {
                        // A length of 0 means the entry came from the snapshot,
                        // so it has no valid log pointer. Keep it Hot.
                        let pointer = if p.length == 0 { None } else { Some(p) };
                        apply_entry(
                            &e,
                            state,
                            indexes,
                            #[cfg(feature = "schema")] schemas,
                            pointer,
                        );
                    }
                    active_tx = None;
                } else {
                    tracing::warn!("⚠️  TX_COMMIT seen for unknown or inactive transaction ID: {}. Ignoring.", entry.key);
                }
            }
            _ => {
                if active_tx.is_some() {
                    // Inside a transaction: hold in RAM until the commit.
                    tx_buffer.push((entry, pointer));
                } else {
                    // Standard non-transactional entry.
                    // A length of 0 means the entry came from the snapshot, so
                    // we keep it Hot (pointer = None).
                    let p = if length == 0 { None } else { Some(pointer) };
                    apply_entry(
                        &entry,
                        state,
                        indexes,
                        #[cfg(feature = "schema")] schemas,
                        p,
                    );
                }
            }
        }

        count += 1;
        // +1 for the newline character appended to each JSON line in the log.
        // length == 0 means this entry came from the snapshot (not the log
        // file), so we must NOT advance the file offset for it.
        if length > 0 {
            offset += (length + 1) as u64;
        }
        ControlFlow::Continue(())
    })?;

    // If active_tx is still Some here, the log ended mid-transaction (a crash
    // before TX_COMMIT). The buffered entries are simply dropped, which keeps
    // the transaction atomic: either all of it replays or none of it does.
    Ok(count)
}
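
// To make the transaction framing above concrete, a replayed log tail might
// look like this (field names and values are illustrative; the real log is
// one serialized LogEntry per line):
//
//     {"cmd":"TX_BEGIN","key":"tx-42",...}
//     {"cmd":"INSERT","collection":"users","key":"u1",...}   <- buffered
//     {"cmd":"INSERT","collection":"users","key":"u2",...}   <- buffered
//     {"cmd":"TX_COMMIT","key":"tx-42",...}                  <- buffer flushed
//
// Had the file ended after the two INSERTs (a crash before TX_COMMIT), the
// buffer would have been dropped and neither document would appear in memory.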

/// Apply a single log entry to the in-memory state and indexes.
///
/// If `pointer` is provided (during log replay), INSERT entries are stored
/// as `DocumentState::Cold(pointer)` to save memory. Live writes stay `Hot`.
pub fn apply_entry(
    entry: &LogEntry,
    state: &DashMap<String, DashMap<String, crate::engine::types::DocumentState>>,
    indexes: &DashMap<String, DashMap<String, DashSet<String>>>,
    #[cfg(feature = "schema")] schemas: &DashMap<String, std::sync::Arc<(Value, jsonschema::Validator)>>,
    pointer: Option<crate::engine::types::RecordPointer>,
) {
    match entry.cmd.as_str() {
        "INSERT" => {
            let col = state
                .entry(entry.collection.clone())
                .or_insert_with(DashMap::new);

            // During replay, we use the pointer (Cold). For live writes, we store the Value (Hot).
            let doc_state = if let Some(p) = pointer {
                crate::engine::types::DocumentState::Cold(p)
            } else {
                crate::engine::types::DocumentState::Hot(entry.value.clone())
            };

            col.insert(entry.key.clone(), doc_state);

            // Indexes ALWAYS store values in RAM to keep searches O(1).
            crate::engine::indexing::index_doc(indexes, &entry.collection, &entry.key, &entry.value);
        }
        "DELETE" => {
            if let Some(col) = state.get(&entry.collection) {
                // Unindexing requires the document's Value, but a Cold document
                // only carries a RecordPointer; recovering its Value would need
                // a disk read via read_at. For this first version of the hybrid
                // store we only unindex Hot documents, so a replayed DELETE of
                // a Cold document can leave stale index entries behind.
                // TODO: fetch Cold values (or rework unindex_doc) so replayed
                // deletes clean up the indexes too.
                if let Some(old_state) = col.get(&entry.key) {
                    if let crate::engine::types::DocumentState::Hot(old_val) = old_state.value() {
                        crate::engine::indexing::unindex_doc(
                            indexes,
                            &entry.collection,
                            &entry.key,
                            old_val,
                        );
                    }
                }
                col.remove(&entry.key);
            }
        }
        "DROP" => {
            // Remove the entire collection from the state map.
            state.remove(&entry.collection);
            // Remove all indexes that belong to this collection.
            // retain() keeps only entries where the closure returns true.
            // We drop any index whose key starts with "collection:" (e.g. "users:role").
            indexes.retain(|k, _| !k.starts_with(&format!("{}:", entry.collection)));
        }
        "INDEX" => {
            // Register an empty index slot for "collection:field".
            // The index will be populated as subsequent INSERT entries are applied.
            // `entry.key` holds the field name (e.g. "role" for "users:role").
            indexes.insert(
                format!("{}:{}", entry.collection, entry.key),
                DashMap::new(),
            );
        }
        #[cfg(feature = "schema")]
        "SCHEMA" => {
            // Re-compile and register the schema during replay.
            if let Ok(validator) = jsonschema::validator_for(&entry.value) {
                schemas.insert(entry.collection.clone(), std::sync::Arc::new((entry.value.clone(), validator)));
            }
        }
        // Unknown command types are silently ignored for forward compatibility.
        // If a future version of MoltenDB adds a new command, older versions
        // will simply skip those entries rather than crashing.
        _ => {}
    }
}
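
// Index layout example: after applying
//     INDEX  { collection: "users", key: "role" }
//     INSERT { collection: "users", key: "u1", value: {"role": "admin", ...} }
// the indexes map contains "users:role" → { "admin" → {"u1"} }, i.e. the
// field value maps to the set of document keys holding it. This is what keeps
// indexed lookups O(1) even when the documents themselves are stored Cold.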

// ── Legacy replay (kept for reference) ───────────────────────────────────────
// replay_log_entries() replayed a slice of already-decoded log entries into
// RAM state. It was an alternative to stream_into_state() for entries that
// had already been loaded into memory (e.g. after decryption by
// EncryptedStorage). It targets the old Value-based state map rather than
// DocumentState, and is kept commented out as a reference for the simpler
// pre-hybrid logic.

// pub fn replay_log_entries(
//     entries: &[LogEntry],
//     state: &DashMap<String, DashMap<String, Value>>,
//     indexes: &DashMap<String, DashMap<String, DashSet<String>>>,
// ) {
//     for entry in entries {
//         match entry.cmd.as_str() {
//             "INSERT" => {
//                 // Get or create the collection, then insert the document.
//                 let col = state
//                     .entry(entry.collection.clone())
//                     .or_insert_with(DashMap::new);
//                 col.insert(entry.key.clone(), entry.value.clone());
//                 // Keep indexes in sync with the inserted document.
//                 crate::engine::indexing::index_doc(indexes, &entry.collection, &entry.key, &entry.value);
//             }
//             "DELETE" => {
//                 if let Some(col) = state.get(&entry.collection) {
//                     // Remove from indexes before removing from state.
//                     if let Some(old_val) = col.get(&entry.key) {
//                         crate::engine::indexing::unindex_doc(
//                             indexes,
//                             &entry.collection,
//                             &entry.key,
//                             old_val.value(),
//                         );
//                     }
//                     col.remove(&entry.key);
//                 }
//             }
//             "DROP" => {
//                 // Remove the collection and all its associated indexes.
//                 state.remove(&entry.collection);
//                 indexes.retain(|k, _| !k.starts_with(&format!("{}:", entry.collection)));
//             }
//             "INDEX" => {
//                 // Register an empty index slot.
//                 indexes.insert(
//                     format!("{}:{}", entry.collection, entry.key),
//                     DashMap::new(),
//                 );
//             }
//             _ => {}
//         }
//     }
//     println!("✅ Database restored & Indexes rebuilt!");
// }