d_engine/storage/storage_engine.rs

#![doc = include_str!("../docs/server_guide/customize-storage-engine.md")]

use std::ops::RangeInclusive;
use std::sync::Arc;

#[cfg(test)]
use mockall::automock;
use tonic::async_trait;

use crate::proto::common::Entry;
use crate::proto::common::LogId;
use crate::Error;
use crate::HardState;

/// High-performance storage abstraction for Raft consensus
///
/// Design principles:
/// - Zero-cost abstractions through static dispatch
/// - Physical separation of log and metadata stores
/// - Async-ready for I/O parallelism
/// - Minimal interface for maximum performance
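///
/// # Example
///
/// A minimal sketch of an implementation, assuming hypothetical
/// `FileLogStore` and `FileMetaStore` types that implement [`LogStore`]
/// and [`MetaStore`]:
///
/// ```ignore
/// struct FileStorageEngine {
///     log: Arc<FileLogStore>,
///     meta: Arc<FileMetaStore>,
/// }
///
/// impl StorageEngine for FileStorageEngine {
///     type LogStore = FileLogStore;
///     type MetaStore = FileMetaStore;
///
///     fn log_store(&self) -> Arc<Self::LogStore> {
///         Arc::clone(&self.log)
///     }
///
///     fn meta_store(&self) -> Arc<Self::MetaStore> {
///         Arc::clone(&self.meta)
///     }
/// }
/// ```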
pub trait StorageEngine: Send + Sync + 'static {
    /// Associated log store type
    type LogStore: LogStore;

    /// Associated metadata store type
    type MetaStore: MetaStore;

    /// Get log storage handle
    fn log_store(&self) -> Arc<Self::LogStore>;

    /// Get metadata storage handle
    fn meta_store(&self) -> Arc<Self::MetaStore>;
}

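/// Raft log storage operations
///
/// # Example
///
/// A minimal usage sketch (illustrative only), generic over any
/// `LogStore` implementation:
///
/// ```ignore
/// async fn append_and_read<L: LogStore>(
///     store: &L,
///     entries: Vec<Entry>,
/// ) -> Result<Vec<Entry>, Error> {
///     // Persist the batch, then read everything back up to the last index.
///     store.persist_entries(entries).await?;
///     let last = store.last_index();
///     store.get_entries(1..=last)
/// }
/// ```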
#[cfg_attr(test, automock)]
#[async_trait]
pub trait LogStore: Send + Sync + 'static {
    /// Batch-persist entries to disk (optimized for sequential writes)
    ///
    /// # Performance
    /// Implementations should use batch operations and avoid
    /// per-entry synchronization. Expected throughput: >100k ops/sec.
    async fn persist_entries(
        &self,
        entries: Vec<Entry>,
    ) -> Result<(), Error>;

    /// Get a single log entry by index
    async fn entry(
        &self,
        index: u64,
    ) -> Result<Option<Entry>, Error>;

    /// Get entries in the range [start, end] (inclusive)
    ///
    /// # Performance
    /// Should use efficient range scans. Expected latency: <1ms for 10k entries.
    fn get_entries(
        &self,
        range: RangeInclusive<u64>,
    ) -> Result<Vec<Entry>, Error>;

    /// Remove log entries up to the specified index
    async fn purge(
        &self,
        cutoff_index: LogId,
    ) -> Result<(), Error>;

    /// Truncate the log from the specified index onward
    async fn truncate(
        &self,
        from_index: u64,
    ) -> Result<(), Error>;

    /// Optional: synchronously flush pending writes (use with caution)
    fn flush(&self) -> Result<(), Error> {
        Ok(()) // Default no-op for engines with auto-flush
    }

    /// Optional: asynchronously flush pending writes (use with caution)
    async fn flush_async(&self) -> Result<(), Error> {
        Ok(()) // Default no-op for engines with auto-flush
    }

    /// Reset the log store to its initial empty state
    async fn reset(&self) -> Result<(), Error>;

    /// Get the last log index (optimized for frequent access)
    ///
    /// # Implementation note
    /// Should maintain a cached value updated on write operations
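    ///
    /// A sketch of one way to satisfy this, assuming a hypothetical
    /// `cached_last_index: AtomicU64` field maintained by write operations:
    ///
    /// ```ignore
    /// fn last_index(&self) -> u64 {
    ///     // Cheap atomic load instead of a storage lookup.
    ///     self.cached_last_index.load(Ordering::Acquire)
    /// }
    /// ```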
    fn last_index(&self) -> u64;
}

/// Metadata storage operations
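///
/// # Example
///
/// A minimal usage sketch (illustrative only), generic over any
/// `MetaStore` implementation:
///
/// ```ignore
/// fn persist_vote<M: MetaStore>(meta: &M, state: &HardState) -> Result<(), Error> {
///     meta.save_hard_state(state)?;
///     // Explicit flush; this is a no-op for engines that auto-flush.
///     meta.flush()
/// }
/// ```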
#[cfg_attr(test, automock)]
#[async_trait]
pub trait MetaStore: Send + Sync + 'static {
    /// Atomically persist hard state (current term and votedFor)
    fn save_hard_state(
        &self,
        state: &HardState,
    ) -> Result<(), Error>;

    /// Load persisted hard state
    fn load_hard_state(&self) -> Result<Option<HardState>, Error>;

    /// Optional: synchronously flush pending writes (use with caution)
    fn flush(&self) -> Result<(), Error> {
        Ok(()) // Default no-op for engines with auto-flush
    }

    /// Optional: asynchronously flush pending writes (use with caution)
    async fn flush_async(&self) -> Result<(), Error> {
        Ok(()) // Default no-op for engines with auto-flush
    }
}