use std::{collections::HashMap, path::Path, sync::Arc};
use rocksdb::{DBCompressionType, Options, WriteBatch, DB};
use tokio::sync::Mutex;
use super::StorageAdapter;
/// Identifier reported by [`StorageAdapter::id`] for this backend.
pub const STORAGE_ID: &str = "RocksDB";

/// A [`StorageAdapter`] persisting records to a RocksDB database on disk.
#[derive(Debug)]
pub struct RocksdbStorageAdapter {
    // Database handle shared across tasks behind an async mutex so guards can
    // be held across `.await` points. NOTE(review): rocksdb's `DB` operations
    // used here take `&self`; the mutex may serialize more than necessary —
    // confirm before relaxing to a bare `Arc<DB>`.
    db: Arc<Mutex<DB>>,
}
impl RocksdbStorageAdapter {
    /// Opens the RocksDB database at `path`, creating it if it does not exist.
    ///
    /// The database is configured to use LZ4 compression and to create any
    /// missing column families on open.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying RocksDB database cannot be opened.
    pub fn new(path: impl AsRef<Path>) -> crate::wallet::Result<Self> {
        let mut db_opts = Options::default();
        db_opts.create_if_missing(true);
        db_opts.create_missing_column_families(true);
        db_opts.set_compression_type(DBCompressionType::Lz4);

        let database = DB::open(&db_opts, path)?;

        Ok(Self {
            db: Arc::new(Mutex::new(database)),
        })
    }
}
#[async_trait::async_trait]
impl StorageAdapter for RocksdbStorageAdapter {
    /// Returns the identifier of this storage backend.
    fn id(&self) -> &'static str {
        STORAGE_ID
    }

    /// Fetches the record stored under `key`, or `None` if the key is absent.
    ///
    /// Stored bytes are converted to `String` lossily; invalid UTF-8 sequences
    /// are replaced rather than reported as an error.
    async fn get(&self, key: &str) -> crate::wallet::Result<Option<String>> {
        let db = self.db.lock().await;
        let bytes = db.get(key.as_bytes())?;
        Ok(bytes.map(|b| String::from_utf8_lossy(&b).into_owned()))
    }

    /// Stores `record` under `key`, overwriting any existing value.
    async fn set(&mut self, key: &str, record: String) -> crate::wallet::Result<()> {
        let db = self.db.lock().await;
        db.put(key.as_bytes(), record.as_bytes())?;
        Ok(())
    }

    /// Stores all `records` in a single atomic write batch.
    async fn batch_set(&mut self, records: HashMap<String, String>) -> crate::wallet::Result<()> {
        // Build the batch before taking the lock so the guard is held only
        // for the actual write.
        let mut batch = WriteBatch::default();
        for (k, v) in records {
            batch.put(k.as_bytes(), v.as_bytes());
        }
        let db = self.db.lock().await;
        db.write(batch)?;
        Ok(())
    }

    /// Deletes the record stored under `key`.
    async fn remove(&mut self, key: &str) -> crate::wallet::Result<()> {
        let db = self.db.lock().await;
        db.delete(key.as_bytes())?;
        Ok(())
    }
}