use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::ffi::{CStr, CString};
use std::fmt;
use std::fs;
use std::iter;
use std::path::Path;
use std::path::PathBuf;
use std::ptr;
use std::slice;
use std::str;
use std::sync::Arc;
use std::time::Duration;
use crate::column_family::ColumnFamilyTtl;
use crate::ffi_util::CSlice;
use crate::{
ColumnFamily, ColumnFamilyDescriptor, CompactOptions, DBIteratorWithThreadMode,
DBPinnableSlice, DBRawIteratorWithThreadMode, DBWALIterator, DEFAULT_COLUMN_FAMILY_NAME,
Direction, Error, FlushOptions, IngestExternalFileOptions, IteratorMode, Options, ReadOptions,
SnapshotWithThreadMode, WaitForCompactOptions, WriteBatch, WriteBatchWithIndex, WriteOptions,
column_family::{AsColumnFamilyRef, BoundColumnFamily, UnboundColumnFamily},
db_options::{ImportColumnFamilyOptions, OptionsMustOutliveDB},
ffi,
ffi_util::{
CStrLike, convert_rocksdb_error, from_cstr_and_free, from_cstr_without_free,
opt_bytes_to_ptr, raw_data, to_cpath,
},
};
use rust_librocksdb_sys::{
rocksdb_livefile_destroy, rocksdb_livefile_t, rocksdb_livefiles_destroy, rocksdb_livefiles_t,
};
use libc::{self, c_char, c_int, c_uchar, c_void, size_t};
use parking_lot::RwLock;
// Per-thread cached default option objects, created lazily on first use.
// The convenience wrappers below (get/put/flush/etc. without an `_opt`
// suffix) borrow these instead of constructing a fresh RocksDB options
// struct through FFI on every call.
thread_local! { static DEFAULT_READ_OPTS: ReadOptions = ReadOptions::default(); }
thread_local! { static DEFAULT_WRITE_OPTS: WriteOptions = WriteOptions::default(); }
thread_local! { static DEFAULT_FLUSH_OPTS: FlushOptions = FlushOptions::default(); }
// Read options with `prefix_same_as_start` enabled, for prefix iteration.
// Wrapped in a RefCell so callers can adjust the cached value in place.
thread_local! { static PREFIX_READ_OPTS: RefCell<ReadOptions> = RefCell::new({ let mut o = ReadOptions::default(); o.set_prefix_same_as_start(true); o }); }
/// A key range `[start_key, end_key)` borrowed from the caller, used by
/// range-oriented DB operations.
// NOTE(review): end-exclusivity follows the usual RocksDB range convention —
// confirm against the operations that consume this type.
pub struct Range<'a> {
    start_key: &'a [u8],
    end_key: &'a [u8],
}

impl<'a> Range<'a> {
    /// Creates a new range from `start_key` to `end_key`.
    pub fn new(start_key: &'a [u8], end_key: &'a [u8]) -> Range<'a> {
        Range { start_key, end_key }
    }
}
/// Outcome of a `get_into_buffer*` lookup.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum GetIntoBufferResult {
    /// The key does not exist.
    NotFound,
    /// The key exists; the full value of the given length was copied.
    Found(usize),
    /// The key exists, but its value of the given length did not fit
    /// into the caller's buffer (nothing useful was copied).
    BufferTooSmall(usize),
}

impl GetIntoBufferResult {
    /// Returns `true` when the key exists, regardless of whether the
    /// value fit in the destination buffer.
    #[inline]
    pub fn is_found(&self) -> bool {
        !self.is_not_found()
    }

    /// Returns `true` when the key does not exist.
    #[inline]
    pub fn is_not_found(&self) -> bool {
        match self {
            Self::NotFound => true,
            Self::Found(_) | Self::BufferTooSmall(_) => false,
        }
    }

    /// Length of the stored value, or `None` when the key was absent.
    #[inline]
    pub fn value_size(&self) -> Option<usize> {
        if let Self::Found(size) | Self::BufferTooSmall(size) = self {
            Some(*size)
        } else {
            None
        }
    }
}
/// Reusable existence probe over a raw iterator: answers "does any key
/// with this prefix exist?" without materializing values.
pub struct PrefixProber<'a, D: DBAccess> {
    raw: DBRawIteratorWithThreadMode<'a, D>,
}

impl<D: DBAccess> PrefixProber<'_, D> {
    /// Seeks to `prefix` and reports whether the first key at or after it
    /// actually starts with `prefix`.
    ///
    /// # Errors
    /// Propagates any iterator error reported by the underlying DB.
    pub fn exists(&mut self, prefix: &[u8]) -> Result<bool, Error> {
        self.raw.seek(prefix);
        if self.raw.valid() {
            if let Some(found) = self.raw.key() {
                return Ok(found.starts_with(prefix));
            }
        }
        // Invalid position: distinguish "past the end" from a real error.
        self.raw.status()?;
        Ok(false)
    }
}
/// Strategy trait abstracting how column-family handles are stored:
/// either plainly (single-threaded access) or behind a lock (shared).
pub trait ThreadMode {
    /// Builds the mode-specific CF storage from raw handles produced at
    /// DB open time.
    fn new_cf_map_internal(
        cf_map: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
    ) -> Self;
    /// Drops every stored CF handle (their Drop impls release FFI state).
    fn drop_all_cfs_internal(&mut self);
}
/// CF storage for exclusive (`&mut self`) column-family mutation; no lock.
pub struct SingleThreaded {
    pub(crate) cfs: HashMap<String, ColumnFamily>,
}

/// CF storage allowing create/drop of column families from `&self`
/// across threads; the map is guarded by a parking_lot RwLock and
/// handles are reference-counted.
pub struct MultiThreaded {
    pub(crate) cfs: RwLock<HashMap<String, Arc<UnboundColumnFamily>>>,
}
impl ThreadMode for SingleThreaded {
fn new_cf_map_internal(
cfs: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
) -> Self {
Self {
cfs: cfs
.into_iter()
.map(|(n, c)| (n, ColumnFamily { inner: c }))
.collect(),
}
}
fn drop_all_cfs_internal(&mut self) {
self.cfs.clear();
}
}
impl ThreadMode for MultiThreaded {
fn new_cf_map_internal(
cfs: BTreeMap<String, *mut ffi::rocksdb_column_family_handle_t>,
) -> Self {
Self {
cfs: RwLock::new(
cfs.into_iter()
.map(|(n, c)| (n, Arc::new(UnboundColumnFamily { inner: c })))
.collect(),
),
}
}
fn drop_all_cfs_internal(&mut self) {
self.cfs.write().clear();
}
}
/// Access to the raw `rocksdb_t` pointer; implemented by the concrete
/// inner handle types (plain DB, and potentially other wrappers).
pub trait DBInner {
    fn inner(&self) -> *mut ffi::rocksdb_t;
}
/// Shared state of a RocksDB database, generic over the column-family
/// threading strategy `T` and the inner handle type `D`.
pub struct DBCommon<T: ThreadMode, D: DBInner> {
    pub(crate) inner: D,
    // Mode-specific CF handle storage and the filesystem path the DB was
    // opened at.
    cfs: T,
    path: PathBuf,
    // Keeps the `Options` (and per-CF options) alive for the lifetime of
    // the DB; RocksDB borrows from them internally.
    _outlive: Vec<OptionsMustOutliveDB>,
}
/// Minimal database surface required by iterators, snapshots and other
/// helpers, so they can work over any DB-like type.
pub trait DBAccess {
    /// # Safety
    /// The returned snapshot must be released via `release_snapshot`
    /// before the database is closed.
    unsafe fn create_snapshot(&self) -> *const ffi::rocksdb_snapshot_t;
    /// # Safety
    /// `snapshot` must have been produced by `create_snapshot` on this DB.
    unsafe fn release_snapshot(&self, snapshot: *const ffi::rocksdb_snapshot_t);
    /// # Safety
    /// Caller owns the returned iterator and must destroy it; it must not
    /// outlive the database.
    unsafe fn create_iterator(&self, readopts: &ReadOptions) -> *mut ffi::rocksdb_iterator_t;
    /// # Safety
    /// As `create_iterator`; additionally `cf_handle` must be a live
    /// handle belonging to this database.
    unsafe fn create_iterator_cf(
        &self,
        cf_handle: *mut ffi::rocksdb_column_family_handle_t,
        readopts: &ReadOptions,
    ) -> *mut ffi::rocksdb_iterator_t;
    /// Point lookup in the default column family.
    fn get_opt<K: AsRef<[u8]>>(
        &self,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<Vec<u8>>, Error>;
    /// Point lookup in the given column family.
    fn get_cf_opt<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<Vec<u8>>, Error>;
    /// Zero-copy point lookup (value stays pinned in RocksDB memory).
    fn get_pinned_opt<K: AsRef<[u8]>>(
        &'_ self,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<DBPinnableSlice<'_>>, Error>;
    /// Zero-copy point lookup in the given column family.
    fn get_pinned_cf_opt<K: AsRef<[u8]>>(
        &'_ self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<DBPinnableSlice<'_>>, Error>;
    /// Batched lookup of many keys in the default column family; one
    /// result per key, in input order.
    fn multi_get_opt<K, I>(
        &self,
        keys: I,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<Vec<u8>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = K>;
    /// Batched lookup of `(column family, key)` pairs.
    fn multi_get_cf_opt<'b, K, I, W>(
        &self,
        keys_cf: I,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<Vec<u8>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = (&'b W, K)>,
        W: AsColumnFamilyRef + 'b;
}
impl<T: ThreadMode, D: DBInner> DBAccess for DBCommon<T, D> {
    // The `get*`/`multi_get*` methods below look recursive but are not:
    // Rust method resolution prefers *inherent* methods over trait
    // methods, so `self.get_opt(...)` resolves to the inherent
    // `DBCommon::get_opt` defined later in this file.
    unsafe fn create_snapshot(&self) -> *const ffi::rocksdb_snapshot_t {
        unsafe { ffi::rocksdb_create_snapshot(self.inner.inner()) }
    }
    unsafe fn release_snapshot(&self, snapshot: *const ffi::rocksdb_snapshot_t) {
        unsafe {
            ffi::rocksdb_release_snapshot(self.inner.inner(), snapshot);
        }
    }
    unsafe fn create_iterator(&self, readopts: &ReadOptions) -> *mut ffi::rocksdb_iterator_t {
        unsafe { ffi::rocksdb_create_iterator(self.inner.inner(), readopts.inner) }
    }
    unsafe fn create_iterator_cf(
        &self,
        cf_handle: *mut ffi::rocksdb_column_family_handle_t,
        readopts: &ReadOptions,
    ) -> *mut ffi::rocksdb_iterator_t {
        unsafe { ffi::rocksdb_create_iterator_cf(self.inner.inner(), readopts.inner, cf_handle) }
    }
    // Delegates to the inherent method (see note above).
    fn get_opt<K: AsRef<[u8]>>(
        &self,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<Vec<u8>>, Error> {
        self.get_opt(key, readopts)
    }
    fn get_cf_opt<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<Vec<u8>>, Error> {
        self.get_cf_opt(cf, key, readopts)
    }
    fn get_pinned_opt<K: AsRef<[u8]>>(
        &'_ self,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<DBPinnableSlice<'_>>, Error> {
        self.get_pinned_opt(key, readopts)
    }
    fn get_pinned_cf_opt<K: AsRef<[u8]>>(
        &'_ self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<DBPinnableSlice<'_>>, Error> {
        self.get_pinned_cf_opt(cf, key, readopts)
    }
    fn multi_get_opt<K, Iter>(
        &self,
        keys: Iter,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<Vec<u8>>, Error>>
    where
        K: AsRef<[u8]>,
        Iter: IntoIterator<Item = K>,
    {
        self.multi_get_opt(keys, readopts)
    }
    fn multi_get_cf_opt<'b, K, Iter, W>(
        &self,
        keys_cf: Iter,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<Vec<u8>>, Error>>
    where
        K: AsRef<[u8]>,
        Iter: IntoIterator<Item = (&'b W, K)>,
        W: AsColumnFamilyRef + 'b,
    {
        self.multi_get_cf_opt(keys_cf, readopts)
    }
}
/// Owner of the raw `rocksdb_t` handle for a directly-opened database.
pub struct DBWithThreadModeInner {
    inner: *mut ffi::rocksdb_t,
}

impl DBInner for DBWithThreadModeInner {
    fn inner(&self) -> *mut ffi::rocksdb_t {
        self.inner
    }
}

impl Drop for DBWithThreadModeInner {
    // Closes the database handle. CF handles are stored in `DBCommon.cfs`,
    // which is declared before `inner` — NOTE(review): field drop order in
    // `DBCommon` puts `inner: D` first, so confirm CF handles are released
    // before/independently of `rocksdb_close` as RocksDB requires.
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_close(self.inner);
        }
    }
}
/// A directly-opened database, generic over the CF threading strategy.
pub type DBWithThreadMode<T> = DBCommon<T, DBWithThreadModeInner>;
/// The default `DB` type: CF mutation requires `&mut self` unless the
/// `multi-threaded-cf` feature is enabled.
#[cfg(not(feature = "multi-threaded-cf"))]
pub type DB = DBWithThreadMode<SingleThreaded>;
#[cfg(feature = "multi-threaded-cf")]
pub type DB = DBWithThreadMode<MultiThreaded>;
// SAFETY: NOTE(review) — these rely on RocksDB's documented guarantee that
// a rocksdb_t handle may be used from multiple threads concurrently for
// reads/writes; the raw pointers themselves are what inhibit the auto
// impls. Confirm `T: ThreadMode` storage (lock or exclusive access) covers
// all CF-map mutation paths.
unsafe impl<T: ThreadMode + Send, I: DBInner> Send for DBCommon<T, I> {}
unsafe impl<T: ThreadMode, I: DBInner> Sync for DBCommon<T, I> {}
/// Internal selector for which `rocksdb_open*` FFI entry point to use.
enum AccessType<'a> {
    /// Normal read-write open.
    ReadWrite,
    /// Read-only open; optionally error out if a WAL/log file exists.
    ReadOnly { error_if_log_file_exist: bool },
    /// Secondary instance that tails a primary at another path.
    Secondary { secondary_path: &'a Path },
    /// Open with a database-wide TTL (seconds granularity at the FFI).
    WithTTL { ttl: Duration },
}
impl<T: ThreadMode> DBWithThreadMode<T> {
pub fn open_default<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
let mut opts = Options::default();
opts.create_if_missing(true);
Self::open(&opts, path)
}
pub fn open<P: AsRef<Path>>(opts: &Options, path: P) -> Result<Self, Error> {
Self::open_cf(opts, path, None::<&str>)
}
    /// Opens the database read-only (default CF only).
    pub fn open_for_read_only<P: AsRef<Path>>(
        opts: &Options,
        path: P,
        error_if_log_file_exist: bool,
    ) -> Result<Self, Error> {
        Self::open_cf_for_read_only(opts, path, None::<&str>, error_if_log_file_exist)
    }
    /// Opens a secondary instance that follows the primary at
    /// `primary_path`, storing its own state under `secondary_path`.
    pub fn open_as_secondary<P: AsRef<Path>>(
        opts: &Options,
        primary_path: P,
        secondary_path: P,
    ) -> Result<Self, Error> {
        Self::open_cf_as_secondary(opts, primary_path, secondary_path, None::<&str>)
    }
    /// Opens the database with a DB-wide TTL (default CF only).
    pub fn open_with_ttl<P: AsRef<Path>>(
        opts: &Options,
        path: P,
        ttl: Duration,
    ) -> Result<Self, Error> {
        Self::open_cf_descriptors_with_ttl(opts, path, std::iter::empty(), ttl)
    }
    /// Opens with TTL plus the named column families, each using default
    /// CF options.
    pub fn open_cf_with_ttl<P, I, N>(
        opts: &Options,
        path: P,
        cfs: I,
        ttl: Duration,
    ) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = N>,
        N: AsRef<str>,
    {
        let cfs = cfs
            .into_iter()
            .map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
        Self::open_cf_descriptors_with_ttl(opts, path, cfs, ttl)
    }
    /// Opens with TTL plus fully-specified column family descriptors.
    pub fn open_cf_descriptors_with_ttl<P, I>(
        opts: &Options,
        path: P,
        cfs: I,
        ttl: Duration,
    ) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = ColumnFamilyDescriptor>,
    {
        Self::open_cf_descriptors_internal(opts, path, cfs, &AccessType::WithTTL { ttl })
    }
    /// Opens read-write with the named column families (default CF
    /// options for each).
    pub fn open_cf<P, I, N>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = N>,
        N: AsRef<str>,
    {
        let cfs = cfs
            .into_iter()
            .map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
        Self::open_cf_descriptors_internal(opts, path, cfs, &AccessType::ReadWrite)
    }
    /// Opens read-write with `(name, options)` pairs per column family.
    pub fn open_cf_with_opts<P, I, N>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = (N, Options)>,
        N: AsRef<str>,
    {
        let cfs = cfs
            .into_iter()
            .map(|(name, opts)| ColumnFamilyDescriptor::new(name.as_ref(), opts));
        Self::open_cf_descriptors(opts, path, cfs)
    }
    /// Opens read-only with the named column families.
    pub fn open_cf_for_read_only<P, I, N>(
        opts: &Options,
        path: P,
        cfs: I,
        error_if_log_file_exist: bool,
    ) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = N>,
        N: AsRef<str>,
    {
        let cfs = cfs
            .into_iter()
            .map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
        Self::open_cf_descriptors_internal(
            opts,
            path,
            cfs,
            &AccessType::ReadOnly {
                error_if_log_file_exist,
            },
        )
    }
    /// Opens read-only with `(name, options)` pairs per column family.
    pub fn open_cf_with_opts_for_read_only<P, I, N>(
        db_opts: &Options,
        path: P,
        cfs: I,
        error_if_log_file_exist: bool,
    ) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = (N, Options)>,
        N: AsRef<str>,
    {
        let cfs = cfs
            .into_iter()
            .map(|(name, cf_opts)| ColumnFamilyDescriptor::new(name.as_ref(), cf_opts));
        Self::open_cf_descriptors_internal(
            db_opts,
            path,
            cfs,
            &AccessType::ReadOnly {
                error_if_log_file_exist,
            },
        )
    }
    /// Opens read-only with fully-specified CF descriptors.
    pub fn open_cf_descriptors_read_only<P, I>(
        opts: &Options,
        path: P,
        cfs: I,
        error_if_log_file_exist: bool,
    ) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = ColumnFamilyDescriptor>,
    {
        Self::open_cf_descriptors_internal(
            opts,
            path,
            cfs,
            &AccessType::ReadOnly {
                error_if_log_file_exist,
            },
        )
    }
    /// Opens a secondary instance with the named column families.
    pub fn open_cf_as_secondary<P, I, N>(
        opts: &Options,
        primary_path: P,
        secondary_path: P,
        cfs: I,
    ) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = N>,
        N: AsRef<str>,
    {
        let cfs = cfs
            .into_iter()
            .map(|name| ColumnFamilyDescriptor::new(name.as_ref(), Options::default()));
        Self::open_cf_descriptors_internal(
            opts,
            primary_path,
            cfs,
            &AccessType::Secondary {
                secondary_path: secondary_path.as_ref(),
            },
        )
    }
    /// Opens a secondary instance with fully-specified CF descriptors.
    pub fn open_cf_descriptors_as_secondary<P, I>(
        opts: &Options,
        path: P,
        secondary_path: P,
        cfs: I,
    ) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = ColumnFamilyDescriptor>,
    {
        Self::open_cf_descriptors_internal(
            opts,
            path,
            cfs,
            &AccessType::Secondary {
                secondary_path: secondary_path.as_ref(),
            },
        )
    }
    /// Opens read-write with fully-specified CF descriptors.
    pub fn open_cf_descriptors<P, I>(opts: &Options, path: P, cfs: I) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = ColumnFamilyDescriptor>,
    {
        Self::open_cf_descriptors_internal(opts, path, cfs, &AccessType::ReadWrite)
    }
    /// Shared open path: prepares CF descriptors, calls the access-type
    /// specific FFI open, and assembles the `DBCommon`.
    fn open_cf_descriptors_internal<P, I>(
        opts: &Options,
        path: P,
        cfs: I,
        access_type: &AccessType,
    ) -> Result<Self, Error>
    where
        P: AsRef<Path>,
        I: IntoIterator<Item = ColumnFamilyDescriptor>,
    {
        let cfs: Vec<_> = cfs.into_iter().collect();
        // Keep the DB options and every CF's options alive as long as the
        // DB itself; RocksDB borrows from them internally.
        let outlive = iter::once(opts.outlive.clone())
            .chain(cfs.iter().map(|cf| cf.options.outlive.clone()))
            .collect();
        let cpath = to_cpath(&path)?;
        if let Err(e) = fs::create_dir_all(&path) {
            return Err(Error::new(format!(
                "Failed to create RocksDB directory: `{e:?}`."
            )));
        }
        let db: *mut ffi::rocksdb_t;
        let mut cf_map = BTreeMap::new();
        if cfs.is_empty() {
            db = Self::open_raw(opts, &cpath, access_type)?;
        } else {
            let mut cfs_v = cfs;
            // RocksDB requires the default CF to be listed when opening
            // with explicit column families; append it if absent.
            if !cfs_v.iter().any(|cf| cf.name == DEFAULT_COLUMN_FAMILY_NAME) {
                cfs_v.push(ColumnFamilyDescriptor {
                    name: String::from(DEFAULT_COLUMN_FAMILY_NAME),
                    options: Options::default(),
                    ttl: ColumnFamilyTtl::SameAsDb,
                });
            }
            // Build parallel FFI arrays: names, options, out-handles.
            // `c_cfs` must stay alive until after the open call — the
            // name pointers borrow from it.
            let c_cfs: Vec<CString> = cfs_v
                .iter()
                .map(|cf| CString::new(cf.name.as_bytes()).unwrap())
                .collect();
            let cfnames: Vec<_> = c_cfs.iter().map(|cf| cf.as_ptr()).collect();
            let mut cfhandles: Vec<_> = cfs_v.iter().map(|_| ptr::null_mut()).collect();
            let cfopts: Vec<_> = cfs_v
                .iter()
                .map(|cf| cf.options.inner.cast_const())
                .collect();
            db = Self::open_cf_raw(
                opts,
                &cpath,
                &cfs_v,
                &cfnames,
                &cfopts,
                &mut cfhandles,
                access_type,
            )?;
            // A null handle would make later CF use UB; fail fast instead.
            for handle in &cfhandles {
                if handle.is_null() {
                    return Err(Error::new(
                        "Received null column family handle from DB.".to_owned(),
                    ));
                }
            }
            for (cf_desc, inner) in cfs_v.iter().zip(cfhandles) {
                cf_map.insert(cf_desc.name.clone(), inner);
            }
        }
        if db.is_null() {
            return Err(Error::new("Could not initialize database.".to_owned()));
        }
        Ok(Self {
            inner: DBWithThreadModeInner { inner: db },
            path: path.as_ref().to_path_buf(),
            cfs: T::new_cf_map_internal(cf_map),
            _outlive: outlive,
        })
    }
    /// FFI open without explicit column families; dispatches on the
    /// access type.
    fn open_raw(
        opts: &Options,
        cpath: &CString,
        access_type: &AccessType,
    ) -> Result<*mut ffi::rocksdb_t, Error> {
        let db = unsafe {
            match *access_type {
                AccessType::ReadOnly {
                    error_if_log_file_exist,
                } => ffi_try!(ffi::rocksdb_open_for_read_only(
                    opts.inner,
                    cpath.as_ptr(),
                    c_uchar::from(error_if_log_file_exist),
                )),
                AccessType::ReadWrite => {
                    ffi_try!(ffi::rocksdb_open(opts.inner, cpath.as_ptr()))
                }
                AccessType::Secondary { secondary_path } => {
                    ffi_try!(ffi::rocksdb_open_as_secondary(
                        opts.inner,
                        cpath.as_ptr(),
                        to_cpath(secondary_path)?.as_ptr(),
                    ))
                }
                // The C API takes whole seconds; sub-second precision of
                // `ttl` is discarded, and very large TTLs truncate to
                // c_int.
                AccessType::WithTTL { ttl } => ffi_try!(ffi::rocksdb_open_with_ttl(
                    opts.inner,
                    cpath.as_ptr(),
                    ttl.as_secs() as c_int,
                )),
            }
        };
        Ok(db)
    }
    /// FFI open with explicit column families. The parallel slices
    /// (`cfnames`, `cfopts`, `cfhandles`) must all have `cfs_v.len()`
    /// entries; handles are written in place by RocksDB.
    #[allow(clippy::pedantic)]
    fn open_cf_raw(
        opts: &Options,
        cpath: &CString,
        cfs_v: &[ColumnFamilyDescriptor],
        cfnames: &[*const c_char],
        cfopts: &[*const ffi::rocksdb_options_t],
        cfhandles: &mut [*mut ffi::rocksdb_column_family_handle_t],
        access_type: &AccessType,
    ) -> Result<*mut ffi::rocksdb_t, Error> {
        let db = unsafe {
            match *access_type {
                AccessType::ReadOnly {
                    error_if_log_file_exist,
                } => ffi_try!(ffi::rocksdb_open_for_read_only_column_families(
                    opts.inner,
                    cpath.as_ptr(),
                    cfs_v.len() as c_int,
                    cfnames.as_ptr(),
                    cfopts.as_ptr(),
                    cfhandles.as_mut_ptr(),
                    c_uchar::from(error_if_log_file_exist),
                )),
                AccessType::ReadWrite => ffi_try!(ffi::rocksdb_open_column_families(
                    opts.inner,
                    cpath.as_ptr(),
                    cfs_v.len() as c_int,
                    cfnames.as_ptr(),
                    cfopts.as_ptr(),
                    cfhandles.as_mut_ptr(),
                )),
                AccessType::Secondary { secondary_path } => {
                    ffi_try!(ffi::rocksdb_open_as_secondary_column_families(
                        opts.inner,
                        cpath.as_ptr(),
                        to_cpath(secondary_path)?.as_ptr(),
                        cfs_v.len() as c_int,
                        cfnames.as_ptr(),
                        cfopts.as_ptr(),
                        cfhandles.as_mut_ptr(),
                    ))
                }
                AccessType::WithTTL { ttl } => {
                    // Per-CF TTLs: `Disabled` maps to i32::MAX (effectively
                    // never), `SameAsDb` inherits the DB-wide value.
                    let ttls: Vec<_> = cfs_v
                        .iter()
                        .map(|cf| match cf.ttl {
                            ColumnFamilyTtl::Disabled => i32::MAX,
                            ColumnFamilyTtl::Duration(duration) => duration.as_secs() as i32,
                            ColumnFamilyTtl::SameAsDb => ttl.as_secs() as i32,
                        })
                        .collect();
                    ffi_try!(ffi::rocksdb_open_column_families_with_ttl(
                        opts.inner,
                        cpath.as_ptr(),
                        cfs_v.len() as c_int,
                        cfnames.as_ptr(),
                        cfopts.as_ptr(),
                        cfhandles.as_mut_ptr(),
                        ttls.as_ptr(),
                    ))
                }
            }
        };
        Ok(db)
    }
pub fn delete_range_cf_opt<K: AsRef<[u8]>>(
&self,
cf: &impl AsColumnFamilyRef,
from: K,
to: K,
writeopts: &WriteOptions,
) -> Result<(), Error> {
let from = from.as_ref();
let to = to.as_ref();
unsafe {
ffi_try!(ffi::rocksdb_delete_range_cf(
self.inner.inner(),
writeopts.inner,
cf.inner(),
from.as_ptr() as *const c_char,
from.len() as size_t,
to.as_ptr() as *const c_char,
to.len() as size_t,
));
Ok(())
}
}
    /// `delete_range_cf_opt` with the cached default write options.
    pub fn delete_range_cf<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        from: K,
        to: K,
    ) -> Result<(), Error> {
        DEFAULT_WRITE_OPTS.with(|opts| self.delete_range_cf_opt(cf, from, to, opts))
    }
    /// Applies a write batch atomically with the given write options.
    pub fn write_opt(&self, batch: &WriteBatch, writeopts: &WriteOptions) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_write(
                self.inner.inner(),
                writeopts.inner,
                batch.inner
            ));
        }
        Ok(())
    }
    /// `write_opt` with the cached default write options.
    pub fn write(&self, batch: &WriteBatch) -> Result<(), Error> {
        DEFAULT_WRITE_OPTS.with(|opts| self.write_opt(batch, opts))
    }
    /// Applies a write batch with the WAL disabled (faster, but recent
    /// writes are lost on crash).
    pub fn write_without_wal(&self, batch: &WriteBatch) -> Result<(), Error> {
        let mut wo = WriteOptions::new();
        wo.disable_wal(true);
        self.write_opt(batch, &wo)
    }
    /// Applies an indexed write batch with default write options.
    pub fn write_wbwi(&self, wbwi: &WriteBatchWithIndex) -> Result<(), Error> {
        DEFAULT_WRITE_OPTS.with(|opts| self.write_wbwi_opt(wbwi, opts))
    }
    /// Applies an indexed write batch with the given write options.
    pub fn write_wbwi_opt(
        &self,
        wbwi: &WriteBatchWithIndex,
        writeopts: &WriteOptions,
    ) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_write_writebatch_wi(
                self.inner.inner(),
                writeopts.inner,
                wbwi.inner
            ));
            Ok(())
        }
    }
    /// Prevents RocksDB from deleting obsolete files (e.g. while taking a
    /// backup).
    pub fn disable_file_deletions(&self) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_disable_file_deletions(self.inner.inner()));
        }
        Ok(())
    }
    /// Re-enables deletion of obsolete files.
    pub fn enable_file_deletions(&self) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_enable_file_deletions(self.inner.inner()));
        }
        Ok(())
    }
}
impl<T: ThreadMode, D: DBInner> DBCommon<T, D> {
    /// Assembles a `DBCommon` from already-constructed parts.
    pub(crate) fn new(inner: D, cfs: T, path: PathBuf, outlive: Vec<OptionsMustOutliveDB>) -> Self {
        Self {
            inner,
            cfs,
            path,
            _outlive: outlive,
        }
    }
    /// Lists the column family names present in the database at `path`
    /// without opening it.
    pub fn list_cf<P: AsRef<Path>>(opts: &Options, path: P) -> Result<Vec<String>, Error> {
        let cpath = to_cpath(path)?;
        let mut length = 0;
        unsafe {
            let ptr = ffi_try!(ffi::rocksdb_list_column_families(
                opts.inner,
                cpath.as_ptr(),
                &raw mut length,
            ));
            // Copy the C strings into owned Strings, then free the whole
            // C-allocated array in one call.
            let vec = slice::from_raw_parts(ptr, length)
                .iter()
                .map(|ptr| from_cstr_without_free(*ptr))
                .collect();
            ffi::rocksdb_list_column_families_destroy(ptr, length);
            Ok(vec)
        }
    }
    /// Permanently deletes the database at `path`. The DB must be closed.
    pub fn destroy<P: AsRef<Path>>(opts: &Options, path: P) -> Result<(), Error> {
        let cpath = to_cpath(path)?;
        unsafe {
            ffi_try!(ffi::rocksdb_destroy_db(opts.inner, cpath.as_ptr()));
        }
        Ok(())
    }
    /// Attempts to repair a corrupted database at `path`.
    pub fn repair<P: AsRef<Path>>(opts: &Options, path: P) -> Result<(), Error> {
        let cpath = to_cpath(path)?;
        unsafe {
            ffi_try!(ffi::rocksdb_repair_db(opts.inner, cpath.as_ptr()));
        }
        Ok(())
    }
    /// The filesystem path this database was opened at.
    pub fn path(&self) -> &Path {
        self.path.as_path()
    }
    /// Flushes the WAL to the filesystem; `sync` additionally fsyncs it.
    pub fn flush_wal(&self, sync: bool) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_flush_wal(
                self.inner.inner(),
                c_uchar::from(sync)
            ));
        }
        Ok(())
    }
    /// Flushes the default CF's memtables to SST files.
    pub fn flush_opt(&self, flushopts: &FlushOptions) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_flush(self.inner.inner(), flushopts.inner));
        }
        Ok(())
    }
    /// `flush_opt` with freshly-constructed default flush options.
    pub fn flush(&self) -> Result<(), Error> {
        self.flush_opt(&FlushOptions::default())
    }
    /// Flushes one column family's memtables with the given options.
    pub fn flush_cf_opt(
        &self,
        cf: &impl AsColumnFamilyRef,
        flushopts: &FlushOptions,
    ) -> Result<(), Error> {
        unsafe {
            ffi_try!(ffi::rocksdb_flush_cf(
                self.inner.inner(),
                flushopts.inner,
                cf.inner()
            ));
        }
        Ok(())
    }
    /// Flushes several column families atomically in one FFI call.
    pub fn flush_cfs_opt(
        &self,
        cfs: &[&impl AsColumnFamilyRef],
        opts: &FlushOptions,
    ) -> Result<(), Error> {
        let mut cfs = cfs.iter().map(|cf| cf.inner()).collect::<Vec<_>>();
        unsafe {
            ffi_try!(ffi::rocksdb_flush_cfs(
                self.inner.inner(),
                opts.inner,
                cfs.as_mut_ptr(),
                cfs.len() as libc::c_int,
            ));
        }
        Ok(())
    }
    /// `flush_cf_opt` with the cached default flush options.
    pub fn flush_cf(&self, cf: &impl AsColumnFamilyRef) -> Result<(), Error> {
        DEFAULT_FLUSH_OPTS.with(|opts| self.flush_cf_opt(cf, opts))
    }
    /// Point lookup returning an owned copy of the value. Implemented on
    /// top of the pinned variant, paying one extra copy.
    pub fn get_opt<K: AsRef<[u8]>>(
        &self,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<Vec<u8>>, Error> {
        self.get_pinned_opt(key, readopts)
            .map(|x| x.map(|v| v.as_ref().to_vec()))
    }
    /// `get_opt` with the cached default read options.
    pub fn get<K: AsRef<[u8]>>(&self, key: K) -> Result<Option<Vec<u8>>, Error> {
        DEFAULT_READ_OPTS.with(|opts| self.get_opt(key.as_ref(), opts))
    }
    /// Point lookup in a column family, returning an owned copy.
    pub fn get_cf_opt<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<Vec<u8>>, Error> {
        self.get_pinned_cf_opt(cf, key, readopts)
            .map(|x| x.map(|v| v.as_ref().to_vec()))
    }
    /// `get_cf_opt` with the cached default read options.
    pub fn get_cf<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        key: K,
    ) -> Result<Option<Vec<u8>>, Error> {
        DEFAULT_READ_OPTS.with(|opts| self.get_cf_opt(cf, key.as_ref(), opts))
    }
    /// Zero-copy point lookup: the returned slice borrows memory pinned
    /// inside RocksDB for as long as the slice lives.
    pub fn get_pinned_opt<K: AsRef<[u8]>>(
        &'_ self,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<DBPinnableSlice<'_>>, Error> {
        // A null inner pointer means ReadOptions construction itself
        // failed; passing it to the FFI would be UB, so bail out early.
        if readopts.inner.is_null() {
            return Err(Error::new(
                "Unable to create RocksDB read options. This is a fairly trivial call, and its \
                 failure may be indicative of a mis-compiled or mis-loaded RocksDB library."
                    .to_owned(),
            ));
        }
        let key = key.as_ref();
        unsafe {
            let val = ffi_try!(ffi::rocksdb_get_pinned(
                self.inner.inner(),
                readopts.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
            ));
            // Null result with no error means "key not found".
            if val.is_null() {
                Ok(None)
            } else {
                Ok(Some(DBPinnableSlice::from_c(val)))
            }
        }
    }
    /// `get_pinned_opt` with the cached default read options.
    pub fn get_pinned<K: AsRef<[u8]>>(
        &'_ self,
        key: K,
    ) -> Result<Option<DBPinnableSlice<'_>>, Error> {
        DEFAULT_READ_OPTS.with(|opts| self.get_pinned_opt(key, opts))
    }
    /// Zero-copy point lookup in a column family.
    pub fn get_pinned_cf_opt<K: AsRef<[u8]>>(
        &'_ self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        readopts: &ReadOptions,
    ) -> Result<Option<DBPinnableSlice<'_>>, Error> {
        if readopts.inner.is_null() {
            return Err(Error::new(
                "Unable to create RocksDB read options. This is a fairly trivial call, and its \
                 failure may be indicative of a mis-compiled or mis-loaded RocksDB library."
                    .to_owned(),
            ));
        }
        let key = key.as_ref();
        unsafe {
            let val = ffi_try!(ffi::rocksdb_get_pinned_cf(
                self.inner.inner(),
                readopts.inner,
                cf.inner(),
                key.as_ptr() as *const c_char,
                key.len() as size_t,
            ));
            if val.is_null() {
                Ok(None)
            } else {
                Ok(Some(DBPinnableSlice::from_c(val)))
            }
        }
    }
    /// `get_pinned_cf_opt` with the cached default read options.
    pub fn get_pinned_cf<K: AsRef<[u8]>>(
        &'_ self,
        cf: &impl AsColumnFamilyRef,
        key: K,
    ) -> Result<Option<DBPinnableSlice<'_>>, Error> {
        DEFAULT_READ_OPTS.with(|opts| self.get_pinned_cf_opt(cf, key, opts))
    }
    /// Copies a value directly into a caller-provided buffer, avoiding any
    /// RocksDB-side allocation; default read options.
    pub fn get_into_buffer<K: AsRef<[u8]>>(
        &self,
        key: K,
        buffer: &mut [u8],
    ) -> Result<GetIntoBufferResult, Error> {
        DEFAULT_READ_OPTS.with(|opts| self.get_into_buffer_opt(key, buffer, opts))
    }
    /// Copies a value into `buffer`; see [`GetIntoBufferResult`] for the
    /// three possible outcomes.
    pub fn get_into_buffer_opt<K: AsRef<[u8]>>(
        &self,
        key: K,
        buffer: &mut [u8],
        readopts: &ReadOptions,
    ) -> Result<GetIntoBufferResult, Error> {
        if readopts.inner.is_null() {
            return Err(Error::new(
                "Unable to create RocksDB read options. This is a fairly trivial call, and its \
                 failure may be indicative of a mis-compiled or mis-loaded RocksDB library."
                    .to_owned(),
            ));
        }
        let key = key.as_ref();
        let mut val_len: size_t = 0;
        let mut found: c_uchar = 0;
        unsafe {
            let success = ffi_try!(ffi::rocksdb_get_into_buffer(
                self.inner.inner(),
                readopts.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                buffer.as_mut_ptr() as *mut c_char,
                buffer.len() as size_t,
                &raw mut val_len,
                &raw mut found,
            ));
            // `found` distinguishes absence from a too-small buffer;
            // `val_len` always carries the stored value's length.
            if found == 0 {
                Ok(GetIntoBufferResult::NotFound)
            } else if success != 0 {
                Ok(GetIntoBufferResult::Found(val_len))
            } else {
                Ok(GetIntoBufferResult::BufferTooSmall(val_len))
            }
        }
    }
    /// Buffer-copy lookup in a column family with default read options.
    pub fn get_into_buffer_cf<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        buffer: &mut [u8],
    ) -> Result<GetIntoBufferResult, Error> {
        DEFAULT_READ_OPTS.with(|opts| self.get_into_buffer_cf_opt(cf, key, buffer, opts))
    }
    /// Buffer-copy lookup in a column family.
    pub fn get_into_buffer_cf_opt<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        buffer: &mut [u8],
        readopts: &ReadOptions,
    ) -> Result<GetIntoBufferResult, Error> {
        if readopts.inner.is_null() {
            return Err(Error::new(
                "Unable to create RocksDB read options. This is a fairly trivial call, and its \
                 failure may be indicative of a mis-compiled or mis-loaded RocksDB library."
                    .to_owned(),
            ));
        }
        let key = key.as_ref();
        let mut val_len: size_t = 0;
        let mut found: c_uchar = 0;
        unsafe {
            let success = ffi_try!(ffi::rocksdb_get_into_buffer_cf(
                self.inner.inner(),
                readopts.inner,
                cf.inner(),
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                buffer.as_mut_ptr() as *mut c_char,
                buffer.len() as size_t,
                &raw mut val_len,
                &raw mut found,
            ));
            if found == 0 {
                Ok(GetIntoBufferResult::NotFound)
            } else if success != 0 {
                Ok(GetIntoBufferResult::Found(val_len))
            } else {
                Ok(GetIntoBufferResult::BufferTooSmall(val_len))
            }
        }
    }
    /// Batched lookup in the default CF with default read options.
    pub fn multi_get<K, I>(&self, keys: I) -> Vec<Result<Option<Vec<u8>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = K>,
    {
        DEFAULT_READ_OPTS.with(|opts| self.multi_get_opt(keys, opts))
    }
    /// Batched lookup in the default CF; one result per key, in order.
    pub fn multi_get_opt<K, I>(
        &self,
        keys: I,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<Vec<u8>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = K>,
    {
        // `owned_keys` keeps the key bytes alive while the raw pointer
        // arrays below borrow from them.
        let owned_keys: Vec<K> = keys.into_iter().collect();
        let keys_sizes: Vec<usize> = owned_keys.iter().map(|k| k.as_ref().len()).collect();
        let ptr_keys: Vec<*const c_char> = owned_keys
            .iter()
            .map(|k| k.as_ref().as_ptr() as *const c_char)
            .collect();
        // Output arrays are written by the FFI call; lengths are fixed up
        // with set_len afterwards.
        let mut values: Vec<*mut c_char> = Vec::with_capacity(ptr_keys.len());
        let mut values_sizes: Vec<usize> = Vec::with_capacity(ptr_keys.len());
        let mut errors: Vec<*mut c_char> = Vec::with_capacity(ptr_keys.len());
        unsafe {
            ffi::rocksdb_multi_get(
                self.inner.inner(),
                readopts.inner,
                ptr_keys.len(),
                ptr_keys.as_ptr(),
                keys_sizes.as_ptr(),
                values.as_mut_ptr(),
                values_sizes.as_mut_ptr(),
                errors.as_mut_ptr(),
            );
        }
        // SAFETY-relevant: the C call initialized exactly ptr_keys.len()
        // slots in each output vector.
        unsafe {
            values.set_len(ptr_keys.len());
            values_sizes.set_len(ptr_keys.len());
            errors.set_len(ptr_keys.len());
        }
        convert_values(values, values_sizes, errors)
    }
    /// Pinned batched lookup in the default CF with default read options.
    pub fn multi_get_pinned<K, I>(
        &'_ self,
        keys: I,
    ) -> Vec<Result<Option<DBPinnableSlice<'_>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = K>,
    {
        DEFAULT_READ_OPTS.with(|opts| self.multi_get_pinned_opt(keys, opts))
    }
    /// Pinned batched lookup; implemented as sequential `get_pinned_opt`
    /// calls rather than a single FFI batch.
    pub fn multi_get_pinned_opt<K, I>(
        &'_ self,
        keys: I,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<DBPinnableSlice<'_>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = K>,
    {
        keys.into_iter()
            .map(|k| self.get_pinned_opt(k, readopts))
            .collect()
    }
    /// Pinned batched lookup over `(cf, key)` pairs, default read options.
    pub fn multi_get_pinned_cf<'a, 'b: 'a, K, I, W>(
        &'a self,
        keys: I,
    ) -> Vec<Result<Option<DBPinnableSlice<'a>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = (&'b W, K)>,
        W: 'b + AsColumnFamilyRef,
    {
        DEFAULT_READ_OPTS.with(|opts| self.multi_get_pinned_cf_opt(keys, opts))
    }
    /// Pinned batched lookup over `(cf, key)` pairs; sequential gets.
    pub fn multi_get_pinned_cf_opt<'a, 'b: 'a, K, I, W>(
        &'a self,
        keys: I,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<DBPinnableSlice<'a>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = (&'b W, K)>,
        W: 'b + AsColumnFamilyRef,
    {
        keys.into_iter()
            .map(|(cf, k)| self.get_pinned_cf_opt(cf, k, readopts))
            .collect()
    }
    /// Batched `(cf, key)` lookup with default read options.
    pub fn multi_get_cf<'a, 'b: 'a, K, I, W>(
        &'a self,
        keys: I,
    ) -> Vec<Result<Option<Vec<u8>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = (&'b W, K)>,
        W: 'b + AsColumnFamilyRef,
    {
        DEFAULT_READ_OPTS.with(|opts| self.multi_get_cf_opt(keys, opts))
    }
    /// Batched `(cf, key)` lookup through a single FFI call; one result
    /// per pair, in input order.
    pub fn multi_get_cf_opt<'a, 'b: 'a, K, I, W>(
        &'a self,
        keys: I,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<Vec<u8>>, Error>>
    where
        K: AsRef<[u8]>,
        I: IntoIterator<Item = (&'b W, K)>,
        W: 'b + AsColumnFamilyRef,
    {
        // Keep the owned keys alive; the pointer arrays borrow from them.
        let cfs_and_owned_keys: Vec<(&'b W, K)> = keys.into_iter().collect();
        let keys_sizes: Vec<usize> = cfs_and_owned_keys
            .iter()
            .map(|(_, k)| k.as_ref().len())
            .collect();
        let ptr_keys: Vec<*const c_char> = cfs_and_owned_keys
            .iter()
            .map(|(_, k)| k.as_ref().as_ptr() as *const c_char)
            .collect();
        let ptr_cfs: Vec<*const ffi::rocksdb_column_family_handle_t> = cfs_and_owned_keys
            .iter()
            .map(|(c, _)| c.inner().cast_const())
            .collect();
        let mut values: Vec<*mut c_char> = Vec::with_capacity(ptr_keys.len());
        let mut values_sizes: Vec<usize> = Vec::with_capacity(ptr_keys.len());
        let mut errors: Vec<*mut c_char> = Vec::with_capacity(ptr_keys.len());
        unsafe {
            ffi::rocksdb_multi_get_cf(
                self.inner.inner(),
                readopts.inner,
                ptr_cfs.as_ptr(),
                ptr_keys.len(),
                ptr_keys.as_ptr(),
                keys_sizes.as_ptr(),
                values.as_mut_ptr(),
                values_sizes.as_mut_ptr(),
                errors.as_mut_ptr(),
            );
        }
        // SAFETY-relevant: the C call initialized exactly ptr_keys.len()
        // entries in each output vector.
        unsafe {
            values.set_len(ptr_keys.len());
            values_sizes.set_len(ptr_keys.len());
            errors.set_len(ptr_keys.len());
        }
        convert_values(values, values_sizes, errors)
    }
    /// Batched single-CF lookup (RocksDB `MultiGet`) with default read
    /// options; `sorted_input` promises keys are comparator-ordered.
    pub fn batched_multi_get_cf<'a, K, I>(
        &'_ self,
        cf: &impl AsColumnFamilyRef,
        keys: I,
        sorted_input: bool,
    ) -> Vec<Result<Option<DBPinnableSlice<'_>>, Error>>
    where
        K: AsRef<[u8]> + 'a + ?Sized,
        I: IntoIterator<Item = &'a K>,
    {
        DEFAULT_READ_OPTS.with(|opts| self.batched_multi_get_cf_opt(cf, keys, sorted_input, opts))
    }
pub fn batched_multi_get_cf_opt<'a, K, I>(
&'_ self,
cf: &impl AsColumnFamilyRef,
keys: I,
sorted_input: bool,
readopts: &ReadOptions,
) -> Vec<Result<Option<DBPinnableSlice<'_>>, Error>>
where
K: AsRef<[u8]> + 'a + ?Sized,
I: IntoIterator<Item = &'a K>,
{
let (ptr_keys, keys_sizes): (Vec<_>, Vec<_>) = keys
.into_iter()
.map(|k| {
let k = k.as_ref();
(k.as_ptr() as *const c_char, k.len())
})
.unzip();
let mut pinned_values = vec![ptr::null_mut(); ptr_keys.len()];
let mut errors = vec![ptr::null_mut(); ptr_keys.len()];
unsafe {
ffi::rocksdb_batched_multi_get_cf(
self.inner.inner(),
readopts.inner,
cf.inner(),
ptr_keys.len(),
ptr_keys.as_ptr(),
keys_sizes.as_ptr(),
pinned_values.as_mut_ptr(),
errors.as_mut_ptr(),
sorted_input,
);
pinned_values
.into_iter()
.zip(errors)
.map(|(v, e)| {
if e.is_null() {
if v.is_null() {
Ok(None)
} else {
Ok(Some(DBPinnableSlice::from_c(v)))
}
} else {
Err(convert_rocksdb_error(e))
}
})
.collect()
}
}
    /// Slice-based batched single-CF lookup with default read options.
    pub fn batched_multi_get_cf_slice<'a, K, I>(
        &'_ self,
        cf: &impl AsColumnFamilyRef,
        keys: I,
        sorted_input: bool,
    ) -> Vec<Result<Option<DBPinnableSlice<'_>>, Error>>
    where
        K: AsRef<[u8]> + 'a + ?Sized,
        I: IntoIterator<Item = &'a K>,
    {
        DEFAULT_READ_OPTS
            .with(|opts| self.batched_multi_get_cf_slice_opt(cf, keys, sorted_input, opts))
    }
    /// Batched single-CF lookup passing keys as `rocksdb_slice_t` structs
    /// instead of parallel pointer/length arrays.
    pub fn batched_multi_get_cf_slice_opt<'a, K, I>(
        &'_ self,
        cf: &impl AsColumnFamilyRef,
        keys: I,
        sorted_input: bool,
        readopts: &ReadOptions,
    ) -> Vec<Result<Option<DBPinnableSlice<'_>>, Error>>
    where
        K: AsRef<[u8]> + 'a + ?Sized,
        I: IntoIterator<Item = &'a K>,
    {
        let slices: Vec<ffi::rocksdb_slice_t> = keys
            .into_iter()
            .map(|k| {
                let k = k.as_ref();
                ffi::rocksdb_slice_t {
                    data: k.as_ptr() as *const c_char,
                    size: k.len(),
                }
            })
            .collect();
        // Nothing to look up; skip the FFI call.
        if slices.is_empty() {
            return Vec::new();
        }
        let mut pinned_values = vec![ptr::null_mut(); slices.len()];
        let mut errors = vec![ptr::null_mut(); slices.len()];
        unsafe {
            ffi::rocksdb_batched_multi_get_cf_slice(
                self.inner.inner(),
                readopts.inner,
                cf.inner(),
                slices.len(),
                slices.as_ptr(),
                pinned_values.as_mut_ptr(),
                errors.as_mut_ptr(),
                sorted_input,
            );
            // Per key: error pointer wins; null value means "not found".
            pinned_values
                .into_iter()
                .zip(errors)
                .map(|(v, e)| {
                    if e.is_null() {
                        if v.is_null() {
                            Ok(None)
                        } else {
                            Ok(Some(DBPinnableSlice::from_c(v)))
                        }
                    } else {
                        Err(convert_rocksdb_error(e))
                    }
                })
                .collect()
        }
    }
    /// Probabilistic existence check (bloom filter / memtable); may return
    /// a false positive but never a false negative. Default read options.
    pub fn key_may_exist<K: AsRef<[u8]>>(&self, key: K) -> bool {
        DEFAULT_READ_OPTS.with(|opts| self.key_may_exist_opt(key, opts))
    }
    /// Probabilistic existence check with explicit read options.
    pub fn key_may_exist_opt<K: AsRef<[u8]>>(&self, key: K, readopts: &ReadOptions) -> bool {
        let key = key.as_ref();
        unsafe {
            0 != ffi::rocksdb_key_may_exist(
                self.inner.inner(),
                readopts.inner,
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                // Null out-params: caller doesn't want the cached value,
                // timestamp, or the value_found flag.
                ptr::null_mut(),
                ptr::null_mut(),
                ptr::null(),
                0,
                ptr::null_mut(),
            )
        }
    }
    /// Probabilistic existence check in a column family, default options.
    pub fn key_may_exist_cf<K: AsRef<[u8]>>(&self, cf: &impl AsColumnFamilyRef, key: K) -> bool {
        DEFAULT_READ_OPTS.with(|opts| self.key_may_exist_cf_opt(cf, key, opts))
    }
    /// Probabilistic existence check in a column family.
    pub fn key_may_exist_cf_opt<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        readopts: &ReadOptions,
    ) -> bool {
        let key = key.as_ref();
        0 != unsafe {
            ffi::rocksdb_key_may_exist_cf(
                self.inner.inner(),
                readopts.inner,
                cf.inner(),
                key.as_ptr() as *const c_char,
                key.len() as size_t,
                ptr::null_mut(),
                ptr::null_mut(),
                ptr::null(),
                0,
                ptr::null_mut(),
            )
        }
    }
    /// Like `key_may_exist_cf_opt`, but additionally returns the value
    /// when RocksDB could produce it cheaply (e.g. from the memtable).
    /// `(true, None)` means "may exist, value not readily available".
    pub fn key_may_exist_cf_opt_value<K: AsRef<[u8]>>(
        &self,
        cf: &impl AsColumnFamilyRef,
        key: K,
        readopts: &ReadOptions,
    ) -> (bool, Option<CSlice>) {
        let key = key.as_ref();
        let mut val: *mut c_char = ptr::null_mut();
        let mut val_len: usize = 0;
        let mut value_found: c_uchar = 0;
        let may_exists = 0
            != unsafe {
                ffi::rocksdb_key_may_exist_cf(
                    self.inner.inner(),
                    readopts.inner,
                    cf.inner(),
                    key.as_ptr() as *const c_char,
                    key.len() as size_t,
                    &raw mut val,
                    &raw mut val_len,
                    ptr::null(),
                    0,
                    &raw mut value_found,
                )
            };
        if may_exists && value_found != 0 {
            (
                may_exists,
                // Take ownership of the C-allocated value buffer so it is
                // freed when the CSlice drops.
                Some(unsafe { CSlice::from_raw_parts(val, val_len) }),
            )
        } else {
            (may_exists, None)
        }
    }
/// Creates a raw column-family handle via the C API.
///
/// On error the partially-created handle (if any) is destroyed before the
/// error is returned, so no handle leaks.
fn create_inner_cf_handle(
    &self,
    name: impl CStrLike,
    opts: &Options,
) -> Result<*mut ffi::rocksdb_column_family_handle_t, Error> {
    let cf_name = name.bake().map_err(|err| {
        Error::new(format!(
            "Failed to convert path to CString when creating cf: {err}"
        ))
    })?;
    let mut err: *mut ::libc::c_char = ::std::ptr::null_mut();
    let cf_handle = unsafe {
        ffi::rocksdb_create_column_family(
            self.inner.inner(),
            opts.inner,
            cf_name.as_ptr(),
            &raw mut err,
        )
    };
    if !err.is_null() {
        // Defensive: free any handle the C API returned alongside the error.
        if !cf_handle.is_null() {
            unsafe { ffi::rocksdb_column_family_handle_destroy(cf_handle) };
        }
        return Err(convert_rocksdb_error(err));
    }
    Ok(cf_handle)
}
/// Returns an iterator over the whole database with default read options.
pub fn iterator<'a: 'b, 'b>(
    &'a self,
    mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
    self.iterator_opt(mode, ReadOptions::default())
}
/// Returns an iterator over the whole database with the given read options.
pub fn iterator_opt<'a: 'b, 'b>(
    &'a self,
    mode: IteratorMode,
    readopts: ReadOptions,
) -> DBIteratorWithThreadMode<'b, Self> {
    DBIteratorWithThreadMode::new(self, readopts, mode)
}
/// Returns an iterator over a column family with the given read options.
pub fn iterator_cf_opt<'a: 'b, 'b>(
    &'a self,
    cf_handle: &impl AsColumnFamilyRef,
    readopts: ReadOptions,
    mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
    DBIteratorWithThreadMode::new_cf(self, cf_handle.inner(), readopts, mode)
}
/// Returns an iterator with total-order seek enabled, so the whole key space
/// is visited even when a prefix extractor is configured.
pub fn full_iterator<'a: 'b, 'b>(
    &'a self,
    mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
    let mut readopts = ReadOptions::default();
    readopts.set_total_order_seek(true);
    self.iterator_opt(mode, readopts)
}
/// Returns a forward iterator positioned at `prefix`, constrained by
/// `prefix_same_as_start` so iteration stays within the prefix.
pub fn prefix_iterator<'a: 'b, 'b, P: AsRef<[u8]>>(
    &'a self,
    prefix: P,
) -> DBIteratorWithThreadMode<'b, Self> {
    let mut readopts = ReadOptions::default();
    readopts.set_prefix_same_as_start(true);
    let mode = IteratorMode::From(prefix.as_ref(), Direction::Forward);
    DBIteratorWithThreadMode::new(self, readopts, mode)
}
/// Returns an iterator over a column family with default read options.
pub fn iterator_cf<'a: 'b, 'b>(
    &'a self,
    cf_handle: &impl AsColumnFamilyRef,
    mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
    self.iterator_cf_opt(cf_handle, ReadOptions::default(), mode)
}
/// Column-family variant of [`full_iterator`]: total-order seek enabled.
pub fn full_iterator_cf<'a: 'b, 'b>(
    &'a self,
    cf_handle: &impl AsColumnFamilyRef,
    mode: IteratorMode,
) -> DBIteratorWithThreadMode<'b, Self> {
    let mut readopts = ReadOptions::default();
    readopts.set_total_order_seek(true);
    self.iterator_cf_opt(cf_handle, readopts, mode)
}
/// Column-family variant of [`prefix_iterator`].
pub fn prefix_iterator_cf<'a, P: AsRef<[u8]>>(
    &'a self,
    cf_handle: &impl AsColumnFamilyRef,
    prefix: P,
) -> DBIteratorWithThreadMode<'a, Self> {
    let mut readopts = ReadOptions::default();
    readopts.set_prefix_same_as_start(true);
    let mode = IteratorMode::From(prefix.as_ref(), Direction::Forward);
    DBIteratorWithThreadMode::new_cf(self, cf_handle.inner(), readopts, mode)
}
/// Returns whether at least one key with the given prefix exists.
///
/// Reuses a thread-local `ReadOptions` (with `prefix_same_as_start` already
/// set) to avoid allocating options per call; the iterate range is refreshed
/// on every call, so stale bounds from a previous prefix cannot leak through.
pub fn prefix_exists<P: AsRef<[u8]>>(&self, prefix: P) -> Result<bool, Error> {
    let p = prefix.as_ref();
    PREFIX_READ_OPTS.with(|rc| {
        // NOTE(review): `prefix_exists_opt` must not itself touch
        // PREFIX_READ_OPTS, or this borrow_mut would panic — it doesn't today.
        let mut opts = rc.borrow_mut();
        opts.set_iterate_range(crate::PrefixRange(p));
        self.prefix_exists_opt(p, &opts)
    })
}
/// Returns whether at least one key with the given prefix exists, using the
/// supplied read options.
///
/// Creates a raw C iterator, seeks to `prefix`, and checks whether the first
/// key at or after it actually starts with the prefix. The iterator is always
/// destroyed before returning, on both success and error paths.
pub fn prefix_exists_opt<P: AsRef<[u8]>>(
    &self,
    prefix: P,
    readopts: &ReadOptions,
) -> Result<bool, Error> {
    let prefix = prefix.as_ref();
    let iter = unsafe { self.create_iterator(readopts) };
    let res = unsafe {
        ffi::rocksdb_iter_seek(
            iter,
            prefix.as_ptr() as *const c_char,
            prefix.len() as size_t,
        );
        if ffi::rocksdb_iter_valid(iter) != 0 {
            let mut key_len: size_t = 0;
            let key_ptr = ffi::rocksdb_iter_key(iter, &raw mut key_len);
            // Borrow of the iterator's key buffer; valid until the iterator
            // is advanced or destroyed, which happens only after this check.
            let key = slice::from_raw_parts(key_ptr as *const u8, key_len as usize);
            Ok(key.starts_with(prefix))
        } else if let Err(e) = (|| {
            // Invalid iterator: distinguish "end of data" from an I/O error.
            ffi_try!(ffi::rocksdb_iter_get_error(iter));
            Ok::<(), Error>(())
        })() {
            Err(e)
        } else {
            Ok(false)
        }
    };
    unsafe { ffi::rocksdb_iter_destroy(iter) };
    res
}
/// Returns a reusable prefix prober over the default column family, with
/// `prefix_same_as_start` enabled.
pub fn prefix_prober(&self) -> PrefixProber<'_, Self> {
    let mut readopts = ReadOptions::default();
    readopts.set_prefix_same_as_start(true);
    self.prefix_prober_with_opts(readopts)
}
/// Returns a prefix prober with caller-supplied read options.
pub fn prefix_prober_with_opts(&self, readopts: ReadOptions) -> PrefixProber<'_, Self> {
    PrefixProber {
        raw: DBRawIteratorWithThreadMode::new(self, readopts),
    }
}
/// Column-family variant of [`prefix_prober`].
pub fn prefix_prober_cf(&self, cf_handle: &impl AsColumnFamilyRef) -> PrefixProber<'_, Self> {
    let mut readopts = ReadOptions::default();
    readopts.set_prefix_same_as_start(true);
    self.prefix_prober_cf_with_opts(cf_handle, readopts)
}
/// Column-family variant of [`prefix_prober_with_opts`].
pub fn prefix_prober_cf_with_opts(
    &self,
    cf_handle: &impl AsColumnFamilyRef,
    readopts: ReadOptions,
) -> PrefixProber<'_, Self> {
    PrefixProber {
        raw: DBRawIteratorWithThreadMode::new_cf(self, cf_handle.inner(), readopts),
    }
}
/// Column-family variant of [`prefix_exists`].
///
/// Shares the same thread-local `ReadOptions`; the iterate range is reset to
/// the current prefix on every call.
pub fn prefix_exists_cf<P: AsRef<[u8]>>(
    &self,
    cf_handle: &impl AsColumnFamilyRef,
    prefix: P,
) -> Result<bool, Error> {
    let p = prefix.as_ref();
    PREFIX_READ_OPTS.with(|rc| {
        let mut opts = rc.borrow_mut();
        opts.set_iterate_range(crate::PrefixRange(p));
        self.prefix_exists_cf_opt(cf_handle, p, &opts)
    })
}
/// Column-family variant of [`prefix_exists_opt`]; see it for the seek-and-
/// check strategy. The raw iterator is destroyed on all paths.
pub fn prefix_exists_cf_opt<P: AsRef<[u8]>>(
    &self,
    cf_handle: &impl AsColumnFamilyRef,
    prefix: P,
    readopts: &ReadOptions,
) -> Result<bool, Error> {
    let prefix = prefix.as_ref();
    let iter = unsafe { self.create_iterator_cf(cf_handle.inner(), readopts) };
    let res = unsafe {
        ffi::rocksdb_iter_seek(
            iter,
            prefix.as_ptr() as *const c_char,
            prefix.len() as size_t,
        );
        if ffi::rocksdb_iter_valid(iter) != 0 {
            let mut key_len: size_t = 0;
            let key_ptr = ffi::rocksdb_iter_key(iter, &raw mut key_len);
            // Borrowed key buffer, valid until the iterator is destroyed below.
            let key = slice::from_raw_parts(key_ptr as *const u8, key_len as usize);
            Ok(key.starts_with(prefix))
        } else if let Err(e) = (|| {
            // Invalid iterator: surface a real error, else "not found".
            ffi_try!(ffi::rocksdb_iter_get_error(iter));
            Ok::<(), Error>(())
        })() {
            Err(e)
        } else {
            Ok(false)
        }
    };
    unsafe { ffi::rocksdb_iter_destroy(iter) };
    res
}
/// Returns a raw iterator over the default column family with default options.
pub fn raw_iterator<'a: 'b, 'b>(&'a self) -> DBRawIteratorWithThreadMode<'b, Self> {
    self.raw_iterator_opt(ReadOptions::default())
}
/// Returns a raw iterator over a column family with default options.
pub fn raw_iterator_cf<'a: 'b, 'b>(
    &'a self,
    cf_handle: &impl AsColumnFamilyRef,
) -> DBRawIteratorWithThreadMode<'b, Self> {
    self.raw_iterator_cf_opt(cf_handle, ReadOptions::default())
}
/// Returns a raw iterator over the default column family with the given options.
pub fn raw_iterator_opt<'a: 'b, 'b>(
    &'a self,
    readopts: ReadOptions,
) -> DBRawIteratorWithThreadMode<'b, Self> {
    DBRawIteratorWithThreadMode::new(self, readopts)
}
/// Returns a raw iterator over a column family with the given options.
pub fn raw_iterator_cf_opt<'a: 'b, 'b>(
    &'a self,
    cf_handle: &impl AsColumnFamilyRef,
    readopts: ReadOptions,
) -> DBRawIteratorWithThreadMode<'b, Self> {
    DBRawIteratorWithThreadMode::new_cf(self, cf_handle.inner(), readopts)
}
/// Takes a consistent read snapshot of the database.
pub fn snapshot(&'_ self) -> SnapshotWithThreadMode<'_, Self> {
    SnapshotWithThreadMode::new(self)
}
/// Inserts `key` -> `value` into the default column family with the given
/// write options.
pub fn put_opt<K, V>(&self, key: K, value: V, writeopts: &WriteOptions) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
{
    let key = key.as_ref();
    let value = value.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_put(
            self.inner.inner(),
            writeopts.inner,
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            value.as_ptr() as *const c_char,
            value.len() as size_t,
        ));
        Ok(())
    }
}
/// Inserts `key` -> `value` into a column family with the given write options.
pub fn put_cf_opt<K, V>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    value: V,
    writeopts: &WriteOptions,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
{
    let key = key.as_ref();
    let value = value.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_put_cf(
            self.inner.inner(),
            writeopts.inner,
            cf.inner(),
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            value.as_ptr() as *const c_char,
            value.len() as size_t,
        ));
        Ok(())
    }
}
/// Inserts `key` -> `value` with a user-defined timestamp `ts` (requires the
/// column family to be configured with a timestamp-aware comparator).
pub fn put_with_ts_opt<K, V, S>(
    &self,
    key: K,
    ts: S,
    value: V,
    writeopts: &WriteOptions,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
    S: AsRef<[u8]>,
{
    let key = key.as_ref();
    let value = value.as_ref();
    let ts = ts.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_put_with_ts(
            self.inner.inner(),
            writeopts.inner,
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            ts.as_ptr() as *const c_char,
            ts.len() as size_t,
            value.as_ptr() as *const c_char,
            value.len() as size_t,
        ));
        Ok(())
    }
}
/// Column-family variant of [`put_with_ts_opt`].
pub fn put_cf_with_ts_opt<K, V, S>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    ts: S,
    value: V,
    writeopts: &WriteOptions,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
    S: AsRef<[u8]>,
{
    let key = key.as_ref();
    let value = value.as_ref();
    let ts = ts.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_put_cf_with_ts(
            self.inner.inner(),
            writeopts.inner,
            cf.inner(),
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            ts.as_ptr() as *const c_char,
            ts.len() as size_t,
            value.as_ptr() as *const c_char,
            value.len() as size_t,
        ));
        Ok(())
    }
}
/// Issues a merge operation for `key` (requires a merge operator to be
/// configured on the database) with the given write options.
pub fn merge_opt<K, V>(&self, key: K, value: V, writeopts: &WriteOptions) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
{
    let key = key.as_ref();
    let value = value.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_merge(
            self.inner.inner(),
            writeopts.inner,
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            value.as_ptr() as *const c_char,
            value.len() as size_t,
        ));
        Ok(())
    }
}
/// Column-family variant of [`merge_opt`].
pub fn merge_cf_opt<K, V>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    value: V,
    writeopts: &WriteOptions,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
{
    let key = key.as_ref();
    let value = value.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_merge_cf(
            self.inner.inner(),
            writeopts.inner,
            cf.inner(),
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            value.as_ptr() as *const c_char,
            value.len() as size_t,
        ));
        Ok(())
    }
}
/// Deletes `key` from the default column family with the given write options.
pub fn delete_opt<K: AsRef<[u8]>>(
    &self,
    key: K,
    writeopts: &WriteOptions,
) -> Result<(), Error> {
    let key = key.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_delete(
            self.inner.inner(),
            writeopts.inner,
            key.as_ptr() as *const c_char,
            key.len() as size_t,
        ));
        Ok(())
    }
}
/// Deletes `key` from a column family with the given write options.
pub fn delete_cf_opt<K: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    writeopts: &WriteOptions,
) -> Result<(), Error> {
    let key = key.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_delete_cf(
            self.inner.inner(),
            writeopts.inner,
            cf.inner(),
            key.as_ptr() as *const c_char,
            key.len() as size_t,
        ));
        Ok(())
    }
}
/// Deletes `key` with a user-defined timestamp `ts`.
pub fn delete_with_ts_opt<K, S>(
    &self,
    key: K,
    ts: S,
    writeopts: &WriteOptions,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    S: AsRef<[u8]>,
{
    let key = key.as_ref();
    let ts = ts.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_delete_with_ts(
            self.inner.inner(),
            writeopts.inner,
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            ts.as_ptr() as *const c_char,
            ts.len() as size_t,
        ));
        Ok(())
    }
}
/// Column-family variant of [`delete_with_ts_opt`].
pub fn delete_cf_with_ts_opt<K, S>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    ts: S,
    writeopts: &WriteOptions,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    S: AsRef<[u8]>,
{
    let key = key.as_ref();
    let ts = ts.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_delete_cf_with_ts(
            self.inner.inner(),
            writeopts.inner,
            cf.inner(),
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            ts.as_ptr() as *const c_char,
            ts.len() as size_t,
        ));
        Ok(())
    }
}
/// Inserts `key` -> `value` with default write options.
pub fn put<K, V>(&self, key: K, value: V) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
{
    DEFAULT_WRITE_OPTS.with(|opts| self.put_opt(key, value, opts))
}
/// Inserts `key` -> `value` into a column family with default write options.
pub fn put_cf<K, V>(&self, cf: &impl AsColumnFamilyRef, key: K, value: V) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
{
    DEFAULT_WRITE_OPTS.with(|opts| self.put_cf_opt(cf, key, value, opts))
}
/// Inserts `key` -> `value` with timestamp `ts` and default write options.
pub fn put_with_ts<K, V, S>(&self, key: K, ts: S, value: V) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
    S: AsRef<[u8]>,
{
    // The `_opt` variant takes AsRef generics itself, so the arguments can be
    // forwarded directly.
    DEFAULT_WRITE_OPTS.with(|opts| self.put_with_ts_opt(key, ts, value, opts))
}
/// Column-family variant of [`put_with_ts`], using default write options.
pub fn put_cf_with_ts<K, V, S>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    ts: S,
    value: V,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
    S: AsRef<[u8]>,
{
    // Forward the generic arguments; the `_opt` variant performs `as_ref`.
    DEFAULT_WRITE_OPTS.with(|opts| self.put_cf_with_ts_opt(cf, key, ts, value, opts))
}
/// Issues a merge for `key` with default write options.
pub fn merge<K, V>(&self, key: K, value: V) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
{
    DEFAULT_WRITE_OPTS.with(|opts| self.merge_opt(key, value, opts))
}
/// Issues a merge for `key` in a column family with default write options.
pub fn merge_cf<K, V>(&self, cf: &impl AsColumnFamilyRef, key: K, value: V) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    V: AsRef<[u8]>,
{
    DEFAULT_WRITE_OPTS.with(|opts| self.merge_cf_opt(cf, key, value, opts))
}
/// Deletes `key` with default write options.
pub fn delete<K: AsRef<[u8]>>(&self, key: K) -> Result<(), Error> {
    DEFAULT_WRITE_OPTS.with(|opts| self.delete_opt(key, opts))
}
/// Deletes `key` from a column family with default write options.
pub fn delete_cf<K: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
) -> Result<(), Error> {
    DEFAULT_WRITE_OPTS.with(|opts| self.delete_cf_opt(cf, key, opts))
}
/// Deletes `key` with timestamp `ts` and default write options.
pub fn delete_with_ts<K: AsRef<[u8]>, S: AsRef<[u8]>>(
    &self,
    key: K,
    ts: S,
) -> Result<(), Error> {
    DEFAULT_WRITE_OPTS.with(|opts| self.delete_with_ts_opt(key, ts, opts))
}
/// Column-family variant of [`delete_with_ts`], using default write options.
pub fn delete_cf_with_ts<K: AsRef<[u8]>, S: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    ts: S,
) -> Result<(), Error> {
    DEFAULT_WRITE_OPTS.with(|opts| self.delete_cf_with_ts_opt(cf, key, ts, opts))
}
/// Issues a SingleDelete for `key` (only valid if the key was written at most
/// once since the last delete; see RocksDB's SingleDelete contract).
pub fn single_delete_opt<K: AsRef<[u8]>>(
    &self,
    key: K,
    writeopts: &WriteOptions,
) -> Result<(), Error> {
    let key = key.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_singledelete(
            self.inner.inner(),
            writeopts.inner,
            key.as_ptr() as *const c_char,
            key.len() as size_t,
        ));
        Ok(())
    }
}
/// Column-family variant of [`single_delete_opt`].
pub fn single_delete_cf_opt<K: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    writeopts: &WriteOptions,
) -> Result<(), Error> {
    let key = key.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_singledelete_cf(
            self.inner.inner(),
            writeopts.inner,
            cf.inner(),
            key.as_ptr() as *const c_char,
            key.len() as size_t,
        ));
        Ok(())
    }
}
/// SingleDelete for `key` with a user-defined timestamp `ts`.
pub fn single_delete_with_ts_opt<K, S>(
    &self,
    key: K,
    ts: S,
    writeopts: &WriteOptions,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    S: AsRef<[u8]>,
{
    let key = key.as_ref();
    let ts = ts.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_singledelete_with_ts(
            self.inner.inner(),
            writeopts.inner,
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            ts.as_ptr() as *const c_char,
            ts.len() as size_t,
        ));
        Ok(())
    }
}
/// Column-family variant of [`single_delete_with_ts_opt`].
pub fn single_delete_cf_with_ts_opt<K, S>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    ts: S,
    writeopts: &WriteOptions,
) -> Result<(), Error>
where
    K: AsRef<[u8]>,
    S: AsRef<[u8]>,
{
    let key = key.as_ref();
    let ts = ts.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_singledelete_cf_with_ts(
            self.inner.inner(),
            writeopts.inner,
            cf.inner(),
            key.as_ptr() as *const c_char,
            key.len() as size_t,
            ts.as_ptr() as *const c_char,
            ts.len() as size_t,
        ));
        Ok(())
    }
}
/// SingleDelete for `key` with default write options.
pub fn single_delete<K: AsRef<[u8]>>(&self, key: K) -> Result<(), Error> {
    DEFAULT_WRITE_OPTS.with(|opts| self.single_delete_opt(key, opts))
}
/// Column-family SingleDelete with default write options.
pub fn single_delete_cf<K: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
) -> Result<(), Error> {
    DEFAULT_WRITE_OPTS.with(|opts| self.single_delete_cf_opt(cf, key, opts))
}
/// Timestamped SingleDelete with default write options.
pub fn single_delete_with_ts<K: AsRef<[u8]>, S: AsRef<[u8]>>(
    &self,
    key: K,
    ts: S,
) -> Result<(), Error> {
    DEFAULT_WRITE_OPTS.with(|opts| self.single_delete_with_ts_opt(key, ts, opts))
}
/// Column-family timestamped SingleDelete with default write options.
pub fn single_delete_cf_with_ts<K: AsRef<[u8]>, S: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    key: K,
    ts: S,
) -> Result<(), Error> {
    DEFAULT_WRITE_OPTS.with(|opts| self.single_delete_cf_with_ts_opt(cf, key, ts, opts))
}
/// Runs a manual compaction over `[start, end)`. A `None` bound means
/// "unbounded on that side" (passed to the C API as a null pointer with
/// length 0).
pub fn compact_range<S: AsRef<[u8]>, E: AsRef<[u8]>>(&self, start: Option<S>, end: Option<E>) {
    unsafe {
        let start = start.as_ref().map(AsRef::as_ref);
        let end = end.as_ref().map(AsRef::as_ref);
        ffi::rocksdb_compact_range(
            self.inner.inner(),
            opt_bytes_to_ptr(start),
            start.map_or(0, <[u8]>::len) as size_t,
            opt_bytes_to_ptr(end),
            end.map_or(0, <[u8]>::len) as size_t,
        );
    }
}
/// Manual compaction over `[start, end)` with explicit compaction options.
/// `None` bounds are unbounded.
pub fn compact_range_opt<S: AsRef<[u8]>, E: AsRef<[u8]>>(
    &self,
    start: Option<S>,
    end: Option<E>,
    opts: &CompactOptions,
) {
    unsafe {
        let start = start.as_ref().map(AsRef::as_ref);
        let end = end.as_ref().map(AsRef::as_ref);
        ffi::rocksdb_compact_range_opt(
            self.inner.inner(),
            opts.inner,
            opt_bytes_to_ptr(start),
            start.map_or(0, <[u8]>::len) as size_t,
            opt_bytes_to_ptr(end),
            end.map_or(0, <[u8]>::len) as size_t,
        );
    }
}
/// Column-family variant of [`compact_range`].
pub fn compact_range_cf<S: AsRef<[u8]>, E: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    start: Option<S>,
    end: Option<E>,
) {
    unsafe {
        let start = start.as_ref().map(AsRef::as_ref);
        let end = end.as_ref().map(AsRef::as_ref);
        ffi::rocksdb_compact_range_cf(
            self.inner.inner(),
            cf.inner(),
            opt_bytes_to_ptr(start),
            start.map_or(0, <[u8]>::len) as size_t,
            opt_bytes_to_ptr(end),
            end.map_or(0, <[u8]>::len) as size_t,
        );
    }
}
/// Column-family variant of [`compact_range_opt`].
pub fn compact_range_cf_opt<S: AsRef<[u8]>, E: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    start: Option<S>,
    end: Option<E>,
    opts: &CompactOptions,
) {
    unsafe {
        let start = start.as_ref().map(AsRef::as_ref);
        let end = end.as_ref().map(AsRef::as_ref);
        ffi::rocksdb_compact_range_cf_opt(
            self.inner.inner(),
            cf.inner(),
            opts.inner,
            opt_bytes_to_ptr(start),
            start.map_or(0, <[u8]>::len) as size_t,
            opt_bytes_to_ptr(end),
            end.map_or(0, <[u8]>::len) as size_t,
        );
    }
}
/// Blocks until background compactions finish, per the given options.
pub fn wait_for_compact(&self, opts: &WaitForCompactOptions) -> Result<(), Error> {
    unsafe {
        ffi_try!(ffi::rocksdb_wait_for_compact(
            self.inner.inner(),
            opts.inner
        ));
    }
    Ok(())
}
/// Dynamically sets DB options from `(name, value)` string pairs.
///
/// `copts` owns the CStrings, so the raw pointers in `cnames`/`cvalues`
/// stay valid for the duration of the FFI call.
pub fn set_options(&self, opts: &[(&str, &str)]) -> Result<(), Error> {
    let copts = convert_options(opts)?;
    let cnames: Vec<*const c_char> = copts.iter().map(|opt| opt.0.as_ptr()).collect();
    let cvalues: Vec<*const c_char> = copts.iter().map(|opt| opt.1.as_ptr()).collect();
    // NOTE(review): `as i32` would silently truncate for >i32::MAX options —
    // not a realistic input, but worth knowing.
    let count = opts.len() as i32;
    unsafe {
        ffi_try!(ffi::rocksdb_set_options(
            self.inner.inner(),
            count,
            cnames.as_ptr(),
            cvalues.as_ptr(),
        ));
    }
    Ok(())
}
/// Column-family variant of [`set_options`].
pub fn set_options_cf(
    &self,
    cf: &impl AsColumnFamilyRef,
    opts: &[(&str, &str)],
) -> Result<(), Error> {
    let copts = convert_options(opts)?;
    // `copts` keeps the CStrings alive across the FFI call below.
    let cnames: Vec<*const c_char> = copts.iter().map(|opt| opt.0.as_ptr()).collect();
    let cvalues: Vec<*const c_char> = copts.iter().map(|opt| opt.1.as_ptr()).collect();
    let count = opts.len() as i32;
    unsafe {
        ffi_try!(ffi::rocksdb_set_options_cf(
            self.inner.inner(),
            cf.inner(),
            count,
            cnames.as_ptr(),
            cvalues.as_ptr(),
        ));
    }
    Ok(())
}
/// Shared implementation for the `property_*` getters.
///
/// Calls `get_property` with the baked property name, parses the returned
/// C string with `parse`, and frees the C allocation before returning —
/// on both the parse-success and parse-failure paths.
fn property_value_impl<R>(
    name: impl CStrLike,
    get_property: impl FnOnce(*const c_char) -> *mut c_char,
    parse: impl FnOnce(&str) -> Result<R, Error>,
) -> Result<Option<R>, Error> {
    let value = match name.bake() {
        Ok(prop_name) => get_property(prop_name.as_ptr()),
        Err(e) => {
            return Err(Error::new(format!(
                "Failed to convert property name to CString: {e}"
            )));
        }
    };
    // Null means the property does not exist.
    if value.is_null() {
        return Ok(None);
    }
    let result = match unsafe { CStr::from_ptr(value) }.to_str() {
        Ok(s) => parse(s).map(|value| Some(value)),
        Err(e) => Err(Error::new(format!(
            "Failed to convert property value to string: {e}"
        ))),
    };
    unsafe {
        ffi::rocksdb_free(value as *mut c_void);
    }
    result
}
/// Returns the string value of a DB property, or `None` if it doesn't exist.
pub fn property_value(&self, name: impl CStrLike) -> Result<Option<String>, Error> {
    Self::property_value_impl(
        name,
        |prop_name| unsafe { ffi::rocksdb_property_value(self.inner.inner(), prop_name) },
        |str_value| Ok(str_value.to_owned()),
    )
}
/// Column-family variant of [`property_value`].
pub fn property_value_cf(
    &self,
    cf: &impl AsColumnFamilyRef,
    name: impl CStrLike,
) -> Result<Option<String>, Error> {
    Self::property_value_impl(
        name,
        |prop_name| unsafe {
            ffi::rocksdb_property_value_cf(self.inner.inner(), cf.inner(), prop_name)
        },
        |str_value| Ok(str_value.to_owned()),
    )
}
fn parse_property_int_value(value: &str) -> Result<u64, Error> {
value.parse::<u64>().map_err(|err| {
Error::new(format!(
"Failed to convert property value {value} to int: {err}"
))
})
}
/// Returns an integer DB property, or `None` if it doesn't exist.
pub fn property_int_value(&self, name: impl CStrLike) -> Result<Option<u64>, Error> {
    Self::property_value_impl(
        name,
        |prop_name| unsafe { ffi::rocksdb_property_value(self.inner.inner(), prop_name) },
        Self::parse_property_int_value,
    )
}
/// Column-family variant of [`property_int_value`].
pub fn property_int_value_cf(
    &self,
    cf: &impl AsColumnFamilyRef,
    name: impl CStrLike,
) -> Result<Option<u64>, Error> {
    Self::property_value_impl(
        name,
        |prop_name| unsafe {
            ffi::rocksdb_property_value_cf(self.inner.inner(), cf.inner(), prop_name)
        },
        Self::parse_property_int_value,
    )
}
/// Returns the sequence number of the most recent write.
pub fn latest_sequence_number(&self) -> u64 {
    unsafe { ffi::rocksdb_get_latest_sequence_number(self.inner.inner()) }
}
/// Approximate on-disk sizes (in bytes) for each key range, default CF.
pub fn get_approximate_sizes(&self, ranges: &[Range]) -> Vec<u64> {
    self.get_approximate_sizes_cfopt(None::<&ColumnFamily>, ranges)
}
/// Column-family variant of [`get_approximate_sizes`].
pub fn get_approximate_sizes_cf(
    &self,
    cf: &impl AsColumnFamilyRef,
    ranges: &[Range],
) -> Vec<u64> {
    self.get_approximate_sizes_cfopt(Some(cf), ranges)
}
fn get_approximate_sizes_cfopt(
&self,
cf: Option<&impl AsColumnFamilyRef>,
ranges: &[Range],
) -> Vec<u64> {
let start_keys: Vec<*const c_char> = ranges
.iter()
.map(|x| x.start_key.as_ptr() as *const c_char)
.collect();
let start_key_lens: Vec<_> = ranges.iter().map(|x| x.start_key.len()).collect();
let end_keys: Vec<*const c_char> = ranges
.iter()
.map(|x| x.end_key.as_ptr() as *const c_char)
.collect();
let end_key_lens: Vec<_> = ranges.iter().map(|x| x.end_key.len()).collect();
let mut sizes: Vec<u64> = vec![0; ranges.len()];
let (n, start_key_ptr, start_key_len_ptr, end_key_ptr, end_key_len_ptr, size_ptr) = (
ranges.len() as i32,
start_keys.as_ptr(),
start_key_lens.as_ptr(),
end_keys.as_ptr(),
end_key_lens.as_ptr(),
sizes.as_mut_ptr(),
);
let mut err: *mut c_char = ptr::null_mut();
match cf {
None => unsafe {
ffi::rocksdb_approximate_sizes(
self.inner.inner(),
n,
start_key_ptr,
start_key_len_ptr,
end_key_ptr,
end_key_len_ptr,
size_ptr,
&raw mut err,
);
},
Some(cf) => unsafe {
ffi::rocksdb_approximate_sizes_cf(
self.inner.inner(),
cf.inner(),
n,
start_key_ptr,
start_key_len_ptr,
end_key_ptr,
end_key_len_ptr,
size_ptr,
&raw mut err,
);
},
}
sizes
}
/// Returns an iterator over WAL entries with sequence number >= `seq_number`.
pub fn get_updates_since(&self, seq_number: u64) -> Result<DBWALIterator, Error> {
    unsafe {
        // Null read options: the C API accepts null and uses defaults.
        let opts: *const ffi::rocksdb_wal_readoptions_t = ptr::null();
        let iter = ffi_try!(ffi::rocksdb_get_updates_since(
            self.inner.inner(),
            seq_number,
            opts
        ));
        Ok(DBWALIterator {
            inner: iter,
            start_seq_number: seq_number,
        })
    }
}
/// For a secondary instance: replays the primary's log to catch up.
pub fn try_catch_up_with_primary(&self) -> Result<(), Error> {
    unsafe {
        ffi_try!(ffi::rocksdb_try_catch_up_with_primary(self.inner.inner()));
    }
    Ok(())
}
/// Ingests external SST files into the default column family using default
/// ingest options.
pub fn ingest_external_file<P: AsRef<Path>>(&self, paths: Vec<P>) -> Result<(), Error> {
    self.ingest_external_file_opts(&IngestExternalFileOptions::default(), paths)
}
/// Ingests external SST files with explicit options. Path conversion fails
/// if a path contains interior NUL bytes or is not valid for the platform.
pub fn ingest_external_file_opts<P: AsRef<Path>>(
    &self,
    opts: &IngestExternalFileOptions,
    paths: Vec<P>,
) -> Result<(), Error> {
    // `paths_v` owns the CStrings so `cpaths` pointers stay valid.
    let paths_v: Vec<CString> = paths.iter().map(to_cpath).collect::<Result<Vec<_>, _>>()?;
    let cpaths: Vec<_> = paths_v.iter().map(|path| path.as_ptr()).collect();
    self.ingest_external_file_raw(opts, &paths_v, &cpaths)
}
/// Ingests external SST files into a column family using default options.
pub fn ingest_external_file_cf<P: AsRef<Path>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    paths: Vec<P>,
) -> Result<(), Error> {
    self.ingest_external_file_cf_opts(cf, &IngestExternalFileOptions::default(), paths)
}
/// Column-family variant of [`ingest_external_file_opts`].
pub fn ingest_external_file_cf_opts<P: AsRef<Path>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    opts: &IngestExternalFileOptions,
    paths: Vec<P>,
) -> Result<(), Error> {
    // `paths_v` owns the CStrings so `cpaths` pointers stay valid.
    let paths_v: Vec<CString> = paths.iter().map(to_cpath).collect::<Result<Vec<_>, _>>()?;
    let cpaths: Vec<_> = paths_v.iter().map(|path| path.as_ptr()).collect();
    self.ingest_external_file_raw_cf(cf, opts, &paths_v, &cpaths)
}
/// Raw FFI call for file ingestion. `paths_v` must own the strings that
/// `cpaths` points into.
fn ingest_external_file_raw(
    &self,
    opts: &IngestExternalFileOptions,
    paths_v: &[CString],
    cpaths: &[*const c_char],
) -> Result<(), Error> {
    unsafe {
        ffi_try!(ffi::rocksdb_ingest_external_file(
            self.inner.inner(),
            cpaths.as_ptr(),
            paths_v.len(),
            opts.inner.cast_const()
        ));
        Ok(())
    }
}
/// Column-family variant of [`ingest_external_file_raw`].
fn ingest_external_file_raw_cf(
    &self,
    cf: &impl AsColumnFamilyRef,
    opts: &IngestExternalFileOptions,
    paths_v: &[CString],
    cpaths: &[*const c_char],
) -> Result<(), Error> {
    unsafe {
        ffi_try!(ffi::rocksdb_ingest_external_file_cf(
            self.inner.inner(),
            cf.inner(),
            cpaths.as_ptr(),
            paths_v.len(),
            opts.inner.cast_const()
        ));
        Ok(())
    }
}
/// Returns metadata (size, name, file count) for the default column family.
/// The C metadata object is destroyed before returning.
pub fn get_column_family_metadata(&self) -> ColumnFamilyMetaData {
    unsafe {
        let ptr = ffi::rocksdb_get_column_family_metadata(self.inner.inner());
        let metadata = ColumnFamilyMetaData {
            size: ffi::rocksdb_column_family_metadata_get_size(ptr),
            name: from_cstr_and_free(ffi::rocksdb_column_family_metadata_get_name(ptr)),
            file_count: ffi::rocksdb_column_family_metadata_get_file_count(ptr),
        };
        ffi::rocksdb_column_family_metadata_destroy(ptr);
        metadata
    }
}
/// Column-family variant of [`get_column_family_metadata`].
pub fn get_column_family_metadata_cf(
    &self,
    cf: &impl AsColumnFamilyRef,
) -> ColumnFamilyMetaData {
    unsafe {
        let ptr = ffi::rocksdb_get_column_family_metadata_cf(self.inner.inner(), cf.inner());
        let metadata = ColumnFamilyMetaData {
            size: ffi::rocksdb_column_family_metadata_get_size(ptr),
            name: from_cstr_and_free(ffi::rocksdb_column_family_metadata_get_name(ptr)),
            file_count: ffi::rocksdb_column_family_metadata_get_file_count(ptr),
        };
        ffi::rocksdb_column_family_metadata_destroy(ptr);
        metadata
    }
}
/// Returns metadata for all live SST files. The C list is destroyed after
/// the Rust copies are extracted.
pub fn live_files(&self) -> Result<Vec<LiveFile>, Error> {
    unsafe {
        let livefiles_ptr = ffi::rocksdb_livefiles(self.inner.inner());
        if livefiles_ptr.is_null() {
            Err(Error::new("Could not get live files".to_owned()))
        } else {
            let files = LiveFile::from_rocksdb_livefiles_ptr(livefiles_ptr);
            ffi::rocksdb_livefiles_destroy(livefiles_ptr);
            Ok(files)
        }
    }
}
/// Deletes SST files fully contained in `[from, to]` from the default CF.
pub fn delete_file_in_range<K: AsRef<[u8]>>(&self, from: K, to: K) -> Result<(), Error> {
    let from = from.as_ref();
    let to = to.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_delete_file_in_range(
            self.inner.inner(),
            from.as_ptr() as *const c_char,
            from.len() as size_t,
            to.as_ptr() as *const c_char,
            to.len() as size_t,
        ));
        Ok(())
    }
}
/// Column-family variant of [`delete_file_in_range`].
pub fn delete_file_in_range_cf<K: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    from: K,
    to: K,
) -> Result<(), Error> {
    let from = from.as_ref();
    let to = to.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_delete_file_in_range_cf(
            self.inner.inner(),
            cf.inner(),
            from.as_ptr() as *const c_char,
            from.len() as size_t,
            to.as_ptr() as *const c_char,
            to.len() as size_t,
        ));
        Ok(())
    }
}
/// Requests cancellation of all background work; if `wait` is true, blocks
/// until in-flight jobs finish.
pub fn cancel_all_background_work(&self, wait: bool) {
    unsafe {
        ffi::rocksdb_cancel_all_background_work(self.inner.inner(), c_uchar::from(wait));
    }
}
/// Drops a column family via the C API, then drops the Rust wrapper `cf`
/// (which releases the handle) only after the FFI call succeeded.
fn drop_column_family<C>(
    &self,
    cf_inner: *mut ffi::rocksdb_column_family_handle_t,
    cf: C,
) -> Result<(), Error> {
    unsafe {
        ffi_try!(ffi::rocksdb_drop_column_family(
            self.inner.inner(),
            cf_inner
        ));
    }
    drop(cf);
    Ok(())
}
/// Raises the column family's full_history_ts_low watermark to `ts`.
pub fn increase_full_history_ts_low<S: AsRef<[u8]>>(
    &self,
    cf: &impl AsColumnFamilyRef,
    ts: S,
) -> Result<(), Error> {
    let ts = ts.as_ref();
    unsafe {
        ffi_try!(ffi::rocksdb_increase_full_history_ts_low(
            self.inner.inner(),
            cf.inner(),
            ts.as_ptr() as *const c_char,
            ts.len() as size_t,
        ));
        Ok(())
    }
}
pub fn get_full_history_ts_low(&self, cf: &impl AsColumnFamilyRef) -> Result<Vec<u8>, Error> {
unsafe {
let mut ts_lowlen = 0;
let ts = ffi_try!(ffi::rocksdb_get_full_history_ts_low(
self.inner.inner(),
cf.inner(),
&raw mut ts_lowlen,
));
if ts.is_null() {
Err(Error::new("Could not get full_history_ts_low".to_owned()))
} else {
let mut vec = vec![0; ts_lowlen];
ptr::copy_nonoverlapping(ts as *mut u8, vec.as_mut_ptr(), ts_lowlen);
ffi::rocksdb_free(ts as *mut c_void);
Ok(vec)
}
}
}
/// Returns the DB's identity string as bytes. The C allocation is always
/// freed, even when `raw_data` yields `None` (null pointer).
pub fn get_db_identity(&self) -> Result<Vec<u8>, Error> {
    unsafe {
        let mut length: usize = 0;
        let identity_ptr = ffi::rocksdb_get_db_identity(self.inner.inner(), &raw mut length);
        let identity_vec = raw_data(identity_ptr, length);
        ffi::rocksdb_free(identity_ptr as *mut c_void);
        identity_vec.ok_or_else(|| Error::new("get_db_identity returned NULL".to_string()))
    }
}
}
/// Column-family management for the single-threaded mode: the cfs map is a
/// plain `BTreeMap`/`HashMap` guarded by `&mut self`, no locking needed.
impl<I: DBInner> DBCommon<SingleThreaded, I> {
    /// Creates a new column family and registers it in the local cfs map.
    pub fn create_cf<N: AsRef<str>>(&mut self, name: N, opts: &Options) -> Result<(), Error> {
        let inner = self.create_inner_cf_handle(name.as_ref(), opts)?;
        self.cfs
            .cfs
            .insert(name.as_ref().to_string(), ColumnFamily { inner });
        Ok(())
    }
    #[doc = include_str!("db_create_column_family_with_import.md")]
    pub fn create_column_family_with_import<N: AsRef<str>>(
        &mut self,
        options: &Options,
        column_family_name: N,
        import_options: &ImportColumnFamilyOptions,
        metadata: &ExportImportFilesMetaData,
    ) -> Result<(), Error> {
        let name = column_family_name.as_ref();
        let c_name = CString::new(name).map_err(|err| {
            Error::new(format!(
                "Failed to convert name to CString while importing column family: {err}"
            ))
        })?;
        let inner = unsafe {
            ffi_try!(ffi::rocksdb_create_column_family_with_import(
                self.inner.inner(),
                options.inner,
                c_name.as_ptr(),
                import_options.inner,
                metadata.inner
            ))
        };
        self.cfs
            .cfs
            .insert(column_family_name.as_ref().into(), ColumnFamily { inner });
        Ok(())
    }
    /// Drops a column family by name; errors if the name is unknown.
    pub fn drop_cf(&mut self, name: &str) -> Result<(), Error> {
        match self.cfs.cfs.remove(name) {
            Some(cf) => self.drop_column_family(cf.inner, cf),
            _ => Err(Error::new(format!("Invalid column family: {name}"))),
        }
    }
    /// Looks up a column family handle by name.
    pub fn cf_handle(&self, name: &str) -> Option<&ColumnFamily> {
        self.cfs.cfs.get(name)
    }
    /// Returns the names of all currently registered column families.
    pub fn cf_names(&self) -> Vec<String> {
        self.cfs.cfs.keys().cloned().collect()
    }
}
/// Column-family management for the multi-threaded mode: the cfs map lives
/// behind a `parking_lot::RwLock` and handles are `Arc`-shared.
impl<I: DBInner> DBCommon<MultiThreaded, I> {
    /// Creates a new column family; the write lock is taken before the FFI
    /// call so concurrent create/drop calls serialize.
    pub fn create_cf<N: AsRef<str>>(&self, name: N, opts: &Options) -> Result<(), Error> {
        let mut cfs = self.cfs.cfs.write();
        let inner = self.create_inner_cf_handle(name.as_ref(), opts)?;
        cfs.insert(
            name.as_ref().to_string(),
            Arc::new(UnboundColumnFamily { inner }),
        );
        Ok(())
    }
    #[doc = include_str!("db_create_column_family_with_import.md")]
    pub fn create_column_family_with_import<N: AsRef<str>>(
        &self,
        options: &Options,
        column_family_name: N,
        import_options: &ImportColumnFamilyOptions,
        metadata: &ExportImportFilesMetaData,
    ) -> Result<(), Error> {
        // Lock first so the create + insert pair is atomic w.r.t. other CF ops.
        let mut cfs = self.cfs.cfs.write();
        let name = column_family_name.as_ref();
        let c_name = CString::new(name).map_err(|err| {
            Error::new(format!(
                "Failed to convert name to CString while importing column family: {err}"
            ))
        })?;
        let inner = unsafe {
            ffi_try!(ffi::rocksdb_create_column_family_with_import(
                self.inner.inner(),
                options.inner,
                c_name.as_ptr(),
                import_options.inner,
                metadata.inner
            ))
        };
        cfs.insert(
            column_family_name.as_ref().to_string(),
            Arc::new(UnboundColumnFamily { inner }),
        );
        Ok(())
    }
    /// Drops a column family by name; errors if the name is unknown.
    pub fn drop_cf(&self, name: &str) -> Result<(), Error> {
        match self.cfs.cfs.write().remove(name) {
            Some(cf) => self.drop_column_family(cf.inner, cf),
            _ => Err(Error::new(format!("Invalid column family: {name}"))),
        }
    }
    /// Looks up a column family, returning a handle bound to `self`'s lifetime.
    pub fn cf_handle(&'_ self, name: &str) -> Option<Arc<BoundColumnFamily<'_>>> {
        self.cfs
            .cfs
            .read()
            .get(name)
            .cloned()
            .map(UnboundColumnFamily::bound_column_family)
    }
    /// Returns the names of all currently registered column families.
    pub fn cf_names(&self) -> Vec<String> {
        self.cfs.cfs.read().keys().cloned().collect()
    }
}
impl<T: ThreadMode, I: DBInner> Drop for DBCommon<T, I> {
    fn drop(&mut self) {
        // Release all column-family handles before the DB itself is closed.
        self.cfs.drop_all_cfs_internal();
    }
}
impl<T: ThreadMode, I: DBInner> fmt::Debug for DBCommon<T, I> {
    /// Shows only the DB path; the inner handles are opaque C pointers.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "RocksDB {{ path: {} }}", self.path().display())
    }
}
/// Snapshot of a column family's metadata, copied out of the C API.
#[derive(Debug, Clone)]
pub struct ColumnFamilyMetaData {
    // Total size in bytes of all SST files in the column family.
    pub size: u64,
    // Column family name.
    pub name: String,
    // Number of SST files in the column family.
    pub file_count: usize,
}
/// Metadata for a single live SST file, copied out of the C API.
#[derive(Debug, Clone)]
pub struct LiveFile {
    // Column family the file belongs to.
    pub column_family_name: String,
    // File name (relative).
    pub name: String,
    // Directory containing the file.
    pub directory: String,
    // File size in bytes.
    pub size: usize,
    // LSM level the file lives on.
    pub level: i32,
    // Smallest user key in the file; None if unavailable.
    pub start_key: Option<Vec<u8>>,
    // Largest user key in the file; None if unavailable.
    pub end_key: Option<Vec<u8>>,
    // Smallest sequence number contained in the file.
    pub smallest_seqno: u64,
    // Largest sequence number contained in the file.
    pub largest_seqno: u64,
    // Number of entries in the file.
    pub num_entries: u64,
    // Number of deletion entries in the file.
    pub num_deletions: u64,
}
impl LiveFile {
    /// Copies every entry of a C livefiles list into owned `LiveFile` values.
    ///
    /// Does NOT take ownership of `files`; the caller must destroy the list.
    /// The `*_column_family_name`/`*_name`/`*_directory` strings are borrowed
    /// from the list (hence `from_cstr_without_free`), and `raw_data` copies
    /// the key buffers, returning `None` for null pointers.
    pub(crate) fn from_rocksdb_livefiles_ptr(
        files: *const ffi::rocksdb_livefiles_t,
    ) -> Vec<LiveFile> {
        unsafe {
            let n = ffi::rocksdb_livefiles_count(files);
            let mut livefiles = Vec::with_capacity(n as usize);
            // Reused out-parameter; overwritten by each smallest/largest call.
            let mut key_size: usize = 0;
            for i in 0..n {
                let column_family_name =
                    from_cstr_without_free(ffi::rocksdb_livefiles_column_family_name(files, i));
                let name = from_cstr_without_free(ffi::rocksdb_livefiles_name(files, i));
                let directory = from_cstr_without_free(ffi::rocksdb_livefiles_directory(files, i));
                let size = ffi::rocksdb_livefiles_size(files, i);
                let level = ffi::rocksdb_livefiles_level(files, i);
                let smallest_key = ffi::rocksdb_livefiles_smallestkey(files, i, &raw mut key_size);
                let smallest_key = raw_data(smallest_key, key_size);
                let largest_key = ffi::rocksdb_livefiles_largestkey(files, i, &raw mut key_size);
                let largest_key = raw_data(largest_key, key_size);
                livefiles.push(LiveFile {
                    column_family_name,
                    name,
                    directory,
                    size,
                    level,
                    start_key: smallest_key,
                    end_key: largest_key,
                    largest_seqno: ffi::rocksdb_livefiles_largest_seqno(files, i),
                    smallest_seqno: ffi::rocksdb_livefiles_smallest_seqno(files, i),
                    num_entries: ffi::rocksdb_livefiles_entries(files, i),
                    num_deletions: ffi::rocksdb_livefiles_deletions(files, i),
                });
            }
            livefiles
        }
    }
}
/// Owning guard for a `rocksdb_livefile_t` pointer: destroys it on drop unless
/// ownership is released via [`LiveFileGuard::into_raw`].
struct LiveFileGuard(*mut rocksdb_livefile_t);

impl LiveFileGuard {
    /// Relinquishes ownership, returning the raw pointer and disarming the
    /// guard's destructor.
    fn into_raw(mut self) -> *mut rocksdb_livefile_t {
        std::mem::replace(&mut self.0, ptr::null_mut())
    }
}

impl Drop for LiveFileGuard {
    fn drop(&mut self) {
        let handle = self.0;
        if handle.is_null() {
            // Ownership was transferred via `into_raw`; nothing to free.
            return;
        }
        unsafe {
            rocksdb_livefile_destroy(handle);
        }
    }
}
/// Owning guard for a `rocksdb_livefiles_t` collection: destroys it on drop
/// unless ownership is released via [`LiveFilesGuard::into_raw`].
struct LiveFilesGuard(*mut rocksdb_livefiles_t);

impl LiveFilesGuard {
    /// Relinquishes ownership, returning the raw pointer and disarming the
    /// guard's destructor.
    fn into_raw(mut self) -> *mut rocksdb_livefiles_t {
        std::mem::replace(&mut self.0, ptr::null_mut())
    }
}

impl Drop for LiveFilesGuard {
    fn drop(&mut self) {
        let handle = self.0;
        if handle.is_null() {
            // Ownership was transferred via `into_raw`; nothing to free.
            return;
        }
        unsafe {
            rocksdb_livefiles_destroy(handle);
        }
    }
}
/// Metadata describing a set of exported column-family files, suitable for
/// importing a column family into another database.
#[derive(Debug)]
pub struct ExportImportFilesMetaData {
    // Owned C handle; freed in `Drop` via
    // `rocksdb_export_import_files_metadata_destroy`.
    pub(crate) inner: *mut ffi::rocksdb_export_import_files_metadata_t,
}
impl ExportImportFilesMetaData {
    /// Returns the database comparator name recorded in the metadata.
    pub fn get_db_comparator_name(&self) -> String {
        unsafe {
            let c_name =
                ffi::rocksdb_export_import_files_metadata_get_db_comparator_name(self.inner);
            // The returned C string is copied and freed here, per
            // `from_cstr_and_free`.
            from_cstr_and_free(c_name)
        }
    }
    /// Records `name` as the database comparator name.
    ///
    /// # Panics
    ///
    /// Panics if `name` contains an interior NUL byte (the `CString::new`
    /// unwrap below).
    pub fn set_db_comparator_name(&mut self, name: &str) {
        let c_name = CString::new(name.as_bytes()).unwrap();
        unsafe {
            ffi::rocksdb_export_import_files_metadata_set_db_comparator_name(
                self.inner,
                c_name.as_ptr(),
            );
        };
    }
    /// Returns a copy of the file list stored in the metadata.
    pub fn get_files(&self) -> Vec<LiveFile> {
        unsafe {
            // NOTE(review): the getter appears to return an owned
            // `rocksdb_livefiles_t` (it is destroyed here after copying) —
            // confirm against the C API.
            let livefiles_ptr = ffi::rocksdb_export_import_files_metadata_get_files(self.inner);
            let files = LiveFile::from_rocksdb_livefiles_ptr(livefiles_ptr);
            ffi::rocksdb_livefiles_destroy(livefiles_ptr);
            files
        }
    }
    /// Replaces the metadata's file list with `files`.
    ///
    /// # Errors
    ///
    /// Returns an error if any string field contains an interior NUL byte.
    /// In that case the guards (`LiveFileGuard`/`LiveFilesGuard`) free every
    /// C object allocated so far.
    pub fn set_files(&mut self, files: &[LiveFile]) -> Result<(), Error> {
        // Sentinel for absent keys: a valid zero-length buffer is passed to
        // the C API instead of a dangling pointer.
        static EMPTY: [u8; 0] = [];
        let empty_ptr = EMPTY.as_ptr() as *const libc::c_char;
        unsafe {
            let live_files = LiveFilesGuard(ffi::rocksdb_livefiles_create());
            for file in files {
                let live_file = LiveFileGuard(ffi::rocksdb_livefile_create());
                ffi::rocksdb_livefile_set_level(live_file.0, file.level);
                let c_cf_name = CString::new(file.column_family_name.as_str()).map_err(|err| {
                    Error::new(format!("Unable to convert column family to CString: {err}"))
                })?;
                ffi::rocksdb_livefile_set_column_family_name(live_file.0, c_cf_name.as_ptr());
                let c_name = CString::new(file.name.as_str()).map_err(|err| {
                    Error::new(format!("Unable to convert file name to CString: {err}"))
                })?;
                ffi::rocksdb_livefile_set_name(live_file.0, c_name.as_ptr());
                let c_directory = CString::new(file.directory.as_str()).map_err(|err| {
                    Error::new(format!("Unable to convert directory to CString: {err}"))
                })?;
                ffi::rocksdb_livefile_set_directory(live_file.0, c_directory.as_ptr());
                ffi::rocksdb_livefile_set_size(live_file.0, file.size);
                let (start_key_ptr, start_key_len) = match &file.start_key {
                    None => (empty_ptr, 0),
                    Some(key) => (key.as_ptr() as *const libc::c_char, key.len()),
                };
                ffi::rocksdb_livefile_set_smallest_key(live_file.0, start_key_ptr, start_key_len);
                let (largest_key_ptr, largest_key_len) = match &file.end_key {
                    None => (empty_ptr, 0),
                    Some(key) => (key.as_ptr() as *const libc::c_char, key.len()),
                };
                ffi::rocksdb_livefile_set_largest_key(
                    live_file.0,
                    largest_key_ptr,
                    largest_key_len,
                );
                ffi::rocksdb_livefile_set_smallest_seqno(live_file.0, file.smallest_seqno);
                ffi::rocksdb_livefile_set_largest_seqno(live_file.0, file.largest_seqno);
                ffi::rocksdb_livefile_set_num_entries(live_file.0, file.num_entries);
                ffi::rocksdb_livefile_set_num_deletions(live_file.0, file.num_deletions);
                // Ownership of the livefile moves into the collection; disarm
                // its guard.
                ffi::rocksdb_livefiles_add(live_files.0, live_file.into_raw());
            }
            // Ownership of the collection moves into the metadata object.
            ffi::rocksdb_export_import_files_metadata_set_files(self.inner, live_files.into_raw());
            Ok(())
        }
    }
}
impl Default for ExportImportFilesMetaData {
    /// Allocates an empty metadata object through the C API.
    ///
    /// # Panics
    ///
    /// Panics if the C allocation fails (returns null).
    fn default() -> Self {
        let inner = unsafe { ffi::rocksdb_export_import_files_metadata_create() };
        if inner.is_null() {
            panic!("Could not create rocksdb_export_import_files_metadata_t");
        }
        Self { inner }
    }
}
impl Drop for ExportImportFilesMetaData {
    fn drop(&mut self) {
        // SAFETY: `inner` is an owned, valid handle (assumed non-null — e.g.
        // `Default` asserts this at creation) and is freed nowhere else.
        unsafe {
            ffi::rocksdb_export_import_files_metadata_destroy(self.inner);
        }
    }
}
// SAFETY: the struct exclusively owns its `inner` pointer (freed only in
// `Drop`), so moving it between threads transfers sole ownership.
// NOTE(review): soundness also relies on the underlying RocksDB C object being
// safe to access from other threads — confirm against upstream guarantees.
unsafe impl Send for ExportImportFilesMetaData {}
unsafe impl Sync for ExportImportFilesMetaData {}
fn convert_options(opts: &[(&str, &str)]) -> Result<Vec<(CString, CString)>, Error> {
opts.iter()
.map(|(name, value)| {
let cname = match CString::new(name.as_bytes()) {
Ok(cname) => cname,
Err(e) => return Err(Error::new(format!("Invalid option name `{e}`"))),
};
let cvalue = match CString::new(value.as_bytes()) {
Ok(cvalue) => cvalue,
Err(e) => return Err(Error::new(format!("Invalid option value: `{e}`"))),
};
Ok((cname, cvalue))
})
.collect()
}
/// Zips the parallel output arrays of a multi-get style C call into per-key
/// results, freeing each C-allocated value buffer and converting each C error.
pub(crate) fn convert_values(
    values: Vec<*mut c_char>,
    values_sizes: Vec<usize>,
    errors: Vec<*mut c_char>,
) -> Vec<Result<Option<Vec<u8>>, Error>> {
    let mut results = Vec::with_capacity(values.len());
    for ((value_ptr, value_len), err_ptr) in values.into_iter().zip(values_sizes).zip(errors) {
        if err_ptr.is_null() {
            // Copy the value bytes out (raw_data presumably yields None for a
            // null pointer), then release the C-side allocation.
            let copied = unsafe { crate::ffi_util::raw_data(value_ptr, value_len) };
            unsafe {
                ffi::rocksdb_free(value_ptr as *mut c_void);
            }
            results.push(Ok(copied));
        } else {
            results.push(Err(convert_rocksdb_error(err_ptr)));
        }
    }
    results
}