// rocksdb/db_options.rs

// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
15use std::path::Path;
16use std::ptr::{null_mut, NonNull};
17use std::slice;
18use std::sync::Arc;
19
20use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};
21
22use crate::column_family::ColumnFamilyTtl;
23use crate::ffi_util::from_cstr_and_free;
24use crate::statistics::{Histogram, HistogramData, StatsLevel};
25use crate::{
26    compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
27    compaction_filter_factory::{self, CompactionFilterFactory},
28    comparator::{
29        ComparatorCallback, ComparatorWithTsCallback, CompareFn, CompareTsFn, CompareWithoutTsFn,
30    },
31    db::DBAccess,
32    env::Env,
33    ffi,
34    ffi_util::{to_cpath, CStrLike},
35    merge_operator::{
36        self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
37    },
38    slice_transform::SliceTransform,
39    statistics::Ticker,
40    ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
41};
42
/// Type for log callbacks used by [`Options::set_info_logger`]. Use Box to pass a thin pointer to
/// the C callback.
type LoggerCallback = Box<dyn Fn(LogLevel, &str) + Sync + Send>;
46
/// Owns the underlying C `rocksdb_write_buffer_manager_t` and destroys it on drop.
pub(crate) struct WriteBufferManagerWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_write_buffer_manager_t>,
}
50
impl Drop for WriteBufferManagerWrapper {
    fn drop(&mut self) {
        // Release the native write buffer manager allocated by the C API.
        unsafe {
            ffi::rocksdb_write_buffer_manager_destroy(self.inner.as_ptr());
        }
    }
}
58
/// Shared handle to a RocksDB write buffer manager; cloning bumps the `Arc` refcount,
/// so all clones control the same native manager.
#[derive(Clone)]
pub struct WriteBufferManager(pub(crate) Arc<WriteBufferManagerWrapper>);
61
impl WriteBufferManager {
    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
    /// Users can enable this control by 2 ways:
    ///
    /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
    /// 2- Cost the memtable memory usage to block cache so that memory of RocksDB can be capped by the single limit.
    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
    ///
    /// A memory limit is given when creating the write buffer manager object. RocksDB will try to limit the total memory to under this limit.
    ///
    /// If mutable memtable size exceeds about 90% of the limit,
    /// a flush will be triggered on one column family of the DB you are inserting to.
    /// If the total memory is over the limit, more aggressive flush may also be triggered only if the mutable memtable size also exceeds 50% of the limit.
    /// Both checks are needed because if already more than half memory is being flushed, triggering more flush may not help.
    ///
    /// The total memory is counted as total memory allocated in the arena, even if some of that may not yet be used by memtable.
    ///
    /// buffer_size: the memory limit in bytes.
    /// allow_stall: If set true, it will enable stalling of all writers when memory usage exceeds buffer_size (soft limit).
    ///             It will wait for flush to complete and memory usage to drop down
    pub fn new_write_buffer_manager(buffer_size: size_t, allow_stall: bool) -> Self {
        // The C constructor is not expected to return null; a null here would be an
        // FFI-level bug, hence the unwrap.
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_write_buffer_manager_create(buffer_size, allow_stall)
        })
        .unwrap();
        WriteBufferManager(Arc::new(WriteBufferManagerWrapper { inner }))
    }

    /// Users can set up RocksDB to cost memory used by memtables to block cache.
    /// This can happen no matter whether you enable memtable memory limit or not.
    /// This option is added to manage memory (memtables + block cache) under a single limit.
    ///
    /// buffer_size: the memory limit in bytes.
    /// allow_stall: If set true, it will enable stalling of all writers when memory usage exceeds buffer_size (soft limit).
    ///             It will wait for flush to complete and memory usage to drop down
    /// cache: the block cache instance
    pub fn new_write_buffer_manager_with_cache(
        buffer_size: size_t,
        allow_stall: bool,
        cache: Cache,
    ) -> Self {
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_write_buffer_manager_create_with_cache(
                buffer_size,
                cache.0.inner.as_ptr(),
                allow_stall,
            )
        })
        .unwrap();
        WriteBufferManager(Arc::new(WriteBufferManagerWrapper { inner }))
    }

    /// Returns the WriteBufferManager memory usage in bytes.
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_write_buffer_manager_memory_usage(self.0.inner.as_ptr()) }
    }

    /// Returns the current buffer size in bytes.
    pub fn get_buffer_size(&self) -> usize {
        unsafe { ffi::rocksdb_write_buffer_manager_buffer_size(self.0.inner.as_ptr()) }
    }

    /// Set the buffer size in bytes.
    pub fn set_buffer_size(&self, new_size: usize) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_set_buffer_size(self.0.inner.as_ptr(), new_size);
        }
    }

    /// Returns if WriteBufferManager is enabled.
    pub fn enabled(&self) -> bool {
        unsafe { ffi::rocksdb_write_buffer_manager_enabled(self.0.inner.as_ptr()) }
    }

    /// set the allow_stall flag.
    pub fn set_allow_stall(&self, allow_stall: bool) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_set_allow_stall(self.0.inner.as_ptr(), allow_stall);
        }
    }
}
146
/// Owns the underlying C `rocksdb_cache_t` and destroys it on drop.
pub(crate) struct CacheWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_cache_t>,
}
150
impl Drop for CacheWrapper {
    fn drop(&mut self) {
        // Release the native cache object allocated by the C API.
        unsafe {
            ffi::rocksdb_cache_destroy(self.inner.as_ptr());
        }
    }
}
158
/// Shared handle to a RocksDB block cache; cloning bumps the `Arc` refcount,
/// so all clones refer to the same native cache.
#[derive(Clone)]
pub struct Cache(pub(crate) Arc<CacheWrapper>);
161
162impl Cache {
163    /// Creates an LRU cache with capacity in bytes.
164    pub fn new_lru_cache(capacity: size_t) -> Cache {
165        let inner = NonNull::new(unsafe { ffi::rocksdb_cache_create_lru(capacity) }).unwrap();
166        Cache(Arc::new(CacheWrapper { inner }))
167    }
168
169    /// Creates an LRU cache with custom options.
170    pub fn new_lru_cache_opts(opts: &LruCacheOptions) -> Cache {
171        let inner =
172            NonNull::new(unsafe { ffi::rocksdb_cache_create_lru_opts(opts.inner) }).unwrap();
173        Cache(Arc::new(CacheWrapper { inner }))
174    }
175
176    /// Creates a HyperClockCache with `capacity` in bytes.
177    ///
178    /// HyperClockCache is now generally recommended over LRUCache. See RocksDB's
179    /// [HyperClockCacheOptions in cache.h](https://github.com/facebook/rocksdb/blob/main/include/rocksdb/cache.h)
180    /// for details.
181    ///
182    /// `estimated_entry_charge` is an optional parameter. When not provided
183    /// (== 0, recommended and default), an HCC variant with a
184    /// dynamically-growing table and generally good performance is used. This
185    /// variant depends on anonymous mmaps so might not be available on all
186    /// platforms.
187    ///
188    /// If the average "charge" (uncompressed block size) of block cache entries
189    /// is reasonably predicted and provided here, the most efficient variant of
190    /// HCC is used. Performance is degraded if the prediction is inaccurate.
191    /// Prediction could be difficult or impossible with cache-charging features
192    /// such as WriteBufferManager. The best parameter choice based on a cache
193    /// in use is roughly given by `cache.get_usage() / cache.get_occupancy_count()`,
194    /// though it is better to estimate toward the lower side than the higher
195    /// side when the ratio might vary.
196    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
197        Cache(Arc::new(CacheWrapper {
198            inner: NonNull::new(unsafe {
199                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
200            })
201            .unwrap(),
202        }))
203    }
204
205    /// Returns the cache memory usage in bytes.
206    pub fn get_usage(&self) -> usize {
207        unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
208    }
209
210    /// Returns the pinned memory usage in bytes.
211    pub fn get_pinned_usage(&self) -> usize {
212        unsafe { ffi::rocksdb_cache_get_pinned_usage(self.0.inner.as_ptr()) }
213    }
214
215    /// Sets cache capacity in bytes.
216    pub fn set_capacity(&mut self, capacity: size_t) {
217        unsafe {
218            ffi::rocksdb_cache_set_capacity(self.0.inner.as_ptr(), capacity);
219        }
220    }
221}
222
/// Options that must outlive the DB, and may be shared between DBs. This is cloned and stored
/// with every DB that is created from the options.
#[derive(Default)]
pub(crate) struct OptionsMustOutliveDB {
    env: Option<Env>,
    row_cache: Option<Cache>,
    blob_cache: Option<Cache>,
    block_based: Option<BlockBasedOptionsMustOutliveDB>,
    write_buffer_manager: Option<WriteBufferManager>,
    sst_file_manager: Option<crate::SstFileManager>,
    comparator: Option<Arc<OwnedComparator>>,
    compaction_filter: Option<Arc<OwnedCompactionFilter>>,
    logger_callback: Option<Arc<LoggerCallback>>,
}
237
238impl OptionsMustOutliveDB {
239    pub(crate) fn clone(&self) -> Self {
240        Self {
241            env: self.env.clone(),
242            row_cache: self.row_cache.clone(),
243            blob_cache: self.blob_cache.clone(),
244            block_based: self
245                .block_based
246                .as_ref()
247                .map(BlockBasedOptionsMustOutliveDB::clone),
248            write_buffer_manager: self.write_buffer_manager.clone(),
249            sst_file_manager: self.sst_file_manager.clone(),
250            comparator: self.comparator.clone(),
251            compaction_filter: self.compaction_filter.clone(),
252            logger_callback: self.logger_callback.clone(),
253        }
254    }
255}
256
/// Stores a `rocksdb_comparator_t` and destroys it when dropped.
///
/// This has an unsafe implementation of Send and Sync because it wraps a RocksDB pointer that
/// is safe to share between threads.
struct OwnedComparator {
    inner: NonNull<ffi::rocksdb_comparator_t>,
}
264
impl OwnedComparator {
    /// Takes ownership of an already-created comparator pointer.
    fn new(inner: NonNull<ffi::rocksdb_comparator_t>) -> Self {
        Self { inner }
    }
}
270
impl Drop for OwnedComparator {
    fn drop(&mut self) {
        // Release the native comparator allocated by the C API.
        unsafe {
            ffi::rocksdb_comparator_destroy(self.inner.as_ptr());
        }
    }
}
278
/// Stores a `rocksdb_compactionfilter_t` and destroys it when dropped.
///
/// This has an unsafe implementation of Send and Sync because it wraps a RocksDB pointer that
/// is safe to share between threads.
struct OwnedCompactionFilter {
    inner: NonNull<ffi::rocksdb_compactionfilter_t>,
}
286
impl OwnedCompactionFilter {
    /// Takes ownership of an already-created compaction filter pointer.
    fn new(inner: NonNull<ffi::rocksdb_compactionfilter_t>) -> Self {
        Self { inner }
    }
}
292
impl Drop for OwnedCompactionFilter {
    fn drop(&mut self) {
        // Release the native compaction filter allocated by the C API.
        unsafe {
            ffi::rocksdb_compactionfilter_destroy(self.inner.as_ptr());
        }
    }
}
300
/// Resources referenced by `BlockBasedOptions` that must stay alive as long as any DB
/// created from them (currently only the block cache).
#[derive(Default)]
struct BlockBasedOptionsMustOutliveDB {
    block_cache: Option<Cache>,
}
305
306impl BlockBasedOptionsMustOutliveDB {
307    fn clone(&self) -> Self {
308        Self {
309            block_cache: self.block_cache.clone(),
310        }
311    }
312}
313
/// Database-wide options around performance and behavior.
///
/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
/// and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use rocksdb::{Options, DB};
/// use rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///    let path = "path/for/rocksdb/storageX";
///    let mut opts = Options::default();
///    opts.create_if_missing(true);
///    opts.set_max_open_files(10000);
///    opts.set_use_fsync(false);
///    opts.set_bytes_per_sync(8388608);
///    opts.optimize_for_point_lookup(1024);
///    opts.set_table_cache_num_shard_bits(6);
///    opts.set_max_write_buffer_number(32);
///    opts.set_write_buffer_size(536870912);
///    opts.set_target_file_size_base(1073741824);
///    opts.set_min_write_buffer_number_to_merge(4);
///    opts.set_level_zero_stop_writes_trigger(2000);
///    opts.set_level_zero_slowdown_writes_trigger(0);
///    opts.set_compaction_style(DBCompactionStyle::Universal);
///    opts.set_disable_auto_compactions(true);
///
///    DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    // Raw pointer to the native options struct; owned by this value (freed in Drop).
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    // Keeps alive resources (caches, env, callbacks, ...) the native struct points into.
    pub(crate) outlive: OptionsMustOutliveDB,
}
350
/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use rocksdb::{DB, Options, WriteBatch, WriteOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY1")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY1");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    // Owned native pointer; freed in Drop.
    pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
}
383
/// Configuration for creating an LRU cache via [`Cache::new_lru_cache_opts`].
pub struct LruCacheOptions {
    // Owned native pointer; freed in Drop.
    pub(crate) inner: *mut ffi::rocksdb_lru_cache_options_t,
}
387
/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use rocksdb::{DB, Options, FlushOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY2")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY2");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    // Owned native pointer; freed in Drop.
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}
415
/// For configuring block-based file storage.
pub struct BlockBasedOptions {
    // Owned native pointer; freed in Drop.
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    // Keeps the block cache alive as long as these options (and any DB built from them).
    outlive: BlockBasedOptionsMustOutliveDB,
}
421
/// Options controlling read operations (iterators, gets, multi-gets).
pub struct ReadOptions {
    // Owned native pointer; freed in Drop.
    pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
    // The `ReadOptions` owns a copy of the timestamp and iteration bounds.
    // This is necessary to ensure the pointers we pass over the FFI live as
    // long as the `ReadOptions`. This way, when performing the read operation,
    // the pointers are guaranteed to be valid.
    timestamp: Option<Vec<u8>>,
    iter_start_ts: Option<Vec<u8>>,
    iterate_upper_bound: Option<Vec<u8>>,
    iterate_lower_bound: Option<Vec<u8>>,
}
433
/// Configuration of cuckoo-based storage.
pub struct CuckooTableOptions {
    // Owned native pointer; freed in Drop.
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}
438
/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// let tempdir = tempfile::Builder::new()
///     .tempdir()
///     .expect("Failed to create temporary folder for the _path_for_sst_file");
/// let path1 = tempdir.path().join("_path_for_sst_file");
/// writer.open(path1.clone()).unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let tempdir2 = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY3")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY3");
/// let path2 = tempdir2.path();
/// {
///   let db = DB::open_default(&path2).unwrap();
///   let mut ingest_opts = IngestExternalFileOptions::default();
///   ingest_opts.set_move_files(true);
///   db.ingest_external_file_opts(&ingest_opts, vec![path1]).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path2);
/// ```
pub struct IngestExternalFileOptions {
    // Owned native pointer; freed in Drop.
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}
474
// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for LruCacheOptions {}
unsafe impl Send for FlushOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CacheWrapper {}
unsafe impl Send for CompactOptions {}
unsafe impl Send for WriteBufferManagerWrapper {}
unsafe impl Send for OwnedComparator {}
unsafe impl Send for OwnedCompactionFilter {}
491
// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for LruCacheOptions {}
unsafe impl Sync for FlushOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CacheWrapper {}
unsafe impl Sync for CompactOptions {}
unsafe impl Sync for WriteBufferManagerWrapper {}
unsafe impl Sync for OwnedComparator {}
unsafe impl Sync for OwnedCompactionFilter {}
507
impl Drop for Options {
    fn drop(&mut self) {
        // Free the native options struct; `outlive` drops afterwards via field order.
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}
515
516impl Clone for Options {
517    fn clone(&self) -> Self {
518        let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
519        assert!(!inner.is_null(), "Could not copy RocksDB options");
520
521        Self {
522            inner,
523            outlive: self.outlive.clone(),
524        }
525    }
526}
527
impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        // Free the native table options; the block cache handle drops separately.
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}
535
impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        // Free the native cuckoo table options.
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}
543
impl Drop for FlushOptions {
    fn drop(&mut self) {
        // Free the native flush options.
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}
551
impl Drop for WriteOptions {
    fn drop(&mut self) {
        // Free the native write options.
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}
559
impl Drop for LruCacheOptions {
    fn drop(&mut self) {
        // Free the native LRU cache options.
        unsafe {
            ffi::rocksdb_lru_cache_options_destroy(self.inner);
        }
    }
}
567
impl Drop for ReadOptions {
    fn drop(&mut self) {
        // Free the native read options; the owned Vec bounds drop afterwards.
        unsafe {
            ffi::rocksdb_readoptions_destroy(self.inner);
        }
    }
}
575
impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        // Free the native ingest options.
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}
583
584impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }
594
    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            // usize -> u64 is lossless on all platforms rocksdb supports (usize <= 64 bits).
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }
608
    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
    ///
    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
    ///
    /// NOTE(review): despite its name, the `size` parameter is an enable/disable
    /// flag; it is kept for API compatibility.
    pub fn set_partition_filters(&mut self, size: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(self.inner, c_uchar::from(size));
        }
    }
619
    /// Sets global cache for blocks (user data is stored in a set of blocks, and
    /// a block is the unit of reading from disk).
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        // Keep a clone so the cache outlives the native pointer we just handed over.
        self.outlive.block_cache = Some(cache.clone());
    }
631
    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
        }
    }
638
639    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
640    /// policy to reduce disk reads.
641    ///
642    /// # Examples
643    ///
644    /// ```
645    /// use rocksdb::BlockBasedOptions;
646    ///
647    /// let mut opts = BlockBasedOptions::default();
648    /// opts.set_bloom_filter(10.0, true);
649    /// ```
650    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
651        unsafe {
652            let bloom = if block_based {
653                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
654            } else {
655                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
656            };
657
658            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
659        }
660    }
661
    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }
682
    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }
711
    /// If cache_index_and_filter_blocks is enabled, cache index and filter blocks with high priority.
    /// If set to true, depending on implementation of block cache,
    /// index and filter blocks may be less likely to be evicted than data blocks.
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
                self.inner,
                c_uchar::from(v),
            );
        }
    }
723
    /// If cache_index_and_filter_blocks is enabled, cache index and filter
    /// blocks with high priority. If set to true, depending on implementation of
    /// block cache, index, filter, and other metadata blocks may be less likely
    /// to be evicted than data blocks.
    ///
    /// Default: true.
    pub fn set_cache_index_and_filter_blocks_with_high_priority(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority(
                self.inner,
                c_uchar::from(v),
            );
        }
    }
738
739    /// Defines the index type to be used for SS-table lookups.
740    ///
741    /// # Examples
742    ///
743    /// ```
744    /// use rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
745    ///
746    /// let mut opts = Options::default();
747    /// let mut block_opts = BlockBasedOptions::default();
748    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
749    /// ```
750    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
751        let index = index_type as i32;
752        unsafe {
753            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
754        }
755    }
756
    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                c_uchar::from(v),
            );
        }
    }
771
    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// the top-level index of partitioned filter and index blocks are stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in LSM tree.
    ///
    /// Default: false.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                c_uchar::from(v),
            );
        }
    }
787
    /// Format version, reserved for backward compatibility.
    ///
    /// See full [list](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/table.h#L493-L521)
    /// of the supported versions.
    ///
    /// Default: 5.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }
799
    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }
811
    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }
823
824    /// Set the data block index type for point lookups:
825    ///  `DataBlockIndexType::BinarySearch` to use binary search within the data block.
826    ///  `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
827    ///  the normal binary search.
828    ///
829    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
830    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
831    ///
832    /// Default: `BinarySearch`
833    /// # Examples
834    ///
835    /// ```
836    /// use rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
837    ///
838    /// let mut opts = Options::default();
839    /// let mut block_opts = BlockBasedOptions::default();
840    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
841    /// block_opts.set_data_block_hash_ratio(0.85);
842    /// ```
843    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
844        let index_t = index_type as i32;
845        unsafe {
846            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
847        }
848    }
849
850    /// Set the data block hash index utilization ratio.
851    ///
852    /// The smaller the utilization ratio, the less hash collisions happen, and so reduce the risk for a
853    /// point lookup to fall back to binary search due to the collisions. A small ratio means faster
854    /// lookup at the price of more space overhead.
855    ///
856    /// Default: 0.75
857    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
858        unsafe {
859            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
860        }
861    }
862
863    /// If false, place only prefixes in the filter, not whole keys.
864    ///
865    /// Defaults to true.
866    pub fn set_whole_key_filtering(&mut self, v: bool) {
867        unsafe {
868            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
869        }
870    }
871
872    /// Use the specified checksum type.
873    /// Newly created table files will be protected with this checksum type.
874    /// Old table files will still be readable, even though they have different checksum type.
875    pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
876        unsafe {
877            ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
878        }
879    }
880
881    /// If true, generate Bloom/Ribbon filters that minimize memory internal
882    /// fragmentation.
883    /// See official [wiki](
884    /// https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter#reducing-internal-fragmentation)
885    /// for more information.
886    ///
887    /// Defaults to false.
888    /// # Examples
889    ///
890    /// ```
891    /// use rocksdb::BlockBasedOptions;
892    ///
893    /// let mut opts = BlockBasedOptions::default();
894    /// opts.set_bloom_filter(10.0, true);
895    /// opts.set_optimize_filters_for_memory(true);
896    /// ```
897    pub fn set_optimize_filters_for_memory(&mut self, v: bool) {
898        unsafe {
899            ffi::rocksdb_block_based_options_set_optimize_filters_for_memory(
900                self.inner,
901                c_uchar::from(v),
902            );
903        }
904    }
905
906    /// Set the top-level index pinning tier.
907    ///
908    /// Controls when top-level index blocks are pinned in block cache memory.
909    /// This affects memory usage and lookup performance for large databases with
910    /// multiple levels.
911    ///
912    /// Default: `BlockBasedTablePinningTier::Fallback`
913    ///
914    /// # Examples
915    ///
916    /// ```
917    /// use rocksdb::{BlockBasedOptions, BlockBasedTablePinningTier};
918    ///
919    /// let mut opts = BlockBasedOptions::default();
920    /// opts.set_top_level_index_pinning_tier(BlockBasedTablePinningTier::FlushAndSimilar);
921    /// ```
922    pub fn set_top_level_index_pinning_tier(&mut self, pinning_tier: BlockBasedTablePinningTier) {
923        unsafe {
924            ffi::rocksdb_block_based_options_set_top_level_index_pinning_tier(
925                self.inner,
926                pinning_tier as c_int,
927            );
928        }
929    }
930
931    /// Set the partition pinning tier.
932    ///
933    /// Controls when partition blocks (used in partitioned indexes and filters)
934    /// are pinned in block cache memory. This affects performance for databases
935    /// using partitioned metadata.
936    ///
937    /// Default: `BlockBasedTablePinningTier::Fallback`
938    ///
939    /// # Examples
940    ///
941    /// ```
942    /// use rocksdb::{BlockBasedOptions, BlockBasedTablePinningTier};
943    ///
944    /// let mut opts = BlockBasedOptions::default();
945    /// opts.set_partition_pinning_tier(BlockBasedTablePinningTier::All);
946    /// ```
947    pub fn set_partition_pinning_tier(&mut self, pinning_tier: BlockBasedTablePinningTier) {
948        unsafe {
949            ffi::rocksdb_block_based_options_set_partition_pinning_tier(
950                self.inner,
951                pinning_tier as c_int,
952            );
953        }
954    }
955
956    /// Set the unpartitioned pinning tier.
957    ///
958    /// Controls when unpartitioned metadata blocks (index and filter blocks that
959    /// are not partitioned) are pinned in block cache memory.
960    ///
961    /// Default: `BlockBasedTablePinningTier::Fallback`
962    ///
963    /// # Examples
964    ///
965    /// ```
966    /// use rocksdb::{BlockBasedOptions, BlockBasedTablePinningTier};
967    ///
968    /// let mut opts = BlockBasedOptions::default();
969    /// opts.set_unpartitioned_pinning_tier(BlockBasedTablePinningTier::None);
970    /// ```
971    pub fn set_unpartitioned_pinning_tier(&mut self, pinning_tier: BlockBasedTablePinningTier) {
972        unsafe {
973            ffi::rocksdb_block_based_options_set_unpartitioned_pinning_tier(
974                self.inner,
975                pinning_tier as c_int,
976            );
977        }
978    }
979
980    /// Set the maximum readahead size for RocksDB's implicit (auto) readahead
981    /// on iterators.
982    ///
983    /// RocksDB does auto-readahead for iterators on noticing more than two
984    /// reads for a table file if the user doesn't provide `readahead_size`.
985    /// The readahead starts at `initial_auto_readahead_size` (default: 8 KB)
986    /// and doubles on every additional read up to `max_auto_readahead_size`.
987    ///
988    /// Special value `0` disables the implicit auto prefetching. If
989    /// `max_auto_readahead_size` is smaller than `initial_auto_readahead_size`,
990    /// RocksDB will sanitize `initial_auto_readahead_size` down to
991    /// `max_auto_readahead_size`.
992    ///
993    /// Default: 256 KB (`256 * 1024`).
994    ///
995    /// # Examples
996    ///
997    /// ```
998    /// use rocksdb::BlockBasedOptions;
999    ///
1000    /// let mut opts = BlockBasedOptions::default();
1001    /// opts.set_max_auto_readahead_size(4 * 1024 * 1024); // 4 MB cap
1002    /// ```
1003    pub fn set_max_auto_readahead_size(&mut self, size: usize) {
1004        unsafe {
1005            ffi::rocksdb_block_based_options_set_max_auto_readahead_size(self.inner, size);
1006        }
1007    }
1008
1009    /// Set the initial readahead size for RocksDB's implicit (auto) readahead
1010    /// on iterators.
1011    ///
1012    /// RocksDB does auto-readahead for iterators on noticing more than two
1013    /// reads for a table file if the user doesn't provide `readahead_size`.
1014    /// The readahead starts at `initial_auto_readahead_size` and doubles on
1015    /// every additional read up to `max_auto_readahead_size`.
1016    ///
1017    /// Special values:
1018    /// - `0`: disables the implicit auto prefetching irrespective of
1019    ///   `max_auto_readahead_size`.
1020    /// - If `initial_auto_readahead_size` > `max_auto_readahead_size`, RocksDB
1021    ///   will sanitize it down to `max_auto_readahead_size`.
1022    ///
1023    /// Default: 8 KB (`8 * 1024`).
1024    ///
1025    /// # Examples
1026    ///
1027    /// ```
1028    /// use rocksdb::BlockBasedOptions;
1029    ///
1030    /// let mut opts = BlockBasedOptions::default();
1031    /// opts.set_initial_auto_readahead_size(32 * 1024); // 32 KB initial
1032    /// ```
1033    pub fn set_initial_auto_readahead_size(&mut self, size: usize) {
1034        unsafe {
1035            ffi::rocksdb_block_based_options_set_initial_auto_readahead_size(self.inner, size);
1036        }
1037    }
1038
1039    /// Set how many sequential file reads must occur before RocksDB starts
1040    /// the internal auto prefetcher for an iterator.
1041    ///
1042    /// For example, if the value is `2` then after reading 2 sequential data
1043    /// blocks, on the third data block prefetching will start. If set to `0`,
1044    /// prefetching starts from the first read.
1045    ///
1046    /// Default: `2`.
1047    ///
1048    /// # Examples
1049    ///
1050    /// ```
1051    /// use rocksdb::BlockBasedOptions;
1052    ///
1053    /// let mut opts = BlockBasedOptions::default();
1054    /// opts.set_num_file_reads_for_auto_readahead(0); // prefetch from the start
1055    /// ```
1056    pub fn set_num_file_reads_for_auto_readahead(&mut self, num_reads: u64) {
1057        unsafe {
1058            ffi::rocksdb_block_based_options_set_num_file_reads_for_auto_readahead(
1059                self.inner, num_reads,
1060            );
1061        }
1062    }
1063}
1064
1065impl Default for BlockBasedOptions {
1066    fn default() -> Self {
1067        let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
1068        assert!(
1069            !block_opts.is_null(),
1070            "Could not create RocksDB block based options"
1071        );
1072
1073        Self {
1074            inner: block_opts,
1075            outlive: BlockBasedOptionsMustOutliveDB::default(),
1076        }
1077    }
1078}
1079
1080impl CuckooTableOptions {
1081    /// Determines the utilization of hash tables. Smaller values
1082    /// result in larger hash tables with fewer collisions.
1083    /// Default: 0.9
1084    pub fn set_hash_ratio(&mut self, ratio: f64) {
1085        unsafe {
1086            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
1087        }
1088    }
1089
1090    /// A property used by builder to determine the depth to go to
1091    /// to search for a path to displace elements in case of
1092    /// collision. See Builder.MakeSpaceForKey method. Higher
1093    /// values result in more efficient hash tables with fewer
1094    /// lookups but take more time to build.
1095    /// Default: 100
1096    pub fn set_max_search_depth(&mut self, depth: u32) {
1097        unsafe {
1098            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
1099        }
1100    }
1101
1102    /// In case of collision while inserting, the builder
1103    /// attempts to insert in the next cuckoo_block_size
1104    /// locations before skipping over to the next Cuckoo hash
1105    /// function. This makes lookups more cache friendly in case
1106    /// of collisions.
1107    /// Default: 5
1108    pub fn set_cuckoo_block_size(&mut self, size: u32) {
1109        unsafe {
1110            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
1111        }
1112    }
1113
1114    /// If this option is enabled, user key is treated as uint64_t and its value
1115    /// is used as hash value directly. This option changes builder's behavior.
1116    /// Reader ignore this option and behave according to what specified in
1117    /// table property.
1118    /// Default: false
1119    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
1120        unsafe {
1121            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
1122        }
1123    }
1124
1125    /// If this option is set to true, module is used during hash calculation.
1126    /// This often yields better space efficiency at the cost of performance.
1127    /// If this option is set to false, # of entries in table is constrained to
1128    /// be power of two, and bit and is used to calculate hash, which is faster in general.
1129    /// Default: true
1130    pub fn set_use_module_hash(&mut self, flag: bool) {
1131        unsafe {
1132            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
1133        }
1134    }
1135}
1136
1137impl Default for CuckooTableOptions {
1138    fn default() -> Self {
1139        let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
1140        assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");
1141
1142        Self { inner: opts }
1143    }
1144}
1145
/// Verbosity of the LOG.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}

impl LogLevel {
    /// Maps a raw `i32` coming from the C API back to a `LogLevel`,
    /// returning `None` for out-of-range values.
    pub(crate) fn try_from_raw(raw: i32) -> Option<Self> {
        // Discriminants are fixed by `#[repr(i32)]`: Debug = 0, then sequential.
        match raw {
            0 => Some(Self::Debug),
            1 => Some(Self::Info),
            2 => Some(Self::Warn),
            3 => Some(Self::Error),
            4 => Some(Self::Fatal),
            5 => Some(Self::Header),
            _ => None,
        }
    }
}
1171
1172impl Options {
    /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
    /// latest RocksDB options file stored in the specified rocksdb database.
    ///
    /// *IMPORTANT*:
    /// ROCKSDB DOES NOT STORE cf ttl in the options file. If you have set it via
    /// [`ColumnFamilyDescriptor::new_with_ttl`] then you need to set it again after loading the options file.
    /// Ttl will be set to [`ColumnFamilyTtl::Disabled`] for all column families for your safety.
    pub fn load_latest<P: AsRef<Path>>(
        path: P,
        env: Env,
        ignore_unknown_options: bool,
        cache: Cache,
    ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
        let path = to_cpath(path)?;
        // Out-parameters populated by rocksdb_load_latest_options below.
        let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
        let mut num_column_families: usize = 0;
        let mut column_family_names: *mut *mut c_char = null_mut();
        let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
        unsafe {
            ffi_try!(ffi::rocksdb_load_latest_options(
                path.as_ptr(),
                env.0.inner,
                ignore_unknown_options,
                cache.0.inner.as_ptr(),
                &mut db_options,
                &mut num_column_families,
                &mut column_family_names,
                &mut column_family_options,
            ));
        }
        // Wrap the raw DB options immediately so they are freed on every exit path.
        let options = Options {
            inner: db_options,
            outlive: OptionsMustOutliveDB::default(),
        };
        // read_column_descriptors frees column_family_names and the column_family_options array.
        // We can't call rocksdb_load_latest_options_destroy because it also frees options, and
        // the individual `column_family_options` pointers. We want to return them.
        let column_families = unsafe {
            Options::read_column_descriptors(
                num_column_families,
                column_family_names,
                column_family_options,
            )
        };
        Ok((options, column_families))
    }
1219
1220    /// Constructs a new `DBOptions` from `self` and a string `opts_str` with the syntax detailed in the blogpost
1221    /// [Reading RocksDB options from a file](https://rocksdb.org/blog/2015/02/24/reading-rocksdb-options-from-a-file.html)
1222    pub fn get_options_from_string<S: AsRef<str>>(
1223        &mut self,
1224        opts_str: S,
1225    ) -> Result<Options, Error> {
1226        // create the rocksdb_options_t and immediately wrap it so we don't forget to free it
1227        let options = Options {
1228            inner: unsafe { ffi::rocksdb_options_create() },
1229            outlive: OptionsMustOutliveDB::default(),
1230        };
1231
1232        let opts_cstr = opts_str.as_ref().into_c_string().map_err(|e| {
1233            Error::new(format!(
1234                "options string must not contain NUL (0x00) bytes: {e}"
1235            ))
1236        })?;
1237        unsafe {
1238            ffi_try!(ffi::rocksdb_get_options_from_string(
1239                self.inner.cast_const(),
1240                opts_cstr.as_ptr(),
1241                options.inner,
1242            ));
1243        }
1244        Ok(options)
1245    }
1246
    /// Reads column descriptors from C pointers. This frees the `column_family_names` and
    /// `column_family_options` arrays, and the strings contained in `column_family_names`. It does
    /// *not* free the `rocksdb_options_t*` pointers contained in `column_family_options`.
    ///
    /// # Safety
    /// Both pointer arguments must point to arrays of at least `num_column_families`
    /// valid elements allocated by RocksDB (as produced by `rocksdb_load_latest_options`),
    /// and must not be used by the caller afterwards — this function frees them.
    #[inline]
    unsafe fn read_column_descriptors(
        num_column_families: usize,
        column_family_names: *mut *mut c_char,
        column_family_options: *mut *mut ffi::rocksdb_options_t,
    ) -> Vec<ColumnFamilyDescriptor> {
        // NOTE: both iterators below are lazy; the name strings are only copied and
        // freed (via from_cstr_and_free) when `collect` drives them further down.
        let column_family_names_iter = unsafe {
            slice::from_raw_parts(column_family_names, num_column_families)
                .iter()
                .map(|ptr| from_cstr_and_free(*ptr))
        };
        // Each options pointer is taken over by an `Options` wrapper (returned to the
        // caller), so those pointers must not be freed here.
        let column_family_options_iter = unsafe {
            slice::from_raw_parts(column_family_options, num_column_families)
                .iter()
                .map(|ptr| Options {
                    inner: *ptr,
                    outlive: OptionsMustOutliveDB::default(),
                })
        };
        // Pair names with options; ttl is not stored in the options file, so it is
        // always reported as Disabled (see `load_latest` docs).
        let column_descriptors = column_family_names_iter
            .zip(column_family_options_iter)
            .map(|(name, options)| ColumnFamilyDescriptor {
                name,
                options,
                ttl: ColumnFamilyTtl::Disabled,
            })
            .collect::<Vec<_>>();

        // free the arrays
        unsafe {
            // we freed each string in the column_family_names array using from_cstr_and_free
            ffi::rocksdb_free(column_family_names as *mut c_void);
            // we don't want to free the contents of this array because we return it
            ffi::rocksdb_free(column_family_options as *mut c_void);
        };

        column_descriptors
    }
1288
1289    /// By default, RocksDB uses only one background thread for flush and
1290    /// compaction. Calling this function will set it up such that total of
1291    /// `total_threads` is used. Good value for `total_threads` is the number of
1292    /// cores. You almost definitely want to call this function if your system is
1293    /// bottlenecked by RocksDB.
1294    ///
1295    /// # Examples
1296    ///
1297    /// ```
1298    /// use rocksdb::Options;
1299    ///
1300    /// let mut opts = Options::default();
1301    /// opts.increase_parallelism(3);
1302    /// ```
1303    pub fn increase_parallelism(&mut self, parallelism: i32) {
1304        unsafe {
1305            ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
1306        }
1307    }
1308
1309    /// Optimize level style compaction.
1310    ///
1311    /// Default values for some parameters in `Options` are not optimized for heavy
1312    /// workloads and big datasets, which means you might observe write stalls under
1313    /// some conditions.
1314    ///
1315    /// This can be used as one of the starting points for tuning RocksDB options in
1316    /// such cases.
1317    ///
1318    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
1319    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
1320    /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override if those
1321    /// parameters were set before.
1322    ///
1323    /// It sets buffer sizes so that memory consumption would be constrained by
1324    /// `memtable_memory_budget`.
1325    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
1326        unsafe {
1327            ffi::rocksdb_options_optimize_level_style_compaction(
1328                self.inner,
1329                memtable_memory_budget as u64,
1330            );
1331        }
1332    }
1333
1334    /// Optimize universal style compaction.
1335    ///
1336    /// Default values for some parameters in `Options` are not optimized for heavy
1337    /// workloads and big datasets, which means you might observe write stalls under
1338    /// some conditions.
1339    ///
1340    /// This can be used as one of the starting points for tuning RocksDB options in
1341    /// such cases.
1342    ///
1343    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
1344    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
1345    /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override if those
1346    /// parameters were set before.
1347    ///
1348    /// It sets buffer sizes so that memory consumption would be constrained by
1349    /// `memtable_memory_budget`.
1350    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
1351        unsafe {
1352            ffi::rocksdb_options_optimize_universal_style_compaction(
1353                self.inner,
1354                memtable_memory_budget as u64,
1355            );
1356        }
1357    }
1358
1359    /// If true, the database will be created if it is missing.
1360    ///
1361    /// Default: `false`
1362    ///
1363    /// # Examples
1364    ///
1365    /// ```
1366    /// use rocksdb::Options;
1367    ///
1368    /// let mut opts = Options::default();
1369    /// opts.create_if_missing(true);
1370    /// ```
1371    pub fn create_if_missing(&mut self, create_if_missing: bool) {
1372        unsafe {
1373            ffi::rocksdb_options_set_create_if_missing(
1374                self.inner,
1375                c_uchar::from(create_if_missing),
1376            );
1377        }
1378    }
1379
1380    /// If true, any column families that didn't exist when opening the database
1381    /// will be created.
1382    ///
1383    /// Default: `false`
1384    ///
1385    /// # Examples
1386    ///
1387    /// ```
1388    /// use rocksdb::Options;
1389    ///
1390    /// let mut opts = Options::default();
1391    /// opts.create_missing_column_families(true);
1392    /// ```
1393    pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
1394        unsafe {
1395            ffi::rocksdb_options_set_create_missing_column_families(
1396                self.inner,
1397                c_uchar::from(create_missing_cfs),
1398            );
1399        }
1400    }
1401
1402    /// Specifies whether an error should be raised if the database already exists.
1403    ///
1404    /// Default: false
1405    pub fn set_error_if_exists(&mut self, enabled: bool) {
1406        unsafe {
1407            ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
1408        }
1409    }
1410
1411    /// Enable/disable paranoid checks.
1412    ///
1413    /// If true, the implementation will do aggressive checking of the
1414    /// data it is processing and will stop early if it detects any
1415    /// errors. This may have unforeseen ramifications: for example, a
1416    /// corruption of one DB entry may cause a large number of entries to
1417    /// become unreadable or for the entire DB to become unopenable.
1418    /// If any of the  writes to the database fails (Put, Delete, Merge, Write),
1419    /// the database will switch to read-only mode and fail all other
1420    /// Write operations.
1421    ///
1422    /// Default: false
1423    pub fn set_paranoid_checks(&mut self, enabled: bool) {
1424        unsafe {
1425            ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
1426        }
1427    }
1428
1429    /// A list of paths where SST files can be put into, with its target size.
1430    /// Newer data is placed into paths specified earlier in the vector while
1431    /// older data gradually moves to paths specified later in the vector.
1432    ///
1433    /// For example, you have a flash device with 10GB allocated for the DB,
1434    /// as well as a hard drive of 2TB, you should config it to be:
1435    ///   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
1436    ///
1437    /// The system will try to guarantee data under each path is close to but
1438    /// not larger than the target size. But current and future file sizes used
1439    /// by determining where to place a file are based on best-effort estimation,
1440    /// which means there is a chance that the actual size under the directory
1441    /// is slightly more than target size under some workloads. User should give
1442    /// some buffer room for those cases.
1443    ///
1444    /// If none of the paths has sufficient room to place a file, the file will
1445    /// be placed to the last path anyway, despite to the target size.
1446    ///
1447    /// Placing newer data to earlier paths is also best-efforts. User should
1448    /// expect user files to be placed in higher levels in some extreme cases.
1449    ///
1450    /// If left empty, only one path will be used, which is `path` passed when
1451    /// opening the DB.
1452    ///
1453    /// Default: empty
1454    pub fn set_db_paths(&mut self, paths: &[DBPath]) {
1455        let mut paths: Vec<_> = paths.iter().map(|path| path.inner.cast_const()).collect();
1456        let num_paths = paths.len();
1457        unsafe {
1458            ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
1459        }
1460    }
1461
1462    /// Use the specified object to interact with the environment,
1463    /// e.g. to read/write files, schedule background work, etc. In the near
1464    /// future, support for doing storage operations such as read/write files
1465    /// through env will be deprecated in favor of file_system.
1466    ///
1467    /// Default: Env::default()
1468    pub fn set_env(&mut self, env: &Env) {
1469        unsafe {
1470            ffi::rocksdb_options_set_env(self.inner, env.0.inner);
1471        }
1472        self.outlive.env = Some(env.clone());
1473    }
1474
1475    /// Sets the compression algorithm that will be used for compressing blocks.
1476    ///
1477    /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
1478    /// snappy feature is not enabled).
1479    ///
1480    /// # Examples
1481    ///
1482    /// ```
1483    /// use rocksdb::{Options, DBCompressionType};
1484    ///
1485    /// let mut opts = Options::default();
1486    /// opts.set_compression_type(DBCompressionType::Snappy);
1487    /// ```
1488    pub fn set_compression_type(&mut self, t: DBCompressionType) {
1489        unsafe {
1490            ffi::rocksdb_options_set_compression(self.inner, t as c_int);
1491        }
1492    }
1493
1494    /// Number of threads for parallel compression.
1495    /// Parallel compression is enabled only if threads > 1.
1496    /// THE FEATURE IS STILL EXPERIMENTAL
1497    ///
1498    /// See [code](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/advanced_options.h#L116-L127)
1499    /// for more information.
1500    ///
1501    /// Default: 1
1502    ///
1503    /// Examples
1504    ///
1505    /// ```
1506    /// use rocksdb::{Options, DBCompressionType};
1507    ///
1508    /// let mut opts = Options::default();
1509    /// opts.set_compression_type(DBCompressionType::Zstd);
1510    /// opts.set_compression_options_parallel_threads(3);
1511    /// ```
1512    pub fn set_compression_options_parallel_threads(&mut self, num: i32) {
1513        unsafe {
1514            ffi::rocksdb_options_set_compression_options_parallel_threads(self.inner, num);
1515        }
1516    }
1517
1518    /// Sets the compression algorithm that will be used for compressing WAL.
1519    ///
1520    /// At present, only ZSTD compression is supported!
1521    ///
1522    /// Default: `DBCompressionType::None`
1523    ///
1524    /// # Examples
1525    ///
1526    /// ```
1527    /// use rocksdb::{Options, DBCompressionType};
1528    ///
1529    /// let mut opts = Options::default();
1530    /// opts.set_wal_compression_type(DBCompressionType::Zstd);
1531    /// // Or None to disable it
1532    /// opts.set_wal_compression_type(DBCompressionType::None);
1533    /// ```
1534    pub fn set_wal_compression_type(&mut self, t: DBCompressionType) {
1535        match t {
1536            DBCompressionType::None | DBCompressionType::Zstd => unsafe {
1537                ffi::rocksdb_options_set_wal_compression(self.inner, t as c_int);
1538            },
1539            other => unimplemented!("{:?} is not supported for WAL compression", other),
1540        }
1541    }
1542
1543    /// Sets the bottom-most compression algorithm that will be used for
1544    /// compressing blocks at the bottom-most level.
1545    ///
1546    /// Note that to actually enable bottom-most compression configuration after
1547    /// setting the compression type, it needs to be enabled by calling
1548    /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
1549    /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with `enabled` argument
1550    /// set to `true`.
1551    ///
1552    /// # Examples
1553    ///
1554    /// ```
1555    /// use rocksdb::{Options, DBCompressionType};
1556    ///
1557    /// let mut opts = Options::default();
1558    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1559    /// opts.set_bottommost_zstd_max_train_bytes(0, true);
1560    /// ```
1561    pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
1562        unsafe {
1563            ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
1564        }
1565    }
1566
1567    /// Different levels can have different compression policies. There
1568    /// are cases where most lower levels would like to use quick compression
1569    /// algorithms while the higher levels (which have more data) use
1570    /// compression algorithms that have better compression but could
1571    /// be slower. This array, if non-empty, should have an entry for
1572    /// each level of the database; these override the value specified in
1573    /// the previous field 'compression'.
1574    ///
1575    /// # Examples
1576    ///
1577    /// ```
1578    /// use rocksdb::{Options, DBCompressionType};
1579    ///
1580    /// let mut opts = Options::default();
1581    /// opts.set_compression_per_level(&[
1582    ///     DBCompressionType::None,
1583    ///     DBCompressionType::None,
1584    ///     DBCompressionType::Snappy,
1585    ///     DBCompressionType::Snappy,
1586    ///     DBCompressionType::Snappy
1587    /// ]);
1588    /// ```
1589    pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
1590        unsafe {
1591            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
1592            ffi::rocksdb_options_set_compression_per_level(
1593                self.inner,
1594                level_types.as_mut_ptr(),
1595                level_types.len() as size_t,
1596            );
1597        }
1598    }
1599
1600    /// Maximum size of dictionaries used to prime the compression library.
1601    /// Enabling dictionary can improve compression ratios when there are
1602    /// repetitions across data blocks.
1603    ///
1604    /// The dictionary is created by sampling the SST file data. If
1605    /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
1606    /// dictionary generator. Otherwise, the random samples are used directly as
1607    /// the dictionary.
1608    ///
1609    /// When compression dictionary is disabled, we compress and write each block
1610    /// before buffering data for the next one. When compression dictionary is
1611    /// enabled, we buffer all SST file data in-memory so we can sample it, as data
1612    /// can only be compressed and written after the dictionary has been finalized.
1613    /// So users of this feature may see increased memory usage.
1614    ///
1615    /// Default: `0`
1616    ///
1617    /// # Examples
1618    ///
1619    /// ```
1620    /// use rocksdb::Options;
1621    ///
1622    /// let mut opts = Options::default();
1623    /// opts.set_compression_options(4, 5, 6, 7);
1624    /// ```
1625    pub fn set_compression_options(
1626        &mut self,
1627        w_bits: c_int,
1628        level: c_int,
1629        strategy: c_int,
1630        max_dict_bytes: c_int,
1631    ) {
1632        unsafe {
1633            ffi::rocksdb_options_set_compression_options(
1634                self.inner,
1635                w_bits,
1636                level,
1637                strategy,
1638                max_dict_bytes,
1639            );
1640        }
1641    }
1642
1643    /// Sets compression options for blocks at the bottom-most level.  Meaning
1644    /// of all settings is the same as in [`set_compression_options`](#method.set_compression_options) method but
1645    /// affect only the bottom-most compression which is set using
1646    /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
1647    ///
1648    /// # Examples
1649    ///
1650    /// ```
1651    /// use rocksdb::{Options, DBCompressionType};
1652    ///
1653    /// let mut opts = Options::default();
1654    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1655    /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
1656    /// ```
1657    pub fn set_bottommost_compression_options(
1658        &mut self,
1659        w_bits: c_int,
1660        level: c_int,
1661        strategy: c_int,
1662        max_dict_bytes: c_int,
1663        enabled: bool,
1664    ) {
1665        unsafe {
1666            ffi::rocksdb_options_set_bottommost_compression_options(
1667                self.inner,
1668                w_bits,
1669                level,
1670                strategy,
1671                max_dict_bytes,
1672                c_uchar::from(enabled),
1673            );
1674        }
1675    }
1676
1677    /// Sets maximum size of training data passed to zstd's dictionary trainer. Using zstd's
1678    /// dictionary trainer can achieve even better compression ratio improvements than using
1679    /// `max_dict_bytes` alone.
1680    ///
1681    /// The training data will be used to generate a dictionary of max_dict_bytes.
1682    ///
1683    /// Default: 0.
1684    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
1685        unsafe {
1686            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
1687        }
1688    }
1689
1690    /// Sets maximum size of training data passed to zstd's dictionary trainer
1691    /// when compressing the bottom-most level. Using zstd's dictionary trainer
1692    /// can achieve even better compression ratio improvements than using
1693    /// `max_dict_bytes` alone.
1694    ///
1695    /// The training data will be used to generate a dictionary of
1696    /// `max_dict_bytes`.
1697    ///
1698    /// Default: 0.
1699    pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
1700        unsafe {
1701            ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
1702                self.inner,
1703                value,
1704                c_uchar::from(enabled),
1705            );
1706        }
1707    }
1708
1709    /// If non-zero, we perform bigger reads when doing compaction. If you're
1710    /// running RocksDB on spinning disks, you should set this to at least 2MB.
1711    /// That way RocksDB's compaction is doing sequential instead of random reads.
1712    ///
1713    /// Default: 2 * 1024 * 1024 (2 MB)
1714    pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
1715        unsafe {
1716            ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
1717        }
1718    }
1719
1720    /// Allow RocksDB to pick dynamic base of bytes for levels.
1721    /// With this feature turned on, RocksDB will automatically adjust max bytes for each level.
1722    /// The goal of this feature is to have lower bound on size amplification.
1723    ///
1724    /// Default: false.
1725    pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
1726        unsafe {
1727            ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
1728                self.inner,
1729                c_uchar::from(v),
1730            );
1731        }
1732    }
1733
1734    /// This option has different meanings for different compaction styles:
1735    ///
1736    /// Leveled: files older than `periodic_compaction_seconds` will be picked up
1737    /// for compaction and will be re-written to the same level as they were
1738    /// before.
1739    ///
1740    /// FIFO: not supported. Setting this option has no effect for FIFO compaction.
1741    ///
1742    /// Universal: when there are files older than `periodic_compaction_seconds`,
1743    /// rocksdb will try to do as large a compaction as possible including the
1744    /// last level. Such compaction is only skipped if only last level is to
1745    /// be compacted and no file in last level is older than
1746    /// `periodic_compaction_seconds`. See more in
1747    /// UniversalCompactionBuilder::PickPeriodicCompaction().
1748    /// For backward compatibility, the effective value of this option takes
1749    /// into account the value of option `ttl`. The logic is as follows:
1750    ///
1751    /// - both options are set to 30 days if they have the default value.
1752    /// - if both options are zero, zero is picked. Otherwise, we take the min
1753    ///   value among non-zero options values (i.e. takes the stricter limit).
1754    ///
1755    /// One main use of the feature is to make sure a file goes through compaction
1756    /// filters periodically. Users can also use the feature to clear up SST
1757    /// files using old format.
1758    ///
1759    /// A file's age is computed by looking at file_creation_time or creation_time
1760    /// table properties in order, if they have valid non-zero values; if not, the
1761    /// age is based on the file's last modified time (given by the underlying
1762    /// Env).
1763    ///
1764    /// This option only supports block based table format for any compaction
1765    /// style.
1766    ///
1767    /// unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
1768    ///
1769    /// Values:
1770    /// 0: Turn off Periodic compactions.
1771    /// UINT64_MAX - 1 (0xfffffffffffffffe) is special flag to allow RocksDB to
1772    /// pick default.
1773    ///
1774    /// Default: 30 days if using block based table format + compaction filter +
1775    /// leveled compaction or block based table format + universal compaction.
1776    /// 0 (disabled) otherwise.
1777    ///
1778    pub fn set_periodic_compaction_seconds(&mut self, secs: u64) {
1779        unsafe {
1780            ffi::rocksdb_options_set_periodic_compaction_seconds(self.inner, secs);
1781        }
1782    }
1783
    /// Sets the `ttl` option, in seconds, forwarded to RocksDB.
    ///
    /// See [`set_periodic_compaction_seconds`](#method.set_periodic_compaction_seconds)
    /// for how `ttl` interacts with periodic compaction.
    ///
    /// NOTE(review): the exact per-compaction-style semantics of `ttl` are
    /// defined by RocksDB; confirm against the upstream option documentation.
    pub fn set_ttl(&mut self, ttl_secs: u64) {
        unsafe {
            ffi::rocksdb_options_set_ttl(self.inner, ttl_secs);
        }
    }
1789
1790    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
1791        &mut self,
1792        name: impl CStrLike,
1793        full_merge_fn: F,
1794    ) {
1795        let cb = Box::new(MergeOperatorCallback {
1796            name: name.into_c_string().unwrap(),
1797            full_merge_fn: full_merge_fn.clone(),
1798            partial_merge_fn: full_merge_fn,
1799        });
1800
1801        unsafe {
1802            let mo = ffi::rocksdb_mergeoperator_create(
1803                Box::into_raw(cb).cast::<c_void>(),
1804                Some(merge_operator::destructor_callback::<F, F>),
1805                Some(full_merge_callback::<F, F>),
1806                Some(partial_merge_callback::<F, F>),
1807                Some(merge_operator::delete_callback),
1808                Some(merge_operator::name_callback::<F, F>),
1809            );
1810            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1811        }
1812    }
1813
1814    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
1815        &mut self,
1816        name: impl CStrLike,
1817        full_merge_fn: F,
1818        partial_merge_fn: PF,
1819    ) {
1820        let cb = Box::new(MergeOperatorCallback {
1821            name: name.into_c_string().unwrap(),
1822            full_merge_fn,
1823            partial_merge_fn,
1824        });
1825
1826        unsafe {
1827            let mo = ffi::rocksdb_mergeoperator_create(
1828                Box::into_raw(cb).cast::<c_void>(),
1829                Some(merge_operator::destructor_callback::<F, PF>),
1830                Some(full_merge_callback::<F, PF>),
1831                Some(partial_merge_callback::<F, PF>),
1832                Some(merge_operator::delete_callback),
1833                Some(merge_operator::name_callback::<F, PF>),
1834            );
1835            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1836        }
1837    }
1838
    /// Deprecated alias that forwards to
    /// [`set_merge_operator_associative`](#method.set_merge_operator_associative).
    #[deprecated(
        since = "0.5.0",
        note = "add_merge_operator has been renamed to set_merge_operator"
    )]
    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
        self.set_merge_operator_associative(name, merge_fn);
    }
1846
1847    /// Sets a compaction filter used to determine if entries should be kept, changed,
1848    /// or removed during compaction.
1849    ///
1850    /// An example use case is to remove entries with an expired TTL.
1851    ///
1852    /// If you take a snapshot of the database, only values written since the last
1853    /// snapshot will be passed through the compaction filter.
1854    ///
1855    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
1856    /// simultaneously.
1857    pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
1858    where
1859        F: CompactionFilterFn + Send + 'static,
1860    {
1861        let cb = Box::new(CompactionFilterCallback {
1862            name: name.into_c_string().unwrap(),
1863            filter_fn,
1864        });
1865
1866        let filter = unsafe {
1867            let cf = ffi::rocksdb_compactionfilter_create(
1868                Box::into_raw(cb).cast::<c_void>(),
1869                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
1870                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
1871                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
1872            );
1873            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
1874
1875            OwnedCompactionFilter::new(NonNull::new(cf).unwrap())
1876        };
1877        self.outlive.compaction_filter = Some(Arc::new(filter));
1878    }
1879
1880    /// This is a factory that provides compaction filter objects which allow
1881    /// an application to modify/delete a key-value during background compaction.
1882    ///
1883    /// A new filter will be created on each compaction run.  If multithreaded
1884    /// compaction is being used, each created CompactionFilter will only be used
1885    /// from a single thread and so does not need to be thread-safe.
1886    ///
1887    /// Default: nullptr
1888    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
1889    where
1890        F: CompactionFilterFactory + 'static,
1891    {
1892        let factory = Box::new(factory);
1893
1894        unsafe {
1895            let cff = ffi::rocksdb_compactionfilterfactory_create(
1896                Box::into_raw(factory).cast::<c_void>(),
1897                Some(compaction_filter_factory::destructor_callback::<F>),
1898                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
1899                Some(compaction_filter_factory::name_callback::<F>),
1900            );
1901
1902            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
1903        }
1904    }
1905
1906    /// Sets the comparator used to define the order of keys in the table.
1907    /// Default: a comparator that uses lexicographic byte-wise ordering
1908    ///
1909    /// The client must ensure that the comparator supplied here has the same
1910    /// name and orders keys *exactly* the same as the comparator provided to
1911    /// previous open calls on the same DB.
1912    pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
1913        let cb = Box::new(ComparatorCallback {
1914            name: name.into_c_string().unwrap(),
1915            compare_fn,
1916        });
1917
1918        let cmp = unsafe {
1919            let cmp = ffi::rocksdb_comparator_create(
1920                Box::into_raw(cb).cast::<c_void>(),
1921                Some(ComparatorCallback::destructor_callback),
1922                Some(ComparatorCallback::compare_callback),
1923                Some(ComparatorCallback::name_callback),
1924            );
1925            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1926            OwnedComparator::new(NonNull::new(cmp).unwrap())
1927        };
1928        self.outlive.comparator = Some(Arc::new(cmp));
1929    }
1930
1931    /// Sets the comparator that are timestamp-aware, used to define the order of keys in the table,
1932    /// taking timestamp into consideration.
1933    /// Find more information on timestamp-aware comparator on [here](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp)
1934    ///
1935    /// The client must ensure that the comparator supplied here has the same
1936    /// name and orders keys *exactly* the same as the comparator provided to
1937    /// previous open calls on the same DB.
1938    pub fn set_comparator_with_ts(
1939        &mut self,
1940        name: impl CStrLike,
1941        timestamp_size: usize,
1942        compare_fn: Box<CompareFn>,
1943        compare_ts_fn: Box<CompareTsFn>,
1944        compare_without_ts_fn: Box<CompareWithoutTsFn>,
1945    ) {
1946        let cb = Box::new(ComparatorWithTsCallback {
1947            name: name.into_c_string().unwrap(),
1948            compare_fn,
1949            compare_ts_fn,
1950            compare_without_ts_fn,
1951        });
1952
1953        let cmp = unsafe {
1954            let cmp = ffi::rocksdb_comparator_with_ts_create(
1955                Box::into_raw(cb).cast::<c_void>(),
1956                Some(ComparatorWithTsCallback::destructor_callback),
1957                Some(ComparatorWithTsCallback::compare_callback),
1958                Some(ComparatorWithTsCallback::compare_ts_callback),
1959                Some(ComparatorWithTsCallback::compare_without_ts_callback),
1960                Some(ComparatorWithTsCallback::name_callback),
1961                timestamp_size,
1962            );
1963            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1964            OwnedComparator::new(NonNull::new(cmp).unwrap())
1965        };
1966        self.outlive.comparator = Some(Arc::new(cmp));
1967    }
1968
    /// Sets the prefix extractor (slice transform) on these options.
    ///
    /// NOTE(review): the raw `prefix_extractor.inner` pointer is handed to
    /// RocksDB here; ownership/lifetime handling is defined by
    /// `SliceTransform` — confirm against its implementation.
    pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
        unsafe {
            ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
        }
    }
1974
    /// Optimizes these options for point-lookup workloads.
    ///
    /// Use this if you don't need to keep the data sorted, i.e. you'll never use
    /// an iterator, only Put() and Get() API calls.
    ///
    /// `block_cache_size_mb` is the block cache size, in megabytes, used by the
    /// preset configuration.
    pub fn optimize_for_point_lookup(&mut self, block_cache_size_mb: u64) {
        unsafe {
            ffi::rocksdb_options_optimize_for_point_lookup(self.inner, block_cache_size_mb);
        }
    }
1983
1984    /// Sets the optimize_filters_for_hits flag
1985    ///
1986    /// Default: `false`
1987    ///
1988    /// # Examples
1989    ///
1990    /// ```
1991    /// use rocksdb::Options;
1992    ///
1993    /// let mut opts = Options::default();
1994    /// opts.set_optimize_filters_for_hits(true);
1995    /// ```
1996    pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
1997        unsafe {
1998            ffi::rocksdb_options_set_optimize_filters_for_hits(
1999                self.inner,
2000                c_int::from(optimize_for_hits),
2001            );
2002        }
2003    }
2004
2005    /// Sets the periodicity when obsolete files get deleted.
2006    ///
2007    /// The files that get out of scope by compaction
2008    /// process will still get automatically delete on every compaction,
2009    /// regardless of this setting.
2010    ///
2011    /// Default: 6 hours
2012    pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
2013        unsafe {
2014            ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
2015        }
2016    }
2017
2018    /// Prepare the DB for bulk loading.
2019    ///
2020    /// All data will be in level 0 without any automatic compaction.
2021    /// It's recommended to manually call CompactRange(NULL, NULL) before reading
2022    /// from the database, because otherwise the read can be very slow.
2023    pub fn prepare_for_bulk_load(&mut self) {
2024        unsafe {
2025            ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
2026        }
2027    }
2028
2029    /// Sets the number of open files that can be used by the DB. You may need to
2030    /// increase this if your database has a large working set. Value `-1` means
2031    /// files opened are always kept open. You can estimate number of files based
2032    /// on target_file_size_base and target_file_size_multiplier for level-based
2033    /// compaction. For universal-style compaction, you can usually set it to `-1`.
2034    ///
2035    /// Default: `-1`
2036    ///
2037    /// # Examples
2038    ///
2039    /// ```
2040    /// use rocksdb::Options;
2041    ///
2042    /// let mut opts = Options::default();
2043    /// opts.set_max_open_files(10);
2044    /// ```
2045    pub fn set_max_open_files(&mut self, nfiles: c_int) {
2046        unsafe {
2047            ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
2048        }
2049    }
2050
2051    /// If max_open_files is -1, DB will open all files on DB::Open(). You can
2052    /// use this option to increase the number of threads used to open the files.
2053    /// Default: 16
2054    pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
2055        unsafe {
2056            ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
2057        }
2058    }
2059
2060    /// Controls how many SST files have their metadata (index, filter, etc.)
2061    /// loaded eagerly during initial DB::Open(). The effective limit for
2062    /// positive values is min(limit, table_cache_capacity / 4).
2063    /// Set to 0 to open all files. Set to -1 to use table_cache_capacity/4.
2064    /// Default: 16
2065    pub fn set_initial_table_load_limit(&mut self, limit: c_int) {
2066        unsafe {
2067            ffi::rocksdb_options_set_initial_table_load_limit(self.inner, limit);
2068        }
2069    }
2070
2071    /// Returns the current initial table load limit.
2072    pub fn get_initial_table_load_limit(&self) -> c_int {
2073        unsafe { ffi::rocksdb_options_get_initial_table_load_limit(self.inner) }
2074    }
2075
2076    /// By default, writes to stable storage use fdatasync (on platforms
2077    /// where this function is available). If this option is true,
2078    /// fsync is used instead.
2079    ///
2080    /// fsync and fdatasync are equally safe for our purposes and fdatasync is
2081    /// faster, so it is rarely necessary to set this option. It is provided
2082    /// as a workaround for kernel/filesystem bugs, such as one that affected
2083    /// fdatasync with ext4 in kernel versions prior to 3.7.
2084    ///
2085    /// Default: `false`
2086    ///
2087    /// # Examples
2088    ///
2089    /// ```
2090    /// use rocksdb::Options;
2091    ///
2092    /// let mut opts = Options::default();
2093    /// opts.set_use_fsync(true);
2094    /// ```
2095    pub fn set_use_fsync(&mut self, useit: bool) {
2096        unsafe {
2097            ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
2098        }
2099    }
2100
2101    /// Returns the value of the `use_fsync` option.
2102    pub fn get_use_fsync(&self) -> bool {
2103        let val = unsafe { ffi::rocksdb_options_get_use_fsync(self.inner) };
2104        val != 0
2105    }
2106
2107    /// Specifies the absolute info LOG dir.
2108    ///
2109    /// If it is empty, the log files will be in the same dir as data.
2110    /// If it is non empty, the log files will be in the specified dir,
2111    /// and the db data dir's absolute path will be used as the log file
2112    /// name's prefix.
2113    ///
2114    /// Default: empty
2115    pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
2116        let p = to_cpath(path).unwrap();
2117        unsafe {
2118            ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
2119        }
2120    }
2121
2122    /// Specifies the log level.
2123    /// Consider the `LogLevel` enum for a list of possible levels.
2124    ///
2125    /// Default: Info
2126    ///
2127    /// # Examples
2128    ///
2129    /// ```
2130    /// use rocksdb::{Options, LogLevel};
2131    ///
2132    /// let mut opts = Options::default();
2133    /// opts.set_log_level(LogLevel::Warn);
2134    /// ```
2135    pub fn set_log_level(&mut self, level: LogLevel) {
2136        unsafe {
2137            ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
2138        }
2139    }
2140
2141    /// Allows OS to incrementally sync files to disk while they are being
2142    /// written, asynchronously, in the background. This operation can be used
2143    /// to smooth out write I/Os over time. Users shouldn't rely on it for
2144    /// persistency guarantee.
2145    /// Issue one request for every bytes_per_sync written. `0` turns it off.
2146    ///
2147    /// Default: `0`
2148    ///
2149    /// You may consider using rate_limiter to regulate write rate to device.
2150    /// When rate limiter is enabled, it automatically enables bytes_per_sync
2151    /// to 1MB.
2152    ///
2153    /// This option applies to table files
2154    ///
2155    /// # Examples
2156    ///
2157    /// ```
2158    /// use rocksdb::Options;
2159    ///
2160    /// let mut opts = Options::default();
2161    /// opts.set_bytes_per_sync(1024 * 1024);
2162    /// ```
2163    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
2164        unsafe {
2165            ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
2166        }
2167    }
2168
2169    /// Same as bytes_per_sync, but applies to WAL files.
2170    ///
2171    /// Default: 0, turned off
2172    ///
2173    /// Dynamically changeable through SetDBOptions() API.
2174    pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
2175        unsafe {
2176            ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
2177        }
2178    }
2179
2180    /// Sets the maximum buffer size that is used by WritableFileWriter.
2181    ///
2182    /// On Windows, we need to maintain an aligned buffer for writes.
2183    /// We allow the buffer to grow until it's size hits the limit in buffered
2184    /// IO and fix the buffer size when using direct IO to ensure alignment of
2185    /// write requests if the logical sector size is unusual
2186    ///
2187    /// Default: 1024 * 1024 (1 MB)
2188    ///
2189    /// Dynamically changeable through SetDBOptions() API.
2190    pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
2191        unsafe {
2192            ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
2193        }
2194    }
2195
2196    /// If true, allow multi-writers to update mem tables in parallel.
2197    /// Only some memtable_factory-s support concurrent writes; currently it
2198    /// is implemented only for SkipListFactory.  Concurrent memtable writes
2199    /// are not compatible with inplace_update_support or filter_deletes.
2200    /// It is strongly recommended to set enable_write_thread_adaptive_yield
2201    /// if you are going to use this feature.
2202    ///
2203    /// Default: true
2204    ///
2205    /// # Examples
2206    ///
2207    /// ```
2208    /// use rocksdb::Options;
2209    ///
2210    /// let mut opts = Options::default();
2211    /// opts.set_allow_concurrent_memtable_write(false);
2212    /// ```
2213    pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
2214        unsafe {
2215            ffi::rocksdb_options_set_allow_concurrent_memtable_write(
2216                self.inner,
2217                c_uchar::from(allow),
2218            );
2219        }
2220    }
2221
2222    /// If true, threads synchronizing with the write batch group leader will wait for up to
2223    /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
2224    /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
2225    /// is enabled.
2226    ///
2227    /// Default: true
2228    pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
2229        unsafe {
2230            ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
2231                self.inner,
2232                c_uchar::from(enabled),
2233            );
2234        }
2235    }
2236
2237    /// Specifies whether an iteration->Next() sequentially skips over keys with the same user-key or not.
2238    ///
2239    /// This number specifies the number of keys (with the same userkey)
2240    /// that will be sequentially skipped before a reseek is issued.
2241    ///
2242    /// Default: 8
2243    pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
2244        unsafe {
2245            ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
2246        }
2247    }
2248
2249    /// Enable direct I/O mode for reading
2250    /// they may or may not improve performance depending on the use case
2251    ///
2252    /// Files will be opened in "direct I/O" mode
2253    /// which means that data read from the disk will not be cached or
2254    /// buffered. The hardware buffer of the devices may however still
2255    /// be used. Memory mapped files are not impacted by these parameters.
2256    ///
2257    /// Default: false
2258    ///
2259    /// # Examples
2260    ///
2261    /// ```
2262    /// use rocksdb::Options;
2263    ///
2264    /// let mut opts = Options::default();
2265    /// opts.set_use_direct_reads(true);
2266    /// ```
2267    pub fn set_use_direct_reads(&mut self, enabled: bool) {
2268        unsafe {
2269            ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
2270        }
2271    }
2272
2273    /// Enable direct I/O mode for flush and compaction
2274    ///
2275    /// Files will be opened in "direct I/O" mode
2276    /// which means that data written to the disk will not be cached or
2277    /// buffered. The hardware buffer of the devices may however still
2278    /// be used. Memory mapped files are not impacted by these parameters.
2279    /// they may or may not improve performance depending on the use case
2280    ///
2281    /// Default: false
2282    ///
2283    /// # Examples
2284    ///
2285    /// ```
2286    /// use rocksdb::Options;
2287    ///
2288    /// let mut opts = Options::default();
2289    /// opts.set_use_direct_io_for_flush_and_compaction(true);
2290    /// ```
2291    pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
2292        unsafe {
2293            ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
2294                self.inner,
2295                c_uchar::from(enabled),
2296            );
2297        }
2298    }
2299
2300    /// Enable/disable child process inherit open files.
2301    ///
2302    /// Default: true
2303    pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
2304        unsafe {
2305            ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
2306        }
2307    }
2308
2309    /// Hints to the OS that it should not buffer disk I/O. Enabling this
2310    /// parameter may improve performance but increases pressure on the
2311    /// system cache.
2312    ///
2313    /// The exact behavior of this parameter is platform dependent.
2314    ///
2315    /// On POSIX systems, after RocksDB reads data from disk it will
2316    /// mark the pages as "unneeded". The operating system may or may not
2317    /// evict these pages from memory, reducing pressure on the system
2318    /// cache. If the disk block is requested again this can result in
2319    /// additional disk I/O.
2320    ///
2321    /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
2322    /// which means that data read from the disk will not be cached or
2323    /// bufferized. The hardware buffer of the devices may however still
2324    /// be used. Memory mapped files are not impacted by this parameter.
2325    ///
2326    /// Default: true
2327    ///
2328    /// # Examples
2329    ///
2330    /// ```
2331    /// use rocksdb::Options;
2332    ///
2333    /// let mut opts = Options::default();
2334    /// #[allow(deprecated)]
2335    /// opts.set_allow_os_buffer(false);
2336    /// ```
2337    #[deprecated(
2338        since = "0.7.0",
2339        note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
2340    )]
2341    pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
2342        self.set_use_direct_reads(!is_allow);
2343        self.set_use_direct_io_for_flush_and_compaction(!is_allow);
2344    }
2345
2346    /// Sets the number of shards used for table cache.
2347    ///
2348    /// Default: `6`
2349    ///
2350    /// # Examples
2351    ///
2352    /// ```
2353    /// use rocksdb::Options;
2354    ///
2355    /// let mut opts = Options::default();
2356    /// opts.set_table_cache_num_shard_bits(4);
2357    /// ```
2358    pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
2359        unsafe {
2360            ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
2361        }
2362    }
2363
2364    /// By default target_file_size_multiplier is 1, which means
2365    /// by default files in different levels will have similar size.
2366    ///
2367    /// Dynamically changeable through SetOptions() API
2368    pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
2369        unsafe {
2370            ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
2371        }
2372    }
2373
2374    /// Sets the minimum number of write buffers that will be merged
2375    /// before writing to storage.  If set to `1`, then
2376    /// all write buffers are flushed to L0 as individual files and this increases
2377    /// read amplification because a get request has to check in all of these
2378    /// files. Also, an in-memory merge may result in writing lesser
2379    /// data to storage if there are duplicate records in each of these
2380    /// individual write buffers.
2381    ///
2382    /// Default: `1`
2383    ///
2384    /// # Examples
2385    ///
2386    /// ```
2387    /// use rocksdb::Options;
2388    ///
2389    /// let mut opts = Options::default();
2390    /// opts.set_min_write_buffer_number(2);
2391    /// ```
2392    pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
2393        unsafe {
2394            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
2395        }
2396    }
2397
2398    /// Sets the maximum number of write buffers that are built up in memory.
2399    /// The default and the minimum number is 2, so that when 1 write buffer
2400    /// is being flushed to storage, new writes can continue to the other
2401    /// write buffer.
2402    /// If max_write_buffer_number > 3, writing will be slowed down to
2403    /// options.delayed_write_rate if we are writing to the last write buffer
2404    /// allowed.
2405    ///
2406    /// Default: `2`
2407    ///
2408    /// # Examples
2409    ///
2410    /// ```
2411    /// use rocksdb::Options;
2412    ///
2413    /// let mut opts = Options::default();
2414    /// opts.set_max_write_buffer_number(4);
2415    /// ```
2416    pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
2417        unsafe {
2418            ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
2419        }
2420    }
2421
2422    /// Sets the amount of data to build up in memory (backed by an unsorted log
2423    /// on disk) before converting to a sorted on-disk file.
2424    ///
2425    /// Larger values increase performance, especially during bulk loads.
2426    /// Up to max_write_buffer_number write buffers may be held in memory
2427    /// at the same time,
2428    /// so you may wish to adjust this parameter to control memory usage.
2429    /// Also, a larger write buffer will result in a longer recovery time
2430    /// the next time the database is opened.
2431    ///
2432    /// Note that write_buffer_size is enforced per column family.
2433    /// See db_write_buffer_size for sharing memory across column families.
2434    ///
2435    /// Default: `0x4000000` (64MiB)
2436    ///
2437    /// Dynamically changeable through SetOptions() API
2438    ///
2439    /// # Examples
2440    ///
2441    /// ```
2442    /// use rocksdb::Options;
2443    ///
2444    /// let mut opts = Options::default();
2445    /// opts.set_write_buffer_size(128 * 1024 * 1024);
2446    /// ```
2447    pub fn set_write_buffer_size(&mut self, size: usize) {
2448        unsafe {
2449            ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
2450        }
2451    }
2452
2453    /// Amount of data to build up in memtables across all column
2454    /// families before writing to disk.
2455    ///
2456    /// This is distinct from write_buffer_size, which enforces a limit
2457    /// for a single memtable.
2458    ///
2459    /// This feature is disabled by default. Specify a non-zero value
2460    /// to enable it.
2461    ///
2462    /// Default: 0 (disabled)
2463    ///
2464    /// # Examples
2465    ///
2466    /// ```
2467    /// use rocksdb::Options;
2468    ///
2469    /// let mut opts = Options::default();
2470    /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
2471    /// ```
2472    pub fn set_db_write_buffer_size(&mut self, size: usize) {
2473        unsafe {
2474            ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
2475        }
2476    }
2477
2478    /// Control maximum total data size for a level.
2479    /// max_bytes_for_level_base is the max total for level-1.
2480    /// Maximum number of bytes for level L can be calculated as
2481    /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
2482    /// For example, if max_bytes_for_level_base is 200MB, and if
2483    /// max_bytes_for_level_multiplier is 10, total data size for level-1
2484    /// will be 200MB, total file size for level-2 will be 2GB,
2485    /// and total file size for level-3 will be 20GB.
2486    ///
2487    /// Default: `0x10000000` (256MiB).
2488    ///
2489    /// Dynamically changeable through SetOptions() API
2490    ///
2491    /// # Examples
2492    ///
2493    /// ```
2494    /// use rocksdb::Options;
2495    ///
2496    /// let mut opts = Options::default();
2497    /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
2498    /// ```
2499    pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
2500        unsafe {
2501            ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
2502        }
2503    }
2504
2505    /// Default: `10`
2506    ///
2507    /// # Examples
2508    ///
2509    /// ```
2510    /// use rocksdb::Options;
2511    ///
2512    /// let mut opts = Options::default();
2513    /// opts.set_max_bytes_for_level_multiplier(4.0);
2514    /// ```
2515    pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
2516        unsafe {
2517            ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
2518        }
2519    }
2520
2521    /// The manifest file is rolled over on reaching this limit.
2522    /// The older manifest file be deleted.
2523    /// The default value is MAX_INT so that roll-over does not take place.
2524    ///
2525    /// # Examples
2526    ///
2527    /// ```
2528    /// use rocksdb::Options;
2529    ///
2530    /// let mut opts = Options::default();
2531    /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
2532    /// ```
2533    pub fn set_max_manifest_file_size(&mut self, size: usize) {
2534        unsafe {
2535            ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
2536        }
2537    }
2538
2539    /// Sets the WAL sources for read replica catch-up.
2540    ///
2541    /// Combine [`ReadReplicaWALSource`] flags with bitwise OR.
2542    /// Only takes effect when the database is opened with
2543    /// [`DB::open_as_read_replica`].
2544    #[cfg(feature = "cloud")]
2545    pub fn set_read_replica_wal_sources(&mut self, sources: crate::cloud::ReadReplicaWALSource) {
2546        unsafe {
2547            ffi::rocksdb_options_set_read_replica_wal_sources(self.inner, sources.bits());
2548        }
2549    }
2550
2551    /// Sets the target file size for compaction.
2552    /// target_file_size_base is per-file size for level-1.
2553    /// Target file size for level L can be calculated by
2554    /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
2555    /// For example, if target_file_size_base is 2MB and
2556    /// target_file_size_multiplier is 10, then each file on level-1 will
2557    /// be 2MB, and each file on level 2 will be 20MB,
2558    /// and each file on level-3 will be 200MB.
2559    ///
2560    /// Default: `0x4000000` (64MiB)
2561    ///
2562    /// Dynamically changeable through SetOptions() API
2563    ///
2564    /// # Examples
2565    ///
2566    /// ```
2567    /// use rocksdb::Options;
2568    ///
2569    /// let mut opts = Options::default();
2570    /// opts.set_target_file_size_base(128 * 1024 * 1024);
2571    /// ```
2572    pub fn set_target_file_size_base(&mut self, size: u64) {
2573        unsafe {
2574            ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
2575        }
2576    }
2577
2578    /// Sets the minimum number of write buffers that will be merged together
2579    /// before writing to storage.  If set to `1`, then
2580    /// all write buffers are flushed to L0 as individual files and this increases
2581    /// read amplification because a get request has to check in all of these
2582    /// files. Also, an in-memory merge may result in writing lesser
2583    /// data to storage if there are duplicate records in each of these
2584    /// individual write buffers.
2585    ///
2586    /// Default: `1`
2587    ///
2588    /// # Examples
2589    ///
2590    /// ```
2591    /// use rocksdb::Options;
2592    ///
2593    /// let mut opts = Options::default();
2594    /// opts.set_min_write_buffer_number_to_merge(2);
2595    /// ```
2596    pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
2597        unsafe {
2598            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
2599        }
2600    }
2601
2602    /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
2603    /// level-0 compaction will not be triggered by number of files at all.
2604    ///
2605    /// Default: `4`
2606    ///
2607    /// Dynamically changeable through SetOptions() API
2608    ///
2609    /// # Examples
2610    ///
2611    /// ```
2612    /// use rocksdb::Options;
2613    ///
2614    /// let mut opts = Options::default();
2615    /// opts.set_level_zero_file_num_compaction_trigger(8);
2616    /// ```
2617    pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2618        unsafe {
2619            ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2620        }
2621    }
2622
2623    /// Sets the compaction priority. When multiple files are picked for compaction from a level,
2624    /// this option determines which files to pick first.
2625    ///
2626    /// Default: `CompactionPri::ByCompensatedSize`
2627    ///
2628    /// Dynamically changeable through SetOptions() API
2629    ///
2630    /// See [rocksdb post](https://github.com/facebook/rocksdb/blob/f20d12adc85ece3e75fb238872959c702c0e5535/docs/_posts/2016-01-29-compaction_pri.markdown) for more details.
2631    ///
2632    /// # Examples
2633    ///
2634    /// ```
2635    /// use rocksdb::{Options, CompactionPri};
2636    ///
2637    /// let mut opts = Options::default();
2638    /// opts.set_compaction_pri(CompactionPri::MinOverlappingRatio);
2639    /// ```
2640    pub fn set_compaction_pri(&mut self, pri: CompactionPri) {
2641        unsafe {
2642            ffi::rocksdb_options_set_compaction_pri(self.inner, pri as i32);
2643        }
2644    }
2645
2646    /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
2647    /// point. A value < `0` means that no writing slowdown will be triggered by
2648    /// number of files in level-0.
2649    ///
2650    /// Default: `20`
2651    ///
2652    /// Dynamically changeable through SetOptions() API
2653    ///
2654    /// # Examples
2655    ///
2656    /// ```
2657    /// use rocksdb::Options;
2658    ///
2659    /// let mut opts = Options::default();
2660    /// opts.set_level_zero_slowdown_writes_trigger(10);
2661    /// ```
2662    pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2663        unsafe {
2664            ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2665        }
2666    }
2667
2668    /// Sets the maximum number of level-0 files.  We stop writes at this point.
2669    ///
2670    /// Default: `24`
2671    ///
2672    /// Dynamically changeable through SetOptions() API
2673    ///
2674    /// # Examples
2675    ///
2676    /// ```
2677    /// use rocksdb::Options;
2678    ///
2679    /// let mut opts = Options::default();
2680    /// opts.set_level_zero_stop_writes_trigger(48);
2681    /// ```
2682    pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2683        unsafe {
2684            ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2685        }
2686    }
2687
2688    /// Sets the compaction style.
2689    ///
2690    /// Default: DBCompactionStyle::Level
2691    ///
2692    /// # Examples
2693    ///
2694    /// ```
2695    /// use rocksdb::{Options, DBCompactionStyle};
2696    ///
2697    /// let mut opts = Options::default();
2698    /// opts.set_compaction_style(DBCompactionStyle::Universal);
2699    /// ```
2700    pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2701        unsafe {
2702            ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2703        }
2704    }
2705
2706    /// Sets the options needed to support Universal Style compactions.
2707    pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2708        unsafe {
2709            ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2710        }
2711    }
2712
2713    /// Sets the options for FIFO compaction style.
2714    pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2715        unsafe {
2716            ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2717        }
2718    }
2719
2720    /// Sets unordered_write to true trades higher write throughput with
2721    /// relaxing the immutability guarantee of snapshots. This violates the
2722    /// repeatability one expects from ::Get from a snapshot, as well as
2723    /// ::MultiGet and Iterator's consistent-point-in-time view property.
2724    /// If the application cannot tolerate the relaxed guarantees, it can implement
2725    /// its own mechanisms to work around that and yet benefit from the higher
2726    /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2727    /// two_write_queues=true is one way to achieve immutable snapshots despite
2728    /// unordered_write.
2729    ///
2730    /// By default, i.e., when it is false, rocksdb does not advance the sequence
2731    /// number for new snapshots unless all the writes with lower sequence numbers
2732    /// are already finished. This provides the immutability that we expect from
2733    /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2734    /// snapshots, the snapshot immutability results into Iterator and MultiGet
2735    /// offering consistent-point-in-time view. If set to true, although
2736    /// Read-Your-Own-Write property is still provided, the snapshot immutability
2737    /// property is relaxed: the writes issued after the snapshot is obtained (with
2738    /// larger sequence numbers) will be still not visible to the reads from that
2739    /// snapshot, however, there still might be pending writes (with lower sequence
2740    /// number) that will change the state visible to the snapshot after they are
2741    /// landed to the memtable.
2742    ///
2743    /// Default: false
2744    pub fn set_unordered_write(&mut self, unordered: bool) {
2745        unsafe {
2746            ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2747        }
2748    }
2749
2750    /// Sets maximum number of threads that will
2751    /// concurrently perform a compaction job by breaking it into multiple,
2752    /// smaller ones that are run simultaneously.
2753    ///
2754    /// Default: 1 (i.e. no subcompactions)
2755    pub fn set_max_subcompactions(&mut self, num: u32) {
2756        unsafe {
2757            ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2758        }
2759    }
2760
2761    /// Sets maximum number of concurrent background jobs
2762    /// (compactions and flushes).
2763    ///
2764    /// Default: 2
2765    ///
2766    /// Dynamically changeable through SetDBOptions() API.
2767    pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2768        unsafe {
2769            ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2770        }
2771    }
2772
2773    /// Sets the maximum number of concurrent background compaction jobs, submitted to
2774    /// the default LOW priority thread pool.
2775    /// We first try to schedule compactions based on
2776    /// `base_background_compactions`. If the compaction cannot catch up , we
2777    /// will increase number of compaction threads up to
2778    /// `max_background_compactions`.
2779    ///
2780    /// If you're increasing this, also consider increasing number of threads in
2781    /// LOW priority thread pool. For more information, see
2782    /// Env::SetBackgroundThreads
2783    ///
2784    /// Default: `1`
2785    ///
2786    /// # Examples
2787    ///
2788    /// ```
2789    /// use rocksdb::Options;
2790    ///
2791    /// let mut opts = Options::default();
2792    /// #[allow(deprecated)]
2793    /// opts.set_max_background_compactions(2);
2794    /// ```
2795    #[deprecated(
2796        since = "0.15.0",
2797        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2798    )]
2799    pub fn set_max_background_compactions(&mut self, n: c_int) {
2800        unsafe {
2801            ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2802        }
2803    }
2804
2805    /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2806    /// the HIGH priority thread pool.
2807    ///
2808    /// By default, all background jobs (major compaction and memtable flush) go
2809    /// to the LOW priority pool. If this option is set to a positive number,
2810    /// memtable flush jobs will be submitted to the HIGH priority pool.
2811    /// It is important when the same Env is shared by multiple db instances.
2812    /// Without a separate pool, long running major compaction jobs could
2813    /// potentially block memtable flush jobs of other db instances, leading to
2814    /// unnecessary Put stalls.
2815    ///
2816    /// If you're increasing this, also consider increasing number of threads in
2817    /// HIGH priority thread pool. For more information, see
2818    /// Env::SetBackgroundThreads
2819    ///
2820    /// Default: `1`
2821    ///
2822    /// # Examples
2823    ///
2824    /// ```
2825    /// use rocksdb::Options;
2826    ///
2827    /// let mut opts = Options::default();
2828    /// #[allow(deprecated)]
2829    /// opts.set_max_background_flushes(2);
2830    /// ```
2831    #[deprecated(
2832        since = "0.15.0",
2833        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2834    )]
2835    pub fn set_max_background_flushes(&mut self, n: c_int) {
2836        unsafe {
2837            ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2838        }
2839    }
2840
2841    /// Disables automatic compactions. Manual compactions can still
2842    /// be issued on this column family
2843    ///
2844    /// Default: `false`
2845    ///
2846    /// Dynamically changeable through SetOptions() API
2847    ///
2848    /// # Examples
2849    ///
2850    /// ```
2851    /// use rocksdb::Options;
2852    ///
2853    /// let mut opts = Options::default();
2854    /// opts.set_disable_auto_compactions(true);
2855    /// ```
2856    pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2857        unsafe {
2858            ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2859        }
2860    }
2861
2862    /// SetMemtableHugePageSize sets the page size for huge page for
2863    /// arena used by the memtable.
2864    /// If <=0, it won't allocate from huge page but from malloc.
2865    /// Users are responsible to reserve huge pages for it to be allocated. For
2866    /// example:
2867    ///      sysctl -w vm.nr_hugepages=20
2868    /// See linux doc Documentation/vm/hugetlbpage.txt
2869    /// If there isn't enough free huge page available, it will fall back to
2870    /// malloc.
2871    ///
2872    /// Dynamically changeable through SetOptions() API
2873    pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2874        unsafe {
2875            ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2876        }
2877    }
2878
2879    /// Sets the maximum number of successive merge operations on a key in the memtable.
2880    ///
2881    /// When a merge operation is added to the memtable and the maximum number of
2882    /// successive merges is reached, the value of the key will be calculated and
2883    /// inserted into the memtable instead of the merge operation. This will
2884    /// ensure that there are never more than max_successive_merges merge
2885    /// operations in the memtable.
2886    ///
2887    /// Default: 0 (disabled)
2888    pub fn set_max_successive_merges(&mut self, num: usize) {
2889        unsafe {
2890            ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2891        }
2892    }
2893
2894    /// Control locality of bloom filter probes to improve cache miss rate.
2895    /// This option only applies to memtable prefix bloom and plaintable
2896    /// prefix bloom. It essentially limits the max number of cache lines each
2897    /// bloom filter check can touch.
2898    ///
2899    /// This optimization is turned off when set to 0. The number should never
2900    /// be greater than number of probes. This option can boost performance
2901    /// for in-memory workload but should use with care since it can cause
2902    /// higher false positive rate.
2903    ///
2904    /// Default: 0
2905    pub fn set_bloom_locality(&mut self, v: u32) {
2906        unsafe {
2907            ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2908        }
2909    }
2910
2911    /// Enable/disable thread-safe inplace updates.
2912    ///
2913    /// Requires updates if
2914    /// * key exists in current memtable
2915    /// * new sizeof(new_value) <= sizeof(old_value)
2916    /// * old_value for that key is a put i.e. kTypeValue
2917    ///
2918    /// Default: false.
2919    pub fn set_inplace_update_support(&mut self, enabled: bool) {
2920        unsafe {
2921            ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2922        }
2923    }
2924
2925    /// Sets the number of locks used for inplace update.
2926    ///
2927    /// Default: 10000 when inplace_update_support = true, otherwise 0.
2928    pub fn set_inplace_update_locks(&mut self, num: usize) {
2929        unsafe {
2930            ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2931        }
2932    }
2933
2934    /// Different max-size multipliers for different levels.
2935    /// These are multiplied by max_bytes_for_level_multiplier to arrive
2936    /// at the max-size of each level.
2937    ///
2938    /// Default: 1
2939    ///
2940    /// Dynamically changeable through SetOptions() API
2941    pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2942        let count = level_values.len();
2943        unsafe {
2944            ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2945                self.inner,
2946                level_values.as_ptr().cast_mut(),
2947                count,
2948            );
2949        }
2950    }
2951
2952    /// The total maximum size(bytes) of write buffers to maintain in memory
2953    /// including copies of buffers that have already been flushed. This parameter
2954    /// only affects trimming of flushed buffers and does not affect flushing.
2955    /// This controls the maximum amount of write history that will be available
2956    /// in memory for conflict checking when Transactions are used. The actual
2957    /// size of write history (flushed Memtables) might be higher than this limit
2958    /// if further trimming will reduce write history total size below this
2959    /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2960    /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2961    /// Because trimming the next Memtable of size 20MB will reduce total memory
2962    /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2963    ///
2964    /// When using an OptimisticTransactionDB:
2965    /// If this value is too low, some transactions may fail at commit time due
2966    /// to not being able to determine whether there were any write conflicts.
2967    ///
2968    /// When using a TransactionDB:
2969    /// If Transaction::SetSnapshot is used, TransactionDB will read either
2970    /// in-memory write buffers or SST files to do write-conflict checking.
2971    /// Increasing this value can reduce the number of reads to SST files
2972    /// done for conflict detection.
2973    ///
2974    /// Setting this value to 0 will cause write buffers to be freed immediately
2975    /// after they are flushed. If this value is set to -1,
2976    /// 'max_write_buffer_number * write_buffer_size' will be used.
2977    ///
2978    /// Default:
2979    /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2980    /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2981    /// if it is not explicitly set by the user.  Otherwise, the default is 0.
2982    pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2983        unsafe {
2984            ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2985        }
2986    }
2987
2988    /// By default, a single write thread queue is maintained. The thread gets
2989    /// to the head of the queue becomes write batch group leader and responsible
2990    /// for writing to WAL and memtable for the batch group.
2991    ///
2992    /// If enable_pipelined_write is true, separate write thread queue is
2993    /// maintained for WAL write and memtable write. A write thread first enter WAL
2994    /// writer queue and then memtable writer queue. Pending thread on the WAL
2995    /// writer queue thus only have to wait for previous writers to finish their
2996    /// WAL writing but not the memtable writing. Enabling the feature may improve
2997    /// write throughput and reduce latency of the prepare phase of two-phase
2998    /// commit.
2999    ///
3000    /// Default: false
3001    pub fn set_enable_pipelined_write(&mut self, value: bool) {
3002        unsafe {
3003            ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
3004        }
3005    }
3006
3007    /// Defines the underlying memtable implementation.
3008    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
3009    /// Defaults to using a skiplist.
3010    ///
3011    /// # Examples
3012    ///
3013    /// ```
3014    /// use rocksdb::{Options, MemtableFactory};
3015    /// let mut opts = Options::default();
3016    /// let factory = MemtableFactory::HashSkipList {
3017    ///     bucket_count: 1_000_000,
3018    ///     height: 4,
3019    ///     branching_factor: 4,
3020    /// };
3021    ///
3022    /// opts.set_allow_concurrent_memtable_write(false);
3023    /// opts.set_memtable_factory(factory);
3024    /// ```
3025    pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
3026        match factory {
3027            MemtableFactory::Vector => unsafe {
3028                ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
3029            },
3030            MemtableFactory::HashSkipList {
3031                bucket_count,
3032                height,
3033                branching_factor,
3034            } => unsafe {
3035                ffi::rocksdb_options_set_hash_skip_list_rep(
3036                    self.inner,
3037                    bucket_count,
3038                    height,
3039                    branching_factor,
3040                );
3041            },
3042            MemtableFactory::HashLinkList { bucket_count } => unsafe {
3043                ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
3044            },
3045        }
3046    }
3047
3048    pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
3049        unsafe {
3050            ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
3051        }
3052        self.outlive.block_based = Some(factory.outlive.clone());
3053    }
3054
3055    /// Sets the table factory to a CuckooTableFactory (the default table
3056    /// factory is a block-based table factory that provides a default
3057    /// implementation of TableBuilder and TableReader with default
3058    /// BlockBasedTableOptions).
3059    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
3060    /// # Examples
3061    ///
3062    /// ```
3063    /// use rocksdb::{Options, CuckooTableOptions};
3064    ///
3065    /// let mut opts = Options::default();
3066    /// let mut factory_opts = CuckooTableOptions::default();
3067    /// factory_opts.set_hash_ratio(0.8);
3068    /// factory_opts.set_max_search_depth(20);
3069    /// factory_opts.set_cuckoo_block_size(10);
3070    /// factory_opts.set_identity_as_first_hash(true);
3071    /// factory_opts.set_use_module_hash(false);
3072    ///
3073    /// opts.set_cuckoo_table_factory(&factory_opts);
3074    /// ```
3075    pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
3076        unsafe {
3077            ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
3078        }
3079    }
3080
3081    // This is a factory that provides TableFactory objects.
3082    // Default: a block-based table factory that provides a default
3083    // implementation of TableBuilder and TableReader with default
3084    // BlockBasedTableOptions.
3085    /// Sets the factory as plain table.
3086    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
3087    /// information.
3088    ///
3089    /// # Examples
3090    ///
3091    /// ```
3092    /// use rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
3093    ///
3094    /// let mut opts = Options::default();
3095    /// let factory_opts = PlainTableFactoryOptions {
3096    ///   user_key_length: 0,
3097    ///   bloom_bits_per_key: 20,
3098    ///   hash_table_ratio: 0.75,
3099    ///   index_sparseness: 16,
3100    ///   huge_page_tlb_size: 0,
3101    ///   encoding_type: KeyEncodingType::Plain,
3102    ///   full_scan_mode: false,
3103    ///   store_index_in_file: false,
3104    /// };
3105    ///
3106    /// opts.set_plain_table_factory(&factory_opts);
3107    /// ```
3108    pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
3109        unsafe {
3110            ffi::rocksdb_options_set_plain_table_factory(
3111                self.inner,
3112                options.user_key_length,
3113                options.bloom_bits_per_key,
3114                options.hash_table_ratio,
3115                options.index_sparseness,
3116                options.huge_page_tlb_size,
3117                options.encoding_type as c_char,
3118                c_uchar::from(options.full_scan_mode),
3119                c_uchar::from(options.store_index_in_file),
3120            );
3121        }
3122    }
3123
3124    /// Sets the start level to use compression.
3125    pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
3126        unsafe {
3127            ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
3128        }
3129    }
3130
3131    /// Measure IO stats in compactions and flushes, if `true`.
3132    ///
3133    /// Default: `false`
3134    ///
3135    /// # Examples
3136    ///
3137    /// ```
3138    /// use rocksdb::Options;
3139    ///
3140    /// let mut opts = Options::default();
3141    /// opts.set_report_bg_io_stats(true);
3142    /// ```
3143    pub fn set_report_bg_io_stats(&mut self, enable: bool) {
3144        unsafe {
3145            ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
3146        }
3147    }
3148
    /// Once write-ahead logs exceed this size, we will start forcing the flush of
    /// column families whose memtables are backed by the oldest live WAL file
    /// (i.e. the ones that are causing all the space amplification).
    ///
    /// Default: `0`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Set max total wal size to 1G.
    /// opts.set_max_total_wal_size(1 << 30);
    /// ```
    pub fn set_max_total_wal_size(&mut self, size: u64) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
        }
    }
3169
    /// Recovery mode to control the consistency while replaying WAL.
    ///
    /// Default: DBRecoveryMode::PointInTime
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBRecoveryMode};
    ///
    /// let mut opts = Options::default();
    /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
    /// ```
    pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            // The enum discriminant matches the C API's integer recovery mode.
            ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
        }
    }
3187
    /// Enables recording RocksDB statistics.
    ///
    /// The statistics in this Options object are shared between all DB instances.
    /// See [`get_statistics`](Self::get_statistics), [`get_ticker_count`](Self::get_ticker_count),
    /// and [`get_histogram_data`](Self::get_histogram_data).
    pub fn enable_statistics(&mut self) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_enable_statistics(self.inner);
        }
    }
3198
3199    /// Returns a string containing RocksDB statistics if enabled using
3200    /// [`enable_statistics`](Self::enable_statistics).
3201    pub fn get_statistics(&self) -> Option<String> {
3202        unsafe {
3203            let value = ffi::rocksdb_options_statistics_get_string(self.inner);
3204            if value.is_null() {
3205                return None;
3206            }
3207
3208            // Must have valid UTF-8 format.
3209            Some(from_cstr_and_free(value))
3210        }
3211    }
3212
    /// StatsLevel can be used to reduce statistics overhead by skipping certain
    /// types of stats in the stats collection process.
    ///
    /// Only takes effect if stats are enabled first using
    /// [`enable_statistics`](Self::enable_statistics).
    ///
    /// NOTE(review): unlike the other setters this takes `&self`, not `&mut self`;
    /// tightening it would be a breaking API change, so the inconsistency is only
    /// documented here.
    pub fn set_statistics_level(&self, level: StatsLevel) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe { ffi::rocksdb_options_set_statistics_level(self.inner, level as c_int) }
    }
3221
    /// Returns a counter if statistics are enabled using
    /// [`enable_statistics`](Self::enable_statistics).
    pub fn get_ticker_count(&self, ticker: Ticker) -> u64 {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe { ffi::rocksdb_options_statistics_get_ticker_count(self.inner, ticker as u32) }
    }
3227
    /// Returns a histogram if statistics are enabled using
    /// [`enable_statistics`](Self::enable_statistics).
    pub fn get_histogram_data(&self, histogram: Histogram) -> HistogramData {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            // Create an empty histogram and let the C API fill its inner
            // struct in place before returning it.
            let data = HistogramData::default();
            ffi::rocksdb_options_statistics_get_histogram_data(
                self.inner,
                histogram as u32,
                data.inner,
            );
            data
        }
    }
3241
    /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
    ///
    /// Default: `600` (10 mins)
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_stats_dump_period_sec(300);
    /// ```
    pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
        }
    }
3259
    /// If not zero, persist `rocksdb.stats` to disk every `stats_persist_period_sec`.
    ///
    /// Default: `600` (10 mins)
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_stats_persist_period_sec(5);
    /// ```
    pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
        }
    }
3277
    /// When set to true, reading SST files will opt out of the filesystem's
    /// readahead. Setting this to false may improve sequential iteration
    /// performance.
    ///
    /// Default: `true`
    pub fn set_advise_random_on_open(&mut self, advise: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
        }
    }
3288
    /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
    ///
    /// This could reduce context switch when the mutex is not
    /// heavily contended. However, if the mutex is hot, we could end up
    /// wasting spin time.
    ///
    /// Default: false
    pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
        }
    }
3301
    /// Sets the number of levels for this database.
    pub fn set_num_levels(&mut self, n: c_int) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_num_levels(self.inner, n);
        }
    }
3308
    /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
    /// creates a prefix bloom filter for each memtable with the size of
    /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
    ///
    /// Default: `0`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, SliceTransform};
    ///
    /// let mut opts = Options::default();
    /// let transform = SliceTransform::create_fixed_prefix(10);
    /// opts.set_prefix_extractor(transform);
    /// opts.set_memtable_prefix_bloom_ratio(0.2);
    /// ```
    pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            // Note: the underlying C option is named `..._size_ratio`.
            ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
        }
    }
3330
    /// Sets the maximum number of bytes in all compacted files.
    /// We try to limit number of bytes in one compaction to be lower than this
    /// threshold. But it's not guaranteed.
    ///
    /// Value 0 will be sanitized.
    ///
    /// Default: target_file_size_base * 25
    pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
        }
    }
3343
    /// Specifies the absolute path of the directory the
    /// write-ahead log (WAL) should be written to.
    ///
    /// Default: same directory as the database
    ///
    /// # Panics
    ///
    /// Panics if `path` cannot be converted into a C string
    /// (e.g. it contains an interior NUL byte).
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_wal_dir("/path/to/dir");
    /// ```
    pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
        let p = to_cpath(path).unwrap();
        // SAFETY: `self.inner` is valid and `p` is a NUL-terminated C string
        // that outlives the call; the C side copies the path.
        unsafe {
            ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
        }
    }
3363
    /// Sets the WAL ttl in seconds.
    ///
    /// The following two options affect how archived logs will be deleted.
    /// 1. If both set to 0, logs will be deleted asap and will not get into
    ///    the archive.
    /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
    ///    WAL files will be checked every 10 min and if total size is greater
    ///    than wal_size_limit_mb, they will be deleted starting with the
    ///    earliest until size_limit is met. All empty files will be deleted.
    /// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
    ///    WAL files will be checked every wal_ttl_seconds / 2 and those that
    ///    are older than wal_ttl_seconds will be deleted.
    /// 4. If both are not 0, WAL files will be checked every 10 min and both
    ///    checks will be performed with ttl being first.
    ///
    /// Default: 0
    pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
        }
    }
3385
    /// Sets the WAL size limit in MB.
    ///
    /// If total size of WAL files is greater than wal_size_limit_mb,
    /// they will be deleted starting with the earliest until size_limit is met.
    ///
    /// Default: 0
    pub fn set_wal_size_limit_mb(&mut self, size: u64) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
        }
    }
3397
    /// Sets the number of bytes to preallocate (via fallocate) the manifest files.
    ///
    /// Default is 4MB, which is reasonable to reduce random IO
    /// as well as prevent overallocation for mounts that preallocate
    /// large amounts of data (such as xfs's allocsize option).
    pub fn set_manifest_preallocation_size(&mut self, size: usize) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
        }
    }
3408
    /// If true, then DB::Open() will not update the statistics used to optimize
    /// compaction decision by loading table properties from many files.
    /// Turning off this feature will improve DBOpen time especially in disk environment.
    ///
    /// Default: false
    pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
        }
    }
3419
    /// Specify the maximal number of info log files to be kept.
    ///
    /// Default: 1000
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_keep_log_file_num(100);
    /// ```
    pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
        }
    }
3437
    /// Allow the OS to mmap file for writing.
    ///
    /// Default: false
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_allow_mmap_writes(true);
    /// ```
    pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
        }
    }
3455
    /// Allow the OS to mmap file for reading sst tables.
    ///
    /// Default: false
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_allow_mmap_reads(true);
    /// ```
    pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
        }
    }
3473
    /// If enabled, WAL is not flushed automatically after each write. Instead it
    /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
    /// to its file.
    ///
    /// Default: false
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_manual_wal_flush(true);
    /// ```
    pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
        }
    }
3493
    /// Guarantee that all column families are flushed together atomically.
    /// This option applies to both manual flushes (`db.flush()`) and automatic
    /// background flushes caused when memtables are filled.
    ///
    /// Note that this is only useful when the WAL is disabled. When using the
    /// WAL, writes are always consistent across column families.
    ///
    /// Default: false
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_atomic_flush(true);
    /// ```
    pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
        }
    }
3516
    /// Sets global cache for table-level rows.
    ///
    /// Default: null (disabled)
    /// Not supported in ROCKSDB_LITE mode!
    pub fn set_row_cache(&mut self, cache: &Cache) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
        }
        // Keep a clone so the cache outlives any DB opened with these options.
        self.outlive.row_cache = Some(cache.clone());
    }
3527
    /// Use to control write rate of flush and compaction. Flush has higher
    /// priority than compaction.
    /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
    ///
    /// Default: disable
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
    /// ```
    pub fn set_ratelimiter(
        &mut self,
        rate_bytes_per_sec: i64,
        refill_period_us: i64,
        fairness: i32,
    ) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            let ratelimiter =
                ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
            // The options object retains its own reference to the rate limiter,
            // so only the local handle is released here.
            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
        }
    }
3555
    /// Use to control write rate of flush and compaction. Flush has higher
    /// priority than compaction.
    /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
    ///
    /// Unlike [`set_ratelimiter`](Self::set_ratelimiter), the limiter created
    /// here auto-tunes its rate.
    ///
    /// Default: disable
    pub fn set_auto_tuned_ratelimiter(
        &mut self,
        rate_bytes_per_sec: i64,
        refill_period_us: i64,
        fairness: i32,
    ) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            let ratelimiter = ffi::rocksdb_ratelimiter_create_auto_tuned(
                rate_bytes_per_sec,
                refill_period_us,
                fairness,
            );
            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
            // The options object retains its own reference to the rate limiter,
            // so only the local handle is released here.
            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
        }
    }
3577
    /// Sets the maximal size of the info log file.
    ///
    /// If the log file is larger than `max_log_file_size`, a new info log file
    /// will be created. If `max_log_file_size` is equal to zero, all logs will
    /// be written to one log file.
    ///
    /// Default: 0
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_max_log_file_size(0);
    /// ```
    pub fn set_max_log_file_size(&mut self, size: usize) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
        }
    }
3599
    /// Sets the time for the info log file to roll (in seconds).
    ///
    /// If specified with non-zero value, log file will be rolled
    /// if it has been active longer than `log_file_time_to_roll`.
    /// Default: 0 (disabled)
    pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
        }
    }
3610
    /// Controls the recycling of log files.
    ///
    /// If non-zero, previously written log files will be reused for new logs,
    /// overwriting the old data. The value indicates how many such files we will
    /// keep around at any point in time for later use. This is more efficient
    /// because the blocks are already allocated and fdatasync does not need to
    /// update the inode after each write.
    ///
    /// Default: 0
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_recycle_log_file_num(5);
    /// ```
    pub fn set_recycle_log_file_num(&mut self, num: usize) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
        }
    }
3634
    /// Sets the threshold at which all writes will be slowed down to at least delayed_write_rate if estimated
    /// bytes needed to be compaction exceed this threshold.
    ///
    /// Default: 64GB
    pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
        }
    }
3644
    /// Sets the bytes threshold at which all writes are stopped if estimated bytes needed to be compaction exceed
    /// this threshold.
    ///
    /// Default: 256GB
    pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
        }
    }
3654
    /// Sets the size of one block in arena memory allocation.
    ///
    /// If <= 0, a proper value is automatically calculated (usually 1/10 of
    /// writer_buffer_size).
    ///
    /// Default: 0
    pub fn set_arena_block_size(&mut self, size: usize) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_arena_block_size(self.inner, size);
        }
    }
3666
    /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
    ///
    /// Default: false
    pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
        }
    }
3675
    /// Enable whole key bloom filter in memtable. Note this will only take effect
    /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
    /// can potentially reduce CPU usage for point-look-ups.
    ///
    /// Default: false (disable)
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_memtable_whole_key_filtering(
                self.inner,
                c_uchar::from(whole_key_filter),
            );
        }
    }
3691
    /// Enable the use of key-value separation.
    ///
    /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
    ///
    /// Default: false (disable)
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_enable_blob_files(&mut self, val: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
        }
    }
3704
    /// Sets the minimum threshold value at or above which will be written
    /// to blob files during flush or compaction.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_min_blob_size(&mut self, val: u64) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_min_blob_size(self.inner, val);
        }
    }
3714
    /// Sets the size limit for blob files.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_file_size(&mut self, val: u64) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_blob_file_size(self.inner, val);
        }
    }
3723
    /// Sets the blob compression type. All blob files use the same
    /// compression type.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
        }
    }
3733
    /// If this is set to true RocksDB will actively relocate valid blobs from the oldest blob files
    /// as they are encountered during compaction.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_enable_blob_gc(&mut self, val: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
        }
    }
3743
    /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
    ///
    /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
    /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
    /// the trade-off between write amplification and space amplification.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
        }
    }
3756
    /// Sets the blob GC force threshold.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
        }
    }
3765
    /// Sets the blob compaction read ahead size.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
        }
    }
3774
    /// Sets the blob cache.
    ///
    /// Using a dedicated object for blobs and using the same object for the block and blob caches
    /// are both supported. In the latter case, note that blobs are less valuable from a caching
    /// perspective than SST blocks, and some cache implementations have configuration options that
    /// can be used to prioritize items accordingly (see Cache::Priority and
    /// LRUCacheOptions::{high,low}_pri_pool_ratio).
    ///
    /// Default: disabled
    pub fn set_blob_cache(&mut self, cache: &Cache) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_blob_cache(self.inner, cache.0.inner.as_ptr());
        }
        // Keep a clone so the cache outlives any DB opened with these options.
        self.outlive.blob_cache = Some(cache.clone());
    }
3790
    /// Set this option to true during creation of database if you want
    /// to be able to ingest behind (call IngestExternalFile() skipping keys
    /// that already exist, rather than overwriting matching keys).
    /// Setting this option to true has the following effects:
    ///
    /// 1. Disable some internal optimizations around SST file compression.
    /// 2. Reserve the last level for ingested files only.
    /// 3. Compaction will not include any file from the last level.
    ///
    /// Note that only Universal Compaction supports allow_ingest_behind.
    /// `num_levels` should be >= 3 if this option is turned on.
    ///
    /// DEFAULT: false
    /// Immutable.
    pub fn set_allow_ingest_behind(&mut self, val: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_allow_ingest_behind(self.inner, c_uchar::from(val));
        }
    }
3810
    /// A factory of a table property collector that marks an SST
    /// file as need-compaction when it observes at least "D" deletion
    /// entries in any "N" consecutive entries, or the ratio of tombstone
    /// entries >= deletion_ratio.
    ///
    /// `window_size`: is the sliding window size "N"
    /// `num_dels_trigger`: is the deletion trigger "D"
    /// `deletion_ratio`: if <= 0 or > 1, disable triggering compaction based on
    /// deletion ratio.
    pub fn add_compact_on_deletion_collector_factory(
        &mut self,
        window_size: size_t,
        num_dels_trigger: size_t,
        deletion_ratio: f64,
    ) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_add_compact_on_deletion_collector_factory_del_ratio(
                self.inner,
                window_size,
                num_dels_trigger,
                deletion_ratio,
            );
        }
    }
3835
    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
    /// Users can enable this control by 2 ways:
    ///
    /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
    /// 2- Cost the memtable memory usage to block cache so that memory of RocksDB can be capped by the single limit.
    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
    pub fn set_write_buffer_manager(&mut self, write_buffer_manager: &WriteBufferManager) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_write_buffer_manager(
                self.inner,
                write_buffer_manager.0.inner.as_ptr(),
            );
        }
        // Keep a clone so the manager outlives any DB opened with these options.
        self.outlive.write_buffer_manager = Some(write_buffer_manager.clone());
    }
3853
    /// Sets the SST file manager used to track SST files and control their
    /// deletion rate.
    ///
    /// The manager can be shared across multiple DBs/column families.
    pub fn set_sst_file_manager(&mut self, sst_file_manager: &crate::SstFileManager) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_sst_file_manager(self.inner, sst_file_manager.0.inner);
        }
        // Keep a clone so the manager outlives any DB opened with these options.
        self.outlive.sst_file_manager = Some(sst_file_manager.clone());
    }
3864
    /// If true, working thread may avoid doing unnecessary and long-latency
    /// operation (such as deleting obsolete files directly or deleting memtable)
    /// and will instead schedule a background job to do it.
    ///
    /// Use it if you're latency-sensitive.
    ///
    /// Default: false (disabled)
    pub fn set_avoid_unnecessary_blocking_io(&mut self, val: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_avoid_unnecessary_blocking_io(self.inner, u8::from(val));
        }
    }
3877
    /// If true, the log numbers and sizes of the synced WALs are tracked
    /// in MANIFEST. During DB recovery, if a synced WAL is missing
    /// from disk, or the WAL's size does not match the recorded size in
    /// MANIFEST, an error will be reported and the recovery will be aborted.
    ///
    /// This is one additional protection against WAL corruption besides the
    /// per-WAL-entry checksum.
    ///
    /// Note that this option does not work with secondary instance.
    /// Currently, only syncing closed WALs are tracked. Calling `DB::SyncWAL()`,
    /// etc. or writing with `WriteOptions::sync=true` to sync the live WAL is not
    /// tracked for performance/efficiency reasons.
    ///
    /// See: <https://github.com/facebook/rocksdb/wiki/Track-WAL-in-MANIFEST>
    ///
    /// Default: false (disabled)
    pub fn set_track_and_verify_wals_in_manifest(&mut self, val: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_track_and_verify_wals_in_manifest(self.inner, u8::from(val));
        }
    }
3899
3900    /// Returns the value of the `track_and_verify_wals_in_manifest` option.
3901    pub fn get_track_and_verify_wals_in_manifest(&self) -> bool {
3902        let val_u8 =
3903            unsafe { ffi::rocksdb_options_get_track_and_verify_wals_in_manifest(self.inner) };
3904        val_u8 != 0
3905    }
3906
    /// The DB unique ID can be saved in the DB manifest (preferred, this option)
    /// or an IDENTITY file (historical, deprecated), or both. If this option is
    /// set to false (old behavior), then `write_identity_file` must be set to true.
    /// The manifest is preferred because
    ///
    /// 1. The IDENTITY file is not checksummed, so it is not as safe against
    ///    corruption.
    /// 2. The IDENTITY file may or may not be copied with the DB (e.g. not
    ///    copied by BackupEngine), so is not reliable for the provenance of a DB.
    ///
    /// This option might eventually be obsolete and removed as Identity files
    /// are phased out.
    ///
    /// Default: true (enabled)
    pub fn set_write_dbid_to_manifest(&mut self, val: bool) {
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_write_dbid_to_manifest(self.inner, u8::from(val));
        }
    }
3926
3927    /// Returns the value of the `write_dbid_to_manifest` option.
3928    pub fn get_write_dbid_to_manifest(&self) -> bool {
3929        let val_u8 = unsafe { ffi::rocksdb_options_get_write_dbid_to_manifest(self.inner) };
3930        val_u8 != 0
3931    }
3932
    /// Sets the logger to use.
    ///
    /// By default `rocksdb` writes its internal logs to a file in the database
    /// directory; this can be changed to a custom callback with the
    /// [`InfoLogger::new_callback_logger`] constructor.
    pub fn set_info_logger(&mut self, mut logger: InfoLogger) {
        // Move the callback so it can be shared across database instances
        self.outlive.logger_callback = logger.callback.take();
        // SAFETY: `self.inner` is a valid `rocksdb_options_t` handle owned by `self`.
        unsafe {
            ffi::rocksdb_options_set_info_log(self.inner, logger.inner);
        }
    }
3945
    /// Returns a reference to the currently configured logger.
    ///
    /// NOTE(review): this wraps the raw pointer obtained from the C API in a new
    /// `InfoLogger` value; if `InfoLogger` releases its pointer on drop, calling
    /// this could lead to a double free — confirm `InfoLogger`'s ownership
    /// semantics in its definition.
    pub fn get_info_logger(&self) -> InfoLogger {
        let raw = unsafe { ffi::rocksdb_options_get_info_log(self.inner) };
        InfoLogger {
            inner: raw,
            callback: self.outlive.logger_callback.clone(),
        }
    }
3954}
3955
3956impl Default for Options {
3957    fn default() -> Self {
3958        unsafe {
3959            let opts = ffi::rocksdb_options_create();
3960            assert!(!opts.is_null(), "Could not create RocksDB options");
3961
3962            Self {
3963                inner: opts,
3964                outlive: OptionsMustOutliveDB::default(),
3965            }
3966        }
3967    }
3968}
3969
3970impl FlushOptions {
3971    pub fn new() -> FlushOptions {
3972        FlushOptions::default()
3973    }
3974
3975    /// Waits until the flush is done.
3976    ///
3977    /// Default: true
3978    ///
3979    /// # Examples
3980    ///
3981    /// ```
3982    /// use rocksdb::FlushOptions;
3983    ///
3984    /// let mut options = FlushOptions::default();
3985    /// options.set_wait(false);
3986    /// ```
3987    pub fn set_wait(&mut self, wait: bool) {
3988        unsafe {
3989            ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
3990        }
3991    }
3992}
3993
3994impl Default for FlushOptions {
3995    fn default() -> Self {
3996        let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3997        assert!(
3998            !flush_opts.is_null(),
3999            "Could not create RocksDB flush options"
4000        );
4001
4002        Self { inner: flush_opts }
4003    }
4004}
4005
4006impl WriteOptions {
4007    pub fn new() -> WriteOptions {
4008        WriteOptions::default()
4009    }
4010
4011    /// Sets the sync mode. If true, the write will be flushed
4012    /// from the operating system buffer cache before the write is considered complete.
4013    /// If this flag is true, writes will be slower.
4014    ///
4015    /// Default: false
4016    pub fn set_sync(&mut self, sync: bool) {
4017        unsafe {
4018            ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
4019        }
4020    }
4021
4022    /// Sets whether WAL should be active or not.
4023    /// If true, writes will not first go to the write ahead log,
4024    /// and the write may got lost after a crash.
4025    ///
4026    /// Default: false
4027    pub fn disable_wal(&mut self, disable: bool) {
4028        unsafe {
4029            ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
4030        }
4031    }
4032
4033    /// If true and if user is trying to write to column families that don't exist (they were dropped),
4034    /// ignore the write (don't return an error). If there are multiple writes in a WriteBatch,
4035    /// other writes will succeed.
4036    ///
4037    /// Default: false
4038    pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
4039        unsafe {
4040            ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
4041                self.inner,
4042                c_uchar::from(ignore),
4043            );
4044        }
4045    }
4046
4047    /// If true and we need to wait or sleep for the write request, fails
4048    /// immediately with Status::Incomplete().
4049    ///
4050    /// Default: false
4051    pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
4052        unsafe {
4053            ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
4054        }
4055    }
4056
4057    /// If true, this write request is of lower priority if compaction is
4058    /// behind. In this case, no_slowdown = true, the request will be cancelled
4059    /// immediately with Status::Incomplete() returned. Otherwise, it will be
4060    /// slowed down. The slowdown value is determined by RocksDB to guarantee
4061    /// it introduces minimum impacts to high priority writes.
4062    ///
4063    /// Default: false
4064    pub fn set_low_pri(&mut self, v: bool) {
4065        unsafe {
4066            ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
4067        }
4068    }
4069
4070    /// If true, writebatch will maintain the last insert positions of each
4071    /// memtable as hints in concurrent write. It can improve write performance
4072    /// in concurrent writes if keys in one writebatch are sequential. In
4073    /// non-concurrent writes (when concurrent_memtable_writes is false) this
4074    /// option will be ignored.
4075    ///
4076    /// Default: false
4077    pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
4078        unsafe {
4079            ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
4080                self.inner,
4081                c_uchar::from(v),
4082            );
4083        }
4084    }
4085}
4086
4087impl Default for WriteOptions {
4088    fn default() -> Self {
4089        let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
4090        assert!(
4091            !write_opts.is_null(),
4092            "Could not create RocksDB write options"
4093        );
4094
4095        Self { inner: write_opts }
4096    }
4097}
4098
impl LruCacheOptions {
    /// Capacity of the cache, in the same units as the `charge` of each entry.
    /// This is typically measured in bytes, but can be a different unit if using
    /// kDontChargeCacheMetadata.
    pub fn set_capacity(&mut self, cap: usize) {
        // Forwarded verbatim to the C layer; no Rust-side state is kept.
        unsafe {
            ffi::rocksdb_lru_cache_options_set_capacity(self.inner, cap);
        }
    }

    /// Cache is sharded into 2^num_shard_bits shards, by hash of key.
    /// If < 0, a good default is chosen based on the capacity and the
    /// implementation. (Mutex-based implementations are much more reliant
    /// on many shards for parallel scalability.)
    pub fn set_num_shard_bits(&mut self, val: c_int) {
        // Negative values are meaningful (auto-select), hence the signed c_int.
        unsafe {
            ffi::rocksdb_lru_cache_options_set_num_shard_bits(self.inner, val);
        }
    }
}
4119
4120impl Default for LruCacheOptions {
4121    fn default() -> Self {
4122        let inner = unsafe { ffi::rocksdb_lru_cache_options_create() };
4123        assert!(
4124            !inner.is_null(),
4125            "Could not create RocksDB LRU cache options"
4126        );
4127
4128        Self { inner }
4129    }
4130}
4131
/// Restricts which tier of the storage hierarchy a read is allowed to access.
///
/// Passed to the C API as a raw `i32` by [`ReadOptions::set_read_tier`], so
/// the `#[repr(i32)]` discriminants must stay in sync with RocksDB's values.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(i32)]
pub enum ReadTier {
    /// Reads data in memtable, block cache, OS cache or storage.
    All = 0,
    /// Reads data in memtable or block cache.
    BlockCache,
    /// Reads persisted data. When WAL is disabled, this option will skip data in memtable.
    Persisted,
    /// Reads data in memtable. Used for memtable only iterators.
    Memtable,
}
4145
/// Priority policy used to decide which file(s) to compact first within a level.
///
/// Passed to RocksDB as a raw `i32`; the explicit discriminants mirror the
/// corresponding RocksDB `CompactionPri` values and must not be reordered.
// Derives added for consistency with the other value enums in this module
// (`ReadTier`, `DBCompressionType`, ...), which all support Debug/Copy/Clone,
// equality, and optional serde.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(i32)]
pub enum CompactionPri {
    /// Slightly prioritize larger files by size compensated by #deletes
    ByCompensatedSize = 0,
    /// First compact files whose data's latest update time is oldest.
    /// Try this if you only update some hot keys in small ranges.
    OldestLargestSeqFirst = 1,
    /// First compact files whose range hasn't been compacted to the next level
    /// for the longest. If your updates are random across the key space,
    /// write amplification is slightly better with this option.
    OldestSmallestSeqFirst = 2,
    /// First compact files whose ratio between overlapping size in next level
    /// and its size is the smallest. It in many cases can optimize write amplification.
    MinOverlappingRatio = 3,
    /// Keeps a cursor(s) of the successor of the file (key range) was/were
    /// compacted before, and always picks the next files (key range) in that
    /// level. The file picking process will cycle through all the files in a
    /// round-robin manner.
    RoundRobin = 4,
}
4166
4167impl ReadOptions {
4168    // TODO add snapshot setting here
4169    // TODO add snapshot wrapper structs with proper destructors;
4170    // that struct needs an "iterator" impl too.
4171
4172    /// Specify whether the "data block"/"index block"/"filter block"
4173    /// read for this iteration should be cached in memory?
4174    /// Callers may wish to set this field to false for bulk scans.
4175    ///
4176    /// Default: true
4177    pub fn fill_cache(&mut self, v: bool) {
4178        unsafe {
4179            ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
4180        }
4181    }
4182
4183    /// Sets the snapshot which should be used for the read.
4184    /// The snapshot must belong to the DB that is being read and must
4185    /// not have been released.
4186    pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
4187        unsafe {
4188            ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
4189        }
4190    }
4191
4192    /// Sets the lower bound for an iterator.
4193    pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
4194        self.set_lower_bound_impl(Some(key.into()));
4195    }
4196
4197    /// Sets the upper bound for an iterator.
4198    /// The upper bound itself is not included on the iteration result.
4199    pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
4200        self.set_upper_bound_impl(Some(key.into()));
4201    }
4202
4203    /// Sets lower and upper bounds based on the provided range.  This is
4204    /// similar to setting lower and upper bounds separately except that it also
4205    /// allows either bound to be reset.
4206    ///
4207    /// The argument can be a regular Rust range, e.g. `lower..upper`.  However,
4208    /// since RocksDB upper bound is always excluded (i.e. range can never be
4209    /// fully closed) inclusive ranges (`lower..=upper` and `..=upper`) are not
4210    /// supported.  For example:
4211    ///
4212    /// ```
4213    /// let mut options = rocksdb::ReadOptions::default();
4214    /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
4215    /// ```
4216    ///
4217    /// In addition, [`crate::PrefixRange`] can be used to specify a range of
4218    /// keys with a given prefix.  In particular, the above example is
4219    /// equivalent to:
4220    ///
4221    /// ```
4222    /// let mut options = rocksdb::ReadOptions::default();
4223    /// options.set_iterate_range(rocksdb::PrefixRange("xy".as_bytes()));
4224    /// ```
4225    ///
4226    /// Note that setting range using this method is separate to using prefix
4227    /// iterators.  Prefix iterators use prefix extractor configured for
4228    /// a column family.  Setting bounds via [`crate::PrefixRange`] is more akin
4229    /// to using manual prefix.
4230    ///
4231    /// Using this method clears any previously set bounds.  In other words, the
4232    /// bounds can be reset by setting the range to `..` as in:
4233    ///
4234    /// ```
4235    /// let mut options = rocksdb::ReadOptions::default();
4236    /// options.set_iterate_range(..);
4237    /// ```
4238    pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
4239        let (lower, upper) = range.into_bounds();
4240        self.set_lower_bound_impl(lower);
4241        self.set_upper_bound_impl(upper);
4242    }
4243
4244    fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4245        let (ptr, len) = if let Some(ref bound) = bound {
4246            (bound.as_ptr() as *const c_char, bound.len())
4247        } else if self.iterate_lower_bound.is_some() {
4248            (std::ptr::null(), 0)
4249        } else {
4250            return;
4251        };
4252        self.iterate_lower_bound = bound;
4253        unsafe {
4254            ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
4255        }
4256    }
4257
4258    fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4259        let (ptr, len) = if let Some(ref bound) = bound {
4260            (bound.as_ptr() as *const c_char, bound.len())
4261        } else if self.iterate_upper_bound.is_some() {
4262            (std::ptr::null(), 0)
4263        } else {
4264            return;
4265        };
4266        self.iterate_upper_bound = bound;
4267        unsafe {
4268            ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
4269        }
4270    }
4271
4272    /// Specify if this read request should process data that ALREADY
4273    /// resides on a particular cache. If the required data is not
4274    /// found at the specified cache, then Status::Incomplete is returned.
4275    ///
4276    /// Default: ::All
4277    pub fn set_read_tier(&mut self, tier: ReadTier) {
4278        unsafe {
4279            ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
4280        }
4281    }
4282
4283    /// Enforce that the iterator only iterates over the same
4284    /// prefix as the seek.
4285    /// This option is effective only for prefix seeks, i.e. prefix_extractor is
4286    /// non-null for the column family and total_order_seek is false.  Unlike
4287    /// iterate_upper_bound, prefix_same_as_start only works within a prefix
4288    /// but in both directions.
4289    ///
4290    /// Default: false
4291    pub fn set_prefix_same_as_start(&mut self, v: bool) {
4292        unsafe {
4293            ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
4294        }
4295    }
4296
4297    /// Enable a total order seek regardless of index format (e.g. hash index)
4298    /// used in the table. Some table format (e.g. plain table) may not support
4299    /// this option.
4300    ///
4301    /// If true when calling Get(), we also skip prefix bloom when reading from
4302    /// block based table. It provides a way to read existing data after
4303    /// changing implementation of prefix extractor.
4304    pub fn set_total_order_seek(&mut self, v: bool) {
4305        unsafe {
4306            ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
4307        }
4308    }
4309
4310    /// Sets a threshold for the number of keys that can be skipped
4311    /// before failing an iterator seek as incomplete. The default value of 0 should be used to
4312    /// never fail a request as incomplete, even on skipping too many keys.
4313    ///
4314    /// Default: 0
4315    pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
4316        unsafe {
4317            ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
4318        }
4319    }
4320
4321    /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we schedule a background job
4322    /// in the flush job queue and delete obsolete files in background.
4323    ///
4324    /// Default: false
4325    pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
4326        unsafe {
4327            ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
4328                self.inner,
4329                c_uchar::from(v),
4330            );
4331        }
4332    }
4333
4334    /// If true, keys deleted using the DeleteRange() API will be visible to
4335    /// readers until they are naturally deleted during compaction.
4336    ///
4337    /// Default: false
4338    #[deprecated(
4339        note = "deprecated in RocksDB 10.2.1: no performance impact if DeleteRange is not used"
4340    )]
4341    pub fn set_ignore_range_deletions(&mut self, v: bool) {
4342        unsafe {
4343            ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
4344        }
4345    }
4346
4347    /// If true, all data read from underlying storage will be
4348    /// verified against corresponding checksums.
4349    ///
4350    /// Default: true
4351    pub fn set_verify_checksums(&mut self, v: bool) {
4352        unsafe {
4353            ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
4354        }
4355    }
4356
4357    /// If non-zero, an iterator will create a new table reader which
4358    /// performs reads of the given size. Using a large size (> 2MB) can
4359    /// improve the performance of forward iteration on spinning disks.
4360    /// Default: 0
4361    ///
4362    /// ```
4363    /// use rocksdb::{ReadOptions};
4364    ///
4365    /// let mut opts = ReadOptions::default();
4366    /// opts.set_readahead_size(4_194_304); // 4mb
4367    /// ```
4368    pub fn set_readahead_size(&mut self, v: usize) {
4369        unsafe {
4370            ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
4371        }
4372    }
4373
4374    /// If auto_readahead_size is set to true, it will auto tune the readahead_size
4375    /// during scans internally.
4376    /// For this feature to be enabled, iterate_upper_bound must also be specified.
4377    ///
4378    /// NOTE: - Recommended for forward Scans only.
4379    ///       - If there is a backward scans, this option will be
4380    ///         disabled internally and won't be enabled again if the forward scan
4381    ///         is issued again.
4382    ///
4383    /// Default: true
4384    pub fn set_auto_readahead_size(&mut self, v: bool) {
4385        unsafe {
4386            ffi::rocksdb_readoptions_set_auto_readahead_size(self.inner, c_uchar::from(v));
4387        }
4388    }
4389
4390    /// If true, create a tailing iterator. Note that tailing iterators
4391    /// only support moving in the forward direction. Iterating in reverse
4392    /// or seek_to_last are not supported.
4393    pub fn set_tailing(&mut self, v: bool) {
4394        unsafe {
4395            ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
4396        }
4397    }
4398
4399    /// Specifies the value of "pin_data". If true, it keeps the blocks
4400    /// loaded by the iterator pinned in memory as long as the iterator is not deleted,
4401    /// If used when reading from tables created with
4402    /// BlockBasedTableOptions::use_delta_encoding = false,
4403    /// Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
4404    /// return 1.
4405    ///
4406    /// Default: false
4407    pub fn set_pin_data(&mut self, v: bool) {
4408        unsafe {
4409            ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
4410        }
4411    }
4412
4413    /// Asynchronously prefetch some data.
4414    ///
4415    /// Used for sequential reads and internal automatic prefetching.
4416    ///
4417    /// Default: `false`
4418    pub fn set_async_io(&mut self, v: bool) {
4419        unsafe {
4420            ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
4421        }
4422    }
4423
4424    /// Timestamp of operation. Read should return the latest data visible to the
4425    /// specified timestamp. All timestamps of the same database must be of the
4426    /// same length and format. The user is responsible for providing a customized
4427    /// compare function via Comparator to order <key, timestamp> tuples.
4428    /// For iterator, iter_start_ts is the lower bound (older) and timestamp
4429    /// serves as the upper bound. Versions of the same record that fall in
4430    /// the timestamp range will be returned. If iter_start_ts is nullptr,
4431    /// only the most recent version visible to timestamp is returned.
4432    /// The user-specified timestamp feature is still under active development,
4433    /// and the API is subject to change.
4434    pub fn set_timestamp<S: Into<Vec<u8>>>(&mut self, ts: S) {
4435        self.set_timestamp_impl(Some(ts.into()));
4436    }
4437
4438    fn set_timestamp_impl(&mut self, ts: Option<Vec<u8>>) {
4439        let (ptr, len) = if let Some(ref ts) = ts {
4440            (ts.as_ptr() as *const c_char, ts.len())
4441        } else if self.timestamp.is_some() {
4442            // The stored timestamp is a `Some` but we're updating it to a `None`.
4443            // This means to cancel a previously set timestamp.
4444            // To do this, use a null pointer and zero length.
4445            (std::ptr::null(), 0)
4446        } else {
4447            return;
4448        };
4449        self.timestamp = ts;
4450        unsafe {
4451            ffi::rocksdb_readoptions_set_timestamp(self.inner, ptr, len);
4452        }
4453    }
4454
4455    /// See `set_timestamp`
4456    pub fn set_iter_start_ts<S: Into<Vec<u8>>>(&mut self, ts: S) {
4457        self.set_iter_start_ts_impl(Some(ts.into()));
4458    }
4459
4460    fn set_iter_start_ts_impl(&mut self, ts: Option<Vec<u8>>) {
4461        let (ptr, len) = if let Some(ref ts) = ts {
4462            (ts.as_ptr() as *const c_char, ts.len())
4463        } else if self.timestamp.is_some() {
4464            (std::ptr::null(), 0)
4465        } else {
4466            return;
4467        };
4468        self.iter_start_ts = ts;
4469        unsafe {
4470            ffi::rocksdb_readoptions_set_iter_start_ts(self.inner, ptr, len);
4471        }
4472    }
4473}
4474
4475impl Default for ReadOptions {
4476    fn default() -> Self {
4477        unsafe {
4478            Self {
4479                inner: ffi::rocksdb_readoptions_create(),
4480                timestamp: None,
4481                iter_start_ts: None,
4482                iterate_upper_bound: None,
4483                iterate_lower_bound: None,
4484            }
4485        }
4486    }
4487}
4488
4489impl IngestExternalFileOptions {
4490    /// Can be set to true to move the files instead of copying them.
4491    pub fn set_move_files(&mut self, v: bool) {
4492        unsafe {
4493            ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
4494        }
4495    }
4496
4497    /// If set to false, an ingested file keys could appear in existing snapshots
4498    /// that where created before the file was ingested.
4499    pub fn set_snapshot_consistency(&mut self, v: bool) {
4500        unsafe {
4501            ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
4502                self.inner,
4503                c_uchar::from(v),
4504            );
4505        }
4506    }
4507
4508    /// If set to false, IngestExternalFile() will fail if the file key range
4509    /// overlaps with existing keys or tombstones in the DB.
4510    pub fn set_allow_global_seqno(&mut self, v: bool) {
4511        unsafe {
4512            ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
4513                self.inner,
4514                c_uchar::from(v),
4515            );
4516        }
4517    }
4518
4519    /// If set to false and the file key range overlaps with the memtable key range
4520    /// (memtable flush required), IngestExternalFile will fail.
4521    pub fn set_allow_blocking_flush(&mut self, v: bool) {
4522        unsafe {
4523            ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
4524                self.inner,
4525                c_uchar::from(v),
4526            );
4527        }
4528    }
4529
4530    /// Set to true if you would like duplicate keys in the file being ingested
4531    /// to be skipped rather than overwriting existing data under that key.
4532    /// Usecase: back-fill of some historical data in the database without
4533    /// over-writing existing newer version of data.
4534    /// This option could only be used if the DB has been running
4535    /// with allow_ingest_behind=true since the dawn of time.
4536    /// All files will be ingested at the bottommost level with seqno=0.
4537    pub fn set_ingest_behind(&mut self, v: bool) {
4538        unsafe {
4539            ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
4540        }
4541    }
4542}
4543
4544impl Default for IngestExternalFileOptions {
4545    fn default() -> Self {
4546        unsafe {
4547            Self {
4548                inner: ffi::rocksdb_ingestexternalfileoptions_create(),
4549            }
4550        }
4551    }
4552}
4553
/// Used by BlockBasedOptions::set_index_type.
///
/// NOTE(review): the variants carry no explicit discriminants; the mapping to
/// the C enum presumably happens in `set_index_type` — confirm ordering matches
/// the C API before reordering variants.
pub enum BlockBasedIndexType {
    /// A space efficient index block that is optimized for
    /// binary-search-based index.
    BinarySearch,

    /// The hash index, if enabled, will perform a hash lookup if
    /// a prefix extractor has been provided through Options::set_prefix_extractor.
    HashSearch,

    /// A two-level index implementation. Both levels are binary search indexes.
    TwoLevelIndexSearch,
}
4567
/// Used by BlockBasedOptions::set_data_block_index_type.
///
/// The explicit discriminants (with `#[repr(C)]`) are part of the FFI
/// contract and must not change.
#[repr(C)]
pub enum DataBlockIndexType {
    /// Use binary search when performing point lookup for keys in data blocks.
    /// This is the default.
    BinarySearch = 0,

    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
    /// compatible with databases created without this feature. Once turned on, existing data will
    /// be gradually converted to the hash index format.
    BinaryAndHash = 1,
}
4580
/// Used by BlockBasedOptions for setting metadata cache pinning tiers.
/// Controls how metadata blocks (index, filter, etc.) are pinned in block cache.
///
/// Each discriminant is taken directly from the corresponding constant exposed
/// by the RocksDB C API, so the values stay in sync with the linked library.
#[repr(C)]
pub enum BlockBasedTablePinningTier {
    /// Use fallback pinning tier (context-dependent)
    Fallback = ffi::rocksdb_block_based_k_fallback_pinning_tier as isize,
    /// No pinning - blocks can be evicted at any time
    None = ffi::rocksdb_block_based_k_none_pinning_tier as isize,
    /// Pin blocks for flushed files and similar scenarios
    FlushAndSimilar = ffi::rocksdb_block_based_k_flush_and_similar_pinning_tier as isize,
    /// Pin all blocks (highest priority)
    All = ffi::rocksdb_block_based_k_all_pinning_tier as isize,
}
4594
/// Defines the underlying memtable implementation.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
pub enum MemtableFactory {
    /// A vector-backed memtable.
    Vector,
    /// A hash table of skip lists; see the wiki for the exact semantics of the
    /// tuning knobs below.
    HashSkipList {
        /// Number of hash buckets.
        bucket_count: usize,
        /// Maximum skip-list height.
        height: i32,
        /// Skip-list branching factor.
        branching_factor: i32,
    },
    /// A hash table of linked lists.
    HashLinkList {
        /// Number of hash buckets.
        bucket_count: usize,
    },
}
4608
/// Used by BlockBasedOptions::set_checksum_type.
///
/// The explicit discriminants mirror RocksDB's checksum type values and must
/// not be reordered.
// Derives added for consistency with the other value enums in this module
// (e.g. `KeyEncodingType`), which all support Debug/Copy/Clone and equality.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ChecksumType {
    NoChecksum = 0,
    CRC32c = 1,
    XXHash = 2,
    XXHash64 = 3,
    XXH3 = 4, // Supported since RocksDB 6.27
}
4617
/// Used in [`PlainTableFactoryOptions`].
///
/// `Plain` is the default (via `#[default]`); the explicit discriminants are
/// part of the FFI contract.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum KeyEncodingType {
    /// Always write full keys.
    #[default]
    Plain = 0,
    /// Find opportunities to write the same prefix for multiple rows.
    Prefix = 1,
}
4627
/// Used with DBOptions::set_plain_table_factory.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
///
/// Defaults:
///  user_key_length: 0 (variable length)
///  bloom_bits_per_key: 10
///  hash_table_ratio: 0.75
///  index_sparseness: 16
///  huge_page_tlb_size: 0
///  encoding_type: KeyEncodingType::Plain
///  full_scan_mode: false
///  store_index_in_file: false
pub struct PlainTableFactoryOptions {
    /// Fixed key length in bytes; 0 means variable-length keys.
    pub user_key_length: u32,
    /// Bloom filter bits per key. Default: 10.
    pub bloom_bits_per_key: i32,
    /// Hash table load factor. Default: 0.75.
    pub hash_table_ratio: f64,
    /// Index sparseness. Default: 16.
    pub index_sparseness: usize,
    /// Huge page TLB size; 0 disables huge pages. Default: 0.
    pub huge_page_tlb_size: usize,
    /// Key encoding scheme. Default: [`KeyEncodingType::Plain`].
    pub encoding_type: KeyEncodingType,
    /// Full scan mode. Default: false.
    pub full_scan_mode: bool,
    /// Whether to store the index in the SST file itself. Default: false.
    pub store_index_in_file: bool,
}
4651
/// Compression algorithm applied to data on disk.
///
/// Each variant maps to the matching `ffi::rocksdb_*_compression` constant.
/// NOTE(review): whether a given codec is usable depends on how the linked
/// RocksDB library was built.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompressionType {
    None = ffi::rocksdb_no_compression as isize,
    Snappy = ffi::rocksdb_snappy_compression as isize,
    Zlib = ffi::rocksdb_zlib_compression as isize,
    Bz2 = ffi::rocksdb_bz2_compression as isize,
    Lz4 = ffi::rocksdb_lz4_compression as isize,
    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
    Zstd = ffi::rocksdb_zstd_compression as isize,
}
4663
/// Compaction style for the database; each variant maps to the matching
/// `ffi::rocksdb_*_compaction` constant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionStyle {
    Level = ffi::rocksdb_level_compaction as isize,
    Universal = ffi::rocksdb_universal_compaction as isize,
    Fifo = ffi::rocksdb_fifo_compaction as isize,
}
4671
/// Write-ahead-log recovery mode; each variant maps to the matching
/// `ffi::rocksdb_*_recovery` constant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBRecoveryMode {
    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}
4680
/// Options for FIFO compaction; owns a C-side `rocksdb_fifo_compaction_options_t`
/// (allocated in `Default`, freed in `Drop`).
pub struct FifoCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}
4684
4685impl Default for FifoCompactOptions {
4686    fn default() -> Self {
4687        let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
4688        assert!(
4689            !opts.is_null(),
4690            "Could not create RocksDB Fifo Compaction Options"
4691        );
4692
4693        Self { inner: opts }
4694    }
4695}
4696
impl Drop for FifoCompactOptions {
    fn drop(&mut self) {
        // Release the C-side struct allocated in `Default::default`.
        unsafe {
            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
        }
    }
}
4704
impl FifoCompactOptions {
    /// Sets the max table file size.
    ///
    /// Once the total sum of table files reaches this, we will delete the oldest
    /// table file
    ///
    /// Default: 1GB
    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
        // Forwarded verbatim to the C layer; no Rust-side state is kept.
        unsafe {
            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
        }
    }
}
4718
/// Stop style for universal compaction; each variant maps to the matching
/// `ffi::rocksdb_*_compaction_stop_style` constant.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum UniversalCompactionStopStyle {
    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
}
4725
/// Options for universal compaction; owns a C-side
/// `rocksdb_universal_compaction_options_t` (allocated in `Default`, freed in `Drop`).
pub struct UniversalCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
}
4729
4730impl Default for UniversalCompactOptions {
4731    fn default() -> Self {
4732        let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
4733        assert!(
4734            !opts.is_null(),
4735            "Could not create RocksDB Universal Compaction Options"
4736        );
4737
4738        Self { inner: opts }
4739    }
4740}
4741
impl Drop for UniversalCompactOptions {
    fn drop(&mut self) {
        // Release the C-side struct allocated in `Default::default`.
        unsafe {
            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
        }
    }
}
4749
4750impl UniversalCompactOptions {
    /// Sets the percentage flexibility while comparing file size.
    /// If the candidate file(s) size is 1% smaller than the next file's size,
    /// then include next file into this candidate set.
    ///
    /// NOTE(review): the "1%" above reflects the default ratio of 1; the
    /// flexibility presumably scales with the value passed here — confirm
    /// against RocksDB's universal compaction documentation.
    ///
    /// Default: 1
    pub fn set_size_ratio(&mut self, ratio: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
        }
    }
4761
    /// Sets the minimum number of files in a single compaction run.
    ///
    /// Default: 2
    pub fn set_min_merge_width(&mut self, num: c_int) {
        // Forwarded verbatim to the C layer; no Rust-side state is kept.
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
        }
    }
4770
    /// Sets the maximum number of files in a single compaction run.
    ///
    /// Default: UINT_MAX
    pub fn set_max_merge_width(&mut self, num: c_int) {
        // Forwarded verbatim to the C layer; no Rust-side state is kept.
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
        }
    }
4779
4780    /// sets the size amplification.
4781    ///
4782    /// It is defined as the amount (in percentage) of
4783    /// additional storage needed to store a single byte of data in the database.
4784    /// For example, a size amplification of 2% means that a database that
4785    /// contains 100 bytes of user-data may occupy upto 102 bytes of
4786    /// physical storage. By this definition, a fully compacted database has
4787    /// a size amplification of 0%. Rocksdb uses the following heuristic
4788    /// to calculate size amplification: it assumes that all files excluding
4789    /// the earliest file contribute to the size amplification.
4790    ///
4791    /// Default: 200, which means that a 100 byte database could require upto 300 bytes of storage.
4792    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
4793        unsafe {
4794            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
4795                self.inner, v,
4796            );
4797        }
4798    }
4799
4800    /// Sets the percentage of compression size.
4801    ///
4802    /// If this option is set to be -1, all the output files
4803    /// will follow compression type specified.
4804    ///
4805    /// If this option is not negative, we will try to make sure compressed
4806    /// size is just above this value. In normal cases, at least this percentage
4807    /// of data will be compressed.
4808    /// When we are compacting to a new file, here is the criteria whether
4809    /// it needs to be compressed: assuming here are the list of files sorted
4810    /// by generation time:
4811    ///    A1...An B1...Bm C1...Ct
4812    /// where A1 is the newest and Ct is the oldest, and we are going to compact
4813    /// B1...Bm, we calculate the total size of all the files as total_size, as
4814    /// well as  the total size of C1...Ct as total_C, the compaction output file
4815    /// will be compressed iff
4816    ///   total_C / total_size < this percentage
4817    ///
4818    /// Default: -1
4819    pub fn set_compression_size_percent(&mut self, v: c_int) {
4820        unsafe {
4821            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
4822        }
4823    }
4824
4825    /// Sets the algorithm used to stop picking files into a single compaction run.
4826    ///
4827    /// Default: ::Total
4828    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
4829        unsafe {
4830            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
4831        }
4832    }
4833}
4834
/// Controls whether (and how) the bottommost level is compacted during a
/// manual compaction. Used by [`CompactOptions::set_bottommost_level_compaction`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum BottommostLevelCompaction {
    /// Skip bottommost level compaction
    Skip = 0,
    /// Only compact bottommost level if there is a compaction filter
    /// This is the default option
    IfHaveCompactionFilter,
    /// Always compact bottommost level
    Force,
    /// Always compact bottommost level but in bottommost level avoid
    /// double-compacting files created in the same compaction
    ForceOptimized,
}
4850
/// Options controlling manual compaction (e.g. `DB::compact_range_opt`).
pub struct CompactOptions {
    // Raw handle owned by this wrapper; destroyed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
    // Owns the buffer backing the `full_history_ts_low` pointer handed to the
    // C layer, keeping it alive for the lifetime of these options.
    full_history_ts_low: Option<Vec<u8>>,
}
4855
4856impl Default for CompactOptions {
4857    fn default() -> Self {
4858        let opts = unsafe { ffi::rocksdb_compactoptions_create() };
4859        assert!(!opts.is_null(), "Could not create RocksDB Compact Options");
4860
4861        Self {
4862            inner: opts,
4863            full_history_ts_low: None,
4864        }
4865    }
4866}
4867
4868impl Drop for CompactOptions {
4869    fn drop(&mut self) {
4870        unsafe {
4871            ffi::rocksdb_compactoptions_destroy(self.inner);
4872        }
4873    }
4874}
4875
4876impl CompactOptions {
4877    /// If more than one thread calls manual compaction,
4878    /// only one will actually schedule it while the other threads will simply wait
4879    /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
4880    /// is set to true, the call will disable scheduling of automatic compaction jobs
4881    /// and wait for existing automatic compaction jobs to finish.
4882    pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
4883        unsafe {
4884            ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
4885                self.inner,
4886                c_uchar::from(v),
4887            );
4888        }
4889    }
4890
4891    /// Sets bottommost level compaction.
4892    pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
4893        unsafe {
4894            ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
4895        }
4896    }
4897
4898    /// If true, compacted files will be moved to the minimum level capable
4899    /// of holding the data or given level (specified non-negative target_level).
4900    pub fn set_change_level(&mut self, v: bool) {
4901        unsafe {
4902            ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
4903        }
4904    }
4905
4906    /// If change_level is true and target_level have non-negative value, compacted
4907    /// files will be moved to target_level.
4908    pub fn set_target_level(&mut self, lvl: c_int) {
4909        unsafe {
4910            ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
4911        }
4912    }
4913
4914    /// Set user-defined timestamp low bound, the data with older timestamp than
4915    /// low bound maybe GCed by compaction. Default: nullptr
4916    pub fn set_full_history_ts_low<S: Into<Vec<u8>>>(&mut self, ts: S) {
4917        self.set_full_history_ts_low_impl(Some(ts.into()));
4918    }
4919
4920    fn set_full_history_ts_low_impl(&mut self, ts: Option<Vec<u8>>) {
4921        let (ptr, len) = if let Some(ref ts) = ts {
4922            (ts.as_ptr() as *mut c_char, ts.len())
4923        } else if self.full_history_ts_low.is_some() {
4924            (std::ptr::null::<Vec<u8>>() as *mut c_char, 0)
4925        } else {
4926            return;
4927        };
4928        self.full_history_ts_low = ts;
4929        unsafe {
4930            ffi::rocksdb_compactoptions_set_full_history_ts_low(self.inner, ptr, len);
4931        }
4932    }
4933}
4934
/// Options controlling `DB::wait_for_compact`, wrapping the RocksDB C object.
pub struct WaitForCompactOptions {
    // Raw handle owned by this wrapper; destroyed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_wait_for_compact_options_t,
}
4938
4939impl Default for WaitForCompactOptions {
4940    fn default() -> Self {
4941        let opts = unsafe { ffi::rocksdb_wait_for_compact_options_create() };
4942        assert!(
4943            !opts.is_null(),
4944            "Could not create RocksDB Wait For Compact Options"
4945        );
4946
4947        Self { inner: opts }
4948    }
4949}
4950
4951impl Drop for WaitForCompactOptions {
4952    fn drop(&mut self) {
4953        unsafe {
4954            ffi::rocksdb_wait_for_compact_options_destroy(self.inner);
4955        }
4956    }
4957}
4958
4959impl WaitForCompactOptions {
4960    /// If true, abort waiting if background jobs are paused. If false,
4961    /// ContinueBackgroundWork() must be called to resume the background jobs.
4962    /// Otherwise, jobs that were queued, but not scheduled yet may never finish
4963    /// and WaitForCompact() may wait indefinitely (if timeout is set, it will
4964    /// abort after the timeout).
4965    ///
4966    /// Default: false
4967    pub fn set_abort_on_pause(&mut self, v: bool) {
4968        unsafe {
4969            ffi::rocksdb_wait_for_compact_options_set_abort_on_pause(self.inner, c_uchar::from(v));
4970        }
4971    }
4972
4973    /// If true, flush all column families before starting to wait.
4974    ///
4975    /// Default: false
4976    pub fn set_flush(&mut self, v: bool) {
4977        unsafe {
4978            ffi::rocksdb_wait_for_compact_options_set_flush(self.inner, c_uchar::from(v));
4979        }
4980    }
4981
4982    /// Timeout in microseconds for waiting for compaction to complete.
4983    /// when timeout == 0, WaitForCompact() will wait as long as there's background
4984    /// work to finish.
4985    ///
4986    /// Default: 0
4987    pub fn set_timeout(&mut self, microseconds: u64) {
4988        unsafe {
4989            ffi::rocksdb_wait_for_compact_options_set_timeout(self.inner, microseconds);
4990        }
4991    }
4992}
4993
/// Represents a path where sst files can be put into
pub struct DBPath {
    // Raw handle owned by this wrapper; destroyed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
}
4998
4999impl DBPath {
5000    /// Create a new path
5001    pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
5002        let p = to_cpath(path.as_ref()).unwrap();
5003        let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
5004        if dbpath.is_null() {
5005            Err(Error::new(format!(
5006                "Could not create path for storing sst files at location: {}",
5007                path.as_ref().display()
5008            )))
5009        } else {
5010            Ok(DBPath { inner: dbpath })
5011        }
5012    }
5013}
5014
5015impl Drop for DBPath {
5016    fn drop(&mut self) {
5017        unsafe {
5018            ffi::rocksdb_dbpath_destroy(self.inner);
5019        }
5020    }
5021}
5022
/// A RocksDB info logger, attached to options via `Options::set_info_logger`.
pub struct InfoLogger {
    // Raw handle owned by this wrapper; destroyed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_logger_t,
    // Keeps the Rust callback alive for as long as this logger may invoke it.
    // `None` for loggers implemented entirely inside RocksDB (e.g. the stderr
    // logger), which have no Rust-side callback.
    callback: Option<Arc<LoggerCallback>>,
}
5027
impl InfoLogger {
    /// Creates a new logger that redirects logs to `STDERR` with an optional
    /// prefix.
    ///
    /// # Panics
    /// Panics if `prefix` contains an interior NUL byte.
    pub fn new_stderr_logger<S: AsRef<str>>(log_level: LogLevel, prefix: Option<S>) -> Self {
        let prefix = prefix.map(|s| {
            s.as_ref()
                .into_c_string()
                .expect("cannot have NULL in prefix")
        });
        // `prefix` (the CString) must stay alive until after the FFI call
        // below, which is why it is bound to a local rather than inlined.
        let prefix_ptr = match prefix.as_ref() {
            Some(s) => s.as_ptr(),
            None => std::ptr::null(),
        };
        let inner =
            unsafe { ffi::rocksdb_logger_create_stderr_logger(log_level as i32, prefix_ptr) };
        Self {
            inner,
            // no Rust callback: RocksDB implements this
            callback: None,
        }
    }

    /// Creates a new logger that redirects logs to a custom callback.
    ///
    /// `cb` receives the log level and the (lossily UTF-8-decoded) message of
    /// every log line RocksDB emits at `level` or above.
    pub fn new_callback_logger<F: Fn(LogLevel, &str) + Sync + Send + 'static>(
        level: LogLevel,
        cb: F,
    ) -> Self {
        // use an Arc<Box<...>> so we can reference count, and still pass a thin pointer to C
        let arc_cb: Arc<LoggerCallback> = Arc::new(Box::new(cb));
        // The raw pointer handed to C stays valid because `arc_cb` is stored
        // on the returned `InfoLogger` (see `callback` below), keeping the
        // Arc's refcount above zero for the logger's lifetime.
        let raw_cb: LoggerCallbackPtr = Arc::as_ptr(&arc_cb);
        let inner = unsafe {
            ffi::rocksdb_logger_create_callback_logger(
                level as i32,
                Some(logger_callback),
                raw_cb as *mut c_void,
            )
        };
        Self {
            inner,
            callback: Some(arc_cb),
        }
    }
}
5071
5072impl Drop for InfoLogger {
5073    fn drop(&mut self) {
5074        unsafe {
5075            ffi::rocksdb_logger_destroy(self.inner);
5076        }
5077    }
5078}
5079
/// Ensures the unsafe casts use the same type.
/// This is the pointer type exchanged between `new_callback_logger` (which
/// produces it via `Arc::as_ptr`) and `logger_callback` (which dereferences it).
type LoggerCallbackPtr = *const LoggerCallback;
5082
/// C-to-Rust trampoline invoked by RocksDB for each log line emitted through
/// a callback logger created by [`InfoLogger::new_callback_logger`].
///
/// # Panics
/// Panics if RocksDB reports a log level outside the `LogLevel` range.
unsafe extern "C" fn logger_callback(
    raw_cb: *mut c_void,
    level: c_uint,
    msg: *mut c_char,
    len: size_t,
) {
    // SAFETY: `raw_cb` is the pointer registered in `new_callback_logger`;
    // the `Arc` stored on the owning `InfoLogger` keeps the callback alive.
    let rust_callback: &LoggerCallback = unsafe { &*(raw_cb as LoggerCallbackPtr) };
    // SAFETY: caller (RocksDB) provides `len` readable bytes at `msg` —
    // TODO confirm against the C API contract.
    let raw_msg = unsafe { std::slice::from_raw_parts(msg as *const u8, len) };
    // Log text is not guaranteed to be valid UTF-8; decode lossily.
    let msg = String::from_utf8_lossy(raw_msg);
    let level =
        LogLevel::try_from_raw(level as i32).expect("rocksdb generated an invalid log level");
    (rust_callback)(level, &msg);
}
5096
#[cfg(test)]
mod tests {
    use crate::db_options::WriteBufferManager;
    use crate::{Cache, CompactionPri, InfoLogger, MemtableFactory, Options};

    // Statistics are absent until explicitly enabled, and a fresh Options
    // starts without them.
    #[test]
    fn test_enable_statistics() {
        let mut opts = Options::default();
        assert_eq!(None, opts.get_statistics());
        opts.enable_statistics();
        opts.set_stats_dump_period_sec(60);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    // Smoke test: each memtable factory variant can be set without error.
    #[test]
    fn test_set_memtable_factory() {
        let mut opts = Options::default();
        opts.set_memtable_factory(MemtableFactory::Vector);
        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
        opts.set_memtable_factory(MemtableFactory::HashSkipList {
            bucket_count: 100,
            height: 4,
            branching_factor: 4,
        });
    }

    // The fsync flag round-trips through set/get.
    #[test]
    fn test_use_fsync() {
        let mut opts = Options::default();
        assert!(!opts.get_use_fsync());
        opts.set_use_fsync(true);
        assert!(opts.get_use_fsync());
    }

    // Setting a stats persist period keeps statistics enabled; a fresh
    // Options still has none.
    #[test]
    fn test_set_stats_persist_period_sec() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_persist_period_sec(5);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    // The WriteBufferManager is reference counted and must remain usable
    // after the Options that referenced it is dropped.
    #[test]
    fn test_set_write_buffer_manager() {
        let mut opts = Options::default();
        let lrucache = Cache::new_lru_cache(100);
        let write_buffer_manager =
            WriteBufferManager::new_write_buffer_manager_with_cache(100, false, lrucache);
        assert_eq!(write_buffer_manager.get_buffer_size(), 100);
        assert_eq!(write_buffer_manager.get_usage(), 0);
        assert!(write_buffer_manager.enabled());

        opts.set_write_buffer_manager(&write_buffer_manager);
        drop(opts);

        // WriteBufferManager outlives options
        assert!(write_buffer_manager.enabled());
    }

    // A configured compaction priority should appear in the OPTIONS file
    // RocksDB writes on open.
    #[test]
    fn compaction_pri() {
        let mut opts = Options::default();
        opts.set_compaction_pri(CompactionPri::RoundRobin);
        opts.create_if_missing(true);
        let tmp = tempfile::tempdir().unwrap();
        let _db = crate::DB::open(&opts, tmp.path()).unwrap();

        // Find the OPTIONS-* file in the DB directory and read it back.
        let options = std::fs::read_dir(tmp.path())
            .unwrap()
            .find_map(|x| {
                let x = x.ok()?;
                x.file_name()
                    .into_string()
                    .unwrap()
                    .contains("OPTIONS")
                    .then_some(x.path())
            })
            .map(std::fs::read_to_string)
            .unwrap()
            .unwrap();

        assert!(options.contains("compaction_pri=kRoundRobin"));
    }

    // Exercises callback-logger lifetimes: the callback must keep firing
    // while any DB or copied logger handle is alive, even after the Options
    // (or the original logger) has been dropped.
    #[test]
    fn test_callback_logger() {
        let (log_snd, log_rcv) = std::sync::mpsc::channel();
        let callback = move |level, msg: &str| {
            log_snd.send((level, msg.to_string())).ok();
        };

        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_info_logger(InfoLogger::new_callback_logger(
            super::LogLevel::Debug,
            callback,
        ));

        // create 2 DBs with the options then drop the options to ensure it is reference counted
        let tmp = tempfile::tempdir().unwrap();
        let db = crate::DB::open(&opts, tmp.path()).unwrap();
        db.put(b"testkey", b"testvalue").unwrap();
        db.flush().unwrap();
        db.delete(b"testkey").unwrap();
        db.flush().unwrap();
        db.compact_range(Some(b"a"), Some(b"z"));
        assert!(log_rcv.try_recv().is_ok());
        drop(db);

        let tmp2 = tempfile::tempdir().unwrap();
        let db2 = crate::DB::open(&opts, tmp2.path()).unwrap();

        // get the configured logger before dropping the options
        let logger = opts.get_info_logger();
        drop(opts);

        // clear the logs and make sure the callback is called by db2
        while log_rcv.try_recv().is_ok() {}
        assert!(log_rcv.try_recv().is_err());

        db2.put(b"testkey2", b"testvalue2").unwrap();
        db2.flush().unwrap();
        db2.delete(b"testkey2").unwrap();
        db2.flush().unwrap();
        db2.compact_range(Some(b"a"), Some(b"z"));

        drop(db2);
        assert!(log_rcv.try_recv().is_ok());

        // clear the logs
        while log_rcv.try_recv().is_ok() {}
        assert!(log_rcv.try_recv().is_err());

        // create a db with the copied logger to check lifetimes
        let tmp3 = tempfile::tempdir().unwrap();
        let mut opts2 = Options::default();
        opts2.create_if_missing(true);
        opts2.set_info_logger(logger);
        let db3 = crate::DB::open(&opts2, tmp3.path()).unwrap();
        drop(opts2);
        db3.put(b"testkey3", b"testvalue3").unwrap();
        db3.flush().unwrap();
        db3.delete(b"testkey3").unwrap();
        db3.flush().unwrap();
        db3.compact_range(Some(b"a"), Some(b"z"));
        assert!(log_rcv.try_recv().is_ok());
        drop(db3);
    }
}