rust_rocksdb/db_options.rs

// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::ffi::CStr;
use std::panic::{catch_unwind, AssertUnwindSafe, RefUnwindSafe};
use std::path::Path;
use std::ptr::{null_mut, NonNull};
use std::slice;
use std::sync::Arc;

use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};

use crate::column_family::ColumnFamilyTtl;
use crate::statistics::{Histogram, HistogramData, StatsLevel};
use crate::{
    compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
    compaction_filter_factory::{self, CompactionFilterFactory},
    comparator::{
        ComparatorCallback, ComparatorWithTsCallback, CompareFn, CompareTsFn, CompareWithoutTsFn,
    },
    db::DBAccess,
    env::Env,
    ffi,
    ffi_util::{from_cstr, to_cpath, CStrLike},
    merge_operator::{
        self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
    },
    slice_transform::SliceTransform,
    statistics::Ticker,
    ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
};

pub(crate) struct WriteBufferManagerWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_write_buffer_manager_t>,
}

impl Drop for WriteBufferManagerWrapper {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct WriteBufferManager(pub(crate) Arc<WriteBufferManagerWrapper>);

impl WriteBufferManager {
    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
    ///
    /// A write buffer manager helps users control the total memory used by memtables across
    /// multiple column families and/or DB instances. Users can enable this control in two ways:
    ///
    /// 1. Limit the total memtable usage across multiple column families and DBs under a threshold.
    /// 2. Charge the memtable memory usage to the block cache, so that the memory of RocksDB can be
    ///    capped by a single limit.
    ///
    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager:
    /// create one write buffer manager object and pass it to the options of every column family
    /// or DB whose memtable size you want it to control.
    ///
    /// A memory limit is given when creating the write buffer manager object. RocksDB will try to
    /// keep the total memory under this limit.
    ///
    /// If the mutable memtable size exceeds about 90% of the limit, a flush will be triggered on
    /// one column family of the DB you are inserting to. If the total memory is over the limit,
    /// a more aggressive flush may also be triggered, but only if the mutable memtable size also
    /// exceeds 50% of the limit. Both checks are needed because if more than half of the memory
    /// is already being flushed, triggering more flushes may not help.
    ///
    /// The total memory is counted as the total memory allocated in the arena, even if some of
    /// that may not yet be used by a memtable.
    ///
    /// buffer_size: the memory limit in bytes.
    /// allow_stall: if true, all writers are stalled when memory usage exceeds buffer_size
    ///              (a soft limit), waiting for flushes to complete and memory usage to drop
    ///              below the limit.
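    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming `Options::set_write_buffer_manager`, defined
    /// elsewhere in this crate, as the way to attach the manager):
    ///
    /// ```
    /// use rust_rocksdb::{Options, WriteBufferManager};
    ///
    /// // Cap total memtable memory at 64 MiB and stall writers when it is exceeded.
    /// let wbm = WriteBufferManager::new_write_buffer_manager(64 << 20, true);
    /// let mut opts = Options::default();
    /// opts.set_write_buffer_manager(&wbm);
    /// assert!(wbm.enabled());
    /// ```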
    pub fn new_write_buffer_manager(buffer_size: size_t, allow_stall: bool) -> Self {
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_write_buffer_manager_create(buffer_size, allow_stall)
        })
        .unwrap();
        WriteBufferManager(Arc::new(WriteBufferManagerWrapper { inner }))
    }

    /// Users can set up RocksDB to charge the memory used by memtables to the block cache.
    /// This works regardless of whether the memtable memory limit itself is enabled.
    /// This option is added to manage memory (memtables + block cache) under a single limit.
    ///
    /// buffer_size: the memory limit in bytes.
    /// allow_stall: if true, all writers are stalled when memory usage exceeds buffer_size
    ///              (a soft limit), waiting for flushes to complete and memory usage to drop.
    /// cache: the block cache instance
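    ///
    /// # Examples
    ///
    /// A minimal sketch charging memtable memory to a shared LRU cache:
    ///
    /// ```
    /// use rust_rocksdb::{Cache, WriteBufferManager};
    ///
    /// let cache = Cache::new_lru_cache(128 << 20);
    /// let wbm =
    ///     WriteBufferManager::new_write_buffer_manager_with_cache(64 << 20, false, cache);
    /// assert_eq!(wbm.get_buffer_size(), 64 << 20);
    /// ```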
    pub fn new_write_buffer_manager_with_cache(
        buffer_size: size_t,
        allow_stall: bool,
        cache: Cache,
    ) -> Self {
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_write_buffer_manager_create_with_cache(
                buffer_size,
                cache.0.inner.as_ptr(),
                allow_stall,
            )
        })
        .unwrap();
        WriteBufferManager(Arc::new(WriteBufferManagerWrapper { inner }))
    }

    /// Returns the WriteBufferManager memory usage in bytes.
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_write_buffer_manager_memory_usage(self.0.inner.as_ptr()) }
    }

    /// Returns the current buffer size in bytes.
    pub fn get_buffer_size(&self) -> usize {
        unsafe { ffi::rocksdb_write_buffer_manager_buffer_size(self.0.inner.as_ptr()) }
    }

    /// Set the buffer size in bytes.
    pub fn set_buffer_size(&self, new_size: usize) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_set_buffer_size(self.0.inner.as_ptr(), new_size);
        }
    }

    /// Returns whether the WriteBufferManager is enabled.
    pub fn enabled(&self) -> bool {
        unsafe { ffi::rocksdb_write_buffer_manager_enabled(self.0.inner.as_ptr()) }
    }

    /// Set the allow_stall flag.
    pub fn set_allow_stall(&self, allow_stall: bool) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_set_allow_stall(self.0.inner.as_ptr(), allow_stall);
        }
    }
}

pub(crate) struct CacheWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_cache_t>,
}

impl Drop for CacheWrapper {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cache_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct Cache(pub(crate) Arc<CacheWrapper>);

impl Cache {
    /// Creates an LRU cache with capacity in bytes.
    pub fn new_lru_cache(capacity: size_t) -> Cache {
        let inner = NonNull::new(unsafe { ffi::rocksdb_cache_create_lru(capacity) }).unwrap();
        Cache(Arc::new(CacheWrapper { inner }))
    }

    /// Creates an LRU cache with custom options.
    pub fn new_lru_cache_opts(opts: &LruCacheOptions) -> Cache {
        let inner =
            NonNull::new(unsafe { ffi::rocksdb_cache_create_lru_opts(opts.inner) }).unwrap();
        Cache(Arc::new(CacheWrapper { inner }))
    }

    /// Creates a HyperClockCache with capacity in bytes.
    ///
    /// `estimated_entry_charge` is an important tuning parameter. The optimal
    /// choice at any given time is
    /// `(cache.get_usage() - 64 * cache.get_table_address_count()) /
    /// cache.get_occupancy_count()`, or approximately `cache.get_usage() /
    /// cache.get_occupancy_count()`.
    ///
    /// However, the value cannot be changed dynamically, so as the cache
    /// composition changes at runtime, the following tradeoffs apply:
    ///
    /// * If the estimate is substantially too high (e.g., 25% higher),
    ///   the cache may have to evict entries to prevent load factors that
    ///   would dramatically affect lookup times.
    /// * If the estimate is substantially too low (e.g., less than half),
    ///   then metadata space overhead is substantially higher.
    ///
    /// The latter is generally preferable, and picking the larger of
    /// the block size and the metadata block size is a reasonable choice
    /// that errs towards this side.
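    ///
    /// # Examples
    ///
    /// A sketch using the default 4 KiB block size as the estimated entry
    /// charge, per the guidance above:
    ///
    /// ```
    /// use rust_rocksdb::Cache;
    ///
    /// let cache = Cache::new_hyper_clock_cache(64 << 20, 4 * 1024);
    /// assert!(cache.get_usage() <= 64 << 20);
    /// ```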
    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
        Cache(Arc::new(CacheWrapper {
            inner: NonNull::new(unsafe {
                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
            })
            .unwrap(),
        }))
    }

    /// Returns the cache memory usage in bytes.
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
    }

    /// Returns the pinned memory usage in bytes.
    pub fn get_pinned_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_pinned_usage(self.0.inner.as_ptr()) }
    }

    /// Sets cache capacity in bytes.
    pub fn set_capacity(&mut self, capacity: size_t) {
        unsafe {
            ffi::rocksdb_cache_set_capacity(self.0.inner.as_ptr(), capacity);
        }
    }
}

#[derive(Default)]
pub(crate) struct OptionsMustOutliveDB {
    env: Option<Env>,
    row_cache: Option<Cache>,
    blob_cache: Option<Cache>,
    block_based: Option<BlockBasedOptionsMustOutliveDB>,
    write_buffer_manager: Option<WriteBufferManager>,
}

impl OptionsMustOutliveDB {
    pub(crate) fn clone(&self) -> Self {
        Self {
            env: self.env.clone(),
            row_cache: self.row_cache.clone(),
            blob_cache: self.blob_cache.clone(),
            block_based: self
                .block_based
                .as_ref()
                .map(BlockBasedOptionsMustOutliveDB::clone),
            write_buffer_manager: self.write_buffer_manager.clone(),
        }
    }
}

#[derive(Default)]
struct BlockBasedOptionsMustOutliveDB {
    block_cache: Option<Cache>,
}

impl BlockBasedOptionsMustOutliveDB {
    fn clone(&self) -> Self {
        Self {
            block_cache: self.block_cache.clone(),
        }
    }
}

/// Database-wide options around performance and behavior.
///
/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
/// and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use rust_rocksdb::{Options, DB};
/// use rust_rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///    let path = "path/for/rocksdb/storageX";
///    let mut opts = Options::default();
///    opts.create_if_missing(true);
///    opts.set_max_open_files(10000);
///    opts.set_use_fsync(false);
///    opts.set_bytes_per_sync(8388608);
///    opts.optimize_for_point_lookup(1024);
///    opts.set_table_cache_num_shard_bits(6);
///    opts.set_max_write_buffer_number(32);
///    opts.set_write_buffer_size(536870912);
///    opts.set_target_file_size_base(1073741824);
///    opts.set_min_write_buffer_number_to_merge(4);
///    opts.set_level_zero_stop_writes_trigger(2000);
///    opts.set_level_zero_slowdown_writes_trigger(0);
///    opts.set_compaction_style(DBCompactionStyle::Universal);
///    opts.set_disable_auto_compactions(true);
///
///    DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    pub(crate) outlive: OptionsMustOutliveDB,
}

/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use rust_rocksdb::{DB, Options, WriteBatch, WriteOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY1")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY1");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
}

pub struct LruCacheOptions {
    pub(crate) inner: *mut ffi::rocksdb_lru_cache_options_t,
}

/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use rust_rocksdb::{DB, Options, FlushOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY2")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY2");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}

/// For configuring block-based file storage.
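///
/// # Examples
///
/// A minimal sketch wiring table options into `Options` (via
/// `Options::set_block_based_table_factory`, defined elsewhere in this crate):
///
/// ```
/// use rust_rocksdb::{BlockBasedOptions, Options};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_block_size(16 * 1024);
/// let mut opts = Options::default();
/// opts.set_block_based_table_factory(&block_opts);
/// ```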
pub struct BlockBasedOptions {
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    outlive: BlockBasedOptionsMustOutliveDB,
}

pub struct ReadOptions {
    pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
    // The `ReadOptions` owns a copy of the timestamp and iteration bounds.
    // This is necessary to ensure the pointers we pass over the FFI live as
    // long as the `ReadOptions`. This way, when performing the read operation,
    // the pointers are guaranteed to be valid.
    timestamp: Option<Vec<u8>>,
    iter_start_ts: Option<Vec<u8>>,
    iterate_upper_bound: Option<Vec<u8>>,
    iterate_lower_bound: Option<Vec<u8>>,
}

/// Configuration of cuckoo-based storage.
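///
/// # Examples
///
/// A minimal sketch wiring cuckoo table options into `Options` (via
/// `Options::set_cuckoo_table_factory`, defined elsewhere in this crate):
///
/// ```
/// use rust_rocksdb::{CuckooTableOptions, Options};
///
/// let mut table_opts = CuckooTableOptions::default();
/// table_opts.set_hash_ratio(0.8);
/// let mut opts = Options::default();
/// opts.set_cuckoo_table_factory(&table_opts);
/// ```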
pub struct CuckooTableOptions {
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}

/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use rust_rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// let tempdir = tempfile::Builder::new()
///     .tempdir()
///     .expect("Failed to create temporary folder for the _path_for_sst_file");
/// let path1 = tempdir.path().join("_path_for_sst_file");
/// writer.open(path1.clone()).unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let tempdir2 = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY3")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY3");
/// let path2 = tempdir2.path();
/// {
///   let db = DB::open_default(&path2).unwrap();
///   let mut ingest_opts = IngestExternalFileOptions::default();
///   ingest_opts.set_move_files(true);
///   db.ingest_external_file_opts(&ingest_opts, vec![path1]).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path2);
/// ```
pub struct IngestExternalFileOptions {
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}

// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for LruCacheOptions {}
unsafe impl Send for FlushOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CacheWrapper {}
unsafe impl Send for CompactOptions {}
unsafe impl Send for WriteBufferManagerWrapper {}

// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference.
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for LruCacheOptions {}
unsafe impl Sync for FlushOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CacheWrapper {}
unsafe impl Sync for CompactOptions {}
unsafe impl Sync for WriteBufferManagerWrapper {}

impl Drop for Options {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}

impl Clone for Options {
    fn clone(&self) -> Self {
        let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
        assert!(!inner.is_null(), "Could not copy RocksDB options");

        Self {
            inner,
            outlive: self.outlive.clone(),
        }
    }
}

impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}

impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}

impl Drop for FlushOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}

impl Drop for WriteOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}

impl Drop for LruCacheOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_lru_cache_options_destroy(self.inner);
        }
    }
}

impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_readoptions_destroy(self.inner);
        }
    }
}

impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}

impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
    ///
    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
    pub fn set_partition_filters(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(
                self.inner,
                c_uchar::from(enabled),
            );
        }
    }

    /// Sets global cache for blocks (user data is stored in a set of blocks, and
    /// a block is the unit of reading from disk).
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
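    ///
    /// # Examples
    ///
    /// Sharing one cache between all column families that use these table
    /// options:
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, Cache};
    ///
    /// let cache = Cache::new_lru_cache(64 << 20);
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_cache(&cache);
    /// ```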
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        self.outlive.block_cache = Some(cache.clone());
    }

    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
        }
    }

    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
    /// policy to reduce disk reads.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// ```
    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
        unsafe {
            let bloom = if block_based {
                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
            } else {
                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
            };

            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
        }
    }

    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// If cache_index_and_filter_blocks is enabled, cache index and filter blocks with high priority.
    /// If set to true, depending on implementation of block cache,
    /// index and filter blocks may be less likely to be evicted than data blocks.
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Defines the index type to be used for SS-table lookups.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
    /// ```
    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
        let index = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// the top-level index of partitioned filter and index blocks are stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in LSM tree.
    ///
    /// Default: false.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    ///
    /// See full [list](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/table.h#L493-L521)
    /// of the supported versions.
    ///
    /// Default: 6.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }

    /// Use delta encoding to compress keys in blocks.
    /// ReadOptions::pin_data requires this option to be disabled.
    ///
    /// Default: true
    pub fn set_use_delta_encoding(&mut self, enable: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_use_delta_encoding(
                self.inner,
                c_uchar::from(enable),
            );
        }
    }

    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }

    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }

    /// Set the data block index type for point lookups:
    ///  `DataBlockIndexType::BinarySearch` to use binary search within the data block.
    ///  `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
    ///  the normal binary search.
    ///
    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
    ///
    /// Default: `BinarySearch`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
    /// block_opts.set_data_block_hash_ratio(0.85);
    /// ```
    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
        let index_t = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
        }
    }

    /// Set the data block hash index utilization ratio.
    ///
    /// The smaller the utilization ratio, the fewer hash collisions happen, reducing the risk
    /// of a point lookup falling back to binary search due to collisions. A smaller ratio thus
    /// means faster lookups at the price of more space overhead.
    ///
    /// Default: 0.75
    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
        }
    }

    /// If false, place only prefixes in the filter, not whole keys.
    ///
    /// Defaults to true.
    pub fn set_whole_key_filtering(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
        }
    }

    /// Use the specified checksum type.
    /// Newly created table files will be protected with this checksum type.
    /// Old table files will still be readable, even though they have a different checksum type.
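    ///
    /// # Examples
    ///
    /// A sketch selecting one of the `ChecksumType` variants exported by this
    /// crate:
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, ChecksumType};
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_checksum_type(ChecksumType::XXH3);
    /// ```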
    pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
        unsafe {
            ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
        }
    }

    /// If true, generate Bloom/Ribbon filters that minimize memory internal
    /// fragmentation.
    /// See official [wiki](
    /// https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter#reducing-internal-fragmentation)
    /// for more information.
    ///
    /// Defaults to false.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// opts.set_optimize_filters_for_memory(true);
    /// ```
    pub fn set_optimize_filters_for_memory(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_optimize_filters_for_memory(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// The tier of block-based tables whose top-level index into metadata
    /// partitions will be pinned. Currently indexes and filters may be
    /// partitioned.
    ///
    /// Note `cache_index_and_filter_blocks` must be true for this option to have
    /// any effect. Otherwise any top-level index into metadata partitions would be
    /// held in table reader memory, outside the block cache.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_top_level_index_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_top_level_index_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_top_level_index_pinning_tier(
                self.inner,
                tier as c_int,
            );
        }
    }

    /// The tier of block-based tables whose metadata partitions will be pinned.
    /// Currently indexes and filters may be partitioned.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_partition_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_partition_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_pinning_tier(self.inner, tier as c_int);
        }
    }

    /// The tier of block-based tables whose unpartitioned metadata blocks will be
    /// pinned.
    ///
    /// Note `cache_index_and_filter_blocks` must be true for this option to have
    /// any effect. Otherwise the unpartitioned meta-blocks would be held in table
    /// reader memory, outside the block cache.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_unpartitioned_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_unpartitioned_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_unpartitioned_pinning_tier(
                self.inner,
                tier as c_int,
            );
        }
    }
}

impl Default for BlockBasedOptions {
    fn default() -> Self {
        let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
        assert!(
            !block_opts.is_null(),
            "Could not create RocksDB block based options"
        );

        Self {
            inner: block_opts,
            outlive: BlockBasedOptionsMustOutliveDB::default(),
        }
    }
}

impl CuckooTableOptions {
    /// Determines the utilization of hash tables. Smaller values
    /// result in larger hash tables with fewer collisions.
    /// Default: 0.9
    pub fn set_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
        }
    }

    /// A property used by the builder to determine how deep to search for a
    /// path along which to displace elements in case of collision. See the
    /// Builder.MakeSpaceForKey method. Higher values result in more efficient
    /// hash tables with fewer lookups but take more time to build.
    /// Default: 100
    pub fn set_max_search_depth(&mut self, depth: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
        }
    }

956    /// In case of collision while inserting, the builder
957    /// attempts to insert in the next cuckoo_block_size
958    /// locations before skipping over to the next Cuckoo hash
959    /// function. This makes lookups more cache friendly in case
960    /// of collisions.
961    /// Default: 5
962    pub fn set_cuckoo_block_size(&mut self, size: u32) {
963        unsafe {
964            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
965        }
966    }
967
    /// If this option is enabled, the user key is treated as uint64_t and its value
    /// is used as the hash value directly. This option changes the builder's behavior.
    /// Readers ignore this option and behave according to what is specified in the
    /// table property.
    /// Default: false
    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
        }
    }

    /// If this option is set to true, modulo is used during hash calculation.
    /// This often yields better space efficiency at the cost of performance.
    /// If this option is set to false, the number of entries in the table is
    /// constrained to be a power of two, and a bitwise AND is used to calculate
    /// the hash, which is generally faster.
    /// Default: true
    pub fn set_use_module_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
        }
    }
}

impl Default for CuckooTableOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
        assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");

        Self { inner: opts }
    }
}

/// Verbosity of the LOG.
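///
/// # Examples
///
/// A sketch (assuming `Options::set_log_level`, defined elsewhere in this
/// crate, as the way to apply it):
///
/// ```
/// use rust_rocksdb::{LogLevel, Options};
///
/// let mut opts = Options::default();
/// opts.set_log_level(LogLevel::Warn);
/// ```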
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}

impl Options {
    /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
    /// latest RocksDB options file stored in the specified rocksdb database.
    ///
    /// *IMPORTANT*:
    /// ROCKSDB DOES NOT STORE the cf ttl in the options file. If you have set it via
    /// [`ColumnFamilyDescriptor::new_with_ttl`], then you need to set it again after loading the options file.
    /// For your safety, the ttl will be set to [`ColumnFamilyTtl::Disabled`] for all column families.
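    ///
    /// # Examples
    ///
    /// A sketch against an existing database directory (hypothetical path):
    ///
    /// ```no_run
    /// use rust_rocksdb::{Cache, Env, Options};
    ///
    /// let cache = Cache::new_lru_cache(8 << 20);
    /// let (db_opts, cf_descriptors) =
    ///     Options::load_latest("path/to/db", Env::new().unwrap(), true, cache).unwrap();
    /// ```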
    pub fn load_latest<P: AsRef<Path>>(
        path: P,
        env: Env,
        ignore_unknown_options: bool,
        cache: Cache,
    ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
        let path = to_cpath(path)?;
        let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
        let mut num_column_families: usize = 0;
        let mut column_family_names: *mut *mut c_char = null_mut();
        let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
        unsafe {
            ffi_try!(ffi::rocksdb_load_latest_options(
                path.as_ptr(),
                env.0.inner,
                ignore_unknown_options,
                cache.0.inner.as_ptr(),
                &mut db_options,
                &mut num_column_families,
                &mut column_family_names,
                &mut column_family_options,
            ));
        }
        let options = Options {
            inner: db_options,
            outlive: OptionsMustOutliveDB::default(),
        };
        let column_families = unsafe {
            Options::read_column_descriptors(
                num_column_families,
                column_family_names,
                column_family_options,
            )
        };
        Ok((options, column_families))
    }

    /// Read column family descriptors from C pointers, freeing the pointers afterwards.
    #[inline]
    unsafe fn read_column_descriptors(
        num_column_families: usize,
        column_family_names: *mut *mut c_char,
        column_family_options: *mut *mut ffi::rocksdb_options_t,
    ) -> Vec<ColumnFamilyDescriptor> {
        let column_family_names_iter =
            slice::from_raw_parts(column_family_names, num_column_families)
                .iter()
                .map(|ptr| from_cstr(*ptr));
        let column_family_options_iter =
            slice::from_raw_parts(column_family_options, num_column_families)
                .iter()
                .map(|ptr| Options {
                    inner: *ptr,
                    outlive: OptionsMustOutliveDB::default(),
                });
        let column_descriptors = column_family_names_iter
            .zip(column_family_options_iter)
            .map(|(name, options)| ColumnFamilyDescriptor {
                name,
                options,
                ttl: ColumnFamilyTtl::Disabled,
            })
            .collect::<Vec<_>>();
        // free pointers
        slice::from_raw_parts(column_family_names, num_column_families)
            .iter()
            .for_each(|ptr| ffi::rocksdb_free(*ptr as *mut c_void));
        ffi::rocksdb_free(column_family_names as *mut c_void);
        ffi::rocksdb_free(column_family_options as *mut c_void);
        column_descriptors
    }

    /// Updates DBOptions with values parsed from a string.
    ///
    /// See official [wiki](
    /// https://github.com/facebook/rocksdb/wiki/Option-String-and-Option-Map#option-string)
    /// for more information.
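    ///
    /// # Examples
    ///
    /// A sketch using the option-string syntax from the wiki linked above:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_options_from_string("max_write_buffer_number=16;write_buffer_size=1048576")
    ///     .unwrap();
    /// ```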
    pub fn set_options_from_string(&mut self, string: impl CStrLike) -> Result<&mut Self, Error> {
        let c_string = string.into_c_string().unwrap();
        let mut err: *mut c_char = null_mut();
        let err_ptr: *mut *mut c_char = &mut err;
        unsafe {
            ffi::rocksdb_get_options_from_string(
                self.inner,
                c_string.as_ptr(),
                self.inner,
                err_ptr,
            );
        }

        if err.is_null() {
            Ok(self)
        } else {
            Err(Error::new(format!(
                "Could not set options from string: {}",
                crate::ffi_util::error_message(err)
            )))
        }
    }

    /// By default, RocksDB uses only one background thread for flush and
    /// compaction. Calling this function will set it up such that a total of
    /// `parallelism` threads is used. A good value for `parallelism` is the number
    /// of cores. You almost definitely want to call this function if your system is
    /// bottlenecked by RocksDB.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.increase_parallelism(3);
    /// ```
    pub fn increase_parallelism(&mut self, parallelism: i32) {
        unsafe {
            ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
        }
    }

    /// Optimize level style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base` and `max_bytes_for_level_base`, so it can override
    /// those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption is constrained by
    /// `memtable_memory_budget`.
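    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Constrain memtable memory to roughly 512 MiB.
    /// opts.optimize_level_style_compaction(512 << 20);
    /// ```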
    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_level_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// Optimize universal style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base` and `max_bytes_for_level_base`, so it can override
    /// those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption is constrained by
    /// `memtable_memory_budget`.
    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_universal_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// If true, the database will be created if it is missing.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_if_missing(true);
    /// ```
    pub fn create_if_missing(&mut self, create_if_missing: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_if_missing(
                self.inner,
                c_uchar::from(create_if_missing),
            );
        }
    }

    /// If true, any column families that didn't exist when opening the database
    /// will be created.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_missing_column_families(true);
    /// ```
    pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_missing_column_families(
                self.inner,
                c_uchar::from(create_missing_cfs),
            );
        }
    }

    /// Specifies whether an error should be raised if the database already exists.
    ///
    /// Default: false
    pub fn set_error_if_exists(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
        }
    }

    /// Enable/disable paranoid checks.
    ///
    /// If true, the implementation will do aggressive checking of the
    /// data it is processing and will stop early if it detects any
    /// errors. This may have unforeseen ramifications: for example, a
    /// corruption of one DB entry may cause a large number of entries to
    /// become unreadable or for the entire DB to become unopenable.
    /// If any of the writes to the database fails (Put, Delete, Merge, Write),
    /// the database will switch to read-only mode and fail all other
    /// Write operations.
    ///
    /// Default: false
    pub fn set_paranoid_checks(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
        }
    }

    /// A list of paths where SST files can be put into, each with its target size.
    /// Newer data is placed into paths specified earlier in the vector while
    /// older data gradually moves to paths specified later in the vector.
    ///
    /// For example, if you have a flash device with 10GB allocated for the DB,
    /// as well as a hard drive of 2TB, you should configure it to be:
    ///   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
    ///
    /// The system will try to guarantee data under each path is close to but
    /// not larger than the target size. But current and future file sizes used
    /// in determining where to place a file are based on best-effort estimation,
    /// which means there is a chance that the actual size under the directory
    /// is slightly more than the target size under some workloads. Users should
    /// give some buffer room for those cases.
    ///
    /// If none of the paths has sufficient room to place a file, the file will
    /// be placed in the last path anyway, regardless of the target size.
    ///
    /// Placing newer data in earlier paths is also best-effort. Users should
    /// expect files to be placed in higher levels in some extreme cases.
    ///
    /// If left empty, only one path will be used, which is `path` passed when
    /// opening the DB.
    ///
    /// Default: empty
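    ///
    /// # Examples
    ///
    /// A sketch mirroring the flash + hard-drive layout above (hypothetical
    /// paths; `DBPath::new` is defined elsewhere in this crate):
    ///
    /// ```no_run
    /// use rust_rocksdb::{DBPath, Options};
    ///
    /// let flash = DBPath::new("/flash_path", 10 << 30).unwrap();
    /// let hard_drive = DBPath::new("/hard_drive", 2 << 40).unwrap();
    /// let mut opts = Options::default();
    /// opts.set_db_paths(&[flash, hard_drive]);
    /// ```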
    pub fn set_db_paths(&mut self, paths: &[DBPath]) {
        let mut paths: Vec<_> = paths.iter().map(|path| path.inner.cast_const()).collect();
        let num_paths = paths.len();
        unsafe {
            ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
        }
    }

1293    /// Use the specified object to interact with the environment,
1294    /// e.g. to read/write files, schedule background work, etc. In the near
1295    /// future, support for doing storage operations such as read/write files
1296    /// through env will be deprecated in favor of file_system.
1297    ///
1298    /// Default: Env::default()
1299    pub fn set_env(&mut self, env: &Env) {
1300        unsafe {
1301            ffi::rocksdb_options_set_env(self.inner, env.0.inner);
1302        }
1303        self.outlive.env = Some(env.clone());
1304    }
1305
1306    /// Sets the compression algorithm that will be used for compressing blocks.
1307    ///
1308    /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
1309    /// snappy feature is not enabled).
1310    ///
1311    /// # Examples
1312    ///
1313    /// ```
1314    /// use rust_rocksdb::{Options, DBCompressionType};
1315    ///
1316    /// let mut opts = Options::default();
1317    /// opts.set_compression_type(DBCompressionType::Snappy);
1318    /// ```
1319    pub fn set_compression_type(&mut self, t: DBCompressionType) {
1320        unsafe {
1321            ffi::rocksdb_options_set_compression(self.inner, t as c_int);
1322        }
1323    }
1324
1325    /// Number of threads for parallel compression.
1326    /// Parallel compression is enabled only if threads > 1.
1327    /// THE FEATURE IS STILL EXPERIMENTAL
1328    ///
1329    /// See [code](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/advanced_options.h#L116-L127)
1330    /// for more information.
1331    ///
1332    /// Default: 1
1333    ///
1334    /// Examples
1335    ///
1336    /// ```
1337    /// use rust_rocksdb::{Options, DBCompressionType};
1338    ///
1339    /// let mut opts = Options::default();
1340    /// opts.set_compression_type(DBCompressionType::Zstd);
1341    /// opts.set_compression_options_parallel_threads(3);
1342    /// ```
1343    pub fn set_compression_options_parallel_threads(&mut self, num: i32) {
1344        unsafe {
1345            ffi::rocksdb_options_set_compression_options_parallel_threads(self.inner, num);
1346        }
1347    }
1348
1349    /// Sets the compression algorithm that will be used for compressing WAL.
1350    ///
1351    /// At present, only ZSTD compression is supported!
1352    ///
1353    /// Default: `DBCompressionType::None`
1354    ///
1355    /// # Examples
1356    ///
1357    /// ```
1358    /// use rust_rocksdb::{Options, DBCompressionType};
1359    ///
1360    /// let mut opts = Options::default();
1361    /// opts.set_wal_compression_type(DBCompressionType::Zstd);
1362    /// // Or None to disable it
1363    /// opts.set_wal_compression_type(DBCompressionType::None);
1364    /// ```
1365    pub fn set_wal_compression_type(&mut self, t: DBCompressionType) {
1366        match t {
1367            DBCompressionType::None | DBCompressionType::Zstd => unsafe {
1368                ffi::rocksdb_options_set_wal_compression(self.inner, t as c_int);
1369            },
1370            other => unimplemented!("{:?} is not supported for WAL compression", other),
1371        }
1372    }
1373
1374    /// Sets the bottom-most compression algorithm that will be used for
1375    /// compressing blocks at the bottom-most level.
1376    ///
1377    /// Note that to actually enable bottom-most compression configuration after
1378    /// setting the compression type, it needs to be enabled by calling
1379    /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
1380    /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with `enabled` argument
1381    /// set to `true`.
1382    ///
1383    /// # Examples
1384    ///
1385    /// ```
1386    /// use rust_rocksdb::{Options, DBCompressionType};
1387    ///
1388    /// let mut opts = Options::default();
1389    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1390    /// opts.set_bottommost_zstd_max_train_bytes(0, true);
1391    /// ```
1392    pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
1393        unsafe {
1394            ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
1395        }
1396    }
1397
1398    /// Different levels can have different compression policies. There
1399    /// are cases where most lower levels would like to use quick compression
1400    /// algorithms while the higher levels (which have more data) use
1401    /// compression algorithms that have better compression but could
1402    /// be slower. This array, if non-empty, should have an entry for
1403    /// each level of the database; these override the value specified in
1404    /// the previous field 'compression'.
1405    ///
1406    /// # Examples
1407    ///
1408    /// ```
1409    /// use rust_rocksdb::{Options, DBCompressionType};
1410    ///
1411    /// let mut opts = Options::default();
1412    /// opts.set_compression_per_level(&[
1413    ///     DBCompressionType::None,
1414    ///     DBCompressionType::None,
1415    ///     DBCompressionType::Snappy,
1416    ///     DBCompressionType::Snappy,
1417    ///     DBCompressionType::Snappy
1418    /// ]);
1419    /// ```
1420    pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
1421        unsafe {
1422            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
1423            ffi::rocksdb_options_set_compression_per_level(
1424                self.inner,
1425                level_types.as_mut_ptr(),
1426                level_types.len() as size_t,
1427            );
1428        }
1429    }
1430
1431    /// Sets the compression options: window bits (`w_bits`), compression `level`, `strategy`,
1432    /// and `max_dict_bytes`, the maximum size of dictionaries used to prime the compression
1433    /// library. Enabling a dictionary can improve compression ratios when there are repetitions across data blocks.
1434    ///
1435    /// The dictionary is created by sampling the SST file data. If
1436    /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
1437    /// dictionary generator. Otherwise, the random samples are used directly as
1438    /// the dictionary.
1439    ///
1440    /// When compression dictionary is disabled, we compress and write each block
1441    /// before buffering data for the next one. When compression dictionary is
1442    /// enabled, we buffer all SST file data in-memory so we can sample it, as data
1443    /// can only be compressed and written after the dictionary has been finalized.
1444    /// So users of this feature may see increased memory usage.
1445    ///
1446    /// Default: `0`
1447    ///
1448    /// # Examples
1449    ///
1450    /// ```
1451    /// use rust_rocksdb::Options;
1452    ///
1453    /// let mut opts = Options::default();
1454    /// opts.set_compression_options(4, 5, 6, 7);
1455    /// ```
1456    pub fn set_compression_options(
1457        &mut self,
1458        w_bits: c_int,
1459        level: c_int,
1460        strategy: c_int,
1461        max_dict_bytes: c_int,
1462    ) {
1463        unsafe {
1464            ffi::rocksdb_options_set_compression_options(
1465                self.inner,
1466                w_bits,
1467                level,
1468                strategy,
1469                max_dict_bytes,
1470            );
1471        }
1472    }
1473
1474    /// Sets compression options for blocks at the bottom-most level.  Meaning
1475    /// of all settings is the same as in [`set_compression_options`](#method.set_compression_options) method but
1476    /// affect only the bottom-most compression which is set using
1477    /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
1478    ///
1479    /// # Examples
1480    ///
1481    /// ```
1482    /// use rust_rocksdb::{Options, DBCompressionType};
1483    ///
1484    /// let mut opts = Options::default();
1485    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1486    /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
1487    /// ```
1488    pub fn set_bottommost_compression_options(
1489        &mut self,
1490        w_bits: c_int,
1491        level: c_int,
1492        strategy: c_int,
1493        max_dict_bytes: c_int,
1494        enabled: bool,
1495    ) {
1496        unsafe {
1497            ffi::rocksdb_options_set_bottommost_compression_options(
1498                self.inner,
1499                w_bits,
1500                level,
1501                strategy,
1502                max_dict_bytes,
1503                c_uchar::from(enabled),
1504            );
1505        }
1506    }
1507
1508    /// Sets the maximum size of training data passed to zstd's dictionary trainer. Using zstd's
1509    /// dictionary trainer can achieve even better compression ratio improvements than using
1510    /// `max_dict_bytes` alone.
1511    ///
1512    /// The training data will be used to generate a dictionary of up to `max_dict_bytes` bytes.
1513    ///
1514    /// Default: 0.
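    ///
    /// # Examples
    ///
    /// A minimal sketch with illustrative values; zstd's own guidance of roughly
    /// 100x the dictionary size is assumed as a starting point, not a tuned
    /// recommendation:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // A 16 KiB dictionary (max_dict_bytes) trained on ~100x as much sampled data.
    /// opts.set_compression_options(4, 5, 6, 16 * 1024);
    /// opts.set_zstd_max_train_bytes(100 * 16 * 1024);
    /// ```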
1515    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
1516        unsafe {
1517            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
1518        }
1519    }
1520
1521    /// Sets the maximum size of training data passed to zstd's dictionary trainer
1522    /// when compressing the bottom-most level. Using zstd's dictionary trainer
1523    /// can achieve even better compression ratio improvements than using
1524    /// `max_dict_bytes` alone.
1525    ///
1526    /// The training data will be used to generate a dictionary of
1527    /// `max_dict_bytes`.
1528    ///
1529    /// Default: 0.
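    ///
    /// # Examples
    ///
    /// A minimal sketch (the training size is illustrative):
    ///
    /// ```
    /// use rust_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// // `true` enables the bottom-most compression options.
    /// opts.set_bottommost_zstd_max_train_bytes(1024 * 1024, true);
    /// ```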
1530    pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
1531        unsafe {
1532            ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
1533                self.inner,
1534                value,
1535                c_uchar::from(enabled),
1536            );
1537        }
1538    }
1539
1540    /// If non-zero, we perform bigger reads when doing compaction. If you're
1541    /// running RocksDB on spinning disks, you should set this to at least 2MB.
1542    /// That way RocksDB's compaction is doing sequential instead of random reads.
1543    ///
1544    /// Default: 2 * 1024 * 1024 (2 MB)
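    ///
    /// # Examples
    ///
    /// A minimal example; 4MB is illustrative, the note above only suggests
    /// at least 2MB for spinning disks:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_readahead_size(4 * 1024 * 1024);
    /// ```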
1545    pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
1546        unsafe {
1547            ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
1548        }
1549    }
1550
1551    /// Allow RocksDB to pick dynamic base of bytes for levels.
1552    /// With this feature turned on, RocksDB will automatically adjust max bytes for each level.
1553    /// The goal of this feature is to have a lower bound on size amplification.
1554    ///
1555    /// Default: false.
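    ///
    /// # Examples
    ///
    /// A minimal example turning the feature on:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_level_compaction_dynamic_level_bytes(true);
    /// ```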
1556    pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
1557        unsafe {
1558            ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
1559                self.inner,
1560                c_uchar::from(v),
1561            );
1562        }
1563    }
1564
1565    /// This option has different meanings for different compaction styles:
1566    ///
1567    /// Leveled: files older than `periodic_compaction_seconds` will be picked up
1568    /// for compaction and will be re-written to the same level as they were
1569    /// before if level_compaction_dynamic_level_bytes is disabled. Otherwise,
1570    /// files are rewritten to the next level, except for files in the last
1571    /// level, which are rewritten to the same level.
1572    ///
1573    /// FIFO: not supported. Setting this option has no effect for FIFO compaction.
1574    ///
1575    /// Universal: when there are files older than `periodic_compaction_seconds`,
1576    /// rocksdb will try to do as large a compaction as possible including the
1577    /// last level. Such compaction is only skipped if only last level is to
1578    /// be compacted and no file in last level is older than
1579    /// `periodic_compaction_seconds`. See more in
1580    /// UniversalCompactionBuilder::PickPeriodicCompaction().
1581    /// For backward compatibility, the effective value of this option takes
1582    /// into account the value of option `ttl`. The logic is as follows:
1583    ///
1584    /// - both options are set to 30 days if they have the default value.
1585    /// - if both options are zero, zero is picked. Otherwise, we take the min
1586    ///   value among the non-zero option values (i.e. the stricter limit).
1587    ///
1588    /// One main use of the feature is to make sure a file goes through compaction
1589    /// filters periodically. Users can also use the feature to clear up SST
1590    /// files using an old format.
1591    ///
1592    /// A file's age is computed by looking at file_creation_time or creation_time
1593    /// table properties in order, if they have valid non-zero values; if not, the
1594    /// age is based on the file's last modified time (given by the underlying
1595    /// Env).
1596    ///
1597    /// This option only supports block based table format for any compaction
1598    /// style.
1599    ///
1600    /// unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
1601    ///
1602    /// Values:
1603    /// 0: turn off periodic compactions.
1604    /// UINT64_MAX - 1 (0xfffffffffffffffe): a special flag that lets RocksDB
1605    /// pick the default.
1606    ///
1607    /// Default: 30 days if using block based table format + compaction filter +
1608    /// leveled compaction or block based table format + universal compaction.
1609    /// 0 (disabled) otherwise.
1610    ///
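    /// # Examples
    ///
    /// A minimal example using the 30-day period mentioned above:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_periodic_compaction_seconds(30 * 24 * 60 * 60);
    /// ```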
1611    pub fn set_periodic_compaction_seconds(&mut self, secs: u64) {
1612        unsafe {
1613            ffi::rocksdb_options_set_periodic_compaction_seconds(self.inner, secs);
1614        }
1615    }
1616
1617    /// This option has different meanings for different compaction styles:
1618    ///
1619    /// Leveled: Non-bottom-level files with all keys older than TTL will go
1620    ///    through the compaction process. This usually happens in a cascading
1621    ///    way so that those entries will be compacted to bottommost level/file.
1622    ///    The feature is used to remove stale entries that have been deleted or
1623    ///    updated from the file system.
1624    ///
1625    /// FIFO: Files with all keys older than TTL will be deleted. TTL is only
1626    ///    supported if option max_open_files is set to -1.
1627    ///
1628    /// Universal: users should only set the option `periodic_compaction_seconds`
1629    ///    instead. For backward compatibility, this option has the same
1630    ///    meaning as `periodic_compaction_seconds`. See more in comments for
1631    ///    `periodic_compaction_seconds` on the interaction between these two
1632    ///    options.
1633    ///
1634    /// This option only supports block based table format for any compaction
1635    /// style.
1636    ///
1637    /// unit: seconds. Ex: 1 day = 1 * 24 * 60 * 60
1638    /// 0 means disabling.
1639    /// UINT64_MAX - 1 (0xfffffffffffffffe) is a special flag that lets RocksDB
1640    /// pick the default.
1641    ///
1642    /// Default: 30 days if using block based table. 0 (disable) otherwise.
1643    ///
1644    /// Dynamically changeable
1645    /// Note that dynamically changing this option only works for leveled and FIFO
1646    /// compaction. For universal compaction, dynamically changing this option has
1647    /// no effect; users should dynamically change `periodic_compaction_seconds`
1648    /// instead.
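    ///
    /// # Examples
    ///
    /// A minimal example setting a one-day TTL, following the unit note above:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_ttl(24 * 60 * 60);
    /// ```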
1649    pub fn set_ttl(&mut self, secs: u64) {
1650        unsafe {
1651            ffi::rocksdb_options_set_ttl(self.inner, secs);
1652        }
1653    }
1654
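    /// Sets a merge operator that uses `full_merge_fn` for both full and partial
    /// merges, which is appropriate when the merge function is associative.
    ///
    /// # Examples
    ///
    /// A sketch of a concatenating merge operator; it assumes `MergeOperands` is
    /// re-exported at the crate root and is iterable, as in upstream rust-rocksdb:
    ///
    /// ```
    /// use rust_rocksdb::{MergeOperands, Options};
    ///
    /// fn concat(
    ///     _key: &[u8],
    ///     existing: Option<&[u8]>,
    ///     operands: &MergeOperands,
    /// ) -> Option<Vec<u8>> {
    ///     // Start from the existing value (if any) and append every operand.
    ///     let mut result = existing.map(|v| v.to_vec()).unwrap_or_default();
    ///     for op in operands {
    ///         result.extend_from_slice(op);
    ///     }
    ///     Some(result)
    /// }
    ///
    /// let mut opts = Options::default();
    /// opts.set_merge_operator_associative("concat", concat);
    /// ```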
1655    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
1656        &mut self,
1657        name: impl CStrLike,
1658        full_merge_fn: F,
1659    ) {
1660        let cb = Box::new(MergeOperatorCallback {
1661            name: name.into_c_string().unwrap(),
1662            full_merge_fn: full_merge_fn.clone(),
1663            partial_merge_fn: full_merge_fn,
1664        });
1665
1666        unsafe {
1667            let mo = ffi::rocksdb_mergeoperator_create(
1668                Box::into_raw(cb).cast::<c_void>(),
1669                Some(merge_operator::destructor_callback::<F, F>),
1670                Some(full_merge_callback::<F, F>),
1671                Some(partial_merge_callback::<F, F>),
1672                Some(merge_operator::delete_callback),
1673                Some(merge_operator::name_callback::<F, F>),
1674            );
1675            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1676        }
1677    }
1678
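    /// Sets a merge operator with distinct full and partial merge functions:
    /// `full_merge_fn` combines an existing value (if any) with the operands, while
    /// `partial_merge_fn` combines operands with each other. See the
    /// [Merge Operator wiki](https://github.com/facebook/rocksdb/wiki/Merge-Operator)
    /// for the exact semantics. If the merge function is associative, consider
    /// [`set_merge_operator_associative`](#method.set_merge_operator_associative) instead.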
1679    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
1680        &mut self,
1681        name: impl CStrLike,
1682        full_merge_fn: F,
1683        partial_merge_fn: PF,
1684    ) {
1685        let cb = Box::new(MergeOperatorCallback {
1686            name: name.into_c_string().unwrap(),
1687            full_merge_fn,
1688            partial_merge_fn,
1689        });
1690
1691        unsafe {
1692            let mo = ffi::rocksdb_mergeoperator_create(
1693                Box::into_raw(cb).cast::<c_void>(),
1694                Some(merge_operator::destructor_callback::<F, PF>),
1695                Some(full_merge_callback::<F, PF>),
1696                Some(partial_merge_callback::<F, PF>),
1697                Some(merge_operator::delete_callback),
1698                Some(merge_operator::name_callback::<F, PF>),
1699            );
1700            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1701        }
1702    }
1703
1704    #[deprecated(
1705        since = "0.5.0",
1706        note = "add_merge_operator has been renamed to set_merge_operator"
1707    )]
1708    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
1709        self.set_merge_operator_associative(name, merge_fn);
1710    }
1711
1712    /// Sets a compaction filter used to determine if entries should be kept, changed,
1713    /// or removed during compaction.
1714    ///
1715    /// An example use case is to remove entries with an expired TTL.
1716    ///
1717    /// If you take a snapshot of the database, only values written since the last
1718    /// snapshot will be passed through the compaction filter.
1719    ///
1720    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
1721    /// simultaneously.
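    ///
    /// # Examples
    ///
    /// A sketch that removes keys prefixed with `_` during compaction; it assumes
    /// the `compaction_filter::Decision` type is publicly exposed, as in upstream
    /// rust-rocksdb:
    ///
    /// ```
    /// use rust_rocksdb::compaction_filter::Decision;
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_filter("strip_internal", |_level, key, _value| {
    ///     // Drop "internal" entries, keep everything else unchanged.
    ///     if key.starts_with(b"_") {
    ///         Decision::Remove
    ///     } else {
    ///         Decision::Keep
    ///     }
    /// });
    /// ```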
1722    pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
1723    where
1724        F: CompactionFilterFn + Send + 'static,
1725    {
1726        let cb = Box::new(CompactionFilterCallback {
1727            name: name.into_c_string().unwrap(),
1728            filter_fn,
1729        });
1730
1731        unsafe {
1732            let cf = ffi::rocksdb_compactionfilter_create(
1733                Box::into_raw(cb).cast::<c_void>(),
1734                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
1735                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
1736                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
1737            );
1738            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
1739        }
1740    }
1741
1742    /// This is a factory that provides compaction filter objects which allow
1743    /// an application to modify/delete a key-value during background compaction.
1744    ///
1745    /// A new filter will be created on each compaction run.  If multithreaded
1746    /// compaction is being used, each created CompactionFilter will only be used
1747    /// from a single thread and so does not need to be thread-safe.
1748    ///
1749    /// Default: nullptr
1750    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
1751    where
1752        F: CompactionFilterFactory + 'static,
1753    {
1754        let factory = Box::new(factory);
1755
1756        unsafe {
1757            let cff = ffi::rocksdb_compactionfilterfactory_create(
1758                Box::into_raw(factory).cast::<c_void>(),
1759                Some(compaction_filter_factory::destructor_callback::<F>),
1760                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
1761                Some(compaction_filter_factory::name_callback::<F>),
1762            );
1763
1764            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
1765        }
1766    }
1767
1768    /// Sets the comparator used to define the order of keys in the table.
1769    /// Default: a comparator that uses lexicographic byte-wise ordering
1770    ///
1771    /// The client must ensure that the comparator supplied here has the same
1772    /// name and orders keys *exactly* the same as the comparator provided to
1773    /// previous open calls on the same DB.
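    ///
    /// # Examples
    ///
    /// A sketch of a reverse byte-wise comparator:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Compare in the opposite direction of the default byte-wise ordering.
    /// opts.set_comparator("reverse_bytewise", Box::new(|a: &[u8], b: &[u8]| b.cmp(a)));
    /// ```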
1774    pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
1775        let cb = Box::new(ComparatorCallback {
1776            name: name.into_c_string().unwrap(),
1777            compare_fn,
1778        });
1779
1780        unsafe {
1781            let cmp = ffi::rocksdb_comparator_create(
1782                Box::into_raw(cb).cast::<c_void>(),
1783                Some(ComparatorCallback::destructor_callback),
1784                Some(ComparatorCallback::compare_callback),
1785                Some(ComparatorCallback::name_callback),
1786            );
1787            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1788        }
1789    }
1790
1791    /// Sets a timestamp-aware comparator, used to define the order of keys in the table
1792    /// while taking the timestamp into consideration.
1793    /// Find more information on timestamp-aware comparators [here](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp).
1794    ///
1795    /// The client must ensure that the comparator supplied here has the same
1796    /// name and orders keys *exactly* the same as the comparator provided to
1797    /// previous open calls on the same DB.
1798    pub fn set_comparator_with_ts(
1799        &mut self,
1800        name: impl CStrLike,
1801        timestamp_size: usize,
1802        compare_fn: Box<CompareFn>,
1803        compare_ts_fn: Box<CompareTsFn>,
1804        compare_without_ts_fn: Box<CompareWithoutTsFn>,
1805    ) {
1806        let cb = Box::new(ComparatorWithTsCallback {
1807            name: name.into_c_string().unwrap(),
1808            compare_fn,
1809            compare_ts_fn,
1810            compare_without_ts_fn,
1811        });
1812
1813        unsafe {
1814            let cmp = ffi::rocksdb_comparator_with_ts_create(
1815                Box::into_raw(cb).cast::<c_void>(),
1816                Some(ComparatorWithTsCallback::destructor_callback),
1817                Some(ComparatorWithTsCallback::compare_callback),
1818                Some(ComparatorWithTsCallback::compare_ts_callback),
1819                Some(ComparatorWithTsCallback::compare_without_ts_callback),
1820                Some(ComparatorWithTsCallback::name_callback),
1821                timestamp_size,
1822            );
1823            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1824        }
1825    }
1826
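    /// Sets the prefix extractor (RocksDB's `prefix_extractor` option), which
    /// defines the key prefixes used by prefix seek and prefix bloom filters.
    ///
    /// # Examples
    ///
    /// A sketch using a fixed-length prefix of three bytes:
    ///
    /// ```
    /// use rust_rocksdb::{Options, SliceTransform};
    ///
    /// let mut opts = Options::default();
    /// opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(3));
    /// ```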
1827    pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
1828        unsafe {
1829            ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
1830        }
1831    }
1832
1833    /// Use this if you don't need to keep the data sorted, i.e. you'll never use
1834    /// an iterator, only Put() and Get() API calls.
1836    pub fn optimize_for_point_lookup(&mut self, block_cache_size_mb: u64) {
1837        unsafe {
1838            ffi::rocksdb_options_optimize_for_point_lookup(self.inner, block_cache_size_mb);
1839        }
1840    }
1841
1842    /// Sets the optimize_filters_for_hits flag
1843    ///
1844    /// Default: `false`
1845    ///
1846    /// # Examples
1847    ///
1848    /// ```
1849    /// use rust_rocksdb::Options;
1850    ///
1851    /// let mut opts = Options::default();
1852    /// opts.set_optimize_filters_for_hits(true);
1853    /// ```
1854    pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
1855        unsafe {
1856            ffi::rocksdb_options_set_optimize_filters_for_hits(
1857                self.inner,
1858                c_int::from(optimize_for_hits),
1859            );
1860        }
1861    }
1862
1863    /// Sets the periodicity at which obsolete files get deleted.
1864    ///
1865    /// Files that go out of scope through the compaction
1866    /// process will still be deleted automatically on every compaction,
1867    /// regardless of this setting.
1868    ///
1869    /// Default: 6 hours
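    ///
    /// # Examples
    ///
    /// A minimal example expressing the six-hour default in microseconds:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_delete_obsolete_files_period_micros(6 * 60 * 60 * 1_000_000);
    /// ```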
1870    pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
1871        unsafe {
1872            ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
1873        }
1874    }
1875
1876    /// Prepare the DB for bulk loading.
1877    ///
1878    /// All data will be in level 0 without any automatic compaction.
1879    /// It's recommended to manually call CompactRange(NULL, NULL) before reading
1880    /// from the database, because otherwise the read can be very slow.
1881    pub fn prepare_for_bulk_load(&mut self) {
1882        unsafe {
1883            ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
1884        }
1885    }
1886
1887    /// Sets the number of open files that can be used by the DB. You may need to
1888    /// increase this if your database has a large working set. Value `-1` means
1889    /// files opened are always kept open. You can estimate number of files based
1890    /// on target_file_size_base and target_file_size_multiplier for level-based
1891    /// compaction. For universal-style compaction, you can usually set it to `-1`.
1892    ///
1893    /// Default: `-1`
1894    ///
1895    /// # Examples
1896    ///
1897    /// ```
1898    /// use rust_rocksdb::Options;
1899    ///
1900    /// let mut opts = Options::default();
1901    /// opts.set_max_open_files(10);
1902    /// ```
1903    pub fn set_max_open_files(&mut self, nfiles: c_int) {
1904        unsafe {
1905            ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
1906        }
1907    }
1908
1909    /// If max_open_files is -1, DB will open all files on DB::Open(). You can
1910    /// use this option to increase the number of threads used to open the files.
1911    /// Default: 16
1912    pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
1913        unsafe {
1914            ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
1915        }
1916    }
1917
1918    /// By default, writes to stable storage use fdatasync (on platforms
1919    /// where this function is available). If this option is true,
1920    /// fsync is used instead.
1921    ///
1922    /// fsync and fdatasync are equally safe for our purposes and fdatasync is
1923    /// faster, so it is rarely necessary to set this option. It is provided
1924    /// as a workaround for kernel/filesystem bugs, such as one that affected
1925    /// fdatasync with ext4 in kernel versions prior to 3.7.
1926    ///
1927    /// Default: `false`
1928    ///
1929    /// # Examples
1930    ///
1931    /// ```
1932    /// use rust_rocksdb::Options;
1933    ///
1934    /// let mut opts = Options::default();
1935    /// opts.set_use_fsync(true);
1936    /// ```
1937    pub fn set_use_fsync(&mut self, useit: bool) {
1938        unsafe {
1939            ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
1940        }
1941    }
1942
1943    /// Returns the value of the `use_fsync` option.
1944    pub fn get_use_fsync(&self) -> bool {
1945        let val = unsafe { ffi::rocksdb_options_get_use_fsync(self.inner) };
1946        val != 0
1947    }
1948
1949    /// Specifies the absolute info LOG dir.
1950    ///
1951    /// If it is empty, the log files will be in the same dir as data.
1952    /// If it is non-empty, the log files will be in the specified dir,
1953    /// and the db data dir's absolute path will be used as the log file
1954    /// name's prefix.
1955    ///
1956    /// Default: empty
1957    pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
1958        let p = to_cpath(path).unwrap();
1959        unsafe {
1960            ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
1961        }
1962    }
1963
1964    /// Specifies the log level.
1965    /// Consider the `LogLevel` enum for a list of possible levels.
1966    ///
1967    /// Default: Info
1968    ///
1969    /// # Examples
1970    ///
1971    /// ```
1972    /// use rust_rocksdb::{Options, LogLevel};
1973    ///
1974    /// let mut opts = Options::default();
1975    /// opts.set_log_level(LogLevel::Warn);
1976    /// ```
1977    pub fn set_log_level(&mut self, level: LogLevel) {
1978        unsafe {
1979            ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
1980        }
1981    }
1982
1983    /// Allows OS to incrementally sync files to disk while they are being
1984    /// written, asynchronously, in the background. This operation can be used
1985    /// to smooth out write I/Os over time. Users shouldn't rely on it for
1986    /// persistence guarantees.
1987    /// Issue one request for every bytes_per_sync written. `0` turns it off.
1988    ///
1989    /// Default: `0`
1990    ///
1991    /// You may consider using a rate_limiter to regulate the write rate to the device.
1992    /// When the rate limiter is enabled, it automatically sets bytes_per_sync
1993    /// to 1MB.
1994    ///
1995    /// This option applies to table files.
1996    ///
1997    /// # Examples
1998    ///
1999    /// ```
2000    /// use rust_rocksdb::Options;
2001    ///
2002    /// let mut opts = Options::default();
2003    /// opts.set_bytes_per_sync(1024 * 1024);
2004    /// ```
2005    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
2006        unsafe {
2007            ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
2008        }
2009    }
2010
2011    /// Same as bytes_per_sync, but applies to WAL files.
2012    ///
2013    /// Default: 0, turned off
2014    ///
2015    /// Dynamically changeable through SetDBOptions() API.
2016    pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
2017        unsafe {
2018            ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
2019        }
2020    }
2021
2022    /// Sets the maximum buffer size that is used by WritableFileWriter.
2023    ///
2024    /// On Windows, we need to maintain an aligned buffer for writes.
2025    /// We allow the buffer to grow until its size hits the limit in buffered
2026    /// IO and fix the buffer size when using direct IO to ensure alignment of
2027    /// write requests if the logical sector size is unusual.
2028    ///
2029    /// Default: 1024 * 1024 (1 MB)
2030    ///
2031    /// Dynamically changeable through SetDBOptions() API.
2032    pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
2033        unsafe {
2034            ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
2035        }
2036    }
2037
2038    /// If true, allow multi-writers to update mem tables in parallel.
2039    /// Only some memtable factories support concurrent writes; currently it
2040    /// is implemented only for SkipListFactory.  Concurrent memtable writes
2041    /// are not compatible with inplace_update_support or filter_deletes.
2042    /// It is strongly recommended to set enable_write_thread_adaptive_yield
2043    /// if you are going to use this feature.
2044    ///
2045    /// Default: true
2046    ///
2047    /// # Examples
2048    ///
2049    /// ```
2050    /// use rust_rocksdb::Options;
2051    ///
2052    /// let mut opts = Options::default();
2053    /// opts.set_allow_concurrent_memtable_write(false);
2054    /// ```
2055    pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
2056        unsafe {
2057            ffi::rocksdb_options_set_allow_concurrent_memtable_write(
2058                self.inner,
2059                c_uchar::from(allow),
2060            );
2061        }
2062    }
2063
2064    /// If true, threads synchronizing with the write batch group leader will wait for up to
2065    /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
2066    /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
2067    /// is enabled.
2068    ///
2069    /// Default: true
2070    pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
2071        unsafe {
2072            ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
2073                self.inner,
2074                c_uchar::from(enabled),
2075            );
2076        }
2077    }
2078
2079    /// Limits how many keys with the same user-key an iterator's Next() will sequentially skip over.
2080    ///
2081    /// This number specifies the number of keys (with the same userkey)
2082    /// that will be sequentially skipped before a reseek is issued.
2083    ///
2084    /// Default: 8
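    ///
    /// # Examples
    ///
    /// A minimal example doubling the default:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_sequential_skip_in_iterations(16);
    /// ```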
2085    pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
2086        unsafe {
2087            ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
2088        }
2089    }
2090
2091    /// Enable direct I/O mode for reads.
2092    /// This may or may not improve performance, depending on the use case.
2093    ///
2094    /// Files will be opened in "direct I/O" mode
2095    /// which means that data read from the disk will not be cached or
2096    /// buffered. The hardware buffer of the devices may however still
2097    /// be used. Memory mapped files are not impacted by these parameters.
2098    ///
2099    /// Default: false
2100    ///
2101    /// # Examples
2102    ///
2103    /// ```
2104    /// use rust_rocksdb::Options;
2105    ///
2106    /// let mut opts = Options::default();
2107    /// opts.set_use_direct_reads(true);
2108    /// ```
2109    pub fn set_use_direct_reads(&mut self, enabled: bool) {
2110        unsafe {
2111            ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
2112        }
2113    }
2114
2115    /// Enable direct I/O mode for flush and compaction
2116    ///
2117    /// Files will be opened in "direct I/O" mode
2118    /// which means that data written to the disk will not be cached or
2119    /// buffered. The hardware buffer of the devices may however still
2120    /// be used. Memory mapped files are not impacted by these parameters.
2121    /// This may or may not improve performance, depending on the use case.
2122    ///
2123    /// Default: false
2124    ///
2125    /// # Examples
2126    ///
2127    /// ```
2128    /// use rust_rocksdb::Options;
2129    ///
2130    /// let mut opts = Options::default();
2131    /// opts.set_use_direct_io_for_flush_and_compaction(true);
2132    /// ```
2133    pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
2134        unsafe {
2135            ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
2136                self.inner,
2137                c_uchar::from(enabled),
2138            );
2139        }
2140    }
2141
2142    /// If true, file descriptors are opened close-on-exec, so child processes do not inherit the database's open files.
2143    ///
2144    /// Default: true
2145    pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
2146        unsafe {
2147            ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
2148        }
2149    }
2150
2151    /// Hints to the OS that it should not buffer disk I/O. Enabling this
2152    /// parameter may improve performance but increases pressure on the
2153    /// system cache.
2154    ///
2155    /// The exact behavior of this parameter is platform dependent.
2156    ///
2157    /// On POSIX systems, after RocksDB reads data from disk it will
2158    /// mark the pages as "unneeded". The operating system may or may not
2159    /// evict these pages from memory, reducing pressure on the system
2160    /// cache. If the disk block is requested again this can result in
2161    /// additional disk I/O.
2162    ///
2163    /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
2164    /// which means that data read from the disk will not be cached or
2165    /// buffered. The hardware buffer of the devices may however still
2166    /// be used. Memory mapped files are not impacted by this parameter.
2167    ///
2168    /// Default: true
2169    ///
2170    /// # Examples
2171    ///
2172    /// ```
2173    /// use rust_rocksdb::Options;
2174    ///
2175    /// let mut opts = Options::default();
2176    /// #[allow(deprecated)]
2177    /// opts.set_allow_os_buffer(false);
2178    /// ```
2179    #[deprecated(
2180        since = "0.7.0",
2181        note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
2182    )]
2183    pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
2184        self.set_use_direct_reads(!is_allow);
2185        self.set_use_direct_io_for_flush_and_compaction(!is_allow);
2186    }
2187
2188    /// Sets the number of shards used for table cache.
2189    ///
2190    /// Default: `6`
2191    ///
2192    /// # Examples
2193    ///
2194    /// ```
2195    /// use rust_rocksdb::Options;
2196    ///
2197    /// let mut opts = Options::default();
2198    /// opts.set_table_cache_num_shard_bits(4);
2199    /// ```
2200    pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
2201        unsafe {
2202            ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
2203        }
2204    }
2205
2206    /// By default, target_file_size_multiplier is 1, which means
2207    /// files in different levels will have similar sizes.
2208    ///
2209    /// Dynamically changeable through SetOptions() API
2210    pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
2211        unsafe {
2212            ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
2213        }
2214    }
2215
2216    /// Sets the minimum number of write buffers that will be merged
2217    /// before writing to storage.  If set to `1`, then
2218    /// all write buffers are flushed to L0 as individual files, which increases
2219    /// read amplification because a get request has to check all of these
2220    /// files. Also, an in-memory merge may result in writing less
2221    /// data to storage if there are duplicate records in each of these
2222    /// individual write buffers.
2223    ///
2224    /// Default: `1`
2225    ///
2226    /// # Examples
2227    ///
2228    /// ```
2229    /// use rust_rocksdb::Options;
2230    ///
2231    /// let mut opts = Options::default();
2232    /// opts.set_min_write_buffer_number(2);
2233    /// ```
2234    pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
2235        unsafe {
2236            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
2237        }
2238    }
2239
2240    /// Sets the maximum number of write buffers that are built up in memory.
2241    /// The default and the minimum number is 2, so that when 1 write buffer
2242    /// is being flushed to storage, new writes can continue to the other
2243    /// write buffer.
2244    /// If max_write_buffer_number > 3, writing will be slowed down to
2245    /// options.delayed_write_rate if we are writing to the last write buffer
2246    /// allowed.
2247    ///
2248    /// Default: `2`
2249    ///
2250    /// # Examples
2251    ///
2252    /// ```
2253    /// use rust_rocksdb::Options;
2254    ///
2255    /// let mut opts = Options::default();
2256    /// opts.set_max_write_buffer_number(4);
2257    /// ```
2258    pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
2259        unsafe {
2260            ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
2261        }
2262    }
2263
2264    /// Sets the amount of data to build up in memory (backed by an unsorted log
2265    /// on disk) before converting to a sorted on-disk file.
2266    ///
2267    /// Larger values increase performance, especially during bulk loads.
2268    /// Up to max_write_buffer_number write buffers may be held in memory
2269    /// at the same time,
2270    /// so you may wish to adjust this parameter to control memory usage.
2271    /// Also, a larger write buffer will result in a longer recovery time
2272    /// the next time the database is opened.
2273    ///
2274    /// Note that write_buffer_size is enforced per column family.
2275    /// See db_write_buffer_size for sharing memory across column families.
2276    ///
2277    /// Default: `0x4000000` (64MiB)
2278    ///
2279    /// Dynamically changeable through SetOptions() API
2280    ///
2281    /// # Examples
2282    ///
2283    /// ```
2284    /// use rust_rocksdb::Options;
2285    ///
2286    /// let mut opts = Options::default();
2287    /// opts.set_write_buffer_size(128 * 1024 * 1024);
2288    /// ```
2289    pub fn set_write_buffer_size(&mut self, size: usize) {
2290        unsafe {
2291            ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
2292        }
2293    }
2294
2295    /// Amount of data to build up in memtables across all column
2296    /// families before writing to disk.
2297    ///
2298    /// This is distinct from write_buffer_size, which enforces a limit
2299    /// for a single memtable.
2300    ///
2301    /// This feature is disabled by default. Specify a non-zero value
2302    /// to enable it.
2303    ///
2304    /// Default: 0 (disabled)
2305    ///
2306    /// # Examples
2307    ///
2308    /// ```
2309    /// use rust_rocksdb::Options;
2310    ///
2311    /// let mut opts = Options::default();
2312    /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
2313    /// ```
2314    pub fn set_db_write_buffer_size(&mut self, size: usize) {
2315        unsafe {
2316            ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
2317        }
2318    }
2319
2320    /// Control maximum total data size for a level.
2321    /// max_bytes_for_level_base is the max total for level-1.
2322    /// Maximum number of bytes for level L can be calculated as
2323    /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
2324    /// For example, if max_bytes_for_level_base is 200MB, and if
2325    /// max_bytes_for_level_multiplier is 10, total data size for level-1
2326    /// will be 200MB, total file size for level-2 will be 2GB,
2327    /// and total file size for level-3 will be 20GB.
2328    ///
2329    /// Default: `0x10000000` (256MiB).
2330    ///
2331    /// Dynamically changeable through SetOptions() API
2332    ///
2333    /// # Examples
2334    ///
2335    /// ```
2336    /// use rust_rocksdb::Options;
2337    ///
2338    /// let mut opts = Options::default();
2339    /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
2340    /// ```
2341    pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
2342        unsafe {
2343            ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
2344        }
2345    }
2346
2347    /// Default: `10`
2348    ///
2349    /// # Examples
2350    ///
2351    /// ```
2352    /// use rust_rocksdb::Options;
2353    ///
2354    /// let mut opts = Options::default();
2355    /// opts.set_max_bytes_for_level_multiplier(4.0);
2356    /// ```
2357    pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
2358        unsafe {
2359            ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
2360        }
2361    }
2362
2363    /// The manifest file is rolled over on reaching this limit.
2364    /// The older manifest file will be deleted.
2365    /// The default value is MAX_INT so that roll-over does not take place.
2366    ///
2367    /// # Examples
2368    ///
2369    /// ```
2370    /// use rust_rocksdb::Options;
2371    ///
2372    /// let mut opts = Options::default();
2373    /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
2374    /// ```
2375    pub fn set_max_manifest_file_size(&mut self, size: usize) {
2376        unsafe {
2377            ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
2378        }
2379    }
2380
2381    /// Sets the target file size for compaction.
2382    /// target_file_size_base is per-file size for level-1.
2383    /// Target file size for level L can be calculated by
2384    /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
2385    /// For example, if target_file_size_base is 2MB and
2386    /// target_file_size_multiplier is 10, then each file on level-1 will
2387    /// be 2MB, and each file on level 2 will be 20MB,
2388    /// and each file on level-3 will be 200MB.
2389    ///
2390    /// Default: `0x4000000` (64MiB)
2391    ///
2392    /// Dynamically changeable through SetOptions() API
2393    ///
2394    /// # Examples
2395    ///
2396    /// ```
2397    /// use rust_rocksdb::Options;
2398    ///
2399    /// let mut opts = Options::default();
2400    /// opts.set_target_file_size_base(128 * 1024 * 1024);
2401    /// ```
2402    pub fn set_target_file_size_base(&mut self, size: u64) {
2403        unsafe {
2404            ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
2405        }
2406    }
2407
2408    /// Sets the minimum number of write buffers that will be merged together
2409    /// before writing to storage.  If set to `1`, then
2410    /// all write buffers are flushed to L0 as individual files, which increases
2411    /// read amplification because a get request has to check all of these
2412    /// files. Also, an in-memory merge may result in writing less
2413    /// data to storage if there are duplicate records in each of these
2414    /// individual write buffers.
2415    ///
2416    /// Default: `1`
2417    ///
2418    /// # Examples
2419    ///
2420    /// ```
2421    /// use rust_rocksdb::Options;
2422    ///
2423    /// let mut opts = Options::default();
2424    /// opts.set_min_write_buffer_number_to_merge(2);
2425    /// ```
2426    pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
2427        unsafe {
2428            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
2429        }
2430    }
2431
2432    /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
2433    /// level-0 compaction will not be triggered by the number of files at all.
2434    ///
2435    /// Default: `4`
2436    ///
2437    /// Dynamically changeable through SetOptions() API
2438    ///
2439    /// # Examples
2440    ///
2441    /// ```
2442    /// use rust_rocksdb::Options;
2443    ///
2444    /// let mut opts = Options::default();
2445    /// opts.set_level_zero_file_num_compaction_trigger(8);
2446    /// ```
2447    pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2448        unsafe {
2449            ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2450        }
2451    }
2452
2453    /// Sets the soft limit on the number of level-0 files. We start slowing down writes at this
2454    /// point. A value < `0` means that no write slowdown will be triggered by the
2455    /// number of files in level-0.
2456    ///
2457    /// Default: `20`
2458    ///
2459    /// Dynamically changeable through SetOptions() API
2460    ///
2461    /// # Examples
2462    ///
2463    /// ```
2464    /// use rust_rocksdb::Options;
2465    ///
2466    /// let mut opts = Options::default();
2467    /// opts.set_level_zero_slowdown_writes_trigger(10);
2468    /// ```
2469    pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2470        unsafe {
2471            ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2472        }
2473    }
2474
2475    /// Sets the maximum number of level-0 files.  We stop writes at this point.
2476    ///
2477    /// Default: `24`
2478    ///
2479    /// Dynamically changeable through SetOptions() API
2480    ///
2481    /// # Examples
2482    ///
2483    /// ```
2484    /// use rust_rocksdb::Options;
2485    ///
2486    /// let mut opts = Options::default();
2487    /// opts.set_level_zero_stop_writes_trigger(48);
2488    /// ```
2489    pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2490        unsafe {
2491            ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2492        }
2493    }
2494
2495    /// Sets the compaction style.
2496    ///
2497    /// Default: DBCompactionStyle::Level
2498    ///
2499    /// # Examples
2500    ///
2501    /// ```
2502    /// use rust_rocksdb::{Options, DBCompactionStyle};
2503    ///
2504    /// let mut opts = Options::default();
2505    /// opts.set_compaction_style(DBCompactionStyle::Universal);
2506    /// ```
2507    pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2508        unsafe {
2509            ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2510        }
2511    }
2512
2513    /// Sets the options needed to support Universal Style compactions.
2514    pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2515        unsafe {
2516            ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2517        }
2518    }
2519
2520    /// Sets the options for FIFO compaction style.
2521    pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2522        unsafe {
2523            ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2524        }
2525    }
2526
2527    /// Setting unordered_write to true trades higher write throughput for
2528    /// relaxing the immutability guarantee of snapshots. This violates the
2529    /// repeatability one expects from ::Get from a snapshot, as well as
2530    /// ::MultiGet and Iterator's consistent-point-in-time view property.
2531    /// If the application cannot tolerate the relaxed guarantees, it can implement
2532    /// its own mechanisms to work around that and yet benefit from the higher
2533    /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2534    /// two_write_queues=true is one way to achieve immutable snapshots despite
2535    /// unordered_write.
2536    ///
2537    /// By default, i.e., when it is false, rocksdb does not advance the sequence
2538    /// number for new snapshots unless all the writes with lower sequence numbers
2539    /// are already finished. This provides the immutability that we expect from
2540    /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2541    /// snapshots, the snapshot immutability results into Iterator and MultiGet
2542    /// offering consistent-point-in-time view. If set to true, although
2543    /// Read-Your-Own-Write property is still provided, the snapshot immutability
2544    /// property is relaxed: writes issued after the snapshot is obtained (with
2545    /// larger sequence numbers) will still not be visible to reads from that
2546    /// snapshot; however, there might still be pending writes (with lower sequence
2547    /// numbers) that will change the state visible to the snapshot once they
2548    /// land in the memtable.
2549    ///
2550    /// Default: false
2551    pub fn set_unordered_write(&mut self, unordered: bool) {
2552        unsafe {
2553            ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2554        }
2555    }
2556
2557    /// Sets the maximum number of threads that will
2558    /// concurrently perform a compaction job by breaking it into multiple,
2559    /// smaller ones that are run simultaneously.
2560    ///
2561    /// Default: 1 (i.e. no subcompactions)
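    ///
    /// # Examples
    ///
    /// A minimal example (the value is illustrative, not a recommendation):
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_subcompactions(4);
    /// ```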
2562    pub fn set_max_subcompactions(&mut self, num: u32) {
2563        unsafe {
2564            ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2565        }
2566    }
2567
2568    /// Sets the maximum number of concurrent background jobs
2569    /// (compactions and flushes).
2570    ///
2571    /// Default: 2
2572    ///
2573    /// Dynamically changeable through SetDBOptions() API.
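    ///
    /// # Examples
    ///
    /// A minimal example (the value is illustrative, not a recommendation):
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_background_jobs(4);
    /// ```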
2574    pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2575        unsafe {
2576            ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2577        }
2578    }
2579
2580    /// Sets the maximum number of concurrent background compaction jobs, submitted to
2581    /// the default LOW priority thread pool.
2582    /// We first try to schedule compactions based on
2583    /// `base_background_compactions`. If the compactions cannot catch up, we
2584    /// will increase the number of compaction threads up to
2585    /// `max_background_compactions`.
2586    ///
2587    /// If you're increasing this, also consider increasing the number of threads in
2588    /// the LOW priority thread pool. For more information, see
2589    /// Env::SetBackgroundThreads
2590    ///
2591    /// Default: `1`
2592    ///
2593    /// # Examples
2594    ///
2595    /// ```
2596    /// use rust_rocksdb::Options;
2597    ///
2598    /// let mut opts = Options::default();
2599    /// #[allow(deprecated)]
2600    /// opts.set_max_background_compactions(2);
2601    /// ```
2602    #[deprecated(
2603        since = "0.15.0",
2604        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2605    )]
2606    pub fn set_max_background_compactions(&mut self, n: c_int) {
2607        unsafe {
2608            ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2609        }
2610    }
2611
2612    /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2613    /// the HIGH priority thread pool.
2614    ///
2615    /// By default, all background jobs (major compaction and memtable flush) go
2616    /// to the LOW priority pool. If this option is set to a positive number,
2617    /// memtable flush jobs will be submitted to the HIGH priority pool.
2618    /// It is important when the same Env is shared by multiple db instances.
2619    /// Without a separate pool, long running major compaction jobs could
2620    /// potentially block memtable flush jobs of other db instances, leading to
2621    /// unnecessary Put stalls.
2622    ///
2623    /// If you're increasing this, also consider increasing the number of threads in
2624    /// the HIGH priority thread pool. For more information, see
2625    /// Env::SetBackgroundThreads
2626    ///
2627    /// Default: `1`
2628    ///
2629    /// # Examples
2630    ///
2631    /// ```
2632    /// use rust_rocksdb::Options;
2633    ///
2634    /// let mut opts = Options::default();
2635    /// #[allow(deprecated)]
2636    /// opts.set_max_background_flushes(2);
2637    /// ```
2638    #[deprecated(
2639        since = "0.15.0",
2640        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2641    )]
2642    pub fn set_max_background_flushes(&mut self, n: c_int) {
2643        unsafe {
2644            ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2645        }
2646    }
2647
2648    /// Disables automatic compactions. Manual compactions can still
2649    /// be issued on this column family.
2650    ///
2651    /// Default: `false`
2652    ///
2653    /// Dynamically changeable through SetOptions() API
2654    ///
2655    /// # Examples
2656    ///
2657    /// ```
2658    /// use rust_rocksdb::Options;
2659    ///
2660    /// let mut opts = Options::default();
2661    /// opts.set_disable_auto_compactions(true);
2662    /// ```
2663    pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2664        unsafe {
2665            ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2666        }
2667    }
2668
2669    /// Sets the page size for huge pages used by the arena
2670    /// backing the memtable.
2671    /// If <= 0, it won't allocate from huge pages but from malloc.
2672    /// Users are responsible for reserving huge pages for it to be allocated. For
2673    /// example:
2674    ///      sysctl -w vm.nr_hugepages=20
2675    /// See linux doc Documentation/vm/hugetlbpage.txt
2676    /// If there isn't enough free huge page available, it will fall back to
2677    /// malloc.
2678    ///
2679    /// Dynamically changeable through SetOptions() API
2680    pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2681        unsafe {
2682            ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2683        }
2684    }
2685
2686    /// Sets the maximum number of successive merge operations on a key in the memtable.
2687    ///
2688    /// When a merge operation is added to the memtable and the maximum number of
2689    /// successive merges is reached, the value of the key will be calculated and
2690    /// inserted into the memtable instead of the merge operation. This will
2691    /// ensure that there are never more than max_successive_merges merge
2692    /// operations in the memtable.
2693    ///
2694    /// Default: 0 (disabled)
2695    pub fn set_max_successive_merges(&mut self, num: usize) {
2696        unsafe {
2697            ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2698        }
2699    }
2700
2701    /// Control locality of bloom filter probes to improve cache miss rate.
2702    /// This option only applies to memtable prefix bloom and plaintable
2703    /// prefix bloom. It essentially limits the max number of cache lines each
2704    /// bloom filter check can touch.
2705    ///
2706    /// This optimization is turned off when set to 0. The number should never
2707    /// be greater than the number of probes. This option can boost performance
2708    /// for in-memory workloads but should be used with care since it can cause
2709    /// a higher false positive rate.
2710    ///
2711    /// Default: 0
2712    pub fn set_bloom_locality(&mut self, v: u32) {
2713        unsafe {
2714            ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2715        }
2716    }
2717
2718    /// Enable/disable thread-safe inplace updates.
2719    ///
2720    /// An update is performed in place if:
2721    /// * the key exists in the current memtable
2722    /// * sizeof(new_value) <= sizeof(old_value)
2723    /// * the old_value for that key is a put, i.e. kTypeValue
2724    ///
2725    /// Default: false.
2726    pub fn set_inplace_update_support(&mut self, enabled: bool) {
2727        unsafe {
2728            ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2729        }
2730    }
2731
2732    /// Sets the number of locks used for inplace update.
2733    ///
2734    /// Default: 10000 when inplace_update_support = true, otherwise 0.
2735    pub fn set_inplace_update_locks(&mut self, num: usize) {
2736        unsafe {
2737            ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2738        }
2739    }
2740
2741    /// Different max-size multipliers for different levels.
2742    /// These are multiplied by max_bytes_for_level_multiplier to arrive
2743    /// at the max-size of each level.
2744    ///
2745    /// Default: 1
2746    ///
2747    /// Dynamically changeable through SetOptions() API
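    ///
    /// # Examples
    ///
    /// A sketch with one illustrative entry per level of a seven-level DB:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_bytes_for_level_multiplier_additional(&[1, 1, 1, 2, 2, 4, 4]);
    /// ```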
2748    pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2749        let count = level_values.len();
2750        unsafe {
2751            ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2752                self.inner,
2753                level_values.as_ptr().cast_mut(),
2754                count,
2755            );
2756        }
2757    }
2758
2759    /// If true, then DB::Open() will not fetch and check sizes of all sst files.
2760    /// This may significantly speed up startup if there are many sst files,
2761    /// especially when using non-default Env with expensive GetFileSize().
2762    /// We'll still check that all required sst files exist.
2763    /// If paranoid_checks is false, this option is ignored, and sst files are
2764    /// not checked at all.
2765    ///
2766    /// Default: false
2767    pub fn set_skip_checking_sst_file_sizes_on_db_open(&mut self, value: bool) {
2768        unsafe {
2769            ffi::rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
2770                self.inner,
2771                c_uchar::from(value),
2772            );
2773        }
2774    }
2775
2776    /// The total maximum size (in bytes) of write buffers to maintain in memory,
2777    /// including copies of buffers that have already been flushed. This parameter
2778    /// only affects trimming of flushed buffers and does not affect flushing.
2779    /// This controls the maximum amount of write history that will be available
2780    /// in memory for conflict checking when Transactions are used. The actual
2781    /// size of write history (flushed Memtables) might be higher than this limit
2782    /// if further trimming will reduce write history total size below this
2783    /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2784    /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2785    /// Because trimming the next Memtable of size 20MB will reduce total memory
2786    /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2787    ///
2788    /// When using an OptimisticTransactionDB:
2789    /// If this value is too low, some transactions may fail at commit time due
2790    /// to not being able to determine whether there were any write conflicts.
2791    ///
2792    /// When using a TransactionDB:
2793    /// If Transaction::SetSnapshot is used, TransactionDB will read either
2794    /// in-memory write buffers or SST files to do write-conflict checking.
2795    /// Increasing this value can reduce the number of reads to SST files
2796    /// done for conflict detection.
2797    ///
2798    /// Setting this value to 0 will cause write buffers to be freed immediately
2799    /// after they are flushed. If this value is set to -1,
2800    /// 'max_write_buffer_number * write_buffer_size' will be used.
2801    ///
2802    /// Default:
2803    /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2804    /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2805    /// if it is not explicitly set by the user.  Otherwise, the default is 0.
2806    pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2807        unsafe {
2808            ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2809        }
2810    }
2811
2812    /// By default, a single write thread queue is maintained. The thread that
2813    /// reaches the head of the queue becomes the write batch group leader and is
2814    /// responsible for writing to the WAL and memtable for the batch group.
2815    ///
2816    /// If enable_pipelined_write is true, separate write thread queues are
2817    /// maintained for WAL writes and memtable writes. A write thread first enters
2818    /// the WAL writer queue and then the memtable writer queue. A pending thread on
2819    /// the WAL writer queue thus only has to wait for previous writers to finish
2820    /// their WAL writing, but not the memtable writing. Enabling this feature may
2821    /// improve write throughput and reduce latency of the prepare phase of
2822    /// two-phase commit.
2823    ///
2824    /// Default: false
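    ///
    /// # Examples
    ///
    /// A minimal example:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_enable_pipelined_write(true);
    /// ```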
2825    pub fn set_enable_pipelined_write(&mut self, value: bool) {
2826        unsafe {
2827            ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
2828        }
2829    }
2830
2831    /// Defines the underlying memtable implementation.
2832    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2833    /// Defaults to using a skiplist.
2834    ///
2835    /// # Examples
2836    ///
2837    /// ```
2838    /// use rust_rocksdb::{Options, MemtableFactory};
2839    /// let mut opts = Options::default();
2840    /// let factory = MemtableFactory::HashSkipList {
2841    ///     bucket_count: 1_000_000,
2842    ///     height: 4,
2843    ///     branching_factor: 4,
2844    /// };
2845    ///
2846    /// opts.set_allow_concurrent_memtable_write(false);
2847    /// opts.set_memtable_factory(factory);
2848    /// ```
2849    pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2850        match factory {
2851            MemtableFactory::Vector => unsafe {
2852                ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2853            },
2854            MemtableFactory::HashSkipList {
2855                bucket_count,
2856                height,
2857                branching_factor,
2858            } => unsafe {
2859                ffi::rocksdb_options_set_hash_skip_list_rep(
2860                    self.inner,
2861                    bucket_count,
2862                    height,
2863                    branching_factor,
2864                );
2865            },
2866            MemtableFactory::HashLinkList { bucket_count } => unsafe {
2867                ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2868            },
2869        };
2870    }
2871
2872    pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2873        unsafe {
2874            ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2875        }
2876        self.outlive.block_based = Some(factory.outlive.clone());
2877    }
2878
2879    /// Sets the table factory to a CuckooTableFactory (the default table
2880    /// factory is a block-based table factory that provides a default
2881    /// implementation of TableBuilder and TableReader with default
2882    /// BlockBasedTableOptions).
2883    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2884    /// # Examples
2885    ///
2886    /// ```
2887    /// use rust_rocksdb::{Options, CuckooTableOptions};
2888    ///
2889    /// let mut opts = Options::default();
2890    /// let mut factory_opts = CuckooTableOptions::default();
2891    /// factory_opts.set_hash_ratio(0.8);
2892    /// factory_opts.set_max_search_depth(20);
2893    /// factory_opts.set_cuckoo_block_size(10);
2894    /// factory_opts.set_identity_as_first_hash(true);
2895    /// factory_opts.set_use_module_hash(false);
2896    ///
2897    /// opts.set_cuckoo_table_factory(&factory_opts);
2898    /// ```
2899    pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2900        unsafe {
2901            ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2902        }
2903    }
2904
2905    // This is a factory that provides TableFactory objects.
2906    // Default: a block-based table factory that provides a default
2907    // implementation of TableBuilder and TableReader with default
2908    // BlockBasedTableOptions.
2909    /// Sets the factory as plain table.
2910    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
2911    /// information.
2912    ///
2913    /// # Examples
2914    ///
2915    /// ```
2916    /// use rust_rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
2917    ///
2918    /// let mut opts = Options::default();
2919    /// let factory_opts = PlainTableFactoryOptions {
2920    ///   user_key_length: 0,
2921    ///   bloom_bits_per_key: 20,
2922    ///   hash_table_ratio: 0.75,
2923    ///   index_sparseness: 16,
2924    ///   huge_page_tlb_size: 0,
2925    ///   encoding_type: KeyEncodingType::Plain,
2926    ///   full_scan_mode: false,
2927    ///   store_index_in_file: false,
2928    /// };
2929    ///
2930    /// opts.set_plain_table_factory(&factory_opts);
2931    /// ```
2932    pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2933        unsafe {
2934            ffi::rocksdb_options_set_plain_table_factory(
2935                self.inner,
2936                options.user_key_length,
2937                options.bloom_bits_per_key,
2938                options.hash_table_ratio,
2939                options.index_sparseness,
2940                options.huge_page_tlb_size,
2941                options.encoding_type as c_char,
2942                c_uchar::from(options.full_scan_mode),
2943                c_uchar::from(options.store_index_in_file),
2944            );
2945        }
2946    }
2947
2948    /// Sets the start level to use compression.
2949    pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2950        unsafe {
2951            ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2952        }
2953    }
2954
2955    /// Measure IO stats in compactions and flushes, if `true`.
2956    ///
2957    /// Default: `false`
2958    ///
2959    /// # Examples
2960    ///
2961    /// ```
2962    /// use rust_rocksdb::Options;
2963    ///
2964    /// let mut opts = Options::default();
2965    /// opts.set_report_bg_io_stats(true);
2966    /// ```
2967    pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2968        unsafe {
2969            ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
2970        }
2971    }
2972
2973    /// Once write-ahead logs exceed this size, we will start forcing the flush of
2974    /// column families whose memtables are backed by the oldest live WAL file
2975    /// (i.e. the ones that are causing all the space amplification).
2976    ///
2977    /// Default: `0`
2978    ///
2979    /// # Examples
2980    ///
2981    /// ```
2982    /// use rust_rocksdb::Options;
2983    ///
2984    /// let mut opts = Options::default();
2985    /// // Set max total wal size to 1G.
2986    /// opts.set_max_total_wal_size(1 << 30);
2987    /// ```
2988    pub fn set_max_total_wal_size(&mut self, size: u64) {
2989        unsafe {
2990            ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2991        }
2992    }
2993
2994    /// Recovery mode to control the consistency while replaying WAL.
2995    ///
2996    /// Default: DBRecoveryMode::PointInTime
2997    ///
2998    /// # Examples
2999    ///
3000    /// ```
3001    /// use rust_rocksdb::{Options, DBRecoveryMode};
3002    ///
3003    /// let mut opts = Options::default();
3004    /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
3005    /// ```
3006    pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
3007        unsafe {
3008            ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
3009        }
3010    }
3011
3012    pub fn enable_statistics(&mut self) {
3013        unsafe {
3014            ffi::rocksdb_options_enable_statistics(self.inner);
3015        }
3016    }
3017
3018    pub fn get_statistics(&self) -> Option<String> {
3019        unsafe {
3020            let value = ffi::rocksdb_options_statistics_get_string(self.inner);
3021            if value.is_null() {
3022                return None;
3023            }
3024
3025            // Must have valid UTF-8 format.
3026            let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
3027            ffi::rocksdb_free(value as *mut c_void);
3028            Some(s)
3029        }
3030    }
3031
3032    /// StatsLevel can be used to reduce statistics overhead by skipping certain
3033    /// types of stats in the stats collection process.
3034    pub fn set_statistics_level(&self, level: StatsLevel) {
3035        unsafe { ffi::rocksdb_options_set_statistics_level(self.inner, level as c_int) }
3036    }
3037
3038    /// Returns the value of cumulative db counters if stat collection is enabled.
3039    pub fn get_ticker_count(&self, ticker: Ticker) -> u64 {
3040        unsafe { ffi::rocksdb_options_statistics_get_ticker_count(self.inner, ticker as u32) }
3041    }
3042
3043    /// Gets Histogram data from collected db stats. Requires stats to be enabled.
3044    pub fn get_histogram_data(&self, histogram: Histogram) -> HistogramData {
3045        unsafe {
3046            let data = HistogramData::default();
3047            ffi::rocksdb_options_statistics_get_histogram_data(
3048                self.inner,
3049                histogram as u32,
3050                data.inner,
3051            );
3052            data
3053        }
3054    }
3055
3056    /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
3057    ///
3058    /// Default: `600` (10 mins)
3059    ///
3060    /// # Examples
3061    ///
3062    /// ```
3063    /// use rust_rocksdb::Options;
3064    ///
3065    /// let mut opts = Options::default();
3066    /// opts.set_stats_dump_period_sec(300);
3067    /// ```
3068    pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
3069        unsafe {
3070            ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
3071        }
3072    }
3073
3074    /// If not zero, persist `rocksdb.stats` to RocksDB every `stats_persist_period_sec` seconds.
3075    ///
3076    /// Default: `600` (10 mins)
3077    ///
3078    /// # Examples
3079    ///
3080    /// ```
3081    /// use rust_rocksdb::Options;
3082    ///
3083    /// let mut opts = Options::default();
3084    /// opts.set_stats_persist_period_sec(5);
3085    /// ```
3086    pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
3087        unsafe {
3088            ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
3089        }
3090    }
3091
3092    /// When set to true, reading SST files will opt out of the filesystem's
3093    /// readahead. Setting this to false may improve sequential iteration
3094    /// performance.
3095    ///
3096    /// Default: `true`
3097    pub fn set_advise_random_on_open(&mut self, advise: bool) {
3098        unsafe {
3099            ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
3100        }
3101    }
3102
3103    /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
3104    ///
3105    /// This could reduce context switch when the mutex is not
3106    /// heavily contended. However, if the mutex is hot, we could end up
3107    /// wasting spin time.
3108    ///
3109    /// Default: false
3110    pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
3111        unsafe {
3112            ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
3113        }
3114    }
3115
3116    /// Sets the number of levels for this database.
3117    pub fn set_num_levels(&mut self, n: c_int) {
3118        unsafe {
3119            ffi::rocksdb_options_set_num_levels(self.inner, n);
3120        }
3121    }
3122
3123    /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
3124    /// creates a prefix bloom filter for each memtable with the size of
3125    /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
3126    ///
3127    /// Default: `0`
3128    ///
3129    /// # Examples
3130    ///
3131    /// ```
3132    /// use rust_rocksdb::{Options, SliceTransform};
3133    ///
3134    /// let mut opts = Options::default();
3135    /// let transform = SliceTransform::create_fixed_prefix(10);
3136    /// opts.set_prefix_extractor(transform);
3137    /// opts.set_memtable_prefix_bloom_ratio(0.2);
3138    /// ```
3139    pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
3140        unsafe {
3141            ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
3142        }
3143    }
3144
3145    /// Sets the maximum number of bytes in all compacted files.
3146    /// We try to limit the number of bytes in one compaction to be lower than this
3147    /// threshold, but this is not guaranteed.
3148    ///
3149    /// Value 0 will be sanitized.
3150    ///
3151    /// Default: target_file_size_base * 25
3152    pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
3153        unsafe {
3154            ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
3155        }
3156    }
3157
3158    /// Specifies the absolute path of the directory the
3159    /// write-ahead log (WAL) should be written to.
3160    ///
3161    /// Default: same directory as the database
3162    ///
3163    /// # Examples
3164    ///
3165    /// ```
3166    /// use rust_rocksdb::Options;
3167    ///
3168    /// let mut opts = Options::default();
3169    /// opts.set_wal_dir("/path/to/dir");
3170    /// ```
3171    pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
3172        let p = to_cpath(path).unwrap();
3173        unsafe {
3174            ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
3175        }
3176    }
3177
3178    /// Sets the WAL TTL in seconds.
3179    ///
3180    /// The following two options affect how archived logs will be deleted.
3181    /// 1. If both set to 0, logs will be deleted asap and will not get into
3182    ///    the archive.
3183    /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
3184    ///    WAL files will be checked every 10 min and if total size is greater
3185    ///    than wal_size_limit_mb, they will be deleted starting with the
3186    ///    earliest until size_limit is met. All empty files will be deleted.
3187    /// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
3188    ///    WAL files will be checked every wal_ttl_seconds / 2 and those that
3189    ///    are older than wal_ttl_seconds will be deleted.
3190    /// 4. If both are not 0, WAL files will be checked every 10 min and both
3191    ///    checks will be performed, with the TTL check being applied first.
3192    ///
3193    /// Default: 0
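    ///
    /// # Examples
    ///
    /// A minimal example (the TTL value is illustrative only):
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Keep archived WAL files for up to one day.
    /// opts.set_wal_ttl_seconds(24 * 60 * 60);
    /// ```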
3194    pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
3195        unsafe {
3196            ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
3197        }
3198    }
3199
3200    /// Sets the WAL size limit in MB.
3201    ///
3202    /// If the total size of WAL files is greater than wal_size_limit_mb,
3203    /// they will be deleted starting with the earliest until size_limit is met.
3204    ///
3205    /// Default: 0
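    ///
    /// # Examples
    ///
    /// A minimal example (the limit is illustrative only):
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Cap archived WAL files at roughly 1GB.
    /// opts.set_wal_size_limit_mb(1024);
    /// ```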
3206    pub fn set_wal_size_limit_mb(&mut self, size: u64) {
3207        unsafe {
3208            ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
3209        }
3210    }
3211
3212    /// Sets the number of bytes to preallocate (via fallocate) for manifest files.
3213    ///
3214    /// Default is 4MB, which is reasonable to reduce random IO
3215    /// as well as prevent overallocation for mounts that preallocate
3216    /// large amounts of data (such as xfs's allocsize option).
3217    pub fn set_manifest_preallocation_size(&mut self, size: usize) {
3218        unsafe {
3219            ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
3220        }
3221    }
3222
3223    /// If true, then DB::Open() will not update the statistics used to optimize
3224    /// compaction decisions by loading table properties from many files.
3225    /// Skipping this update improves DB::Open() time, especially in disk environments.
3226    ///
3227    /// Default: false
3228    pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
3229        unsafe {
3230            ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
3231        }
3232    }
3233
3234    /// Specify the maximal number of info log files to be kept.
3235    ///
3236    /// Default: 1000
3237    ///
3238    /// # Examples
3239    ///
3240    /// ```
3241    /// use rust_rocksdb::Options;
3242    ///
3243    /// let mut options = Options::default();
3244    /// options.set_keep_log_file_num(100);
3245    /// ```
3246    pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
3247        unsafe {
3248            ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
3249        }
3250    }
3251
3252    /// Allow the OS to mmap files for writing.
3253    ///
3254    /// Default: false
3255    ///
3256    /// # Examples
3257    ///
3258    /// ```
3259    /// use rust_rocksdb::Options;
3260    ///
3261    /// let mut options = Options::default();
3262    /// options.set_allow_mmap_writes(true);
3263    /// ```
3264    pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
3265        unsafe {
3266            ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
3267        }
3268    }
3269
3270    /// Allow the OS to mmap files for reading SST tables.
3271    ///
3272    /// Default: false
3273    ///
3274    /// # Examples
3275    ///
3276    /// ```
3277    /// use rust_rocksdb::Options;
3278    ///
3279    /// let mut options = Options::default();
3280    /// options.set_allow_mmap_reads(true);
3281    /// ```
3282    pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
3283        unsafe {
3284            ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
3285        }
3286    }
3287
3288    /// If enabled, WAL is not flushed automatically after each write. Instead it
3289    /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
3290    /// to its file.
3291    ///
3292    /// Default: false
3293    ///
3294    /// # Examples
3295    ///
3296    /// ```
3297    /// use rust_rocksdb::Options;
3298    ///
3299    /// let mut options = Options::default();
3300    /// options.set_manual_wal_flush(true);
3301    /// ```
3302    pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
3303        unsafe {
3304            ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
3305        }
3306    }
3307
3308    /// Guarantee that all column families are flushed together atomically.
3309    /// This option applies to both manual flushes (`db.flush()`) and automatic
3310    /// background flushes caused when memtables are filled.
3311    ///
3312    /// Note that this is only useful when the WAL is disabled. When using the
3313    /// WAL, writes are always consistent across column families.
3314    ///
3315    /// Default: false
3316    ///
3317    /// # Examples
3318    ///
3319    /// ```
3320    /// use rust_rocksdb::Options;
3321    ///
3322    /// let mut options = Options::default();
3323    /// options.set_atomic_flush(true);
3324    /// ```
3325    pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
3326        unsafe {
3327            ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
3328        }
3329    }
3330
3331    /// Sets global cache for table-level rows.
3332    ///
3333    /// Default: null (disabled)
3334    /// Not supported in ROCKSDB_LITE mode!
3335    pub fn set_row_cache(&mut self, cache: &Cache) {
3336        unsafe {
3337            ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
3338        }
3339        self.outlive.row_cache = Some(cache.clone());
3340    }
3341
3342    /// Use to control write rate of flush and compaction. Flush has higher
3343    /// priority than compaction.
3344    /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3345    ///
3346    /// Default: disabled
3347    ///
3348    /// # Examples
3349    ///
3350    /// ```
3351    /// use rust_rocksdb::Options;
3352    ///
3353    /// let mut options = Options::default();
3354    /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
3355    /// ```
3356    pub fn set_ratelimiter(
3357        &mut self,
3358        rate_bytes_per_sec: i64,
3359        refill_period_us: i64,
3360        fairness: i32,
3361    ) {
3362        unsafe {
3363            let ratelimiter =
3364                ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
3365            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3366            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3367        }
3368    }
3369
3370    /// Use to control write rate of flush and compaction. Flush has higher
3371    /// priority than compaction.
3372    /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3373    ///
3374    /// Default: disabled
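    ///
    /// # Examples
    ///
    /// A minimal example, mirroring `set_ratelimiter` (the values are illustrative):
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// // 1 MB/s initial limit, 100 ms refill period, default fairness.
    /// options.set_auto_tuned_ratelimiter(1024 * 1024, 100 * 1000, 10);
    /// ```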
3375    pub fn set_auto_tuned_ratelimiter(
3376        &mut self,
3377        rate_bytes_per_sec: i64,
3378        refill_period_us: i64,
3379        fairness: i32,
3380    ) {
3381        unsafe {
3382            let ratelimiter = ffi::rocksdb_ratelimiter_create_auto_tuned(
3383                rate_bytes_per_sec,
3384                refill_period_us,
3385                fairness,
3386            );
3387            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3388            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3389        }
3390    }
3391
3392    /// Creates a RateLimiter and installs it on these options to control the
3393    /// write rate of flush and compaction.
3394    ///
3395    /// rate_bytes_per_sec: this is the only parameter you want to set most of the
3396    /// time. It controls the total write rate of compaction and flush in bytes per
3397    /// second. Currently, RocksDB does not enforce a rate limit for anything other
3398    /// than flush and compaction, e.g. writes to the WAL.
3399    ///
3400    /// refill_period_us: this controls how often tokens are refilled. For example,
3401    /// when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
3402    /// 100ms, then 1MB is refilled every 100ms internally. A larger value can lead
3403    /// to burstier writes, while a smaller value introduces more CPU overhead.
3404    /// The default should work for most cases.
3405    ///
3406    /// fairness: RateLimiter accepts high-pri requests and low-pri requests.
3407    /// A low-pri request is usually blocked in favor of a high-pri request.
3408    /// Currently, RocksDB assigns low-pri to requests from compaction and high-pri
3409    /// to requests from flush. Low-pri requests can get blocked if flush requests
3410    /// come in continuously. This fairness parameter grants low-pri requests
3411    /// permission with a 1/fairness chance even when high-pri requests exist, to
3412    /// avoid starvation. You should be fine leaving it at the default of 10.
3413    ///
3414    /// mode: Mode indicates which types of operations count against the limit.
3415    ///
3416    /// auto_tuned: Enables dynamic adjustment of rate limit within the range
3417    ///              `[rate_bytes_per_sec / 20, rate_bytes_per_sec]`, according to
3418    ///              the recent demand for background I/O.
3419    pub fn set_ratelimiter_with_mode(
3420        &mut self,
3421        rate_bytes_per_sec: i64,
3422        refill_period_us: i64,
3423        fairness: i32,
3424        mode: RateLimiterMode,
3425        auto_tuned: bool,
3426    ) {
3427        unsafe {
3428            let ratelimiter = ffi::rocksdb_ratelimiter_create_with_mode(
3429                rate_bytes_per_sec,
3430                refill_period_us,
3431                fairness,
3432                mode as c_int,
3433                auto_tuned,
3434            );
3435            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3436            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3437        }
3438    }
3439
3440    /// Sets the maximal size of the info log file.
3441    ///
3442    /// If the log file is larger than `max_log_file_size`, a new info log file
3443    /// will be created. If `max_log_file_size` is equal to zero, all logs will
3444    /// be written to one log file.
3445    ///
3446    /// Default: 0
3447    ///
3448    /// # Examples
3449    ///
3450    /// ```
3451    /// use rust_rocksdb::Options;
3452    ///
3453    /// let mut options = Options::default();
3454    /// options.set_max_log_file_size(0);
3455    /// ```
3456    pub fn set_max_log_file_size(&mut self, size: usize) {
3457        unsafe {
3458            ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
3459        }
3460    }
3461
3462    /// Sets the time for the info log file to roll (in seconds).
3463    ///
3464    /// If specified with non-zero value, log file will be rolled
3465    /// if it has been active longer than `log_file_time_to_roll`.
3466    /// Default: 0 (disabled)
3467    pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
3468        unsafe {
3469            ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
3470        }
3471    }
3472
3473    /// Controls the recycling of log files.
3474    ///
3475    /// If non-zero, previously written log files will be reused for new logs,
3476    /// overwriting the old data. The value indicates how many such files we will
3477    /// keep around at any point in time for later use. This is more efficient
3478    /// because the blocks are already allocated and fdatasync does not need to
3479    /// update the inode after each write.
3480    ///
3481    /// Default: 0
3482    ///
3483    /// # Examples
3484    ///
3485    /// ```
3486    /// use rust_rocksdb::Options;
3487    ///
3488    /// let mut options = Options::default();
3489    /// options.set_recycle_log_file_num(5);
3490    /// ```
3491    pub fn set_recycle_log_file_num(&mut self, num: usize) {
3492        unsafe {
3493            ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
3494        }
3495    }
3496
3497    /// Prints logs to stderr for faster debugging
3498    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/Logger) for more information.
3499    pub fn set_stderr_logger(&mut self, log_level: LogLevel, prefix: impl CStrLike) {
3500        let p = prefix.into_c_string().unwrap();
3501
3502        unsafe {
3503            let logger = ffi::rocksdb_logger_create_stderr_logger(log_level as c_int, p.as_ptr());
3504            ffi::rocksdb_options_set_info_log(self.inner, logger);
3505            ffi::rocksdb_logger_destroy(logger);
3506        }
3507    }
3508
3509    /// Invokes callback with log messages.
3510    ///
3511    /// # Examples
3512    /// ```
3513    /// use rust_rocksdb::{LogLevel, Options};
3514    ///
3515    /// let mut options = Options::default();
3516    /// options.set_callback_logger(LogLevel::Debug, &|level, msg| println!("{level:?} {msg}"));
3517    /// ```
3518    pub fn set_callback_logger<'a, F>(&mut self, log_level: LogLevel, func: &'a F)
3519    where
3520        F: for<'b> FnMut(LogLevel, &'b str) + RefUnwindSafe + Send + Sync + 'a,
3521    {
3522        let func = func as *const F;
3523        let func = func.cast::<c_void>();
3524        unsafe {
3525            let logger = ffi::rocksdb_logger_create_callback_logger(
3526                log_level as c_int,
3527                Some(Self::logger_callback::<'a, F>),
3528                func.cast_mut(),
3529            );
3530            ffi::rocksdb_options_set_info_log(self.inner, logger);
3531            ffi::rocksdb_logger_destroy(logger);
3532        }
3533    }
3534
3535    extern "C" fn logger_callback<'a, F>(
3536        func: *mut c_void,
3537        level: u32,
3538        msg: *mut c_char,
3539        len: usize,
3540    ) where
3541        F: for<'b> FnMut(LogLevel, &'b str) + RefUnwindSafe + Send + Sync + 'a,
3542    {
3543        use std::{mem, process, str};
3544
3545        let level = unsafe { mem::transmute::<u32, LogLevel>(level) };
3546        let slice = unsafe { slice::from_raw_parts_mut(msg.cast::<u8>(), len) };
3547        let msg = unsafe { str::from_utf8_unchecked(slice) };
3548        let func = unsafe { &mut *func.cast::<F>() };
3549        let mut func = AssertUnwindSafe(func);
3550        if catch_unwind(move || func(level, msg)).is_err() {
3551            process::abort();
3552        }
3553    }
3554
3555    /// Sets the threshold at which all writes will be slowed down to at least delayed_write_rate if the estimated
3556    /// bytes needed to be compacted exceed this threshold.
3557    ///
3558    /// Default: 64GB
3559    pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
3560        unsafe {
3561            ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
3562        }
3563    }
3564
3565    /// Sets the bytes threshold at which all writes are stopped if the estimated bytes needed to be compacted exceed
3566    /// this threshold.
3567    ///
3568    /// Default: 256GB
3569    pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
3570        unsafe {
3571            ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
3572        }
3573    }
3574
3575    /// Sets the size of one block in arena memory allocation.
3576    ///
3577    /// If <= 0, a proper value is automatically calculated (usually 1/10 of
3578    /// write_buffer_size).
3579    ///
3580    /// Default: 0
3581    pub fn set_arena_block_size(&mut self, size: usize) {
3582        unsafe {
3583            ffi::rocksdb_options_set_arena_block_size(self.inner, size);
3584        }
3585    }
3586
3587    /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
3588    ///
3589    /// Default: false
3590    pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
3591        unsafe {
3592            ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
3593        }
3594    }
3595
3596    /// Enable whole key bloom filter in memtable. Note this will only take effect
3597    /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
3598    /// can potentially reduce CPU usage for point lookups.
3599    ///
3600    /// Default: false (disable)
3601    ///
3602    /// Dynamically changeable through SetOptions() API
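    ///
    /// # Examples
    ///
    /// A minimal example; note the non-zero prefix bloom ratio this option depends on:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_memtable_prefix_bloom_ratio(0.1);
    /// opts.set_memtable_whole_key_filtering(true);
    /// ```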
3603    pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
3604        unsafe {
3605            ffi::rocksdb_options_set_memtable_whole_key_filtering(
3606                self.inner,
3607                c_uchar::from(whole_key_filter),
3608            );
3609        }
3610    }
3611
3612    /// Enable the use of key-value separation.
3613    ///
3614    /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
3615    ///
3616    /// Default: false (disable)
3617    ///
3618    /// Dynamically changeable through SetOptions() API
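    ///
    /// # Examples
    ///
    /// A minimal key-value-separation setup (the thresholds are illustrative only):
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_enable_blob_files(true);
    /// // Store values of 4 KiB and larger in blob files.
    /// opts.set_min_blob_size(4096);
    /// opts.set_enable_blob_gc(true);
    /// ```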
3619    pub fn set_enable_blob_files(&mut self, val: bool) {
3620        unsafe {
3621            ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
3622        }
3623    }
3624
3625    /// Sets the size threshold at or above which values will be written
3626    /// to blob files during flush or compaction.
3627    ///
3628    /// Dynamically changeable through SetOptions() API
3629    pub fn set_min_blob_size(&mut self, val: u64) {
3630        unsafe {
3631            ffi::rocksdb_options_set_min_blob_size(self.inner, val);
3632        }
3633    }
3634
3635    /// Sets the size limit for blob files.
3636    ///
3637    /// Dynamically changeable through SetOptions() API
3638    pub fn set_blob_file_size(&mut self, val: u64) {
3639        unsafe {
3640            ffi::rocksdb_options_set_blob_file_size(self.inner, val);
3641        }
3642    }
3643
3644    /// Sets the blob compression type. All blob files use the same
3645    /// compression type.
3646    ///
3647    /// Dynamically changeable through SetOptions() API
3648    pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
3649        unsafe {
3650            ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
3651        }
3652    }
3653
3654    /// If this is set to true, RocksDB will actively relocate valid blobs from the oldest blob files
3655    /// as they are encountered during compaction.
3656    ///
3657    /// Dynamically changeable through SetOptions() API
3658    pub fn set_enable_blob_gc(&mut self, val: bool) {
3659        unsafe {
3660            ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
3661        }
3662    }
3663
3664    /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
3665    ///
3666    /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
3667    /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
3668    /// the trade-off between write amplification and space amplification.
3669    ///
3670    /// Dynamically changeable through SetOptions() API
3671    pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
3672        unsafe {
3673            ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
3674        }
3675    }
3676
3677    /// Sets the blob GC force threshold.
3678    ///
3679    /// Dynamically changeable through SetOptions() API
3680    pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
3681        unsafe {
3682            ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
3683        }
3684    }
3685
3686    /// Sets the blob compaction read ahead size.
3687    ///
3688    /// Dynamically changeable through SetOptions() API
3689    pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
3690        unsafe {
3691            ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
3692        }
3693    }
3694
3695    /// Sets the blob cache.
3696    ///
3697    /// Using a dedicated object for blobs and using the same object for the block and blob caches
3698    /// are both supported. In the latter case, note that blobs are less valuable from a caching
3699    /// perspective than SST blocks, and some cache implementations have configuration options that
3700    /// can be used to prioritize items accordingly (see Cache::Priority and
3701    /// LRUCacheOptions::{high,low}_pri_pool_ratio).
3702    ///
3703    /// Default: disabled
3704    pub fn set_blob_cache(&mut self, cache: &Cache) {
3705        unsafe {
3706            ffi::rocksdb_options_set_blob_cache(self.inner, cache.0.inner.as_ptr());
3707        }
3708        self.outlive.blob_cache = Some(cache.clone());
3709    }
3710
3711    /// Set this option to true during creation of the database if you want
3712    /// to be able to ingest behind (call IngestExternalFile() skipping keys
3713    /// that already exist, rather than overwriting matching keys).
3714    /// Setting this option to true has the following effects:
3715    ///
3716    /// 1. Disable some internal optimizations around SST file compression.
3717    /// 2. Reserve the last level for ingested files only.
3718    /// 3. Compaction will not include any file from the last level.
3719    ///
3720    /// Note that only Universal Compaction supports allow_ingest_behind.
3721    /// `num_levels` should be >= 3 if this option is turned on.
3722    ///
3723    /// DEFAULT: false
3724    /// Immutable.
3725    pub fn set_allow_ingest_behind(&mut self, val: bool) {
3726        unsafe {
3727            ffi::rocksdb_options_set_allow_ingest_behind(self.inner, c_uchar::from(val));
3728        }
3729    }
3730
3731    /// A factory of a table property collector that marks an SST
3732    /// file as need-compaction when it observes at least "D" deletion
3733    /// entries in any "N" consecutive entries, or when the ratio of tombstone
3734    /// entries >= deletion_ratio.
3735    ///
3736    /// `window_size`: the sliding window size "N"
3737    /// `num_dels_trigger`: the deletion trigger "D"
3738    /// `deletion_ratio`: if <= 0 or > 1, disable triggering compaction based on
3739    /// deletion ratio.
3740    pub fn add_compact_on_deletion_collector_factory(
3741        &mut self,
3742        window_size: size_t,
3743        num_dels_trigger: size_t,
3744        deletion_ratio: f64,
3745    ) {
3746        unsafe {
3747            ffi::rocksdb_options_add_compact_on_deletion_collector_factory_del_ratio(
3748                self.inner,
3749                window_size,
3750                num_dels_trigger,
3751                deletion_ratio,
3752            );
3753        }
3754    }
3755
3756    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
3757    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
3758    /// Users can enable this control in two ways:
3759    ///
3760    /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
3761    /// 2- Cost the memtable memory usage to block cache so that memory of RocksDB can be capped by the single limit.
3762    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
3763    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
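    ///
    /// # Examples
    ///
    /// A minimal sketch; this assumes the `WriteBufferManager::new_write_buffer_manager`
    /// constructor (see the `WriteBufferManager` docs for the full API):
    ///
    /// ```
    /// use rust_rocksdb::{Options, WriteBufferManager};
    ///
    /// let mut opts = Options::default();
    /// // Illustrative: cap total memtable memory at 128MB, without stalling writes.
    /// let wbm = WriteBufferManager::new_write_buffer_manager(128 * 1024 * 1024, false);
    /// opts.set_write_buffer_manager(&wbm);
    /// ```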
3764    pub fn set_write_buffer_manager(&mut self, write_buffer_manager: &WriteBufferManager) {
3765        unsafe {
3766            ffi::rocksdb_options_set_write_buffer_manager(
3767                self.inner,
3768                write_buffer_manager.0.inner.as_ptr(),
3769            );
3770        }
3771        self.outlive.write_buffer_manager = Some(write_buffer_manager.clone());
3772    }
3773
3774    /// If true, worker threads may avoid doing unnecessary and long-latency
3775    /// operations (such as deleting obsolete files directly or deleting memtables)
3776    /// and will instead schedule background jobs to do them.
3777    ///
3778    /// Use it if you're latency-sensitive.
3779    ///
3780    /// Default: false (disabled)
3781    pub fn set_avoid_unnecessary_blocking_io(&mut self, val: bool) {
3782        unsafe {
3783            ffi::rocksdb_options_set_avoid_unnecessary_blocking_io(self.inner, u8::from(val));
3784        }
3785    }
3786
3787    /// Sets the compaction priority.
3788    ///
3789    /// If compaction_style is
3790    /// kCompactionStyleLevel, this determines, for each level, which files are
3791    /// prioritized to be picked for compaction.
3792    ///
3793    /// Default: `DBCompactionPri::MinOverlappingRatio`
3794    ///
3795    /// # Examples
3796    ///
3797    /// ```
3798    /// use rust_rocksdb::{Options, DBCompactionPri};
3799    ///
3800    /// let mut opts = Options::default();
3801    /// opts.set_compaction_pri(DBCompactionPri::RoundRobin);
3802    /// ```
3803    pub fn set_compaction_pri(&mut self, pri: DBCompactionPri) {
3804        unsafe {
3805            ffi::rocksdb_options_set_compaction_pri(self.inner, pri as c_int);
3806        }
3807    }
3808
3809    /// If true, the log numbers and sizes of the synced WALs are tracked
3810    /// in MANIFEST. During DB recovery, if a synced WAL is missing
3811    /// from disk, or the WAL's size does not match the recorded size in
3812    /// MANIFEST, an error will be reported and the recovery will be aborted.
3813    ///
3814    /// This is one additional protection against WAL corruption besides the
3815    /// per-WAL-entry checksum.
3816    ///
3817    /// Note that this option does not work with secondary instances.
3818    /// Currently, only the syncing of closed WALs is tracked. Calling `DB::SyncWAL()`,
3819    /// etc., or writing with `WriteOptions::sync=true` to sync the live WAL is not
3820    /// tracked for performance/efficiency reasons.
3821    ///
3822    /// See: <https://github.com/facebook/rocksdb/wiki/Track-WAL-in-MANIFEST>
3823    ///
3824    /// Default: false (disabled)
3825    pub fn set_track_and_verify_wals_in_manifest(&mut self, val: bool) {
3826        unsafe {
3827            ffi::rocksdb_options_set_track_and_verify_wals_in_manifest(self.inner, u8::from(val));
3828        }
3829    }
3830
3831    /// Returns the value of the `track_and_verify_wals_in_manifest` option.
3832    pub fn get_track_and_verify_wals_in_manifest(&self) -> bool {
3833        let val_u8 =
3834            unsafe { ffi::rocksdb_options_get_track_and_verify_wals_in_manifest(self.inner) };
3835        val_u8 != 0
3836    }
3837
3838    /// The DB unique ID can be saved in the DB manifest (preferred, this option)
3839    /// or an IDENTITY file (historical, deprecated), or both. If this option is
3840    /// set to false (old behavior), then `write_identity_file` must be set to true.
3841    /// The manifest is preferred because
3842    ///
3843    /// 1. The IDENTITY file is not checksummed, so it is not as safe against
3844    ///    corruption.
3845    /// 2. The IDENTITY file may or may not be copied with the DB (e.g. not
3846    ///    copied by BackupEngine), so is not reliable for the provenance of a DB.
3847    ///
3848    /// This option might eventually become obsolete and be removed as IDENTITY
3849    /// files are phased out.
3850    ///
3851    /// Default: true (enabled)
3852    pub fn set_write_dbid_to_manifest(&mut self, val: bool) {
3853        unsafe {
3854            ffi::rocksdb_options_set_write_dbid_to_manifest(self.inner, u8::from(val));
3855        }
3856    }
3857
3858    /// Returns the value of the `write_dbid_to_manifest` option.
3859    pub fn get_write_dbid_to_manifest(&self) -> bool {
3860        let val_u8 = unsafe { ffi::rocksdb_options_get_write_dbid_to_manifest(self.inner) };
3861        val_u8 != 0
3862    }
3863}
3864
3865impl Default for Options {
3866    fn default() -> Self {
3867        unsafe {
3868            let opts = ffi::rocksdb_options_create();
3869            assert!(!opts.is_null(), "Could not create RocksDB options");
3870
3871            Self {
3872                inner: opts,
3873                outlive: OptionsMustOutliveDB::default(),
3874            }
3875        }
3876    }
3877}
3878
3879impl FlushOptions {
3880    pub fn new() -> FlushOptions {
3881        FlushOptions::default()
3882    }
3883
3884    /// Waits until the flush is done.
3885    ///
3886    /// Default: true
3887    ///
3888    /// # Examples
3889    ///
3890    /// ```
3891    /// use rust_rocksdb::FlushOptions;
3892    ///
3893    /// let mut options = FlushOptions::default();
3894    /// options.set_wait(false);
3895    /// ```
3896    pub fn set_wait(&mut self, wait: bool) {
3897        unsafe {
3898            ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
3899        }
3900    }
3901}
3902
3903impl Default for FlushOptions {
3904    fn default() -> Self {
3905        let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3906        assert!(
3907            !flush_opts.is_null(),
3908            "Could not create RocksDB flush options"
3909        );
3910
3911        Self { inner: flush_opts }
3912    }
3913}
3914
3915impl WriteOptions {
3916    pub fn new() -> WriteOptions {
3917        WriteOptions::default()
3918    }
3919
3920    /// Sets the sync mode. If true, the write will be flushed
3921    /// from the operating system buffer cache before the write is considered complete.
3922    /// If this flag is true, writes will be slower.
3923    ///
3924    /// Default: false
3925    pub fn set_sync(&mut self, sync: bool) {
3926        unsafe {
3927            ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
3928        }
3929    }
3930
3931    /// Sets whether the WAL should be active or not.
3932    /// If true, writes will not first go to the write-ahead log,
3933    /// and the write may be lost after a crash.
3934    ///
3935    /// Default: false
3936    pub fn disable_wal(&mut self, disable: bool) {
3937        unsafe {
3938            ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
3939        }
3940    }
3941
3942    /// If true and the user is trying to write to column families that don't exist (they were dropped),
3943    /// the write is ignored (no error is returned). If there are multiple writes in a WriteBatch,
3944    /// the other writes will still succeed.
3945    ///
3946    /// Default: false
3947    pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
3948        unsafe {
3949            ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
3950                self.inner,
3951                c_uchar::from(ignore),
3952            );
3953        }
3954    }
3955
3956    /// If true and we need to wait or sleep for the write request, the request
3957    /// fails immediately with Status::Incomplete().
3958    ///
3959    /// Default: false
3960    pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
3961        unsafe {
3962            ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
3963        }
3964    }
3965
3966    /// If true, this write request is of lower priority if compaction is
3967    /// behind. In that case, if no_slowdown = true, the request will be cancelled
3968    /// immediately with Status::Incomplete() returned. Otherwise, it will be
3969    /// slowed down. The slowdown value is determined by RocksDB to guarantee
3970    /// it introduces minimal impact on high-priority writes.
3971    ///
3972    /// Default: false
3973    pub fn set_low_pri(&mut self, v: bool) {
3974        unsafe {
3975            ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
3976        }
3977    }
3978
3979    /// If true, the write batch will maintain the last insert positions of each
3980    /// memtable as hints for concurrent writes. This can improve write performance
3981    /// in concurrent writes if keys in one write batch are sequential. In
3982    /// non-concurrent writes (when concurrent_memtable_writes is false) this
3983    /// option will be ignored.
3984    ///
3985    /// Default: false
3986    pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
3987        unsafe {
3988            ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
3989                self.inner,
3990                c_uchar::from(v),
3991            );
3992        }
3993    }
3994}
3995
3996impl Default for WriteOptions {
3997    fn default() -> Self {
3998        let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
3999        assert!(
4000            !write_opts.is_null(),
4001            "Could not create RocksDB write options"
4002        );
4003
4004        Self { inner: write_opts }
4005    }
4006}
4007
4008impl LruCacheOptions {
4009    /// Capacity of the cache, in the same units as the `charge` of each entry.
4010    /// This is typically measured in bytes, but can be a different unit if using
4011    /// kDontChargeCacheMetadata.
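    ///
    /// # Examples
    ///
    /// A minimal example (capacity and shard bits are illustrative only):
    ///
    /// ```
    /// use rust_rocksdb::LruCacheOptions;
    ///
    /// let mut opts = LruCacheOptions::default();
    /// opts.set_capacity(64 * 1024 * 1024); // 64MB
    /// opts.set_num_shard_bits(6);
    /// ```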
4012    pub fn set_capacity(&mut self, cap: usize) {
4013        unsafe {
4014            ffi::rocksdb_lru_cache_options_set_capacity(self.inner, cap);
4015        }
4016    }
4017
4018    /// Cache is sharded into 2^num_shard_bits shards, by hash of key.
4019    /// If < 0, a good default is chosen based on the capacity and the
4020    /// implementation. (Mutex-based implementations are much more reliant
4021    /// on many shards for parallel scalability.)
4022    pub fn set_num_shard_bits(&mut self, val: c_int) {
4023        unsafe {
4024            ffi::rocksdb_lru_cache_options_set_num_shard_bits(self.inner, val);
4025        }
4026    }
4027}
4028
4029impl Default for LruCacheOptions {
4030    fn default() -> Self {
4031        let inner = unsafe { ffi::rocksdb_lru_cache_options_create() };
4032        assert!(
4033            !inner.is_null(),
4034            "Could not create RocksDB LRU cache options"
4035        );
4036
4037        Self { inner }
4038    }
4039}
4040
4041#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4042#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
4043#[repr(i32)]
4044pub enum ReadTier {
4045    /// Reads data in memtable, block cache, OS cache or storage.
4046    All = 0,
4047    /// Reads data in memtable or block cache.
4048    BlockCache,
4049    /// Reads persisted data. When WAL is disabled, this option will skip data in memtable.
4050    Persisted,
4051    /// Reads data in memtable. Used for memtable only iterators.
4052    Memtable,
4053}
4054
4055impl ReadOptions {
4056    // TODO add snapshot setting here
4057    // TODO add snapshot wrapper structs with proper destructors;
4058    // that struct needs an "iterator" impl too.
4059
4060    /// Specify whether the "data block"/"index block"/"filter block"
4061    /// read for this iteration should be cached in memory.
4062    /// Callers may wish to set this field to false for bulk scans.
4063    ///
4064    /// Default: true
4065    pub fn fill_cache(&mut self, v: bool) {
4066        unsafe {
4067            ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
4068        }
4069    }
4070
4071    /// Sets the snapshot which should be used for the read.
4072    /// The snapshot must belong to the DB that is being read and must
4073    /// not have been released.
4074    pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
4075        unsafe {
4076            ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
4077        }
4078    }
4079
4080    /// Sets the lower bound for an iterator.
4081    pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
4082        self.set_lower_bound_impl(Some(key.into()));
4083    }
4084
4085    /// Sets the upper bound for an iterator.
4086    /// The upper bound itself is not included in the iteration result.
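    ///
    /// # Examples
    ///
    /// A minimal example (the keys are illustrative only):
    ///
    /// ```
    /// use rust_rocksdb::ReadOptions;
    ///
    /// let mut opts = ReadOptions::default();
    /// // Iterate over keys in ["a", "m"): "m" itself is excluded.
    /// opts.set_iterate_lower_bound(b"a".to_vec());
    /// opts.set_iterate_upper_bound(b"m".to_vec());
    /// ```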
4087    pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
4088        self.set_upper_bound_impl(Some(key.into()));
4089    }
4090
4091    /// Sets lower and upper bounds based on the provided range.  This is
4092    /// similar to setting lower and upper bounds separately except that it also
4093    /// allows either bound to be reset.
4094    ///
4095    /// The argument can be a regular Rust range, e.g. `lower..upper`.  However,
4096    /// since RocksDB upper bound is always excluded (i.e. range can never be
4097    /// fully closed) inclusive ranges (`lower..=upper` and `..=upper`) are not
4098    /// supported.  For example:
4099    ///
4100    /// ```
4101    /// let mut options = rust_rocksdb::ReadOptions::default();
4102    /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
4103    /// ```
4104    ///
4105    /// In addition, [`crate::PrefixRange`] can be used to specify a range of
4106    /// keys with a given prefix.  In particular, the above example is
4107    /// equivalent to:
4108    ///
4109    /// ```
4110    /// let mut options = rust_rocksdb::ReadOptions::default();
4111    /// options.set_iterate_range(rust_rocksdb::PrefixRange("xy".as_bytes()));
4112    /// ```
4113    ///
4114    /// Note that setting a range using this method is separate from using prefix
4115    /// iterators. Prefix iterators use the prefix extractor configured for
4116    /// a column family. Setting bounds via [`crate::PrefixRange`] is more akin
4117    /// to specifying the prefix manually.
4118    ///
4119    /// Using this method clears any previously set bounds.  In other words, the
4120    /// bounds can be reset by setting the range to `..` as in:
4121    ///
4122    /// ```
4123    /// let mut options = rust_rocksdb::ReadOptions::default();
4124    /// options.set_iterate_range(..);
4125    /// ```
4126    pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
4127        let (lower, upper) = range.into_bounds();
4128        self.set_lower_bound_impl(lower);
4129        self.set_upper_bound_impl(upper);
4130    }
4131
4132    fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4133        let (ptr, len) = if let Some(ref bound) = bound {
4134            (bound.as_ptr() as *const c_char, bound.len())
4135        } else if self.iterate_lower_bound.is_some() {
4136            (std::ptr::null(), 0)
4137        } else {
4138            return;
4139        };
4140        self.iterate_lower_bound = bound;
4141        unsafe {
4142            ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
4143        }
4144    }
4145
4146    fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4147        let (ptr, len) = if let Some(ref bound) = bound {
4148            (bound.as_ptr() as *const c_char, bound.len())
4149        } else if self.iterate_upper_bound.is_some() {
4150            (std::ptr::null(), 0)
4151        } else {
4152            return;
4153        };
4154        self.iterate_upper_bound = bound;
4155        unsafe {
4156            ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
4157        }
4158    }
4159
4160    /// Specify if this read request should process data that ALREADY
4161    /// resides in a particular cache. If the required data is not
4162    /// found in the specified cache, then Status::Incomplete is returned.
4163    ///
4164    /// Default: `ReadTier::All`
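    ///
    /// # Examples
    ///
    /// A minimal example:
    ///
    /// ```
    /// use rust_rocksdb::{ReadOptions, ReadTier};
    ///
    /// let mut opts = ReadOptions::default();
    /// // Only read data already in the memtable or block cache;
    /// // otherwise the read returns Status::Incomplete.
    /// opts.set_read_tier(ReadTier::BlockCache);
    /// ```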
4165    pub fn set_read_tier(&mut self, tier: ReadTier) {
4166        unsafe {
4167            ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
4168        }
4169    }
4170
4171    /// Enforce that the iterator only iterates over the same
4172    /// prefix as the seek.
4173    /// This option is effective only for prefix seeks, i.e. prefix_extractor is
4174    /// non-null for the column family and total_order_seek is false.  Unlike
4175    /// iterate_upper_bound, prefix_same_as_start only works within a prefix
4176    /// but in both directions.
4177    ///
4178    /// Default: false
4179    pub fn set_prefix_same_as_start(&mut self, v: bool) {
4180        unsafe {
4181            ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
4182        }
4183    }
4184
4185    /// Enable a total order seek regardless of the index format (e.g. hash index)
4186    /// used in the table. Some table formats (e.g. plain table) may not support
4187    /// this option.
4188    ///
4189    /// If true when calling Get(), we also skip prefix bloom when reading from
4190    /// block based table. It provides a way to read existing data after
4191    /// changing implementation of prefix extractor.
4192    pub fn set_total_order_seek(&mut self, v: bool) {
4193        unsafe {
4194            ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
4195        }
4196    }
4197
4198    /// Sets a threshold for the number of keys that can be skipped
4199    /// before failing an iterator seek as incomplete. With the default value of 0,
4200    /// a request never fails as incomplete, no matter how many keys are skipped.
4201    ///
4202    /// Default: 0
4203    pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
4204        unsafe {
4205            ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
4206        }
4207    }
4208
4209    /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we schedule a background job
4210    /// in the flush job queue and delete obsolete files in the background.
4211    ///
4212    /// Default: false
4213    pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
4214        unsafe {
4215            ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
4216                self.inner,
4217                c_uchar::from(v),
4218            );
4219        }
4220    }
4221
4222    /// If true, keys deleted using the DeleteRange() API will be visible to
4223    /// readers until they are naturally deleted during compaction. This improves
4224    /// read performance in DBs with many range deletions.
4225    ///
4226    /// Default: false
4227    pub fn set_ignore_range_deletions(&mut self, v: bool) {
4228        unsafe {
4229            ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
4230        }
4231    }
4232
4233    /// If true, all data read from underlying storage will be
4234    /// verified against corresponding checksums.
4235    ///
4236    /// Default: true
4237    pub fn set_verify_checksums(&mut self, v: bool) {
4238        unsafe {
4239            ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
4240        }
4241    }

    /// If non-zero, an iterator will create a new table reader which
    /// performs reads of the given size. Using a large size (> 2MB) can
    /// improve the performance of forward iteration on spinning disks.
    /// Default: 0
    ///
    /// ```
    /// use rust_rocksdb::ReadOptions;
    ///
    /// let mut opts = ReadOptions::default();
    /// opts.set_readahead_size(4_194_304); // 4 MiB
    /// ```
    pub fn set_readahead_size(&mut self, v: usize) {
        unsafe {
            ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
        }
    }

    /// If auto_readahead_size is set to true, RocksDB will auto-tune the readahead_size
    /// during scans internally.
    /// For this feature to be enabled, iterate_upper_bound must also be specified.
    ///
    /// NOTE: - Recommended for forward scans only.
    ///       - If a backward scan is issued, this option is disabled internally
    ///         and won't be re-enabled even if a forward scan is issued again.
    ///
    /// Default: true
    pub fn set_auto_readahead_size(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_auto_readahead_size(self.inner, c_uchar::from(v));
        }
    }

    /// If true, create a tailing iterator. Note that tailing iterators
    /// only support moving in the forward direction. Iterating in reverse
    /// or seek_to_last are not supported.
    pub fn set_tailing(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
        }
    }

    /// Specifies the value of "pin_data". If true, it keeps the blocks
    /// loaded by the iterator pinned in memory as long as the iterator is not deleted.
    /// If used when reading from tables created with
    /// BlockBasedTableOptions::use_delta_encoding = false,
    /// the iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
    /// return 1.
    ///
    /// Default: false
    pub fn set_pin_data(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
        }
    }

    /// Asynchronously prefetch some data.
    ///
    /// Used for sequential reads and internal automatic prefetching.
    ///
    /// Default: `false`
    pub fn set_async_io(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
        }
    }

    /// Deadline for completing an API call (Get/MultiGet/Seek/Next for now)
    /// in microseconds.
    /// It should be set to microseconds since epoch, i.e., gettimeofday or
    /// equivalent plus the allowed duration in microseconds.
    /// This is best effort: the call may exceed the deadline if there is IO
    /// involved and the file system doesn't support deadlines, or because the
    /// deadline is checked periodically rather than for every key when
    /// processing a batch.
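    ///
    /// A minimal sketch of computing such a deadline (the 10 ms budget is
    /// illustrative):
    /// ```
    /// use std::time::{SystemTime, UNIX_EPOCH};
    /// use rust_rocksdb::ReadOptions;
    ///
    /// let now_us = SystemTime::now()
    ///     .duration_since(UNIX_EPOCH)
    ///     .unwrap()
    ///     .as_micros() as u64;
    /// let mut opts = ReadOptions::default();
    /// opts.set_deadline(now_us + 10_000); // now plus a 10 ms budget
    /// ```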
    pub fn set_deadline(&mut self, microseconds: u64) {
        unsafe {
            ffi::rocksdb_readoptions_set_deadline(self.inner, microseconds);
        }
    }

    /// A timeout in microseconds to be passed to the underlying FileSystem for
    /// reads. As opposed to deadline, this determines the timeout for each
    /// individual file read request. If a MultiGet/Get/Seek/Next etc call
    /// results in multiple reads, each read can last up to io_timeout us.
    pub fn set_io_timeout(&mut self, microseconds: u64) {
        unsafe {
            ffi::rocksdb_readoptions_set_io_timeout(self.inner, microseconds);
        }
    }

    /// Timestamp of operation. Read should return the latest data visible to the
    /// specified timestamp. All timestamps of the same database must be of the
    /// same length and format. The user is responsible for providing a customized
    /// compare function via Comparator to order <key, timestamp> tuples.
    /// For iterator, iter_start_ts is the lower bound (older) and timestamp
    /// serves as the upper bound. Versions of the same record that fall in
    /// the timestamp range will be returned. If iter_start_ts is nullptr,
    /// only the most recent version visible to timestamp is returned.
    /// The user-specified timestamp feature is still under active development,
    /// and the API is subject to change.
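    ///
    /// A minimal sketch (the big-endian `u64` encoding is an assumption and must
    /// match whatever your timestamp-aware comparator expects):
    /// ```
    /// use rust_rocksdb::ReadOptions;
    ///
    /// let mut opts = ReadOptions::default();
    /// opts.set_timestamp(42u64.to_be_bytes()); // read as of timestamp 42
    /// ```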
    pub fn set_timestamp<S: Into<Vec<u8>>>(&mut self, ts: S) {
        self.set_timestamp_impl(Some(ts.into()));
    }

    fn set_timestamp_impl(&mut self, ts: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref ts) = ts {
            (ts.as_ptr() as *const c_char, ts.len())
        } else if self.timestamp.is_some() {
            // The stored timestamp is a `Some` but we're updating it to a `None`.
            // This means to cancel a previously set timestamp.
            // To do this, use a null pointer and zero length.
            (std::ptr::null(), 0)
        } else {
            return;
        };
        self.timestamp = ts;
        unsafe {
            ffi::rocksdb_readoptions_set_timestamp(self.inner, ptr, len);
        }
    }

    /// See `set_timestamp`
    pub fn set_iter_start_ts<S: Into<Vec<u8>>>(&mut self, ts: S) {
        self.set_iter_start_ts_impl(Some(ts.into()));
    }

    fn set_iter_start_ts_impl(&mut self, ts: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref ts) = ts {
            (ts.as_ptr() as *const c_char, ts.len())
        } else if self.iter_start_ts.is_some() {
            // Cancel a previously set iter_start_ts with a null pointer and
            // zero length, mirroring `set_timestamp_impl`.
            (std::ptr::null(), 0)
        } else {
            return;
        };
        self.iter_start_ts = ts;
        unsafe {
            ffi::rocksdb_readoptions_set_iter_start_ts(self.inner, ptr, len);
        }
    }
}

impl Default for ReadOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_readoptions_create(),
                timestamp: None,
                iter_start_ts: None,
                iterate_upper_bound: None,
                iterate_lower_bound: None,
            }
        }
    }
}

impl IngestExternalFileOptions {
    /// Can be set to true to move the files instead of copying them.
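    ///
    /// A minimal sketch (the options are then passed to the DB ingestion call,
    /// `DB::ingest_external_file_opts` in this crate):
    /// ```
    /// use rust_rocksdb::IngestExternalFileOptions;
    ///
    /// let mut opts = IngestExternalFileOptions::default();
    /// opts.set_move_files(true); // move the SST files instead of copying
    /// ```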
    pub fn set_move_files(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
        }
    }

    /// If set to false, keys from an ingested file could appear in existing snapshots
    /// that were created before the file was ingested.
    pub fn set_snapshot_consistency(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If set to false, IngestExternalFile() will fail if the file key range
    /// overlaps with existing keys or tombstones in the DB.
    pub fn set_allow_global_seqno(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If set to false and the file key range overlaps with the memtable key range
    /// (memtable flush required), IngestExternalFile will fail.
    pub fn set_allow_blocking_flush(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Set to true if you would like duplicate keys in the file being ingested
    /// to be skipped rather than overwriting existing data under that key.
    /// Use case: back-filling historical data into the database without
    /// overwriting an existing newer version of that data.
    /// This option can only be used if the DB has been running
    /// with allow_ingest_behind=true since the dawn of time.
    /// All files will be ingested at the bottommost level with seqno=0.
    pub fn set_ingest_behind(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
        }
    }
}

impl Default for IngestExternalFileOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_ingestexternalfileoptions_create(),
            }
        }
    }
}

/// Used by BlockBasedOptions::set_index_type.
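///
/// A minimal sketch (assuming `BlockBasedOptions::set_index_type` as exposed
/// by this crate):
/// ```
/// use rust_rocksdb::{BlockBasedIndexType, BlockBasedOptions};
///
/// let mut block_opts = BlockBasedOptions::default();
/// // Two-level indexes keep the top level resident and page in partitions.
/// block_opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch);
/// ```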
pub enum BlockBasedIndexType {
    /// A space efficient index block that is optimized for
    /// binary-search-based index.
    BinarySearch,

    /// The hash index, if enabled, will perform a hash lookup if
    /// a prefix extractor has been provided through Options::set_prefix_extractor.
    HashSearch,

    /// A two-level index implementation. Both levels are binary search indexes.
    TwoLevelIndexSearch,
}

/// Used by BlockBasedOptions::set_data_block_index_type.
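///
/// A minimal sketch (the 0.75 hash ratio is illustrative;
/// `set_data_block_hash_ratio` is the companion setter in this crate):
/// ```
/// use rust_rocksdb::{BlockBasedOptions, DataBlockIndexType};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
/// block_opts.set_data_block_hash_ratio(0.75);
/// ```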
#[repr(C)]
pub enum DataBlockIndexType {
    /// Use binary search when performing point lookup for keys in data blocks.
    /// This is the default.
    BinarySearch = 0,

    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
    /// compatible with databases created without this feature. Once turned on, existing data will
    /// be gradually converted to the hash index format.
    BinaryAndHash = 1,
}

/// Defines the underlying memtable implementation.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
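///
/// A minimal sketch (the bucket/height/branching values are illustrative;
/// the hash-based factories are only useful together with a prefix extractor):
/// ```
/// use rust_rocksdb::{MemtableFactory, Options};
///
/// let mut opts = Options::default();
/// opts.set_memtable_factory(MemtableFactory::HashSkipList {
///     bucket_count: 1_000_000,
///     height: 4,
///     branching_factor: 4,
/// });
/// ```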
pub enum MemtableFactory {
    /// A vector of keys; efficient mainly for bulk-load workloads.
    Vector,
    /// A hash table of skip lists, bucketed by key prefix
    /// (requires a prefix extractor).
    HashSkipList {
        bucket_count: usize,
        height: i32,
        branching_factor: i32,
    },
    /// A hash table of sorted linked lists, bucketed by key prefix
    /// (requires a prefix extractor).
    HashLinkList {
        bucket_count: usize,
    },
}

/// Used by BlockBasedOptions::set_checksum_type.
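///
/// A minimal sketch (assuming your RocksDB build is new enough for the chosen
/// checksum, e.g. XXH3 needs 6.27+):
/// ```
/// use rust_rocksdb::{BlockBasedOptions, ChecksumType};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_checksum_type(ChecksumType::XXH3);
/// ```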
pub enum ChecksumType {
    NoChecksum = 0,
    CRC32c = 1,
    XXHash = 2,
    XXHash64 = 3,
    XXH3 = 4, // Supported since RocksDB 6.27
}

/// Used in [`PlainTableFactoryOptions`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum KeyEncodingType {
    /// Always write full keys.
    #[default]
    Plain = 0,
    /// Find opportunities to write the same prefix for multiple rows.
    Prefix = 1,
}

/// Used with DBOptions::set_plain_table_factory.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
///
/// Defaults:
///  user_key_length: 0 (variable length)
///  bloom_bits_per_key: 10
///  hash_table_ratio: 0.75
///  index_sparseness: 16
///  huge_page_tlb_size: 0
///  encoding_type: KeyEncodingType::Plain
///  full_scan_mode: false
///  store_index_in_file: false
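///
/// A minimal sketch using the defaults listed above (note that the plain table
/// format generally requires mmap reads and a prefix extractor to be useful):
/// ```
/// use rust_rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
///
/// let mut opts = Options::default();
/// opts.set_plain_table_factory(&PlainTableFactoryOptions {
///     user_key_length: 0,
///     bloom_bits_per_key: 10,
///     hash_table_ratio: 0.75,
///     index_sparseness: 16,
///     huge_page_tlb_size: 0,
///     encoding_type: KeyEncodingType::Plain,
///     full_scan_mode: false,
///     store_index_in_file: false,
/// });
/// ```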
pub struct PlainTableFactoryOptions {
    pub user_key_length: u32,
    pub bloom_bits_per_key: i32,
    pub hash_table_ratio: f64,
    pub index_sparseness: usize,
    pub huge_page_tlb_size: usize,
    pub encoding_type: KeyEncodingType,
    pub full_scan_mode: bool,
    pub store_index_in_file: bool,
}

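/// Compression algorithms supported for SST files.
///
/// A minimal sketch (assuming `Options::set_compression_type`; non-default
/// algorithms are only available when the corresponding crate feature is
/// enabled):
/// ```
/// use rust_rocksdb::{DBCompressionType, Options};
///
/// let mut opts = Options::default();
/// opts.set_compression_type(DBCompressionType::Lz4);
/// ```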
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompressionType {
    None = ffi::rocksdb_no_compression as isize,
    Snappy = ffi::rocksdb_snappy_compression as isize,
    Zlib = ffi::rocksdb_zlib_compression as isize,
    Bz2 = ffi::rocksdb_bz2_compression as isize,
    Lz4 = ffi::rocksdb_lz4_compression as isize,
    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
    Zstd = ffi::rocksdb_zstd_compression as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionStyle {
    Level = ffi::rocksdb_level_compaction as isize,
    Universal = ffi::rocksdb_universal_compaction as isize,
    Fifo = ffi::rocksdb_fifo_compaction as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBRecoveryMode {
    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum RateLimiterMode {
    KReadsOnly = 0,
    KWritesOnly = 1,
    KAllIo = 2,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionPri {
    ByCompensatedSize = ffi::rocksdb_k_by_compensated_size_compaction_pri as isize,
    OldestLargestSeqFirst = ffi::rocksdb_k_oldest_largest_seq_first_compaction_pri as isize,
    OldestSmallestSeqFirst = ffi::rocksdb_k_oldest_smallest_seq_first_compaction_pri as isize,
    MinOverlappingRatio = ffi::rocksdb_k_min_overlapping_ratio_compaction_pri as isize,
    RoundRobin = ffi::rocksdb_k_round_robin_compaction_pri as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum BlockBasedPinningTier {
    Fallback = ffi::rocksdb_block_based_k_fallback_pinning_tier as isize,
    None = ffi::rocksdb_block_based_k_none_pinning_tier as isize,
    FlushAndSimilar = ffi::rocksdb_block_based_k_flush_and_similar_pinning_tier as isize,
    All = ffi::rocksdb_block_based_k_all_pinning_tier as isize,
}

pub struct FifoCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}

impl Default for FifoCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Fifo Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for FifoCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
        }
    }
}

impl FifoCompactOptions {
    /// Sets the max table file size.
    ///
    /// Once the total size of all table files reaches this limit, the oldest
    /// table file is deleted.
    ///
    /// Default: 1GB
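    ///
    /// A minimal sketch wiring FIFO compaction into the DB options (the 8 GiB
    /// cap is illustrative):
    /// ```
    /// use rust_rocksdb::{DBCompactionStyle, FifoCompactOptions, Options};
    ///
    /// let mut fifo_opts = FifoCompactOptions::default();
    /// fifo_opts.set_max_table_files_size(8 * 1024 * 1024 * 1024); // 8 GiB
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Fifo);
    /// opts.set_fifo_compaction_options(&fifo_opts);
    /// ```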
    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum UniversalCompactionStopStyle {
    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
}

pub struct UniversalCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
}

impl Default for UniversalCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Universal Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for UniversalCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
        }
    }
}

impl UniversalCompactOptions {
    /// Sets the percentage flexibility while comparing file sizes.
    /// If the candidate file(s) size is 1% smaller than the next file's size,
    /// then include the next file into this candidate set.
    ///
    /// Default: 1
    pub fn set_size_ratio(&mut self, ratio: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
        }
    }

    /// Sets the minimum number of files in a single compaction run.
    ///
    /// Default: 2
    pub fn set_min_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
        }
    }

    /// Sets the maximum number of files in a single compaction run.
    ///
    /// Default: UINT_MAX
    pub fn set_max_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
        }
    }

    /// Sets the size amplification.
    ///
    /// It is defined as the amount (in percentage) of
    /// additional storage needed to store a single byte of data in the database.
    /// For example, a size amplification of 2% means that a database that
    /// contains 100 bytes of user-data may occupy up to 102 bytes of
    /// physical storage. By this definition, a fully compacted database has
    /// a size amplification of 0%. RocksDB uses the following heuristic
    /// to calculate size amplification: it assumes that all files excluding
    /// the earliest file contribute to the size amplification.
    ///
    /// Default: 200, which means that a 100 byte database could require up to 300 bytes of storage.
    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
                self.inner, v,
            );
        }
    }

    /// Sets the percentage of compression size.
    ///
    /// If this option is set to -1, all the output files
    /// will follow the compression type specified.
    ///
    /// If this option is not negative, we will try to make sure the compressed
    /// size is just above this value. In normal cases, at least this percentage
    /// of data will be compressed.
    /// When we are compacting to a new file, the criterion for whether it is
    /// compressed is as follows: assuming the list of files sorted
    /// by generation time is
    ///    A1...An B1...Bm C1...Ct
    /// where A1 is the newest and Ct is the oldest, and we are going to compact
    /// B1...Bm, we calculate the total size of all the files as total_size, as
    /// well as the total size of C1...Ct as total_C; the compaction output file
    /// will be compressed iff
    ///   total_C / total_size < this percentage
    ///
    /// Default: -1
    pub fn set_compression_size_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
        }
    }

    /// Sets the algorithm used to stop picking files into a single compaction run.
    ///
    /// Default: ::Total
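    ///
    /// A minimal sketch wiring universal compaction into the DB options
    /// (assuming `Options::set_universal_compaction_options`; the values shown
    /// are the documented defaults):
    /// ```
    /// use rust_rocksdb::{
    ///     DBCompactionStyle, Options, UniversalCompactOptions, UniversalCompactionStopStyle,
    /// };
    ///
    /// let mut uni = UniversalCompactOptions::default();
    /// uni.set_size_ratio(1);
    /// uni.set_min_merge_width(2);
    /// uni.set_max_size_amplification_percent(200);
    /// uni.set_stop_style(UniversalCompactionStopStyle::Total);
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Universal);
    /// opts.set_universal_compaction_options(&uni);
    /// ```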
    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum BottommostLevelCompaction {
    /// Skip bottommost level compaction.
    Skip = 0,
    /// Only compact bottommost level if there is a compaction filter.
    /// This is the default option.
    IfHaveCompactionFilter,
    /// Always compact bottommost level.
    Force,
    /// Always compact bottommost level but in bottommost level avoid
    /// double-compacting files created in the same compaction.
    ForceOptimized,
}

pub struct CompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
    full_history_ts_low: Option<Vec<u8>>,
}

impl Default for CompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_compactoptions_create() };
        assert!(!opts.is_null(), "Could not create RocksDB Compact Options");

        Self {
            inner: opts,
            full_history_ts_low: None,
        }
    }
}

impl Drop for CompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_compactoptions_destroy(self.inner);
        }
    }
}

impl CompactOptions {
    /// If more than one thread calls manual compaction,
    /// only one will actually schedule it while the other threads will simply wait
    /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
    /// is set to true, the call will disable scheduling of automatic compaction jobs
    /// and wait for existing automatic compaction jobs to finish.
    pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Sets bottommost level compaction.
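    ///
    /// A minimal sketch; these options are typically passed to
    /// `DB::compact_range_opt` (the variant choice here is illustrative):
    /// ```
    /// use rust_rocksdb::{BottommostLevelCompaction, CompactOptions};
    ///
    /// let mut compact_opts = CompactOptions::default();
    /// compact_opts.set_exclusive_manual_compaction(true);
    /// compact_opts.set_bottommost_level_compaction(BottommostLevelCompaction::ForceOptimized);
    /// ```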
    pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
        unsafe {
            ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
        }
    }

    /// If true, compacted files will be moved to the minimum level capable
    /// of holding the data, or to a given level (when target_level is non-negative).
    pub fn set_change_level(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
        }
    }

    /// If change_level is true and target_level has a non-negative value, compacted
    /// files will be moved to target_level.
    pub fn set_target_level(&mut self, lvl: c_int) {
        unsafe {
            ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
        }
    }

    /// Sets the user-defined timestamp low bound; data with a timestamp older than
    /// the low bound may be GCed by compaction. Default: nullptr
    pub fn set_full_history_ts_low<S: Into<Vec<u8>>>(&mut self, ts: S) {
        self.set_full_history_ts_low_impl(Some(ts.into()));
    }

    fn set_full_history_ts_low_impl(&mut self, ts: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref ts) = ts {
            (ts.as_ptr() as *mut c_char, ts.len())
        } else if self.full_history_ts_low.is_some() {
            // Cancel a previously set bound with a null pointer and zero length.
            (std::ptr::null_mut(), 0)
        } else {
            return;
        };
        self.full_history_ts_low = ts;
        unsafe {
            ffi::rocksdb_compactoptions_set_full_history_ts_low(self.inner, ptr, len);
        }
    }
}

pub struct WaitForCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_wait_for_compact_options_t,
}

impl Default for WaitForCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_wait_for_compact_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Wait For Compact Options"
        );

        Self { inner: opts }
    }
}

impl Drop for WaitForCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_destroy(self.inner);
        }
    }
}

impl WaitForCompactOptions {
    /// If true, abort waiting if background jobs are paused. If false,
    /// ContinueBackgroundWork() must be called to resume the background jobs.
    /// Otherwise, jobs that were queued but not yet scheduled may never finish,
    /// and WaitForCompact() may wait indefinitely (if a timeout is set, it will
    /// abort after the timeout).
    ///
    /// Default: false
    pub fn set_abort_on_pause(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_abort_on_pause(self.inner, c_uchar::from(v));
        }
    }

    /// If true, flush all column families before starting to wait.
    ///
    /// Default: false
    pub fn set_flush(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_flush(self.inner, c_uchar::from(v));
        }
    }

    /// Timeout in microseconds for waiting for compaction to complete.
    /// When timeout == 0, WaitForCompact() will wait as long as there's background
    /// work to finish.
    ///
    /// Default: 0
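    ///
    /// A minimal sketch (the 60 s budget is illustrative; the options are then
    /// handed to the DB's wait-for-compact call):
    /// ```
    /// use rust_rocksdb::WaitForCompactOptions;
    ///
    /// let mut wait_opts = WaitForCompactOptions::default();
    /// wait_opts.set_flush(true);
    /// wait_opts.set_timeout(60_000_000); // give up after 60 seconds
    /// ```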
    pub fn set_timeout(&mut self, microseconds: u64) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_timeout(self.inner, microseconds);
        }
    }
}

/// Represents a path where SST files can be stored.
pub struct DBPath {
    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
}

impl DBPath {
    /// Create a new path with a soft size limit of `target_size` bytes.
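    ///
    /// A minimal sketch (the path and size cap are illustrative; the result is
    /// typically passed to `Options::set_db_paths`):
    /// ```
    /// use rust_rocksdb::DBPath;
    ///
    /// let hot = DBPath::new("/tmp/rocksdb_hot", 1024 * 1024 * 1024).unwrap(); // 1 GiB
    /// ```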
    pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
        // Propagate path-encoding failures instead of panicking.
        let p = to_cpath(path.as_ref())?;
        let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
        if dbpath.is_null() {
            Err(Error::new(format!(
                "Could not create path for storing sst files at location: {}",
                path.as_ref().display()
            )))
        } else {
            Ok(DBPath { inner: dbpath })
        }
    }
}

impl Drop for DBPath {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_dbpath_destroy(self.inner);
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::db_options::WriteBufferManager;
    use crate::{Cache, MemtableFactory, Options};

    #[test]
    fn test_enable_statistics() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_dump_period_sec(60);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_memtable_factory() {
        let mut opts = Options::default();
        opts.set_memtable_factory(MemtableFactory::Vector);
        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
        opts.set_memtable_factory(MemtableFactory::HashSkipList {
            bucket_count: 100,
            height: 4,
            branching_factor: 4,
        });
    }

    #[test]
    fn test_use_fsync() {
        let mut opts = Options::default();
        assert!(!opts.get_use_fsync());
        opts.set_use_fsync(true);
        assert!(opts.get_use_fsync());
    }

    #[test]
    fn test_set_stats_persist_period_sec() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_persist_period_sec(5);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_write_buffer_manager() {
        let mut opts = Options::default();
        let lrucache = Cache::new_lru_cache(100);
        let write_buffer_manager =
            WriteBufferManager::new_write_buffer_manager_with_cache(100, false, lrucache);
        assert_eq!(write_buffer_manager.get_buffer_size(), 100);
        assert_eq!(write_buffer_manager.get_usage(), 0);
        assert!(write_buffer_manager.enabled());

        opts.set_write_buffer_manager(&write_buffer_manager);
        drop(opts);

        // WriteBufferManager outlives options
        assert!(write_buffer_manager.enabled());
    }
}