sfzhou_rocksdb/
db_options.rs

1// Copyright 2020 Tyler Neely
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::ffi::CStr;
16use std::path::Path;
17use std::ptr::{null_mut, NonNull};
18use std::slice;
19use std::sync::Arc;
20
21use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};
22
23use crate::column_family::ColumnFamilyTtl;
24use crate::statistics::{Histogram, HistogramData, StatsLevel};
25use crate::{
26    compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
27    compaction_filter_factory::{self, CompactionFilterFactory},
28    comparator::{
29        ComparatorCallback, ComparatorWithTsCallback, CompareFn, CompareTsFn, CompareWithoutTsFn,
30    },
31    db::DBAccess,
32    env::Env,
33    ffi,
34    ffi_util::{from_cstr, to_cpath, CStrLike},
35    merge_operator::{
36        self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
37    },
38    slice_transform::SliceTransform,
39    statistics::Ticker,
40    ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
41};
42
// Owning handle for the C-side `rocksdb_write_buffer_manager_t`;
// the pointer is freed exactly once in this type's `Drop` impl.
pub(crate) struct WriteBufferManagerWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_write_buffer_manager_t>,
}
46
impl Drop for WriteBufferManagerWrapper {
    fn drop(&mut self) {
        // SAFETY: `inner` is non-null by construction (NonNull) and owned by
        // this wrapper; Drop runs at most once, so the destructor is not
        // called twice on the same pointer.
        unsafe {
            ffi::rocksdb_write_buffer_manager_destroy(self.inner.as_ptr());
        }
    }
}
54
/// Cheaply clonable, shared handle to a RocksDB write buffer manager.
/// Clones share the same underlying C object via `Arc`.
#[derive(Clone)]
pub struct WriteBufferManager(pub(crate) Arc<WriteBufferManagerWrapper>);
57
impl WriteBufferManager {
    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
    /// Users can enable this control by 2 ways:
    ///
    /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
    /// 2- Cost the memtable memory usage to block cache so that memory of RocksDB can be capped by the single limit.
    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
    ///
    /// A memory limit is given when creating the write buffer manager object. RocksDB will try to limit the total memory to under this limit.
    ///
    /// If mutable memtable size exceeds about 90% of the limit,
    /// a flush will be triggered on one column family of the DB you are inserting to.
    ///
    /// If the total memory is over the limit, more aggressive flush may also be triggered only if the mutable memtable size also exceeds 50% of the limit.
    /// Both checks are needed because if already more than half memory is being flushed, triggering more flush may not help.
    ///
    /// The total memory is counted as total memory allocated in the arena, even if some of that may not yet be used by memtable.
    ///
    /// buffer_size: the memory limit in bytes.
    /// allow_stall: If set true, it will enable stalling of all writers when memory usage exceeds buffer_size (soft limit).
    ///             It will wait for flush to complete and memory usage to drop down
    pub fn new_write_buffer_manager(buffer_size: size_t, allow_stall: bool) -> Self {
        // NonNull::new(...).unwrap() converts a null return from the C API
        // into a panic rather than a dangling handle.
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_write_buffer_manager_create(buffer_size, allow_stall)
        })
        .unwrap();
        WriteBufferManager(Arc::new(WriteBufferManagerWrapper { inner }))
    }

    /// Users can set up RocksDB to cost memory used by memtables to block cache.
    /// This can happen no matter whether you enable memtable memory limit or not.
    /// This option is added to manage memory (memtables + block cache) under a single limit.
    ///
    /// buffer_size: the memory limit in bytes.
    /// allow_stall: If set true, it will enable stalling of all writers when memory usage exceeds buffer_size (soft limit).
    ///             It will wait for flush to complete and memory usage to drop down
    /// cache: the block cache instance
    pub fn new_write_buffer_manager_with_cache(
        buffer_size: size_t,
        allow_stall: bool,
        cache: Cache,
    ) -> Self {
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_write_buffer_manager_create_with_cache(
                buffer_size,
                cache.0.inner.as_ptr(),
                allow_stall,
            )
        })
        .unwrap();
        WriteBufferManager(Arc::new(WriteBufferManagerWrapper { inner }))
    }

    /// Returns the WriteBufferManager memory usage in bytes.
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_write_buffer_manager_memory_usage(self.0.inner.as_ptr()) }
    }

    /// Returns the current buffer size in bytes.
    pub fn get_buffer_size(&self) -> usize {
        unsafe { ffi::rocksdb_write_buffer_manager_buffer_size(self.0.inner.as_ptr()) }
    }

    /// Set the buffer size in bytes.
    pub fn set_buffer_size(&self, new_size: usize) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_set_buffer_size(self.0.inner.as_ptr(), new_size);
        }
    }

    /// Returns if WriteBufferManager is enabled.
    pub fn enabled(&self) -> bool {
        unsafe { ffi::rocksdb_write_buffer_manager_enabled(self.0.inner.as_ptr()) }
    }

    /// set the allow_stall flag.
    pub fn set_allow_stall(&self, allow_stall: bool) {
        unsafe {
            ffi::rocksdb_write_buffer_manager_set_allow_stall(self.0.inner.as_ptr(), allow_stall);
        }
    }
}
142
// Owning handle for the C-side `rocksdb_cache_t`; freed in `Drop`.
pub(crate) struct CacheWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_cache_t>,
}
146
impl Drop for CacheWrapper {
    fn drop(&mut self) {
        // SAFETY: `inner` is non-null (NonNull) and uniquely owned by this
        // wrapper, so the C destructor runs exactly once.
        unsafe {
            ffi::rocksdb_cache_destroy(self.inner.as_ptr());
        }
    }
}
154
/// Cheaply clonable, shared handle to a RocksDB block cache.
/// Clones share the same underlying C object via `Arc`.
#[derive(Clone)]
pub struct Cache(pub(crate) Arc<CacheWrapper>);
157
158impl Cache {
159    /// Creates an LRU cache with capacity in bytes.
160    pub fn new_lru_cache(capacity: size_t) -> Cache {
161        let inner = NonNull::new(unsafe { ffi::rocksdb_cache_create_lru(capacity) }).unwrap();
162        Cache(Arc::new(CacheWrapper { inner }))
163    }
164
165    /// Creates an LRU cache with custom options.
166    pub fn new_lru_cache_opts(opts: &LruCacheOptions) -> Cache {
167        let inner =
168            NonNull::new(unsafe { ffi::rocksdb_cache_create_lru_opts(opts.inner) }).unwrap();
169        Cache(Arc::new(CacheWrapper { inner }))
170    }
171
172    /// Creates a HyperClockCache with capacity in bytes.
173    ///
174    /// `estimated_entry_charge` is an important tuning parameter. The optimal
175    /// choice at any given time is
176    /// `(cache.get_usage() - 64 * cache.get_table_address_count()) /
177    /// cache.get_occupancy_count()`, or approximately `cache.get_usage() /
178    /// cache.get_occupancy_count()`.
179    ///
180    /// However, the value cannot be changed dynamically, so as the cache
181    /// composition changes at runtime, the following tradeoffs apply:
182    ///
183    /// * If the estimate is substantially too high (e.g., 25% higher),
184    ///   the cache may have to evict entries to prevent load factors that
185    ///   would dramatically affect lookup times.
186    /// * If the estimate is substantially too low (e.g., less than half),
187    ///   then meta data space overhead is substantially higher.
188    ///
189    /// The latter is generally preferable, and picking the larger of
190    /// block size and meta data block size is a reasonable choice that
191    /// errs towards this side.
192    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
193        Cache(Arc::new(CacheWrapper {
194            inner: NonNull::new(unsafe {
195                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
196            })
197            .unwrap(),
198        }))
199    }
200
201    /// Returns the cache memory usage in bytes.
202    pub fn get_usage(&self) -> usize {
203        unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
204    }
205
206    /// Returns the pinned memory usage in bytes.
207    pub fn get_pinned_usage(&self) -> usize {
208        unsafe { ffi::rocksdb_cache_get_pinned_usage(self.0.inner.as_ptr()) }
209    }
210
211    /// Sets cache capacity in bytes.
212    pub fn set_capacity(&mut self, capacity: size_t) {
213        unsafe {
214            ffi::rocksdb_cache_set_capacity(self.0.inner.as_ptr(), capacity);
215        }
216    }
217}
218
// Shared handles that must outlive the `DB` opened with these options, as
// the struct name indicates. Kept alongside the raw options pointer so the
// referenced objects are not destroyed while the DB may still use them.
#[derive(Default)]
pub(crate) struct OptionsMustOutliveDB {
    env: Option<Env>,
    row_cache: Option<Cache>,
    blob_cache: Option<Cache>,
    block_based: Option<BlockBasedOptionsMustOutliveDB>,
    write_buffer_manager: Option<WriteBufferManager>,
}
227
228impl OptionsMustOutliveDB {
229    pub(crate) fn clone(&self) -> Self {
230        Self {
231            env: self.env.clone(),
232            row_cache: self.row_cache.clone(),
233            blob_cache: self.blob_cache.clone(),
234            block_based: self
235                .block_based
236                .as_ref()
237                .map(BlockBasedOptionsMustOutliveDB::clone),
238            write_buffer_manager: self.write_buffer_manager.clone(),
239        }
240    }
241}
242
// Block-based-table handles that must outlive the `DB` (currently just the
// block cache set via `BlockBasedOptions::set_block_cache`).
#[derive(Default)]
struct BlockBasedOptionsMustOutliveDB {
    block_cache: Option<Cache>,
}
247
248impl BlockBasedOptionsMustOutliveDB {
249    fn clone(&self) -> Self {
250        Self {
251            block_cache: self.block_cache.clone(),
252        }
253    }
254}
255
/// Database-wide options around performance and behavior.
///
/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
/// and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use sfzhou_rocksdb::{Options, DB};
/// use sfzhou_rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///    let path = "path/for/rocksdb/storageX";
///    let mut opts = Options::default();
///    opts.create_if_missing(true);
///    opts.set_max_open_files(10000);
///    opts.set_use_fsync(false);
///    opts.set_bytes_per_sync(8388608);
///    opts.optimize_for_point_lookup(1024);
///    opts.set_table_cache_num_shard_bits(6);
///    opts.set_max_write_buffer_number(32);
///    opts.set_write_buffer_size(536870912);
///    opts.set_target_file_size_base(1073741824);
///    opts.set_min_write_buffer_number_to_merge(4);
///    opts.set_level_zero_stop_writes_trigger(2000);
///    opts.set_level_zero_slowdown_writes_trigger(0);
///    opts.set_compaction_style(DBCompactionStyle::Universal);
///    opts.set_disable_auto_compactions(true);
///
///    DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    // Raw owned pointer to the C options object; freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    // Shared handles (env, caches, write buffer manager) that must stay
    // alive as long as a DB opened with these options.
    pub(crate) outlive: OptionsMustOutliveDB,
}
292
/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use sfzhou_rocksdb::{DB, Options, WriteBatch, WriteOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY1")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY1");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    // Raw owned pointer to the C write-options object; freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
}
325
/// Configuration for an LRU cache, consumed by [`Cache::new_lru_cache_opts`].
pub struct LruCacheOptions {
    // Raw owned pointer to the C options object; freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_lru_cache_options_t,
}
329
/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use sfzhou_rocksdb::{DB, Options, FlushOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY2")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY2");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    // Raw owned pointer to the C flush-options object; freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}
357
/// For configuring block-based file storage.
pub struct BlockBasedOptions {
    // Raw owned pointer to the C options object; freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    // Holds the block cache handle set via `set_block_cache` so it outlives
    // any DB using these options.
    outlive: BlockBasedOptionsMustOutliveDB,
}
363
/// Options that control read operations (point lookups and iteration).
pub struct ReadOptions {
    pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
    // The `ReadOptions` owns a copy of the timestamp and iteration bounds.
    // This is necessary to ensure the pointers we pass over the FFI live as
    // long as the `ReadOptions`. This way, when performing the read operation,
    // the pointers are guaranteed to be valid.
    timestamp: Option<Vec<u8>>,
    iter_start_ts: Option<Vec<u8>>,
    iterate_upper_bound: Option<Vec<u8>>,
    iterate_lower_bound: Option<Vec<u8>>,
}
375
/// Configuration of cuckoo-based storage.
pub struct CuckooTableOptions {
    // Raw owned pointer to the C options object; freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}
380
/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use sfzhou_rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// let tempdir = tempfile::Builder::new()
///     .tempdir()
///     .expect("Failed to create temporary folder for the _path_for_sst_file");
/// let path1 = tempdir.path().join("_path_for_sst_file");
/// writer.open(path1.clone()).unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let tempdir2 = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY3")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY3");
/// let path2 = tempdir2.path();
/// {
///   let db = DB::open_default(&path2).unwrap();
///   let mut ingest_opts = IngestExternalFileOptions::default();
///   ingest_opts.set_move_files(true);
///   db.ingest_external_file_opts(&ingest_opts, vec![path1]).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path2);
/// ```
pub struct IngestExternalFileOptions {
    // Raw owned pointer to the C options object; freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}
416
// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for LruCacheOptions {}
unsafe impl Send for FlushOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CacheWrapper {}
unsafe impl Send for CompactOptions {}
unsafe impl Send for WriteBufferManagerWrapper {}

// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference.
// NOTE(review): `CompactOptions` is defined elsewhere in this file; these impls
// assume it follows the same single-owned-FFI-pointer pattern — confirm there.
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for LruCacheOptions {}
unsafe impl Sync for FlushOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CacheWrapper {}
unsafe impl Sync for CompactOptions {}
unsafe impl Sync for WriteBufferManagerWrapper {}
445
impl Drop for Options {
    fn drop(&mut self) {
        // SAFETY: `inner` is owned by this struct; Drop runs at most once.
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}
453
454impl Clone for Options {
455    fn clone(&self) -> Self {
456        let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
457        assert!(!inner.is_null(), "Could not copy RocksDB options");
458
459        Self {
460            inner,
461            outlive: self.outlive.clone(),
462        }
463    }
464}
465
// Each options type exclusively owns its raw pointer, so the matching C
// destructor is invoked exactly once when the Rust value is dropped.
impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}

impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}

impl Drop for FlushOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}

impl Drop for WriteOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}

impl Drop for LruCacheOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_lru_cache_options_destroy(self.inner);
        }
    }
}

impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_readoptions_destroy(self.inner);
        }
    }
}

impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}
521
impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            // Widening `usize -> u64` cast: lossless on all supported targets.
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
    ///
    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
    pub fn set_partition_filters(&mut self, size: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(self.inner, c_uchar::from(size));
        }
    }

    /// Sets global cache for blocks (user data is stored in a set of blocks, and
    /// a block is the unit of reading from disk).
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        // Retain a handle so the cache outlives any DB opened with these options.
        self.outlive.block_cache = Some(cache.clone());
    }

    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
        }
    }

    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
    /// policy to reduce disk reads.
    ///
    /// `block_based = true` selects the block-based filter variant, `false`
    /// the full-file filter variant (see the wiki link above).
    ///
    /// # Examples
    ///
    /// ```
    /// use sfzhou_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// ```
    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
        unsafe {
            let bloom = if block_based {
                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
            } else {
                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
            };

            // The options object takes ownership of the filter policy.
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
        }
    }

    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use sfzhou_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use sfzhou_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// If cache_index_and_filter_blocks is enabled, cache index and filter blocks with high priority.
    /// If set to true, depending on implementation of block cache,
    /// index and filter blocks may be less likely to be evicted than data blocks.
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Defines the index type to be used for SS-table lookups.
    ///
    /// # Examples
    ///
    /// ```
    /// use sfzhou_rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
    /// ```
    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
        // The Rust enum discriminant maps onto the C enum value.
        let index = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// the top-level index of partitioned filter and index blocks are stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in LSM tree.
    ///
    /// Default: false.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    ///
    /// See full [list](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/table.h#L493-L521)
    /// of the supported versions.
    ///
    /// Default: 5.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }

    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }

    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }

    /// Set the data block index type for point lookups:
    ///  `DataBlockIndexType::BinarySearch` to use binary search within the data block.
    ///  `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
    ///  the normal binary search.
    ///
    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
    ///
    /// Default: `BinarySearch`
    /// # Examples
    ///
    /// ```
    /// use sfzhou_rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
    /// block_opts.set_data_block_hash_ratio(0.85);
    /// ```
    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
        let index_t = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
        }
    }

    /// Set the data block hash index utilization ratio.
    ///
    /// The smaller the utilization ratio, the less hash collisions happen, and so reduce the risk for a
    /// point lookup to fall back to binary search due to the collisions. A small ratio means faster
    /// lookup at the price of more space overhead.
    ///
    /// Default: 0.75
    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
        }
    }

    /// If false, place only prefixes in the filter, not whole keys.
    ///
    /// Defaults to true.
    pub fn set_whole_key_filtering(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
        }
    }

    /// Use the specified checksum type.
    /// Newly created table files will be protected with this checksum type.
    /// Old table files will still be readable, even though they have different checksum type.
    pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
        unsafe {
            // The C API encodes the checksum type as a single-byte value.
            ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
        }
    }

    /// If true, generate Bloom/Ribbon filters that minimize memory internal
    /// fragmentation.
    /// See official [wiki](
    /// https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter#reducing-internal-fragmentation)
    /// for more information.
    ///
    /// Defaults to false.
    /// # Examples
    ///
    /// ```
    /// use sfzhou_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// opts.set_optimize_filters_for_memory(true);
    /// ```
    pub fn set_optimize_filters_for_memory(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_optimize_filters_for_memory(
                self.inner,
                c_uchar::from(v),
            );
        }
    }
}
829
830impl Default for BlockBasedOptions {
831    fn default() -> Self {
832        let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
833        assert!(
834            !block_opts.is_null(),
835            "Could not create RocksDB block based options"
836        );
837
838        Self {
839            inner: block_opts,
840            outlive: BlockBasedOptionsMustOutliveDB::default(),
841        }
842    }
843}
844
845impl CuckooTableOptions {
846    /// Determines the utilization of hash tables. Smaller values
847    /// result in larger hash tables with fewer collisions.
848    /// Default: 0.9
849    pub fn set_hash_ratio(&mut self, ratio: f64) {
850        unsafe {
851            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
852        }
853    }
854
855    /// A property used by builder to determine the depth to go to
856    /// to search for a path to displace elements in case of
857    /// collision. See Builder.MakeSpaceForKey method. Higher
858    /// values result in more efficient hash tables with fewer
859    /// lookups but take more time to build.
860    /// Default: 100
861    pub fn set_max_search_depth(&mut self, depth: u32) {
862        unsafe {
863            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
864        }
865    }
866
867    /// In case of collision while inserting, the builder
868    /// attempts to insert in the next cuckoo_block_size
869    /// locations before skipping over to the next Cuckoo hash
870    /// function. This makes lookups more cache friendly in case
871    /// of collisions.
872    /// Default: 5
873    pub fn set_cuckoo_block_size(&mut self, size: u32) {
874        unsafe {
875            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
876        }
877    }
878
879    /// If this option is enabled, user key is treated as uint64_t and its value
880    /// is used as hash value directly. This option changes builder's behavior.
881    /// Reader ignore this option and behave according to what specified in
882    /// table property.
883    /// Default: false
884    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
885        unsafe {
886            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
887        }
888    }
889
890    /// If this option is set to true, module is used during hash calculation.
891    /// This often yields better space efficiency at the cost of performance.
892    /// If this option is set to false, # of entries in table is constrained to
893    /// be power of two, and bit and is used to calculate hash, which is faster in general.
894    /// Default: true
895    pub fn set_use_module_hash(&mut self, flag: bool) {
896        unsafe {
897            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
898        }
899    }
900}
901
902impl Default for CuckooTableOptions {
903    fn default() -> Self {
904        let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
905        assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");
906
907        Self { inner: opts }
908    }
909}
910
/// Verbosity of the LOG.
///
/// Passed through the C API as an `i32` (`#[repr(i32)]`), so the variant
/// order and the explicit `Debug = 0` starting discriminant must not change.
/// NOTE(review): presumably mirrors RocksDB's `InfoLogLevel` values — confirm
/// against the C++ headers before adding variants.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}
922
923impl Options {
    /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
    /// latest RocksDB options file stored in the specified rocksdb database.
    ///
    /// *IMPORTANT*:
    /// ROCKSDB DOES NOT STORE cf ttl in the options file. If you have set it via
    /// [`ColumnFamilyDescriptor::new_with_ttl`] then you need to set it again after loading the options file.
    /// Ttl will be set to [`ColumnFamilyTtl::Disabled`] for all column families for your safety.
    ///
    /// # Errors
    ///
    /// Returns an error if `path` cannot be converted to a C string or if
    /// RocksDB fails to load the options file.
    pub fn load_latest<P: AsRef<Path>>(
        path: P,
        env: Env,
        ignore_unknown_options: bool,
        cache: Cache,
    ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
        let path = to_cpath(path)?;
        // Out-parameters filled in by the C call below.
        let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
        let mut num_column_families: usize = 0;
        let mut column_family_names: *mut *mut c_char = null_mut();
        let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
        unsafe {
            ffi_try!(ffi::rocksdb_load_latest_options(
                path.as_ptr(),
                env.0.inner,
                ignore_unknown_options,
                cache.0.inner.as_ptr(),
                &mut db_options,
                &mut num_column_families,
                &mut column_family_names,
                &mut column_family_options,
            ));
        }
        // Take ownership of the DB-wide options handle returned by RocksDB.
        let options = Options {
            inner: db_options,
            outlive: OptionsMustOutliveDB::default(),
        };
        // `read_column_descriptors` consumes and frees both C arrays, so the
        // raw pointers must not be used after this call.
        let column_families = unsafe {
            Options::read_column_descriptors(
                num_column_families,
                column_family_names,
                column_family_options,
            )
        };
        Ok((options, column_families))
    }
967
    /// Read column descriptors from C pointers.
    ///
    /// Builds one [`ColumnFamilyDescriptor`] per column family (ttl is always
    /// [`ColumnFamilyTtl::Disabled`]; see [`Options::load_latest`]), then
    /// frees the C-allocated name strings and both arrays, so the caller must
    /// not touch the pointers afterwards.
    ///
    /// # Safety
    ///
    /// `column_family_names` and `column_family_options` must each point to
    /// `num_column_families` valid pointers allocated by RocksDB (as produced
    /// by `rocksdb_load_latest_options`); ownership of all of them is
    /// transferred to this function.
    #[inline]
    unsafe fn read_column_descriptors(
        num_column_families: usize,
        column_family_names: *mut *mut c_char,
        column_family_options: *mut *mut ffi::rocksdb_options_t,
    ) -> Vec<ColumnFamilyDescriptor> {
        // Copy each C name into an owned Rust String.
        let column_family_names_iter =
            slice::from_raw_parts(column_family_names, num_column_families)
                .iter()
                .map(|ptr| from_cstr(*ptr));
        // Each per-CF options pointer becomes an owned `Options`, which frees
        // the underlying handle when dropped.
        let column_family_options_iter =
            slice::from_raw_parts(column_family_options, num_column_families)
                .iter()
                .map(|ptr| Options {
                    inner: *ptr,
                    outlive: OptionsMustOutliveDB::default(),
                });
        let column_descriptors = column_family_names_iter
            .zip(column_family_options_iter)
            .map(|(name, options)| ColumnFamilyDescriptor {
                name,
                options,
                ttl: ColumnFamilyTtl::Disabled,
            })
            .collect::<Vec<_>>();
        // Free the C name strings (already copied above) and then the two
        // arrays themselves; the per-CF options handles are now owned by the
        // descriptors and must NOT be freed here.
        slice::from_raw_parts(column_family_names, num_column_families)
            .iter()
            .for_each(|ptr| ffi::rocksdb_free(*ptr as *mut c_void));
        ffi::rocksdb_free(column_family_names as *mut c_void);
        ffi::rocksdb_free(column_family_options as *mut c_void);
        column_descriptors
    }
1002
1003    /// By default, RocksDB uses only one background thread for flush and
1004    /// compaction. Calling this function will set it up such that total of
1005    /// `total_threads` is used. Good value for `total_threads` is the number of
1006    /// cores. You almost definitely want to call this function if your system is
1007    /// bottlenecked by RocksDB.
1008    ///
1009    /// # Examples
1010    ///
1011    /// ```
1012    /// use sfzhou_rocksdb::Options;
1013    ///
1014    /// let mut opts = Options::default();
1015    /// opts.increase_parallelism(3);
1016    /// ```
1017    pub fn increase_parallelism(&mut self, parallelism: i32) {
1018        unsafe {
1019            ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
1020        }
1021    }
1022
1023    /// Optimize level style compaction.
1024    ///
1025    /// Default values for some parameters in `Options` are not optimized for heavy
1026    /// workloads and big datasets, which means you might observe write stalls under
1027    /// some conditions.
1028    ///
1029    /// This can be used as one of the starting points for tuning RocksDB options in
1030    /// such cases.
1031    ///
1032    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
1033    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
1034    /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override if those
1035    /// parameters were set before.
1036    ///
1037    /// It sets buffer sizes so that memory consumption would be constrained by
1038    /// `memtable_memory_budget`.
1039    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
1040        unsafe {
1041            ffi::rocksdb_options_optimize_level_style_compaction(
1042                self.inner,
1043                memtable_memory_budget as u64,
1044            );
1045        }
1046    }
1047
1048    /// Optimize universal style compaction.
1049    ///
1050    /// Default values for some parameters in `Options` are not optimized for heavy
1051    /// workloads and big datasets, which means you might observe write stalls under
1052    /// some conditions.
1053    ///
1054    /// This can be used as one of the starting points for tuning RocksDB options in
1055    /// such cases.
1056    ///
1057    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
1058    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
1059    /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override if those
1060    /// parameters were set before.
1061    ///
1062    /// It sets buffer sizes so that memory consumption would be constrained by
1063    /// `memtable_memory_budget`.
1064    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
1065        unsafe {
1066            ffi::rocksdb_options_optimize_universal_style_compaction(
1067                self.inner,
1068                memtable_memory_budget as u64,
1069            );
1070        }
1071    }
1072
1073    /// If true, the database will be created if it is missing.
1074    ///
1075    /// Default: `false`
1076    ///
1077    /// # Examples
1078    ///
1079    /// ```
1080    /// use sfzhou_rocksdb::Options;
1081    ///
1082    /// let mut opts = Options::default();
1083    /// opts.create_if_missing(true);
1084    /// ```
1085    pub fn create_if_missing(&mut self, create_if_missing: bool) {
1086        unsafe {
1087            ffi::rocksdb_options_set_create_if_missing(
1088                self.inner,
1089                c_uchar::from(create_if_missing),
1090            );
1091        }
1092    }
1093
1094    /// If true, any column families that didn't exist when opening the database
1095    /// will be created.
1096    ///
1097    /// Default: `false`
1098    ///
1099    /// # Examples
1100    ///
1101    /// ```
1102    /// use sfzhou_rocksdb::Options;
1103    ///
1104    /// let mut opts = Options::default();
1105    /// opts.create_missing_column_families(true);
1106    /// ```
1107    pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
1108        unsafe {
1109            ffi::rocksdb_options_set_create_missing_column_families(
1110                self.inner,
1111                c_uchar::from(create_missing_cfs),
1112            );
1113        }
1114    }
1115
1116    /// Specifies whether an error should be raised if the database already exists.
1117    ///
1118    /// Default: false
1119    pub fn set_error_if_exists(&mut self, enabled: bool) {
1120        unsafe {
1121            ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
1122        }
1123    }
1124
1125    /// Enable/disable paranoid checks.
1126    ///
1127    /// If true, the implementation will do aggressive checking of the
1128    /// data it is processing and will stop early if it detects any
1129    /// errors. This may have unforeseen ramifications: for example, a
1130    /// corruption of one DB entry may cause a large number of entries to
1131    /// become unreadable or for the entire DB to become unopenable.
1132    /// If any of the  writes to the database fails (Put, Delete, Merge, Write),
1133    /// the database will switch to read-only mode and fail all other
1134    /// Write operations.
1135    ///
1136    /// Default: false
1137    pub fn set_paranoid_checks(&mut self, enabled: bool) {
1138        unsafe {
1139            ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
1140        }
1141    }
1142
1143    /// A list of paths where SST files can be put into, with its target size.
1144    /// Newer data is placed into paths specified earlier in the vector while
1145    /// older data gradually moves to paths specified later in the vector.
1146    ///
1147    /// For example, you have a flash device with 10GB allocated for the DB,
1148    /// as well as a hard drive of 2TB, you should config it to be:
1149    ///   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
1150    ///
1151    /// The system will try to guarantee data under each path is close to but
1152    /// not larger than the target size. But current and future file sizes used
1153    /// by determining where to place a file are based on best-effort estimation,
1154    /// which means there is a chance that the actual size under the directory
1155    /// is slightly more than target size under some workloads. User should give
1156    /// some buffer room for those cases.
1157    ///
1158    /// If none of the paths has sufficient room to place a file, the file will
1159    /// be placed to the last path anyway, despite to the target size.
1160    ///
1161    /// Placing newer data to earlier paths is also best-efforts. User should
1162    /// expect user files to be placed in higher levels in some extreme cases.
1163    ///
1164    /// If left empty, only one path will be used, which is `path` passed when
1165    /// opening the DB.
1166    ///
1167    /// Default: empty
1168    pub fn set_db_paths(&mut self, paths: &[DBPath]) {
1169        let mut paths: Vec<_> = paths.iter().map(|path| path.inner.cast_const()).collect();
1170        let num_paths = paths.len();
1171        unsafe {
1172            ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
1173        }
1174    }
1175
1176    /// Use the specified object to interact with the environment,
1177    /// e.g. to read/write files, schedule background work, etc. In the near
1178    /// future, support for doing storage operations such as read/write files
1179    /// through env will be deprecated in favor of file_system.
1180    ///
1181    /// Default: Env::default()
1182    pub fn set_env(&mut self, env: &Env) {
1183        unsafe {
1184            ffi::rocksdb_options_set_env(self.inner, env.0.inner);
1185        }
1186        self.outlive.env = Some(env.clone());
1187    }
1188
1189    /// Sets the compression algorithm that will be used for compressing blocks.
1190    ///
1191    /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
1192    /// snappy feature is not enabled).
1193    ///
1194    /// # Examples
1195    ///
1196    /// ```
1197    /// use sfzhou_rocksdb::{Options, DBCompressionType};
1198    ///
1199    /// let mut opts = Options::default();
1200    /// opts.set_compression_type(DBCompressionType::Snappy);
1201    /// ```
1202    pub fn set_compression_type(&mut self, t: DBCompressionType) {
1203        unsafe {
1204            ffi::rocksdb_options_set_compression(self.inner, t as c_int);
1205        }
1206    }
1207
1208    /// Number of threads for parallel compression.
1209    /// Parallel compression is enabled only if threads > 1.
1210    /// THE FEATURE IS STILL EXPERIMENTAL
1211    ///
1212    /// See [code](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/advanced_options.h#L116-L127)
1213    /// for more information.
1214    ///
1215    /// Default: 1
1216    ///
1217    /// Examples
1218    ///
1219    /// ```
1220    /// use sfzhou_rocksdb::{Options, DBCompressionType};
1221    ///
1222    /// let mut opts = Options::default();
1223    /// opts.set_compression_type(DBCompressionType::Zstd);
1224    /// opts.set_compression_options_parallel_threads(3);
1225    /// ```
1226    pub fn set_compression_options_parallel_threads(&mut self, num: i32) {
1227        unsafe {
1228            ffi::rocksdb_options_set_compression_options_parallel_threads(self.inner, num);
1229        }
1230    }
1231
1232    /// Sets the compression algorithm that will be used for compressing WAL.
1233    ///
1234    /// At present, only ZSTD compression is supported!
1235    ///
1236    /// Default: `DBCompressionType::None`
1237    ///
1238    /// # Examples
1239    ///
1240    /// ```
1241    /// use sfzhou_rocksdb::{Options, DBCompressionType};
1242    ///
1243    /// let mut opts = Options::default();
1244    /// opts.set_wal_compression_type(DBCompressionType::Zstd);
1245    /// // Or None to disable it
1246    /// opts.set_wal_compression_type(DBCompressionType::None);
1247    /// ```
1248    pub fn set_wal_compression_type(&mut self, t: DBCompressionType) {
1249        match t {
1250            DBCompressionType::None | DBCompressionType::Zstd => unsafe {
1251                ffi::rocksdb_options_set_wal_compression(self.inner, t as c_int);
1252            },
1253            other => unimplemented!("{:?} is not supported for WAL compression", other),
1254        }
1255    }
1256
1257    /// Sets the bottom-most compression algorithm that will be used for
1258    /// compressing blocks at the bottom-most level.
1259    ///
1260    /// Note that to actually enable bottom-most compression configuration after
1261    /// setting the compression type, it needs to be enabled by calling
1262    /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
1263    /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with `enabled` argument
1264    /// set to `true`.
1265    ///
1266    /// # Examples
1267    ///
1268    /// ```
1269    /// use sfzhou_rocksdb::{Options, DBCompressionType};
1270    ///
1271    /// let mut opts = Options::default();
1272    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1273    /// opts.set_bottommost_zstd_max_train_bytes(0, true);
1274    /// ```
1275    pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
1276        unsafe {
1277            ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
1278        }
1279    }
1280
1281    /// Different levels can have different compression policies. There
1282    /// are cases where most lower levels would like to use quick compression
1283    /// algorithms while the higher levels (which have more data) use
1284    /// compression algorithms that have better compression but could
1285    /// be slower. This array, if non-empty, should have an entry for
1286    /// each level of the database; these override the value specified in
1287    /// the previous field 'compression'.
1288    ///
1289    /// # Examples
1290    ///
1291    /// ```
1292    /// use sfzhou_rocksdb::{Options, DBCompressionType};
1293    ///
1294    /// let mut opts = Options::default();
1295    /// opts.set_compression_per_level(&[
1296    ///     DBCompressionType::None,
1297    ///     DBCompressionType::None,
1298    ///     DBCompressionType::Snappy,
1299    ///     DBCompressionType::Snappy,
1300    ///     DBCompressionType::Snappy
1301    /// ]);
1302    /// ```
1303    pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
1304        unsafe {
1305            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
1306            ffi::rocksdb_options_set_compression_per_level(
1307                self.inner,
1308                level_types.as_mut_ptr(),
1309                level_types.len() as size_t,
1310            );
1311        }
1312    }
1313
1314    /// Maximum size of dictionaries used to prime the compression library.
1315    /// Enabling dictionary can improve compression ratios when there are
1316    /// repetitions across data blocks.
1317    ///
1318    /// The dictionary is created by sampling the SST file data. If
1319    /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
1320    /// dictionary generator. Otherwise, the random samples are used directly as
1321    /// the dictionary.
1322    ///
1323    /// When compression dictionary is disabled, we compress and write each block
1324    /// before buffering data for the next one. When compression dictionary is
1325    /// enabled, we buffer all SST file data in-memory so we can sample it, as data
1326    /// can only be compressed and written after the dictionary has been finalized.
1327    /// So users of this feature may see increased memory usage.
1328    ///
1329    /// Default: `0`
1330    ///
1331    /// # Examples
1332    ///
1333    /// ```
1334    /// use sfzhou_rocksdb::Options;
1335    ///
1336    /// let mut opts = Options::default();
1337    /// opts.set_compression_options(4, 5, 6, 7);
1338    /// ```
1339    pub fn set_compression_options(
1340        &mut self,
1341        w_bits: c_int,
1342        level: c_int,
1343        strategy: c_int,
1344        max_dict_bytes: c_int,
1345    ) {
1346        unsafe {
1347            ffi::rocksdb_options_set_compression_options(
1348                self.inner,
1349                w_bits,
1350                level,
1351                strategy,
1352                max_dict_bytes,
1353            );
1354        }
1355    }
1356
1357    /// Sets compression options for blocks at the bottom-most level.  Meaning
1358    /// of all settings is the same as in [`set_compression_options`](#method.set_compression_options) method but
1359    /// affect only the bottom-most compression which is set using
1360    /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
1361    ///
1362    /// # Examples
1363    ///
1364    /// ```
1365    /// use sfzhou_rocksdb::{Options, DBCompressionType};
1366    ///
1367    /// let mut opts = Options::default();
1368    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1369    /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
1370    /// ```
1371    pub fn set_bottommost_compression_options(
1372        &mut self,
1373        w_bits: c_int,
1374        level: c_int,
1375        strategy: c_int,
1376        max_dict_bytes: c_int,
1377        enabled: bool,
1378    ) {
1379        unsafe {
1380            ffi::rocksdb_options_set_bottommost_compression_options(
1381                self.inner,
1382                w_bits,
1383                level,
1384                strategy,
1385                max_dict_bytes,
1386                c_uchar::from(enabled),
1387            );
1388        }
1389    }
1390
1391    /// Sets maximum size of training data passed to zstd's dictionary trainer. Using zstd's
1392    /// dictionary trainer can achieve even better compression ratio improvements than using
1393    /// `max_dict_bytes` alone.
1394    ///
1395    /// The training data will be used to generate a dictionary of max_dict_bytes.
1396    ///
1397    /// Default: 0.
1398    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
1399        unsafe {
1400            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
1401        }
1402    }
1403
1404    /// Sets maximum size of training data passed to zstd's dictionary trainer
1405    /// when compressing the bottom-most level. Using zstd's dictionary trainer
1406    /// can achieve even better compression ratio improvements than using
1407    /// `max_dict_bytes` alone.
1408    ///
1409    /// The training data will be used to generate a dictionary of
1410    /// `max_dict_bytes`.
1411    ///
1412    /// Default: 0.
1413    pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
1414        unsafe {
1415            ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
1416                self.inner,
1417                value,
1418                c_uchar::from(enabled),
1419            );
1420        }
1421    }
1422
1423    /// If non-zero, we perform bigger reads when doing compaction. If you're
1424    /// running RocksDB on spinning disks, you should set this to at least 2MB.
1425    /// That way RocksDB's compaction is doing sequential instead of random reads.
1426    ///
1427    /// Default: 2 * 1024 * 1024 (2 MB)
1428    pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
1429        unsafe {
1430            ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
1431        }
1432    }
1433
1434    /// Allow RocksDB to pick dynamic base of bytes for levels.
1435    /// With this feature turned on, RocksDB will automatically adjust max bytes for each level.
1436    /// The goal of this feature is to have lower bound on size amplification.
1437    ///
1438    /// Default: false.
1439    pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
1440        unsafe {
1441            ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
1442                self.inner,
1443                c_uchar::from(v),
1444            );
1445        }
1446    }
1447
1448    /// This option has different meanings for different compaction styles:
1449    ///
1450    /// Leveled: files older than `periodic_compaction_seconds` will be picked up
1451    /// for compaction and will be re-written to the same level as they were
1452    /// before.
1453    ///
1454    /// FIFO: not supported. Setting this option has no effect for FIFO compaction.
1455    ///
1456    /// Universal: when there are files older than `periodic_compaction_seconds`,
1457    /// rocksdb will try to do as large a compaction as possible including the
1458    /// last level. Such compaction is only skipped if only last level is to
1459    /// be compacted and no file in last level is older than
1460    /// `periodic_compaction_seconds`. See more in
1461    /// UniversalCompactionBuilder::PickPeriodicCompaction().
1462    /// For backward compatibility, the effective value of this option takes
1463    /// into account the value of option `ttl`. The logic is as follows:
1464    ///    - both options are set to 30 days if they have the default value.
1465    ///    - if both options are zero, zero is picked. Otherwise, we take the min
1466    ///    value among non-zero options values (i.e. takes the stricter limit).
1467    ///
1468    /// One main use of the feature is to make sure a file goes through compaction
1469    /// filters periodically. Users can also use the feature to clear up SST
1470    /// files using old format.
1471    ///
1472    /// A file's age is computed by looking at file_creation_time or creation_time
1473    /// table properties in order, if they have valid non-zero values; if not, the
1474    /// age is based on the file's last modified time (given by the underlying
1475    /// Env).
1476    ///
1477    /// This option only supports block based table format for any compaction
1478    /// style.
1479    ///
1480    /// unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
1481    ///
1482    /// Values:
1483    /// 0: Turn off Periodic compactions.
1484    /// UINT64_MAX - 1 (0xfffffffffffffffe) is special flag to allow RocksDB to
1485    /// pick default.
1486    ///
1487    /// Default: 30 days if using block based table format + compaction filter +
1488    /// leveled compaction or block based table format + universal compaction.
1489    /// 0 (disabled) otherwise.
1490    ///
1491    pub fn set_periodic_compaction_seconds(&mut self, secs: u64) {
1492        unsafe {
1493            ffi::rocksdb_options_set_periodic_compaction_seconds(self.inner, secs);
1494        }
1495    }
1496
    /// Sets a merge operator whose partial-merge behavior equals its
    /// full-merge behavior, i.e. the merge function is associative.
    ///
    /// The same `full_merge_fn` is registered for both full and partial
    /// merges (hence the `Clone` bound). See
    /// [`set_merge_operator`](#method.set_merge_operator) to supply distinct
    /// functions.
    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
        &mut self,
        name: impl CStrLike,
        full_merge_fn: F,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: name.into_c_string().unwrap(),
            full_merge_fn: full_merge_fn.clone(),
            partial_merge_fn: full_merge_fn,
        });

        unsafe {
            // Ownership of `cb` is handed to the C side via `Box::into_raw`;
            // presumably `destructor_callback` reclaims and drops it when the
            // operator is destroyed — confirm in merge_operator module.
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, F>),
                Some(full_merge_callback::<F, F>),
                Some(partial_merge_callback::<F, F>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, F>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }
1520
    /// Sets a merge operator with distinct full-merge and partial-merge
    /// functions.
    ///
    /// `full_merge_fn` combines an existing value with all pending merge
    /// operands; `partial_merge_fn` combines merge operands with each other.
    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
        &mut self,
        name: impl CStrLike,
        full_merge_fn: F,
        partial_merge_fn: PF,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: name.into_c_string().unwrap(),
            full_merge_fn,
            partial_merge_fn,
        });

        unsafe {
            // Ownership of `cb` is handed to the C side via `Box::into_raw`;
            // presumably `destructor_callback` reclaims and drops it when the
            // operator is destroyed — confirm in merge_operator module.
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, PF>),
                Some(full_merge_callback::<F, PF>),
                Some(partial_merge_callback::<F, PF>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, PF>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }
1545
    /// Deprecated alias for
    /// [`set_merge_operator_associative`](#method.set_merge_operator_associative).
    #[deprecated(
        since = "0.5.0",
        note = "add_merge_operator has been renamed to set_merge_operator"
    )]
    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
        self.set_merge_operator_associative(name, merge_fn);
    }
1553
    /// Sets a compaction filter used to determine if entries should be kept, changed,
    /// or removed during compaction.
    ///
    /// An example use case is to remove entries with an expired TTL.
    ///
    /// If you take a snapshot of the database, only values written since the last
    /// snapshot will be passed through the compaction filter.
    ///
    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
    /// simultaneously.
    pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
    where
        F: CompactionFilterFn + Send + 'static,
    {
        let cb = Box::new(CompactionFilterCallback {
            name: name.into_c_string().unwrap(),
            filter_fn,
        });

        unsafe {
            // Ownership of `cb` is handed to the C side via `Box::into_raw`;
            // presumably `destructor_callback` reclaims and drops it when the
            // filter is destroyed — confirm in compaction_filter module.
            let cf = ffi::rocksdb_compactionfilter_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
            );
            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
        }
    }
1583
    /// This is a factory that provides compaction filter objects which allow
    /// an application to modify/delete a key-value during background compaction.
    ///
    /// A new filter will be created on each compaction run.  If multithreaded
    /// compaction is being used, each created CompactionFilter will only be used
    /// from a single thread and so does not need to be thread-safe.
    ///
    /// Default: nullptr
    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
    where
        F: CompactionFilterFactory + 'static,
    {
        let factory = Box::new(factory);

        unsafe {
            // Ownership of `factory` is handed to the C side via
            // `Box::into_raw`; presumably `destructor_callback` reclaims and
            // drops it — confirm in compaction_filter_factory module.
            let cff = ffi::rocksdb_compactionfilterfactory_create(
                Box::into_raw(factory).cast::<c_void>(),
                Some(compaction_filter_factory::destructor_callback::<F>),
                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
                Some(compaction_filter_factory::name_callback::<F>),
            );

            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
        }
    }
1609
1610    /// Sets the comparator used to define the order of keys in the table.
1611    /// Default: a comparator that uses lexicographic byte-wise ordering
1612    ///
1613    /// The client must ensure that the comparator supplied here has the same
1614    /// name and orders keys *exactly* the same as the comparator provided to
1615    /// previous open calls on the same DB.
1616    pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
1617        let cb = Box::new(ComparatorCallback {
1618            name: name.into_c_string().unwrap(),
1619            compare_fn,
1620        });
1621
1622        unsafe {
1623            let cmp = ffi::rocksdb_comparator_create(
1624                Box::into_raw(cb).cast::<c_void>(),
1625                Some(ComparatorCallback::destructor_callback),
1626                Some(ComparatorCallback::compare_callback),
1627                Some(ComparatorCallback::name_callback),
1628            );
1629            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1630        }
1631    }
1632
1633    /// Sets the comparator that are timestamp-aware, used to define the order of keys in the table,
1634    /// taking timestamp into consideration.
1635    /// Find more information on timestamp-aware comparator on [here](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp)
1636    ///
1637    /// The client must ensure that the comparator supplied here has the same
1638    /// name and orders keys *exactly* the same as the comparator provided to
1639    /// previous open calls on the same DB.
1640    pub fn set_comparator_with_ts(
1641        &mut self,
1642        name: impl CStrLike,
1643        timestamp_size: usize,
1644        compare_fn: Box<CompareFn>,
1645        compare_ts_fn: Box<CompareTsFn>,
1646        compare_without_ts_fn: Box<CompareWithoutTsFn>,
1647    ) {
1648        let cb = Box::new(ComparatorWithTsCallback {
1649            name: name.into_c_string().unwrap(),
1650            compare_fn,
1651            compare_ts_fn,
1652            compare_without_ts_fn,
1653        });
1654
1655        unsafe {
1656            let cmp = ffi::rocksdb_comparator_with_ts_create(
1657                Box::into_raw(cb).cast::<c_void>(),
1658                Some(ComparatorWithTsCallback::destructor_callback),
1659                Some(ComparatorWithTsCallback::compare_callback),
1660                Some(ComparatorWithTsCallback::compare_ts_callback),
1661                Some(ComparatorWithTsCallback::compare_without_ts_callback),
1662                Some(ComparatorWithTsCallback::name_callback),
1663                timestamp_size,
1664            );
1665            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1666        }
1667    }
1668
1669    pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
1670        unsafe {
1671            ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
1672        }
1673    }
1674
1675    // Use this if you don't need to keep the data sorted, i.e. you'll never use
1676    // an iterator, only Put() and Get() API calls
1677    //
1678    pub fn optimize_for_point_lookup(&mut self, block_cache_size_mb: u64) {
1679        unsafe {
1680            ffi::rocksdb_options_optimize_for_point_lookup(self.inner, block_cache_size_mb);
1681        }
1682    }
1683
1684    /// Sets the optimize_filters_for_hits flag
1685    ///
1686    /// Default: `false`
1687    ///
1688    /// # Examples
1689    ///
1690    /// ```
1691    /// use sfzhou_rocksdb::Options;
1692    ///
1693    /// let mut opts = Options::default();
1694    /// opts.set_optimize_filters_for_hits(true);
1695    /// ```
1696    pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
1697        unsafe {
1698            ffi::rocksdb_options_set_optimize_filters_for_hits(
1699                self.inner,
1700                c_int::from(optimize_for_hits),
1701            );
1702        }
1703    }
1704
1705    /// Sets the periodicity when obsolete files get deleted.
1706    ///
1707    /// The files that get out of scope by compaction
1708    /// process will still get automatically delete on every compaction,
1709    /// regardless of this setting.
1710    ///
1711    /// Default: 6 hours
1712    pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
1713        unsafe {
1714            ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
1715        }
1716    }
1717
1718    /// Prepare the DB for bulk loading.
1719    ///
1720    /// All data will be in level 0 without any automatic compaction.
1721    /// It's recommended to manually call CompactRange(NULL, NULL) before reading
1722    /// from the database, because otherwise the read can be very slow.
1723    pub fn prepare_for_bulk_load(&mut self) {
1724        unsafe {
1725            ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
1726        }
1727    }
1728
1729    /// Sets the number of open files that can be used by the DB. You may need to
1730    /// increase this if your database has a large working set. Value `-1` means
1731    /// files opened are always kept open. You can estimate number of files based
1732    /// on target_file_size_base and target_file_size_multiplier for level-based
1733    /// compaction. For universal-style compaction, you can usually set it to `-1`.
1734    ///
1735    /// Default: `-1`
1736    ///
1737    /// # Examples
1738    ///
1739    /// ```
1740    /// use sfzhou_rocksdb::Options;
1741    ///
1742    /// let mut opts = Options::default();
1743    /// opts.set_max_open_files(10);
1744    /// ```
1745    pub fn set_max_open_files(&mut self, nfiles: c_int) {
1746        unsafe {
1747            ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
1748        }
1749    }
1750
1751    /// If max_open_files is -1, DB will open all files on DB::Open(). You can
1752    /// use this option to increase the number of threads used to open the files.
1753    /// Default: 16
1754    pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
1755        unsafe {
1756            ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
1757        }
1758    }
1759
1760    /// By default, writes to stable storage use fdatasync (on platforms
1761    /// where this function is available). If this option is true,
1762    /// fsync is used instead.
1763    ///
1764    /// fsync and fdatasync are equally safe for our purposes and fdatasync is
1765    /// faster, so it is rarely necessary to set this option. It is provided
1766    /// as a workaround for kernel/filesystem bugs, such as one that affected
1767    /// fdatasync with ext4 in kernel versions prior to 3.7.
1768    ///
1769    /// Default: `false`
1770    ///
1771    /// # Examples
1772    ///
1773    /// ```
1774    /// use sfzhou_rocksdb::Options;
1775    ///
1776    /// let mut opts = Options::default();
1777    /// opts.set_use_fsync(true);
1778    /// ```
1779    pub fn set_use_fsync(&mut self, useit: bool) {
1780        unsafe {
1781            ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
1782        }
1783    }
1784
1785    /// Specifies the absolute info LOG dir.
1786    ///
1787    /// If it is empty, the log files will be in the same dir as data.
1788    /// If it is non empty, the log files will be in the specified dir,
1789    /// and the db data dir's absolute path will be used as the log file
1790    /// name's prefix.
1791    ///
1792    /// Default: empty
1793    pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
1794        let p = to_cpath(path).unwrap();
1795        unsafe {
1796            ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
1797        }
1798    }
1799
1800    /// Specifies the log level.
1801    /// Consider the `LogLevel` enum for a list of possible levels.
1802    ///
1803    /// Default: Info
1804    ///
1805    /// # Examples
1806    ///
1807    /// ```
1808    /// use sfzhou_rocksdb::{Options, LogLevel};
1809    ///
1810    /// let mut opts = Options::default();
1811    /// opts.set_log_level(LogLevel::Warn);
1812    /// ```
1813    pub fn set_log_level(&mut self, level: LogLevel) {
1814        unsafe {
1815            ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
1816        }
1817    }
1818
1819    /// Allows OS to incrementally sync files to disk while they are being
1820    /// written, asynchronously, in the background. This operation can be used
1821    /// to smooth out write I/Os over time. Users shouldn't rely on it for
1822    /// persistency guarantee.
1823    /// Issue one request for every bytes_per_sync written. `0` turns it off.
1824    ///
1825    /// Default: `0`
1826    ///
1827    /// You may consider using rate_limiter to regulate write rate to device.
1828    /// When rate limiter is enabled, it automatically enables bytes_per_sync
1829    /// to 1MB.
1830    ///
1831    /// This option applies to table files
1832    ///
1833    /// # Examples
1834    ///
1835    /// ```
1836    /// use sfzhou_rocksdb::Options;
1837    ///
1838    /// let mut opts = Options::default();
1839    /// opts.set_bytes_per_sync(1024 * 1024);
1840    /// ```
1841    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
1842        unsafe {
1843            ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
1844        }
1845    }
1846
1847    /// Same as bytes_per_sync, but applies to WAL files.
1848    ///
1849    /// Default: 0, turned off
1850    ///
1851    /// Dynamically changeable through SetDBOptions() API.
1852    pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
1853        unsafe {
1854            ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
1855        }
1856    }
1857
1858    /// Sets the maximum buffer size that is used by WritableFileWriter.
1859    ///
1860    /// On Windows, we need to maintain an aligned buffer for writes.
1861    /// We allow the buffer to grow until it's size hits the limit in buffered
1862    /// IO and fix the buffer size when using direct IO to ensure alignment of
1863    /// write requests if the logical sector size is unusual
1864    ///
1865    /// Default: 1024 * 1024 (1 MB)
1866    ///
1867    /// Dynamically changeable through SetDBOptions() API.
1868    pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
1869        unsafe {
1870            ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
1871        }
1872    }
1873
1874    /// If true, allow multi-writers to update mem tables in parallel.
1875    /// Only some memtable_factory-s support concurrent writes; currently it
1876    /// is implemented only for SkipListFactory.  Concurrent memtable writes
1877    /// are not compatible with inplace_update_support or filter_deletes.
1878    /// It is strongly recommended to set enable_write_thread_adaptive_yield
1879    /// if you are going to use this feature.
1880    ///
1881    /// Default: true
1882    ///
1883    /// # Examples
1884    ///
1885    /// ```
1886    /// use sfzhou_rocksdb::Options;
1887    ///
1888    /// let mut opts = Options::default();
1889    /// opts.set_allow_concurrent_memtable_write(false);
1890    /// ```
1891    pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
1892        unsafe {
1893            ffi::rocksdb_options_set_allow_concurrent_memtable_write(
1894                self.inner,
1895                c_uchar::from(allow),
1896            );
1897        }
1898    }
1899
1900    /// If true, threads synchronizing with the write batch group leader will wait for up to
1901    /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
1902    /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
1903    /// is enabled.
1904    ///
1905    /// Default: true
1906    pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
1907        unsafe {
1908            ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
1909                self.inner,
1910                c_uchar::from(enabled),
1911            );
1912        }
1913    }
1914
1915    /// Specifies whether an iteration->Next() sequentially skips over keys with the same user-key or not.
1916    ///
1917    /// This number specifies the number of keys (with the same userkey)
1918    /// that will be sequentially skipped before a reseek is issued.
1919    ///
1920    /// Default: 8
1921    pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
1922        unsafe {
1923            ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
1924        }
1925    }
1926
1927    /// Enable direct I/O mode for reading
1928    /// they may or may not improve performance depending on the use case
1929    ///
1930    /// Files will be opened in "direct I/O" mode
1931    /// which means that data read from the disk will not be cached or
1932    /// buffered. The hardware buffer of the devices may however still
1933    /// be used. Memory mapped files are not impacted by these parameters.
1934    ///
1935    /// Default: false
1936    ///
1937    /// # Examples
1938    ///
1939    /// ```
1940    /// use sfzhou_rocksdb::Options;
1941    ///
1942    /// let mut opts = Options::default();
1943    /// opts.set_use_direct_reads(true);
1944    /// ```
1945    pub fn set_use_direct_reads(&mut self, enabled: bool) {
1946        unsafe {
1947            ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
1948        }
1949    }
1950
1951    /// Enable direct I/O mode for flush and compaction
1952    ///
1953    /// Files will be opened in "direct I/O" mode
1954    /// which means that data written to the disk will not be cached or
1955    /// buffered. The hardware buffer of the devices may however still
1956    /// be used. Memory mapped files are not impacted by these parameters.
1957    /// they may or may not improve performance depending on the use case
1958    ///
1959    /// Default: false
1960    ///
1961    /// # Examples
1962    ///
1963    /// ```
1964    /// use sfzhou_rocksdb::Options;
1965    ///
1966    /// let mut opts = Options::default();
1967    /// opts.set_use_direct_io_for_flush_and_compaction(true);
1968    /// ```
1969    pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
1970        unsafe {
1971            ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
1972                self.inner,
1973                c_uchar::from(enabled),
1974            );
1975        }
1976    }
1977
1978    pub fn set_use_direct_io_for_wal(&mut self, enabled: bool) {
1979        unsafe {
1980            ffi::rocksdb_options_set_use_direct_io_for_wal(self.inner, c_uchar::from(enabled));
1981        }
1982    }
1983
1984    /// Enable/disable child process inherit open files.
1985    ///
1986    /// Default: true
1987    pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
1988        unsafe {
1989            ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
1990        }
1991    }
1992
1993    /// Hints to the OS that it should not buffer disk I/O. Enabling this
1994    /// parameter may improve performance but increases pressure on the
1995    /// system cache.
1996    ///
1997    /// The exact behavior of this parameter is platform dependent.
1998    ///
1999    /// On POSIX systems, after RocksDB reads data from disk it will
2000    /// mark the pages as "unneeded". The operating system may - or may not
2001    /// - evict these pages from memory, reducing pressure on the system
2002    /// cache. If the disk block is requested again this can result in
2003    /// additional disk I/O.
2004    ///
2005    /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
2006    /// which means that data read from the disk will not be cached or
2007    /// bufferized. The hardware buffer of the devices may however still
2008    /// be used. Memory mapped files are not impacted by this parameter.
2009    ///
2010    /// Default: true
2011    ///
2012    /// # Examples
2013    ///
2014    /// ```
2015    /// use sfzhou_rocksdb::Options;
2016    ///
2017    /// let mut opts = Options::default();
2018    /// #[allow(deprecated)]
2019    /// opts.set_allow_os_buffer(false);
2020    /// ```
2021    #[deprecated(
2022        since = "0.7.0",
2023        note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
2024    )]
2025    pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
2026        self.set_use_direct_reads(!is_allow);
2027        self.set_use_direct_io_for_flush_and_compaction(!is_allow);
2028    }
2029
2030    /// Sets the number of shards used for table cache.
2031    ///
2032    /// Default: `6`
2033    ///
2034    /// # Examples
2035    ///
2036    /// ```
2037    /// use sfzhou_rocksdb::Options;
2038    ///
2039    /// let mut opts = Options::default();
2040    /// opts.set_table_cache_num_shard_bits(4);
2041    /// ```
2042    pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
2043        unsafe {
2044            ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
2045        }
2046    }
2047
2048    /// By default target_file_size_multiplier is 1, which means
2049    /// by default files in different levels will have similar size.
2050    ///
2051    /// Dynamically changeable through SetOptions() API
2052    pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
2053        unsafe {
2054            ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
2055        }
2056    }
2057
2058    /// Sets the minimum number of write buffers that will be merged
2059    /// before writing to storage.  If set to `1`, then
2060    /// all write buffers are flushed to L0 as individual files and this increases
2061    /// read amplification because a get request has to check in all of these
2062    /// files. Also, an in-memory merge may result in writing lesser
2063    /// data to storage if there are duplicate records in each of these
2064    /// individual write buffers.
2065    ///
2066    /// Default: `1`
2067    ///
2068    /// # Examples
2069    ///
2070    /// ```
2071    /// use sfzhou_rocksdb::Options;
2072    ///
2073    /// let mut opts = Options::default();
2074    /// opts.set_min_write_buffer_number(2);
2075    /// ```
2076    pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
2077        unsafe {
2078            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
2079        }
2080    }
2081
2082    /// Sets the maximum number of write buffers that are built up in memory.
2083    /// The default and the minimum number is 2, so that when 1 write buffer
2084    /// is being flushed to storage, new writes can continue to the other
2085    /// write buffer.
2086    /// If max_write_buffer_number > 3, writing will be slowed down to
2087    /// options.delayed_write_rate if we are writing to the last write buffer
2088    /// allowed.
2089    ///
2090    /// Default: `2`
2091    ///
2092    /// # Examples
2093    ///
2094    /// ```
2095    /// use sfzhou_rocksdb::Options;
2096    ///
2097    /// let mut opts = Options::default();
2098    /// opts.set_max_write_buffer_number(4);
2099    /// ```
2100    pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
2101        unsafe {
2102            ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
2103        }
2104    }
2105
2106    /// Sets the amount of data to build up in memory (backed by an unsorted log
2107    /// on disk) before converting to a sorted on-disk file.
2108    ///
2109    /// Larger values increase performance, especially during bulk loads.
2110    /// Up to max_write_buffer_number write buffers may be held in memory
2111    /// at the same time,
2112    /// so you may wish to adjust this parameter to control memory usage.
2113    /// Also, a larger write buffer will result in a longer recovery time
2114    /// the next time the database is opened.
2115    ///
2116    /// Note that write_buffer_size is enforced per column family.
2117    /// See db_write_buffer_size for sharing memory across column families.
2118    ///
2119    /// Default: `0x4000000` (64MiB)
2120    ///
2121    /// Dynamically changeable through SetOptions() API
2122    ///
2123    /// # Examples
2124    ///
2125    /// ```
2126    /// use sfzhou_rocksdb::Options;
2127    ///
2128    /// let mut opts = Options::default();
2129    /// opts.set_write_buffer_size(128 * 1024 * 1024);
2130    /// ```
2131    pub fn set_write_buffer_size(&mut self, size: usize) {
2132        unsafe {
2133            ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
2134        }
2135    }
2136
2137    /// Amount of data to build up in memtables across all column
2138    /// families before writing to disk.
2139    ///
2140    /// This is distinct from write_buffer_size, which enforces a limit
2141    /// for a single memtable.
2142    ///
2143    /// This feature is disabled by default. Specify a non-zero value
2144    /// to enable it.
2145    ///
2146    /// Default: 0 (disabled)
2147    ///
2148    /// # Examples
2149    ///
2150    /// ```
2151    /// use sfzhou_rocksdb::Options;
2152    ///
2153    /// let mut opts = Options::default();
2154    /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
2155    /// ```
2156    pub fn set_db_write_buffer_size(&mut self, size: usize) {
2157        unsafe {
2158            ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
2159        }
2160    }
2161
2162    /// Control maximum total data size for a level.
2163    /// max_bytes_for_level_base is the max total for level-1.
2164    /// Maximum number of bytes for level L can be calculated as
2165    /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
2166    /// For example, if max_bytes_for_level_base is 200MB, and if
2167    /// max_bytes_for_level_multiplier is 10, total data size for level-1
2168    /// will be 200MB, total file size for level-2 will be 2GB,
2169    /// and total file size for level-3 will be 20GB.
2170    ///
2171    /// Default: `0x10000000` (256MiB).
2172    ///
2173    /// Dynamically changeable through SetOptions() API
2174    ///
2175    /// # Examples
2176    ///
2177    /// ```
2178    /// use sfzhou_rocksdb::Options;
2179    ///
2180    /// let mut opts = Options::default();
2181    /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
2182    /// ```
2183    pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
2184        unsafe {
2185            ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
2186        }
2187    }
2188
2189    /// Default: `10`
2190    ///
2191    /// # Examples
2192    ///
2193    /// ```
2194    /// use sfzhou_rocksdb::Options;
2195    ///
2196    /// let mut opts = Options::default();
2197    /// opts.set_max_bytes_for_level_multiplier(4.0);
2198    /// ```
2199    pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
2200        unsafe {
2201            ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
2202        }
2203    }
2204
2205    /// The manifest file is rolled over on reaching this limit.
2206    /// The older manifest file be deleted.
2207    /// The default value is MAX_INT so that roll-over does not take place.
2208    ///
2209    /// # Examples
2210    ///
2211    /// ```
2212    /// use sfzhou_rocksdb::Options;
2213    ///
2214    /// let mut opts = Options::default();
2215    /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
2216    /// ```
2217    pub fn set_max_manifest_file_size(&mut self, size: usize) {
2218        unsafe {
2219            ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
2220        }
2221    }
2222
2223    /// Sets the target file size for compaction.
2224    /// target_file_size_base is per-file size for level-1.
2225    /// Target file size for level L can be calculated by
2226    /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
2227    /// For example, if target_file_size_base is 2MB and
2228    /// target_file_size_multiplier is 10, then each file on level-1 will
2229    /// be 2MB, and each file on level 2 will be 20MB,
2230    /// and each file on level-3 will be 200MB.
2231    ///
2232    /// Default: `0x4000000` (64MiB)
2233    ///
2234    /// Dynamically changeable through SetOptions() API
2235    ///
2236    /// # Examples
2237    ///
2238    /// ```
2239    /// use sfzhou_rocksdb::Options;
2240    ///
2241    /// let mut opts = Options::default();
2242    /// opts.set_target_file_size_base(128 * 1024 * 1024);
2243    /// ```
2244    pub fn set_target_file_size_base(&mut self, size: u64) {
2245        unsafe {
2246            ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
2247        }
2248    }
2249
2250    /// Sets the minimum number of write buffers that will be merged together
2251    /// before writing to storage.  If set to `1`, then
2252    /// all write buffers are flushed to L0 as individual files and this increases
2253    /// read amplification because a get request has to check in all of these
2254    /// files. Also, an in-memory merge may result in writing lesser
2255    /// data to storage if there are duplicate records in each of these
2256    /// individual write buffers.
2257    ///
2258    /// Default: `1`
2259    ///
2260    /// # Examples
2261    ///
2262    /// ```
2263    /// use sfzhou_rocksdb::Options;
2264    ///
2265    /// let mut opts = Options::default();
2266    /// opts.set_min_write_buffer_number_to_merge(2);
2267    /// ```
2268    pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
2269        unsafe {
2270            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
2271        }
2272    }
2273
2274    /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
2275    /// level-0 compaction will not be triggered by number of files at all.
2276    ///
2277    /// Default: `4`
2278    ///
2279    /// Dynamically changeable through SetOptions() API
2280    ///
2281    /// # Examples
2282    ///
2283    /// ```
2284    /// use sfzhou_rocksdb::Options;
2285    ///
2286    /// let mut opts = Options::default();
2287    /// opts.set_level_zero_file_num_compaction_trigger(8);
2288    /// ```
2289    pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2290        unsafe {
2291            ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2292        }
2293    }
2294
2295    /// Sets the compaction priority. When multiple files are picked for compaction from a level,
2296    /// this option determines which files to pick first.
2297    ///
2298    /// Default: `CompactionPri::ByCompensatedSize`
2299    ///
2300    /// Dynamically changeable through SetOptions() API
2301    ///
2302    /// See [rocksdb post](https://github.com/facebook/rocksdb/blob/f20d12adc85ece3e75fb238872959c702c0e5535/docs/_posts/2016-01-29-compaction_pri.markdown) for more details.
2303    ///
2304    /// # Examples
2305    ///
2306    /// ```
2307    /// use sfzhou_rocksdb::{Options, CompactionPri};
2308    ///
2309    /// let mut opts = Options::default();
2310    /// opts.set_compaction_pri(CompactionPri::MinOverlappingRatio);
2311    /// ```
2312    pub fn set_compaction_pri(&mut self, pri: CompactionPri) {
2313        unsafe {
2314            ffi::rocksdb_options_set_compaction_pri(self.inner, pri as i32);
2315        }
2316    }
2317
2318    /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
2319    /// point. A value < `0` means that no writing slowdown will be triggered by
2320    /// number of files in level-0.
2321    ///
2322    /// Default: `20`
2323    ///
2324    /// Dynamically changeable through SetOptions() API
2325    ///
2326    /// # Examples
2327    ///
2328    /// ```
2329    /// use sfzhou_rocksdb::Options;
2330    ///
2331    /// let mut opts = Options::default();
2332    /// opts.set_level_zero_slowdown_writes_trigger(10);
2333    /// ```
2334    pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2335        unsafe {
2336            ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2337        }
2338    }
2339
2340    /// Sets the maximum number of level-0 files.  We stop writes at this point.
2341    ///
2342    /// Default: `24`
2343    ///
2344    /// Dynamically changeable through SetOptions() API
2345    ///
2346    /// # Examples
2347    ///
2348    /// ```
2349    /// use sfzhou_rocksdb::Options;
2350    ///
2351    /// let mut opts = Options::default();
2352    /// opts.set_level_zero_stop_writes_trigger(48);
2353    /// ```
2354    pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2355        unsafe {
2356            ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2357        }
2358    }
2359
2360    /// Sets the compaction style.
2361    ///
2362    /// Default: DBCompactionStyle::Level
2363    ///
2364    /// # Examples
2365    ///
2366    /// ```
2367    /// use sfzhou_rocksdb::{Options, DBCompactionStyle};
2368    ///
2369    /// let mut opts = Options::default();
2370    /// opts.set_compaction_style(DBCompactionStyle::Universal);
2371    /// ```
2372    pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2373        unsafe {
2374            ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2375        }
2376    }
2377
2378    /// Sets the options needed to support Universal Style compactions.
2379    pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2380        unsafe {
2381            ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2382        }
2383    }
2384
2385    /// Sets the options for FIFO compaction style.
2386    pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2387        unsafe {
2388            ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2389        }
2390    }
2391
2392    /// Sets unordered_write to true trades higher write throughput with
2393    /// relaxing the immutability guarantee of snapshots. This violates the
2394    /// repeatability one expects from ::Get from a snapshot, as well as
2395    /// ::MultiGet and Iterator's consistent-point-in-time view property.
2396    /// If the application cannot tolerate the relaxed guarantees, it can implement
2397    /// its own mechanisms to work around that and yet benefit from the higher
2398    /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2399    /// two_write_queues=true is one way to achieve immutable snapshots despite
2400    /// unordered_write.
2401    ///
2402    /// By default, i.e., when it is false, rocksdb does not advance the sequence
2403    /// number for new snapshots unless all the writes with lower sequence numbers
2404    /// are already finished. This provides the immutability that we expect from
2405    /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2406    /// snapshots, the snapshot immutability results into Iterator and MultiGet
2407    /// offering consistent-point-in-time view. If set to true, although
2408    /// Read-Your-Own-Write property is still provided, the snapshot immutability
2409    /// property is relaxed: the writes issued after the snapshot is obtained (with
2410    /// larger sequence numbers) will be still not visible to the reads from that
2411    /// snapshot, however, there still might be pending writes (with lower sequence
2412    /// number) that will change the state visible to the snapshot after they are
2413    /// landed to the memtable.
2414    ///
2415    /// Default: false
2416    pub fn set_unordered_write(&mut self, unordered: bool) {
2417        unsafe {
2418            ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2419        }
2420    }
2421
2422    /// Sets maximum number of threads that will
2423    /// concurrently perform a compaction job by breaking it into multiple,
2424    /// smaller ones that are run simultaneously.
2425    ///
2426    /// Default: 1 (i.e. no subcompactions)
2427    pub fn set_max_subcompactions(&mut self, num: u32) {
2428        unsafe {
2429            ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2430        }
2431    }
2432
2433    /// Sets maximum number of concurrent background jobs
2434    /// (compactions and flushes).
2435    ///
2436    /// Default: 2
2437    ///
2438    /// Dynamically changeable through SetDBOptions() API.
2439    pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2440        unsafe {
2441            ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2442        }
2443    }
2444
2445    /// Sets the maximum number of concurrent background compaction jobs, submitted to
2446    /// the default LOW priority thread pool.
2447    /// We first try to schedule compactions based on
2448    /// `base_background_compactions`. If the compaction cannot catch up , we
2449    /// will increase number of compaction threads up to
2450    /// `max_background_compactions`.
2451    ///
2452    /// If you're increasing this, also consider increasing number of threads in
2453    /// LOW priority thread pool. For more information, see
2454    /// Env::SetBackgroundThreads
2455    ///
2456    /// Default: `1`
2457    ///
2458    /// # Examples
2459    ///
2460    /// ```
2461    /// use sfzhou_rocksdb::Options;
2462    ///
2463    /// let mut opts = Options::default();
2464    /// #[allow(deprecated)]
2465    /// opts.set_max_background_compactions(2);
2466    /// ```
2467    #[deprecated(
2468        since = "0.15.0",
2469        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2470    )]
2471    pub fn set_max_background_compactions(&mut self, n: c_int) {
2472        unsafe {
2473            ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2474        }
2475    }
2476
2477    /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2478    /// the HIGH priority thread pool.
2479    ///
2480    /// By default, all background jobs (major compaction and memtable flush) go
2481    /// to the LOW priority pool. If this option is set to a positive number,
2482    /// memtable flush jobs will be submitted to the HIGH priority pool.
2483    /// It is important when the same Env is shared by multiple db instances.
2484    /// Without a separate pool, long running major compaction jobs could
2485    /// potentially block memtable flush jobs of other db instances, leading to
2486    /// unnecessary Put stalls.
2487    ///
2488    /// If you're increasing this, also consider increasing number of threads in
2489    /// HIGH priority thread pool. For more information, see
2490    /// Env::SetBackgroundThreads
2491    ///
2492    /// Default: `1`
2493    ///
2494    /// # Examples
2495    ///
2496    /// ```
2497    /// use sfzhou_rocksdb::Options;
2498    ///
2499    /// let mut opts = Options::default();
2500    /// #[allow(deprecated)]
2501    /// opts.set_max_background_flushes(2);
2502    /// ```
2503    #[deprecated(
2504        since = "0.15.0",
2505        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2506    )]
2507    pub fn set_max_background_flushes(&mut self, n: c_int) {
2508        unsafe {
2509            ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2510        }
2511    }
2512
2513    /// Disables automatic compactions. Manual compactions can still
2514    /// be issued on this column family
2515    ///
2516    /// Default: `false`
2517    ///
2518    /// Dynamically changeable through SetOptions() API
2519    ///
2520    /// # Examples
2521    ///
2522    /// ```
2523    /// use sfzhou_rocksdb::Options;
2524    ///
2525    /// let mut opts = Options::default();
2526    /// opts.set_disable_auto_compactions(true);
2527    /// ```
2528    pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2529        unsafe {
2530            ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2531        }
2532    }
2533
2534    /// SetMemtableHugePageSize sets the page size for huge page for
2535    /// arena used by the memtable.
2536    /// If <=0, it won't allocate from huge page but from malloc.
2537    /// Users are responsible to reserve huge pages for it to be allocated. For
2538    /// example:
2539    ///      sysctl -w vm.nr_hugepages=20
2540    /// See linux doc Documentation/vm/hugetlbpage.txt
2541    /// If there isn't enough free huge page available, it will fall back to
2542    /// malloc.
2543    ///
2544    /// Dynamically changeable through SetOptions() API
2545    pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2546        unsafe {
2547            ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2548        }
2549    }
2550
2551    /// Sets the maximum number of successive merge operations on a key in the memtable.
2552    ///
2553    /// When a merge operation is added to the memtable and the maximum number of
2554    /// successive merges is reached, the value of the key will be calculated and
2555    /// inserted into the memtable instead of the merge operation. This will
2556    /// ensure that there are never more than max_successive_merges merge
2557    /// operations in the memtable.
2558    ///
2559    /// Default: 0 (disabled)
2560    pub fn set_max_successive_merges(&mut self, num: usize) {
2561        unsafe {
2562            ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2563        }
2564    }
2565
2566    /// Control locality of bloom filter probes to improve cache miss rate.
2567    /// This option only applies to memtable prefix bloom and plaintable
2568    /// prefix bloom. It essentially limits the max number of cache lines each
2569    /// bloom filter check can touch.
2570    ///
2571    /// This optimization is turned off when set to 0. The number should never
2572    /// be greater than number of probes. This option can boost performance
2573    /// for in-memory workload but should use with care since it can cause
2574    /// higher false positive rate.
2575    ///
2576    /// Default: 0
2577    pub fn set_bloom_locality(&mut self, v: u32) {
2578        unsafe {
2579            ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2580        }
2581    }
2582
2583    /// Enable/disable thread-safe inplace updates.
2584    ///
2585    /// Requires updates if
2586    /// * key exists in current memtable
2587    /// * new sizeof(new_value) <= sizeof(old_value)
2588    /// * old_value for that key is a put i.e. kTypeValue
2589    ///
2590    /// Default: false.
2591    pub fn set_inplace_update_support(&mut self, enabled: bool) {
2592        unsafe {
2593            ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2594        }
2595    }
2596
2597    /// Sets the number of locks used for inplace update.
2598    ///
2599    /// Default: 10000 when inplace_update_support = true, otherwise 0.
2600    pub fn set_inplace_update_locks(&mut self, num: usize) {
2601        unsafe {
2602            ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2603        }
2604    }
2605
2606    /// Different max-size multipliers for different levels.
2607    /// These are multiplied by max_bytes_for_level_multiplier to arrive
2608    /// at the max-size of each level.
2609    ///
2610    /// Default: 1
2611    ///
2612    /// Dynamically changeable through SetOptions() API
2613    pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2614        let count = level_values.len();
2615        unsafe {
2616            ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2617                self.inner,
2618                level_values.as_ptr().cast_mut(),
2619                count,
2620            );
2621        }
2622    }
2623
2624    /// If true, then DB::Open() will not fetch and check sizes of all sst files.
2625    /// This may significantly speed up startup if there are many sst files,
2626    /// especially when using non-default Env with expensive GetFileSize().
2627    /// We'll still check that all required sst files exist.
2628    /// If paranoid_checks is false, this option is ignored, and sst files are
2629    /// not checked at all.
2630    ///
2631    /// Default: false
2632    pub fn set_skip_checking_sst_file_sizes_on_db_open(&mut self, value: bool) {
2633        unsafe {
2634            ffi::rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
2635                self.inner,
2636                c_uchar::from(value),
2637            );
2638        }
2639    }
2640
2641    /// The total maximum size(bytes) of write buffers to maintain in memory
2642    /// including copies of buffers that have already been flushed. This parameter
2643    /// only affects trimming of flushed buffers and does not affect flushing.
2644    /// This controls the maximum amount of write history that will be available
2645    /// in memory for conflict checking when Transactions are used. The actual
2646    /// size of write history (flushed Memtables) might be higher than this limit
2647    /// if further trimming will reduce write history total size below this
2648    /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2649    /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2650    /// Because trimming the next Memtable of size 20MB will reduce total memory
2651    /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2652    ///
2653    /// When using an OptimisticTransactionDB:
2654    /// If this value is too low, some transactions may fail at commit time due
2655    /// to not being able to determine whether there were any write conflicts.
2656    ///
2657    /// When using a TransactionDB:
2658    /// If Transaction::SetSnapshot is used, TransactionDB will read either
2659    /// in-memory write buffers or SST files to do write-conflict checking.
2660    /// Increasing this value can reduce the number of reads to SST files
2661    /// done for conflict detection.
2662    ///
2663    /// Setting this value to 0 will cause write buffers to be freed immediately
2664    /// after they are flushed. If this value is set to -1,
2665    /// 'max_write_buffer_number * write_buffer_size' will be used.
2666    ///
2667    /// Default:
2668    /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2669    /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2670    /// if it is not explicitly set by the user.  Otherwise, the default is 0.
2671    pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2672        unsafe {
2673            ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2674        }
2675    }
2676
2677    /// By default, a single write thread queue is maintained. The thread gets
2678    /// to the head of the queue becomes write batch group leader and responsible
2679    /// for writing to WAL and memtable for the batch group.
2680    ///
2681    /// If enable_pipelined_write is true, separate write thread queue is
2682    /// maintained for WAL write and memtable write. A write thread first enter WAL
2683    /// writer queue and then memtable writer queue. Pending thread on the WAL
2684    /// writer queue thus only have to wait for previous writers to finish their
2685    /// WAL writing but not the memtable writing. Enabling the feature may improve
2686    /// write throughput and reduce latency of the prepare phase of two-phase
2687    /// commit.
2688    ///
2689    /// Default: false
2690    pub fn set_enable_pipelined_write(&mut self, value: bool) {
2691        unsafe {
2692            ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
2693        }
2694    }
2695
2696    /// Defines the underlying memtable implementation.
2697    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2698    /// Defaults to using a skiplist.
2699    ///
2700    /// # Examples
2701    ///
2702    /// ```
2703    /// use sfzhou_rocksdb::{Options, MemtableFactory};
2704    /// let mut opts = Options::default();
2705    /// let factory = MemtableFactory::HashSkipList {
2706    ///     bucket_count: 1_000_000,
2707    ///     height: 4,
2708    ///     branching_factor: 4,
2709    /// };
2710    ///
2711    /// opts.set_allow_concurrent_memtable_write(false);
2712    /// opts.set_memtable_factory(factory);
2713    /// ```
2714    pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2715        match factory {
2716            MemtableFactory::Vector => unsafe {
2717                ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2718            },
2719            MemtableFactory::HashSkipList {
2720                bucket_count,
2721                height,
2722                branching_factor,
2723            } => unsafe {
2724                ffi::rocksdb_options_set_hash_skip_list_rep(
2725                    self.inner,
2726                    bucket_count,
2727                    height,
2728                    branching_factor,
2729                );
2730            },
2731            MemtableFactory::HashLinkList { bucket_count } => unsafe {
2732                ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2733            },
2734        };
2735    }
2736
2737    pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2738        unsafe {
2739            ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2740        }
2741        self.outlive.block_based = Some(factory.outlive.clone());
2742    }
2743
2744    /// Sets the table factory to a CuckooTableFactory (the default table
2745    /// factory is a block-based table factory that provides a default
2746    /// implementation of TableBuilder and TableReader with default
2747    /// BlockBasedTableOptions).
2748    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2749    /// # Examples
2750    ///
2751    /// ```
2752    /// use sfzhou_rocksdb::{Options, CuckooTableOptions};
2753    ///
2754    /// let mut opts = Options::default();
2755    /// let mut factory_opts = CuckooTableOptions::default();
2756    /// factory_opts.set_hash_ratio(0.8);
2757    /// factory_opts.set_max_search_depth(20);
2758    /// factory_opts.set_cuckoo_block_size(10);
2759    /// factory_opts.set_identity_as_first_hash(true);
2760    /// factory_opts.set_use_module_hash(false);
2761    ///
2762    /// opts.set_cuckoo_table_factory(&factory_opts);
2763    /// ```
2764    pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2765        unsafe {
2766            ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2767        }
2768    }
2769
2770    // This is a factory that provides TableFactory objects.
2771    // Default: a block-based table factory that provides a default
2772    // implementation of TableBuilder and TableReader with default
2773    // BlockBasedTableOptions.
2774    /// Sets the factory as plain table.
2775    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
2776    /// information.
2777    ///
2778    /// # Examples
2779    ///
2780    /// ```
2781    /// use sfzhou_rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
2782    ///
2783    /// let mut opts = Options::default();
2784    /// let factory_opts = PlainTableFactoryOptions {
2785    ///   user_key_length: 0,
2786    ///   bloom_bits_per_key: 20,
2787    ///   hash_table_ratio: 0.75,
2788    ///   index_sparseness: 16,
2789    ///   huge_page_tlb_size: 0,
2790    ///   encoding_type: KeyEncodingType::Plain,
2791    ///   full_scan_mode: false,
2792    ///   store_index_in_file: false,
2793    /// };
2794    ///
2795    /// opts.set_plain_table_factory(&factory_opts);
2796    /// ```
2797    pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2798        unsafe {
2799            ffi::rocksdb_options_set_plain_table_factory(
2800                self.inner,
2801                options.user_key_length,
2802                options.bloom_bits_per_key,
2803                options.hash_table_ratio,
2804                options.index_sparseness,
2805                options.huge_page_tlb_size,
2806                options.encoding_type as c_char,
2807                c_uchar::from(options.full_scan_mode),
2808                c_uchar::from(options.store_index_in_file),
2809            );
2810        }
2811    }
2812
2813    /// Sets the start level to use compression.
2814    pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2815        unsafe {
2816            ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2817        }
2818    }
2819
2820    /// Measure IO stats in compactions and flushes, if `true`.
2821    ///
2822    /// Default: `false`
2823    ///
2824    /// # Examples
2825    ///
2826    /// ```
2827    /// use sfzhou_rocksdb::Options;
2828    ///
2829    /// let mut opts = Options::default();
2830    /// opts.set_report_bg_io_stats(true);
2831    /// ```
2832    pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2833        unsafe {
2834            ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
2835        }
2836    }
2837
2838    /// Once write-ahead logs exceed this size, we will start forcing the flush of
2839    /// column families whose memtables are backed by the oldest live WAL file
2840    /// (i.e. the ones that are causing all the space amplification).
2841    ///
2842    /// Default: `0`
2843    ///
2844    /// # Examples
2845    ///
2846    /// ```
2847    /// use sfzhou_rocksdb::Options;
2848    ///
2849    /// let mut opts = Options::default();
2850    /// // Set max total wal size to 1G.
2851    /// opts.set_max_total_wal_size(1 << 30);
2852    /// ```
2853    pub fn set_max_total_wal_size(&mut self, size: u64) {
2854        unsafe {
2855            ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2856        }
2857    }
2858
2859    /// Recovery mode to control the consistency while replaying WAL.
2860    ///
2861    /// Default: DBRecoveryMode::PointInTime
2862    ///
2863    /// # Examples
2864    ///
2865    /// ```
2866    /// use sfzhou_rocksdb::{Options, DBRecoveryMode};
2867    ///
2868    /// let mut opts = Options::default();
2869    /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
2870    /// ```
2871    pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
2872        unsafe {
2873            ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
2874        }
2875    }
2876
2877    pub fn enable_statistics(&mut self) {
2878        unsafe {
2879            ffi::rocksdb_options_enable_statistics(self.inner);
2880        }
2881    }
2882
2883    pub fn get_statistics(&self) -> Option<String> {
2884        unsafe {
2885            let value = ffi::rocksdb_options_statistics_get_string(self.inner);
2886            if value.is_null() {
2887                return None;
2888            }
2889
2890            // Must have valid UTF-8 format.
2891            let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
2892            ffi::rocksdb_free(value as *mut c_void);
2893            Some(s)
2894        }
2895    }
2896
2897    /// StatsLevel can be used to reduce statistics overhead by skipping certain
2898    /// types of stats in the stats collection process.
2899    pub fn set_statistics_level(&self, level: StatsLevel) {
2900        unsafe { ffi::rocksdb_options_set_statistics_level(self.inner, level as c_int) }
2901    }
2902
2903    /// Returns the value of cumulative db counters if stat collection is enabled.
2904    pub fn get_ticker_count(&self, ticker: Ticker) -> u64 {
2905        unsafe { ffi::rocksdb_options_statistics_get_ticker_count(self.inner, ticker as u32) }
2906    }
2907
2908    /// Gets Histogram data from collected db stats. Requires stats to be enabled.
2909    pub fn get_histogram_data(&self, histogram: Histogram) -> HistogramData {
2910        unsafe {
2911            let data = HistogramData::default();
2912            ffi::rocksdb_options_statistics_get_histogram_data(
2913                self.inner,
2914                histogram as u32,
2915                data.inner,
2916            );
2917            data
2918        }
2919    }
2920
2921    /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
2922    ///
2923    /// Default: `600` (10 mins)
2924    ///
2925    /// # Examples
2926    ///
2927    /// ```
2928    /// use sfzhou_rocksdb::Options;
2929    ///
2930    /// let mut opts = Options::default();
2931    /// opts.set_stats_dump_period_sec(300);
2932    /// ```
2933    pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
2934        unsafe {
2935            ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
2936        }
2937    }
2938
2939    /// If not zero, dump rocksdb.stats to RocksDB to LOG every `stats_persist_period_sec`.
2940    ///
2941    /// Default: `600` (10 mins)
2942    ///
2943    /// # Examples
2944    ///
2945    /// ```
2946    /// use sfzhou_rocksdb::Options;
2947    ///
2948    /// let mut opts = Options::default();
2949    /// opts.set_stats_persist_period_sec(5);
2950    /// ```
2951    pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
2952        unsafe {
2953            ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
2954        }
2955    }
2956
2957    /// When set to true, reading SST files will opt out of the filesystem's
2958    /// readahead. Setting this to false may improve sequential iteration
2959    /// performance.
2960    ///
2961    /// Default: `true`
2962    pub fn set_advise_random_on_open(&mut self, advise: bool) {
2963        unsafe {
2964            ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
2965        }
2966    }
2967
2968    /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
2969    ///
2970    /// This could reduce context switch when the mutex is not
2971    /// heavily contended. However, if the mutex is hot, we could end up
2972    /// wasting spin time.
2973    ///
2974    /// Default: false
2975    pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
2976        unsafe {
2977            ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
2978        }
2979    }
2980
2981    /// Sets the number of levels for this database.
2982    pub fn set_num_levels(&mut self, n: c_int) {
2983        unsafe {
2984            ffi::rocksdb_options_set_num_levels(self.inner, n);
2985        }
2986    }
2987
2988    /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
2989    /// creates a prefix bloom filter for each memtable with the size of
2990    /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
2991    ///
2992    /// Default: `0`
2993    ///
2994    /// # Examples
2995    ///
2996    /// ```
2997    /// use sfzhou_rocksdb::{Options, SliceTransform};
2998    ///
2999    /// let mut opts = Options::default();
3000    /// let transform = SliceTransform::create_fixed_prefix(10);
3001    /// opts.set_prefix_extractor(transform);
3002    /// opts.set_memtable_prefix_bloom_ratio(0.2);
3003    /// ```
3004    pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
3005        unsafe {
3006            ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
3007        }
3008    }
3009
3010    /// Sets the maximum number of bytes in all compacted files.
3011    /// We try to limit number of bytes in one compaction to be lower than this
3012    /// threshold. But it's not guaranteed.
3013    ///
3014    /// Value 0 will be sanitized.
3015    ///
3016    /// Default: target_file_size_base * 25
3017    pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
3018        unsafe {
3019            ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
3020        }
3021    }
3022
3023    /// Specifies the absolute path of the directory the
3024    /// write-ahead log (WAL) should be written to.
3025    ///
3026    /// Default: same directory as the database
3027    ///
3028    /// # Examples
3029    ///
3030    /// ```
3031    /// use sfzhou_rocksdb::Options;
3032    ///
3033    /// let mut opts = Options::default();
3034    /// opts.set_wal_dir("/path/to/dir");
3035    /// ```
3036    pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
3037        let p = to_cpath(path).unwrap();
3038        unsafe {
3039            ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
3040        }
3041    }
3042
3043    /// Sets the WAL ttl in seconds.
3044    ///
3045    /// The following two options affect how archived logs will be deleted.
3046    /// 1. If both set to 0, logs will be deleted asap and will not get into
3047    ///    the archive.
3048    /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
3049    ///    WAL files will be checked every 10 min and if total size is greater
3050    ///    then wal_size_limit_mb, they will be deleted starting with the
3051    ///    earliest until size_limit is met. All empty files will be deleted.
3052    /// 3. If wal_ttl_seconds is not 0 and wall_size_limit_mb is 0, then
3053    ///    WAL files will be checked every wal_ttl_seconds / 2 and those that
3054    ///    are older than wal_ttl_seconds will be deleted.
3055    /// 4. If both are not 0, WAL files will be checked every 10 min and both
3056    ///    checks will be performed with ttl being first.
3057    ///
3058    /// Default: 0
3059    pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
3060        unsafe {
3061            ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
3062        }
3063    }
3064
3065    /// Sets the WAL size limit in MB.
3066    ///
3067    /// If total size of WAL files is greater then wal_size_limit_mb,
3068    /// they will be deleted starting with the earliest until size_limit is met.
3069    ///
3070    /// Default: 0
3071    pub fn set_wal_size_limit_mb(&mut self, size: u64) {
3072        unsafe {
3073            ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
3074        }
3075    }
3076
3077    /// Sets the number of bytes to preallocate (via fallocate) the manifest files.
3078    ///
3079    /// Default is 4MB, which is reasonable to reduce random IO
3080    /// as well as prevent overallocation for mounts that preallocate
3081    /// large amounts of data (such as xfs's allocsize option).
3082    pub fn set_manifest_preallocation_size(&mut self, size: usize) {
3083        unsafe {
3084            ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
3085        }
3086    }
3087
3088    /// If true, then DB::Open() will not update the statistics used to optimize
3089    /// compaction decision by loading table properties from many files.
3090    /// Turning off this feature will improve DBOpen time especially in disk environment.
3091    ///
3092    /// Default: false
3093    pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
3094        unsafe {
3095            ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
3096        }
3097    }
3098
3099    /// Specify the maximal number of info log files to be kept.
3100    ///
3101    /// Default: 1000
3102    ///
3103    /// # Examples
3104    ///
3105    /// ```
3106    /// use sfzhou_rocksdb::Options;
3107    ///
3108    /// let mut options = Options::default();
3109    /// options.set_keep_log_file_num(100);
3110    /// ```
3111    pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
3112        unsafe {
3113            ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
3114        }
3115    }
3116
3117    /// Allow the OS to mmap file for writing.
3118    ///
3119    /// Default: false
3120    ///
3121    /// # Examples
3122    ///
3123    /// ```
3124    /// use sfzhou_rocksdb::Options;
3125    ///
3126    /// let mut options = Options::default();
3127    /// options.set_allow_mmap_writes(true);
3128    /// ```
3129    pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
3130        unsafe {
3131            ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
3132        }
3133    }
3134
3135    /// Allow the OS to mmap file for reading sst tables.
3136    ///
3137    /// Default: false
3138    ///
3139    /// # Examples
3140    ///
3141    /// ```
3142    /// use sfzhou_rocksdb::Options;
3143    ///
3144    /// let mut options = Options::default();
3145    /// options.set_allow_mmap_reads(true);
3146    /// ```
3147    pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
3148        unsafe {
3149            ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
3150        }
3151    }
3152
3153    /// If enabled, WAL is not flushed automatically after each write. Instead it
3154    /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
3155    /// to its file.
3156    ///
3157    /// Default: false
3158    ///
3159    /// # Examples
3160    ///
3161    /// ```
3162    /// use sfzhou_rocksdb::Options;
3163    ///
3164    /// let mut options = Options::default();
3165    /// options.set_manual_wal_flush(true);
3166    /// ```
3167    pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
3168        unsafe {
3169            ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
3170        }
3171    }
3172
3173    /// Guarantee that all column families are flushed together atomically.
3174    /// This option applies to both manual flushes (`db.flush()`) and automatic
3175    /// background flushes caused when memtables are filled.
3176    ///
3177    /// Note that this is only useful when the WAL is disabled. When using the
3178    /// WAL, writes are always consistent across column families.
3179    ///
3180    /// Default: false
3181    ///
3182    /// # Examples
3183    ///
3184    /// ```
3185    /// use sfzhou_rocksdb::Options;
3186    ///
3187    /// let mut options = Options::default();
3188    /// options.set_atomic_flush(true);
3189    /// ```
3190    pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
3191        unsafe {
3192            ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
3193        }
3194    }
3195
3196    /// Sets global cache for table-level rows.
3197    ///
3198    /// Default: null (disabled)
3199    /// Not supported in ROCKSDB_LITE mode!
3200    pub fn set_row_cache(&mut self, cache: &Cache) {
3201        unsafe {
3202            ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
3203        }
3204        self.outlive.row_cache = Some(cache.clone());
3205    }
3206
3207    /// Use to control write rate of flush and compaction. Flush has higher
3208    /// priority than compaction.
3209    /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3210    ///
3211    /// Default: disable
3212    ///
3213    /// # Examples
3214    ///
3215    /// ```
3216    /// use sfzhou_rocksdb::Options;
3217    ///
3218    /// let mut options = Options::default();
3219    /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
3220    /// ```
3221    pub fn set_ratelimiter(
3222        &mut self,
3223        rate_bytes_per_sec: i64,
3224        refill_period_us: i64,
3225        fairness: i32,
3226    ) {
3227        unsafe {
3228            let ratelimiter =
3229                ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
3230            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3231            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3232        }
3233    }
3234
3235    /// Use to control write rate of flush and compaction. Flush has higher
3236    /// priority than compaction.
3237    /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3238    ///
3239    /// Default: disable
3240    pub fn set_auto_tuned_ratelimiter(
3241        &mut self,
3242        rate_bytes_per_sec: i64,
3243        refill_period_us: i64,
3244        fairness: i32,
3245    ) {
3246        unsafe {
3247            let ratelimiter = ffi::rocksdb_ratelimiter_create_auto_tuned(
3248                rate_bytes_per_sec,
3249                refill_period_us,
3250                fairness,
3251            );
3252            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3253            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3254        }
3255    }
3256
3257    /// Sets the maximal size of the info log file.
3258    ///
3259    /// If the log file is larger than `max_log_file_size`, a new info log file
3260    /// will be created. If `max_log_file_size` is equal to zero, all logs will
3261    /// be written to one log file.
3262    ///
3263    /// Default: 0
3264    ///
3265    /// # Examples
3266    ///
3267    /// ```
3268    /// use sfzhou_rocksdb::Options;
3269    ///
3270    /// let mut options = Options::default();
3271    /// options.set_max_log_file_size(0);
3272    /// ```
3273    pub fn set_max_log_file_size(&mut self, size: usize) {
3274        unsafe {
3275            ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
3276        }
3277    }
3278
3279    /// Sets the time for the info log file to roll (in seconds).
3280    ///
3281    /// If specified with non-zero value, log file will be rolled
3282    /// if it has been active longer than `log_file_time_to_roll`.
3283    /// Default: 0 (disabled)
3284    pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
3285        unsafe {
3286            ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
3287        }
3288    }
3289
3290    /// Controls the recycling of log files.
3291    ///
3292    /// If non-zero, previously written log files will be reused for new logs,
3293    /// overwriting the old data. The value indicates how many such files we will
3294    /// keep around at any point in time for later use. This is more efficient
3295    /// because the blocks are already allocated and fdatasync does not need to
3296    /// update the inode after each write.
3297    ///
3298    /// Default: 0
3299    ///
3300    /// # Examples
3301    ///
3302    /// ```
3303    /// use sfzhou_rocksdb::Options;
3304    ///
3305    /// let mut options = Options::default();
3306    /// options.set_recycle_log_file_num(5);
3307    /// ```
3308    pub fn set_recycle_log_file_num(&mut self, num: usize) {
3309        unsafe {
3310            ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
3311        }
3312    }
3313
3314    /// Sets the threshold at which all writes will be slowed down to at least delayed_write_rate if estimated
3315    /// bytes needed to be compaction exceed this threshold.
3316    ///
3317    /// Default: 64GB
3318    pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
3319        unsafe {
3320            ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
3321        }
3322    }
3323
3324    /// Sets the bytes threshold at which all writes are stopped if estimated bytes needed to be compaction exceed
3325    /// this threshold.
3326    ///
3327    /// Default: 256GB
3328    pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
3329        unsafe {
3330            ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
3331        }
3332    }
3333
3334    /// Sets the size of one block in arena memory allocation.
3335    ///
3336    /// If <= 0, a proper value is automatically calculated (usually 1/10 of
3337    /// writer_buffer_size).
3338    ///
3339    /// Default: 0
3340    pub fn set_arena_block_size(&mut self, size: usize) {
3341        unsafe {
3342            ffi::rocksdb_options_set_arena_block_size(self.inner, size);
3343        }
3344    }
3345
3346    /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
3347    ///
3348    /// Default: false
3349    pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
3350        unsafe {
3351            ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
3352        }
3353    }
3354
3355    /// Enable whole key bloom filter in memtable. Note this will only take effect
3356    /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
3357    /// can potentially reduce CPU usage for point-look-ups.
3358    ///
3359    /// Default: false (disable)
3360    ///
3361    /// Dynamically changeable through SetOptions() API
3362    pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
3363        unsafe {
3364            ffi::rocksdb_options_set_memtable_whole_key_filtering(
3365                self.inner,
3366                c_uchar::from(whole_key_filter),
3367            );
3368        }
3369    }
3370
3371    /// Enable the use of key-value separation.
3372    ///
3373    /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
3374    ///
3375    /// Default: false (disable)
3376    ///
3377    /// Dynamically changeable through SetOptions() API
3378    pub fn set_enable_blob_files(&mut self, val: bool) {
3379        unsafe {
3380            ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
3381        }
3382    }
3383
3384    /// Sets the minimum threshold value at or above which will be written
3385    /// to blob files during flush or compaction.
3386    ///
3387    /// Dynamically changeable through SetOptions() API
3388    pub fn set_min_blob_size(&mut self, val: u64) {
3389        unsafe {
3390            ffi::rocksdb_options_set_min_blob_size(self.inner, val);
3391        }
3392    }
3393
3394    /// Sets the size limit for blob files.
3395    ///
3396    /// Dynamically changeable through SetOptions() API
3397    pub fn set_blob_file_size(&mut self, val: u64) {
3398        unsafe {
3399            ffi::rocksdb_options_set_blob_file_size(self.inner, val);
3400        }
3401    }
3402
    /// Sets the blob compression type. All blob files use the same
    /// compression type.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
        unsafe {
            // `as _` casts the enum discriminant to the integer type the C API expects.
            ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
        }
    }
3412
3413    /// If this is set to true RocksDB will actively relocate valid blobs from the oldest blob files
3414    /// as they are encountered during compaction.
3415    ///
3416    /// Dynamically changeable through SetOptions() API
3417    pub fn set_enable_blob_gc(&mut self, val: bool) {
3418        unsafe {
3419            ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
3420        }
3421    }
3422
3423    /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
3424    ///
3425    /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
3426    /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
3427    /// the trade-off between write amplification and space amplification.
3428    ///
3429    /// Dynamically changeable through SetOptions() API
3430    pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
3431        unsafe {
3432            ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
3433        }
3434    }
3435
3436    /// Sets the blob GC force threshold.
3437    ///
3438    /// Dynamically changeable through SetOptions() API
3439    pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
3440        unsafe {
3441            ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
3442        }
3443    }
3444
3445    /// Sets the blob compaction read ahead size.
3446    ///
3447    /// Dynamically changeable through SetOptions() API
3448    pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
3449        unsafe {
3450            ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
3451        }
3452    }
3453
3454    /// Sets the blob cache.
3455    ///
3456    /// Using a dedicated object for blobs and using the same object for the block and blob caches
3457    /// are both supported. In the latter case, note that blobs are less valuable from a caching
3458    /// perspective than SST blocks, and some cache implementations have configuration options that
3459    /// can be used to prioritize items accordingly (see Cache::Priority and
3460    /// LRUCacheOptions::{high,low}_pri_pool_ratio).
3461    ///
3462    /// Default: disabled
3463    pub fn set_blob_cache(&mut self, cache: &Cache) {
3464        unsafe {
3465            ffi::rocksdb_options_set_blob_cache(self.inner, cache.0.inner.as_ptr());
3466        }
3467        self.outlive.blob_cache = Some(cache.clone());
3468    }
3469
3470    /// Set this option to true during creation of database if you want
3471    /// to be able to ingest behind (call IngestExternalFile() skipping keys
3472    /// that already exist, rather than overwriting matching keys).
3473    /// Setting this option to true has the following effects:
3474    /// 1) Disable some internal optimizations around SST file compression.
3475    /// 2) Reserve the last level for ingested files only.
3476    /// 3) Compaction will not include any file from the last level.
3477    /// Note that only Universal Compaction supports allow_ingest_behind.
3478    /// `num_levels` should be >= 3 if this option is turned on.
3479    ///
3480    /// DEFAULT: false
3481    /// Immutable.
3482    pub fn set_allow_ingest_behind(&mut self, val: bool) {
3483        unsafe {
3484            ffi::rocksdb_options_set_allow_ingest_behind(self.inner, c_uchar::from(val));
3485        }
3486    }
3487
    /// A factory of a table property collector that marks an SST
    /// file as need-compaction when it observes at least "D" deletion
    /// entries in any "N" consecutive entries, or the ratio of tombstone
    /// entries >= deletion_ratio.
    ///
    /// `window_size`: is the sliding window size "N"
    /// `num_dels_trigger`: is the deletion trigger "D"
    /// `deletion_ratio`: if <= 0 or > 1, disable triggering compaction based on
    /// deletion ratio.
    pub fn add_compact_on_deletion_collector_factory(
        &mut self,
        window_size: size_t,
        num_dels_trigger: size_t,
        deletion_ratio: f64,
    ) {
        unsafe {
            ffi::rocksdb_options_add_compact_on_deletion_collector_factory_del_ratio(
                self.inner,
                window_size,
                num_dels_trigger,
                deletion_ratio,
            );
        }
    }
3512
3513    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
3514    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
3515    /// Users can enable this control by 2 ways:
3516    ///
3517    /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
3518    /// 2- Cost the memtable memory usage to block cache so that memory of RocksDB can be capped by the single limit.
3519    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
3520    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
3521    pub fn set_write_buffer_manager(&mut self, write_buffer_manager: &WriteBufferManager) {
3522        unsafe {
3523            ffi::rocksdb_options_set_write_buffer_manager(
3524                self.inner,
3525                write_buffer_manager.0.inner.as_ptr(),
3526            );
3527        }
3528        self.outlive.write_buffer_manager = Some(write_buffer_manager.clone());
3529    }
3530
3531    /// If true, working thread may avoid doing unnecessary and long-latency
3532    /// operation (such as deleting obsolete files directly or deleting memtable)
3533    /// and will instead schedule a background job to do it.
3534    ///
3535    /// Use it if you're latency-sensitive.
3536    ///
3537    /// Default: false (disabled)
3538    pub fn set_avoid_unnecessary_blocking_io(&mut self, val: bool) {
3539        unsafe {
3540            ffi::rocksdb_options_set_avoid_unnecessary_blocking_io(self.inner, u8::from(val));
3541        }
3542    }
3543
3544    /// If true, the log numbers and sizes of the synced WALs are tracked
3545    /// in MANIFEST. During DB recovery, if a synced WAL is missing
3546    /// from disk, or the WAL's size does not match the recorded size in
3547    /// MANIFEST, an error will be reported and the recovery will be aborted.
3548    ///
3549    /// This is one additional protection against WAL corruption besides the
3550    /// per-WAL-entry checksum.
3551    ///
3552    /// Note that this option does not work with secondary instance.
3553    /// Currently, only syncing closed WALs are tracked. Calling `DB::SyncWAL()`,
3554    /// etc. or writing with `WriteOptions::sync=true` to sync the live WAL is not
3555    /// tracked for performance/efficiency reasons.
3556    ///
3557    /// See: <https://github.com/facebook/rocksdb/wiki/Track-WAL-in-MANIFEST>
3558    ///
3559    /// Default: false (disabled)
3560    pub fn set_track_and_verify_wals_in_manifest(&mut self, val: bool) {
3561        unsafe {
3562            ffi::rocksdb_options_set_track_and_verify_wals_in_manifest(self.inner, u8::from(val));
3563        }
3564    }
3565
3566    /// Returns the value of the `track_and_verify_wals_in_manifest` option.
3567    pub fn get_track_and_verify_wals_in_manifest(&self) -> bool {
3568        let val_u8 =
3569            unsafe { ffi::rocksdb_options_get_track_and_verify_wals_in_manifest(self.inner) };
3570        val_u8 != 0
3571    }
3572
3573    /// The DB unique ID can be saved in the DB manifest (preferred, this option)
3574    /// or an IDENTITY file (historical, deprecated), or both. If this option is
3575    /// set to false (old behavior), then `write_identity_file` must be set to true.
3576    /// The manifest is preferred because
3577    /// 1. The IDENTITY file is not checksummed, so it is not as safe against
3578    ///    corruption.
3579    /// 2. The IDENTITY file may or may not be copied with the DB (e.g. not
3580    ///    copied by BackupEngine), so is not reliable for the provenance of a DB.
3581    /// This option might eventually be obsolete and removed as Identity files
3582    /// are phased out.
3583    ///
3584    /// Default: true (enabled)
3585    pub fn set_write_dbid_to_manifest(&mut self, val: bool) {
3586        unsafe {
3587            ffi::rocksdb_options_set_write_dbid_to_manifest(self.inner, u8::from(val));
3588        }
3589    }
3590
3591    /// Returns the value of the `write_dbid_to_manifest` option.
3592    pub fn get_write_dbid_to_manifest(&self) -> bool {
3593        let val_u8 = unsafe { ffi::rocksdb_options_get_write_dbid_to_manifest(self.inner) };
3594        val_u8 != 0
3595    }
3596}
3597
3598impl Default for Options {
3599    fn default() -> Self {
3600        unsafe {
3601            let opts = ffi::rocksdb_options_create();
3602            assert!(!opts.is_null(), "Could not create RocksDB options");
3603
3604            Self {
3605                inner: opts,
3606                outlive: OptionsMustOutliveDB::default(),
3607            }
3608        }
3609    }
3610}
3611
3612impl FlushOptions {
3613    pub fn new() -> FlushOptions {
3614        FlushOptions::default()
3615    }
3616
3617    /// Waits until the flush is done.
3618    ///
3619    /// Default: true
3620    ///
3621    /// # Examples
3622    ///
3623    /// ```
3624    /// use sfzhou_rocksdb::FlushOptions;
3625    ///
3626    /// let mut options = FlushOptions::default();
3627    /// options.set_wait(false);
3628    /// ```
3629    pub fn set_wait(&mut self, wait: bool) {
3630        unsafe {
3631            ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
3632        }
3633    }
3634}
3635
3636impl Default for FlushOptions {
3637    fn default() -> Self {
3638        let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3639        assert!(
3640            !flush_opts.is_null(),
3641            "Could not create RocksDB flush options"
3642        );
3643
3644        Self { inner: flush_opts }
3645    }
3646}
3647
3648impl WriteOptions {
3649    pub fn new() -> WriteOptions {
3650        WriteOptions::default()
3651    }
3652
3653    /// Sets the sync mode. If true, the write will be flushed
3654    /// from the operating system buffer cache before the write is considered complete.
3655    /// If this flag is true, writes will be slower.
3656    ///
3657    /// Default: false
3658    pub fn set_sync(&mut self, sync: bool) {
3659        unsafe {
3660            ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
3661        }
3662    }
3663
3664    /// Sets whether WAL should be active or not.
3665    /// If true, writes will not first go to the write ahead log,
3666    /// and the write may got lost after a crash.
3667    ///
3668    /// Default: false
3669    pub fn disable_wal(&mut self, disable: bool) {
3670        unsafe {
3671            ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
3672        }
3673    }
3674
3675    /// If true and if user is trying to write to column families that don't exist (they were dropped),
3676    /// ignore the write (don't return an error). If there are multiple writes in a WriteBatch,
3677    /// other writes will succeed.
3678    ///
3679    /// Default: false
3680    pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
3681        unsafe {
3682            ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
3683                self.inner,
3684                c_uchar::from(ignore),
3685            );
3686        }
3687    }
3688
3689    /// If true and we need to wait or sleep for the write request, fails
3690    /// immediately with Status::Incomplete().
3691    ///
3692    /// Default: false
3693    pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
3694        unsafe {
3695            ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
3696        }
3697    }
3698
3699    /// If true, this write request is of lower priority if compaction is
3700    /// behind. In this case, no_slowdown = true, the request will be cancelled
3701    /// immediately with Status::Incomplete() returned. Otherwise, it will be
3702    /// slowed down. The slowdown value is determined by RocksDB to guarantee
3703    /// it introduces minimum impacts to high priority writes.
3704    ///
3705    /// Default: false
3706    pub fn set_low_pri(&mut self, v: bool) {
3707        unsafe {
3708            ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
3709        }
3710    }
3711
3712    /// If true, writebatch will maintain the last insert positions of each
3713    /// memtable as hints in concurrent write. It can improve write performance
3714    /// in concurrent writes if keys in one writebatch are sequential. In
3715    /// non-concurrent writes (when concurrent_memtable_writes is false) this
3716    /// option will be ignored.
3717    ///
3718    /// Default: false
3719    pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
3720        unsafe {
3721            ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
3722                self.inner,
3723                c_uchar::from(v),
3724            );
3725        }
3726    }
3727}
3728
3729impl Default for WriteOptions {
3730    fn default() -> Self {
3731        let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
3732        assert!(
3733            !write_opts.is_null(),
3734            "Could not create RocksDB write options"
3735        );
3736
3737        Self { inner: write_opts }
3738    }
3739}
3740
3741impl LruCacheOptions {
3742    /// Capacity of the cache, in the same units as the `charge` of each entry.
3743    /// This is typically measured in bytes, but can be a different unit if using
3744    /// kDontChargeCacheMetadata.
3745    pub fn set_capacity(&mut self, cap: usize) {
3746        unsafe {
3747            ffi::rocksdb_lru_cache_options_set_capacity(self.inner, cap);
3748        }
3749    }
3750
3751    /// Cache is sharded into 2^num_shard_bits shards, by hash of key.
3752    /// If < 0, a good default is chosen based on the capacity and the
3753    /// implementation. (Mutex-based implementations are much more reliant
3754    /// on many shards for parallel scalability.)
3755    pub fn set_num_shard_bits(&mut self, val: c_int) {
3756        unsafe {
3757            ffi::rocksdb_lru_cache_options_set_num_shard_bits(self.inner, val);
3758        }
3759    }
3760}
3761
3762impl Default for LruCacheOptions {
3763    fn default() -> Self {
3764        let inner = unsafe { ffi::rocksdb_lru_cache_options_create() };
3765        assert!(
3766            !inner.is_null(),
3767            "Could not create RocksDB LRU cache options"
3768        );
3769
3770        Self { inner }
3771    }
3772}
3773
/// Which storage tiers a read request is allowed to consult.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(i32)]
pub enum ReadTier {
    /// Reads data in memtable, block cache, OS cache or storage.
    All = 0,
    /// Reads data in memtable or block cache.
    BlockCache = 1,
    /// Reads persisted data. When WAL is disabled, this option will skip data in memtable.
    Persisted = 2,
    /// Reads data in memtable. Used for memtable only iterators.
    Memtable = 3,
}
3787
/// Priority used to pick which file to compact next within a level.
///
/// Derives match the sibling `ReadTier` enum so the type is loggable and
/// comparable; adding derives is backward compatible for callers.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum CompactionPri {
    /// Slightly prioritize larger files by size compensated by #deletes
    ByCompensatedSize = 0,
    /// First compact files whose data's latest update time is oldest.
    /// Try this if you only update some hot keys in small ranges.
    OldestLargestSeqFirst = 1,
    /// First compact files whose range hasn't been compacted to the next level
    /// for the longest. If your updates are random across the key space,
    /// write amplification is slightly better with this option.
    OldestSmallestSeqFirst = 2,
    /// First compact files whose ratio between overlapping size in next level
    /// and its size is the smallest. It in many cases can optimize write amplification.
    MinOverlappingRatio = 3,
    /// Keeps a cursor(s) of the successor of the file (key range) was/were
    /// compacted before, and always picks the next files (key range) in that
    /// level. The file picking process will cycle through all the files in a
    /// round-robin manner.
    RoundRobin = 4,
}
3808
3809impl ReadOptions {
3810    // TODO add snapshot setting here
3811    // TODO add snapshot wrapper structs with proper destructors;
3812    // that struct needs an "iterator" impl too.
3813
3814    /// Specify whether the "data block"/"index block"/"filter block"
3815    /// read for this iteration should be cached in memory?
3816    /// Callers may wish to set this field to false for bulk scans.
3817    ///
3818    /// Default: true
3819    pub fn fill_cache(&mut self, v: bool) {
3820        unsafe {
3821            ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
3822        }
3823    }
3824
3825    /// Sets the snapshot which should be used for the read.
3826    /// The snapshot must belong to the DB that is being read and must
3827    /// not have been released.
3828    pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
3829        unsafe {
3830            ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
3831        }
3832    }
3833
3834    /// Sets the lower bound for an iterator.
3835    pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
3836        self.set_lower_bound_impl(Some(key.into()));
3837    }
3838
3839    /// Sets the upper bound for an iterator.
3840    /// The upper bound itself is not included on the iteration result.
3841    pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
3842        self.set_upper_bound_impl(Some(key.into()));
3843    }
3844
3845    /// Sets lower and upper bounds based on the provided range.  This is
3846    /// similar to setting lower and upper bounds separately except that it also
3847    /// allows either bound to be reset.
3848    ///
3849    /// The argument can be a regular Rust range, e.g. `lower..upper`.  However,
3850    /// since RocksDB upper bound is always excluded (i.e. range can never be
3851    /// fully closed) inclusive ranges (`lower..=upper` and `..=upper`) are not
3852    /// supported.  For example:
3853    ///
3854    /// ```
3855    /// let mut options = sfzhou_rocksdb::ReadOptions::default();
3856    /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
3857    /// ```
3858    ///
3859    /// In addition, [`crate::PrefixRange`] can be used to specify a range of
3860    /// keys with a given prefix.  In particular, the above example is
3861    /// equivalent to:
3862    ///
3863    /// ```
3864    /// let mut options = sfzhou_rocksdb::ReadOptions::default();
3865    /// options.set_iterate_range(sfzhou_rocksdb::PrefixRange("xy".as_bytes()));
3866    /// ```
3867    ///
3868    /// Note that setting range using this method is separate to using prefix
3869    /// iterators.  Prefix iterators use prefix extractor configured for
3870    /// a column family.  Setting bounds via [`crate::PrefixRange`] is more akin
3871    /// to using manual prefix.
3872    ///
3873    /// Using this method clears any previously set bounds.  In other words, the
3874    /// bounds can be reset by setting the range to `..` as in:
3875    ///
3876    /// ```
3877    /// let mut options = sfzhou_rocksdb::ReadOptions::default();
3878    /// options.set_iterate_range(..);
3879    /// ```
3880    pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
3881        let (lower, upper) = range.into_bounds();
3882        self.set_lower_bound_impl(lower);
3883        self.set_upper_bound_impl(upper);
3884    }
3885
    // Shared helper: installs `Some(bound)` as the iterator lower bound, or
    // clears a previously set bound when called with `None`. A `None` when no
    // bound was ever set is a no-op.
    fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref bound) = bound {
            (bound.as_ptr() as *const c_char, bound.len())
        } else if self.iterate_lower_bound.is_some() {
            // A bound was set earlier; pass NULL to tell RocksDB to drop it.
            (std::ptr::null(), 0)
        } else {
            // Nothing was set before and nothing to set now.
            return;
        };
        // Store the buffer on `self` so it outlives the raw pointer handed to C.
        // Moving the Vec does not relocate its heap allocation, so `ptr` stays valid.
        self.iterate_lower_bound = bound;
        unsafe {
            ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
        }
    }
3899
    // Shared helper: installs `Some(bound)` as the iterator upper bound, or
    // clears a previously set bound when called with `None`. A `None` when no
    // bound was ever set is a no-op.
    fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref bound) = bound {
            (bound.as_ptr() as *const c_char, bound.len())
        } else if self.iterate_upper_bound.is_some() {
            // A bound was set earlier; pass NULL to tell RocksDB to drop it.
            (std::ptr::null(), 0)
        } else {
            // Nothing was set before and nothing to set now.
            return;
        };
        // Store the buffer on `self` so it outlives the raw pointer handed to C.
        // Moving the Vec does not relocate its heap allocation, so `ptr` stays valid.
        self.iterate_upper_bound = bound;
        unsafe {
            ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
        }
    }
3913
3914    /// Specify if this read request should process data that ALREADY
3915    /// resides on a particular cache. If the required data is not
3916    /// found at the specified cache, then Status::Incomplete is returned.
3917    ///
3918    /// Default: ::All
3919    pub fn set_read_tier(&mut self, tier: ReadTier) {
3920        unsafe {
3921            ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
3922        }
3923    }
3924
3925    /// Enforce that the iterator only iterates over the same
3926    /// prefix as the seek.
3927    /// This option is effective only for prefix seeks, i.e. prefix_extractor is
3928    /// non-null for the column family and total_order_seek is false.  Unlike
3929    /// iterate_upper_bound, prefix_same_as_start only works within a prefix
3930    /// but in both directions.
3931    ///
3932    /// Default: false
3933    pub fn set_prefix_same_as_start(&mut self, v: bool) {
3934        unsafe {
3935            ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
3936        }
3937    }
3938
3939    /// Enable a total order seek regardless of index format (e.g. hash index)
3940    /// used in the table. Some table format (e.g. plain table) may not support
3941    /// this option.
3942    ///
3943    /// If true when calling Get(), we also skip prefix bloom when reading from
3944    /// block based table. It provides a way to read existing data after
3945    /// changing implementation of prefix extractor.
3946    pub fn set_total_order_seek(&mut self, v: bool) {
3947        unsafe {
3948            ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
3949        }
3950    }
3951
3952    /// Sets a threshold for the number of keys that can be skipped
3953    /// before failing an iterator seek as incomplete. The default value of 0 should be used to
3954    /// never fail a request as incomplete, even on skipping too many keys.
3955    ///
3956    /// Default: 0
3957    pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
3958        unsafe {
3959            ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
3960        }
3961    }
3962
3963    /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we schedule a background job
3964    /// in the flush job queue and delete obsolete files in background.
3965    ///
3966    /// Default: false
3967    pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
3968        unsafe {
3969            ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
3970                self.inner,
3971                c_uchar::from(v),
3972            );
3973        }
3974    }
3975
3976    /// If true, keys deleted using the DeleteRange() API will be visible to
3977    /// readers until they are naturally deleted during compaction. This improves
3978    /// read performance in DBs with many range deletions.
3979    ///
3980    /// Default: false
3981    pub fn set_ignore_range_deletions(&mut self, v: bool) {
3982        unsafe {
3983            ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
3984        }
3985    }
3986
3987    /// If true, all data read from underlying storage will be
3988    /// verified against corresponding checksums.
3989    ///
3990    /// Default: true
3991    pub fn set_verify_checksums(&mut self, v: bool) {
3992        unsafe {
3993            ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
3994        }
3995    }
3996
3997    /// If non-zero, an iterator will create a new table reader which
3998    /// performs reads of the given size. Using a large size (> 2MB) can
3999    /// improve the performance of forward iteration on spinning disks.
4000    /// Default: 0
4001    ///
4002    /// ```
4003    /// use sfzhou_rocksdb::{ReadOptions};
4004    ///
4005    /// let mut opts = ReadOptions::default();
4006    /// opts.set_readahead_size(4_194_304); // 4mb
4007    /// ```
4008    pub fn set_readahead_size(&mut self, v: usize) {
4009        unsafe {
4010            ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
4011        }
4012    }
4013
4014    /// If auto_readahead_size is set to true, it will auto tune the readahead_size
4015    /// during scans internally.
4016    /// For this feature to be enabled, iterate_upper_bound must also be specified.
4017    ///
4018    /// NOTE: - Recommended for forward Scans only.
4019    ///       - If there is a backward scans, this option will be
4020    ///         disabled internally and won't be enabled again if the forward scan
4021    ///         is issued again.
4022    ///
4023    /// Default: true
4024    pub fn set_auto_readahead_size(&mut self, v: bool) {
4025        unsafe {
4026            ffi::rocksdb_readoptions_set_auto_readahead_size(self.inner, c_uchar::from(v));
4027        }
4028    }
4029
4030    /// If true, create a tailing iterator. Note that tailing iterators
4031    /// only support moving in the forward direction. Iterating in reverse
4032    /// or seek_to_last are not supported.
4033    pub fn set_tailing(&mut self, v: bool) {
4034        unsafe {
4035            ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
4036        }
4037    }
4038
4039    /// Specifies the value of "pin_data". If true, it keeps the blocks
4040    /// loaded by the iterator pinned in memory as long as the iterator is not deleted,
4041    /// If used when reading from tables created with
4042    /// BlockBasedTableOptions::use_delta_encoding = false,
4043    /// Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
4044    /// return 1.
4045    ///
4046    /// Default: false
4047    pub fn set_pin_data(&mut self, v: bool) {
4048        unsafe {
4049            ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
4050        }
4051    }
4052
4053    /// Asynchronously prefetch some data.
4054    ///
4055    /// Used for sequential reads and internal automatic prefetching.
4056    ///
4057    /// Default: `false`
4058    pub fn set_async_io(&mut self, v: bool) {
4059        unsafe {
4060            ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
4061        }
4062    }
4063
4064    /// Timestamp of operation. Read should return the latest data visible to the
4065    /// specified timestamp. All timestamps of the same database must be of the
4066    /// same length and format. The user is responsible for providing a customized
4067    /// compare function via Comparator to order <key, timestamp> tuples.
4068    /// For iterator, iter_start_ts is the lower bound (older) and timestamp
4069    /// serves as the upper bound. Versions of the same record that fall in
4070    /// the timestamp range will be returned. If iter_start_ts is nullptr,
4071    /// only the most recent version visible to timestamp is returned.
4072    /// The user-specified timestamp feature is still under active development,
4073    /// and the API is subject to change.
4074    pub fn set_timestamp<S: Into<Vec<u8>>>(&mut self, ts: S) {
4075        self.set_timestamp_impl(Some(ts.into()));
4076    }
4077
    /// Shared implementation for setting or clearing the read timestamp.
    /// `Some(ts)` installs a new timestamp; `None` cancels a previously set
    /// one. If nothing was set and `None` is passed, this is a no-op.
    fn set_timestamp_impl(&mut self, ts: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref ts) = ts {
            // Point at the buffer of the new timestamp; ownership of the Vec
            // is transferred to `self.timestamp` below, keeping `ptr` valid.
            (ts.as_ptr() as *const c_char, ts.len())
        } else if self.timestamp.is_some() {
            // The stored timestamp is a `Some` but we're updating it to a `None`.
            // This means to cancel a previously set timestamp.
            // To do this, use a null pointer and zero length.
            (std::ptr::null(), 0)
        } else {
            return;
        };
        // Store the owning Vec before the FFI call. Moving a Vec moves only
        // its (ptr, len, cap) header, not the heap buffer, so `ptr` remains
        // valid for the call below and for as long as `self` holds the Vec.
        self.timestamp = ts;
        unsafe {
            ffi::rocksdb_readoptions_set_timestamp(self.inner, ptr, len);
        }
    }
4094
4095    /// See `set_timestamp`
4096    pub fn set_iter_start_ts<S: Into<Vec<u8>>>(&mut self, ts: S) {
4097        self.set_iter_start_ts_impl(Some(ts.into()));
4098    }
4099
4100    fn set_iter_start_ts_impl(&mut self, ts: Option<Vec<u8>>) {
4101        let (ptr, len) = if let Some(ref ts) = ts {
4102            (ts.as_ptr() as *const c_char, ts.len())
4103        } else if self.timestamp.is_some() {
4104            (std::ptr::null(), 0)
4105        } else {
4106            return;
4107        };
4108        self.iter_start_ts = ts;
4109        unsafe {
4110            ffi::rocksdb_readoptions_set_iter_start_ts(self.inner, ptr, len);
4111        }
4112    }
4113}
4114
4115impl Default for ReadOptions {
4116    fn default() -> Self {
4117        unsafe {
4118            Self {
4119                inner: ffi::rocksdb_readoptions_create(),
4120                timestamp: None,
4121                iter_start_ts: None,
4122                iterate_upper_bound: None,
4123                iterate_lower_bound: None,
4124            }
4125        }
4126    }
4127}
4128
4129impl IngestExternalFileOptions {
4130    /// Can be set to true to move the files instead of copying them.
4131    pub fn set_move_files(&mut self, v: bool) {
4132        unsafe {
4133            ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
4134        }
4135    }
4136
4137    /// If set to false, an ingested file keys could appear in existing snapshots
4138    /// that where created before the file was ingested.
4139    pub fn set_snapshot_consistency(&mut self, v: bool) {
4140        unsafe {
4141            ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
4142                self.inner,
4143                c_uchar::from(v),
4144            );
4145        }
4146    }
4147
4148    /// If set to false, IngestExternalFile() will fail if the file key range
4149    /// overlaps with existing keys or tombstones in the DB.
4150    pub fn set_allow_global_seqno(&mut self, v: bool) {
4151        unsafe {
4152            ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
4153                self.inner,
4154                c_uchar::from(v),
4155            );
4156        }
4157    }
4158
4159    /// If set to false and the file key range overlaps with the memtable key range
4160    /// (memtable flush required), IngestExternalFile will fail.
4161    pub fn set_allow_blocking_flush(&mut self, v: bool) {
4162        unsafe {
4163            ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
4164                self.inner,
4165                c_uchar::from(v),
4166            );
4167        }
4168    }
4169
4170    /// Set to true if you would like duplicate keys in the file being ingested
4171    /// to be skipped rather than overwriting existing data under that key.
4172    /// Usecase: back-fill of some historical data in the database without
4173    /// over-writing existing newer version of data.
4174    /// This option could only be used if the DB has been running
4175    /// with allow_ingest_behind=true since the dawn of time.
4176    /// All files will be ingested at the bottommost level with seqno=0.
4177    pub fn set_ingest_behind(&mut self, v: bool) {
4178        unsafe {
4179            ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
4180        }
4181    }
4182}
4183
4184impl Default for IngestExternalFileOptions {
4185    fn default() -> Self {
4186        unsafe {
4187            Self {
4188                inner: ffi::rocksdb_ingestexternalfileoptions_create(),
4189            }
4190        }
4191    }
4192}
4193
/// Used by BlockBasedOptions::set_index_type.
pub enum BlockBasedIndexType {
    /// A space efficient index block that is optimized for
    /// binary-search-based index.
    BinarySearch,

    /// The hash index, if enabled, will perform a hash lookup if
    /// a prefix extractor has been provided through Options::set_prefix_extractor.
    HashSearch,

    /// A two-level index implementation. Both levels are binary search indexes.
    /// Also known as the partitioned index format in RocksDB.
    TwoLevelIndexSearch,
}
4207
/// Used by BlockBasedOptions::set_data_block_index_type.
// repr(C) with explicit discriminants — presumably these match the values
// expected by the C API; TODO confirm against the ffi bindings.
#[repr(C)]
pub enum DataBlockIndexType {
    /// Use binary search when performing point lookup for keys in data blocks.
    /// This is the default.
    BinarySearch = 0,

    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
    /// compatible with databases created without this feature. Once turned on, existing data will
    /// be gradually converted to the hash index format.
    BinaryAndHash = 1,
}
4220
/// Defines the underlying memtable implementation.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
pub enum MemtableFactory {
    /// A vector-based memtable (see the wiki for trade-offs).
    Vector,
    /// A memtable of hash buckets, each holding a skip list.
    HashSkipList {
        /// Number of hash buckets.
        bucket_count: usize,
        /// Height of each skip list.
        height: i32,
        /// Branching factor of each skip list.
        branching_factor: i32,
    },
    /// A memtable of hash buckets, each holding a linked list.
    HashLinkList {
        /// Number of hash buckets.
        bucket_count: usize,
    },
}
4234
/// Used by BlockBasedOptions::set_checksum_type.
// Explicit discriminants — presumably these match the C API's checksum type
// values; TODO confirm against the ffi bindings.
pub enum ChecksumType {
    NoChecksum = 0,
    CRC32c = 1,
    XXHash = 2,
    XXHash64 = 3,
    XXH3 = 4, // Supported since RocksDB 6.27
}
4243
/// Used in [`PlainTableFactoryOptions`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum KeyEncodingType {
    /// Always write full keys.
    #[default]
    Plain = 0,
    /// Find opportunities to write the same prefix for multiple rows.
    Prefix = 1,
}
4253
/// Used with DBOptions::set_plain_table_factory.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
///
/// Defaults:
///  user_key_length: 0 (variable length)
///  bloom_bits_per_key: 10
///  hash_table_ratio: 0.75
///  index_sparseness: 16
///  huge_page_tlb_size: 0
///  encoding_type: KeyEncodingType::Plain
///  full_scan_mode: false
///  store_index_in_file: false
pub struct PlainTableFactoryOptions {
    /// Fixed user key length; 0 means variable-length keys.
    pub user_key_length: u32,
    /// Bloom filter bits per key.
    pub bloom_bits_per_key: i32,
    /// Target fill ratio of the hash table.
    pub hash_table_ratio: f64,
    /// Index sparseness (see the PlainTable wiki for details).
    pub index_sparseness: usize,
    /// Huge page TLB size; 0 disables huge pages.
    pub huge_page_tlb_size: usize,
    /// Key encoding scheme; see [`KeyEncodingType`].
    pub encoding_type: KeyEncodingType,
    /// Whether the table is opened in full scan mode.
    pub full_scan_mode: bool,
    /// Whether the index is persisted in the SST file.
    pub store_index_in_file: bool,
}
4277
/// Block/SST compression algorithms. Discriminants are taken directly from
/// the corresponding `rocksdb_*_compression` constants in the C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompressionType {
    None = ffi::rocksdb_no_compression as isize,
    Snappy = ffi::rocksdb_snappy_compression as isize,
    Zlib = ffi::rocksdb_zlib_compression as isize,
    Bz2 = ffi::rocksdb_bz2_compression as isize,
    Lz4 = ffi::rocksdb_lz4_compression as isize,
    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
    Zstd = ffi::rocksdb_zstd_compression as isize,
}
4289
/// Compaction styles. Discriminants are taken directly from the
/// corresponding `rocksdb_*_compaction` constants in the C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionStyle {
    Level = ffi::rocksdb_level_compaction as isize,
    Universal = ffi::rocksdb_universal_compaction as isize,
    Fifo = ffi::rocksdb_fifo_compaction as isize,
}
4297
/// WAL recovery modes. Discriminants are taken directly from the
/// corresponding `rocksdb_*_recovery` constants in the C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBRecoveryMode {
    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}
4306
/// Owned wrapper around a C `rocksdb_fifo_compaction_options_t` handle,
/// created by `Default` and released by `Drop`.
pub struct FifoCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}
4310
4311impl Default for FifoCompactOptions {
4312    fn default() -> Self {
4313        let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
4314        assert!(
4315            !opts.is_null(),
4316            "Could not create RocksDB Fifo Compaction Options"
4317        );
4318
4319        Self { inner: opts }
4320    }
4321}
4322
4323impl Drop for FifoCompactOptions {
4324    fn drop(&mut self) {
4325        unsafe {
4326            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
4327        }
4328    }
4329}
4330
4331impl FifoCompactOptions {
4332    /// Sets the max table file size.
4333    ///
4334    /// Once the total sum of table files reaches this, we will delete the oldest
4335    /// table file
4336    ///
4337    /// Default: 1GB
4338    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
4339        unsafe {
4340            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
4341        }
4342    }
4343}
4344
/// Algorithm used to stop picking files into a single universal compaction
/// run. Discriminants come from the C API's `*_compaction_stop_style`
/// constants.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum UniversalCompactionStopStyle {
    /// Stop based on similar file sizes (`rocksdb_similar_size_compaction_stop_style`).
    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
    /// Stop based on total candidate size (`rocksdb_total_size_compaction_stop_style`).
    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
}
4351
/// Owned wrapper around a C `rocksdb_universal_compaction_options_t` handle,
/// created by `Default` and released by `Drop`.
pub struct UniversalCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
}
4355
4356impl Default for UniversalCompactOptions {
4357    fn default() -> Self {
4358        let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
4359        assert!(
4360            !opts.is_null(),
4361            "Could not create RocksDB Universal Compaction Options"
4362        );
4363
4364        Self { inner: opts }
4365    }
4366}
4367
4368impl Drop for UniversalCompactOptions {
4369    fn drop(&mut self) {
4370        unsafe {
4371            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
4372        }
4373    }
4374}
4375
4376impl UniversalCompactOptions {
4377    /// Sets the percentage flexibility while comparing file size.
4378    /// If the candidate file(s) size is 1% smaller than the next file's size,
4379    /// then include next file into this candidate set.
4380    ///
4381    /// Default: 1
4382    pub fn set_size_ratio(&mut self, ratio: c_int) {
4383        unsafe {
4384            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
4385        }
4386    }
4387
4388    /// Sets the minimum number of files in a single compaction run.
4389    ///
4390    /// Default: 2
4391    pub fn set_min_merge_width(&mut self, num: c_int) {
4392        unsafe {
4393            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
4394        }
4395    }
4396
4397    /// Sets the maximum number of files in a single compaction run.
4398    ///
4399    /// Default: UINT_MAX
4400    pub fn set_max_merge_width(&mut self, num: c_int) {
4401        unsafe {
4402            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
4403        }
4404    }
4405
4406    /// sets the size amplification.
4407    ///
4408    /// It is defined as the amount (in percentage) of
4409    /// additional storage needed to store a single byte of data in the database.
4410    /// For example, a size amplification of 2% means that a database that
4411    /// contains 100 bytes of user-data may occupy upto 102 bytes of
4412    /// physical storage. By this definition, a fully compacted database has
4413    /// a size amplification of 0%. Rocksdb uses the following heuristic
4414    /// to calculate size amplification: it assumes that all files excluding
4415    /// the earliest file contribute to the size amplification.
4416    ///
4417    /// Default: 200, which means that a 100 byte database could require upto 300 bytes of storage.
4418    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
4419        unsafe {
4420            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
4421                self.inner, v,
4422            );
4423        }
4424    }
4425
4426    /// Sets the percentage of compression size.
4427    ///
4428    /// If this option is set to be -1, all the output files
4429    /// will follow compression type specified.
4430    ///
4431    /// If this option is not negative, we will try to make sure compressed
4432    /// size is just above this value. In normal cases, at least this percentage
4433    /// of data will be compressed.
4434    /// When we are compacting to a new file, here is the criteria whether
4435    /// it needs to be compressed: assuming here are the list of files sorted
4436    /// by generation time:
4437    ///    A1...An B1...Bm C1...Ct
4438    /// where A1 is the newest and Ct is the oldest, and we are going to compact
4439    /// B1...Bm, we calculate the total size of all the files as total_size, as
4440    /// well as  the total size of C1...Ct as total_C, the compaction output file
4441    /// will be compressed iff
4442    ///   total_C / total_size < this percentage
4443    ///
4444    /// Default: -1
4445    pub fn set_compression_size_percent(&mut self, v: c_int) {
4446        unsafe {
4447            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
4448        }
4449    }
4450
4451    /// Sets the algorithm used to stop picking files into a single compaction run.
4452    ///
4453    /// Default: ::Total
4454    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
4455        unsafe {
4456            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
4457        }
4458    }
4459}
4460
/// Controls compaction of the bottommost level.
/// Used by [`CompactOptions::set_bottommost_level_compaction`].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum BottommostLevelCompaction {
    /// Skip bottommost level compaction
    Skip = 0,
    /// Only compact bottommost level if there is a compaction filter
    /// This is the default option
    IfHaveCompactionFilter,
    /// Always compact bottommost level
    Force,
    /// Always compact bottommost level but in bottommost level avoid
    /// double-compacting files created in the same compaction
    ForceOptimized,
}
4476
/// Owned wrapper around a C `rocksdb_compactoptions_t` handle,
/// created by `Default` and released by `Drop`.
pub struct CompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
    // Owns the buffer whose pointer is handed to the C API by
    // `set_full_history_ts_low`, keeping that pointer valid.
    full_history_ts_low: Option<Vec<u8>>,
}
4481
4482impl Default for CompactOptions {
4483    fn default() -> Self {
4484        let opts = unsafe { ffi::rocksdb_compactoptions_create() };
4485        assert!(!opts.is_null(), "Could not create RocksDB Compact Options");
4486
4487        Self {
4488            inner: opts,
4489            full_history_ts_low: None,
4490        }
4491    }
4492}
4493
4494impl Drop for CompactOptions {
4495    fn drop(&mut self) {
4496        unsafe {
4497            ffi::rocksdb_compactoptions_destroy(self.inner);
4498        }
4499    }
4500}
4501
4502impl CompactOptions {
4503    /// If more than one thread calls manual compaction,
4504    /// only one will actually schedule it while the other threads will simply wait
4505    /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
4506    /// is set to true, the call will disable scheduling of automatic compaction jobs
4507    /// and wait for existing automatic compaction jobs to finish.
4508    pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
4509        unsafe {
4510            ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
4511                self.inner,
4512                c_uchar::from(v),
4513            );
4514        }
4515    }
4516
4517    /// Sets bottommost level compaction.
4518    pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
4519        unsafe {
4520            ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
4521        }
4522    }
4523
4524    /// If true, compacted files will be moved to the minimum level capable
4525    /// of holding the data or given level (specified non-negative target_level).
4526    pub fn set_change_level(&mut self, v: bool) {
4527        unsafe {
4528            ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
4529        }
4530    }
4531
4532    /// If change_level is true and target_level have non-negative value, compacted
4533    /// files will be moved to target_level.
4534    pub fn set_target_level(&mut self, lvl: c_int) {
4535        unsafe {
4536            ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
4537        }
4538    }
4539
4540    /// Set user-defined timestamp low bound, the data with older timestamp than
4541    /// low bound maybe GCed by compaction. Default: nullptr
4542    pub fn set_full_history_ts_low<S: Into<Vec<u8>>>(&mut self, ts: S) {
4543        self.set_full_history_ts_low_impl(Some(ts.into()));
4544    }
4545
4546    fn set_full_history_ts_low_impl(&mut self, ts: Option<Vec<u8>>) {
4547        let (ptr, len) = if let Some(ref ts) = ts {
4548            (ts.as_ptr() as *mut c_char, ts.len())
4549        } else if self.full_history_ts_low.is_some() {
4550            (std::ptr::null::<Vec<u8>>() as *mut c_char, 0)
4551        } else {
4552            return;
4553        };
4554        self.full_history_ts_low = ts;
4555        unsafe {
4556            ffi::rocksdb_compactoptions_set_full_history_ts_low(self.inner, ptr, len);
4557        }
4558    }
4559}
4560
/// Owned wrapper around a C `rocksdb_wait_for_compact_options_t` handle,
/// created by `Default` and released by `Drop`.
pub struct WaitForCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_wait_for_compact_options_t,
}
4564
4565impl Default for WaitForCompactOptions {
4566    fn default() -> Self {
4567        let opts = unsafe { ffi::rocksdb_wait_for_compact_options_create() };
4568        assert!(
4569            !opts.is_null(),
4570            "Could not create RocksDB Wait For Compact Options"
4571        );
4572
4573        Self { inner: opts }
4574    }
4575}
4576
4577impl Drop for WaitForCompactOptions {
4578    fn drop(&mut self) {
4579        unsafe {
4580            ffi::rocksdb_wait_for_compact_options_destroy(self.inner);
4581        }
4582    }
4583}
4584
4585impl WaitForCompactOptions {
4586    /// If true, abort waiting if background jobs are paused. If false,
4587    /// ContinueBackgroundWork() must be called to resume the background jobs.
4588    /// Otherwise, jobs that were queued, but not scheduled yet may never finish
4589    /// and WaitForCompact() may wait indefinitely (if timeout is set, it will
4590    /// abort after the timeout).
4591    ///
4592    /// Default: false
4593    pub fn set_abort_on_pause(&mut self, v: bool) {
4594        unsafe {
4595            ffi::rocksdb_wait_for_compact_options_set_abort_on_pause(self.inner, c_uchar::from(v));
4596        }
4597    }
4598
4599    /// If true, flush all column families before starting to wait.
4600    ///
4601    /// Default: false
4602    pub fn set_flush(&mut self, v: bool) {
4603        unsafe {
4604            ffi::rocksdb_wait_for_compact_options_set_flush(self.inner, c_uchar::from(v));
4605        }
4606    }
4607
4608    /// Timeout in microseconds for waiting for compaction to complete.
4609    /// when timeout == 0, WaitForCompact() will wait as long as there's background
4610    /// work to finish.
4611    ///
4612    /// Default: 0
4613    pub fn set_timeout(&mut self, microseconds: u64) {
4614        unsafe {
4615            ffi::rocksdb_wait_for_compact_options_set_timeout(self.inner, microseconds);
4616        }
4617    }
4618}
4619
/// Represents a path where sst files can be put into
///
/// Wraps an owned C `rocksdb_dbpath_t` handle; released by `Drop`.
pub struct DBPath {
    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
}
4624
4625impl DBPath {
4626    /// Create a new path
4627    pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
4628        let p = to_cpath(path.as_ref()).unwrap();
4629        let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
4630        if dbpath.is_null() {
4631            Err(Error::new(format!(
4632                "Could not create path for storing sst files at location: {}",
4633                path.as_ref().display()
4634            )))
4635        } else {
4636            Ok(DBPath { inner: dbpath })
4637        }
4638    }
4639}
4640
4641impl Drop for DBPath {
4642    fn drop(&mut self) {
4643        unsafe {
4644            ffi::rocksdb_dbpath_destroy(self.inner);
4645        }
4646    }
4647}
4648
#[cfg(test)]
mod tests {
    use crate::db_options::WriteBufferManager;
    use crate::{Cache, CompactionPri, MemtableFactory, Options};

    // Statistics should be reported after `enable_statistics`, and absent on
    // default options.
    #[test]
    fn test_enable_statistics() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_dump_period_sec(60);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    // Smoke test: each memtable factory variant can be applied without
    // panicking (no behavioral assertions possible without opening a DB).
    #[test]
    fn test_set_memtable_factory() {
        let mut opts = Options::default();
        opts.set_memtable_factory(MemtableFactory::Vector);
        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
        opts.set_memtable_factory(MemtableFactory::HashSkipList {
            bucket_count: 100,
            height: 4,
            branching_factor: 4,
        });
    }

    // Same shape as `test_enable_statistics`, exercising the persist-period
    // setter instead of the dump-period one.
    #[test]
    fn test_set_stats_persist_period_sec() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_persist_period_sec(5);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_write_buffer_manager() {
        let mut opts = Options::default();
        let lrucache = Cache::new_lru_cache(100);
        let write_buffer_manager =
            WriteBufferManager::new_write_buffer_manager_with_cache(100, false, lrucache);
        assert_eq!(write_buffer_manager.get_buffer_size(), 100);
        assert_eq!(write_buffer_manager.get_usage(), 0);
        assert!(write_buffer_manager.enabled());

        opts.set_write_buffer_manager(&write_buffer_manager);
        drop(opts);

        // WriteBufferManager outlives options
        assert!(write_buffer_manager.enabled());
    }

    // Opens a DB with a non-default compaction priority and verifies the
    // setting was persisted into the generated OPTIONS file on disk.
    #[test]
    fn compaction_pri() {
        let mut opts = Options::default();
        opts.set_compaction_pri(CompactionPri::RoundRobin);
        opts.create_if_missing(true);
        let tmp = tempfile::tempdir().unwrap();
        let _db = crate::DB::open(&opts, tmp.path()).unwrap();

        // Locate the OPTIONS-* file RocksDB writes into the DB directory and
        // read it back as text.
        let options = std::fs::read_dir(tmp.path())
            .unwrap()
            .find_map(|x| {
                let x = x.ok()?;
                x.file_name()
                    .into_string()
                    .unwrap()
                    .contains("OPTIONS")
                    .then_some(x.path())
            })
            .map(std::fs::read_to_string)
            .unwrap()
            .unwrap();

        assert!(options.contains("compaction_pri=kRoundRobin"));
    }
}