rust_rocksdb/db_options.rs

// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::ffi::CStr;
use std::panic::{catch_unwind, AssertUnwindSafe, RefUnwindSafe};
use std::path::Path;
use std::ptr::null_mut;
use std::slice;

use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};

use crate::cache::Cache;
use crate::column_family::ColumnFamilyTtl;
use crate::event_listener::{new_event_listener, EventListener};
use crate::statistics::{Histogram, HistogramData, StatsLevel};
use crate::write_buffer_manager::WriteBufferManager;
use crate::{
    compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
    compaction_filter_factory::{self, CompactionFilterFactory},
    comparator::{
        ComparatorCallback, ComparatorWithTsCallback, CompareFn, CompareTsFn, CompareWithoutTsFn,
    },
    db::DBAccess,
    env::Env,
    ffi,
    ffi_util::{from_cstr, to_cpath, CStrLike},
    merge_operator::{
        self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
    },
    slice_transform::SliceTransform,
    statistics::Ticker,
    ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
};

#[derive(Default)]
pub(crate) struct OptionsMustOutliveDB {
    env: Option<Env>,
    row_cache: Option<Cache>,
    blob_cache: Option<Cache>,
    block_based: Option<BlockBasedOptionsMustOutliveDB>,
    write_buffer_manager: Option<WriteBufferManager>,
}

impl OptionsMustOutliveDB {
    pub(crate) fn clone(&self) -> Self {
        Self {
            env: self.env.clone(),
            row_cache: self.row_cache.clone(),
            blob_cache: self.blob_cache.clone(),
            block_based: self
                .block_based
                .as_ref()
                .map(BlockBasedOptionsMustOutliveDB::clone),
            write_buffer_manager: self.write_buffer_manager.clone(),
        }
    }
}

#[derive(Default)]
struct BlockBasedOptionsMustOutliveDB {
    block_cache: Option<Cache>,
}

impl BlockBasedOptionsMustOutliveDB {
    fn clone(&self) -> Self {
        Self {
            block_cache: self.block_cache.clone(),
        }
    }
}

/// Database-wide options around performance and behavior.
///
/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
/// and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use rust_rocksdb::{Options, DB};
/// use rust_rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///    let path = "path/for/rocksdb/storageX";
///    let mut opts = Options::default();
///    opts.create_if_missing(true);
///    opts.set_max_open_files(10000);
///    opts.set_use_fsync(false);
///    opts.set_bytes_per_sync(8388608);
///    opts.optimize_for_point_lookup(1024);
///    opts.set_table_cache_num_shard_bits(6);
///    opts.set_max_write_buffer_number(32);
///    opts.set_write_buffer_size(536870912);
///    opts.set_target_file_size_base(1073741824);
///    opts.set_min_write_buffer_number_to_merge(4);
///    opts.set_level_zero_stop_writes_trigger(2000);
///    opts.set_level_zero_slowdown_writes_trigger(0);
///    opts.set_compaction_style(DBCompactionStyle::Universal);
///    opts.set_disable_auto_compactions(true);
///
///    DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    pub(crate) outlive: OptionsMustOutliveDB,
}

/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use rust_rocksdb::{DB, Options, WriteBatch, WriteOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY1")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY1");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(&batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
}

pub struct LruCacheOptions {
    pub(crate) inner: *mut ffi::rocksdb_lru_cache_options_t,
}

/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use rust_rocksdb::{DB, Options, FlushOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY2")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY2");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}

/// For configuring block-based file storage.
pub struct BlockBasedOptions {
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    outlive: BlockBasedOptionsMustOutliveDB,
}

pub struct ReadOptions {
    pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
    // The `ReadOptions` owns a copy of the timestamp and iteration bounds.
    // This is necessary to ensure the pointers we pass over the FFI live as
    // long as the `ReadOptions`. This way, when performing the read operation,
    // the pointers are guaranteed to be valid.
    timestamp: Option<Vec<u8>>,
    iter_start_ts: Option<Vec<u8>>,
    iterate_upper_bound: Option<Vec<u8>>,
    iterate_lower_bound: Option<Vec<u8>>,
}

/// Configuration of cuckoo-based storage.
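///
/// # Examples
///
/// A minimal sketch of plugging cuckoo tables into database options
/// (assumes the `Options::set_cuckoo_table_factory` binding in this crate):
///
/// ```
/// use rust_rocksdb::{CuckooTableOptions, Options};
///
/// let mut cuckoo_opts = CuckooTableOptions::default();
/// cuckoo_opts.set_hash_ratio(0.75);
///
/// let mut opts = Options::default();
/// opts.set_cuckoo_table_factory(&cuckoo_opts);
/// ```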
pub struct CuckooTableOptions {
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}

/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use rust_rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// let tempdir = tempfile::Builder::new()
///     .tempdir()
///     .expect("Failed to create temporary folder for the _path_for_sst_file");
/// let path1 = tempdir.path().join("_path_for_sst_file");
/// writer.open(path1.clone()).unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let tempdir2 = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY3")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY3");
/// let path2 = tempdir2.path();
/// {
///   let db = DB::open_default(&path2).unwrap();
///   let mut ingest_opts = IngestExternalFileOptions::default();
///   ingest_opts.set_move_files(true);
///   db.ingest_external_file_opts(&ingest_opts, vec![path1]).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path2);
/// ```
pub struct IngestExternalFileOptions {
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}

// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for LruCacheOptions {}
unsafe impl Send for FlushOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CompactOptions {}

// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference.
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for LruCacheOptions {}
unsafe impl Sync for FlushOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CompactOptions {}

impl Drop for Options {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}

impl Clone for Options {
    fn clone(&self) -> Self {
        let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
        assert!(!inner.is_null(), "Could not copy RocksDB options");

        Self {
            inner,
            outlive: self.outlive.clone(),
        }
    }
}

impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}

impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}

impl Drop for FlushOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}

impl Drop for WriteOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}

impl Drop for LruCacheOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_lru_cache_options_destroy(self.inner);
        }
    }
}

impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_readoptions_destroy(self.inner);
        }
    }
}

impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}

impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
    ///
    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
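    ///
    /// # Examples
    ///
    /// A minimal sketch; partitioned filters require the two-level index:
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedIndexType, BlockBasedOptions};
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch);
    /// block_opts.set_bloom_filter(10.0, false);
    /// block_opts.set_partition_filters(true);
    /// ```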
    pub fn set_partition_filters(&mut self, size: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(self.inner, c_uchar::from(size));
        }
    }

    /// Sets global cache for blocks (user data is stored in a set of blocks, and
    /// a block is the unit of reading from disk).
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
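    ///
    /// # Examples
    ///
    /// A minimal sketch with an illustrative 64 MiB capacity:
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, Cache};
    ///
    /// let cache = Cache::new_lru_cache(64 * 1024 * 1024);
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_cache(&cache);
    /// ```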
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        self.outlive.block_cache = Some(cache.clone());
    }

    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
        }
    }

    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
    /// policy to reduce disk reads.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// ```
    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
        unsafe {
            let bloom = if block_based {
                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
            } else {
                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
            };

            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
        }
    }

    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// If cache_index_and_filter_blocks is enabled, cache index and filter blocks with high priority.
    /// If set to true, depending on implementation of block cache,
    /// index and filter blocks may be less likely to be evicted than data blocks.
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Defines the index type to be used for SS-table lookups.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
    /// ```
    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
        let index = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// the top-level index of partitioned filter and index blocks are stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in LSM tree.
    ///
    /// Default: false.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    ///
    /// See full [list](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/table.h#L493-L521)
    /// of the supported versions.
    ///
    /// Default: 6.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }

    /// Use delta encoding to compress keys in blocks.
    /// ReadOptions::pin_data requires this option to be disabled.
    ///
    /// Default: true
    pub fn set_use_delta_encoding(&mut self, enable: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_use_delta_encoding(
                self.inner,
                c_uchar::from(enable),
            );
        }
    }

    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }

    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }

    /// Set the data block index type for point lookups:
    ///  `DataBlockIndexType::BinarySearch` to use binary search within the data block.
    ///  `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
    ///  the normal binary search.
    ///
    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
    ///
    /// Default: `BinarySearch`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
    /// block_opts.set_data_block_hash_ratio(0.85);
    /// ```
    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
        let index_t = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
        }
    }

    /// Set the data block hash index utilization ratio.
    ///
    /// The smaller the utilization ratio, the fewer hash collisions happen, which reduces the risk of a
    /// point lookup falling back to binary search due to collisions. A small ratio means faster
    /// lookup at the price of more space overhead.
    ///
    /// Default: 0.75
    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
        }
    }

    /// If false, place only prefixes in the filter, not whole keys.
    ///
    /// Defaults to true.
    pub fn set_whole_key_filtering(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
        }
    }

    /// Use the specified checksum type.
    /// Newly created table files will be protected with this checksum type.
    /// Old table files will still be readable, even though they have a different checksum type.
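    ///
    /// # Examples
    ///
    /// A minimal sketch using the XXH3 checksum variant:
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, ChecksumType};
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_checksum_type(ChecksumType::XXH3);
    /// ```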
    pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
        unsafe {
            ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
        }
    }

    /// If true, generate Bloom/Ribbon filters that minimize memory internal
    /// fragmentation.
    /// See official [wiki](
    /// https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter#reducing-internal-fragmentation)
    /// for more information.
    ///
    /// Defaults to false.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// opts.set_optimize_filters_for_memory(true);
    /// ```
    pub fn set_optimize_filters_for_memory(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_optimize_filters_for_memory(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// The tier of block-based tables whose top-level index into metadata
    /// partitions will be pinned. Currently indexes and filters may be
    /// partitioned.
    ///
    /// Note `cache_index_and_filter_blocks` must be true for this option to have
    /// any effect. Otherwise any top-level index into metadata partitions would be
    /// held in table reader memory, outside the block cache.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_top_level_index_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_top_level_index_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_top_level_index_pinning_tier(
                self.inner,
                tier as c_int,
            );
        }
    }

    /// The tier of block-based tables whose metadata partitions will be pinned.
    /// Currently indexes and filters may be partitioned.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_partition_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_partition_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_pinning_tier(self.inner, tier as c_int);
        }
    }

    /// The tier of block-based tables whose unpartitioned metadata blocks will be
    /// pinned.
    ///
    /// Note `cache_index_and_filter_blocks` must be true for this option to have
    /// any effect. Otherwise the unpartitioned meta-blocks would be held in table
    /// reader memory, outside the block cache.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_unpartitioned_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_unpartitioned_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_unpartitioned_pinning_tier(
                self.inner,
                tier as c_int,
            );
        }
    }
}

impl Default for BlockBasedOptions {
    fn default() -> Self {
        let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
        assert!(
            !block_opts.is_null(),
            "Could not create RocksDB block based options"
        );

        Self {
            inner: block_opts,
            outlive: BlockBasedOptionsMustOutliveDB::default(),
        }
    }
}

impl CuckooTableOptions {
    /// Determines the utilization of hash tables. Smaller values
    /// result in larger hash tables with fewer collisions.
    /// Default: 0.9
    pub fn set_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
        }
    }

    /// A property used by the builder to determine how deep to search
    /// for a path to displace elements in case of collision.
    /// See the Builder.MakeSpaceForKey method. Higher
    /// values result in more efficient hash tables with fewer
    /// lookups but take more time to build.
    /// Default: 100
    pub fn set_max_search_depth(&mut self, depth: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
        }
    }

    /// In case of collision while inserting, the builder
    /// attempts to insert in the next cuckoo_block_size
    /// locations before skipping over to the next Cuckoo hash
    /// function. This makes lookups more cache friendly in case
    /// of collisions.
    /// Default: 5
    pub fn set_cuckoo_block_size(&mut self, size: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
        }
    }

    /// If this option is enabled, the user key is treated as a uint64_t and its
    /// value is used as the hash value directly. This option changes the builder's
    /// behavior. Readers ignore this option and behave according to what is
    /// specified in the table property.
    /// Default: false
    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
        }
    }

    /// If this option is set to true, modulo is used during hash calculation.
    /// This often yields better space efficiency at the cost of performance.
    /// If this option is set to false, the number of entries in the table is
    /// constrained to be a power of two, and bitwise AND is used to calculate
    /// the hash, which is faster in general.
    /// Default: true
    pub fn set_use_module_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
        }
    }
}

impl Default for CuckooTableOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
        assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");

        Self { inner: opts }
    }
}

/// Verbosity of the LOG.
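///
/// # Examples
///
/// A minimal sketch (assumes the `Options::set_log_level` binding in this crate):
///
/// ```
/// use rust_rocksdb::{LogLevel, Options};
///
/// let mut opts = Options::default();
/// opts.set_log_level(LogLevel::Warn);
/// ```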
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}

impl Options {
    /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
    /// latest RocksDB options file stored in the specified rocksdb database.
    ///
    /// *IMPORTANT*:
    /// RocksDB DOES NOT store the column family TTL in the options file. If you have set it via
    /// [`ColumnFamilyDescriptor::new_with_ttl`], then you need to set it again after loading the options file.
    /// The TTL will be set to [`ColumnFamilyTtl::Disabled`] for all column families for your safety.
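    ///
    /// # Examples
    ///
    /// A `no_run` sketch; it assumes a database already exists at `path` so an
    /// options file can be found:
    ///
    /// ```no_run
    /// use rust_rocksdb::{Cache, Env, Options, DB};
    ///
    /// let path = "path/for/rocksdb/storage";
    /// let cache = Cache::new_lru_cache(1024 * 1024);
    /// let (opts, cf_descriptors) =
    ///     Options::load_latest(path, Env::new().unwrap(), true, cache).unwrap();
    /// let db = DB::open_cf_descriptors(&opts, path, cf_descriptors).unwrap();
    /// ```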
    pub fn load_latest<P: AsRef<Path>>(
        path: P,
        env: Env,
        ignore_unknown_options: bool,
        cache: Cache,
    ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
        let path = to_cpath(path)?;
        let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
        let mut num_column_families: usize = 0;
        let mut column_family_names: *mut *mut c_char = null_mut();
        let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
        unsafe {
            ffi_try!(ffi::rocksdb_load_latest_options(
                path.as_ptr(),
                env.0.inner,
                ignore_unknown_options,
                cache.0.inner.as_ptr(),
                &mut db_options,
                &mut num_column_families,
                &mut column_family_names,
                &mut column_family_options,
            ));
        }
        let options = Options {
            inner: db_options,
            outlive: OptionsMustOutliveDB::default(),
        };
        let column_families = unsafe {
            Options::read_column_descriptors(
                num_column_families,
                column_family_names,
                column_family_options,
            )
        };
        Ok((options, column_families))
    }

    /// Reads column descriptors from C pointers.
    #[inline]
    unsafe fn read_column_descriptors(
        num_column_families: usize,
        column_family_names: *mut *mut c_char,
        column_family_options: *mut *mut ffi::rocksdb_options_t,
    ) -> Vec<ColumnFamilyDescriptor> {
        unsafe {
            let column_family_names_iter =
                slice::from_raw_parts(column_family_names, num_column_families)
                    .iter()
                    .map(|ptr| from_cstr(*ptr));
            let column_family_options_iter =
                slice::from_raw_parts(column_family_options, num_column_families)
                    .iter()
                    .map(|ptr| Options {
                        inner: *ptr,
                        outlive: OptionsMustOutliveDB::default(),
                    });
            let column_descriptors = column_family_names_iter
                .zip(column_family_options_iter)
                .map(|(name, options)| ColumnFamilyDescriptor {
                    name,
                    options,
                    ttl: ColumnFamilyTtl::Disabled,
                })
                .collect::<Vec<_>>();
            // free pointers
            for ptr in slice::from_raw_parts(column_family_names, num_column_families) {
                ffi::rocksdb_free(*ptr as *mut c_void);
            }
            ffi::rocksdb_free(column_family_names as *mut c_void);
            ffi::rocksdb_free(column_family_options as *mut c_void);
            column_descriptors
        }
    }

    /// Updates DBOptions with values parsed from a string.
    ///
    /// See official [wiki](
    /// https://github.com/facebook/rocksdb/wiki/Option-String-and-Option-Map#option-string)
    /// for more information.
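    ///
    /// # Examples
    ///
    /// A minimal sketch using the RocksDB option-string syntax:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_options_from_string("write_buffer_size=1048576;max_write_buffer_number=4")
    ///     .unwrap();
    /// ```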
    pub fn set_options_from_string(&mut self, string: impl CStrLike) -> Result<&mut Self, Error> {
        let c_string = string.into_c_string().unwrap();
        let mut err: *mut c_char = null_mut();
        let err_ptr: *mut *mut c_char = &mut err;
        unsafe {
            ffi::rocksdb_get_options_from_string(
                self.inner,
                c_string.as_ptr(),
                self.inner,
                err_ptr,
            );
        }

        if err.is_null() {
            Ok(self)
        } else {
            Err(Error::new(format!(
                "Could not set options from string: {}",
                crate::ffi_util::error_message(err)
            )))
        }
    }

    /// By default, RocksDB uses only one background thread for flush and
    /// compaction. Calling this function will set it up such that a total of
    /// `total_threads` is used. A good value for `total_threads` is the number of
    /// cores. You almost definitely want to call this function if your system is
    /// bottlenecked by RocksDB.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.increase_parallelism(3);
    /// ```
    pub fn increase_parallelism(&mut self, parallelism: i32) {
        unsafe {
            ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
        }
    }

    /// Optimize level style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it can override those
    /// parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption would be constrained by
    /// `memtable_memory_budget`.
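    ///
    /// # Examples
    ///
    /// A minimal sketch constraining memtable memory to an illustrative 512 MiB:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.optimize_level_style_compaction(512 * 1024 * 1024);
    /// ```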
    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_level_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// Optimize universal style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it can override those
    /// parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption would be constrained by
    /// `memtable_memory_budget`.
    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_universal_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// If true, the database will be created if it is missing.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_if_missing(true);
    /// ```
    pub fn create_if_missing(&mut self, create_if_missing: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_if_missing(
                self.inner,
                c_uchar::from(create_if_missing),
            );
        }
    }

    /// If true, any column families that didn't exist when opening the database
    /// will be created.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_missing_column_families(true);
    /// ```
    pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_missing_column_families(
                self.inner,
                c_uchar::from(create_missing_cfs),
            );
        }
    }

    /// Specifies whether an error should be raised if the database already exists.
    ///
    /// Default: false
    pub fn set_error_if_exists(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
        }
    }

    /// Enable/disable paranoid checks.
    ///
    /// If true, the implementation will do aggressive checking of the
    /// data it is processing and will stop early if it detects any
    /// errors. This may have unforeseen ramifications: for example, a
    /// corruption of one DB entry may cause a large number of entries to
    /// become unreadable or for the entire DB to become unopenable.
    /// If any of the writes to the database fails (Put, Delete, Merge, Write),
    /// the database will switch to read-only mode and fail all other
    /// Write operations.
    ///
    /// Default: false
    pub fn set_paranoid_checks(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
        }
    }

    /// A list of paths where SST files can be put into, with their target sizes.
    /// Newer data is placed into paths specified earlier in the vector while
    /// older data gradually moves to paths specified later in the vector.
    ///
    /// For example, if you have a flash device with 10GB allocated for the DB,
    /// as well as a hard drive of 2TB, you should configure it to be:
    ///   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
    ///
    /// The system will try to guarantee data under each path is close to but
    /// not larger than the target size. But current and future file sizes used
    /// in determining where to place a file are based on best-effort estimation,
    /// which means there is a chance that the actual size under the directory
    /// is slightly more than the target size under some workloads. Users should give
    /// some buffer room for those cases.
    ///
    /// If none of the paths has sufficient room to place a file, the file will
    /// be placed in the last path anyway, regardless of the target size.
    ///
    /// Placing newer data in earlier paths is also best-effort. Users should
    /// expect user files to be placed in higher levels in some extreme cases.
    ///
    /// If left empty, only one path will be used, which is `path` passed when
    /// opening the DB.
    ///
    /// Default: empty
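    ///
    /// # Examples
    ///
    /// A minimal sketch with illustrative paths and target sizes:
    ///
    /// ```no_run
    /// use rust_rocksdb::{DBPath, Options};
    ///
    /// let mut opts = Options::default();
    /// let paths = vec![
    ///     DBPath::new("/flash_path", 10 * 1024 * 1024 * 1024).unwrap(),
    ///     DBPath::new("/hard_drive", 2 * 1024 * 1024 * 1024 * 1024).unwrap(),
    /// ];
    /// opts.set_db_paths(&paths);
    /// ```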
    pub fn set_db_paths(&mut self, paths: &[DBPath]) {
        let mut paths: Vec<_> = paths.iter().map(|path| path.inner.cast_const()).collect();
        let num_paths = paths.len();
        unsafe {
            ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
        }
    }

    /// Use the specified object to interact with the environment,
    /// e.g. to read/write files, schedule background work, etc. In the near
    /// future, support for doing storage operations such as read/write files
    /// through env will be deprecated in favor of file_system.
    ///
    /// Default: Env::default()
    pub fn set_env(&mut self, env: &Env) {
        unsafe {
            ffi::rocksdb_options_set_env(self.inner, env.0.inner);
        }
        self.outlive.env = Some(env.clone());
    }

    /// Sets the compression algorithm that will be used for compressing blocks.
    ///
    /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
    /// snappy feature is not enabled).
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Snappy);
    /// ```
    pub fn set_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_compression(self.inner, t as c_int);
        }
    }

    /// Number of threads for parallel compression.
    /// Parallel compression is enabled only if threads > 1.
    /// THE FEATURE IS STILL EXPERIMENTAL
    ///
    /// See [code](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/advanced_options.h#L116-L127)
    /// for more information.
    ///
    /// Default: 1
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Zstd);
    /// opts.set_compression_options_parallel_threads(3);
    /// ```
    pub fn set_compression_options_parallel_threads(&mut self, num: i32) {
        unsafe {
            ffi::rocksdb_options_set_compression_options_parallel_threads(self.inner, num);
        }
    }

    /// Sets the compression algorithm that will be used for compressing WAL.
    ///
    /// At present, only ZSTD compression is supported!
    ///
    /// Default: `DBCompressionType::None`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_wal_compression_type(DBCompressionType::Zstd);
    /// // Or None to disable it
    /// opts.set_wal_compression_type(DBCompressionType::None);
    /// ```
    pub fn set_wal_compression_type(&mut self, t: DBCompressionType) {
        match t {
            DBCompressionType::None | DBCompressionType::Zstd => unsafe {
                ffi::rocksdb_options_set_wal_compression(self.inner, t as c_int);
            },
            other => unimplemented!("{:?} is not supported for WAL compression", other),
        }
    }

    /// Sets the bottom-most compression algorithm that will be used for
    /// compressing blocks at the bottom-most level.
    ///
    /// Note that to actually enable bottom-most compression configuration after
    /// setting the compression type, it needs to be enabled by calling
    /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
    /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with `enabled` argument
    /// set to `true`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_zstd_max_train_bytes(0, true);
    /// ```
    pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
        }
    }

    /// Different levels can have different compression policies. There
    /// are cases where most lower levels would like to use quick compression
    /// algorithms while the higher levels (which have more data) use
    /// compression algorithms that have better compression but could
    /// be slower. This array, if non-empty, should have an entry for
    /// each level of the database; these override the value specified in
    /// the previous field 'compression'.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_per_level(&[
    ///     DBCompressionType::None,
    ///     DBCompressionType::None,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy
    /// ]);
    /// ```
    pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
        unsafe {
            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
            ffi::rocksdb_options_set_compression_per_level(
                self.inner,
                level_types.as_mut_ptr(),
                level_types.len() as size_t,
            );
        }
    }

    /// Maximum size of dictionaries used to prime the compression library.
    /// Enabling dictionary can improve compression ratios when there are
    /// repetitions across data blocks.
    ///
    /// The dictionary is created by sampling the SST file data. If
    /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
    /// dictionary generator. Otherwise, the random samples are used directly as
    /// the dictionary.
    ///
    /// When compression dictionary is disabled, we compress and write each block
    /// before buffering data for the next one. When compression dictionary is
    /// enabled, we buffer all SST file data in-memory so we can sample it, as data
    /// can only be compressed and written after the dictionary has been finalized.
    /// So users of this feature may see increased memory usage.
    ///
    /// Default: `0`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_options(4, 5, 6, 7);
    /// ```
    pub fn set_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
    ) {
        unsafe {
            ffi::rocksdb_options_set_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
            );
        }
    }

    /// Sets compression options for blocks at the bottom-most level. Meaning
    /// of all settings is the same as in [`set_compression_options`](#method.set_compression_options) method but
    /// affect only the bottom-most compression which is set using
    /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
    /// ```
    pub fn set_bottommost_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
        enabled: bool,
    ) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
                c_uchar::from(enabled),
            );
        }
    }

    /// Sets maximum size of training data passed to zstd's dictionary trainer. Using zstd's
    /// dictionary trainer can achieve even better compression ratio improvements than using
    /// `max_dict_bytes` alone.
    ///
    /// The training data will be used to generate a dictionary of max_dict_bytes.
    ///
    /// Default: 0.
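    ///
    /// # Examples
    ///
    /// A minimal sketch with illustrative sizes (a 16 KiB dictionary trained
    /// on 100x as much sample data):
    ///
    /// ```
    /// use rust_rocksdb::{DBCompressionType, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Zstd);
    /// // (window_bits, level, strategy, max_dict_bytes)
    /// opts.set_compression_options(-14, 32767, 0, 16 * 1024);
    /// opts.set_zstd_max_train_bytes(100 * 16 * 1024);
    /// ```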
    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
        unsafe {
            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
        }
    }

    /// Sets maximum size of training data passed to zstd's dictionary trainer
    /// when compressing the bottom-most level. Using zstd's dictionary trainer
    /// can achieve even better compression ratio improvements than using
    /// `max_dict_bytes` alone.
    ///
    /// The training data will be used to generate a dictionary of
    /// `max_dict_bytes`.
    ///
    /// Default: 0.
    pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
                self.inner,
                value,
                c_uchar::from(enabled),
            );
        }
    }

    /// If non-zero, we perform bigger reads when doing compaction. If you're
    /// running RocksDB on spinning disks, you should set this to at least 2MB.
    /// That way RocksDB's compaction is doing sequential instead of random reads.
    ///
    /// Default: 2 * 1024 * 1024 (2 MB)
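    ///
    /// # Examples
    ///
    /// A minimal sketch using the 2 MB suggested for spinning disks:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_readahead_size(2 * 1024 * 1024);
    /// ```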
    pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
        unsafe {
            ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
        }
    }

    /// Allow RocksDB to pick dynamic base of bytes for levels.
    /// With this feature turned on, RocksDB will automatically adjust max bytes for each level.
    /// The goal of this feature is to have a lower bound on size amplification.
    ///
    /// Default: false.
    pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// This option has different meanings for different compaction styles:
    ///
    /// Leveled: files older than `periodic_compaction_seconds` will be picked up
    /// for compaction and will be rewritten to the same level as they were
    /// before if `level_compaction_dynamic_level_bytes` is disabled. Otherwise,
    /// files are rewritten to the next level, except for last-level files, which
    /// are rewritten to the same level.
    ///
    /// FIFO: not supported. Setting this option has no effect for FIFO compaction.
    ///
    /// Universal: when there are files older than `periodic_compaction_seconds`,
    /// rocksdb will try to do as large a compaction as possible including the
    /// last level. Such compaction is only skipped if only last level is to
    /// be compacted and no file in last level is older than
    /// `periodic_compaction_seconds`. See more in
    /// UniversalCompactionBuilder::PickPeriodicCompaction().
    /// For backward compatibility, the effective value of this option takes
    /// into account the value of option `ttl`. The logic is as follows:
    ///
    /// - both options are set to 30 days if they have the default value.
    /// - if both options are zero, zero is picked. Otherwise, we take the min
    ///   value among non-zero options values (i.e. takes the stricter limit).
    ///
    /// One main use of the feature is to make sure a file goes through compaction
    /// filters periodically. Users can also use the feature to clear up SST
    /// files using old format.
    ///
    /// A file's age is computed by looking at file_creation_time or creation_time
    /// table properties in order, if they have valid non-zero values; if not, the
    /// age is based on the file's last modified time (given by the underlying
    /// Env).
    ///
    /// This option only supports block based table format for any compaction
    /// style.
    ///
    /// unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
    ///
    /// Values:
    /// 0: Turn off Periodic compactions.
    /// UINT64_MAX - 1 (0xfffffffffffffffe) is special flag to allow RocksDB to
    /// pick default.
    ///
    /// Default: 30 days if using block based table format + compaction filter +
    /// leveled compaction or block based table format + universal compaction.
    /// 0 (disabled) otherwise.
    ///
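    /// # Examples
    ///
    /// A minimal sketch using the 7-day figure from above:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_periodic_compaction_seconds(7 * 24 * 60 * 60);
    /// ```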
    pub fn set_periodic_compaction_seconds(&mut self, secs: u64) {
        unsafe {
            ffi::rocksdb_options_set_periodic_compaction_seconds(self.inner, secs);
        }
    }

    /// When an iterator scans this number of invisible entries (tombstones or
    /// hidden puts) from the active memtable during a single iterator operation,
    /// we will attempt to flush the memtable. Currently only forward scans are
    /// supported (SeekToFirst(), Seek() and Next()).
    /// This option helps to reduce the overhead of scanning through a
    /// large number of entries in memtable.
1447    /// Users should consider enabling deletion-triggered compaction (see
1448    /// CompactOnDeletionCollectorFactory) together with this option to compact
1449    /// away tombstones after the memtable is flushed.
1450    ///
1451    /// Default: 0 (disabled)
1452    /// Dynamically changeable through the SetOptions() API.
1453    pub fn set_memtable_op_scan_flush_trigger(&mut self, num: u32) {
1454        unsafe {
1455            ffi::rocksdb_options_set_memtable_op_scan_flush_trigger(self.inner, num);
1456        }
1457    }
1458
1459    /// Similar to `memtable_op_scan_flush_trigger`, but this option applies to
1460    /// Next() calls between Seeks or until iterator destruction. If the average
1461    /// number of invisible entries scanned per operation from the active memtable
1462    /// exceeds this value, the memtable will be marked for flush.
1463    /// Note that to avoid the case where the window between Seeks is too small,
1464    /// the option only takes effect if the total number of hidden entries scanned
1465    /// within a window is at least `memtable_op_scan_flush_trigger`. So this
1466    /// option is only effective when `memtable_op_scan_flush_trigger` is set.
1467    ///
1468    /// This option should be set to a lower value than
1469    /// `memtable_op_scan_flush_trigger`. It covers the case where an iterator
1470    /// scans through an expensive key range with many invisible entries from the
1471    /// active memtable, but the number of invisible entries per operation does not
1472    /// exceed `memtable_op_scan_flush_trigger`.
1473    ///
1474    /// Default: 0 (disabled)
1475    /// Dynamically changeable through the SetOptions() API.
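    ///
    /// # Examples
    ///
    /// A sketch pairing this option with `memtable_op_scan_flush_trigger`, since
    /// it is only effective when the latter is set; the thresholds are illustrative.
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_memtable_op_scan_flush_trigger(1000);
    /// opts.set_memtable_avg_op_scan_flush_trigger(100);
    /// ```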
1476    pub fn set_memtable_avg_op_scan_flush_trigger(&mut self, num: u32) {
1477        unsafe {
1478            ffi::rocksdb_options_set_memtable_avg_op_scan_flush_trigger(self.inner, num);
1479        }
1480    }
1481
1482    /// This option has different meanings for different compaction styles:
1483    ///
1484    /// Leveled: Non-bottom-level files with all keys older than TTL will go
1485    ///    through the compaction process. This usually happens in a cascading
1486    ///    way so that those entries will be compacted to bottommost level/file.
1487    ///    The feature is used to remove stale entries that have been deleted or
1488    ///    updated from the file system.
1489    ///
1490    /// FIFO: Files with all keys older than TTL will be deleted. TTL is only
1491    ///    supported if option max_open_files is set to -1.
1492    ///
1493    /// Universal: users should only set the option `periodic_compaction_seconds`
1494    ///    instead. For backward compatibility, this option has the same
1495    ///    meaning as `periodic_compaction_seconds`. See more in comments for
1496    ///    `periodic_compaction_seconds` on the interaction between these two
1497    ///    options.
1498    ///
1499    /// This option only supports block based table format for any compaction
1500    /// style.
1501    ///
1502    /// unit: seconds. Ex: 1 day = 1 * 24 * 60 * 60
1503    /// 0 means disabled.
1504    /// UINT64_MAX - 1 (0xfffffffffffffffe): a special flag that lets RocksDB
1505    /// pick the default.
1506    ///
1507    /// Default: 30 days if using block based table. 0 (disable) otherwise.
1508    ///
1509    /// Dynamically changeable through the SetOptions() API.
1510    /// Note that dynamically changing this option only works for leveled and FIFO
1511    /// compaction. For universal compaction, dynamically changing this option has
1512    /// no effect, users should dynamically change `periodic_compaction_seconds`
1513    /// instead.
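    ///
    /// # Examples
    ///
    /// A minimal sketch; a one-day TTL is illustrative.
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_ttl(24 * 60 * 60);
    /// ```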
1514    pub fn set_ttl(&mut self, secs: u64) {
1515        unsafe {
1516            ffi::rocksdb_options_set_ttl(self.inner, secs);
1517        }
1518    }
1519
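    /// Sets an associative merge operator: the same function is used for both
    /// full and partial merges.
    ///
    /// # Examples
    ///
    /// A sketch of a byte-concatenating operator; `concat` is a hypothetical
    /// helper written for this example.
    ///
    /// ```
    /// use rust_rocksdb::{MergeOperands, Options};
    ///
    /// fn concat(_key: &[u8], existing: Option<&[u8]>, operands: &MergeOperands) -> Option<Vec<u8>> {
    ///     // Start from the existing value (if any) and append each operand.
    ///     let mut result = existing.map(<[u8]>::to_vec).unwrap_or_default();
    ///     for op in operands {
    ///         result.extend_from_slice(op);
    ///     }
    ///     Some(result)
    /// }
    ///
    /// let mut opts = Options::default();
    /// opts.set_merge_operator_associative("concat", concat);
    /// ```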
1520    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
1521        &mut self,
1522        name: impl CStrLike,
1523        full_merge_fn: F,
1524    ) {
1525        let cb = Box::new(MergeOperatorCallback {
1526            name: name.into_c_string().unwrap(),
1527            full_merge_fn: full_merge_fn.clone(),
1528            partial_merge_fn: full_merge_fn,
1529        });
1530
1531        unsafe {
1532            let mo = ffi::rocksdb_mergeoperator_create(
1533                Box::into_raw(cb).cast::<c_void>(),
1534                Some(merge_operator::destructor_callback::<F, F>),
1535                Some(full_merge_callback::<F, F>),
1536                Some(partial_merge_callback::<F, F>),
1537                Some(merge_operator::delete_callback),
1538                Some(merge_operator::name_callback::<F, F>),
1539            );
1540            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1541        }
1542    }
1543
1544    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
1545        &mut self,
1546        name: impl CStrLike,
1547        full_merge_fn: F,
1548        partial_merge_fn: PF,
1549    ) {
1550        let cb = Box::new(MergeOperatorCallback {
1551            name: name.into_c_string().unwrap(),
1552            full_merge_fn,
1553            partial_merge_fn,
1554        });
1555
1556        unsafe {
1557            let mo = ffi::rocksdb_mergeoperator_create(
1558                Box::into_raw(cb).cast::<c_void>(),
1559                Some(merge_operator::destructor_callback::<F, PF>),
1560                Some(full_merge_callback::<F, PF>),
1561                Some(partial_merge_callback::<F, PF>),
1562                Some(merge_operator::delete_callback),
1563                Some(merge_operator::name_callback::<F, PF>),
1564            );
1565            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1566        }
1567    }
1568
1569    #[deprecated(
1570        since = "0.5.0",
1571        note = "add_merge_operator has been renamed to set_merge_operator"
1572    )]
1573    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
1574        self.set_merge_operator_associative(name, merge_fn);
1575    }
1576
1577    /// Sets a compaction filter used to determine if entries should be kept, changed,
1578    /// or removed during compaction.
1579    ///
1580    /// An example use case is to remove entries with an expired TTL.
1581    ///
1582    /// If you take a snapshot of the database, only values written since the last
1583    /// snapshot will be passed through the compaction filter.
1584    ///
1585    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
1586    /// simultaneously.
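    ///
    /// # Examples
    ///
    /// A sketch that drops keys under a hypothetical `_tmp/` namespace and
    /// keeps everything else.
    ///
    /// ```
    /// use rust_rocksdb::compaction_filter::Decision;
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_filter("tmp_cleaner", |_level, key, _value| {
    ///     if key.starts_with(b"_tmp/") {
    ///         Decision::Remove
    ///     } else {
    ///         Decision::Keep
    ///     }
    /// });
    /// ```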
1587    pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
1588    where
1589        F: CompactionFilterFn + Send + 'static,
1590    {
1591        let cb = Box::new(CompactionFilterCallback {
1592            name: name.into_c_string().unwrap(),
1593            filter_fn,
1594        });
1595
1596        unsafe {
1597            let cf = ffi::rocksdb_compactionfilter_create(
1598                Box::into_raw(cb).cast::<c_void>(),
1599                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
1600                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
1601                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
1602            );
1603            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
1604        }
1605    }
1606
1607    pub fn add_event_listener<L: EventListener>(&mut self, l: L) {
1608        let handle = new_event_listener(l);
1609        unsafe { ffi::rocksdb_options_add_eventlistener(self.inner, handle.inner) }
1610    }
1611
1612    /// This is a factory that provides compaction filter objects which allow
1613    /// an application to modify/delete a key-value during background compaction.
1614    ///
1615    /// A new filter will be created on each compaction run.  If multithreaded
1616    /// compaction is being used, each created CompactionFilter will only be used
1617    /// from a single thread and so does not need to be thread-safe.
1618    ///
1619    /// Default: nullptr
1620    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
1621    where
1622        F: CompactionFilterFactory + 'static,
1623    {
1624        let factory = Box::new(factory);
1625
1626        unsafe {
1627            let cff = ffi::rocksdb_compactionfilterfactory_create(
1628                Box::into_raw(factory).cast::<c_void>(),
1629                Some(compaction_filter_factory::destructor_callback::<F>),
1630                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
1631                Some(compaction_filter_factory::name_callback::<F>),
1632            );
1633
1634            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
1635        }
1636    }
1637
1638    /// Sets the comparator used to define the order of keys in the table.
1639    /// Default: a comparator that uses lexicographic byte-wise ordering
1640    ///
1641    /// The client must ensure that the comparator supplied here has the same
1642    /// name and orders keys *exactly* the same as the comparator provided to
1643    /// previous open calls on the same DB.
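    ///
    /// # Examples
    ///
    /// A sketch of a comparator that reverses the default byte-wise order; the
    /// name `"reverse"` is illustrative.
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_comparator("reverse", Box::new(|a: &[u8], b: &[u8]| b.cmp(a)));
    /// ```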
1644    pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
1645        let cb = Box::new(ComparatorCallback {
1646            name: name.into_c_string().unwrap(),
1647            compare_fn,
1648        });
1649
1650        unsafe {
1651            let cmp = ffi::rocksdb_comparator_create(
1652                Box::into_raw(cb).cast::<c_void>(),
1653                Some(ComparatorCallback::destructor_callback),
1654                Some(ComparatorCallback::compare_callback),
1655                Some(ComparatorCallback::name_callback),
1656            );
1657            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1658        }
1659    }
1660
1661    /// Sets a timestamp-aware comparator, used to define the order of keys in the table
1662    /// while taking the timestamp into consideration.
1663    /// Find more information on timestamp-aware comparators [here](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp).
1664    ///
1665    /// The client must ensure that the comparator supplied here has the same
1666    /// name and orders keys *exactly* the same as the comparator provided to
1667    /// previous open calls on the same DB.
1668    pub fn set_comparator_with_ts(
1669        &mut self,
1670        name: impl CStrLike,
1671        timestamp_size: usize,
1672        compare_fn: Box<CompareFn>,
1673        compare_ts_fn: Box<CompareTsFn>,
1674        compare_without_ts_fn: Box<CompareWithoutTsFn>,
1675    ) {
1676        let cb = Box::new(ComparatorWithTsCallback {
1677            name: name.into_c_string().unwrap(),
1678            compare_fn,
1679            compare_ts_fn,
1680            compare_without_ts_fn,
1681        });
1682
1683        unsafe {
1684            let cmp = ffi::rocksdb_comparator_with_ts_create(
1685                Box::into_raw(cb).cast::<c_void>(),
1686                Some(ComparatorWithTsCallback::destructor_callback),
1687                Some(ComparatorWithTsCallback::compare_callback),
1688                Some(ComparatorWithTsCallback::compare_ts_callback),
1689                Some(ComparatorWithTsCallback::compare_without_ts_callback),
1690                Some(ComparatorWithTsCallback::name_callback),
1691                timestamp_size,
1692            );
1693            ffi::rocksdb_options_set_comparator(self.inner, cmp);
1694        }
1695    }
1696
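    /// Sets the prefix extractor used for prefix seek and prefix bloom filters.
    ///
    /// # Examples
    ///
    /// A sketch using a fixed-length prefix; the length of 3 is illustrative.
    ///
    /// ```
    /// use rust_rocksdb::{Options, SliceTransform};
    ///
    /// let mut opts = Options::default();
    /// opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(3));
    /// ```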
1697    pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
1698        unsafe {
1699            ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
1700        }
1701    }
1702
1703    /// Use this if you don't need to keep the data sorted, i.e. you'll never use
1704    /// an iterator, only Put() and Get() API calls.
1706    pub fn optimize_for_point_lookup(&mut self, block_cache_size_mb: u64) {
1707        unsafe {
1708            ffi::rocksdb_options_optimize_for_point_lookup(self.inner, block_cache_size_mb);
1709        }
1710    }
1711
1712    /// Sets the optimize_filters_for_hits flag
1713    ///
1714    /// Default: `false`
1715    ///
1716    /// # Examples
1717    ///
1718    /// ```
1719    /// use rust_rocksdb::Options;
1720    ///
1721    /// let mut opts = Options::default();
1722    /// opts.set_optimize_filters_for_hits(true);
1723    /// ```
1724    pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
1725        unsafe {
1726            ffi::rocksdb_options_set_optimize_filters_for_hits(
1727                self.inner,
1728                c_int::from(optimize_for_hits),
1729            );
1730        }
1731    }
1732
1733    /// Sets the periodicity at which obsolete files get deleted.
1734    ///
1735    /// The files that get out of scope by the compaction
1736    /// process will still be deleted automatically on every compaction,
1737    /// regardless of this setting.
1738    ///
1739    /// Default: 6 hours
1740    pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
1741        unsafe {
1742            ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
1743        }
1744    }
1745
1746    /// Prepare the DB for bulk loading.
1747    ///
1748    /// All data will be in level 0 without any automatic compaction.
1749    /// It's recommended to manually call CompactRange(NULL, NULL) before reading
1750    /// from the database, because otherwise the read can be very slow.
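    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.prepare_for_bulk_load();
    /// ```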
1751    pub fn prepare_for_bulk_load(&mut self) {
1752        unsafe {
1753            ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
1754        }
1755    }
1756
1757    /// Sets the number of open files that can be used by the DB. You may need to
1758    /// increase this if your database has a large working set. Value `-1` means
1759    /// files opened are always kept open. You can estimate number of files based
1760    /// on target_file_size_base and target_file_size_multiplier for level-based
1761    /// compaction. For universal-style compaction, you can usually set it to `-1`.
1762    ///
1763    /// Default: `-1`
1764    ///
1765    /// # Examples
1766    ///
1767    /// ```
1768    /// use rust_rocksdb::Options;
1769    ///
1770    /// let mut opts = Options::default();
1771    /// opts.set_max_open_files(10);
1772    /// ```
1773    pub fn set_max_open_files(&mut self, nfiles: c_int) {
1774        unsafe {
1775            ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
1776        }
1777    }
1778
1779    /// If max_open_files is -1, DB will open all files on DB::Open(). You can
1780    /// use this option to increase the number of threads used to open the files.
1781    /// Default: 16
1782    pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
1783        unsafe {
1784            ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
1785        }
1786    }
1787
1788    /// By default, writes to stable storage use fdatasync (on platforms
1789    /// where this function is available). If this option is true,
1790    /// fsync is used instead.
1791    ///
1792    /// fsync and fdatasync are equally safe for our purposes and fdatasync is
1793    /// faster, so it is rarely necessary to set this option. It is provided
1794    /// as a workaround for kernel/filesystem bugs, such as one that affected
1795    /// fdatasync with ext4 in kernel versions prior to 3.7.
1796    ///
1797    /// Default: `false`
1798    ///
1799    /// # Examples
1800    ///
1801    /// ```
1802    /// use rust_rocksdb::Options;
1803    ///
1804    /// let mut opts = Options::default();
1805    /// opts.set_use_fsync(true);
1806    /// ```
1807    pub fn set_use_fsync(&mut self, useit: bool) {
1808        unsafe {
1809            ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
1810        }
1811    }
1812
1813    /// Returns the value of the `use_fsync` option.
1814    pub fn get_use_fsync(&self) -> bool {
1815        let val = unsafe { ffi::rocksdb_options_get_use_fsync(self.inner) };
1816        val != 0
1817    }
1818
1819    /// Specifies the absolute info LOG dir.
1820    ///
1821    /// If it is empty, the log files will be in the same dir as data.
1822    /// If it is non-empty, the log files will be in the specified dir,
1823    /// and the db data dir's absolute path will be used as the log file
1824    /// name's prefix.
1825    ///
1826    /// Default: empty
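    ///
    /// # Examples
    ///
    /// A sketch; the directory path is illustrative.
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_db_log_dir("/var/log/rocksdb");
    /// ```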
1827    pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
1828        let p = to_cpath(path).unwrap();
1829        unsafe {
1830            ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
1831        }
1832    }
1833
1834    /// Specifies the log level.
1835    /// Consider the `LogLevel` enum for a list of possible levels.
1836    ///
1837    /// Default: Info
1838    ///
1839    /// # Examples
1840    ///
1841    /// ```
1842    /// use rust_rocksdb::{Options, LogLevel};
1843    ///
1844    /// let mut opts = Options::default();
1845    /// opts.set_log_level(LogLevel::Warn);
1846    /// ```
1847    pub fn set_log_level(&mut self, level: LogLevel) {
1848        unsafe {
1849            ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
1850        }
1851    }
1852
1853    /// Allows the OS to incrementally sync files to disk while they are being
1854    /// written, asynchronously, in the background. This operation can be used
1855    /// to smooth out write I/Os over time. Users shouldn't rely on it for
1856    /// persistence guarantees.
1857    /// Issues one request for every `bytes_per_sync` bytes written. `0` turns it off.
1858    ///
1859    /// Default: `0`
1860    ///
1861    /// You may consider using a rate limiter to regulate the write rate to the device.
1862    /// When the rate limiter is enabled, it automatically sets bytes_per_sync
1863    /// to 1MB.
1864    ///
1865    /// This option applies to table files
1866    ///
1867    /// # Examples
1868    ///
1869    /// ```
1870    /// use rust_rocksdb::Options;
1871    ///
1872    /// let mut opts = Options::default();
1873    /// opts.set_bytes_per_sync(1024 * 1024);
1874    /// ```
1875    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
1876        unsafe {
1877            ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
1878        }
1879    }
1880
1881    /// Same as bytes_per_sync, but applies to WAL files.
1882    ///
1883    /// Default: 0, turned off
1884    ///
1885    /// Dynamically changeable through SetDBOptions() API.
1886    pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
1887        unsafe {
1888            ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
1889        }
1890    }
1891
1892    /// Sets the maximum buffer size that is used by WritableFileWriter.
1893    ///
1894    /// On Windows, we need to maintain an aligned buffer for writes.
1895    /// We allow the buffer to grow until its size hits the limit in buffered
1896    /// IO and fix the buffer size when using direct IO to ensure alignment of
1897    /// write requests if the logical sector size is unusual.
1898    ///
1899    /// Default: 1024 * 1024 (1 MB)
1900    ///
1901    /// Dynamically changeable through SetDBOptions() API.
1902    pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
1903        unsafe {
1904            ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
1905        }
1906    }
1907
1908    /// If true, allows multiple writers to update memtables in parallel.
1909    /// Only some memtable factories support concurrent writes; currently it
1910    /// is implemented only for SkipListFactory. Concurrent memtable writes
1911    /// are not compatible with inplace_update_support or filter_deletes.
1912    /// It is strongly recommended to set enable_write_thread_adaptive_yield
1913    /// if you are going to use this feature.
1914    ///
1915    /// Default: true
1916    ///
1917    /// # Examples
1918    ///
1919    /// ```
1920    /// use rust_rocksdb::Options;
1921    ///
1922    /// let mut opts = Options::default();
1923    /// opts.set_allow_concurrent_memtable_write(false);
1924    /// ```
1925    pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
1926        unsafe {
1927            ffi::rocksdb_options_set_allow_concurrent_memtable_write(
1928                self.inner,
1929                c_uchar::from(allow),
1930            );
1931        }
1932    }
1933
1934    /// If true, threads synchronizing with the write batch group leader will wait for up to
1935    /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
1936    /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
1937    /// is enabled.
1938    ///
1939    /// Default: true
1940    pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
1941        unsafe {
1942            ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
1943                self.inner,
1944                c_uchar::from(enabled),
1945            );
1946        }
1947    }
1948
1949    /// Specifies whether an iterator's Next() sequentially skips over keys with the same user key or not.
1950    ///
1951    /// This number specifies the number of keys (with the same user key)
1952    /// that will be sequentially skipped before a reseek is issued.
1953    ///
1954    /// Default: 8
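    ///
    /// # Examples
    ///
    /// A minimal sketch; the value of 16 is illustrative.
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_sequential_skip_in_iterations(16);
    /// ```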
1955    pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
1956        unsafe {
1957            ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
1958        }
1959    }
1960
1961    /// Enable direct I/O mode for reading.
1962    /// It may or may not improve performance depending on the use case.
1963    ///
1964    /// Files will be opened in "direct I/O" mode
1965    /// which means that data read from the disk will not be cached or
1966    /// buffered. The hardware buffer of the devices may however still
1967    /// be used. Memory mapped files are not impacted by these parameters.
1968    ///
1969    /// Default: false
1970    ///
1971    /// # Examples
1972    ///
1973    /// ```
1974    /// use rust_rocksdb::Options;
1975    ///
1976    /// let mut opts = Options::default();
1977    /// opts.set_use_direct_reads(true);
1978    /// ```
1979    pub fn set_use_direct_reads(&mut self, enabled: bool) {
1980        unsafe {
1981            ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
1982        }
1983    }
1984
1985    /// Enable direct I/O mode for flush and compaction.
1986    /// It may or may not improve performance depending on the use case.
1987    ///
1988    /// Files will be opened in "direct I/O" mode
1989    /// which means that data written to the disk will not be cached or
1990    /// buffered. The hardware buffer of the devices may however still
1991    /// be used. Memory mapped files are not impacted by these parameters.
1992    ///
1993    /// Default: false
1994    ///
1995    /// # Examples
1996    ///
1997    /// ```
1998    /// use rust_rocksdb::Options;
1999    ///
2000    /// let mut opts = Options::default();
2001    /// opts.set_use_direct_io_for_flush_and_compaction(true);
2002    /// ```
2003    pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
2004        unsafe {
2005            ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
2006                self.inner,
2007                c_uchar::from(enabled),
2008            );
2009        }
2010    }
2011
2012    /// If true, file descriptors are marked close-on-exec so that child processes do not inherit open files.
2013    ///
2014    /// Default: true
2015    pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
2016        unsafe {
2017            ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
2018        }
2019    }
2020
2021    /// Hints to the OS that it should not buffer disk I/O. Enabling this
2022    /// parameter may improve performance but increases pressure on the
2023    /// system cache.
2024    ///
2025    /// The exact behavior of this parameter is platform dependent.
2026    ///
2027    /// On POSIX systems, after RocksDB reads data from disk it will
2028    /// mark the pages as "unneeded". The operating system may or may not
2029    /// evict these pages from memory, reducing pressure on the system
2030    /// cache. If the disk block is requested again this can result in
2031    /// additional disk I/O.
2032    ///
2033    /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
2034    /// which means that data read from the disk will not be cached or
2035    /// buffered. The hardware buffer of the devices may however still
2036    /// be used. Memory mapped files are not impacted by this parameter.
2037    ///
2038    /// Default: true
2039    ///
2040    /// # Examples
2041    ///
2042    /// ```
2043    /// use rust_rocksdb::Options;
2044    ///
2045    /// let mut opts = Options::default();
2046    /// #[allow(deprecated)]
2047    /// opts.set_allow_os_buffer(false);
2048    /// ```
2049    #[deprecated(
2050        since = "0.7.0",
2051        note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
2052    )]
2053    pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
2054        self.set_use_direct_reads(!is_allow);
2055        self.set_use_direct_io_for_flush_and_compaction(!is_allow);
2056    }
2057
2058    /// Sets the number of shards used for table cache.
2059    ///
2060    /// Default: `6`
2061    ///
2062    /// # Examples
2063    ///
2064    /// ```
2065    /// use rust_rocksdb::Options;
2066    ///
2067    /// let mut opts = Options::default();
2068    /// opts.set_table_cache_num_shard_bits(4);
2069    /// ```
2070    pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
2071        unsafe {
2072            ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
2073        }
2074    }
2075
2076    /// By default, target_file_size_multiplier is 1, which means
2077    /// files in different levels will have similar size.
2078    ///
2079    /// Dynamically changeable through SetOptions() API
2080    pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
2081        unsafe {
2082            ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
2083        }
2084    }
2085
2086    /// Sets the minimum number of write buffers that will be merged
2087    /// before writing to storage.  If set to `1`, then
2088    /// all write buffers are flushed to L0 as individual files and this increases
2089    /// read amplification because a get request has to check in all of these
2090    /// files. Also, an in-memory merge may result in writing less
2091    /// data to storage if there are duplicate records in each of these
2092    /// individual write buffers.
2093    ///
2094    /// Default: `1`
2095    ///
2096    /// # Examples
2097    ///
2098    /// ```
2099    /// use rust_rocksdb::Options;
2100    ///
2101    /// let mut opts = Options::default();
2102    /// opts.set_min_write_buffer_number(2);
2103    /// ```
2104    pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
2105        unsafe {
2106            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
2107        }
2108    }
2109
2110    /// Sets the maximum number of write buffers that are built up in memory.
2111    /// The default and the minimum number is 2, so that when 1 write buffer
2112    /// is being flushed to storage, new writes can continue to the other
2113    /// write buffer.
2114    /// If max_write_buffer_number > 3, writing will be slowed down to
2115    /// options.delayed_write_rate if we are writing to the last write buffer
2116    /// allowed.
2117    ///
2118    /// Default: `2`
2119    ///
2120    /// # Examples
2121    ///
2122    /// ```
2123    /// use rust_rocksdb::Options;
2124    ///
2125    /// let mut opts = Options::default();
2126    /// opts.set_max_write_buffer_number(4);
2127    /// ```
2128    pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
2129        unsafe {
2130            ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
2131        }
2132    }
2133
2134    /// Sets the amount of data to build up in memory (backed by an unsorted log
2135    /// on disk) before converting to a sorted on-disk file.
2136    ///
2137    /// Larger values increase performance, especially during bulk loads.
2138    /// Up to max_write_buffer_number write buffers may be held in memory
2139    /// at the same time,
2140    /// so you may wish to adjust this parameter to control memory usage.
2141    /// Also, a larger write buffer will result in a longer recovery time
2142    /// the next time the database is opened.
2143    ///
2144    /// Note that write_buffer_size is enforced per column family.
2145    /// See db_write_buffer_size for sharing memory across column families.
2146    ///
2147    /// Default: `0x4000000` (64MiB)
2148    ///
2149    /// Dynamically changeable through SetOptions() API
2150    ///
2151    /// # Examples
2152    ///
2153    /// ```
2154    /// use rust_rocksdb::Options;
2155    ///
2156    /// let mut opts = Options::default();
2157    /// opts.set_write_buffer_size(128 * 1024 * 1024);
2158    /// ```
2159    pub fn set_write_buffer_size(&mut self, size: usize) {
2160        unsafe {
2161            ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
2162        }
2163    }
2164
2165    /// Amount of data to build up in memtables across all column
2166    /// families before writing to disk.
2167    ///
2168    /// This is distinct from write_buffer_size, which enforces a limit
2169    /// for a single memtable.
2170    ///
2171    /// This feature is disabled by default. Specify a non-zero value
2172    /// to enable it.
2173    ///
2174    /// Default: 0 (disabled)
2175    ///
2176    /// # Examples
2177    ///
2178    /// ```
2179    /// use rust_rocksdb::Options;
2180    ///
2181    /// let mut opts = Options::default();
2182    /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
2183    /// ```
2184    pub fn set_db_write_buffer_size(&mut self, size: usize) {
2185        unsafe {
2186            ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
2187        }
2188    }
2189
2190    /// Control maximum total data size for a level.
2191    /// max_bytes_for_level_base is the max total for level-1.
2192    /// Maximum number of bytes for level L can be calculated as
2193    /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
2194    /// For example, if max_bytes_for_level_base is 200MB, and if
2195    /// max_bytes_for_level_multiplier is 10, total data size for level-1
2196    /// will be 200MB, total file size for level-2 will be 2GB,
2197    /// and total file size for level-3 will be 20GB.
2198    ///
2199    /// Default: `0x10000000` (256MiB).
2200    ///
2201    /// Dynamically changeable through SetOptions() API
2202    ///
2203    /// # Examples
2204    ///
2205    /// ```
2206    /// use rust_rocksdb::Options;
2207    ///
2208    /// let mut opts = Options::default();
2209    /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
2210    /// ```
2211    pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
2212        unsafe {
2213            ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
2214        }
2215    }
2216
2217    /// Default: `10`
2218    ///
2219    /// # Examples
2220    ///
2221    /// ```
2222    /// use rust_rocksdb::Options;
2223    ///
2224    /// let mut opts = Options::default();
2225    /// opts.set_max_bytes_for_level_multiplier(4.0);
2226    /// ```
2227    pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
2228        unsafe {
2229            ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
2230        }
2231    }
2232
2233    /// The manifest file is rolled over on reaching this limit.
2234    /// The older manifest file will be deleted.
2235    /// The default value is MAX_INT so that roll-over does not take place.
2236    ///
2237    /// # Examples
2238    ///
2239    /// ```
2240    /// use rust_rocksdb::Options;
2241    ///
2242    /// let mut opts = Options::default();
2243    /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
2244    /// ```
2245    pub fn set_max_manifest_file_size(&mut self, size: usize) {
2246        unsafe {
2247            ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
2248        }
2249    }
2250
2251    /// Sets the target file size for compaction.
2252    /// target_file_size_base is per-file size for level-1.
2253    /// Target file size for level L can be calculated by
2254    /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
2255    /// For example, if target_file_size_base is 2MB and
2256    /// target_file_size_multiplier is 10, then each file on level-1 will
2257    /// be 2MB, and each file on level 2 will be 20MB,
2258    /// and each file on level-3 will be 200MB.
2259    ///
2260    /// Default: `0x4000000` (64MiB)
2261    ///
2262    /// Dynamically changeable through SetOptions() API
2263    ///
2264    /// # Examples
2265    ///
2266    /// ```
2267    /// use rust_rocksdb::Options;
2268    ///
2269    /// let mut opts = Options::default();
2270    /// opts.set_target_file_size_base(128 * 1024 * 1024);
2271    /// ```
2272    pub fn set_target_file_size_base(&mut self, size: u64) {
2273        unsafe {
2274            ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
2275        }
2276    }
2277
2278    /// Sets the minimum number of write buffers that will be merged together
2279    /// before writing to storage.  If set to `1`, then
2280    /// all write buffers are flushed to L0 as individual files and this increases
2281    /// read amplification because a get request has to check in all of these
2282    /// files. Also, an in-memory merge may result in writing less
2283    /// data to storage if there are duplicate records in each of these
2284    /// individual write buffers.
2285    ///
2286    /// Default: `1`
2287    ///
2288    /// # Examples
2289    ///
2290    /// ```
2291    /// use rust_rocksdb::Options;
2292    ///
2293    /// let mut opts = Options::default();
2294    /// opts.set_min_write_buffer_number_to_merge(2);
2295    /// ```
2296    pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
2297        unsafe {
2298            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
2299        }
2300    }
2301
2302    /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
2303    /// level-0 compaction will not be triggered by number of files at all.
2304    ///
2305    /// Default: `4`
2306    ///
2307    /// Dynamically changeable through SetOptions() API
2308    ///
2309    /// # Examples
2310    ///
2311    /// ```
2312    /// use rust_rocksdb::Options;
2313    ///
2314    /// let mut opts = Options::default();
2315    /// opts.set_level_zero_file_num_compaction_trigger(8);
2316    /// ```
2317    pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2318        unsafe {
2319            ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2320        }
2321    }
2322
2323    /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
2324    /// point. A value < `0` means that no writing slowdown will be triggered by
2325    /// number of files in level-0.
2326    ///
2327    /// Default: `20`
2328    ///
2329    /// Dynamically changeable through SetOptions() API
2330    ///
2331    /// # Examples
2332    ///
2333    /// ```
2334    /// use rust_rocksdb::Options;
2335    ///
2336    /// let mut opts = Options::default();
2337    /// opts.set_level_zero_slowdown_writes_trigger(10);
2338    /// ```
2339    pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2340        unsafe {
2341            ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2342        }
2343    }
2344
2345    /// Sets the maximum number of level-0 files.  We stop writes at this point.
2346    ///
2347    /// Default: `24`
2348    ///
2349    /// Dynamically changeable through SetOptions() API
2350    ///
2351    /// # Examples
2352    ///
2353    /// ```
2354    /// use rust_rocksdb::Options;
2355    ///
2356    /// let mut opts = Options::default();
2357    /// opts.set_level_zero_stop_writes_trigger(48);
2358    /// ```
2359    pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2360        unsafe {
2361            ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2362        }
2363    }
2364
2365    /// Sets the compaction style.
2366    ///
2367    /// Default: DBCompactionStyle::Level
2368    ///
2369    /// # Examples
2370    ///
2371    /// ```
2372    /// use rust_rocksdb::{Options, DBCompactionStyle};
2373    ///
2374    /// let mut opts = Options::default();
2375    /// opts.set_compaction_style(DBCompactionStyle::Universal);
2376    /// ```
2377    pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2378        unsafe {
2379            ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2380        }
2381    }
2382
2383    /// Sets the options needed to support Universal Style compactions.
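    ///
    /// # Examples
    ///
    /// A sketch; `set_size_ratio` and its value are illustrative knobs.
    ///
    /// ```
    /// use rust_rocksdb::{DBCompactionStyle, Options, UniversalCompactOptions};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Universal);
    /// let mut uco = UniversalCompactOptions::default();
    /// uco.set_size_ratio(2);
    /// opts.set_universal_compaction_options(&uco);
    /// ```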
2384    pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2385        unsafe {
2386            ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2387        }
2388    }
2389
2390    /// Sets the options for FIFO compaction style.
2391    pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2392        unsafe {
2393            ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2394        }
2395    }
2396
2397    /// Setting unordered_write to true trades the immutability guarantee of
2398    /// snapshots for higher write throughput. This violates the
2399    /// repeatability one expects from ::Get from a snapshot, as well as
2400    /// ::MultiGet and Iterator's consistent-point-in-time view property.
2401    /// If the application cannot tolerate the relaxed guarantees, it can implement
2402    /// its own mechanisms to work around that and yet benefit from the higher
2403    /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2404    /// two_write_queues=true is one way to achieve immutable snapshots despite
2405    /// unordered_write.
2406    ///
2407    /// By default, i.e., when it is false, rocksdb does not advance the sequence
2408    /// number for new snapshots unless all the writes with lower sequence numbers
2409    /// are already finished. This provides the immutability that we expect from
2410    /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2411    /// snapshots, the snapshot immutability results into Iterator and MultiGet
2412    /// offering consistent-point-in-time view. If set to true, although
2413    /// Read-Your-Own-Write property is still provided, the snapshot immutability
2414    /// property is relaxed: the writes issued after the snapshot is obtained (with
2415    /// larger sequence numbers) will still not be visible to reads from that
2416    /// snapshot; however, there might still be pending writes (with lower sequence
2417    /// numbers) that will change the state visible to the snapshot after they are
2418    /// applied to the memtable.
2419    ///
2420    /// Default: false
2421    pub fn set_unordered_write(&mut self, unordered: bool) {
2422        unsafe {
2423            ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2424        }
2425    }
2426
2427    /// Sets the maximum number of threads that will
2428    /// concurrently perform a compaction job by breaking it into multiple,
2429    /// smaller ones that are run simultaneously.
2430    ///
2431    /// Default: 1 (i.e. no subcompactions)
2432    pub fn set_max_subcompactions(&mut self, num: u32) {
2433        unsafe {
2434            ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2435        }
2436    }
2437
2438    /// Sets the maximum number of concurrent background jobs
2439    /// (compactions and flushes).
2440    ///
2441    /// Default: 2
2442    ///
2443    /// Dynamically changeable through SetDBOptions() API.
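    ///
    /// # Examples
    ///
    /// A minimal sketch; the value of 4 is illustrative.
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_background_jobs(4);
    /// ```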
2444    pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2445        unsafe {
2446            ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2447        }
2448    }
2449
2450    /// Sets the maximum number of concurrent background compaction jobs, submitted to
2451    /// the default LOW priority thread pool.
2452    /// We first try to schedule compactions based on
2453    /// `base_background_compactions`. If the compaction cannot catch up, we
2454    /// will increase number of compaction threads up to
2455    /// `max_background_compactions`.
2456    ///
2457    /// If you're increasing this, also consider increasing number of threads in
2458    /// LOW priority thread pool. For more information, see
2459    /// Env::SetBackgroundThreads
2460    ///
2461    /// Default: `1`
2462    ///
2463    /// # Examples
2464    ///
2465    /// ```
2466    /// use rust_rocksdb::Options;
2467    ///
2468    /// let mut opts = Options::default();
2469    /// #[allow(deprecated)]
2470    /// opts.set_max_background_compactions(2);
2471    /// ```
2472    #[deprecated(
2473        since = "0.15.0",
2474        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2475    )]
2476    pub fn set_max_background_compactions(&mut self, n: c_int) {
2477        unsafe {
2478            ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2479        }
2480    }
2481
2482    /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2483    /// the HIGH priority thread pool.
2484    ///
2485    /// By default, all background jobs (major compaction and memtable flush) go
2486    /// to the LOW priority pool. If this option is set to a positive number,
2487    /// memtable flush jobs will be submitted to the HIGH priority pool.
2488    /// It is important when the same Env is shared by multiple db instances.
2489    /// Without a separate pool, long running major compaction jobs could
2490    /// potentially block memtable flush jobs of other db instances, leading to
2491    /// unnecessary Put stalls.
2492    ///
2493    /// If you're increasing this, also consider increasing number of threads in
2494    /// HIGH priority thread pool. For more information, see
2495    /// Env::SetBackgroundThreads
2496    ///
2497    /// Default: `1`
2498    ///
2499    /// # Examples
2500    ///
2501    /// ```
2502    /// use rust_rocksdb::Options;
2503    ///
2504    /// let mut opts = Options::default();
2505    /// #[allow(deprecated)]
2506    /// opts.set_max_background_flushes(2);
2507    /// ```
2508    #[deprecated(
2509        since = "0.15.0",
2510        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2511    )]
2512    pub fn set_max_background_flushes(&mut self, n: c_int) {
2513        unsafe {
2514            ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2515        }
2516    }
2517
2518    /// Disables automatic compactions. Manual compactions can still
2519    /// be issued on this column family.
2520    ///
2521    /// Default: `false`
2522    ///
2523    /// Dynamically changeable through SetOptions() API
2524    ///
2525    /// # Examples
2526    ///
2527    /// ```
2528    /// use rust_rocksdb::Options;
2529    ///
2530    /// let mut opts = Options::default();
2531    /// opts.set_disable_auto_compactions(true);
2532    /// ```
2533    pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2534        unsafe {
2535            ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2536        }
2537    }
2538
2539    /// Sets the page size for huge pages used by the arena of
2540    /// the memtable.
2541    /// If <= 0, it won't allocate from huge pages but from malloc.
2542    /// Users are responsible for reserving huge pages for it to allocate from. For
2543    /// example:
2544    ///      sysctl -w vm.nr_hugepages=20
2545    /// See the Linux doc Documentation/vm/hugetlbpage.txt.
2546    /// If there aren't enough free huge pages available, it will fall back to
2547    /// malloc.
2548    ///
2549    /// Dynamically changeable through SetOptions() API
2550    pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2551        unsafe {
2552            ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2553        }
2554    }
2555
2556    /// Sets the maximum number of successive merge operations on a key in the memtable.
2557    ///
2558    /// When a merge operation is added to the memtable and the maximum number of
2559    /// successive merges is reached, the value of the key will be calculated and
2560    /// inserted into the memtable instead of the merge operation. This will
2561    /// ensure that there are never more than max_successive_merges merge
2562    /// operations in the memtable.
2563    ///
2564    /// Default: 0 (disabled)
2565    pub fn set_max_successive_merges(&mut self, num: usize) {
2566        unsafe {
2567            ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2568        }
2569    }
2570
2571    /// Control locality of bloom filter probes to improve cache miss rate.
2572    /// This option only applies to memtable prefix bloom and plaintable
2573    /// prefix bloom. It essentially limits the max number of cache lines each
2574    /// bloom filter check can touch.
2575    ///
2576    /// This optimization is turned off when set to 0. The number should never
2577    /// be greater than the number of probes. This option can boost performance
2578    /// for in-memory workloads but should be used with care since it can cause
2579    /// a higher false positive rate.
2580    ///
2581    /// Default: 0
2582    pub fn set_bloom_locality(&mut self, v: u32) {
2583        unsafe {
2584            ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2585        }
2586    }
2587
2588    /// Enable/disable thread-safe inplace updates.
2589    ///
2590    /// An update is applied in place only if:
2591    /// * the key exists in the current memtable,
2592    /// * sizeof(new_value) <= sizeof(old_value), and
2593    /// * the old_value for that key is a put, i.e. kTypeValue.
2594    ///
2595    /// Default: false.
2596    pub fn set_inplace_update_support(&mut self, enabled: bool) {
2597        unsafe {
2598            ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2599        }
2600    }
2601
2602    /// Sets the number of locks used for inplace update.
2603    ///
2604    /// Default: 10000 when inplace_update_support = true, otherwise 0.
2605    pub fn set_inplace_update_locks(&mut self, num: usize) {
2606        unsafe {
2607            ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2608        }
2609    }
2610
2611    /// Different max-size multipliers for different levels.
2612    /// These are multiplied by max_bytes_for_level_multiplier to arrive
2613    /// at the max-size of each level.
2614    ///
2615    /// Default: 1
2616    ///
2617    /// Dynamically changeable through SetOptions() API
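    ///
    /// # Examples
    ///
    /// A sketch; the per-level multipliers are illustrative.
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_bytes_for_level_multiplier_additional(&[1, 2, 2, 4, 4, 8, 8]);
    /// ```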
2618    pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2619        let count = level_values.len();
2620        unsafe {
2621            ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2622                self.inner,
2623                level_values.as_ptr().cast_mut(),
2624                count,
2625            );
2626        }
2627    }
2628
2629    /// If true, then DB::Open() will not fetch and check sizes of all sst files.
2630    /// This may significantly speed up startup if there are many sst files,
2631    /// especially when using non-default Env with expensive GetFileSize().
2632    /// We'll still check that all required sst files exist.
2633    /// If paranoid_checks is false, this option is ignored, and sst files are
2634    /// not checked at all.
2635    ///
2636    /// Default: false
2637    #[deprecated(note = "RocksDB >= 10.5: option is ignored: checking done with a thread pool")]
2638    pub fn set_skip_checking_sst_file_sizes_on_db_open(&mut self, value: bool) {
2639        unsafe {
2640            ffi::rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
2641                self.inner,
2642                c_uchar::from(value),
2643            );
2644        }
2645    }
2646
2647    /// The total maximum size (bytes) of write buffers to maintain in memory
2648    /// including copies of buffers that have already been flushed. This parameter
2649    /// only affects trimming of flushed buffers and does not affect flushing.
2650    /// This controls the maximum amount of write history that will be available
2651    /// in memory for conflict checking when Transactions are used. The actual
2652    /// size of write history (flushed Memtables) might be higher than this limit
2653    /// if further trimming will reduce write history total size below this
2654    /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2655    /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2656    /// Because trimming the next Memtable of size 20MB will reduce total memory
2657    /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2658    ///
2659    /// When using an OptimisticTransactionDB:
2660    /// If this value is too low, some transactions may fail at commit time due
2661    /// to not being able to determine whether there were any write conflicts.
2662    ///
2663    /// When using a TransactionDB:
2664    /// If Transaction::SetSnapshot is used, TransactionDB will read either
2665    /// in-memory write buffers or SST files to do write-conflict checking.
2666    /// Increasing this value can reduce the number of reads to SST files
2667    /// done for conflict detection.
2668    ///
2669    /// Setting this value to 0 will cause write buffers to be freed immediately
2670    /// after they are flushed. If this value is set to -1,
2671    /// 'max_write_buffer_number * write_buffer_size' will be used.
2672    ///
2673    /// Default:
2674    /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2675    /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2676    /// if it is not explicitly set by the user.  Otherwise, the default is 0.
2677    pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2678        unsafe {
2679            ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2680        }
2681    }
2682
2683    /// By default, a single write thread queue is maintained. The thread that gets
2684    /// to the head of the queue becomes the write batch group leader and is responsible
2685    /// for writing to the WAL and memtable for the batch group.
2686    ///
2687    /// If enable_pipelined_write is true, a separate write thread queue is
2688    /// maintained for WAL writes and memtable writes. A write thread first enters the WAL
2689    /// writer queue and then the memtable writer queue. Pending threads on the WAL
2690    /// writer queue thus only have to wait for previous writers to finish their
2691    /// WAL writing but not the memtable writing. Enabling the feature may improve
2692    /// write throughput and reduce the latency of the prepare phase of two-phase
2693    /// commit.
2694    ///
2695    /// Default: false
2696    pub fn set_enable_pipelined_write(&mut self, value: bool) {
2697        unsafe {
2698            ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
2699        }
2700    }
2701
2702    /// Defines the underlying memtable implementation.
2703    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2704    /// Defaults to using a skiplist.
2705    ///
2706    /// # Examples
2707    ///
2708    /// ```
2709    /// use rust_rocksdb::{Options, MemtableFactory};
2710    /// let mut opts = Options::default();
2711    /// let factory = MemtableFactory::HashSkipList {
2712    ///     bucket_count: 1_000_000,
2713    ///     height: 4,
2714    ///     branching_factor: 4,
2715    /// };
2716    ///
2717    /// opts.set_allow_concurrent_memtable_write(false);
2718    /// opts.set_memtable_factory(factory);
2719    /// ```
2720    pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2721        match factory {
2722            MemtableFactory::Vector => unsafe {
2723                ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2724            },
2725            MemtableFactory::HashSkipList {
2726                bucket_count,
2727                height,
2728                branching_factor,
2729            } => unsafe {
2730                ffi::rocksdb_options_set_hash_skip_list_rep(
2731                    self.inner,
2732                    bucket_count,
2733                    height,
2734                    branching_factor,
2735                );
2736            },
2737            MemtableFactory::HashLinkList { bucket_count } => unsafe {
2738                ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2739            },
2740        }
2741    }
2742
2743    pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2744        unsafe {
2745            ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2746        }
2747        self.outlive.block_based = Some(factory.outlive.clone());
2748    }
2749
2750    /// Sets the table factory to a CuckooTableFactory (the default table
2751    /// factory is a block-based table factory that provides a default
2752    /// implementation of TableBuilder and TableReader with default
2753    /// BlockBasedTableOptions).
2754    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2755    /// # Examples
2756    ///
2757    /// ```
2758    /// use rust_rocksdb::{Options, CuckooTableOptions};
2759    ///
2760    /// let mut opts = Options::default();
2761    /// let mut factory_opts = CuckooTableOptions::default();
2762    /// factory_opts.set_hash_ratio(0.8);
2763    /// factory_opts.set_max_search_depth(20);
2764    /// factory_opts.set_cuckoo_block_size(10);
2765    /// factory_opts.set_identity_as_first_hash(true);
2766    /// factory_opts.set_use_module_hash(false);
2767    ///
2768    /// opts.set_cuckoo_table_factory(&factory_opts);
2769    /// ```
2770    pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2771        unsafe {
2772            ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2773        }
2774    }
2775
2776    /// This is a factory that provides TableFactory objects.
2777    /// Default: a block-based table factory that provides a default
2778    /// implementation of TableBuilder and TableReader with default
2779    /// BlockBasedTableOptions.
    ///
2780    /// Sets the factory as plain table.
2781    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
2782    /// information.
2783    ///
2784    /// # Examples
2785    ///
2786    /// ```
2787    /// use rust_rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
2788    ///
2789    /// let mut opts = Options::default();
2790    /// let factory_opts = PlainTableFactoryOptions {
2791    ///   user_key_length: 0,
2792    ///   bloom_bits_per_key: 20,
2793    ///   hash_table_ratio: 0.75,
2794    ///   index_sparseness: 16,
2795    ///   huge_page_tlb_size: 0,
2796    ///   encoding_type: KeyEncodingType::Plain,
2797    ///   full_scan_mode: false,
2798    ///   store_index_in_file: false,
2799    /// };
2800    ///
2801    /// opts.set_plain_table_factory(&factory_opts);
2802    /// ```
2803    pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2804        unsafe {
2805            ffi::rocksdb_options_set_plain_table_factory(
2806                self.inner,
2807                options.user_key_length,
2808                options.bloom_bits_per_key,
2809                options.hash_table_ratio,
2810                options.index_sparseness,
2811                options.huge_page_tlb_size,
2812                options.encoding_type as c_char,
2813                c_uchar::from(options.full_scan_mode),
2814                c_uchar::from(options.store_index_in_file),
2815            );
2816        }
2817    }
2818
2819    /// Sets the start level to use compression.
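    ///
    /// # Examples
    ///
    /// A minimal sketch with an illustrative value (leave L0 and L1 uncompressed,
    /// compress level 2 and below):
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_min_level_to_compress(2);
    /// ```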
2820    pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2821        unsafe {
2822            ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2823        }
2824    }
2825
2826    /// Measure IO stats in compactions and flushes, if `true`.
2827    ///
2828    /// Default: `false`
2829    ///
2830    /// # Examples
2831    ///
2832    /// ```
2833    /// use rust_rocksdb::Options;
2834    ///
2835    /// let mut opts = Options::default();
2836    /// opts.set_report_bg_io_stats(true);
2837    /// ```
2838    pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2839        unsafe {
2840            ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
2841        }
2842    }
2843
2844    /// Once write-ahead logs exceed this size, we will start forcing the flush of
2845    /// column families whose memtables are backed by the oldest live WAL file
2846    /// (i.e. the ones that are causing all the space amplification).
2847    ///
2848    /// Default: `0`
2849    ///
2850    /// # Examples
2851    ///
2852    /// ```
2853    /// use rust_rocksdb::Options;
2854    ///
2855    /// let mut opts = Options::default();
2856    /// // Set max total wal size to 1G.
2857    /// opts.set_max_total_wal_size(1 << 30);
2858    /// ```
2859    pub fn set_max_total_wal_size(&mut self, size: u64) {
2860        unsafe {
2861            ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2862        }
2863    }
2864
2865    /// Recovery mode to control the consistency while replaying WAL.
2866    ///
2867    /// Default: DBRecoveryMode::PointInTime
2868    ///
2869    /// # Examples
2870    ///
2871    /// ```
2872    /// use rust_rocksdb::{Options, DBRecoveryMode};
2873    ///
2874    /// let mut opts = Options::default();
2875    /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
2876    /// ```
2877    pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
2878        unsafe {
2879            ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
2880        }
2881    }
2882
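    /// Enables the collection of database statistics.
    ///
    /// # Examples
    ///
    /// A minimal sketch; once statistics are enabled, `get_statistics` returns a
    /// dump instead of `None`:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.enable_statistics();
    /// assert!(opts.get_statistics().is_some());
    /// ```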
2883    pub fn enable_statistics(&mut self) {
2884        unsafe {
2885            ffi::rocksdb_options_enable_statistics(self.inner);
2886        }
2887    }
2888
2889    pub fn get_statistics(&self) -> Option<String> {
2890        unsafe {
2891            let value = ffi::rocksdb_options_statistics_get_string(self.inner);
2892            if value.is_null() {
2893                return None;
2894            }
2895
2896            // Must have valid UTF-8 format.
2897            let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
2898            ffi::rocksdb_free(value as *mut c_void);
2899            Some(s)
2900        }
2901    }
2902
2903    /// StatsLevel can be used to reduce statistics overhead by skipping certain
2904    /// types of stats in the stats collection process.
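    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming the `StatsLevel::ExceptDetailedTimers` variant):
    ///
    /// ```
    /// use rust_rocksdb::statistics::StatsLevel;
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.enable_statistics();
    /// opts.set_statistics_level(StatsLevel::ExceptDetailedTimers);
    /// ```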
2905    pub fn set_statistics_level(&self, level: StatsLevel) {
2906        unsafe { ffi::rocksdb_options_set_statistics_level(self.inner, level as c_int) }
2907    }
2908
2909    /// Returns the value of cumulative db counters if stat collection is enabled.
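    ///
    /// A minimal sketch (assuming the `Ticker::BlockCacheMiss` variant); a fresh
    /// statistics object reports zero for every ticker:
    ///
    /// ```
    /// use rust_rocksdb::statistics::Ticker;
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.enable_statistics();
    /// assert_eq!(opts.get_ticker_count(Ticker::BlockCacheMiss), 0);
    /// ```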
2910    pub fn get_ticker_count(&self, ticker: Ticker) -> u64 {
2911        unsafe { ffi::rocksdb_options_statistics_get_ticker_count(self.inner, ticker as u32) }
2912    }
2913
2914    /// Gets Histogram data from collected db stats. Requires stats to be enabled.
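    ///
    /// A minimal sketch (assuming the `Histogram::DbGet` variant):
    ///
    /// ```
    /// use rust_rocksdb::statistics::Histogram;
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.enable_statistics();
    /// let _data = opts.get_histogram_data(Histogram::DbGet);
    /// ```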
2915    pub fn get_histogram_data(&self, histogram: Histogram) -> HistogramData {
2916        unsafe {
2917            let data = HistogramData::default();
2918            ffi::rocksdb_options_statistics_get_histogram_data(
2919                self.inner,
2920                histogram as u32,
2921                data.inner,
2922            );
2923            data
2924        }
2925    }
2926
2927    /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
2928    ///
2929    /// Default: `600` (10 mins)
2930    ///
2931    /// # Examples
2932    ///
2933    /// ```
2934    /// use rust_rocksdb::Options;
2935    ///
2936    /// let mut opts = Options::default();
2937    /// opts.set_stats_dump_period_sec(300);
2938    /// ```
2939    pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
2940        unsafe {
2941            ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
2942        }
2943    }
2944
2945    /// If not zero, dump `rocksdb.stats` to RocksDB every `stats_persist_period_sec`.
2946    ///
2947    /// Default: `600` (10 mins)
2948    ///
2949    /// # Examples
2950    ///
2951    /// ```
2952    /// use rust_rocksdb::Options;
2953    ///
2954    /// let mut opts = Options::default();
2955    /// opts.set_stats_persist_period_sec(5);
2956    /// ```
2957    pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
2958        unsafe {
2959            ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
2960        }
2961    }
2962
2963    /// When set to true, reading SST files will opt out of the filesystem's
2964    /// readahead. Setting this to false may improve sequential iteration
2965    /// performance.
2966    ///
2967    /// Default: `true`
2968    pub fn set_advise_random_on_open(&mut self, advise: bool) {
2969        unsafe {
2970            ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
2971        }
2972    }
2973
2974    /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
2975    ///
2976    /// This could reduce context switch when the mutex is not
2977    /// heavily contended. However, if the mutex is hot, we could end up
2978    /// wasting spin time.
2979    ///
2980    /// Default: false
2981    pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
2982        unsafe {
2983            ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
2984        }
2985    }
2986
2987    /// Sets the number of levels for this database.
2988    pub fn set_num_levels(&mut self, n: c_int) {
2989        unsafe {
2990            ffi::rocksdb_options_set_num_levels(self.inner, n);
2991        }
2992    }
2993
2994    /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor`, this
2995    /// creates a prefix bloom filter for each memtable with the size of
2996    /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
2997    ///
2998    /// Default: `0`
2999    ///
3000    /// # Examples
3001    ///
3002    /// ```
3003    /// use rust_rocksdb::{Options, SliceTransform};
3004    ///
3005    /// let mut opts = Options::default();
3006    /// let transform = SliceTransform::create_fixed_prefix(10);
3007    /// opts.set_prefix_extractor(transform);
3008    /// opts.set_memtable_prefix_bloom_ratio(0.2);
3009    /// ```
3010    pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
3011        unsafe {
3012            ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
3013        }
3014    }
3015
3016    /// Sets the maximum number of bytes in all compacted files.
3017    /// We try to limit the number of bytes in one compaction to be lower than this
3018    /// threshold, but it is not guaranteed.
3019    ///
3020    /// Value 0 will be sanitized.
3021    ///
3022    /// Default: target_file_size_base * 25
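    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Illustrative value: cap a single compaction at 4GB.
    /// opts.set_max_compaction_bytes(4 * 1024 * 1024 * 1024);
    /// ```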
3023    pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
3024        unsafe {
3025            ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
3026        }
3027    }
3028
3029    /// Specifies the absolute path of the directory the
3030    /// write-ahead log (WAL) should be written to.
3031    ///
3032    /// Default: same directory as the database
3033    ///
3034    /// # Examples
3035    ///
3036    /// ```
3037    /// use rust_rocksdb::Options;
3038    ///
3039    /// let mut opts = Options::default();
3040    /// opts.set_wal_dir("/path/to/dir");
3041    /// ```
3042    pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
3043        let p = to_cpath(path).unwrap();
3044        unsafe {
3045            ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
3046        }
3047    }
3048
3049    /// Sets the WAL ttl in seconds.
3050    ///
3051    /// The following two options affect how archived logs will be deleted.
3052    /// 1. If both are set to 0, logs will be deleted asap and will not get into
3053    ///    the archive.
3054    /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
3055    ///    WAL files will be checked every 10 min and if total size is greater
3056    ///    than wal_size_limit_mb, they will be deleted starting with the
3057    ///    earliest until size_limit is met. All empty files will be deleted.
3058    /// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
3059    ///    WAL files will be checked every wal_ttl_seconds / 2 and those that
3060    ///    are older than wal_ttl_seconds will be deleted.
3061    /// 4. If both are not 0, WAL files will be checked every 10 min and both
3062    ///    checks will be performed with ttl being first.
3063    ///
3064    /// Default: 0
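    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Illustrative value: keep archived WAL files for one day.
    /// opts.set_wal_ttl_seconds(24 * 60 * 60);
    /// ```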
3065    pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
3066        unsafe {
3067            ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
3068        }
3069    }
3070
3071    /// Sets the WAL size limit in MB.
3072    ///
3073    /// If total size of WAL files is greater than wal_size_limit_mb,
3074    /// they will be deleted starting with the earliest until size_limit is met.
3075    ///
3076    /// Default: 0
3077    pub fn set_wal_size_limit_mb(&mut self, size: u64) {
3078        unsafe {
3079            ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
3080        }
3081    }
3082
3083    /// Sets the number of bytes to preallocate (via fallocate) the manifest files.
3084    ///
3085    /// Default is 4MB, which is reasonable to reduce random IO
3086    /// as well as prevent overallocation for mounts that preallocate
3087    /// large amounts of data (such as xfs's allocsize option).
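    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Spell out the 4MB default explicitly.
    /// opts.set_manifest_preallocation_size(4 * 1024 * 1024);
    /// ```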
3088    pub fn set_manifest_preallocation_size(&mut self, size: usize) {
3089        unsafe {
3090            ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
3091        }
3092    }
3093
3094    /// If true, then DB::Open() will not update the statistics used to optimize
3095    /// compaction decision by loading table properties from many files.
3096    /// Turning off this feature (i.e. setting this to true) will improve DBOpen time, especially in a disk environment.
3097    ///
3098    /// Default: false
3099    pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
3100        unsafe {
3101            ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
3102        }
3103    }
3104
3105    /// Specify the maximal number of info log files to be kept.
3106    ///
3107    /// Default: 1000
3108    ///
3109    /// # Examples
3110    ///
3111    /// ```
3112    /// use rust_rocksdb::Options;
3113    ///
3114    /// let mut options = Options::default();
3115    /// options.set_keep_log_file_num(100);
3116    /// ```
3117    pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
3118        unsafe {
3119            ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
3120        }
3121    }
3122
3123    /// Allow the OS to mmap file for writing.
3124    ///
3125    /// Default: false
3126    ///
3127    /// # Examples
3128    ///
3129    /// ```
3130    /// use rust_rocksdb::Options;
3131    ///
3132    /// let mut options = Options::default();
3133    /// options.set_allow_mmap_writes(true);
3134    /// ```
3135    pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
3136        unsafe {
3137            ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
3138        }
3139    }
3140
3141    /// Allow the OS to mmap file for reading sst tables.
3142    ///
3143    /// Default: false
3144    ///
3145    /// # Examples
3146    ///
3147    /// ```
3148    /// use rust_rocksdb::Options;
3149    ///
3150    /// let mut options = Options::default();
3151    /// options.set_allow_mmap_reads(true);
3152    /// ```
3153    pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
3154        unsafe {
3155            ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
3156        }
3157    }
3158
3159    /// If enabled, WAL is not flushed automatically after each write. Instead it
3160    /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
3161    /// to its file.
3162    ///
3163    /// Default: false
3164    ///
3165    /// # Examples
3166    ///
3167    /// ```
3168    /// use rust_rocksdb::Options;
3169    ///
3170    /// let mut options = Options::default();
3171    /// options.set_manual_wal_flush(true);
3172    /// ```
3173    pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
3174        unsafe {
3175            ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
3176        }
3177    }
3178
3179    /// Guarantee that all column families are flushed together atomically.
3180    /// This option applies to both manual flushes (`db.flush()`) and automatic
3181    /// background flushes caused when memtables are filled.
3182    ///
3183    /// Note that this is only useful when the WAL is disabled. When using the
3184    /// WAL, writes are always consistent across column families.
3185    ///
3186    /// Default: false
3187    ///
3188    /// # Examples
3189    ///
3190    /// ```
3191    /// use rust_rocksdb::Options;
3192    ///
3193    /// let mut options = Options::default();
3194    /// options.set_atomic_flush(true);
3195    /// ```
3196    pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
3197        unsafe {
3198            ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
3199        }
3200    }
3201
3202    /// Sets global cache for table-level rows.
3203    ///
3204    /// Default: null (disabled)
3205    /// Not supported in ROCKSDB_LITE mode!
3206    pub fn set_row_cache(&mut self, cache: &Cache) {
3207        unsafe {
3208            ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
3209        }
3210        self.outlive.row_cache = Some(cache.clone());
3211    }
3212
3213    /// Use to control write rate of flush and compaction. Flush has higher
3214    /// priority than compaction.
3215    /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3216    ///
3217    /// Default: disable
3218    ///
3219    /// # Examples
3220    ///
3221    /// ```
3222    /// use rust_rocksdb::Options;
3223    ///
3224    /// let mut options = Options::default();
3225    /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
3226    /// ```
3227    pub fn set_ratelimiter(
3228        &mut self,
3229        rate_bytes_per_sec: i64,
3230        refill_period_us: i64,
3231        fairness: i32,
3232    ) {
3233        unsafe {
3234            let ratelimiter =
3235                ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
3236            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3237            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3238        }
3239    }
3240
3241    /// Use to control write rate of flush and compaction. Flush has higher
3242    /// priority than compaction.
3243    /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3244    ///
3245    /// Default: disable
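    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// // Illustrative values: 16MB/s ceiling, 100ms refill period, default fairness.
    /// options.set_auto_tuned_ratelimiter(16 * 1024 * 1024, 100_000, 10);
    /// ```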
3246    pub fn set_auto_tuned_ratelimiter(
3247        &mut self,
3248        rate_bytes_per_sec: i64,
3249        refill_period_us: i64,
3250        fairness: i32,
3251    ) {
3252        unsafe {
3253            let ratelimiter = ffi::rocksdb_ratelimiter_create_auto_tuned(
3254                rate_bytes_per_sec,
3255                refill_period_us,
3256                fairness,
3257            );
3258            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3259            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3260        }
3261    }
3262
3263    /// Create a RateLimiter object, which can be shared among RocksDB instances to
3264    /// control write rate of flush and compaction.
3265    ///
3266    /// rate_bytes_per_sec: this is the only parameter you want to set most of the
3267    /// time. It controls the total write rate of compaction and flush in bytes per
3268    /// second. Currently, RocksDB does not enforce rate limit for anything other
3269    /// than flush and compaction, e.g. write to WAL.
3270    ///
3271    /// refill_period_us: this controls how often tokens are refilled. For example,
3272    /// when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
3273    /// 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
3274    /// burstier writes while smaller value introduces more CPU overhead.
3275    /// The default should work for most cases.
3276    ///
3277    /// fairness: RateLimiter accepts high-pri requests and low-pri requests.
3278    /// A low-pri request is usually blocked in favor of a hi-pri request. Currently,
3279    /// RocksDB assigns low-pri to requests from compaction and high-pri to requests
3280    /// from flush. Low-pri requests can get blocked if flush requests come in
3281    /// continuously. This fairness parameter grants low-pri requests permission by
3282    /// 1/fairness chance even though high-pri requests exist to avoid starvation.
3283    /// You should be fine leaving it at the default of 10.
3284    ///
3285    /// mode: Mode indicates which types of operations count against the limit.
3286    ///
3287    /// auto_tuned: Enables dynamic adjustment of rate limit within the range
3288    ///              `[rate_bytes_per_sec / 20, rate_bytes_per_sec]`, according to
3289    ///              the recent demand for background I/O.
3290    pub fn set_ratelimiter_with_mode(
3291        &mut self,
3292        rate_bytes_per_sec: i64,
3293        refill_period_us: i64,
3294        fairness: i32,
3295        mode: RateLimiterMode,
3296        auto_tuned: bool,
3297    ) {
3298        unsafe {
3299            let ratelimiter = ffi::rocksdb_ratelimiter_create_with_mode(
3300                rate_bytes_per_sec,
3301                refill_period_us,
3302                fairness,
3303                mode as c_int,
3304                auto_tuned,
3305            );
3306            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3307            ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3308        }
3309    }
3310
3311    /// Sets the maximal size of the info log file.
3312    ///
3313    /// If the log file is larger than `max_log_file_size`, a new info log file
3314    /// will be created. If `max_log_file_size` is equal to zero, all logs will
3315    /// be written to one log file.
3316    ///
3317    /// Default: 0
3318    ///
3319    /// # Examples
3320    ///
3321    /// ```
3322    /// use rust_rocksdb::Options;
3323    ///
3324    /// let mut options = Options::default();
3325    /// options.set_max_log_file_size(0);
3326    /// ```
3327    pub fn set_max_log_file_size(&mut self, size: usize) {
3328        unsafe {
3329            ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
3330        }
3331    }
3332
3333    /// Sets the time for the info log file to roll (in seconds).
3334    ///
3335    /// If specified with non-zero value, log file will be rolled
3336    /// if it has been active longer than `log_file_time_to_roll`.
3337    /// Default: 0 (disabled)
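    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// // Illustrative value: roll the info log once per hour.
    /// options.set_log_file_time_to_roll(60 * 60);
    /// ```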
3338    pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
3339        unsafe {
3340            ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
3341        }
3342    }
3343
3344    /// Controls the recycling of log files.
3345    ///
3346    /// If non-zero, previously written log files will be reused for new logs,
3347    /// overwriting the old data. The value indicates how many such files we will
3348    /// keep around at any point in time for later use. This is more efficient
3349    /// because the blocks are already allocated and fdatasync does not need to
3350    /// update the inode after each write.
3351    ///
3352    /// Default: 0
3353    ///
3354    /// # Examples
3355    ///
3356    /// ```
3357    /// use rust_rocksdb::Options;
3358    ///
3359    /// let mut options = Options::default();
3360    /// options.set_recycle_log_file_num(5);
3361    /// ```
3362    pub fn set_recycle_log_file_num(&mut self, num: usize) {
3363        unsafe {
3364            ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
3365        }
3366    }
3367
3368    /// Prints logs to stderr for faster debugging.
3369    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/Logger) for more information.
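    ///
    /// # Examples
    ///
    /// A minimal sketch; the prefix is an arbitrary illustrative string:
    ///
    /// ```
    /// use rust_rocksdb::{LogLevel, Options};
    ///
    /// let mut options = Options::default();
    /// options.set_stderr_logger(LogLevel::Info, "[my-db] ");
    /// ```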
3370    pub fn set_stderr_logger(&mut self, log_level: LogLevel, prefix: impl CStrLike) {
3371        let p = prefix.into_c_string().unwrap();
3372
3373        unsafe {
3374            let logger = ffi::rocksdb_logger_create_stderr_logger(log_level as c_int, p.as_ptr());
3375            ffi::rocksdb_options_set_info_log(self.inner, logger);
3376            ffi::rocksdb_logger_destroy(logger);
3377        }
3378    }
3379
3380    /// Invokes callback with log messages.
3381    ///
3382    /// # Examples
3383    /// ```
3384    /// use rust_rocksdb::{LogLevel, Options};
3385    ///
3386    /// let mut options = Options::default();
3387    /// options.set_callback_logger(LogLevel::Debug, &|level, msg| println!("{level:?} {msg}"));
3388    /// ```
3389    pub fn set_callback_logger<'a, F>(&mut self, log_level: LogLevel, func: &'a F)
3390    where
3391        F: for<'b> FnMut(LogLevel, &'b str) + RefUnwindSafe + Send + Sync + 'a,
3392    {
3393        let func = func as *const F;
3394        let func = func.cast::<c_void>();
3395        unsafe {
3396            let logger = ffi::rocksdb_logger_create_callback_logger(
3397                log_level as c_int,
3398                Some(Self::logger_callback::<'a, F>),
3399                func.cast_mut(),
3400            );
3401            ffi::rocksdb_options_set_info_log(self.inner, logger);
3402            ffi::rocksdb_logger_destroy(logger);
3403        }
3404    }
3405
3406    extern "C" fn logger_callback<'a, F>(
3407        func: *mut c_void,
3408        level: u32,
3409        msg: *mut c_char,
3410        len: usize,
3411    ) where
3412        F: for<'b> FnMut(LogLevel, &'b str) + RefUnwindSafe + Send + Sync + 'a,
3413    {
3414        use std::{mem, process, str};
3415
3416        let level = unsafe { mem::transmute::<u32, LogLevel>(level) };
3417        let slice = unsafe { slice::from_raw_parts_mut(msg.cast::<u8>(), len) };
3418        let msg = unsafe { str::from_utf8_unchecked(slice) };
3419        let func = unsafe { &mut *func.cast::<F>() };
3420        let mut func = AssertUnwindSafe(func);
3421        if catch_unwind(move || func(level, msg)).is_err() {
3422            process::abort();
3423        }
3424    }
3425
3426    /// Sets the bytes threshold at which all writes will be slowed down to at least delayed_write_rate if the
3427    /// estimated bytes needed to be compacted exceed this threshold.
3428    ///
3429    /// Default: 64GB
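    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Spell out the 64GB default explicitly.
    /// opts.set_soft_pending_compaction_bytes_limit(64 * 1024 * 1024 * 1024);
    /// ```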
3430    pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
3431        unsafe {
3432            ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
3433        }
3434    }
3435
3436    /// Sets the bytes threshold at which all writes are stopped if the estimated bytes needed to be compacted
3437    /// exceed this threshold.
3438    ///
3439    /// Default: 256GB
3440    pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
3441        unsafe {
3442            ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
3443        }
3444    }
3445
3446    /// Sets the size of one block in arena memory allocation.
3447    ///
3448    /// If <= 0, a proper value is automatically calculated (usually 1/10 of
3449    /// write_buffer_size).
3450    ///
3451    /// Default: 0
3452    pub fn set_arena_block_size(&mut self, size: usize) {
3453        unsafe {
3454            ffi::rocksdb_options_set_arena_block_size(self.inner, size);
3455        }
3456    }
3457
3458    /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
3459    ///
3460    /// Default: false
3461    pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
3462        unsafe {
3463            ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
3464        }
3465    }
3466
3467    /// Enable whole key bloom filter in memtable. Note this will only take effect
3468    /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
3469    /// can potentially reduce CPU usage for point lookups.
3470    ///
3471    /// Default: false (disable)
3472    ///
3473    /// Dynamically changeable through SetOptions() API
3474    pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
3475        unsafe {
3476            ffi::rocksdb_options_set_memtable_whole_key_filtering(
3477                self.inner,
3478                c_uchar::from(whole_key_filter),
3479            );
3480        }
3481    }
3482
3483    /// Enable the use of key-value separation.
3484    ///
3485    /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
3486    ///
3487    /// Default: false (disable)
3488    ///
3489    /// Dynamically changeable through SetOptions() API
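    ///
    /// A minimal sketch combining this with the related blob settings below
    /// (all values are illustrative):
    ///
    /// ```
    /// use rust_rocksdb::{DBCompressionType, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_enable_blob_files(true);
    /// // Values of 4KB and larger go to blob files.
    /// opts.set_min_blob_size(4 * 1024);
    /// opts.set_blob_file_size(256 * 1024 * 1024);
    /// opts.set_blob_compression_type(DBCompressionType::Lz4);
    /// ```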
3490    pub fn set_enable_blob_files(&mut self, val: bool) {
3491        unsafe {
3492            ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
3493        }
3494    }
3495
3496    /// Sets the size threshold at or above which values will be written
3497    /// to blob files during flush or compaction.
3498    ///
3499    /// Dynamically changeable through SetOptions() API
3500    pub fn set_min_blob_size(&mut self, val: u64) {
3501        unsafe {
3502            ffi::rocksdb_options_set_min_blob_size(self.inner, val);
3503        }
3504    }
3505
3506    /// Sets the size limit for blob files.
3507    ///
3508    /// Dynamically changeable through SetOptions() API
3509    pub fn set_blob_file_size(&mut self, val: u64) {
3510        unsafe {
3511            ffi::rocksdb_options_set_blob_file_size(self.inner, val);
3512        }
3513    }
3514
3515    /// Sets the blob compression type. All blob files use the same
3516    /// compression type.
3517    ///
3518    /// Dynamically changeable through SetOptions() API
3519    pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
3520        unsafe {
3521            ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
3522        }
3523    }
3524
3525    /// If this is set to true, RocksDB will actively relocate valid blobs from the oldest blob files
3526    /// as they are encountered during compaction.
3527    ///
3528    /// Dynamically changeable through SetOptions() API
3529    pub fn set_enable_blob_gc(&mut self, val: bool) {
3530        unsafe {
3531            ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
3532        }
3533    }
3534
3535    /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
3536    ///
3537    /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
3538    /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
3539    /// the trade-off between write amplification and space amplification.
3540    ///
3541    /// Dynamically changeable through SetOptions() API
3542    pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
3543        unsafe {
3544            ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
3545        }
3546    }
3547
3548    /// Sets the blob GC force threshold.
3549    ///
3550    /// Dynamically changeable through SetOptions() API
3551    pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
3552        unsafe {
3553            ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
3554        }
3555    }
3556
3557    /// Sets the blob compaction read ahead size.
3558    ///
3559    /// Dynamically changeable through SetOptions() API
3560    pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
3561        unsafe {
3562            ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
3563        }
3564    }
3565
3566    /// Sets the blob cache.
3567    ///
3568    /// Using a dedicated object for blobs and using the same object for the block and blob caches
3569    /// are both supported. In the latter case, note that blobs are less valuable from a caching
3570    /// perspective than SST blocks, and some cache implementations have configuration options that
3571    /// can be used to prioritize items accordingly (see Cache::Priority and
3572    /// LRUCacheOptions::{high,low}_pri_pool_ratio).
3573    ///
3574    /// Default: disabled
3575    pub fn set_blob_cache(&mut self, cache: &Cache) {
3576        unsafe {
3577            ffi::rocksdb_options_set_blob_cache(self.inner, cache.0.inner.as_ptr());
3578        }
3579        self.outlive.blob_cache = Some(cache.clone());
3580    }
3581
3582    /// Set this option to true during creation of database if you want
3583    /// to be able to ingest behind (call IngestExternalFile() skipping keys
3584    /// that already exist, rather than overwriting matching keys).
3585    /// Setting this option to true has the following effects:
3586    ///
3587    /// 1. Disable some internal optimizations around SST file compression.
3588    /// 2. Reserve the last level for ingested files only.
3589    /// 3. Compaction will not include any file from the last level.
3590    ///
3591    /// Note that only Universal Compaction supports allow_ingest_behind.
3592    /// `num_levels` should be >= 3 if this option is turned on.
3593    ///
3594    /// DEFAULT: false
3595    /// Immutable.
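    ///
    /// A minimal sketch (assuming `set_compaction_style` and the
    /// `DBCompactionStyle::Universal` variant):
    ///
    /// ```
    /// use rust_rocksdb::{DBCompactionStyle, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Universal);
    /// opts.set_num_levels(3);
    /// opts.set_allow_ingest_behind(true);
    /// ```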
3596    pub fn set_allow_ingest_behind(&mut self, val: bool) {
3597        unsafe {
3598            ffi::rocksdb_options_set_allow_ingest_behind(self.inner, c_uchar::from(val));
3599        }
3600    }
3601
3602    /// A factory of a table property collector that marks an SST
3603    /// file as need-compaction when it observes at least "D" deletion
3604    /// entries in any "N" consecutive entries, or when the ratio of tombstone
3605    /// entries >= deletion_ratio.
3606    ///
3607    /// `window_size`: the sliding window size "N"
3608    /// `num_dels_trigger`: the deletion trigger "D"
3609    /// `deletion_ratio`: if <= 0 or > 1, disable triggering compaction based on
3610    /// deletion ratio.
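    ///
    /// A minimal sketch with illustrative values:
    ///
    /// ```
    /// use rust_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Mark a file as need-compaction after 10 deletions within any 100
    /// // consecutive entries, or once half of its entries are tombstones.
    /// opts.add_compact_on_deletion_collector_factory(100, 10, 0.5);
    /// ```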
3611    pub fn add_compact_on_deletion_collector_factory(
3612        &mut self,
3613        window_size: size_t,
3614        num_dels_trigger: size_t,
3615        deletion_ratio: f64,
3616    ) {
3617        unsafe {
3618            ffi::rocksdb_options_add_compact_on_deletion_collector_factory_del_ratio(
3619                self.inner,
3620                window_size,
3621                num_dels_trigger,
3622                deletion_ratio,
3623            );
3624        }
3625    }
3626
3627    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
3628    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
3629    /// Users can enable this control in two ways:
3630    ///
3631    /// 1. Limit the total memtable usage across multiple column families and DBs under a threshold.
3632    /// 2. Cost the memtable memory usage to the block cache so that memory of RocksDB can be capped by a single limit.
3633    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
3634    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
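    ///
    /// A minimal sketch, assuming the `WriteBufferManager::new_write_buffer_manager`
    /// constructor (total buffer size in bytes, and whether writers may stall):
    ///
    /// ```
    /// use rust_rocksdb::{Options, WriteBufferManager};
    ///
    /// let manager = WriteBufferManager::new_write_buffer_manager(64 * 1024 * 1024, false);
    /// let mut opts = Options::default();
    /// opts.set_write_buffer_manager(&manager);
    /// ```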
3635    pub fn set_write_buffer_manager(&mut self, write_buffer_manager: &WriteBufferManager) {
3636        unsafe {
3637            ffi::rocksdb_options_set_write_buffer_manager(
3638                self.inner,
3639                write_buffer_manager.0.inner.as_ptr(),
3640            );
3641        }
3642        self.outlive.write_buffer_manager = Some(write_buffer_manager.clone());
3643    }
3644
3645    /// If true, working threads may avoid doing unnecessary and long-latency
3646    /// operations (such as deleting obsolete files directly or deleting memtables)
3647    /// and will instead schedule a background job to do it.
3648    ///
3649    /// Use it if you're latency-sensitive.
3650    ///
3651    /// Default: false (disabled)
3652    pub fn set_avoid_unnecessary_blocking_io(&mut self, val: bool) {
3653        unsafe {
3654            ffi::rocksdb_options_set_avoid_unnecessary_blocking_io(self.inner, u8::from(val));
3655        }
3656    }
3657
3658    /// Sets the compaction priority.
3659    ///
3660    /// If level compaction_style =
3661    /// kCompactionStyleLevel, for each level, which files are prioritized to be
3662    /// picked to compact.
3663    ///
3664    /// Default: `DBCompactionPri::MinOverlappingRatio`
3665    ///
3666    /// # Examples
3667    ///
3668    /// ```
3669    /// use rust_rocksdb::{Options, DBCompactionPri};
3670    ///
3671    /// let mut opts = Options::default();
3672    /// opts.set_compaction_pri(DBCompactionPri::RoundRobin);
3673    /// ```
3674    pub fn set_compaction_pri(&mut self, pri: DBCompactionPri) {
3675        unsafe {
3676            ffi::rocksdb_options_set_compaction_pri(self.inner, pri as c_int);
3677        }
3678    }
3679
3680    /// If true, the log numbers and sizes of the synced WALs are tracked
3681    /// in MANIFEST. During DB recovery, if a synced WAL is missing
3682    /// from disk, or the WAL's size does not match the recorded size in
3683    /// MANIFEST, an error will be reported and the recovery will be aborted.
3684    ///
3685    /// This is one additional protection against WAL corruption besides the
3686    /// per-WAL-entry checksum.
3687    ///
3688    /// Note that this option does not work with secondary instance.
3689    /// Currently, only syncing closed WALs are tracked. Calling `DB::SyncWAL()`,
3690    /// etc. or writing with `WriteOptions::sync=true` to sync the live WAL is not
3691    /// tracked for performance/efficiency reasons.
3692    ///
3693    /// See: <https://github.com/facebook/rocksdb/wiki/Track-WAL-in-MANIFEST>
3694    ///
3695    /// Default: false (disabled)
3696    pub fn set_track_and_verify_wals_in_manifest(&mut self, val: bool) {
3697        unsafe {
3698            ffi::rocksdb_options_set_track_and_verify_wals_in_manifest(self.inner, u8::from(val));
3699        }
3700    }
3701
3702    /// Returns the value of the `track_and_verify_wals_in_manifest` option.
3703    pub fn get_track_and_verify_wals_in_manifest(&self) -> bool {
3704        let val_u8 =
3705            unsafe { ffi::rocksdb_options_get_track_and_verify_wals_in_manifest(self.inner) };
3706        val_u8 != 0
3707    }
3708
3709    /// The DB unique ID can be saved in the DB manifest (preferred, this option)
3710    /// or an IDENTITY file (historical, deprecated), or both. If this option is
3711    /// set to false (old behavior), then `write_identity_file` must be set to true.
3712    /// The manifest is preferred because
3713    ///
3714    /// 1. The IDENTITY file is not checksummed, so it is not as safe against
3715    ///    corruption.
3716    /// 2. The IDENTITY file may or may not be copied with the DB (e.g. not
3717    ///    copied by BackupEngine), so is not reliable for the provenance of a DB.
3718    ///
3719    /// This option might eventually be obsolete and removed as Identity files
3720    /// are phased out.
3721    ///
3722    /// Default: true (enabled)
3723    pub fn set_write_dbid_to_manifest(&mut self, val: bool) {
3724        unsafe {
3725            ffi::rocksdb_options_set_write_dbid_to_manifest(self.inner, u8::from(val));
3726        }
3727    }
3728
3729    /// Returns the value of the `write_dbid_to_manifest` option.
3730    pub fn get_write_dbid_to_manifest(&self) -> bool {
3731        let val_u8 = unsafe { ffi::rocksdb_options_get_write_dbid_to_manifest(self.inner) };
3732        val_u8 != 0
3733    }
3734}
3735
3736impl Default for Options {
3737    fn default() -> Self {
3738        unsafe {
3739            let opts = ffi::rocksdb_options_create();
3740            assert!(!opts.is_null(), "Could not create RocksDB options");
3741
3742            Self {
3743                inner: opts,
3744                outlive: OptionsMustOutliveDB::default(),
3745            }
3746        }
3747    }
3748}
3749
3750impl FlushOptions {
3751    pub fn new() -> FlushOptions {
3752        FlushOptions::default()
3753    }
3754
3755    /// Waits until the flush is done.
3756    ///
3757    /// Default: true
3758    ///
3759    /// # Examples
3760    ///
3761    /// ```
3762    /// use rust_rocksdb::FlushOptions;
3763    ///
3764    /// let mut options = FlushOptions::default();
3765    /// options.set_wait(false);
3766    /// ```
3767    pub fn set_wait(&mut self, wait: bool) {
3768        unsafe {
3769            ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
3770        }
3771    }
3772}
3773
3774impl Default for FlushOptions {
3775    fn default() -> Self {
3776        let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3777        assert!(
3778            !flush_opts.is_null(),
3779            "Could not create RocksDB flush options"
3780        );
3781
3782        Self { inner: flush_opts }
3783    }
3784}
3785
3786impl WriteOptions {
3787    pub fn new() -> WriteOptions {
3788        WriteOptions::default()
3789    }
3790
3791    /// Sets the sync mode. If true, the write will be flushed
3792    /// from the operating system buffer cache before the write is considered complete.
3793    /// If this flag is true, writes will be slower.
3794    ///
3795    /// Default: false
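    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::WriteOptions;
    ///
    /// let mut options = WriteOptions::default();
    /// options.set_sync(true);
    /// ```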
3796    pub fn set_sync(&mut self, sync: bool) {
3797        unsafe {
3798            ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
3799        }
3800    }
3801
3802    /// Sets whether WAL should be active or not.
3803    /// If true, writes will not first go to the write ahead log,
3804    /// and the write may be lost after a crash.
3805    ///
3806    /// Default: false
3807    pub fn disable_wal(&mut self, disable: bool) {
3808        unsafe {
3809            ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
3810        }
3811    }
3812
3813    /// If true and the user is trying to write to column families that don't exist (they were dropped),
3814    /// ignore the write (don't return an error). If there are multiple writes in a WriteBatch,
3815    /// other writes will succeed.
3816    ///
3817    /// Default: false
3818    pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
3819        unsafe {
3820            ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
3821                self.inner,
3822                c_uchar::from(ignore),
3823            );
3824        }
3825    }
3826
3827    /// If true and we need to wait or sleep for the write request, fails
3828    /// immediately with Status::Incomplete().
3829    ///
3830    /// Default: false
3831    pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
3832        unsafe {
3833            ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
3834        }
3835    }
3836
3837    /// If true, this write request is of lower priority if compaction is
3838    /// behind. In this case, no_slowdown = true, the request will be cancelled
3839    /// immediately with Status::Incomplete() returned. Otherwise, it will be
3840    /// slowed down. The slowdown value is determined by RocksDB to guarantee
3841    /// it introduces minimum impacts to high priority writes.
3842    ///
3843    /// Default: false
3844    pub fn set_low_pri(&mut self, v: bool) {
3845        unsafe {
3846            ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
3847        }
3848    }
3849
3850    /// If true, writebatch will maintain the last insert positions of each
3851    /// memtable as hints in concurrent write. It can improve write performance
3852    /// in concurrent writes if keys in one writebatch are sequential. In
3853    /// non-concurrent writes (when concurrent_memtable_writes is false) this
3854    /// option will be ignored.
3855    ///
3856    /// Default: false
3857    pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
3858        unsafe {
3859            ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
3860                self.inner,
3861                c_uchar::from(v),
3862            );
3863        }
3864    }
3865}
3866
3867impl Default for WriteOptions {
3868    fn default() -> Self {
3869        let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
3870        assert!(
3871            !write_opts.is_null(),
3872            "Could not create RocksDB write options"
3873        );
3874
3875        Self { inner: write_opts }
3876    }
3877}
3878
3879impl LruCacheOptions {
3880    /// Capacity of the cache, in the same units as the `charge` of each entry.
3881    /// This is typically measured in bytes, but can be a different unit if using
3882    /// kDontChargeCacheMetadata.
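    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::LruCacheOptions;
    ///
    /// let mut opts = LruCacheOptions::default();
    /// // Illustrative value: an 8MB cache.
    /// opts.set_capacity(8 * 1024 * 1024);
    /// ```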
3883    pub fn set_capacity(&mut self, cap: usize) {
3884        unsafe {
3885            ffi::rocksdb_lru_cache_options_set_capacity(self.inner, cap);
3886        }
3887    }
3888
3889    /// Cache is sharded into 2^num_shard_bits shards, by hash of key.
3890    /// If < 0, a good default is chosen based on the capacity and the
3891    /// implementation. (Mutex-based implementations are much more reliant
3892    /// on many shards for parallel scalability.)
3893    pub fn set_num_shard_bits(&mut self, val: c_int) {
3894        unsafe {
3895            ffi::rocksdb_lru_cache_options_set_num_shard_bits(self.inner, val);
3896        }
3897    }
3898}
3899
3900impl Default for LruCacheOptions {
3901    fn default() -> Self {
3902        let inner = unsafe { ffi::rocksdb_lru_cache_options_create() };
3903        assert!(
3904            !inner.is_null(),
3905            "Could not create RocksDB LRU cache options"
3906        );
3907
3908        Self { inner }
3909    }
3910}
3911
3912#[derive(Debug, Copy, Clone, PartialEq, Eq)]
3913#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
3914#[repr(i32)]
3915pub enum ReadTier {
3916    /// Reads data in memtable, block cache, OS cache or storage.
3917    All = 0,
3918    /// Reads data in memtable or block cache.
3919    BlockCache,
3920    /// Reads persisted data. When WAL is disabled, this option will skip data in memtable.
3921    Persisted,
3922    /// Reads data in memtable. Used for memtable only iterators.
3923    Memtable,
3924}
3925
3926impl ReadOptions {
3927    // TODO add snapshot setting here
3928    // TODO add snapshot wrapper structs with proper destructors;
3929    // that struct needs an "iterator" impl too.
3930
3931    /// Specify whether the "data block"/"index block"/"filter block"
3932    /// read for this iteration should be cached in memory.
3933    /// Callers may wish to set this field to false for bulk scans.
3934    ///
3935    /// Default: true
3936    pub fn fill_cache(&mut self, v: bool) {
3937        unsafe {
3938            ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
3939        }
3940    }
3941
3942    /// Sets the snapshot which should be used for the read.
3943    /// The snapshot must belong to the DB that is being read and must
3944    /// not have been released.
3945    pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
3946        unsafe {
3947            ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
3948        }
3949    }
3950
3951    /// Sets the lower bound for an iterator.
3952    pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
3953        self.set_lower_bound_impl(Some(key.into()));
3954    }
3955
3956    /// Sets the upper bound for an iterator.
3957    /// The upper bound itself is not included on the iteration result.
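    ///
    /// A minimal sketch setting both bounds; the lower bound is inclusive and
    /// the upper bound exclusive:
    ///
    /// ```
    /// use rust_rocksdb::ReadOptions;
    ///
    /// let mut opts = ReadOptions::default();
    /// opts.set_iterate_lower_bound(b"a".to_vec());
    /// opts.set_iterate_upper_bound(b"z".to_vec());
    /// ```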
3958    pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
3959        self.set_upper_bound_impl(Some(key.into()));
3960    }
3961
3962    /// Sets lower and upper bounds based on the provided range.  This is
3963    /// similar to setting lower and upper bounds separately except that it also
3964    /// allows either bound to be reset.
3965    ///
3966    /// The argument can be a regular Rust range, e.g. `lower..upper`.  However,
3967    /// since RocksDB upper bound is always excluded (i.e. range can never be
3968    /// fully closed) inclusive ranges (`lower..=upper` and `..=upper`) are not
3969    /// supported.  For example:
3970    ///
3971    /// ```
3972    /// let mut options = rust_rocksdb::ReadOptions::default();
3973    /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
3974    /// ```
3975    ///
3976    /// In addition, [`crate::PrefixRange`] can be used to specify a range of
3977    /// keys with a given prefix.  In particular, the above example is
3978    /// equivalent to:
3979    ///
3980    /// ```
3981    /// let mut options = rust_rocksdb::ReadOptions::default();
3982    /// options.set_iterate_range(rust_rocksdb::PrefixRange("xy".as_bytes()));
3983    /// ```
3984    ///
3985    /// Note that setting range using this method is separate to using prefix
3986    /// iterators.  Prefix iterators use prefix extractor configured for
3987    /// a column family.  Setting bounds via [`crate::PrefixRange`] is more akin
3988    /// to using a manual prefix.
3989    ///
3990    /// Using this method clears any previously set bounds.  In other words, the
3991    /// bounds can be reset by setting the range to `..` as in:
3992    ///
3993    /// ```
3994    /// let mut options = rust_rocksdb::ReadOptions::default();
3995    /// options.set_iterate_range(..);
3996    /// ```
3997    pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
3998        let (lower, upper) = range.into_bounds();
3999        self.set_lower_bound_impl(lower);
4000        self.set_upper_bound_impl(upper);
4001    }
4002
4003    fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4004        let (ptr, len) = if let Some(ref bound) = bound {
4005            (bound.as_ptr() as *const c_char, bound.len())
4006        } else if self.iterate_lower_bound.is_some() {
4007            (std::ptr::null(), 0)
4008        } else {
4009            return;
4010        };
4011        self.iterate_lower_bound = bound;
4012        unsafe {
4013            ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
4014        }
4015    }
4016
4017    fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4018        let (ptr, len) = if let Some(ref bound) = bound {
4019            (bound.as_ptr() as *const c_char, bound.len())
4020        } else if self.iterate_upper_bound.is_some() {
4021            (std::ptr::null(), 0)
4022        } else {
4023            return;
4024        };
4025        self.iterate_upper_bound = bound;
4026        unsafe {
4027            ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
4028        }
4029    }
4030
4031    /// Specify if this read request should process data that ALREADY
4032    /// resides on a particular cache. If the required data is not
4033    /// found at the specified cache, then Status::Incomplete is returned.
4034    ///
4035    /// Default: `ReadTier::All`
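    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{ReadOptions, ReadTier};
    ///
    /// let mut opts = ReadOptions::default();
    /// // Fail with `Incomplete` instead of reading from persistent storage.
    /// opts.set_read_tier(ReadTier::BlockCache);
    /// ```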
4036    pub fn set_read_tier(&mut self, tier: ReadTier) {
4037        unsafe {
4038            ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
4039        }
4040    }
4041
4042    /// Enforce that the iterator only iterates over the same
4043    /// prefix as the seek.
4044    /// This option is effective only for prefix seeks, i.e. prefix_extractor is
4045    /// non-null for the column family and total_order_seek is false.  Unlike
4046    /// iterate_upper_bound, prefix_same_as_start only works within a prefix
4047    /// but in both directions.
4048    ///
4049    /// Default: false
4050    pub fn set_prefix_same_as_start(&mut self, v: bool) {
4051        unsafe {
4052            ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
4053        }
4054    }
4055
4056    /// Enable a total order seek regardless of index format (e.g. hash index)
4057    /// used in the table. Some table format (e.g. plain table) may not support
4058    /// this option.
4059    ///
4060    /// If true when calling Get(), we also skip prefix bloom when reading from
4061    /// block based table. It provides a way to read existing data after
4062    /// changing implementation of prefix extractor.
4063    pub fn set_total_order_seek(&mut self, v: bool) {
4064        unsafe {
4065            ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
4066        }
4067    }
4068
4069    /// Sets a threshold for the number of keys that can be skipped
4070    /// before failing an iterator seek as incomplete. The default value of 0 should be used to
4071    /// never fail a request as incomplete, even on skipping too many keys.
4072    ///
4073    /// Default: 0
4074    pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
4075        unsafe {
4076            ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
4077        }
4078    }
4079
4080    /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we schedule a background job
4081    /// in the flush job queue and delete obsolete files in background.
4082    ///
4083    /// Default: false
4084    pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
4085        unsafe {
4086            ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
4087                self.inner,
4088                c_uchar::from(v),
4089            );
4090        }
4091    }
4092
4093    /// If true, keys deleted using the DeleteRange() API will be visible to
4094    /// readers until they are naturally deleted during compaction.
4095    ///
4096    /// Default: false
4097    #[deprecated(
4098        note = "deprecated in RocksDB 10.2.1: no performance impact if DeleteRange is not used"
4099    )]
4100    pub fn set_ignore_range_deletions(&mut self, v: bool) {
4101        unsafe {
4102            ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
4103        }
4104    }
4105
4106    /// If true, all data read from underlying storage will be
4107    /// verified against corresponding checksums.
4108    ///
4109    /// Default: true
4110    pub fn set_verify_checksums(&mut self, v: bool) {
4111        unsafe {
4112            ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
4113        }
4114    }
4115
4116    /// If non-zero, an iterator will create a new table reader which
4117    /// performs reads of the given size. Using a large size (> 2MB) can
4118    /// improve the performance of forward iteration on spinning disks.
4119    /// Default: 0
4120    ///
4121    /// ```
4122    /// use rust_rocksdb::{ReadOptions};
4123    ///
4124    /// let mut opts = ReadOptions::default();
4125    /// opts.set_readahead_size(4_194_304); // 4mb
4126    /// ```
4127    pub fn set_readahead_size(&mut self, v: usize) {
4128        unsafe {
4129            ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
4130        }
4131    }
4132
4133    /// If auto_readahead_size is set to true, it will auto tune the readahead_size
4134    /// during scans internally.
4135    /// For this feature to be enabled, iterate_upper_bound must also be specified.
4136    ///
4137    /// NOTE: - Recommended for forward Scans only.
4138    ///       - If there is a backward scan, this option will be
4139    ///         disabled internally and won't be enabled again if the forward scan
4140    ///         is issued again.
4141    ///
4142    /// Default: true
4143    pub fn set_auto_readahead_size(&mut self, v: bool) {
4144        unsafe {
4145            ffi::rocksdb_readoptions_set_auto_readahead_size(self.inner, c_uchar::from(v));
4146        }
4147    }
4148
4149    /// If true, create a tailing iterator. Note that tailing iterators
4150    /// only support moving in the forward direction. Iterating in reverse
4151    /// or seek_to_last are not supported.
4152    pub fn set_tailing(&mut self, v: bool) {
4153        unsafe {
4154            ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
4155        }
4156    }
4157
4158    /// Specifies the value of "pin_data". If true, it keeps the blocks
4159    /// loaded by the iterator pinned in memory for as long as the iterator is not deleted.
4160    /// If used when reading from tables created with
4161    /// BlockBasedTableOptions::use_delta_encoding = false, the
4162    /// iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
4163    /// return 1.
4164    ///
4165    /// Default: false
4166    pub fn set_pin_data(&mut self, v: bool) {
4167        unsafe {
4168            ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
4169        }
4170    }
4171
4172    /// Asynchronously prefetch some data.
4173    ///
4174    /// Used for sequential reads and internal automatic prefetching.
4175    ///
4176    /// Default: `false`
4177    pub fn set_async_io(&mut self, v: bool) {
4178        unsafe {
4179            ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
4180        }
4181    }
4182
4183    /// Deadline for completing an API call (Get/MultiGet/Seek/Next for now)
4184    /// in microseconds.
4185    /// It should be set to microseconds since epoch, i.e, gettimeofday or
4186    /// equivalent plus allowed duration in microseconds.
4187    /// This is best effort. The call may exceed the deadline if there is IO
4188    /// involved and the file system doesn't support deadlines, or due to
4189    /// checking for deadline periodically rather than for every key if
4190    /// processing a batch.
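    ///
    /// A minimal sketch computing a deadline 10ms from now:
    ///
    /// ```
    /// use std::time::{SystemTime, UNIX_EPOCH};
    ///
    /// use rust_rocksdb::ReadOptions;
    ///
    /// let now_us = SystemTime::now()
    ///     .duration_since(UNIX_EPOCH)
    ///     .unwrap()
    ///     .as_micros() as u64;
    /// let mut opts = ReadOptions::default();
    /// opts.set_deadline(now_us + 10_000);
    /// ```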
    pub fn set_deadline(&mut self, microseconds: u64) {
        unsafe {
            ffi::rocksdb_readoptions_set_deadline(self.inner, microseconds);
        }
    }

    /// A timeout in microseconds to be passed to the underlying FileSystem for
    /// reads. As opposed to the deadline, this determines the timeout for each
    /// individual file read request. If a MultiGet/Get/Seek/Next etc. call
    /// results in multiple reads, each read can last up to io_timeout
    /// microseconds.
    pub fn set_io_timeout(&mut self, microseconds: u64) {
        unsafe {
            ffi::rocksdb_readoptions_set_io_timeout(self.inner, microseconds);
        }
    }

    /// Timestamp of operation. Reads should return the latest data visible to the
    /// specified timestamp. All timestamps of the same database must be of the
    /// same length and format. The user is responsible for providing a customized
    /// compare function via Comparator to order <key, timestamp> tuples.
    /// For iterators, iter_start_ts is the lower bound (older) and timestamp
    /// serves as the upper bound. Versions of the same record that fall in
    /// the timestamp range will be returned. If iter_start_ts is nullptr,
    /// only the most recent version visible to timestamp is returned.
    /// The user-specified timestamp feature is still under active development,
    /// and the API is subject to change.
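    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the database was opened with a comparator
    /// that encodes timestamps as 8-byte big-endian integers (the encoding is
    /// application-defined):
    ///
    /// ```
    /// use rust_rocksdb::ReadOptions;
    ///
    /// let mut opts = ReadOptions::default();
    /// // Read the latest version visible at logical time 42.
    /// opts.set_timestamp(42u64.to_be_bytes());
    /// ```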
    pub fn set_timestamp<S: Into<Vec<u8>>>(&mut self, ts: S) {
        self.set_timestamp_impl(Some(ts.into()));
    }

    fn set_timestamp_impl(&mut self, ts: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref ts) = ts {
            (ts.as_ptr() as *const c_char, ts.len())
        } else if self.timestamp.is_some() {
            // The stored timestamp is a `Some` but we're updating it to a `None`.
            // This means to cancel a previously set timestamp.
            // To do this, use a null pointer and zero length.
            (std::ptr::null(), 0)
        } else {
            return;
        };
        self.timestamp = ts;
        unsafe {
            ffi::rocksdb_readoptions_set_timestamp(self.inner, ptr, len);
        }
    }

    /// See `set_timestamp`
    pub fn set_iter_start_ts<S: Into<Vec<u8>>>(&mut self, ts: S) {
        self.set_iter_start_ts_impl(Some(ts.into()));
    }

    fn set_iter_start_ts_impl(&mut self, ts: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref ts) = ts {
            (ts.as_ptr() as *const c_char, ts.len())
        } else if self.iter_start_ts.is_some() {
            // Cancel a previously set iterator start timestamp with a null
            // pointer and zero length, mirroring `set_timestamp_impl`.
            (std::ptr::null(), 0)
        } else {
            return;
        };
        self.iter_start_ts = ts;
        unsafe {
            ffi::rocksdb_readoptions_set_iter_start_ts(self.inner, ptr, len);
        }
    }
}

impl Default for ReadOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_readoptions_create(),
                timestamp: None,
                iter_start_ts: None,
                iterate_upper_bound: None,
                iterate_lower_bound: None,
            }
        }
    }
}

impl IngestExternalFileOptions {
    /// Can be set to true to move the files instead of copying them.
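    ///
    /// # Examples
    ///
    /// A minimal sketch of configuring and running an ingestion; the SST
    /// path is a placeholder and the database handle is assumed:
    ///
    /// ```
    /// use rust_rocksdb::{IngestExternalFileOptions, DB};
    ///
    /// fn ingest(db: &DB) {
    ///     let mut opts = IngestExternalFileOptions::default();
    ///     opts.set_move_files(true);
    ///     db.ingest_external_file_opts(&opts, vec!["data/batch-0001.sst"])
    ///         .expect("ingestion failed");
    /// }
    /// ```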
    pub fn set_move_files(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
        }
    }

    /// If set to false, keys from an ingested file could appear in existing snapshots
    /// that were created before the file was ingested.
    pub fn set_snapshot_consistency(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If set to false, IngestExternalFile() will fail if the file key range
    /// overlaps with existing keys or tombstones in the DB.
    pub fn set_allow_global_seqno(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If set to false and the file key range overlaps with the memtable key range
    /// (a memtable flush would be required), IngestExternalFile will fail.
    pub fn set_allow_blocking_flush(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Set to true if you would like duplicate keys in the file being ingested
    /// to be skipped rather than overwriting existing data under that key.
    /// Use case: back-filling historical data into the database without
    /// overwriting an existing, newer version of that data.
    /// This option can only be used if the DB has been running
    /// with allow_ingest_behind=true since the dawn of time.
    /// All files will be ingested at the bottommost level with seqno=0.
    pub fn set_ingest_behind(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
        }
    }
}

impl Default for IngestExternalFileOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_ingestexternalfileoptions_create(),
            }
        }
    }
}

/// Used by BlockBasedOptions::set_index_type.
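///
/// # Examples
///
/// A minimal sketch; the two-level index choice here is illustrative, not a
/// recommendation:
///
/// ```
/// use rust_rocksdb::{BlockBasedIndexType, BlockBasedOptions, Options};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch);
/// let mut opts = Options::default();
/// opts.set_block_based_table_factory(&block_opts);
/// ```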
pub enum BlockBasedIndexType {
    /// A space-efficient index block that is optimized for
    /// binary search.
    BinarySearch,

    /// The hash index, if enabled, will perform a hash lookup if
    /// a prefix extractor has been provided through Options::set_prefix_extractor.
    HashSearch,

    /// A two-level index implementation. Both levels are binary search indexes.
    TwoLevelIndexSearch,
}

/// Used by BlockBasedOptions::set_data_block_index_type.
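///
/// # Examples
///
/// A minimal sketch; the 0.75 hash ratio is simply the commonly cited
/// default, not a tuning recommendation:
///
/// ```
/// use rust_rocksdb::{BlockBasedOptions, DataBlockIndexType};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
/// block_opts.set_data_block_hash_ratio(0.75);
/// ```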
#[repr(C)]
pub enum DataBlockIndexType {
    /// Use binary search when performing point lookup for keys in data blocks.
    /// This is the default.
    BinarySearch = 0,

    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
    /// compatible with databases created without this feature. Once turned on, existing data will
    /// be gradually converted to the hash index format.
    BinaryAndHash = 1,
}

/// Defines the underlying memtable implementation.
/// See the official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
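///
/// # Examples
///
/// A minimal sketch; the bucket count and skip-list parameters are
/// placeholders:
///
/// ```
/// use rust_rocksdb::{MemtableFactory, Options};
///
/// let mut opts = Options::default();
/// opts.set_memtable_factory(MemtableFactory::HashSkipList {
///     bucket_count: 1_000_000,
///     height: 4,
///     branching_factor: 4,
/// });
/// ```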
pub enum MemtableFactory {
    Vector,
    HashSkipList {
        bucket_count: usize,
        height: i32,
        branching_factor: i32,
    },
    HashLinkList {
        bucket_count: usize,
    },
}

/// Used by BlockBasedOptions::set_checksum_type.
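///
/// # Examples
///
/// A minimal sketch; note that `XXH3` requires RocksDB 6.27 or newer, per the
/// comment on the variant below:
///
/// ```
/// use rust_rocksdb::{BlockBasedOptions, ChecksumType};
///
/// let mut block_opts = BlockBasedOptions::default();
/// block_opts.set_checksum_type(ChecksumType::XXH3);
/// ```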
pub enum ChecksumType {
    NoChecksum = 0,
    CRC32c = 1,
    XXHash = 2,
    XXHash64 = 3,
    XXH3 = 4, // Supported since RocksDB 6.27
}

/// Used in [`PlainTableFactoryOptions`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum KeyEncodingType {
    /// Always write full keys.
    #[default]
    Plain = 0,
    /// Find opportunities to write the same prefix for multiple rows.
    Prefix = 1,
}

/// Used with DBOptions::set_plain_table_factory.
/// See the official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
///
/// Defaults:
///  user_key_length: 0 (variable length)
///  bloom_bits_per_key: 10
///  hash_table_ratio: 0.75
///  index_sparseness: 16
///  huge_page_tlb_size: 0
///  encoding_type: KeyEncodingType::Plain
///  full_scan_mode: false
///  store_index_in_file: false
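///
/// # Examples
///
/// A minimal sketch that spells out the documented defaults; the values are
/// placeholders to be tuned per workload:
///
/// ```
/// use rust_rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
///
/// let mut opts = Options::default();
/// opts.set_plain_table_factory(&PlainTableFactoryOptions {
///     user_key_length: 0,
///     bloom_bits_per_key: 10,
///     hash_table_ratio: 0.75,
///     index_sparseness: 16,
///     huge_page_tlb_size: 0,
///     encoding_type: KeyEncodingType::Plain,
///     full_scan_mode: false,
///     store_index_in_file: false,
/// });
/// ```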
pub struct PlainTableFactoryOptions {
    pub user_key_length: u32,
    pub bloom_bits_per_key: i32,
    pub hash_table_ratio: f64,
    pub index_sparseness: usize,
    pub huge_page_tlb_size: usize,
    pub encoding_type: KeyEncodingType,
    pub full_scan_mode: bool,
    pub store_index_in_file: bool,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompressionType {
    None = ffi::rocksdb_no_compression as isize,
    Snappy = ffi::rocksdb_snappy_compression as isize,
    Zlib = ffi::rocksdb_zlib_compression as isize,
    Bz2 = ffi::rocksdb_bz2_compression as isize,
    Lz4 = ffi::rocksdb_lz4_compression as isize,
    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
    Zstd = ffi::rocksdb_zstd_compression as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionStyle {
    Level = ffi::rocksdb_level_compaction as isize,
    Universal = ffi::rocksdb_universal_compaction as isize,
    Fifo = ffi::rocksdb_fifo_compaction as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBRecoveryMode {
    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum RateLimiterMode {
    KReadsOnly = 0,
    KWritesOnly = 1,
    KAllIo = 2,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionPri {
    ByCompensatedSize = ffi::rocksdb_k_by_compensated_size_compaction_pri as isize,
    OldestLargestSeqFirst = ffi::rocksdb_k_oldest_largest_seq_first_compaction_pri as isize,
    OldestSmallestSeqFirst = ffi::rocksdb_k_oldest_smallest_seq_first_compaction_pri as isize,
    MinOverlappingRatio = ffi::rocksdb_k_min_overlapping_ratio_compaction_pri as isize,
    RoundRobin = ffi::rocksdb_k_round_robin_compaction_pri as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum BlockBasedPinningTier {
    Fallback = ffi::rocksdb_block_based_k_fallback_pinning_tier as isize,
    None = ffi::rocksdb_block_based_k_none_pinning_tier as isize,
    FlushAndSimilar = ffi::rocksdb_block_based_k_flush_and_similar_pinning_tier as isize,
    All = ffi::rocksdb_block_based_k_all_pinning_tier as isize,
}

pub struct FifoCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}

impl Default for FifoCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Fifo Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for FifoCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
        }
    }
}

impl FifoCompactOptions {
    /// Sets the max table file size.
    ///
    /// Once the total size of all table files reaches this limit, the oldest
    /// table file will be deleted.
    ///
    /// Default: 1GB
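    ///
    /// # Examples
    ///
    /// A minimal sketch; the 4 GiB cap is a placeholder:
    ///
    /// ```
    /// use rust_rocksdb::{DBCompactionStyle, FifoCompactOptions, Options};
    ///
    /// let mut fifo_opts = FifoCompactOptions::default();
    /// fifo_opts.set_max_table_files_size(4 * 1024 * 1024 * 1024);
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Fifo);
    /// opts.set_fifo_compaction_options(&fifo_opts);
    /// ```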
    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum UniversalCompactionStopStyle {
    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
}

pub struct UniversalCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
}

impl Default for UniversalCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Universal Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for UniversalCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
        }
    }
}

impl UniversalCompactOptions {
    /// Sets the percentage flexibility while comparing file sizes.
    /// If the candidate file(s) size is 1% smaller than the next file's size,
    /// the next file is included in the candidate set.
    ///
    /// Default: 1
    pub fn set_size_ratio(&mut self, ratio: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
        }
    }

    /// Sets the minimum number of files in a single compaction run.
    ///
    /// Default: 2
    pub fn set_min_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
        }
    }

    /// Sets the maximum number of files in a single compaction run.
    ///
    /// Default: UINT_MAX
    pub fn set_max_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
        }
    }

    /// Sets the size amplification.
    ///
    /// It is defined as the amount (in percentage) of
    /// additional storage needed to store a single byte of data in the database.
    /// For example, a size amplification of 2% means that a database that
    /// contains 100 bytes of user data may occupy up to 102 bytes of
    /// physical storage. By this definition, a fully compacted database has
    /// a size amplification of 0%. RocksDB uses the following heuristic
    /// to calculate size amplification: it assumes that all files excluding
    /// the earliest file contribute to the size amplification.
    ///
    /// Default: 200, which means that a 100 byte database could require up to 300 bytes of storage.
    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
                self.inner, v,
            );
        }
    }

    /// Sets the percentage of compression size.
    ///
    /// If this option is set to -1, all the output files
    /// will follow the compression type specified.
    ///
    /// If this option is not negative, we will try to make sure the compressed
    /// size is just above this value. In normal cases, at least this percentage
    /// of data will be compressed.
    /// When we are compacting to a new file, the criterion for whether it
    /// needs to be compressed is as follows. Assume the list of files sorted
    /// by generation time is:
    ///    A1...An B1...Bm C1...Ct
    /// where A1 is the newest and Ct is the oldest, and we are going to compact
    /// B1...Bm. We calculate the total size of all the files as total_size, as
    /// well as the total size of C1...Ct as total_C; the compaction output file
    /// will be compressed iff
    ///   total_C / total_size < this percentage
    ///
    /// Default: -1
    pub fn set_compression_size_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
        }
    }

    /// Sets the algorithm used to stop picking files into a single compaction run.
    ///
    /// Default: `UniversalCompactionStopStyle::Total`
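    ///
    /// # Examples
    ///
    /// A minimal sketch wiring the options into a database configuration; the
    /// individual values are placeholders:
    ///
    /// ```
    /// use rust_rocksdb::{
    ///     DBCompactionStyle, Options, UniversalCompactOptions, UniversalCompactionStopStyle,
    /// };
    ///
    /// let mut uni_opts = UniversalCompactOptions::default();
    /// uni_opts.set_size_ratio(1);
    /// uni_opts.set_max_size_amplification_percent(200);
    /// uni_opts.set_stop_style(UniversalCompactionStopStyle::Total);
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Universal);
    /// opts.set_universal_compaction_options(&uni_opts);
    /// ```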
    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum BottommostLevelCompaction {
    /// Skip bottommost level compaction.
    Skip = 0,
    /// Only compact the bottommost level if there is a compaction filter.
    /// This is the default option.
    IfHaveCompactionFilter,
    /// Always compact the bottommost level.
    Force,
    /// Always compact the bottommost level, but in the bottommost level avoid
    /// double-compacting files created in the same compaction.
    ForceOptimized,
}

pub struct CompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
    full_history_ts_low: Option<Vec<u8>>,
}

impl Default for CompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_compactoptions_create() };
        assert!(!opts.is_null(), "Could not create RocksDB Compact Options");

        Self {
            inner: opts,
            full_history_ts_low: None,
        }
    }
}

impl Drop for CompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_compactoptions_destroy(self.inner);
        }
    }
}

impl CompactOptions {
    /// If more than one thread calls manual compaction,
    /// only one will actually schedule it while the other threads will simply wait
    /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
    /// is set to true, the call will disable scheduling of automatic compaction jobs
    /// and wait for existing automatic compaction jobs to finish.
    pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Sets bottommost level compaction.
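    ///
    /// # Examples
    ///
    /// A minimal sketch of a manual full-range compaction; the database
    /// handle is assumed to come from elsewhere:
    ///
    /// ```
    /// use rust_rocksdb::{BottommostLevelCompaction, CompactOptions, DB};
    ///
    /// fn compact_everything(db: &DB) {
    ///     let mut opts = CompactOptions::default();
    ///     opts.set_bottommost_level_compaction(BottommostLevelCompaction::Force);
    ///     // `None` bounds compact the whole key range.
    ///     db.compact_range_opt(None::<&[u8]>, None::<&[u8]>, &opts);
    /// }
    /// ```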
    pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
        unsafe {
            ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
        }
    }

    /// If true, compacted files will be moved to the minimum level capable
    /// of holding the data, or to a given level (if target_level is non-negative).
    pub fn set_change_level(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
        }
    }

    /// If change_level is true and target_level has a non-negative value, compacted
    /// files will be moved to target_level.
    pub fn set_target_level(&mut self, lvl: c_int) {
        unsafe {
            ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
        }
    }

    /// Sets the user-defined timestamp low bound; data with a timestamp older
    /// than the low bound may be GC'ed by compaction. Default: nullptr
    pub fn set_full_history_ts_low<S: Into<Vec<u8>>>(&mut self, ts: S) {
        self.set_full_history_ts_low_impl(Some(ts.into()));
    }

    fn set_full_history_ts_low_impl(&mut self, ts: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref ts) = ts {
            (ts.as_ptr() as *mut c_char, ts.len())
        } else if self.full_history_ts_low.is_some() {
            // Cancel a previously set low bound with a null pointer and zero
            // length, mirroring `ReadOptions::set_timestamp_impl`.
            (std::ptr::null_mut::<c_char>(), 0)
        } else {
            return;
        };
        self.full_history_ts_low = ts;
        unsafe {
            ffi::rocksdb_compactoptions_set_full_history_ts_low(self.inner, ptr, len);
        }
    }
}

pub struct WaitForCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_wait_for_compact_options_t,
}

impl Default for WaitForCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_wait_for_compact_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Wait For Compact Options"
        );

        Self { inner: opts }
    }
}

impl Drop for WaitForCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_destroy(self.inner);
        }
    }
}

impl WaitForCompactOptions {
    /// If true, abort waiting if background jobs are paused. If false,
    /// ContinueBackgroundWork() must be called to resume the background jobs.
    /// Otherwise, jobs that were queued but not yet scheduled may never finish,
    /// and WaitForCompact() may wait indefinitely (if a timeout is set, it will
    /// abort after the timeout).
    ///
    /// Default: false
    pub fn set_abort_on_pause(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_abort_on_pause(self.inner, c_uchar::from(v));
        }
    }

    /// If true, flush all column families before starting to wait.
    ///
    /// Default: false
    pub fn set_flush(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_flush(self.inner, c_uchar::from(v));
        }
    }

    /// Timeout in microseconds for waiting for compaction to complete.
    /// When timeout == 0, WaitForCompact() will wait as long as there's background
    /// work to finish.
    ///
    /// Default: 0
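    ///
    /// # Examples
    ///
    /// A minimal sketch; the one-minute timeout is a placeholder, and a
    /// `DB::wait_for_compact` binding that accepts these options is assumed:
    ///
    /// ```
    /// use rust_rocksdb::{WaitForCompactOptions, DB};
    ///
    /// fn settle(db: &DB) {
    ///     let mut opts = WaitForCompactOptions::default();
    ///     opts.set_flush(true);
    ///     opts.set_timeout(60_000_000); // 60 seconds, in microseconds
    ///     db.wait_for_compact(&opts).expect("background work did not finish");
    /// }
    /// ```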
    pub fn set_timeout(&mut self, microseconds: u64) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_timeout(self.inner, microseconds);
        }
    }
}

/// Represents a path where SST files can be placed.
pub struct DBPath {
    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
}

impl DBPath {
    /// Creates a new path.
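    ///
    /// # Examples
    ///
    /// A minimal sketch spreading SST files across two paths; the paths and
    /// the 100 GiB / 1 TiB target sizes are placeholders:
    ///
    /// ```
    /// use rust_rocksdb::{DBPath, Options};
    ///
    /// let fast = DBPath::new("/nvme/rocksdb", 100 * 1024 * 1024 * 1024).unwrap();
    /// let slow = DBPath::new("/hdd/rocksdb", 1024 * 1024 * 1024 * 1024).unwrap();
    /// let mut opts = Options::default();
    /// opts.set_db_paths(&[fast, slow]);
    /// ```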
    pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
        let p = to_cpath(path.as_ref())?;
        let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
        if dbpath.is_null() {
            Err(Error::new(format!(
                "Could not create path for storing sst files at location: {}",
                path.as_ref().display()
            )))
        } else {
            Ok(DBPath { inner: dbpath })
        }
    }
}

impl Drop for DBPath {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_dbpath_destroy(self.inner);
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::cache::Cache;
    use crate::db_options::WriteBufferManager;
    use crate::{MemtableFactory, Options};

    #[test]
    fn test_enable_statistics() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_dump_period_sec(60);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_memtable_factory() {
        let mut opts = Options::default();
        opts.set_memtable_factory(MemtableFactory::Vector);
        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
        opts.set_memtable_factory(MemtableFactory::HashSkipList {
            bucket_count: 100,
            height: 4,
            branching_factor: 4,
        });
    }

    #[test]
    fn test_use_fsync() {
        let mut opts = Options::default();
        assert!(!opts.get_use_fsync());
        opts.set_use_fsync(true);
        assert!(opts.get_use_fsync());
    }

    #[test]
    fn test_set_stats_persist_period_sec() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_persist_period_sec(5);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_write_buffer_manager() {
        let mut opts = Options::default();
        let lrucache = Cache::new_lru_cache(100);
        let write_buffer_manager =
            WriteBufferManager::new_write_buffer_manager_with_cache(100, false, lrucache);
        assert_eq!(write_buffer_manager.get_buffer_size(), 100);
        assert_eq!(write_buffer_manager.get_usage(), 0);
        assert!(write_buffer_manager.enabled());

        opts.set_write_buffer_manager(&write_buffer_manager);
        drop(opts);

        // The WriteBufferManager outlives the options that referenced it.
        assert!(write_buffer_manager.enabled());
    }
}
4883}