rust_rocksdb/db_options.rs
1// Copyright 2020 Tyler Neely
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::ffi::CStr;
16use std::panic::{catch_unwind, AssertUnwindSafe};
17use std::path::Path;
18use std::ptr::null_mut;
19use std::slice;
20use std::sync::Arc;
21
22use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};
23
24use crate::cache::Cache;
25use crate::column_family::ColumnFamilyTtl;
26use crate::event_listener::{new_event_listener, EventListener};
27use crate::sst_file_manager::SstFileManager;
28use crate::statistics::{Histogram, HistogramData, StatsLevel};
29use crate::write_buffer_manager::WriteBufferManager;
30use crate::{
31 compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
32 compaction_filter_factory::{self, CompactionFilterFactory},
33 comparator::{
34 ComparatorCallback, ComparatorWithTsCallback, CompareFn, CompareTsFn, CompareWithoutTsFn,
35 },
36 db::DBAccess,
37 env::Env,
38 ffi,
39 ffi_util::{from_cstr, to_cpath, CStrLike},
40 merge_operator::{
41 self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
42 },
43 slice_transform::SliceTransform,
44 statistics::Ticker,
45 ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
46};
47
48// must be Send and Sync because it will be called by RocksDB from different threads
49type LogCallbackFn = dyn Fn(LogLevel, &str) + 'static + Send + Sync;
50
51// Holds a log callback to ensure it outlives any Options and DBs that use it.
52struct LogCallback {
53 callback: Box<LogCallbackFn>,
54}
55
56#[derive(Default)]
57pub(crate) struct OptionsMustOutliveDB {
58 env: Option<Env>,
59 row_cache: Option<Cache>,
60 blob_cache: Option<Cache>,
61 block_based: Option<BlockBasedOptionsMustOutliveDB>,
62 write_buffer_manager: Option<WriteBufferManager>,
63 sst_file_manager: Option<SstFileManager>,
64 log_callback: Option<Arc<LogCallback>>,
65}
66
67impl OptionsMustOutliveDB {
68 pub(crate) fn clone(&self) -> Self {
69 Self {
70 env: self.env.clone(),
71 row_cache: self.row_cache.clone(),
72 blob_cache: self.blob_cache.clone(),
73 block_based: self
74 .block_based
75 .as_ref()
76 .map(BlockBasedOptionsMustOutliveDB::clone),
77 write_buffer_manager: self.write_buffer_manager.clone(),
78 sst_file_manager: self.sst_file_manager.clone(),
79 log_callback: self.log_callback.clone(),
80 }
81 }
82}
83
84#[derive(Default)]
85struct BlockBasedOptionsMustOutliveDB {
86 block_cache: Option<Cache>,
87}
88
89impl BlockBasedOptionsMustOutliveDB {
90 fn clone(&self) -> Self {
91 Self {
92 block_cache: self.block_cache.clone(),
93 }
94 }
95}
96
97/// Database-wide options around performance and behavior.
98///
99/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
100/// and most importantly, measure performance under realistic workloads with realistic hardware.
101///
102/// # Examples
103///
104/// ```
105/// use rust_rocksdb::{Options, DB};
106/// use rust_rocksdb::DBCompactionStyle;
107///
108/// fn badly_tuned_for_somebody_elses_disk() -> DB {
109/// let path = "path/for/rocksdb/storageX";
110/// let mut opts = Options::default();
111/// opts.create_if_missing(true);
112/// opts.set_max_open_files(10000);
113/// opts.set_use_fsync(false);
114/// opts.set_bytes_per_sync(8388608);
115/// opts.optimize_for_point_lookup(1024);
116/// opts.set_table_cache_num_shard_bits(6);
117/// opts.set_max_write_buffer_number(32);
118/// opts.set_write_buffer_size(536870912);
119/// opts.set_target_file_size_base(1073741824);
120/// opts.set_min_write_buffer_number_to_merge(4);
121/// opts.set_level_zero_stop_writes_trigger(2000);
122/// opts.set_level_zero_slowdown_writes_trigger(0);
123/// opts.set_compaction_style(DBCompactionStyle::Universal);
124/// opts.set_disable_auto_compactions(true);
125///
126/// DB::open(&opts, path).unwrap()
127/// }
128/// ```
129pub struct Options {
130 pub(crate) inner: *mut ffi::rocksdb_options_t,
131 pub(crate) outlive: OptionsMustOutliveDB,
132}
133
134/// Optionally disable WAL or sync for this write.
135///
136/// # Examples
137///
138/// Making an unsafe write of a batch:
139///
140/// ```
141/// use rust_rocksdb::{DB, Options, WriteBatch, WriteOptions};
142///
143/// let tempdir = tempfile::Builder::new()
144/// .prefix("_path_for_rocksdb_storageY1")
145/// .tempdir()
146/// .expect("Failed to create temporary path for the _path_for_rocksdb_storageY1");
147/// let path = tempdir.path();
148/// {
149/// let db = DB::open_default(path).unwrap();
150/// let mut batch = WriteBatch::default();
151/// batch.put(b"my key", b"my value");
152/// batch.put(b"key2", b"value2");
153/// batch.put(b"key3", b"value3");
154///
155/// let mut write_options = WriteOptions::default();
156/// write_options.set_sync(false);
157/// write_options.disable_wal(true);
158///
159/// db.write_opt(&batch, &write_options);
160/// }
161/// let _ = DB::destroy(&Options::default(), path);
162/// ```
163pub struct WriteOptions {
164 pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
165}
166
167pub struct LruCacheOptions {
168 pub(crate) inner: *mut ffi::rocksdb_lru_cache_options_t,
169}
170
171/// Optionally wait for the memtable flush to be performed.
172///
173/// # Examples
174///
175/// Manually flushing the memtable:
176///
177/// ```
178/// use rust_rocksdb::{DB, Options, FlushOptions};
179///
180/// let tempdir = tempfile::Builder::new()
181/// .prefix("_path_for_rocksdb_storageY2")
182/// .tempdir()
183/// .expect("Failed to create temporary path for the _path_for_rocksdb_storageY2");
184/// let path = tempdir.path();
185/// {
186/// let db = DB::open_default(path).unwrap();
187///
188/// let mut flush_options = FlushOptions::default();
189/// flush_options.set_wait(true);
190///
191/// db.flush_opt(&flush_options);
192/// }
193/// let _ = DB::destroy(&Options::default(), path);
194/// ```
195pub struct FlushOptions {
196 pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
197}
198
199/// For configuring block-based file storage.
200pub struct BlockBasedOptions {
201 pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
202 outlive: BlockBasedOptionsMustOutliveDB,
203}
204
205pub struct ReadOptions {
206 pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
207 // The `ReadOptions` owns a copy of the timestamp and iteration bounds.
208 // This is necessary to ensure the pointers we pass over the FFI live as
209 // long as the `ReadOptions`. This way, when performing the read operation,
210 // the pointers are guaranteed to be valid.
211 timestamp: Option<Vec<u8>>,
212 iter_start_ts: Option<Vec<u8>>,
213 iterate_upper_bound: Option<Vec<u8>>,
214 iterate_lower_bound: Option<Vec<u8>>,
215}
216
217/// Configuration of cuckoo-based storage.
218pub struct CuckooTableOptions {
219 pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
220}
221
222/// For configuring external files ingestion.
223///
224/// # Examples
225///
226/// Move files instead of copying them:
227///
228/// ```
229/// use rust_rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
230///
231/// let writer_opts = Options::default();
232/// let mut writer = SstFileWriter::create(&writer_opts);
233/// let tempdir = tempfile::Builder::new()
234/// .tempdir()
235/// .expect("Failed to create temporary folder for the _path_for_sst_file");
236/// let path1 = tempdir.path().join("_path_for_sst_file");
237/// writer.open(path1.clone()).unwrap();
238/// writer.put(b"k1", b"v1").unwrap();
239/// writer.finish().unwrap();
240///
241/// let tempdir2 = tempfile::Builder::new()
242/// .prefix("_path_for_rocksdb_storageY3")
243/// .tempdir()
244/// .expect("Failed to create temporary path for the _path_for_rocksdb_storageY3");
245/// let path2 = tempdir2.path();
246/// {
247/// let db = DB::open_default(&path2).unwrap();
248/// let mut ingest_opts = IngestExternalFileOptions::default();
249/// ingest_opts.set_move_files(true);
250/// db.ingest_external_file_opts(&ingest_opts, vec![path1]).unwrap();
251/// }
252/// let _ = DB::destroy(&Options::default(), path2);
253/// ```
254pub struct IngestExternalFileOptions {
255 pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
256}
257
258// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
259// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
260// rocksdb internally does not rely on thread-local information for its user-exposed types.
261unsafe impl Send for Options {}
262unsafe impl Send for WriteOptions {}
263unsafe impl Send for LruCacheOptions {}
264unsafe impl Send for FlushOptions {}
265unsafe impl Send for BlockBasedOptions {}
266unsafe impl Send for CuckooTableOptions {}
267unsafe impl Send for ReadOptions {}
268unsafe impl Send for IngestExternalFileOptions {}
269unsafe impl Send for CompactOptions {}
270
271// Sync is similarly safe for many types because they do not expose interior mutability, and their
272// use within the rocksdb library is generally behind a const reference
273unsafe impl Sync for Options {}
274unsafe impl Sync for WriteOptions {}
275unsafe impl Sync for LruCacheOptions {}
276unsafe impl Sync for FlushOptions {}
277unsafe impl Sync for BlockBasedOptions {}
278unsafe impl Sync for CuckooTableOptions {}
279unsafe impl Sync for ReadOptions {}
280unsafe impl Sync for IngestExternalFileOptions {}
281unsafe impl Sync for CompactOptions {}
282
283impl Drop for Options {
284 fn drop(&mut self) {
285 unsafe {
286 ffi::rocksdb_options_destroy(self.inner);
287 }
288 }
289}
290
291impl Clone for Options {
292 fn clone(&self) -> Self {
293 let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
294 assert!(!inner.is_null(), "Could not copy RocksDB options");
295
296 Self {
297 inner,
298 outlive: self.outlive.clone(),
299 }
300 }
301}
302
303impl Drop for BlockBasedOptions {
304 fn drop(&mut self) {
305 unsafe {
306 ffi::rocksdb_block_based_options_destroy(self.inner);
307 }
308 }
309}
310
311impl Drop for CuckooTableOptions {
312 fn drop(&mut self) {
313 unsafe {
314 ffi::rocksdb_cuckoo_options_destroy(self.inner);
315 }
316 }
317}
318
319impl Drop for FlushOptions {
320 fn drop(&mut self) {
321 unsafe {
322 ffi::rocksdb_flushoptions_destroy(self.inner);
323 }
324 }
325}
326
327impl Drop for WriteOptions {
328 fn drop(&mut self) {
329 unsafe {
330 ffi::rocksdb_writeoptions_destroy(self.inner);
331 }
332 }
333}
334
335impl Drop for LruCacheOptions {
336 fn drop(&mut self) {
337 unsafe {
338 ffi::rocksdb_lru_cache_options_destroy(self.inner);
339 }
340 }
341}
342
343impl Drop for ReadOptions {
344 fn drop(&mut self) {
345 unsafe {
346 ffi::rocksdb_readoptions_destroy(self.inner);
347 }
348 }
349}
350
351impl Drop for IngestExternalFileOptions {
352 fn drop(&mut self) {
353 unsafe {
354 ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
355 }
356 }
357}
358
359impl BlockBasedOptions {
360 /// Approximate size of user data packed per block. Note that the
361 /// block size specified here corresponds to uncompressed data. The
362 /// actual size of the unit read from disk may be smaller if
363 /// compression is enabled. This parameter can be changed dynamically.
364 pub fn set_block_size(&mut self, size: usize) {
365 unsafe {
366 ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
367 }
368 }
369
370 /// Block size for partitioned metadata. Currently applied to indexes when
371 /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
372 /// Note: Since in the current implementation the filters and index partitions
373 /// are aligned, an index/filter block is created when either index or filter
374 /// block size reaches the specified limit.
375 ///
376 /// Note: this limit is currently applied to only index blocks; a filter
377 /// partition is cut right after an index block is cut.
378 pub fn set_metadata_block_size(&mut self, size: usize) {
379 unsafe {
380 ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
381 }
382 }
383
384 /// Note: currently this option requires kTwoLevelIndexSearch to be set as
385 /// well.
386 ///
387 /// Use partitioned full filters for each SST file. This option is
388 /// incompatible with block-based filters.
389 pub fn set_partition_filters(&mut self, size: bool) {
390 unsafe {
391 ffi::rocksdb_block_based_options_set_partition_filters(self.inner, c_uchar::from(size));
392 }
393 }
394
395 /// Sets global cache for blocks (user data is stored in a set of blocks, and
396 /// a block is the unit of reading from disk).
397 ///
398 /// If set, use the specified cache for blocks.
399 /// By default, rocksdb will automatically create and use an 8MB internal cache.
400 pub fn set_block_cache(&mut self, cache: &Cache) {
401 unsafe {
402 ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
403 }
404 self.outlive.block_cache = Some(cache.clone());
405 }
406
407 /// Disable block cache
408 pub fn disable_cache(&mut self) {
409 unsafe {
410 ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
411 }
412 }
413
414 /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
415 /// policy to reduce disk reads.
416 ///
417 /// # Examples
418 ///
419 /// ```
420 /// use rust_rocksdb::BlockBasedOptions;
421 ///
422 /// let mut opts = BlockBasedOptions::default();
423 /// opts.set_bloom_filter(10.0, true);
424 /// ```
425 pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
426 unsafe {
427 let bloom = if block_based {
428 ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
429 } else {
430 ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
431 };
432
433 ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
434 }
435 }
436
437 /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
438 /// policy to reduce disk reads.
439 ///
440 /// Ribbon filters use less memory in exchange for slightly more CPU usage
441 /// compared to an equivalent bloom filter.
442 ///
443 /// # Examples
444 ///
445 /// ```
446 /// use rust_rocksdb::BlockBasedOptions;
447 ///
448 /// let mut opts = BlockBasedOptions::default();
449 /// opts.set_ribbon_filter(10.0);
450 /// ```
451 pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
452 unsafe {
453 let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
454 ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
455 }
456 }
457
458 /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
459 /// policy to reduce disk reads.
460 ///
461 /// Uses Bloom filters before the given level, and Ribbon filters for all
462 /// other levels. This combines the memory savings from Ribbon filters
463 /// with the lower CPU usage of Bloom filters.
464 ///
465 /// # Examples
466 ///
467 /// ```
468 /// use rust_rocksdb::BlockBasedOptions;
469 ///
470 /// let mut opts = BlockBasedOptions::default();
471 /// opts.set_hybrid_ribbon_filter(10.0, 2);
472 /// ```
473 pub fn set_hybrid_ribbon_filter(
474 &mut self,
475 bloom_equivalent_bits_per_key: c_double,
476 bloom_before_level: c_int,
477 ) {
478 unsafe {
479 let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
480 bloom_equivalent_bits_per_key,
481 bloom_before_level,
482 );
483 ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
484 }
485 }
486
487 /// Whether to put index/filter blocks in the block cache. When false,
488 /// each "table reader" object will pre-load index/filter blocks during
489 /// table initialization. Index and filter partition blocks always use
490 /// block cache regardless of this option.
491 ///
492 /// Default: false
493 pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
494 unsafe {
495 ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
496 self.inner,
497 c_uchar::from(v),
498 );
499 }
500 }
501
502 /// If `cache_index_and_filter_blocks` is enabled, cache index and filter
503 /// blocks with high priority. Depending on the block cache implementation,
504 /// index, filter, and other metadata blocks may be less likely to be
505 /// evicted than data blocks when this is set to true.
506 ///
507 /// Default: true.
508 pub fn set_cache_index_and_filter_blocks_with_high_priority(&mut self, v: bool) {
509 unsafe {
510 ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority(
511 self.inner,
512 c_uchar::from(v),
513 );
514 }
515 }
516
517 /// Defines the index type to be used for SS-table lookups.
518 ///
519 /// # Examples
520 ///
521 /// ```
522 /// use rust_rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
523 ///
524 /// let mut opts = Options::default();
525 /// let mut block_opts = BlockBasedOptions::default();
526 /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
527 /// ```
528 pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
529 let index = index_type as i32;
530 unsafe {
531 ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
532 }
533 }
534
535 /// If cache_index_and_filter_blocks is true and the below is true, then
536 /// filter and index blocks are stored in the cache, but a reference is
537 /// held in the "table reader" object so the blocks are pinned and only
538 /// evicted from cache when the table reader is freed.
539 ///
540 /// Default: false.
541 pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
542 unsafe {
543 ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
544 self.inner,
545 c_uchar::from(v),
546 );
547 }
548 }
549
550 /// If cache_index_and_filter_blocks is true and the below is true, then
551 /// the top-level index of partitioned filter and index blocks are stored in
552 /// the cache, but a reference is held in the "table reader" object so the
553 /// blocks are pinned and only evicted from cache when the table reader is
554 /// freed. This is not limited to l0 in LSM tree.
555 ///
556 /// Default: true.
557 pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
558 unsafe {
559 ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
560 self.inner,
561 c_uchar::from(v),
562 );
563 }
564 }
565
566 /// Format version, reserved for backward compatibility.
567 ///
568 /// See full [list](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/table.h#L493-L521)
569 /// of the supported versions.
570 ///
571 /// Default: 6.
572 pub fn set_format_version(&mut self, version: i32) {
573 unsafe {
574 ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
575 }
576 }
577
578 /// Use delta encoding to compress keys in blocks.
579 /// ReadOptions::pin_data requires this option to be disabled.
580 ///
581 /// Default: true
582 pub fn set_use_delta_encoding(&mut self, enable: bool) {
583 unsafe {
584 ffi::rocksdb_block_based_options_set_use_delta_encoding(
585 self.inner,
586 c_uchar::from(enable),
587 );
588 }
589 }
590
591 /// Number of keys between restart points for delta encoding of keys.
592 /// This parameter can be changed dynamically. Most clients should
593 /// leave this parameter alone. The minimum value allowed is 1. Any smaller
594 /// value will be silently overwritten with 1.
595 ///
596 /// Default: 16.
597 pub fn set_block_restart_interval(&mut self, interval: i32) {
598 unsafe {
599 ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
600 }
601 }
602
603 /// Same as block_restart_interval but used for the index block.
604 /// If you don't plan to run RocksDB before version 5.16 and you are
605 /// using `index_block_restart_interval` > 1, you should
606 /// probably set the `format_version` to >= 4 as it would reduce the index size.
607 ///
608 /// Default: 1.
609 pub fn set_index_block_restart_interval(&mut self, interval: i32) {
610 unsafe {
611 ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
612 }
613 }
614
615 /// Set the data block index type for point lookups:
616 /// `DataBlockIndexType::BinarySearch` to use binary search within the data block.
617 /// `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
618 /// the normal binary search.
619 ///
620 /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
621 /// valid only when using `DataBlockIndexType::BinaryAndHash`.
622 ///
623 /// Default: `BinarySearch`
624 /// # Examples
625 ///
626 /// ```
627 /// use rust_rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
628 ///
629 /// let mut opts = Options::default();
630 /// let mut block_opts = BlockBasedOptions::default();
631 /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
632 /// block_opts.set_data_block_hash_ratio(0.85);
633 /// ```
634 pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
635 let index_t = index_type as i32;
636 unsafe {
637 ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
638 }
639 }
640
641 /// Set the data block hash index utilization ratio.
642 ///
643 /// The smaller the utilization ratio, the less hash collisions happen, and so reduce the risk for a
644 /// point lookup to fall back to binary search due to the collisions. A small ratio means faster
645 /// lookup at the price of more space overhead.
646 ///
647 /// Default: 0.75
648 pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
649 unsafe {
650 ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
651 }
652 }
653
654 /// If false, place only prefixes in the filter, not whole keys.
655 ///
656 /// Defaults to true.
657 pub fn set_whole_key_filtering(&mut self, v: bool) {
658 unsafe {
659 ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
660 }
661 }
662
663 /// Use the specified checksum type.
664 /// Newly created table files will be protected with this checksum type.
665 /// Old table files will still be readable, even though they have different checksum type.
666 pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
667 unsafe {
668 ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
669 }
670 }
671
672 /// If true, generate Bloom/Ribbon filters that minimize memory internal
673 /// fragmentation.
674 /// See official [wiki](
675 /// https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter#reducing-internal-fragmentation)
676 /// for more information.
677 ///
678 /// Default: true.
679 /// # Examples
680 ///
681 /// ```
682 /// use rust_rocksdb::BlockBasedOptions;
683 ///
684 /// let mut opts = BlockBasedOptions::default();
685 /// opts.set_bloom_filter(10.0, true);
686 /// opts.set_optimize_filters_for_memory(true);
687 /// ```
688 pub fn set_optimize_filters_for_memory(&mut self, v: bool) {
689 unsafe {
690 ffi::rocksdb_block_based_options_set_optimize_filters_for_memory(
691 self.inner,
692 c_uchar::from(v),
693 );
694 }
695 }
696
697 /// The tier of block-based tables whose top-level index into metadata
698 /// partitions will be pinned. Currently indexes and filters may be
699 /// partitioned.
700 ///
701 /// Note `cache_index_and_filter_blocks` must be true for this option to have
702 /// any effect. Otherwise any top-level index into metadata partitions would be
703 /// held in table reader memory, outside the block cache.
704 ///
705 /// Default: `BlockBasedPinningTier:Fallback`
706 ///
707 /// # Example
708 ///
709 /// ```
710 /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
711 ///
712 /// let mut opts = Options::default();
713 /// let mut block_opts = BlockBasedOptions::default();
714 /// block_opts.set_top_level_index_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
715 /// ```
716 pub fn set_top_level_index_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
717 unsafe {
718 ffi::rocksdb_block_based_options_set_top_level_index_pinning_tier(
719 self.inner,
720 tier as c_int,
721 );
722 }
723 }
724
725 /// The tier of block-based tables whose metadata partitions will be pinned.
726 /// Currently indexes and filters may be partitioned.
727 ///
728 /// Default: `BlockBasedPinningTier:Fallback`
729 ///
730 /// # Example
731 ///
732 /// ```
733 /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
734 ///
735 /// let mut opts = Options::default();
736 /// let mut block_opts = BlockBasedOptions::default();
737 /// block_opts.set_partition_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
738 /// ```
739 pub fn set_partition_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
740 unsafe {
741 ffi::rocksdb_block_based_options_set_partition_pinning_tier(self.inner, tier as c_int);
742 }
743 }
744
745 /// The tier of block-based tables whose unpartitioned metadata blocks will be
746 /// pinned.
747 ///
748 /// Note `cache_index_and_filter_blocks` must be true for this option to have
749 /// any effect. Otherwise the unpartitioned meta-blocks would be held in table
750 /// reader memory, outside the block cache.
751 ///
752 /// Default: `BlockBasedPinningTier:Fallback`
753 ///
754 /// # Example
755 ///
756 /// ```
757 /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
758 ///
759 /// let mut opts = Options::default();
760 /// let mut block_opts = BlockBasedOptions::default();
761 /// block_opts.set_unpartitioned_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
762 /// ```
763 pub fn set_unpartitioned_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
764 unsafe {
765 ffi::rocksdb_block_based_options_set_unpartitioned_pinning_tier(
766 self.inner,
767 tier as c_int,
768 );
769 }
770 }
771}
772
773impl Default for BlockBasedOptions {
774 fn default() -> Self {
775 let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
776 assert!(
777 !block_opts.is_null(),
778 "Could not create RocksDB block based options"
779 );
780
781 Self {
782 inner: block_opts,
783 outlive: BlockBasedOptionsMustOutliveDB::default(),
784 }
785 }
786}
787
788impl CuckooTableOptions {
789 /// Determines the utilization of hash tables. Smaller values
790 /// result in larger hash tables with fewer collisions.
791 /// Default: 0.9
792 pub fn set_hash_ratio(&mut self, ratio: f64) {
793 unsafe {
794 ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
795 }
796 }
797
798 /// A property used by builder to determine the depth to go to
799 /// to search for a path to displace elements in case of
800 /// collision. See Builder.MakeSpaceForKey method. Higher
801 /// values result in more efficient hash tables with fewer
802 /// lookups but take more time to build.
803 /// Default: 100
804 pub fn set_max_search_depth(&mut self, depth: u32) {
805 unsafe {
806 ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
807 }
808 }
809
810 /// In case of collision while inserting, the builder
811 /// attempts to insert in the next cuckoo_block_size
812 /// locations before skipping over to the next Cuckoo hash
813 /// function. This makes lookups more cache friendly in case
814 /// of collisions.
815 /// Default: 5
816 pub fn set_cuckoo_block_size(&mut self, size: u32) {
817 unsafe {
818 ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
819 }
820 }
821
822 /// If this option is enabled, user key is treated as uint64_t and its value
823 /// is used as hash value directly. This option changes builder's behavior.
824 /// Reader ignore this option and behave according to what specified in
825 /// table property.
826 /// Default: false
827 pub fn set_identity_as_first_hash(&mut self, flag: bool) {
828 unsafe {
829 ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
830 }
831 }
832
833 /// If this option is set to true, module is used during hash calculation.
834 /// This often yields better space efficiency at the cost of performance.
835 /// If this option is set to false, # of entries in table is constrained to
836 /// be power of two, and bit and is used to calculate hash, which is faster in general.
837 /// Default: true
838 pub fn set_use_module_hash(&mut self, flag: bool) {
839 unsafe {
840 ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
841 }
842 }
843}
844
845impl Default for CuckooTableOptions {
846 fn default() -> Self {
847 let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
848 assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");
849
850 Self { inner: opts }
851 }
852}
853
854// Verbosity of the LOG.
855#[derive(Debug, Copy, Clone, PartialEq, Eq)]
856#[repr(i32)]
857pub enum LogLevel {
858 Debug = 0,
859 Info,
860 Warn,
861 Error,
862 Fatal,
863 Header,
864}
865
866impl Options {
867 /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
868 /// latest RocksDB options file stored in the specified rocksdb database.
869 ///
870 /// *IMPORTANT*:
871 /// ROCKSDB DOES NOT STORE cf ttl in the options file. If you have set it via
872 /// [`ColumnFamilyDescriptor::new_with_ttl`] then you need to set it again after loading the options file.
873 /// Tll will be set to [`ColumnFamilyTtl::Disabled`] for all column families for your safety.
874 pub fn load_latest<P: AsRef<Path>>(
875 path: P,
876 env: Env,
877 ignore_unknown_options: bool,
878 cache: Cache,
879 ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
880 let path = to_cpath(path)?;
881 let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
882 let mut num_column_families: usize = 0;
883 let mut column_family_names: *mut *mut c_char = null_mut();
884 let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
885 unsafe {
886 ffi_try!(ffi::rocksdb_load_latest_options(
887 path.as_ptr(),
888 env.0.inner,
889 ignore_unknown_options,
890 cache.0.inner.as_ptr(),
891 &mut db_options,
892 &mut num_column_families,
893 &mut column_family_names,
894 &mut column_family_options,
895 ));
896 }
897 let options = Options {
898 inner: db_options,
899 outlive: OptionsMustOutliveDB::default(),
900 };
901 let column_families = unsafe {
902 Options::read_column_descriptors(
903 num_column_families,
904 column_family_names,
905 column_family_options,
906 )
907 };
908 Ok((options, column_families))
909 }
910
911 /// read column descriptors from c pointers
912 #[inline]
913 unsafe fn read_column_descriptors(
914 num_column_families: usize,
915 column_family_names: *mut *mut c_char,
916 column_family_options: *mut *mut ffi::rocksdb_options_t,
917 ) -> Vec<ColumnFamilyDescriptor> {
918 unsafe {
919 let column_family_names_iter =
920 slice::from_raw_parts(column_family_names, num_column_families)
921 .iter()
922 .map(|ptr| from_cstr(*ptr));
923 let column_family_options_iter =
924 slice::from_raw_parts(column_family_options, num_column_families)
925 .iter()
926 .map(|ptr| Options {
927 inner: *ptr,
928 outlive: OptionsMustOutliveDB::default(),
929 });
930 let column_descriptors = column_family_names_iter
931 .zip(column_family_options_iter)
932 .map(|(name, options)| ColumnFamilyDescriptor {
933 name,
934 options,
935 ttl: ColumnFamilyTtl::Disabled,
936 })
937 .collect::<Vec<_>>();
938 // free pointers
939 for ptr in slice::from_raw_parts(column_family_names, num_column_families) {
940 ffi::rocksdb_free(*ptr as *mut c_void);
941 }
942 ffi::rocksdb_free(column_family_names as *mut c_void);
943 ffi::rocksdb_free(column_family_options as *mut c_void);
944 column_descriptors
945 }
946 }
947
948 /// Updates DBOptions with values parsed from a string.
949 ///
950 /// See official [wiki](
951 /// https://github.com/facebook/rocksdb/wiki/Option-String-and-Option-Map#option-string)
952 /// for more information.
953 pub fn set_options_from_string(&mut self, string: impl CStrLike) -> Result<&mut Self, Error> {
954 let c_string = string.into_c_string().unwrap();
955 let mut err: *mut c_char = null_mut();
956 let err_ptr: *mut *mut c_char = &mut err;
957 unsafe {
958 ffi::rocksdb_get_options_from_string(
959 self.inner,
960 c_string.as_ptr(),
961 self.inner,
962 err_ptr,
963 );
964 }
965
966 if err.is_null() {
967 Ok(self)
968 } else {
969 Err(Error::new(format!(
970 "Could not set options from string: {}",
971 crate::ffi_util::error_message(err)
972 )))
973 }
974 }
975
976 /// By default, RocksDB uses only one background thread for flush and
977 /// compaction. Calling this function will set it up such that total of
978 /// `total_threads` is used. Good value for `total_threads` is the number of
979 /// cores. You almost definitely want to call this function if your system is
980 /// bottlenecked by RocksDB.
981 ///
982 /// # Examples
983 ///
984 /// ```
985 /// use rust_rocksdb::Options;
986 ///
987 /// let mut opts = Options::default();
988 /// opts.increase_parallelism(3);
989 /// ```
990 pub fn increase_parallelism(&mut self, parallelism: i32) {
991 unsafe {
992 ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
993 }
994 }
995
996 /// Optimize level style compaction.
997 ///
998 /// Default values for some parameters in `Options` are not optimized for heavy
999 /// workloads and big datasets, which means you might observe write stalls under
1000 /// some conditions.
1001 ///
1002 /// This can be used as one of the starting points for tuning RocksDB options in
1003 /// such cases.
1004 ///
1005 /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
1006 /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
1007 /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override if those
1008 /// parameters were set before.
1009 ///
1010 /// It sets buffer sizes so that memory consumption would be constrained by
1011 /// `memtable_memory_budget`.
1012 pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
1013 unsafe {
1014 ffi::rocksdb_options_optimize_level_style_compaction(
1015 self.inner,
1016 memtable_memory_budget as u64,
1017 );
1018 }
1019 }
1020
1021 /// Optimize universal style compaction.
1022 ///
1023 /// Default values for some parameters in `Options` are not optimized for heavy
1024 /// workloads and big datasets, which means you might observe write stalls under
1025 /// some conditions.
1026 ///
1027 /// This can be used as one of the starting points for tuning RocksDB options in
1028 /// such cases.
1029 ///
1030 /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
1031 /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
1032 /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override if those
1033 /// parameters were set before.
1034 ///
1035 /// It sets buffer sizes so that memory consumption would be constrained by
1036 /// `memtable_memory_budget`.
1037 pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
1038 unsafe {
1039 ffi::rocksdb_options_optimize_universal_style_compaction(
1040 self.inner,
1041 memtable_memory_budget as u64,
1042 );
1043 }
1044 }
1045
1046 /// If true, the database will be created if it is missing.
1047 ///
1048 /// Default: `false`
1049 ///
1050 /// # Examples
1051 ///
1052 /// ```
1053 /// use rust_rocksdb::Options;
1054 ///
1055 /// let mut opts = Options::default();
1056 /// opts.create_if_missing(true);
1057 /// ```
1058 pub fn create_if_missing(&mut self, create_if_missing: bool) {
1059 unsafe {
1060 ffi::rocksdb_options_set_create_if_missing(
1061 self.inner,
1062 c_uchar::from(create_if_missing),
1063 );
1064 }
1065 }
1066
1067 /// If true, any column families that didn't exist when opening the database
1068 /// will be created.
1069 ///
1070 /// Default: `false`
1071 ///
1072 /// # Examples
1073 ///
1074 /// ```
1075 /// use rust_rocksdb::Options;
1076 ///
1077 /// let mut opts = Options::default();
1078 /// opts.create_missing_column_families(true);
1079 /// ```
1080 pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
1081 unsafe {
1082 ffi::rocksdb_options_set_create_missing_column_families(
1083 self.inner,
1084 c_uchar::from(create_missing_cfs),
1085 );
1086 }
1087 }
1088
1089 /// Specifies whether an error should be raised if the database already exists.
1090 ///
1091 /// Default: false
1092 pub fn set_error_if_exists(&mut self, enabled: bool) {
1093 unsafe {
1094 ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
1095 }
1096 }
1097
1098 /// Enable/disable paranoid checks.
1099 ///
1100 /// If true, the implementation will do aggressive checking of the
1101 /// data it is processing and will stop early if it detects any
1102 /// errors. This may have unforeseen ramifications: for example, a
1103 /// corruption of one DB entry may cause a large number of entries to
1104 /// become unreadable or for the entire DB to become unopenable.
1105 /// If any of the writes to the database fails (Put, Delete, Merge, Write),
1106 /// the database will switch to read-only mode and fail all other
1107 /// Write operations.
1108 ///
1109 /// Default: false
1110 pub fn set_paranoid_checks(&mut self, enabled: bool) {
1111 unsafe {
1112 ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
1113 }
1114 }
1115
1116 /// A list of paths where SST files can be put into, with its target size.
1117 /// Newer data is placed into paths specified earlier in the vector while
1118 /// older data gradually moves to paths specified later in the vector.
1119 ///
1120 /// For example, you have a flash device with 10GB allocated for the DB,
1121 /// as well as a hard drive of 2TB, you should config it to be:
1122 /// [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
1123 ///
1124 /// The system will try to guarantee data under each path is close to but
1125 /// not larger than the target size. But current and future file sizes used
1126 /// by determining where to place a file are based on best-effort estimation,
1127 /// which means there is a chance that the actual size under the directory
1128 /// is slightly more than target size under some workloads. User should give
1129 /// some buffer room for those cases.
1130 ///
1131 /// If none of the paths has sufficient room to place a file, the file will
1132 /// be placed to the last path anyway, despite to the target size.
1133 ///
1134 /// Placing newer data to earlier paths is also best-efforts. User should
1135 /// expect user files to be placed in higher levels in some extreme cases.
1136 ///
1137 /// If left empty, only one path will be used, which is `path` passed when
1138 /// opening the DB.
1139 ///
1140 /// Default: empty
1141 pub fn set_db_paths(&mut self, paths: &[DBPath]) {
1142 let mut paths: Vec<_> = paths.iter().map(|path| path.inner.cast_const()).collect();
1143 let num_paths = paths.len();
1144 unsafe {
1145 ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
1146 }
1147 }
1148
1149 /// Use the specified object to interact with the environment,
1150 /// e.g. to read/write files, schedule background work, etc. In the near
1151 /// future, support for doing storage operations such as read/write files
1152 /// through env will be deprecated in favor of file_system.
1153 ///
1154 /// Default: Env::default()
1155 pub fn set_env(&mut self, env: &Env) {
1156 unsafe {
1157 ffi::rocksdb_options_set_env(self.inner, env.0.inner);
1158 }
1159 self.outlive.env = Some(env.clone());
1160 }
1161
1162 /// Sets the compression algorithm that will be used for compressing blocks.
1163 ///
1164 /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
1165 /// snappy feature is not enabled).
1166 ///
1167 /// # Examples
1168 ///
1169 /// ```
1170 /// use rust_rocksdb::{Options, DBCompressionType};
1171 ///
1172 /// let mut opts = Options::default();
1173 /// opts.set_compression_type(DBCompressionType::Snappy);
1174 /// ```
1175 pub fn set_compression_type(&mut self, t: DBCompressionType) {
1176 unsafe {
1177 ffi::rocksdb_options_set_compression(self.inner, t as c_int);
1178 }
1179 }
1180
1181 /// Number of threads for parallel compression.
1182 /// Parallel compression is enabled only if threads > 1.
1183 /// THE FEATURE IS STILL EXPERIMENTAL
1184 ///
1185 /// See [code](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/advanced_options.h#L116-L127)
1186 /// for more information.
1187 ///
1188 /// Default: 1
1189 ///
1190 /// Examples
1191 ///
1192 /// ```
1193 /// use rust_rocksdb::{Options, DBCompressionType};
1194 ///
1195 /// let mut opts = Options::default();
1196 /// opts.set_compression_type(DBCompressionType::Zstd);
1197 /// opts.set_compression_options_parallel_threads(3);
1198 /// ```
1199 pub fn set_compression_options_parallel_threads(&mut self, num: i32) {
1200 unsafe {
1201 ffi::rocksdb_options_set_compression_options_parallel_threads(self.inner, num);
1202 }
1203 }
1204
1205 /// Sets the compression algorithm that will be used for compressing WAL.
1206 ///
1207 /// At present, only ZSTD compression is supported!
1208 ///
1209 /// Default: `DBCompressionType::None`
1210 ///
1211 /// # Examples
1212 ///
1213 /// ```
1214 /// use rust_rocksdb::{Options, DBCompressionType};
1215 ///
1216 /// let mut opts = Options::default();
1217 /// opts.set_wal_compression_type(DBCompressionType::Zstd);
1218 /// // Or None to disable it
1219 /// opts.set_wal_compression_type(DBCompressionType::None);
1220 /// ```
1221 pub fn set_wal_compression_type(&mut self, t: DBCompressionType) {
1222 match t {
1223 DBCompressionType::None | DBCompressionType::Zstd => unsafe {
1224 ffi::rocksdb_options_set_wal_compression(self.inner, t as c_int);
1225 },
1226 other => unimplemented!("{:?} is not supported for WAL compression", other),
1227 }
1228 }
1229
1230 /// Sets the bottom-most compression algorithm that will be used for
1231 /// compressing blocks at the bottom-most level.
1232 ///
1233 /// Note that to actually enable bottom-most compression configuration after
1234 /// setting the compression type, it needs to be enabled by calling
1235 /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
1236 /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with `enabled` argument
1237 /// set to `true`.
1238 ///
1239 /// # Examples
1240 ///
1241 /// ```
1242 /// use rust_rocksdb::{Options, DBCompressionType};
1243 ///
1244 /// let mut opts = Options::default();
1245 /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1246 /// opts.set_bottommost_zstd_max_train_bytes(0, true);
1247 /// ```
1248 pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
1249 unsafe {
1250 ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
1251 }
1252 }
1253
1254 /// Different levels can have different compression policies. There
1255 /// are cases where most lower levels would like to use quick compression
1256 /// algorithms while the higher levels (which have more data) use
1257 /// compression algorithms that have better compression but could
1258 /// be slower. This array, if non-empty, should have an entry for
1259 /// each level of the database; these override the value specified in
1260 /// the previous field 'compression'.
1261 ///
1262 /// # Examples
1263 ///
1264 /// ```
1265 /// use rust_rocksdb::{Options, DBCompressionType};
1266 ///
1267 /// let mut opts = Options::default();
1268 /// opts.set_compression_per_level(&[
1269 /// DBCompressionType::None,
1270 /// DBCompressionType::None,
1271 /// DBCompressionType::Snappy,
1272 /// DBCompressionType::Snappy,
1273 /// DBCompressionType::Snappy
1274 /// ]);
1275 /// ```
1276 pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
1277 unsafe {
1278 let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
1279 ffi::rocksdb_options_set_compression_per_level(
1280 self.inner,
1281 level_types.as_mut_ptr(),
1282 level_types.len() as size_t,
1283 );
1284 }
1285 }
1286
1287 /// Maximum size of dictionaries used to prime the compression library.
1288 /// Enabling dictionary can improve compression ratios when there are
1289 /// repetitions across data blocks.
1290 ///
1291 /// The dictionary is created by sampling the SST file data. If
1292 /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
1293 /// dictionary generator. Otherwise, the random samples are used directly as
1294 /// the dictionary.
1295 ///
1296 /// When compression dictionary is disabled, we compress and write each block
1297 /// before buffering data for the next one. When compression dictionary is
1298 /// enabled, we buffer all SST file data in-memory so we can sample it, as data
1299 /// can only be compressed and written after the dictionary has been finalized.
1300 /// So users of this feature may see increased memory usage.
1301 ///
1302 /// Default: `0`
1303 ///
1304 /// # Examples
1305 ///
1306 /// ```
1307 /// use rust_rocksdb::Options;
1308 ///
1309 /// let mut opts = Options::default();
1310 /// opts.set_compression_options(4, 5, 6, 7);
1311 /// ```
1312 pub fn set_compression_options(
1313 &mut self,
1314 w_bits: c_int,
1315 level: c_int,
1316 strategy: c_int,
1317 max_dict_bytes: c_int,
1318 ) {
1319 unsafe {
1320 ffi::rocksdb_options_set_compression_options(
1321 self.inner,
1322 w_bits,
1323 level,
1324 strategy,
1325 max_dict_bytes,
1326 );
1327 }
1328 }
1329
1330 /// Sets compression options for blocks at the bottom-most level. Meaning
1331 /// of all settings is the same as in [`set_compression_options`](#method.set_compression_options) method but
1332 /// affect only the bottom-most compression which is set using
1333 /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
1334 ///
1335 /// # Examples
1336 ///
1337 /// ```
1338 /// use rust_rocksdb::{Options, DBCompressionType};
1339 ///
1340 /// let mut opts = Options::default();
1341 /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1342 /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
1343 /// ```
1344 pub fn set_bottommost_compression_options(
1345 &mut self,
1346 w_bits: c_int,
1347 level: c_int,
1348 strategy: c_int,
1349 max_dict_bytes: c_int,
1350 enabled: bool,
1351 ) {
1352 unsafe {
1353 ffi::rocksdb_options_set_bottommost_compression_options(
1354 self.inner,
1355 w_bits,
1356 level,
1357 strategy,
1358 max_dict_bytes,
1359 c_uchar::from(enabled),
1360 );
1361 }
1362 }
1363
1364 /// Sets maximum size of training data passed to zstd's dictionary trainer. Using zstd's
1365 /// dictionary trainer can achieve even better compression ratio improvements than using
1366 /// `max_dict_bytes` alone.
1367 ///
1368 /// The training data will be used to generate a dictionary of max_dict_bytes.
1369 ///
1370 /// Default: 0.
1371 pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
1372 unsafe {
1373 ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
1374 }
1375 }
1376
1377 /// Sets maximum size of training data passed to zstd's dictionary trainer
1378 /// when compressing the bottom-most level. Using zstd's dictionary trainer
1379 /// can achieve even better compression ratio improvements than using
1380 /// `max_dict_bytes` alone.
1381 ///
1382 /// The training data will be used to generate a dictionary of
1383 /// `max_dict_bytes`.
1384 ///
1385 /// Default: 0.
1386 pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
1387 unsafe {
1388 ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
1389 self.inner,
1390 value,
1391 c_uchar::from(enabled),
1392 );
1393 }
1394 }
1395
1396 /// If non-zero, we perform bigger reads when doing compaction. If you're
1397 /// running RocksDB on spinning disks, you should set this to at least 2MB.
1398 /// That way RocksDB's compaction is doing sequential instead of random reads.
1399 ///
1400 /// Default: 2 * 1024 * 1024 (2 MB)
1401 pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
1402 unsafe {
1403 ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
1404 }
1405 }
1406
1407 /// Allow RocksDB to pick dynamic base of bytes for levels.
1408 /// With this feature turned on, RocksDB will automatically adjust max bytes for each level.
1409 /// The goal of this feature is to have lower bound on size amplification.
1410 ///
1411 /// Default: false.
1412 pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
1413 unsafe {
1414 ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
1415 self.inner,
1416 c_uchar::from(v),
1417 );
1418 }
1419 }
1420
1421 /// This option has different meanings for different compaction styles:
1422 ///
1423 /// Leveled: files older than `periodic_compaction_seconds` will be picked up
1424 /// for compaction and will be re-written to the same level as they were
1425 /// before if level_compaction_dynamic_level_bytes is disabled. Otherwise,
1426 /// it will rewrite files to the next level except for the last level files
1427 /// to the same level.
1428 ///
1429 /// FIFO: not supported. Setting this option has no effect for FIFO compaction.
1430 ///
1431 /// Universal: when there are files older than `periodic_compaction_seconds`,
1432 /// rocksdb will try to do as large a compaction as possible including the
1433 /// last level. Such compaction is only skipped if only last level is to
1434 /// be compacted and no file in last level is older than
1435 /// `periodic_compaction_seconds`. See more in
1436 /// UniversalCompactionBuilder::PickPeriodicCompaction().
1437 /// For backward compatibility, the effective value of this option takes
1438 /// into account the value of option `ttl`. The logic is as follows:
1439 ///
1440 /// - both options are set to 30 days if they have the default value.
1441 /// - if both options are zero, zero is picked. Otherwise, we take the min
1442 /// value among non-zero options values (i.e. takes the stricter limit).
1443 ///
1444 /// One main use of the feature is to make sure a file goes through compaction
1445 /// filters periodically. Users can also use the feature to clear up SST
1446 /// files using old format.
1447 ///
1448 /// A file's age is computed by looking at file_creation_time or creation_time
1449 /// table properties in order, if they have valid non-zero values; if not, the
1450 /// age is based on the file's last modified time (given by the underlying
1451 /// Env).
1452 ///
1453 /// This option only supports block based table format for any compaction
1454 /// style.
1455 ///
1456 /// unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
1457 ///
1458 /// Values:
1459 /// 0: Turn off Periodic compactions.
1460 /// UINT64_MAX - 1 (0xfffffffffffffffe) is special flag to allow RocksDB to
1461 /// pick default.
1462 ///
1463 /// Default: 30 days if using block based table format + compaction filter +
1464 /// leveled compaction or block based table format + universal compaction.
1465 /// 0 (disabled) otherwise.
1466 ///
1467 pub fn set_periodic_compaction_seconds(&mut self, secs: u64) {
1468 unsafe {
1469 ffi::rocksdb_options_set_periodic_compaction_seconds(self.inner, secs);
1470 }
1471 }
1472
1473 /// When an iterator scans this number of invisible entries (tombstones or
1474 /// hidden puts) from the active memtable during a single iterator operation,
1475 /// we will attempt to flush the memtable. Currently only forward scans are
1476 /// supported (SeekToFirst(), Seek() and Next()).
1477 /// This option helps to reduce the overhead of scanning through a
1478 /// large number of entries in memtable.
1479 /// Users should consider enable deletion-triggered-compaction (see
1480 /// CompactOnDeletionCollectorFactory) together with this option to compact
1481 /// away tombstones after the memtable is flushed.
1482 ///
1483 /// Default: 0 (disabled)
1484 /// Dynamically changeable through the SetOptions() API.
1485 pub fn set_memtable_op_scan_flush_trigger(&mut self, num: u32) {
1486 unsafe {
1487 ffi::rocksdb_options_set_memtable_op_scan_flush_trigger(self.inner, num);
1488 }
1489 }
1490
1491 /// Similar to `memtable_op_scan_flush_trigger`, but this option applies to
1492 /// Next() calls between Seeks or until iterator destruction. If the average
1493 /// of the number of invisible entries scanned from the active memtable, the
1494 /// memtable will be marked for flush.
1495 /// Note that to avoid the case where the window between Seeks is too small,
1496 /// the option only takes effect if the total number of hidden entries scanned
1497 /// within a window is at least `memtable_op_scan_flush_trigger`. So this
1498 /// option is only effective when `memtable_op_scan_flush_trigger` is set.
1499 ///
1500 /// This option should be set to a lower value than
1501 /// `memtable_op_scan_flush_trigger`. It covers the case where an iterator
1502 /// scans through an expensive key range with many invisible entries from the
1503 /// active memtable, but the number of invisible entries per operation does not
1504 /// exceed `memtable_op_scan_flush_trigger`.
1505 ///
1506 /// Default: 0 (disabled)
1507 /// Dynamically changeable through the SetOptions() API.
1508 pub fn set_memtable_avg_op_scan_flush_trigger(&mut self, num: u32) {
1509 unsafe {
1510 ffi::rocksdb_options_set_memtable_avg_op_scan_flush_trigger(self.inner, num);
1511 }
1512 }
1513
1514 /// This option has different meanings for different compaction styles:
1515 ///
1516 /// Leveled: Non-bottom-level files with all keys older than TTL will go
1517 /// through the compaction process. This usually happens in a cascading
1518 /// way so that those entries will be compacted to bottommost level/file.
1519 /// The feature is used to remove stale entries that have been deleted or
1520 /// updated from the file system.
1521 ///
1522 /// FIFO: Files with all keys older than TTL will be deleted. TTL is only
1523 /// supported if option max_open_files is set to -1.
1524 ///
1525 /// Universal: users should only set the option `periodic_compaction_seconds`
1526 /// instead. For backward compatibility, this option has the same
1527 /// meaning as `periodic_compaction_seconds`. See more in comments for
1528 /// `periodic_compaction_seconds` on the interaction between these two
1529 /// options.
1530 ///
1531 /// This option only supports block based table format for any compaction
1532 /// style.
1533 ///
1534 /// unit: seconds. Ex: 1 day = 1 * 24 * 60 * 60
1535 /// 0 means disabling.
1536 /// UINT64_MAX - 1 (0xfffffffffffffffe) is special flag to allow RocksDB to
1537 /// pick default.
1538 ///
1539 /// Default: 30 days if using block based table. 0 (disable) otherwise.
1540 ///
1541 /// Dynamically changeable
1542 /// Note that dynamically changing this option only works for leveled and FIFO
1543 /// compaction. For universal compaction, dynamically changing this option has
1544 /// no effect, users should dynamically change `periodic_compaction_seconds`
1545 /// instead.
1546 pub fn set_ttl(&mut self, secs: u64) {
1547 unsafe {
1548 ffi::rocksdb_options_set_ttl(self.inner, secs);
1549 }
1550 }
1551
1552 pub fn set_merge_operator_associative<F: MergeFn + Clone>(
1553 &mut self,
1554 name: impl CStrLike,
1555 full_merge_fn: F,
1556 ) {
1557 let cb = Box::new(MergeOperatorCallback {
1558 name: name.into_c_string().unwrap(),
1559 full_merge_fn: full_merge_fn.clone(),
1560 partial_merge_fn: full_merge_fn,
1561 });
1562
1563 unsafe {
1564 let mo = ffi::rocksdb_mergeoperator_create(
1565 Box::into_raw(cb).cast::<c_void>(),
1566 Some(merge_operator::destructor_callback::<F, F>),
1567 Some(full_merge_callback::<F, F>),
1568 Some(partial_merge_callback::<F, F>),
1569 Some(merge_operator::delete_callback),
1570 Some(merge_operator::name_callback::<F, F>),
1571 );
1572 ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1573 }
1574 }
1575
1576 pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
1577 &mut self,
1578 name: impl CStrLike,
1579 full_merge_fn: F,
1580 partial_merge_fn: PF,
1581 ) {
1582 let cb = Box::new(MergeOperatorCallback {
1583 name: name.into_c_string().unwrap(),
1584 full_merge_fn,
1585 partial_merge_fn,
1586 });
1587
1588 unsafe {
1589 let mo = ffi::rocksdb_mergeoperator_create(
1590 Box::into_raw(cb).cast::<c_void>(),
1591 Some(merge_operator::destructor_callback::<F, PF>),
1592 Some(full_merge_callback::<F, PF>),
1593 Some(partial_merge_callback::<F, PF>),
1594 Some(merge_operator::delete_callback),
1595 Some(merge_operator::name_callback::<F, PF>),
1596 );
1597 ffi::rocksdb_options_set_merge_operator(self.inner, mo);
1598 }
1599 }
1600
1601 #[deprecated(
1602 since = "0.5.0",
1603 note = "add_merge_operator has been renamed to set_merge_operator"
1604 )]
1605 pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
1606 self.set_merge_operator_associative(name, merge_fn);
1607 }
1608
1609 /// Sets a compaction filter used to determine if entries should be kept, changed,
1610 /// or removed during compaction.
1611 ///
1612 /// An example use case is to remove entries with an expired TTL.
1613 ///
1614 /// If you take a snapshot of the database, only values written since the last
1615 /// snapshot will be passed through the compaction filter.
1616 ///
1617 /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
1618 /// simultaneously.
1619 pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
1620 where
1621 F: CompactionFilterFn + Send + 'static,
1622 {
1623 let cb = Box::new(CompactionFilterCallback {
1624 name: name.into_c_string().unwrap(),
1625 filter_fn,
1626 });
1627
1628 unsafe {
1629 let cf = ffi::rocksdb_compactionfilter_create(
1630 Box::into_raw(cb).cast::<c_void>(),
1631 Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
1632 Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
1633 Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
1634 );
1635 ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
1636 }
1637 }
1638
1639 pub fn add_event_listener<L: EventListener>(&mut self, l: L) {
1640 let handle = new_event_listener(l);
1641 unsafe { ffi::rocksdb_options_add_eventlistener(self.inner, handle.inner) }
1642 }
1643
1644 /// This is a factory that provides compaction filter objects which allow
1645 /// an application to modify/delete a key-value during background compaction.
1646 ///
1647 /// A new filter will be created on each compaction run. If multithreaded
1648 /// compaction is being used, each created CompactionFilter will only be used
1649 /// from a single thread and so does not need to be thread-safe.
1650 ///
1651 /// Default: nullptr
1652 pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
1653 where
1654 F: CompactionFilterFactory + 'static,
1655 {
1656 let factory = Box::new(factory);
1657
1658 unsafe {
1659 let cff = ffi::rocksdb_compactionfilterfactory_create(
1660 Box::into_raw(factory).cast::<c_void>(),
1661 Some(compaction_filter_factory::destructor_callback::<F>),
1662 Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
1663 Some(compaction_filter_factory::name_callback::<F>),
1664 );
1665
1666 ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
1667 }
1668 }
1669
1670 /// Sets the comparator used to define the order of keys in the table.
1671 /// Default: a comparator that uses lexicographic byte-wise ordering
1672 ///
1673 /// The client must ensure that the comparator supplied here has the same
1674 /// name and orders keys *exactly* the same as the comparator provided to
1675 /// previous open calls on the same DB.
1676 pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
1677 let cb = Box::new(ComparatorCallback {
1678 name: name.into_c_string().unwrap(),
1679 compare_fn,
1680 });
1681
1682 unsafe {
1683 let cmp = ffi::rocksdb_comparator_create(
1684 Box::into_raw(cb).cast::<c_void>(),
1685 Some(ComparatorCallback::destructor_callback),
1686 Some(ComparatorCallback::compare_callback),
1687 Some(ComparatorCallback::name_callback),
1688 );
1689 ffi::rocksdb_options_set_comparator(self.inner, cmp);
1690 }
1691 }
1692
1693 /// Sets the comparator that are timestamp-aware, used to define the order of keys in the table,
1694 /// taking timestamp into consideration.
1695 /// Find more information on timestamp-aware comparator on [here](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp)
1696 ///
1697 /// The client must ensure that the comparator supplied here has the same
1698 /// name and orders keys *exactly* the same as the comparator provided to
1699 /// previous open calls on the same DB.
1700 pub fn set_comparator_with_ts(
1701 &mut self,
1702 name: impl CStrLike,
1703 timestamp_size: usize,
1704 compare_fn: Box<CompareFn>,
1705 compare_ts_fn: Box<CompareTsFn>,
1706 compare_without_ts_fn: Box<CompareWithoutTsFn>,
1707 ) {
1708 let cb = Box::new(ComparatorWithTsCallback {
1709 name: name.into_c_string().unwrap(),
1710 compare_fn,
1711 compare_ts_fn,
1712 compare_without_ts_fn,
1713 });
1714
1715 unsafe {
1716 let cmp = ffi::rocksdb_comparator_with_ts_create(
1717 Box::into_raw(cb).cast::<c_void>(),
1718 Some(ComparatorWithTsCallback::destructor_callback),
1719 Some(ComparatorWithTsCallback::compare_callback),
1720 Some(ComparatorWithTsCallback::compare_ts_callback),
1721 Some(ComparatorWithTsCallback::compare_without_ts_callback),
1722 Some(ComparatorWithTsCallback::name_callback),
1723 timestamp_size,
1724 );
1725 ffi::rocksdb_options_set_comparator(self.inner, cmp);
1726 }
1727 }
1728
1729 pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
1730 unsafe {
1731 ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
1732 }
1733 }
1734
1735 // Use this if you don't need to keep the data sorted, i.e. you'll never use
1736 // an iterator, only Put() and Get() API calls
1737 //
1738 pub fn optimize_for_point_lookup(&mut self, block_cache_size_mb: u64) {
1739 unsafe {
1740 ffi::rocksdb_options_optimize_for_point_lookup(self.inner, block_cache_size_mb);
1741 }
1742 }
1743
1744 /// Sets the optimize_filters_for_hits flag
1745 ///
1746 /// Default: `false`
1747 ///
1748 /// # Examples
1749 ///
1750 /// ```
1751 /// use rust_rocksdb::Options;
1752 ///
1753 /// let mut opts = Options::default();
1754 /// opts.set_optimize_filters_for_hits(true);
1755 /// ```
1756 pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
1757 unsafe {
1758 ffi::rocksdb_options_set_optimize_filters_for_hits(
1759 self.inner,
1760 c_int::from(optimize_for_hits),
1761 );
1762 }
1763 }
1764
1765 /// Sets the periodicity when obsolete files get deleted.
1766 ///
1767 /// The files that get out of scope by compaction
1768 /// process will still get automatically delete on every compaction,
1769 /// regardless of this setting.
1770 ///
1771 /// Default: 6 hours
1772 pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
1773 unsafe {
1774 ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
1775 }
1776 }
1777
1778 /// Prepare the DB for bulk loading.
1779 ///
1780 /// All data will be in level 0 without any automatic compaction.
1781 /// It's recommended to manually call CompactRange(NULL, NULL) before reading
1782 /// from the database, because otherwise the read can be very slow.
1783 pub fn prepare_for_bulk_load(&mut self) {
1784 unsafe {
1785 ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
1786 }
1787 }
1788
1789 /// Sets the number of open files that can be used by the DB. You may need to
1790 /// increase this if your database has a large working set. Value `-1` means
1791 /// files opened are always kept open. You can estimate number of files based
1792 /// on target_file_size_base and target_file_size_multiplier for level-based
1793 /// compaction. For universal-style compaction, you can usually set it to `-1`.
1794 ///
1795 /// Default: `-1`
1796 ///
1797 /// # Examples
1798 ///
1799 /// ```
1800 /// use rust_rocksdb::Options;
1801 ///
1802 /// let mut opts = Options::default();
1803 /// opts.set_max_open_files(10);
1804 /// ```
1805 pub fn set_max_open_files(&mut self, nfiles: c_int) {
1806 unsafe {
1807 ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
1808 }
1809 }
1810
1811 /// If max_open_files is -1, DB will open all files on DB::Open(). You can
1812 /// use this option to increase the number of threads used to open the files.
1813 /// Default: 16
1814 pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
1815 unsafe {
1816 ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
1817 }
1818 }
1819
1820 /// By default, writes to stable storage use fdatasync (on platforms
1821 /// where this function is available). If this option is true,
1822 /// fsync is used instead.
1823 ///
1824 /// fsync and fdatasync are equally safe for our purposes and fdatasync is
1825 /// faster, so it is rarely necessary to set this option. It is provided
1826 /// as a workaround for kernel/filesystem bugs, such as one that affected
1827 /// fdatasync with ext4 in kernel versions prior to 3.7.
1828 ///
1829 /// Default: `false`
1830 ///
1831 /// # Examples
1832 ///
1833 /// ```
1834 /// use rust_rocksdb::Options;
1835 ///
1836 /// let mut opts = Options::default();
1837 /// opts.set_use_fsync(true);
1838 /// ```
1839 pub fn set_use_fsync(&mut self, useit: bool) {
1840 unsafe {
1841 ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
1842 }
1843 }
1844
1845 /// Returns the value of the `use_fsync` option.
1846 pub fn get_use_fsync(&self) -> bool {
1847 let val = unsafe { ffi::rocksdb_options_get_use_fsync(self.inner) };
1848 val != 0
1849 }
1850
1851 /// Specifies the absolute info LOG dir.
1852 ///
1853 /// If it is empty, the log files will be in the same dir as data.
1854 /// If it is non empty, the log files will be in the specified dir,
1855 /// and the db data dir's absolute path will be used as the log file
1856 /// name's prefix.
1857 ///
1858 /// Default: empty
1859 pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
1860 let p = to_cpath(path).unwrap();
1861 unsafe {
1862 ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
1863 }
1864 }
1865
1866 /// Specifies the log level.
1867 /// Consider the `LogLevel` enum for a list of possible levels.
1868 ///
1869 /// Default: Info
1870 ///
1871 /// # Examples
1872 ///
1873 /// ```
1874 /// use rust_rocksdb::{Options, LogLevel};
1875 ///
1876 /// let mut opts = Options::default();
1877 /// opts.set_log_level(LogLevel::Warn);
1878 /// ```
1879 pub fn set_log_level(&mut self, level: LogLevel) {
1880 unsafe {
1881 ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
1882 }
1883 }
1884
1885 /// Allows OS to incrementally sync files to disk while they are being
1886 /// written, asynchronously, in the background. This operation can be used
1887 /// to smooth out write I/Os over time. Users shouldn't rely on it for
1888 /// persistency guarantee.
1889 /// Issue one request for every bytes_per_sync written. `0` turns it off.
1890 ///
1891 /// Default: `0`
1892 ///
1893 /// You may consider using rate_limiter to regulate write rate to device.
1894 /// When rate limiter is enabled, it automatically enables bytes_per_sync
1895 /// to 1MB.
1896 ///
1897 /// This option applies to table files
1898 ///
1899 /// # Examples
1900 ///
1901 /// ```
1902 /// use rust_rocksdb::Options;
1903 ///
1904 /// let mut opts = Options::default();
1905 /// opts.set_bytes_per_sync(1024 * 1024);
1906 /// ```
1907 pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
1908 unsafe {
1909 ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
1910 }
1911 }
1912
1913 /// Same as bytes_per_sync, but applies to WAL files.
1914 ///
1915 /// Default: 0, turned off
1916 ///
1917 /// Dynamically changeable through SetDBOptions() API.
1918 pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
1919 unsafe {
1920 ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
1921 }
1922 }
1923
1924 /// Sets the maximum buffer size that is used by WritableFileWriter.
1925 ///
1926 /// On Windows, we need to maintain an aligned buffer for writes.
1927 /// We allow the buffer to grow until it's size hits the limit in buffered
1928 /// IO and fix the buffer size when using direct IO to ensure alignment of
1929 /// write requests if the logical sector size is unusual
1930 ///
1931 /// Default: 1024 * 1024 (1 MB)
1932 ///
1933 /// Dynamically changeable through SetDBOptions() API.
1934 pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
1935 unsafe {
1936 ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
1937 }
1938 }
1939
1940 /// If true, allow multi-writers to update mem tables in parallel.
1941 /// Only some memtable_factory-s support concurrent writes; currently it
1942 /// is implemented only for SkipListFactory. Concurrent memtable writes
1943 /// are not compatible with inplace_update_support or filter_deletes.
1944 /// It is strongly recommended to set enable_write_thread_adaptive_yield
1945 /// if you are going to use this feature.
1946 ///
1947 /// Default: true
1948 ///
1949 /// # Examples
1950 ///
1951 /// ```
1952 /// use rust_rocksdb::Options;
1953 ///
1954 /// let mut opts = Options::default();
1955 /// opts.set_allow_concurrent_memtable_write(false);
1956 /// ```
1957 pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
1958 unsafe {
1959 ffi::rocksdb_options_set_allow_concurrent_memtable_write(
1960 self.inner,
1961 c_uchar::from(allow),
1962 );
1963 }
1964 }
1965
1966 /// If true, threads synchronizing with the write batch group leader will wait for up to
1967 /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
1968 /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
1969 /// is enabled.
1970 ///
1971 /// Default: true
1972 pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
1973 unsafe {
1974 ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
1975 self.inner,
1976 c_uchar::from(enabled),
1977 );
1978 }
1979 }
1980
1981 /// Specifies whether an iteration->Next() sequentially skips over keys with the same user-key or not.
1982 ///
1983 /// This number specifies the number of keys (with the same userkey)
1984 /// that will be sequentially skipped before a reseek is issued.
1985 ///
1986 /// Default: 8
1987 pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
1988 unsafe {
1989 ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
1990 }
1991 }
1992
1993 /// Enable direct I/O mode for reading
1994 /// they may or may not improve performance depending on the use case
1995 ///
1996 /// Files will be opened in "direct I/O" mode
1997 /// which means that data read from the disk will not be cached or
1998 /// buffered. The hardware buffer of the devices may however still
1999 /// be used. Memory mapped files are not impacted by these parameters.
2000 ///
2001 /// Default: false
2002 ///
2003 /// # Examples
2004 ///
2005 /// ```
2006 /// use rust_rocksdb::Options;
2007 ///
2008 /// let mut opts = Options::default();
2009 /// opts.set_use_direct_reads(true);
2010 /// ```
2011 pub fn set_use_direct_reads(&mut self, enabled: bool) {
2012 unsafe {
2013 ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
2014 }
2015 }
2016
2017 /// Enable direct I/O mode for flush and compaction
2018 ///
2019 /// Files will be opened in "direct I/O" mode
2020 /// which means that data written to the disk will not be cached or
2021 /// buffered. The hardware buffer of the devices may however still
2022 /// be used. Memory mapped files are not impacted by these parameters.
2023 /// they may or may not improve performance depending on the use case
2024 ///
2025 /// Default: false
2026 ///
2027 /// # Examples
2028 ///
2029 /// ```
2030 /// use rust_rocksdb::Options;
2031 ///
2032 /// let mut opts = Options::default();
2033 /// opts.set_use_direct_io_for_flush_and_compaction(true);
2034 /// ```
2035 pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
2036 unsafe {
2037 ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
2038 self.inner,
2039 c_uchar::from(enabled),
2040 );
2041 }
2042 }
2043
2044 /// Enable/disable child process inherit open files.
2045 ///
2046 /// Default: true
2047 pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
2048 unsafe {
2049 ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
2050 }
2051 }
2052
2053 /// Hints to the OS that it should not buffer disk I/O. Enabling this
2054 /// parameter may improve performance but increases pressure on the
2055 /// system cache.
2056 ///
2057 /// The exact behavior of this parameter is platform dependent.
2058 ///
2059 /// On POSIX systems, after RocksDB reads data from disk it will
2060 /// mark the pages as "unneeded". The operating system may or may not
2061 /// evict these pages from memory, reducing pressure on the system
2062 /// cache. If the disk block is requested again this can result in
2063 /// additional disk I/O.
2064 ///
2065 /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
2066 /// which means that data read from the disk will not be cached or
2067 /// bufferized. The hardware buffer of the devices may however still
2068 /// be used. Memory mapped files are not impacted by this parameter.
2069 ///
2070 /// Default: true
2071 ///
2072 /// # Examples
2073 ///
2074 /// ```
2075 /// use rust_rocksdb::Options;
2076 ///
2077 /// let mut opts = Options::default();
2078 /// #[allow(deprecated)]
2079 /// opts.set_allow_os_buffer(false);
2080 /// ```
2081 #[deprecated(
2082 since = "0.7.0",
2083 note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
2084 )]
2085 pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
2086 self.set_use_direct_reads(!is_allow);
2087 self.set_use_direct_io_for_flush_and_compaction(!is_allow);
2088 }
2089
2090 /// Sets the number of shards used for table cache.
2091 ///
2092 /// Default: `6`
2093 ///
2094 /// # Examples
2095 ///
2096 /// ```
2097 /// use rust_rocksdb::Options;
2098 ///
2099 /// let mut opts = Options::default();
2100 /// opts.set_table_cache_num_shard_bits(4);
2101 /// ```
2102 pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
2103 unsafe {
2104 ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
2105 }
2106 }
2107
2108 /// By default target_file_size_multiplier is 1, which means
2109 /// by default files in different levels will have similar size.
2110 ///
2111 /// Dynamically changeable through SetOptions() API
2112 pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
2113 unsafe {
2114 ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
2115 }
2116 }
2117
2118 /// Sets the minimum number of write buffers that will be merged
2119 /// before writing to storage. If set to `1`, then
2120 /// all write buffers are flushed to L0 as individual files and this increases
2121 /// read amplification because a get request has to check in all of these
2122 /// files. Also, an in-memory merge may result in writing lesser
2123 /// data to storage if there are duplicate records in each of these
2124 /// individual write buffers.
2125 ///
2126 /// Default: `1`
2127 ///
2128 /// # Examples
2129 ///
2130 /// ```
2131 /// use rust_rocksdb::Options;
2132 ///
2133 /// let mut opts = Options::default();
2134 /// opts.set_min_write_buffer_number(2);
2135 /// ```
2136 pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
2137 unsafe {
2138 ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
2139 }
2140 }
2141
2142 /// Sets the maximum number of write buffers that are built up in memory.
2143 /// The default and the minimum number is 2, so that when 1 write buffer
2144 /// is being flushed to storage, new writes can continue to the other
2145 /// write buffer.
2146 /// If max_write_buffer_number > 3, writing will be slowed down to
2147 /// options.delayed_write_rate if we are writing to the last write buffer
2148 /// allowed.
2149 ///
2150 /// Default: `2`
2151 ///
2152 /// # Examples
2153 ///
2154 /// ```
2155 /// use rust_rocksdb::Options;
2156 ///
2157 /// let mut opts = Options::default();
2158 /// opts.set_max_write_buffer_number(4);
2159 /// ```
2160 pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
2161 unsafe {
2162 ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
2163 }
2164 }
2165
2166 /// Sets the amount of data to build up in memory (backed by an unsorted log
2167 /// on disk) before converting to a sorted on-disk file.
2168 ///
2169 /// Larger values increase performance, especially during bulk loads.
2170 /// Up to max_write_buffer_number write buffers may be held in memory
2171 /// at the same time,
2172 /// so you may wish to adjust this parameter to control memory usage.
2173 /// Also, a larger write buffer will result in a longer recovery time
2174 /// the next time the database is opened.
2175 ///
2176 /// Note that write_buffer_size is enforced per column family.
2177 /// See db_write_buffer_size for sharing memory across column families.
2178 ///
2179 /// Default: `0x4000000` (64MiB)
2180 ///
2181 /// Dynamically changeable through SetOptions() API
2182 ///
2183 /// # Examples
2184 ///
2185 /// ```
2186 /// use rust_rocksdb::Options;
2187 ///
2188 /// let mut opts = Options::default();
2189 /// opts.set_write_buffer_size(128 * 1024 * 1024);
2190 /// ```
2191 pub fn set_write_buffer_size(&mut self, size: usize) {
2192 unsafe {
2193 ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
2194 }
2195 }
2196
2197 /// Amount of data to build up in memtables across all column
2198 /// families before writing to disk.
2199 ///
2200 /// This is distinct from write_buffer_size, which enforces a limit
2201 /// for a single memtable.
2202 ///
2203 /// This feature is disabled by default. Specify a non-zero value
2204 /// to enable it.
2205 ///
2206 /// Default: 0 (disabled)
2207 ///
2208 /// # Examples
2209 ///
2210 /// ```
2211 /// use rust_rocksdb::Options;
2212 ///
2213 /// let mut opts = Options::default();
2214 /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
2215 /// ```
2216 pub fn set_db_write_buffer_size(&mut self, size: usize) {
2217 unsafe {
2218 ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
2219 }
2220 }
2221
2222 /// Control maximum total data size for a level.
2223 /// max_bytes_for_level_base is the max total for level-1.
2224 /// Maximum number of bytes for level L can be calculated as
2225 /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
2226 /// For example, if max_bytes_for_level_base is 200MB, and if
2227 /// max_bytes_for_level_multiplier is 10, total data size for level-1
2228 /// will be 200MB, total file size for level-2 will be 2GB,
2229 /// and total file size for level-3 will be 20GB.
2230 ///
2231 /// Default: `0x10000000` (256MiB).
2232 ///
2233 /// Dynamically changeable through SetOptions() API
2234 ///
2235 /// # Examples
2236 ///
2237 /// ```
2238 /// use rust_rocksdb::Options;
2239 ///
2240 /// let mut opts = Options::default();
2241 /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
2242 /// ```
2243 pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
2244 unsafe {
2245 ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
2246 }
2247 }
2248
2249 /// Default: `10`
2250 ///
2251 /// # Examples
2252 ///
2253 /// ```
2254 /// use rust_rocksdb::Options;
2255 ///
2256 /// let mut opts = Options::default();
2257 /// opts.set_max_bytes_for_level_multiplier(4.0);
2258 /// ```
2259 pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
2260 unsafe {
2261 ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
2262 }
2263 }
2264
2265 /// The manifest file is rolled over on reaching this limit.
2266 /// The older manifest file be deleted.
2267 /// The default value is MAX_INT so that roll-over does not take place.
2268 ///
2269 /// # Examples
2270 ///
2271 /// ```
2272 /// use rust_rocksdb::Options;
2273 ///
2274 /// let mut opts = Options::default();
2275 /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
2276 /// ```
2277 pub fn set_max_manifest_file_size(&mut self, size: usize) {
2278 unsafe {
2279 ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
2280 }
2281 }
2282
2283 /// Sets the target file size for compaction.
2284 /// target_file_size_base is per-file size for level-1.
2285 /// Target file size for level L can be calculated by
2286 /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
2287 /// For example, if target_file_size_base is 2MB and
2288 /// target_file_size_multiplier is 10, then each file on level-1 will
2289 /// be 2MB, and each file on level 2 will be 20MB,
2290 /// and each file on level-3 will be 200MB.
2291 ///
2292 /// Default: `0x4000000` (64MiB)
2293 ///
2294 /// Dynamically changeable through SetOptions() API
2295 ///
2296 /// # Examples
2297 ///
2298 /// ```
2299 /// use rust_rocksdb::Options;
2300 ///
2301 /// let mut opts = Options::default();
2302 /// opts.set_target_file_size_base(128 * 1024 * 1024);
2303 /// ```
2304 pub fn set_target_file_size_base(&mut self, size: u64) {
2305 unsafe {
2306 ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
2307 }
2308 }
2309
2310 /// Sets the minimum number of write buffers that will be merged together
2311 /// before writing to storage. If set to `1`, then
2312 /// all write buffers are flushed to L0 as individual files and this increases
2313 /// read amplification because a get request has to check in all of these
2314 /// files. Also, an in-memory merge may result in writing lesser
2315 /// data to storage if there are duplicate records in each of these
2316 /// individual write buffers.
2317 ///
2318 /// Default: `1`
2319 ///
2320 /// # Examples
2321 ///
2322 /// ```
2323 /// use rust_rocksdb::Options;
2324 ///
2325 /// let mut opts = Options::default();
2326 /// opts.set_min_write_buffer_number_to_merge(2);
2327 /// ```
2328 pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
2329 unsafe {
2330 ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
2331 }
2332 }
2333
2334 /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
2335 /// level-0 compaction will not be triggered by number of files at all.
2336 ///
2337 /// Default: `4`
2338 ///
2339 /// Dynamically changeable through SetOptions() API
2340 ///
2341 /// # Examples
2342 ///
2343 /// ```
2344 /// use rust_rocksdb::Options;
2345 ///
2346 /// let mut opts = Options::default();
2347 /// opts.set_level_zero_file_num_compaction_trigger(8);
2348 /// ```
2349 pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2350 unsafe {
2351 ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2352 }
2353 }
2354
2355 /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
2356 /// point. A value < `0` means that no writing slowdown will be triggered by
2357 /// number of files in level-0.
2358 ///
2359 /// Default: `20`
2360 ///
2361 /// Dynamically changeable through SetOptions() API
2362 ///
2363 /// # Examples
2364 ///
2365 /// ```
2366 /// use rust_rocksdb::Options;
2367 ///
2368 /// let mut opts = Options::default();
2369 /// opts.set_level_zero_slowdown_writes_trigger(10);
2370 /// ```
2371 pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2372 unsafe {
2373 ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2374 }
2375 }
2376
2377 /// Sets the maximum number of level-0 files. We stop writes at this point.
2378 ///
2379 /// Default: `24`
2380 ///
2381 /// Dynamically changeable through SetOptions() API
2382 ///
2383 /// # Examples
2384 ///
2385 /// ```
2386 /// use rust_rocksdb::Options;
2387 ///
2388 /// let mut opts = Options::default();
2389 /// opts.set_level_zero_stop_writes_trigger(48);
2390 /// ```
2391 pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2392 unsafe {
2393 ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2394 }
2395 }
2396
2397 /// Sets the compaction style.
2398 ///
2399 /// Default: DBCompactionStyle::Level
2400 ///
2401 /// # Examples
2402 ///
2403 /// ```
2404 /// use rust_rocksdb::{Options, DBCompactionStyle};
2405 ///
2406 /// let mut opts = Options::default();
2407 /// opts.set_compaction_style(DBCompactionStyle::Universal);
2408 /// ```
2409 pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2410 unsafe {
2411 ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2412 }
2413 }
2414
2415 /// Sets the options needed to support Universal Style compactions.
2416 pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2417 unsafe {
2418 ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2419 }
2420 }
2421
2422 /// Sets the options for FIFO compaction style.
2423 pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2424 unsafe {
2425 ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2426 }
2427 }
2428
2429 /// Sets unordered_write to true trades higher write throughput with
2430 /// relaxing the immutability guarantee of snapshots. This violates the
2431 /// repeatability one expects from ::Get from a snapshot, as well as
2432 /// ::MultiGet and Iterator's consistent-point-in-time view property.
2433 /// If the application cannot tolerate the relaxed guarantees, it can implement
2434 /// its own mechanisms to work around that and yet benefit from the higher
2435 /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2436 /// two_write_queues=true is one way to achieve immutable snapshots despite
2437 /// unordered_write.
2438 ///
2439 /// By default, i.e., when it is false, rocksdb does not advance the sequence
2440 /// number for new snapshots unless all the writes with lower sequence numbers
2441 /// are already finished. This provides the immutability that we expect from
2442 /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2443 /// snapshots, the snapshot immutability results into Iterator and MultiGet
2444 /// offering consistent-point-in-time view. If set to true, although
2445 /// Read-Your-Own-Write property is still provided, the snapshot immutability
2446 /// property is relaxed: the writes issued after the snapshot is obtained (with
2447 /// larger sequence numbers) will be still not visible to the reads from that
2448 /// snapshot, however, there still might be pending writes (with lower sequence
2449 /// number) that will change the state visible to the snapshot after they are
2450 /// landed to the memtable.
2451 ///
2452 /// Default: false
2453 pub fn set_unordered_write(&mut self, unordered: bool) {
2454 unsafe {
2455 ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2456 }
2457 }
2458
2459 /// Sets maximum number of threads that will
2460 /// concurrently perform a compaction job by breaking it into multiple,
2461 /// smaller ones that are run simultaneously.
2462 ///
2463 /// Default: 1 (i.e. no subcompactions)
2464 pub fn set_max_subcompactions(&mut self, num: u32) {
2465 unsafe {
2466 ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2467 }
2468 }
2469
2470 /// Sets maximum number of concurrent background jobs
2471 /// (compactions and flushes).
2472 ///
2473 /// Default: 2
2474 ///
2475 /// Dynamically changeable through SetDBOptions() API.
2476 pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2477 unsafe {
2478 ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2479 }
2480 }
2481
2482 /// Sets the maximum number of concurrent background compaction jobs, submitted to
2483 /// the default LOW priority thread pool.
2484 /// We first try to schedule compactions based on
2485 /// `base_background_compactions`. If the compaction cannot catch up , we
2486 /// will increase number of compaction threads up to
2487 /// `max_background_compactions`.
2488 ///
2489 /// If you're increasing this, also consider increasing number of threads in
2490 /// LOW priority thread pool. For more information, see
2491 /// Env::SetBackgroundThreads
2492 ///
2493 /// Default: `1`
2494 ///
2495 /// # Examples
2496 ///
2497 /// ```
2498 /// use rust_rocksdb::Options;
2499 ///
2500 /// let mut opts = Options::default();
2501 /// #[allow(deprecated)]
2502 /// opts.set_max_background_compactions(2);
2503 /// ```
2504 #[deprecated(
2505 since = "0.15.0",
2506 note = "RocksDB automatically decides this based on the value of max_background_jobs"
2507 )]
2508 pub fn set_max_background_compactions(&mut self, n: c_int) {
2509 unsafe {
2510 ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2511 }
2512 }
2513
2514 /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2515 /// the HIGH priority thread pool.
2516 ///
2517 /// By default, all background jobs (major compaction and memtable flush) go
2518 /// to the LOW priority pool. If this option is set to a positive number,
2519 /// memtable flush jobs will be submitted to the HIGH priority pool.
2520 /// It is important when the same Env is shared by multiple db instances.
2521 /// Without a separate pool, long running major compaction jobs could
2522 /// potentially block memtable flush jobs of other db instances, leading to
2523 /// unnecessary Put stalls.
2524 ///
2525 /// If you're increasing this, also consider increasing number of threads in
2526 /// HIGH priority thread pool. For more information, see
2527 /// Env::SetBackgroundThreads
2528 ///
2529 /// Default: `1`
2530 ///
2531 /// # Examples
2532 ///
2533 /// ```
2534 /// use rust_rocksdb::Options;
2535 ///
2536 /// let mut opts = Options::default();
2537 /// #[allow(deprecated)]
2538 /// opts.set_max_background_flushes(2);
2539 /// ```
2540 #[deprecated(
2541 since = "0.15.0",
2542 note = "RocksDB automatically decides this based on the value of max_background_jobs"
2543 )]
2544 pub fn set_max_background_flushes(&mut self, n: c_int) {
2545 unsafe {
2546 ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2547 }
2548 }
2549
2550 /// Disables automatic compactions. Manual compactions can still
2551 /// be issued on this column family
2552 ///
2553 /// Default: `false`
2554 ///
2555 /// Dynamically changeable through SetOptions() API
2556 ///
2557 /// # Examples
2558 ///
2559 /// ```
2560 /// use rust_rocksdb::Options;
2561 ///
2562 /// let mut opts = Options::default();
2563 /// opts.set_disable_auto_compactions(true);
2564 /// ```
2565 pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2566 unsafe {
2567 ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2568 }
2569 }
2570
2571 /// SetMemtableHugePageSize sets the page size for huge page for
2572 /// arena used by the memtable.
2573 /// If <=0, it won't allocate from huge page but from malloc.
2574 /// Users are responsible to reserve huge pages for it to be allocated. For
2575 /// example:
2576 /// sysctl -w vm.nr_hugepages=20
2577 /// See linux doc Documentation/vm/hugetlbpage.txt
2578 /// If there isn't enough free huge page available, it will fall back to
2579 /// malloc.
2580 ///
2581 /// Dynamically changeable through SetOptions() API
2582 pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2583 unsafe {
2584 ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2585 }
2586 }
2587
2588 /// Sets the maximum number of successive merge operations on a key in the memtable.
2589 ///
2590 /// When a merge operation is added to the memtable and the maximum number of
2591 /// successive merges is reached, the value of the key will be calculated and
2592 /// inserted into the memtable instead of the merge operation. This will
2593 /// ensure that there are never more than max_successive_merges merge
2594 /// operations in the memtable.
2595 ///
2596 /// Default: 0 (disabled)
2597 pub fn set_max_successive_merges(&mut self, num: usize) {
2598 unsafe {
2599 ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2600 }
2601 }
2602
2603 /// Control locality of bloom filter probes to improve cache miss rate.
2604 /// This option only applies to memtable prefix bloom and plaintable
2605 /// prefix bloom. It essentially limits the max number of cache lines each
2606 /// bloom filter check can touch.
2607 ///
2608 /// This optimization is turned off when set to 0. The number should never
2609 /// be greater than number of probes. This option can boost performance
2610 /// for in-memory workload but should use with care since it can cause
2611 /// higher false positive rate.
2612 ///
2613 /// Default: 0
2614 pub fn set_bloom_locality(&mut self, v: u32) {
2615 unsafe {
2616 ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2617 }
2618 }
2619
2620 /// Enable/disable thread-safe inplace updates.
2621 ///
2622 /// Requires updates if
2623 /// * key exists in current memtable
2624 /// * new sizeof(new_value) <= sizeof(old_value)
2625 /// * old_value for that key is a put i.e. kTypeValue
2626 ///
2627 /// Default: false.
2628 pub fn set_inplace_update_support(&mut self, enabled: bool) {
2629 unsafe {
2630 ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2631 }
2632 }
2633
2634 /// Sets the number of locks used for inplace update.
2635 ///
2636 /// Default: 10000 when inplace_update_support = true, otherwise 0.
2637 pub fn set_inplace_update_locks(&mut self, num: usize) {
2638 unsafe {
2639 ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2640 }
2641 }
2642
2643 /// Different max-size multipliers for different levels.
2644 /// These are multiplied by max_bytes_for_level_multiplier to arrive
2645 /// at the max-size of each level.
2646 ///
2647 /// Default: 1
2648 ///
2649 /// Dynamically changeable through SetOptions() API
2650 pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2651 let count = level_values.len();
2652 unsafe {
2653 ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2654 self.inner,
2655 level_values.as_ptr().cast_mut(),
2656 count,
2657 );
2658 }
2659 }
2660
2661 /// If true, then DB::Open() will not fetch and check sizes of all sst files.
2662 /// This may significantly speed up startup if there are many sst files,
2663 /// especially when using non-default Env with expensive GetFileSize().
2664 /// We'll still check that all required sst files exist.
2665 /// If paranoid_checks is false, this option is ignored, and sst files are
2666 /// not checked at all.
2667 ///
2668 /// Default: false
2669 #[deprecated(note = "RocksDB >= 10.5: option is ignored: checking done with a thread pool")]
2670 pub fn set_skip_checking_sst_file_sizes_on_db_open(&mut self, value: bool) {
2671 unsafe {
2672 ffi::rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
2673 self.inner,
2674 c_uchar::from(value),
2675 );
2676 }
2677 }
2678
2679 /// The total maximum size(bytes) of write buffers to maintain in memory
2680 /// including copies of buffers that have already been flushed. This parameter
2681 /// only affects trimming of flushed buffers and does not affect flushing.
2682 /// This controls the maximum amount of write history that will be available
2683 /// in memory for conflict checking when Transactions are used. The actual
2684 /// size of write history (flushed Memtables) might be higher than this limit
2685 /// if further trimming will reduce write history total size below this
2686 /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2687 /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2688 /// Because trimming the next Memtable of size 20MB will reduce total memory
2689 /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2690 ///
2691 /// When using an OptimisticTransactionDB:
2692 /// If this value is too low, some transactions may fail at commit time due
2693 /// to not being able to determine whether there were any write conflicts.
2694 ///
2695 /// When using a TransactionDB:
2696 /// If Transaction::SetSnapshot is used, TransactionDB will read either
2697 /// in-memory write buffers or SST files to do write-conflict checking.
2698 /// Increasing this value can reduce the number of reads to SST files
2699 /// done for conflict detection.
2700 ///
2701 /// Setting this value to 0 will cause write buffers to be freed immediately
2702 /// after they are flushed. If this value is set to -1,
2703 /// 'max_write_buffer_number * write_buffer_size' will be used.
2704 ///
2705 /// Default:
2706 /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2707 /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2708 /// if it is not explicitly set by the user. Otherwise, the default is 0.
2709 pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2710 unsafe {
2711 ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2712 }
2713 }
2714
2715 /// By default, a single write thread queue is maintained. The thread gets
2716 /// to the head of the queue becomes write batch group leader and responsible
2717 /// for writing to WAL and memtable for the batch group.
2718 ///
2719 /// If enable_pipelined_write is true, separate write thread queue is
2720 /// maintained for WAL write and memtable write. A write thread first enter WAL
2721 /// writer queue and then memtable writer queue. Pending thread on the WAL
2722 /// writer queue thus only have to wait for previous writers to finish their
2723 /// WAL writing but not the memtable writing. Enabling the feature may improve
2724 /// write throughput and reduce latency of the prepare phase of two-phase
2725 /// commit.
2726 ///
2727 /// Default: false
2728 pub fn set_enable_pipelined_write(&mut self, value: bool) {
2729 unsafe {
2730 ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
2731 }
2732 }
2733
2734 /// Defines the underlying memtable implementation.
2735 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2736 /// Defaults to using a skiplist.
2737 ///
2738 /// # Examples
2739 ///
2740 /// ```
2741 /// use rust_rocksdb::{Options, MemtableFactory};
2742 /// let mut opts = Options::default();
2743 /// let factory = MemtableFactory::HashSkipList {
2744 /// bucket_count: 1_000_000,
2745 /// height: 4,
2746 /// branching_factor: 4,
2747 /// };
2748 ///
2749 /// opts.set_allow_concurrent_memtable_write(false);
2750 /// opts.set_memtable_factory(factory);
2751 /// ```
2752 pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2753 match factory {
2754 MemtableFactory::Vector => unsafe {
2755 ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2756 },
2757 MemtableFactory::HashSkipList {
2758 bucket_count,
2759 height,
2760 branching_factor,
2761 } => unsafe {
2762 ffi::rocksdb_options_set_hash_skip_list_rep(
2763 self.inner,
2764 bucket_count,
2765 height,
2766 branching_factor,
2767 );
2768 },
2769 MemtableFactory::HashLinkList { bucket_count } => unsafe {
2770 ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2771 },
2772 }
2773 }
2774
2775 pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2776 unsafe {
2777 ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2778 }
2779 self.outlive.block_based = Some(factory.outlive.clone());
2780 }
2781
2782 /// Sets the table factory to a CuckooTableFactory (the default table
2783 /// factory is a block-based table factory that provides a default
2784 /// implementation of TableBuilder and TableReader with default
2785 /// BlockBasedTableOptions).
2786 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2787 /// # Examples
2788 ///
2789 /// ```
2790 /// use rust_rocksdb::{Options, CuckooTableOptions};
2791 ///
2792 /// let mut opts = Options::default();
2793 /// let mut factory_opts = CuckooTableOptions::default();
2794 /// factory_opts.set_hash_ratio(0.8);
2795 /// factory_opts.set_max_search_depth(20);
2796 /// factory_opts.set_cuckoo_block_size(10);
2797 /// factory_opts.set_identity_as_first_hash(true);
2798 /// factory_opts.set_use_module_hash(false);
2799 ///
2800 /// opts.set_cuckoo_table_factory(&factory_opts);
2801 /// ```
2802 pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2803 unsafe {
2804 ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2805 }
2806 }
2807
2808 // This is a factory that provides TableFactory objects.
2809 // Default: a block-based table factory that provides a default
2810 // implementation of TableBuilder and TableReader with default
2811 // BlockBasedTableOptions.
2812 /// Sets the factory as plain table.
2813 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
2814 /// information.
2815 ///
2816 /// # Examples
2817 ///
2818 /// ```
2819 /// use rust_rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
2820 ///
2821 /// let mut opts = Options::default();
2822 /// let factory_opts = PlainTableFactoryOptions {
2823 /// user_key_length: 0,
2824 /// bloom_bits_per_key: 20,
2825 /// hash_table_ratio: 0.75,
2826 /// index_sparseness: 16,
2827 /// huge_page_tlb_size: 0,
2828 /// encoding_type: KeyEncodingType::Plain,
2829 /// full_scan_mode: false,
2830 /// store_index_in_file: false,
2831 /// };
2832 ///
2833 /// opts.set_plain_table_factory(&factory_opts);
2834 /// ```
2835 pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2836 unsafe {
2837 ffi::rocksdb_options_set_plain_table_factory(
2838 self.inner,
2839 options.user_key_length,
2840 options.bloom_bits_per_key,
2841 options.hash_table_ratio,
2842 options.index_sparseness,
2843 options.huge_page_tlb_size,
2844 options.encoding_type as c_char,
2845 c_uchar::from(options.full_scan_mode),
2846 c_uchar::from(options.store_index_in_file),
2847 );
2848 }
2849 }
2850
2851 /// Sets the start level to use compression.
2852 pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2853 unsafe {
2854 ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2855 }
2856 }
2857
2858 /// Measure IO stats in compactions and flushes, if `true`.
2859 ///
2860 /// Default: `false`
2861 ///
2862 /// # Examples
2863 ///
2864 /// ```
2865 /// use rust_rocksdb::Options;
2866 ///
2867 /// let mut opts = Options::default();
2868 /// opts.set_report_bg_io_stats(true);
2869 /// ```
2870 pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2871 unsafe {
2872 ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
2873 }
2874 }
2875
2876 /// Once write-ahead logs exceed this size, we will start forcing the flush of
2877 /// column families whose memtables are backed by the oldest live WAL file
2878 /// (i.e. the ones that are causing all the space amplification).
2879 ///
2880 /// Default: `0`
2881 ///
2882 /// # Examples
2883 ///
2884 /// ```
2885 /// use rust_rocksdb::Options;
2886 ///
2887 /// let mut opts = Options::default();
2888 /// // Set max total wal size to 1G.
2889 /// opts.set_max_total_wal_size(1 << 30);
2890 /// ```
2891 pub fn set_max_total_wal_size(&mut self, size: u64) {
2892 unsafe {
2893 ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2894 }
2895 }
2896
2897 /// Recovery mode to control the consistency while replaying WAL.
2898 ///
2899 /// Default: DBRecoveryMode::PointInTime
2900 ///
2901 /// # Examples
2902 ///
2903 /// ```
2904 /// use rust_rocksdb::{Options, DBRecoveryMode};
2905 ///
2906 /// let mut opts = Options::default();
2907 /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
2908 /// ```
2909 pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
2910 unsafe {
2911 ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
2912 }
2913 }
2914
2915 pub fn enable_statistics(&mut self) {
2916 unsafe {
2917 ffi::rocksdb_options_enable_statistics(self.inner);
2918 }
2919 }
2920
2921 pub fn get_statistics(&self) -> Option<String> {
2922 unsafe {
2923 let value = ffi::rocksdb_options_statistics_get_string(self.inner);
2924 if value.is_null() {
2925 return None;
2926 }
2927
2928 // Must have valid UTF-8 format.
2929 let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
2930 ffi::rocksdb_free(value as *mut c_void);
2931 Some(s)
2932 }
2933 }
2934
2935 /// StatsLevel can be used to reduce statistics overhead by skipping certain
2936 /// types of stats in the stats collection process.
2937 pub fn set_statistics_level(&self, level: StatsLevel) {
2938 unsafe { ffi::rocksdb_options_set_statistics_level(self.inner, level as c_int) }
2939 }
2940
2941 /// Returns the value of cumulative db counters if stat collection is enabled.
2942 pub fn get_ticker_count(&self, ticker: Ticker) -> u64 {
2943 unsafe { ffi::rocksdb_options_statistics_get_ticker_count(self.inner, ticker as u32) }
2944 }
2945
2946 /// Gets Histogram data from collected db stats. Requires stats to be enabled.
2947 pub fn get_histogram_data(&self, histogram: Histogram) -> HistogramData {
2948 unsafe {
2949 let data = HistogramData::default();
2950 ffi::rocksdb_options_statistics_get_histogram_data(
2951 self.inner,
2952 histogram as u32,
2953 data.inner,
2954 );
2955 data
2956 }
2957 }
2958
2959 /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
2960 ///
2961 /// Default: `600` (10 mins)
2962 ///
2963 /// # Examples
2964 ///
2965 /// ```
2966 /// use rust_rocksdb::Options;
2967 ///
2968 /// let mut opts = Options::default();
2969 /// opts.set_stats_dump_period_sec(300);
2970 /// ```
2971 pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
2972 unsafe {
2973 ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
2974 }
2975 }
2976
2977 /// If not zero, dump rocksdb.stats to RocksDB to LOG every `stats_persist_period_sec`.
2978 ///
2979 /// Default: `600` (10 mins)
2980 ///
2981 /// # Examples
2982 ///
2983 /// ```
2984 /// use rust_rocksdb::Options;
2985 ///
2986 /// let mut opts = Options::default();
2987 /// opts.set_stats_persist_period_sec(5);
2988 /// ```
2989 pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
2990 unsafe {
2991 ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
2992 }
2993 }
2994
2995 /// When set to true, reading SST files will opt out of the filesystem's
2996 /// readahead. Setting this to false may improve sequential iteration
2997 /// performance.
2998 ///
2999 /// Default: `true`
3000 pub fn set_advise_random_on_open(&mut self, advise: bool) {
3001 unsafe {
3002 ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
3003 }
3004 }
3005
3006 /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
3007 ///
3008 /// This could reduce context switch when the mutex is not
3009 /// heavily contended. However, if the mutex is hot, we could end up
3010 /// wasting spin time.
3011 ///
3012 /// Default: false
3013 pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
3014 unsafe {
3015 ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
3016 }
3017 }
3018
3019 /// Sets the number of levels for this database.
3020 pub fn set_num_levels(&mut self, n: c_int) {
3021 unsafe {
3022 ffi::rocksdb_options_set_num_levels(self.inner, n);
3023 }
3024 }
3025
3026 /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
3027 /// creates a prefix bloom filter for each memtable with the size of
3028 /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
3029 ///
3030 /// Default: `0`
3031 ///
3032 /// # Examples
3033 ///
3034 /// ```
3035 /// use rust_rocksdb::{Options, SliceTransform};
3036 ///
3037 /// let mut opts = Options::default();
3038 /// let transform = SliceTransform::create_fixed_prefix(10);
3039 /// opts.set_prefix_extractor(transform);
3040 /// opts.set_memtable_prefix_bloom_ratio(0.2);
3041 /// ```
3042 pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
3043 unsafe {
3044 ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
3045 }
3046 }
3047
3048 /// Sets the maximum number of bytes in all compacted files.
3049 /// We try to limit number of bytes in one compaction to be lower than this
3050 /// threshold. But it's not guaranteed.
3051 ///
3052 /// Value 0 will be sanitized.
3053 ///
3054 /// Default: target_file_size_base * 25
3055 pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
3056 unsafe {
3057 ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
3058 }
3059 }
3060
3061 /// Specifies the absolute path of the directory the
3062 /// write-ahead log (WAL) should be written to.
3063 ///
3064 /// Default: same directory as the database
3065 ///
3066 /// # Examples
3067 ///
3068 /// ```
3069 /// use rust_rocksdb::Options;
3070 ///
3071 /// let mut opts = Options::default();
3072 /// opts.set_wal_dir("/path/to/dir");
3073 /// ```
3074 pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
3075 let p = to_cpath(path).unwrap();
3076 unsafe {
3077 ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
3078 }
3079 }
3080
3081 /// Sets the WAL ttl in seconds.
3082 ///
3083 /// The following two options affect how archived logs will be deleted.
3084 /// 1. If both set to 0, logs will be deleted asap and will not get into
3085 /// the archive.
3086 /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
3087 /// WAL files will be checked every 10 min and if total size is greater
3088 /// then wal_size_limit_mb, they will be deleted starting with the
3089 /// earliest until size_limit is met. All empty files will be deleted.
3090 /// 3. If wal_ttl_seconds is not 0 and wall_size_limit_mb is 0, then
3091 /// WAL files will be checked every wal_ttl_seconds / 2 and those that
3092 /// are older than wal_ttl_seconds will be deleted.
3093 /// 4. If both are not 0, WAL files will be checked every 10 min and both
3094 /// checks will be performed with ttl being first.
3095 ///
3096 /// Default: 0
3097 pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
3098 unsafe {
3099 ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
3100 }
3101 }
3102
3103 /// Sets the WAL size limit in MB.
3104 ///
3105 /// If total size of WAL files is greater then wal_size_limit_mb,
3106 /// they will be deleted starting with the earliest until size_limit is met.
3107 ///
3108 /// Default: 0
3109 pub fn set_wal_size_limit_mb(&mut self, size: u64) {
3110 unsafe {
3111 ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
3112 }
3113 }
3114
3115 /// Sets the number of bytes to preallocate (via fallocate) the manifest files.
3116 ///
3117 /// Default is 4MB, which is reasonable to reduce random IO
3118 /// as well as prevent overallocation for mounts that preallocate
3119 /// large amounts of data (such as xfs's allocsize option).
3120 pub fn set_manifest_preallocation_size(&mut self, size: usize) {
3121 unsafe {
3122 ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
3123 }
3124 }
3125
3126 /// If true, then DB::Open() will not update the statistics used to optimize
3127 /// compaction decision by loading table properties from many files.
3128 /// Turning off this feature will improve DBOpen time especially in disk environment.
3129 ///
3130 /// Default: false
3131 pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
3132 unsafe {
3133 ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
3134 }
3135 }
3136
3137 /// Specify the maximal number of info log files to be kept.
3138 ///
3139 /// Default: 1000
3140 ///
3141 /// # Examples
3142 ///
3143 /// ```
3144 /// use rust_rocksdb::Options;
3145 ///
3146 /// let mut options = Options::default();
3147 /// options.set_keep_log_file_num(100);
3148 /// ```
3149 pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
3150 unsafe {
3151 ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
3152 }
3153 }
3154
3155 /// Allow the OS to mmap file for writing.
3156 ///
3157 /// Default: false
3158 ///
3159 /// # Examples
3160 ///
3161 /// ```
3162 /// use rust_rocksdb::Options;
3163 ///
3164 /// let mut options = Options::default();
3165 /// options.set_allow_mmap_writes(true);
3166 /// ```
3167 pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
3168 unsafe {
3169 ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
3170 }
3171 }
3172
3173 /// Allow the OS to mmap file for reading sst tables.
3174 ///
3175 /// Default: false
3176 ///
3177 /// # Examples
3178 ///
3179 /// ```
3180 /// use rust_rocksdb::Options;
3181 ///
3182 /// let mut options = Options::default();
3183 /// options.set_allow_mmap_reads(true);
3184 /// ```
3185 pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
3186 unsafe {
3187 ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
3188 }
3189 }
3190
3191 /// If enabled, WAL is not flushed automatically after each write. Instead it
3192 /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
3193 /// to its file.
3194 ///
3195 /// Default: false
3196 ///
3197 /// # Examples
3198 ///
3199 /// ```
3200 /// use rust_rocksdb::Options;
3201 ///
3202 /// let mut options = Options::default();
3203 /// options.set_manual_wal_flush(true);
3204 /// ```
3205 pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
3206 unsafe {
3207 ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
3208 }
3209 }
3210
3211 /// Guarantee that all column families are flushed together atomically.
3212 /// This option applies to both manual flushes (`db.flush()`) and automatic
3213 /// background flushes caused when memtables are filled.
3214 ///
3215 /// Note that this is only useful when the WAL is disabled. When using the
3216 /// WAL, writes are always consistent across column families.
3217 ///
3218 /// Default: false
3219 ///
3220 /// # Examples
3221 ///
3222 /// ```
3223 /// use rust_rocksdb::Options;
3224 ///
3225 /// let mut options = Options::default();
3226 /// options.set_atomic_flush(true);
3227 /// ```
3228 pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
3229 unsafe {
3230 ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
3231 }
3232 }
3233
3234 /// Sets global cache for table-level rows.
3235 ///
3236 /// Default: null (disabled)
3237 /// Not supported in ROCKSDB_LITE mode!
3238 pub fn set_row_cache(&mut self, cache: &Cache) {
3239 unsafe {
3240 ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
3241 }
3242 self.outlive.row_cache = Some(cache.clone());
3243 }
3244
3245 /// Use to control write rate of flush and compaction. Flush has higher
3246 /// priority than compaction.
3247 /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3248 ///
3249 /// Default: disable
3250 ///
3251 /// # Examples
3252 ///
3253 /// ```
3254 /// use rust_rocksdb::Options;
3255 ///
3256 /// let mut options = Options::default();
3257 /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
3258 /// ```
3259 pub fn set_ratelimiter(
3260 &mut self,
3261 rate_bytes_per_sec: i64,
3262 refill_period_us: i64,
3263 fairness: i32,
3264 ) {
3265 unsafe {
3266 let ratelimiter =
3267 ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
3268 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3269 ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3270 }
3271 }
3272
3273 /// Use to control write rate of flush and compaction. Flush has higher
3274 /// priority than compaction.
3275 /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3276 ///
3277 /// Default: disable
3278 pub fn set_auto_tuned_ratelimiter(
3279 &mut self,
3280 rate_bytes_per_sec: i64,
3281 refill_period_us: i64,
3282 fairness: i32,
3283 ) {
3284 unsafe {
3285 let ratelimiter = ffi::rocksdb_ratelimiter_create_auto_tuned(
3286 rate_bytes_per_sec,
3287 refill_period_us,
3288 fairness,
3289 );
3290 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3291 ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3292 }
3293 }
3294
3295 /// Create a RateLimiter object, which can be shared among RocksDB instances to
3296 /// control write rate of flush and compaction.
3297 ///
3298 /// rate_bytes_per_sec: this is the only parameter you want to set most of the
3299 /// time. It controls the total write rate of compaction and flush in bytes per
3300 /// second. Currently, RocksDB does not enforce rate limit for anything other
3301 /// than flush and compaction, e.g. write to WAL.
3302 ///
3303 /// refill_period_us: this controls how often tokens are refilled. For example,
3304 /// when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
3305 /// 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
3306 /// burstier writes while smaller value introduces more CPU overhead.
3307 /// The default should work for most cases.
3308 ///
3309 /// fairness: RateLimiter accepts high-pri requests and low-pri requests.
3310 /// A low-pri request is usually blocked in favor of hi-pri request. Currently,
3311 /// RocksDB assigns low-pri to request from compaction and high-pri to request
3312 /// from flush. Low-pri requests can get blocked if flush requests come in
3313 /// continuously. This fairness parameter grants low-pri requests permission by
3314 /// 1/fairness chance even though high-pri requests exist to avoid starvation.
3315 /// You should be good by leaving it at default 10.
3316 ///
3317 /// mode: Mode indicates which types of operations count against the limit.
3318 ///
3319 /// auto_tuned: Enables dynamic adjustment of rate limit within the range
3320 /// `[rate_bytes_per_sec / 20, rate_bytes_per_sec]`, according to
3321 /// the recent demand for background I/O.
3322 pub fn set_ratelimiter_with_mode(
3323 &mut self,
3324 rate_bytes_per_sec: i64,
3325 refill_period_us: i64,
3326 fairness: i32,
3327 mode: RateLimiterMode,
3328 auto_tuned: bool,
3329 ) {
3330 unsafe {
3331 let ratelimiter = ffi::rocksdb_ratelimiter_create_with_mode(
3332 rate_bytes_per_sec,
3333 refill_period_us,
3334 fairness,
3335 mode as c_int,
3336 auto_tuned,
3337 );
3338 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3339 ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3340 }
3341 }
3342
3343 /// Sets the maximal size of the info log file.
3344 ///
3345 /// If the log file is larger than `max_log_file_size`, a new info log file
3346 /// will be created. If `max_log_file_size` is equal to zero, all logs will
3347 /// be written to one log file.
3348 ///
3349 /// Default: 0
3350 ///
3351 /// # Examples
3352 ///
3353 /// ```
3354 /// use rust_rocksdb::Options;
3355 ///
3356 /// let mut options = Options::default();
3357 /// options.set_max_log_file_size(0);
3358 /// ```
3359 pub fn set_max_log_file_size(&mut self, size: usize) {
3360 unsafe {
3361 ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
3362 }
3363 }
3364
3365 /// Sets the time for the info log file to roll (in seconds).
3366 ///
3367 /// If specified with non-zero value, log file will be rolled
3368 /// if it has been active longer than `log_file_time_to_roll`.
3369 /// Default: 0 (disabled)
3370 pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
3371 unsafe {
3372 ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
3373 }
3374 }
3375
3376 /// Controls the recycling of log files.
3377 ///
3378 /// If non-zero, previously written log files will be reused for new logs,
3379 /// overwriting the old data. The value indicates how many such files we will
3380 /// keep around at any point in time for later use. This is more efficient
3381 /// because the blocks are already allocated and fdatasync does not need to
3382 /// update the inode after each write.
3383 ///
3384 /// Default: 0
3385 ///
3386 /// # Examples
3387 ///
3388 /// ```
3389 /// use rust_rocksdb::Options;
3390 ///
3391 /// let mut options = Options::default();
3392 /// options.set_recycle_log_file_num(5);
3393 /// ```
3394 pub fn set_recycle_log_file_num(&mut self, num: usize) {
3395 unsafe {
3396 ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
3397 }
3398 }
3399
3400 /// Prints logs to stderr for faster debugging
3401 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/Logger) for more information.
3402 pub fn set_stderr_logger(&mut self, log_level: LogLevel, prefix: impl CStrLike) {
3403 let p = prefix.into_c_string().unwrap();
3404
3405 unsafe {
3406 let logger = ffi::rocksdb_logger_create_stderr_logger(log_level as c_int, p.as_ptr());
3407 ffi::rocksdb_options_set_info_log(self.inner, logger);
3408 ffi::rocksdb_logger_destroy(logger);
3409 }
3410 }
3411
3412 /// Invokes `callback` with RocksDB log messages with level >= `log_level`.
3413 ///
3414 /// The callback can be called concurrently by multiple RocksDB threads.
3415 ///
3416 /// # Examples
3417 /// ```
3418 /// use rust_rocksdb::{LogLevel, Options};
3419 ///
3420 /// let mut options = Options::default();
3421 /// options.set_callback_logger(LogLevel::Debug, move |level, msg| println!("{level:?} {msg}"));
3422 /// ```
3423 pub fn set_callback_logger(
3424 &mut self,
3425 log_level: LogLevel,
3426 callback: impl Fn(LogLevel, &str) + 'static + Send + Sync,
3427 ) {
3428 // store the closure in an Arc so it can be shared across multiple Option/DBs
3429 let holder = Arc::new(LogCallback {
3430 callback: Box::new(callback),
3431 });
3432 let holder_ptr = holder.as_ref() as *const LogCallback;
3433 let holder_cvoid = holder_ptr.cast::<c_void>().cast_mut();
3434
3435 unsafe {
3436 let logger = ffi::rocksdb_logger_create_callback_logger(
3437 log_level as c_int,
3438 Some(Self::logger_callback),
3439 holder_cvoid,
3440 );
3441 ffi::rocksdb_options_set_info_log(self.inner, logger);
3442 ffi::rocksdb_logger_destroy(logger);
3443 }
3444
3445 self.outlive.log_callback = Some(holder);
3446 }
3447
3448 extern "C" fn logger_callback(func: *mut c_void, level: u32, msg: *mut c_char, len: usize) {
3449 use std::{mem, process, str};
3450
3451 let level = unsafe { mem::transmute::<u32, LogLevel>(level) };
3452 let slice = unsafe { slice::from_raw_parts_mut(msg.cast::<u8>(), len) };
3453 let msg = unsafe { str::from_utf8_unchecked(slice) };
3454
3455 let holder = unsafe { &mut *func.cast::<LogCallback>() };
3456 let mut callback_in_catch_unwind = AssertUnwindSafe(&mut holder.callback);
3457 if catch_unwind(move || callback_in_catch_unwind(level, msg)).is_err() {
3458 process::abort();
3459 }
3460 }
3461
3462 /// Sets the threshold at which all writes will be slowed down to at least delayed_write_rate if estimated
3463 /// bytes needed to be compaction exceed this threshold.
3464 ///
3465 /// Default: 64GB
3466 pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
3467 unsafe {
3468 ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
3469 }
3470 }
3471
3472 /// Sets the bytes threshold at which all writes are stopped if estimated bytes needed to be compaction exceed
3473 /// this threshold.
3474 ///
3475 /// Default: 256GB
3476 pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
3477 unsafe {
3478 ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
3479 }
3480 }
3481
3482 /// Sets the size of one block in arena memory allocation.
3483 ///
3484 /// If <= 0, a proper value is automatically calculated (usually 1/10 of
3485 /// writer_buffer_size).
3486 ///
3487 /// Default: 0
3488 pub fn set_arena_block_size(&mut self, size: usize) {
3489 unsafe {
3490 ffi::rocksdb_options_set_arena_block_size(self.inner, size);
3491 }
3492 }
3493
3494 /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
3495 ///
3496 /// Default: false
3497 pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
3498 unsafe {
3499 ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
3500 }
3501 }
3502
3503 /// Enable whole key bloom filter in memtable. Note this will only take effect
3504 /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
3505 /// can potentially reduce CPU usage for point-look-ups.
3506 ///
3507 /// Default: false (disable)
3508 ///
3509 /// Dynamically changeable through SetOptions() API
3510 pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
3511 unsafe {
3512 ffi::rocksdb_options_set_memtable_whole_key_filtering(
3513 self.inner,
3514 c_uchar::from(whole_key_filter),
3515 );
3516 }
3517 }
3518
3519 /// Enable the use of key-value separation.
3520 ///
3521 /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
3522 ///
3523 /// Default: false (disable)
3524 ///
3525 /// Dynamically changeable through SetOptions() API
3526 pub fn set_enable_blob_files(&mut self, val: bool) {
3527 unsafe {
3528 ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
3529 }
3530 }
3531
3532 /// Sets the minimum threshold value at or above which will be written
3533 /// to blob files during flush or compaction.
3534 ///
3535 /// Dynamically changeable through SetOptions() API
3536 pub fn set_min_blob_size(&mut self, val: u64) {
3537 unsafe {
3538 ffi::rocksdb_options_set_min_blob_size(self.inner, val);
3539 }
3540 }
3541
3542 /// Sets the size limit for blob files.
3543 ///
3544 /// Dynamically changeable through SetOptions() API
3545 pub fn set_blob_file_size(&mut self, val: u64) {
3546 unsafe {
3547 ffi::rocksdb_options_set_blob_file_size(self.inner, val);
3548 }
3549 }
3550
3551 /// Sets the blob compression type. All blob files use the same
3552 /// compression type.
3553 ///
3554 /// Dynamically changeable through SetOptions() API
3555 pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
3556 unsafe {
3557 ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
3558 }
3559 }
3560
3561 /// If this is set to true RocksDB will actively relocate valid blobs from the oldest blob files
3562 /// as they are encountered during compaction.
3563 ///
3564 /// Dynamically changeable through SetOptions() API
3565 pub fn set_enable_blob_gc(&mut self, val: bool) {
3566 unsafe {
3567 ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
3568 }
3569 }
3570
3571 /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
3572 ///
3573 /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
3574 /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
3575 /// the trade-off between write amplification and space amplification.
3576 ///
3577 /// Dynamically changeable through SetOptions() API
3578 pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
3579 unsafe {
3580 ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
3581 }
3582 }
3583
3584 /// Sets the blob GC force threshold.
3585 ///
3586 /// Dynamically changeable through SetOptions() API
3587 pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
3588 unsafe {
3589 ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
3590 }
3591 }
3592
3593 /// Sets the blob compaction read ahead size.
3594 ///
3595 /// Dynamically changeable through SetOptions() API
3596 pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
3597 unsafe {
3598 ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
3599 }
3600 }
3601
3602 /// Sets the blob cache.
3603 ///
3604 /// Using a dedicated object for blobs and using the same object for the block and blob caches
3605 /// are both supported. In the latter case, note that blobs are less valuable from a caching
3606 /// perspective than SST blocks, and some cache implementations have configuration options that
3607 /// can be used to prioritize items accordingly (see Cache::Priority and
3608 /// LRUCacheOptions::{high,low}_pri_pool_ratio).
3609 ///
3610 /// Default: disabled
3611 pub fn set_blob_cache(&mut self, cache: &Cache) {
3612 unsafe {
3613 ffi::rocksdb_options_set_blob_cache(self.inner, cache.0.inner.as_ptr());
3614 }
3615 self.outlive.blob_cache = Some(cache.clone());
3616 }
3617
3618 /// Set this option to true during creation of database if you want
3619 /// to be able to ingest behind (call IngestExternalFile() skipping keys
3620 /// that already exist, rather than overwriting matching keys).
3621 /// Setting this option to true has the following effects:
3622 ///
3623 /// 1. Disable some internal optimizations around SST file compression.
3624 /// 2. Reserve the last level for ingested files only.
3625 /// 3. Compaction will not include any file from the last level.
3626 ///
3627 /// Note that only Universal Compaction supports allow_ingest_behind.
3628 /// `num_levels` should be >= 3 if this option is turned on.
3629 ///
3630 /// DEFAULT: false
3631 /// Immutable.
3632 pub fn set_allow_ingest_behind(&mut self, val: bool) {
3633 unsafe {
3634 ffi::rocksdb_options_set_allow_ingest_behind(self.inner, c_uchar::from(val));
3635 }
3636 }
3637
3638 // A factory of a table property collector that marks an SST
3639 // file as need-compaction when it observe at least "D" deletion
3640 // entries in any "N" consecutive entries, or the ratio of tombstone
3641 // entries >= deletion_ratio.
3642 //
3643 // `window_size`: is the sliding window size "N"
3644 // `num_dels_trigger`: is the deletion trigger "D"
3645 // `deletion_ratio`: if <= 0 or > 1, disable triggering compaction based on
3646 // deletion ratio.
3647 pub fn add_compact_on_deletion_collector_factory(
3648 &mut self,
3649 window_size: size_t,
3650 num_dels_trigger: size_t,
3651 deletion_ratio: f64,
3652 ) {
3653 unsafe {
3654 ffi::rocksdb_options_add_compact_on_deletion_collector_factory_del_ratio(
3655 self.inner,
3656 window_size,
3657 num_dels_trigger,
3658 deletion_ratio,
3659 );
3660 }
3661 }
3662
3663 /// Like [`Self::add_compact_on_deletion_collector_factory`], but only triggers
3664 /// compaction if the SST file size is at least `min_file_size` bytes.
3665 pub fn add_compact_on_deletion_collector_factory_min_file_size(
3666 &mut self,
3667 window_size: size_t,
3668 num_dels_trigger: size_t,
3669 deletion_ratio: f64,
3670 min_file_size: u64,
3671 ) {
3672 unsafe {
3673 ffi::rocksdb_options_add_compact_on_deletion_collector_factory_min_file_size(
3674 self.inner,
3675 window_size,
3676 num_dels_trigger,
3677 deletion_ratio,
3678 min_file_size,
3679 );
3680 }
3681 }
3682
3683 /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
3684 /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
3685 /// Users can enable this control by 2 ways:
3686 ///
3687 /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
3688 /// 2- Cost the memtable memory usage to block cache so that memory of RocksDB can be capped by the single limit.
3689 /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
3690 /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
3691 pub fn set_write_buffer_manager(&mut self, write_buffer_manager: &WriteBufferManager) {
3692 unsafe {
3693 ffi::rocksdb_options_set_write_buffer_manager(
3694 self.inner,
3695 write_buffer_manager.0.inner.as_ptr(),
3696 );
3697 }
3698 self.outlive.write_buffer_manager = Some(write_buffer_manager.clone());
3699 }
3700
3701 /// Sets an `SstFileManager` for this `Options`.
3702 ///
3703 /// SstFileManager tracks and controls total SST file space usage, enabling
3704 /// applications to cap disk utilization and throttle deletions.
3705 pub fn set_sst_file_manager(&mut self, sst_file_manager: &SstFileManager) {
3706 unsafe {
3707 ffi::rocksdb_options_set_sst_file_manager(
3708 self.inner,
3709 sst_file_manager.0.inner.as_ptr(),
3710 );
3711 }
3712 self.outlive.sst_file_manager = Some(sst_file_manager.clone());
3713 }
3714
3715 /// If true, working thread may avoid doing unnecessary and long-latency
3716 /// operation (such as deleting obsolete files directly or deleting memtable)
3717 /// and will instead schedule a background job to do it.
3718 ///
3719 /// Use it if you're latency-sensitive.
3720 ///
3721 /// Default: false (disabled)
3722 pub fn set_avoid_unnecessary_blocking_io(&mut self, val: bool) {
3723 unsafe {
3724 ffi::rocksdb_options_set_avoid_unnecessary_blocking_io(self.inner, u8::from(val));
3725 }
3726 }
3727
3728 /// Sets the compaction priority.
3729 ///
3730 /// If level compaction_style =
3731 /// kCompactionStyleLevel, for each level, which files are prioritized to be
3732 /// picked to compact.
3733 ///
3734 /// Default: `DBCompactionPri::MinOverlappingRatio`
3735 ///
3736 /// # Examples
3737 ///
3738 /// ```
3739 /// use rust_rocksdb::{Options, DBCompactionPri};
3740 ///
3741 /// let mut opts = Options::default();
3742 /// opts.set_compaction_pri(DBCompactionPri::RoundRobin);
3743 /// ```
3744 pub fn set_compaction_pri(&mut self, pri: DBCompactionPri) {
3745 unsafe {
3746 ffi::rocksdb_options_set_compaction_pri(self.inner, pri as c_int);
3747 }
3748 }
3749
3750 /// If true, the log numbers and sizes of the synced WALs are tracked
3751 /// in MANIFEST. During DB recovery, if a synced WAL is missing
3752 /// from disk, or the WAL's size does not match the recorded size in
3753 /// MANIFEST, an error will be reported and the recovery will be aborted.
3754 ///
3755 /// This is one additional protection against WAL corruption besides the
3756 /// per-WAL-entry checksum.
3757 ///
3758 /// Note that this option does not work with secondary instance.
3759 /// Currently, only syncing closed WALs are tracked. Calling `DB::SyncWAL()`,
3760 /// etc. or writing with `WriteOptions::sync=true` to sync the live WAL is not
3761 /// tracked for performance/efficiency reasons.
3762 ///
3763 /// See: <https://github.com/facebook/rocksdb/wiki/Track-WAL-in-MANIFEST>
3764 ///
3765 /// Default: false (disabled)
3766 pub fn set_track_and_verify_wals_in_manifest(&mut self, val: bool) {
3767 unsafe {
3768 ffi::rocksdb_options_set_track_and_verify_wals_in_manifest(self.inner, u8::from(val));
3769 }
3770 }
3771
3772 /// Returns the value of the `track_and_verify_wals_in_manifest` option.
3773 pub fn get_track_and_verify_wals_in_manifest(&self) -> bool {
3774 let val_u8 =
3775 unsafe { ffi::rocksdb_options_get_track_and_verify_wals_in_manifest(self.inner) };
3776 val_u8 != 0
3777 }
3778
3779 /// The DB unique ID can be saved in the DB manifest (preferred, this option)
3780 /// or an IDENTITY file (historical, deprecated), or both. If this option is
3781 /// set to false (old behavior), then `write_identity_file` must be set to true.
3782 /// The manifest is preferred because
3783 ///
3784 /// 1. The IDENTITY file is not checksummed, so it is not as safe against
3785 /// corruption.
3786 /// 2. The IDENTITY file may or may not be copied with the DB (e.g. not
3787 /// copied by BackupEngine), so is not reliable for the provenance of a DB.
3788 ///
3789 /// This option might eventually be obsolete and removed as Identity files
3790 /// are phased out.
3791 ///
3792 /// Default: true (enabled)
3793 pub fn set_write_dbid_to_manifest(&mut self, val: bool) {
3794 unsafe {
3795 ffi::rocksdb_options_set_write_dbid_to_manifest(self.inner, u8::from(val));
3796 }
3797 }
3798
3799 /// Returns the value of the `write_dbid_to_manifest` option.
3800 pub fn get_write_dbid_to_manifest(&self) -> bool {
3801 let val_u8 = unsafe { ffi::rocksdb_options_get_write_dbid_to_manifest(self.inner) };
3802 val_u8 != 0
3803 }
3804}
3805
3806impl Default for Options {
3807 fn default() -> Self {
3808 unsafe {
3809 let opts = ffi::rocksdb_options_create();
3810 assert!(!opts.is_null(), "Could not create RocksDB options");
3811
3812 Self {
3813 inner: opts,
3814 outlive: OptionsMustOutliveDB::default(),
3815 }
3816 }
3817 }
3818}
3819
3820impl FlushOptions {
3821 pub fn new() -> FlushOptions {
3822 FlushOptions::default()
3823 }
3824
3825 /// Waits until the flush is done.
3826 ///
3827 /// Default: true
3828 ///
3829 /// # Examples
3830 ///
3831 /// ```
3832 /// use rust_rocksdb::FlushOptions;
3833 ///
3834 /// let mut options = FlushOptions::default();
3835 /// options.set_wait(false);
3836 /// ```
3837 pub fn set_wait(&mut self, wait: bool) {
3838 unsafe {
3839 ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
3840 }
3841 }
3842}
3843
3844impl Default for FlushOptions {
3845 fn default() -> Self {
3846 let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3847 assert!(
3848 !flush_opts.is_null(),
3849 "Could not create RocksDB flush options"
3850 );
3851
3852 Self { inner: flush_opts }
3853 }
3854}
3855
3856impl WriteOptions {
3857 pub fn new() -> WriteOptions {
3858 WriteOptions::default()
3859 }
3860
3861 /// Sets the sync mode. If true, the write will be flushed
3862 /// from the operating system buffer cache before the write is considered complete.
3863 /// If this flag is true, writes will be slower.
3864 ///
3865 /// Default: false
3866 pub fn set_sync(&mut self, sync: bool) {
3867 unsafe {
3868 ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
3869 }
3870 }
3871
3872 /// Sets whether WAL should be active or not.
3873 /// If true, writes will not first go to the write ahead log,
3874 /// and the write may got lost after a crash.
3875 ///
3876 /// Default: false
3877 pub fn disable_wal(&mut self, disable: bool) {
3878 unsafe {
3879 ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
3880 }
3881 }
3882
3883 /// If true and if user is trying to write to column families that don't exist (they were dropped),
3884 /// ignore the write (don't return an error). If there are multiple writes in a WriteBatch,
3885 /// other writes will succeed.
3886 ///
3887 /// Default: false
3888 pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
3889 unsafe {
3890 ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
3891 self.inner,
3892 c_uchar::from(ignore),
3893 );
3894 }
3895 }
3896
3897 /// If true and we need to wait or sleep for the write request, fails
3898 /// immediately with Status::Incomplete().
3899 ///
3900 /// Default: false
3901 pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
3902 unsafe {
3903 ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
3904 }
3905 }
3906
3907 /// If true, this write request is of lower priority if compaction is
3908 /// behind. In this case, no_slowdown = true, the request will be cancelled
3909 /// immediately with Status::Incomplete() returned. Otherwise, it will be
3910 /// slowed down. The slowdown value is determined by RocksDB to guarantee
3911 /// it introduces minimum impacts to high priority writes.
3912 ///
3913 /// Default: false
3914 pub fn set_low_pri(&mut self, v: bool) {
3915 unsafe {
3916 ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
3917 }
3918 }
3919
3920 /// If true, writebatch will maintain the last insert positions of each
3921 /// memtable as hints in concurrent write. It can improve write performance
3922 /// in concurrent writes if keys in one writebatch are sequential. In
3923 /// non-concurrent writes (when concurrent_memtable_writes is false) this
3924 /// option will be ignored.
3925 ///
3926 /// Default: false
3927 pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
3928 unsafe {
3929 ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
3930 self.inner,
3931 c_uchar::from(v),
3932 );
3933 }
3934 }
3935}
3936
3937impl Default for WriteOptions {
3938 fn default() -> Self {
3939 let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
3940 assert!(
3941 !write_opts.is_null(),
3942 "Could not create RocksDB write options"
3943 );
3944
3945 Self { inner: write_opts }
3946 }
3947}
3948
3949impl LruCacheOptions {
3950 /// Capacity of the cache, in the same units as the `charge` of each entry.
3951 /// This is typically measured in bytes, but can be a different unit if using
3952 /// kDontChargeCacheMetadata.
3953 pub fn set_capacity(&mut self, cap: usize) {
3954 unsafe {
3955 ffi::rocksdb_lru_cache_options_set_capacity(self.inner, cap);
3956 }
3957 }
3958
3959 /// Cache is sharded into 2^num_shard_bits shards, by hash of key.
3960 /// If < 0, a good default is chosen based on the capacity and the
3961 /// implementation. (Mutex-based implementations are much more reliant
3962 /// on many shards for parallel scalability.)
3963 pub fn set_num_shard_bits(&mut self, val: c_int) {
3964 unsafe {
3965 ffi::rocksdb_lru_cache_options_set_num_shard_bits(self.inner, val);
3966 }
3967 }
3968}
3969
3970impl Default for LruCacheOptions {
3971 fn default() -> Self {
3972 let inner = unsafe { ffi::rocksdb_lru_cache_options_create() };
3973 assert!(
3974 !inner.is_null(),
3975 "Could not create RocksDB LRU cache options"
3976 );
3977
3978 Self { inner }
3979 }
3980}
3981
3982#[derive(Debug, Copy, Clone, PartialEq, Eq)]
3983#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
3984#[repr(i32)]
3985pub enum ReadTier {
3986 /// Reads data in memtable, block cache, OS cache or storage.
3987 All = 0,
3988 /// Reads data in memtable or block cache.
3989 BlockCache,
3990 /// Reads persisted data. When WAL is disabled, this option will skip data in memtable.
3991 Persisted,
3992 /// Reads data in memtable. Used for memtable only iterators.
3993 Memtable,
3994}
3995
3996impl ReadOptions {
3997 // TODO add snapshot setting here
3998 // TODO add snapshot wrapper structs with proper destructors;
3999 // that struct needs an "iterator" impl too.
4000
4001 /// Specify whether the "data block"/"index block"/"filter block"
4002 /// read for this iteration should be cached in memory?
4003 /// Callers may wish to set this field to false for bulk scans.
4004 ///
4005 /// Default: true
4006 pub fn fill_cache(&mut self, v: bool) {
4007 unsafe {
4008 ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
4009 }
4010 }
4011
4012 /// Sets the snapshot which should be used for the read.
4013 /// The snapshot must belong to the DB that is being read and must
4014 /// not have been released.
4015 pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
4016 unsafe {
4017 ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
4018 }
4019 }
4020
4021 /// Sets the lower bound for an iterator.
4022 pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
4023 self.set_lower_bound_impl(Some(key.into()));
4024 }
4025
4026 /// Sets the upper bound for an iterator.
4027 /// The upper bound itself is not included on the iteration result.
4028 pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
4029 self.set_upper_bound_impl(Some(key.into()));
4030 }
4031
4032 /// Sets lower and upper bounds based on the provided range. This is
4033 /// similar to setting lower and upper bounds separately except that it also
4034 /// allows either bound to be reset.
4035 ///
4036 /// The argument can be a regular Rust range, e.g. `lower..upper`. However,
4037 /// since RocksDB upper bound is always excluded (i.e. range can never be
4038 /// fully closed) inclusive ranges (`lower..=upper` and `..=upper`) are not
4039 /// supported. For example:
4040 ///
4041 /// ```
4042 /// let mut options = rust_rocksdb::ReadOptions::default();
4043 /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
4044 /// ```
4045 ///
4046 /// In addition, [`crate::PrefixRange`] can be used to specify a range of
4047 /// keys with a given prefix. In particular, the above example is
4048 /// equivalent to:
4049 ///
4050 /// ```
4051 /// let mut options = rust_rocksdb::ReadOptions::default();
4052 /// options.set_iterate_range(rust_rocksdb::PrefixRange("xy".as_bytes()));
4053 /// ```
4054 ///
4055 /// Note that setting range using this method is separate to using prefix
4056 /// iterators. Prefix iterators use prefix extractor configured for
4057 /// a column family. Setting bounds via [`crate::PrefixRange`] is more akin
4058 /// to using manual prefix.
4059 ///
4060 /// Using this method clears any previously set bounds. In other words, the
4061 /// bounds can be reset by setting the range to `..` as in:
4062 ///
4063 /// ```
4064 /// let mut options = rust_rocksdb::ReadOptions::default();
4065 /// options.set_iterate_range(..);
4066 /// ```
4067 pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
4068 let (lower, upper) = range.into_bounds();
4069 self.set_lower_bound_impl(lower);
4070 self.set_upper_bound_impl(upper);
4071 }
4072
4073 fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4074 let (ptr, len) = if let Some(ref bound) = bound {
4075 (bound.as_ptr() as *const c_char, bound.len())
4076 } else if self.iterate_lower_bound.is_some() {
4077 (std::ptr::null(), 0)
4078 } else {
4079 return;
4080 };
4081 self.iterate_lower_bound = bound;
4082 unsafe {
4083 ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
4084 }
4085 }
4086
4087 fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4088 let (ptr, len) = if let Some(ref bound) = bound {
4089 (bound.as_ptr() as *const c_char, bound.len())
4090 } else if self.iterate_upper_bound.is_some() {
4091 (std::ptr::null(), 0)
4092 } else {
4093 return;
4094 };
4095 self.iterate_upper_bound = bound;
4096 unsafe {
4097 ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
4098 }
4099 }
4100
4101 /// Specify if this read request should process data that ALREADY
4102 /// resides on a particular cache. If the required data is not
4103 /// found at the specified cache, then Status::Incomplete is returned.
4104 ///
4105 /// Default: ::All
4106 pub fn set_read_tier(&mut self, tier: ReadTier) {
4107 unsafe {
4108 ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
4109 }
4110 }
4111
4112 /// Enforce that the iterator only iterates over the same
4113 /// prefix as the seek.
4114 /// This option is effective only for prefix seeks, i.e. prefix_extractor is
4115 /// non-null for the column family and total_order_seek is false. Unlike
4116 /// iterate_upper_bound, prefix_same_as_start only works within a prefix
4117 /// but in both directions.
4118 ///
4119 /// Default: false
4120 pub fn set_prefix_same_as_start(&mut self, v: bool) {
4121 unsafe {
4122 ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
4123 }
4124 }
4125
4126 /// Enable a total order seek regardless of index format (e.g. hash index)
4127 /// used in the table. Some table format (e.g. plain table) may not support
4128 /// this option.
4129 ///
4130 /// If true when calling Get(), we also skip prefix bloom when reading from
4131 /// block based table. It provides a way to read existing data after
4132 /// changing implementation of prefix extractor.
4133 pub fn set_total_order_seek(&mut self, v: bool) {
4134 unsafe {
4135 ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
4136 }
4137 }
4138
4139 /// Sets a threshold for the number of keys that can be skipped
4140 /// before failing an iterator seek as incomplete. The default value of 0 should be used to
4141 /// never fail a request as incomplete, even on skipping too many keys.
4142 ///
4143 /// Default: 0
4144 pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
4145 unsafe {
4146 ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
4147 }
4148 }
4149
4150 /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we schedule a background job
4151 /// in the flush job queue and delete obsolete files in background.
4152 ///
4153 /// Default: false
4154 pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
4155 unsafe {
4156 ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
4157 self.inner,
4158 c_uchar::from(v),
4159 );
4160 }
4161 }
4162
4163 /// If true, keys deleted using the DeleteRange() API will be visible to
4164 /// readers until they are naturally deleted during compaction.
4165 ///
4166 /// Default: false
4167 #[deprecated(
4168 note = "deprecated in RocksDB 10.2.1: no performance impact if DeleteRange is not used"
4169 )]
4170 pub fn set_ignore_range_deletions(&mut self, v: bool) {
4171 unsafe {
4172 ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
4173 }
4174 }
4175
4176 /// If true, all data read from underlying storage will be
4177 /// verified against corresponding checksums.
4178 ///
4179 /// Default: true
4180 pub fn set_verify_checksums(&mut self, v: bool) {
4181 unsafe {
4182 ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
4183 }
4184 }
4185
4186 /// If non-zero, an iterator will create a new table reader which
4187 /// performs reads of the given size. Using a large size (> 2MB) can
4188 /// improve the performance of forward iteration on spinning disks.
4189 /// Default: 0
4190 ///
4191 /// ```
4192 /// use rust_rocksdb::{ReadOptions};
4193 ///
4194 /// let mut opts = ReadOptions::default();
4195 /// opts.set_readahead_size(4_194_304); // 4mb
4196 /// ```
4197 pub fn set_readahead_size(&mut self, v: usize) {
4198 unsafe {
4199 ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
4200 }
4201 }
4202
4203 /// If auto_readahead_size is set to true, it will auto tune the readahead_size
4204 /// during scans internally.
4205 /// For this feature to be enabled, iterate_upper_bound must also be specified.
4206 ///
4207 /// NOTE: - Recommended for forward Scans only.
4208 /// - If there is a backward scans, this option will be
4209 /// disabled internally and won't be enabled again if the forward scan
4210 /// is issued again.
4211 ///
4212 /// Default: true
4213 pub fn set_auto_readahead_size(&mut self, v: bool) {
4214 unsafe {
4215 ffi::rocksdb_readoptions_set_auto_readahead_size(self.inner, c_uchar::from(v));
4216 }
4217 }
4218
4219 /// If true, create a tailing iterator. Note that tailing iterators
4220 /// only support moving in the forward direction. Iterating in reverse
4221 /// or seek_to_last are not supported.
4222 pub fn set_tailing(&mut self, v: bool) {
4223 unsafe {
4224 ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
4225 }
4226 }
4227
4228 /// Specifies the value of "pin_data". If true, it keeps the blocks
4229 /// loaded by the iterator pinned in memory as long as the iterator is not deleted,
4230 /// If used when reading from tables created with
4231 /// BlockBasedTableOptions::use_delta_encoding = false,
4232 /// Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
4233 /// return 1.
4234 ///
4235 /// Default: false
4236 pub fn set_pin_data(&mut self, v: bool) {
4237 unsafe {
4238 ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
4239 }
4240 }
4241
4242 /// Asynchronously prefetch some data.
4243 ///
4244 /// Used for sequential reads and internal automatic prefetching.
4245 ///
4246 /// Default: `false`
4247 pub fn set_async_io(&mut self, v: bool) {
4248 unsafe {
4249 ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
4250 }
4251 }
4252
4253 /// Deadline for completing an API call (Get/MultiGet/Seek/Next for now)
4254 /// in microseconds.
4255 /// It should be set to microseconds since epoch, i.e, gettimeofday or
4256 /// equivalent plus allowed duration in microseconds.
4257 /// This is best effort. The call may exceed the deadline if there is IO
4258 /// involved and the file system doesn't support deadlines, or due to
4259 /// checking for deadline periodically rather than for every key if
4260 /// processing a batch
4261 pub fn set_deadline(&mut self, microseconds: u64) {
4262 unsafe {
4263 ffi::rocksdb_readoptions_set_deadline(self.inner, microseconds);
4264 }
4265 }
4266
4267 /// A timeout in microseconds to be passed to the underlying FileSystem for
4268 /// reads. As opposed to deadline, this determines the timeout for each
4269 /// individual file read request. If a MultiGet/Get/Seek/Next etc call
4270 /// results in multiple reads, each read can last up to io_timeout us.
4271 pub fn set_io_timeout(&mut self, microseconds: u64) {
4272 unsafe {
4273 ffi::rocksdb_readoptions_set_io_timeout(self.inner, microseconds);
4274 }
4275 }
4276
4277 /// Timestamp of operation. Read should return the latest data visible to the
4278 /// specified timestamp. All timestamps of the same database must be of the
4279 /// same length and format. The user is responsible for providing a customized
4280 /// compare function via Comparator to order <key, timestamp> tuples.
4281 /// For iterator, iter_start_ts is the lower bound (older) and timestamp
4282 /// serves as the upper bound. Versions of the same record that fall in
4283 /// the timestamp range will be returned. If iter_start_ts is nullptr,
4284 /// only the most recent version visible to timestamp is returned.
4285 /// The user-specified timestamp feature is still under active development,
4286 /// and the API is subject to change.
4287 pub fn set_timestamp<S: Into<Vec<u8>>>(&mut self, ts: S) {
4288 self.set_timestamp_impl(Some(ts.into()));
4289 }
4290
4291 fn set_timestamp_impl(&mut self, ts: Option<Vec<u8>>) {
4292 let (ptr, len) = if let Some(ref ts) = ts {
4293 (ts.as_ptr() as *const c_char, ts.len())
4294 } else if self.timestamp.is_some() {
4295 // The stored timestamp is a `Some` but we're updating it to a `None`.
4296 // This means to cancel a previously set timestamp.
4297 // To do this, use a null pointer and zero length.
4298 (std::ptr::null(), 0)
4299 } else {
4300 return;
4301 };
4302 self.timestamp = ts;
4303 unsafe {
4304 ffi::rocksdb_readoptions_set_timestamp(self.inner, ptr, len);
4305 }
4306 }
4307
4308 /// See `set_timestamp`
4309 pub fn set_iter_start_ts<S: Into<Vec<u8>>>(&mut self, ts: S) {
4310 self.set_iter_start_ts_impl(Some(ts.into()));
4311 }
4312
4313 fn set_iter_start_ts_impl(&mut self, ts: Option<Vec<u8>>) {
4314 let (ptr, len) = if let Some(ref ts) = ts {
4315 (ts.as_ptr() as *const c_char, ts.len())
4316 } else if self.timestamp.is_some() {
4317 (std::ptr::null(), 0)
4318 } else {
4319 return;
4320 };
4321 self.iter_start_ts = ts;
4322 unsafe {
4323 ffi::rocksdb_readoptions_set_iter_start_ts(self.inner, ptr, len);
4324 }
4325 }
4326}
4327
4328impl Default for ReadOptions {
4329 fn default() -> Self {
4330 unsafe {
4331 Self {
4332 inner: ffi::rocksdb_readoptions_create(),
4333 timestamp: None,
4334 iter_start_ts: None,
4335 iterate_upper_bound: None,
4336 iterate_lower_bound: None,
4337 }
4338 }
4339 }
4340}
4341
4342impl IngestExternalFileOptions {
4343 /// Can be set to true to move the files instead of copying them.
4344 pub fn set_move_files(&mut self, v: bool) {
4345 unsafe {
4346 ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
4347 }
4348 }
4349
4350 /// If set to false, an ingested file keys could appear in existing snapshots
4351 /// that where created before the file was ingested.
4352 pub fn set_snapshot_consistency(&mut self, v: bool) {
4353 unsafe {
4354 ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
4355 self.inner,
4356 c_uchar::from(v),
4357 );
4358 }
4359 }
4360
4361 /// If set to false, IngestExternalFile() will fail if the file key range
4362 /// overlaps with existing keys or tombstones in the DB.
4363 pub fn set_allow_global_seqno(&mut self, v: bool) {
4364 unsafe {
4365 ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
4366 self.inner,
4367 c_uchar::from(v),
4368 );
4369 }
4370 }
4371
4372 /// If set to false and the file key range overlaps with the memtable key range
4373 /// (memtable flush required), IngestExternalFile will fail.
4374 pub fn set_allow_blocking_flush(&mut self, v: bool) {
4375 unsafe {
4376 ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
4377 self.inner,
4378 c_uchar::from(v),
4379 );
4380 }
4381 }
4382
4383 /// Set to true if you would like duplicate keys in the file being ingested
4384 /// to be skipped rather than overwriting existing data under that key.
4385 /// Usecase: back-fill of some historical data in the database without
4386 /// over-writing existing newer version of data.
4387 /// This option could only be used if the DB has been running
4388 /// with allow_ingest_behind=true since the dawn of time.
4389 /// All files will be ingested at the bottommost level with seqno=0.
4390 pub fn set_ingest_behind(&mut self, v: bool) {
4391 unsafe {
4392 ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
4393 }
4394 }
4395}
4396
4397impl Default for IngestExternalFileOptions {
4398 fn default() -> Self {
4399 unsafe {
4400 Self {
4401 inner: ffi::rocksdb_ingestexternalfileoptions_create(),
4402 }
4403 }
4404 }
4405}
4406
4407/// Used by BlockBasedOptions::set_index_type.
4408pub enum BlockBasedIndexType {
4409 /// A space efficient index block that is optimized for
4410 /// binary-search-based index.
4411 BinarySearch,
4412
4413 /// The hash index, if enabled, will perform a hash lookup if
4414 /// a prefix extractor has been provided through Options::set_prefix_extractor.
4415 HashSearch,
4416
4417 /// A two-level index implementation. Both levels are binary search indexes.
4418 TwoLevelIndexSearch,
4419}
4420
4421/// Used by BlockBasedOptions::set_data_block_index_type.
4422#[repr(C)]
4423pub enum DataBlockIndexType {
4424 /// Use binary search when performing point lookup for keys in data blocks.
4425 /// This is the default.
4426 BinarySearch = 0,
4427
4428 /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
4429 /// compatible with databases created without this feature. Once turned on, existing data will
4430 /// be gradually converted to the hash index format.
4431 BinaryAndHash = 1,
4432}
4433
4434/// Defines the underlying memtable implementation.
4435/// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
4436pub enum MemtableFactory {
4437 Vector,
4438 HashSkipList {
4439 bucket_count: usize,
4440 height: i32,
4441 branching_factor: i32,
4442 },
4443 HashLinkList {
4444 bucket_count: usize,
4445 },
4446}
4447
4448/// Used by BlockBasedOptions::set_checksum_type.
4449pub enum ChecksumType {
4450 NoChecksum = 0,
4451 CRC32c = 1,
4452 XXHash = 2,
4453 XXHash64 = 3,
4454 XXH3 = 4, // Supported since RocksDB 6.27
4455}
4456
4457/// Used in [`PlainTableFactoryOptions`].
4458#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
4459pub enum KeyEncodingType {
4460 /// Always write full keys.
4461 #[default]
4462 Plain = 0,
4463 /// Find opportunities to write the same prefix for multiple rows.
4464 Prefix = 1,
4465}
4466
4467/// Used with DBOptions::set_plain_table_factory.
4468/// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
4469/// information.
4470///
4471/// Defaults:
4472/// user_key_length: 0 (variable length)
4473/// bloom_bits_per_key: 10
4474/// hash_table_ratio: 0.75
4475/// index_sparseness: 16
4476/// huge_page_tlb_size: 0
4477/// encoding_type: KeyEncodingType::Plain
4478/// full_scan_mode: false
4479/// store_index_in_file: false
4480pub struct PlainTableFactoryOptions {
4481 pub user_key_length: u32,
4482 pub bloom_bits_per_key: i32,
4483 pub hash_table_ratio: f64,
4484 pub index_sparseness: usize,
4485 pub huge_page_tlb_size: usize,
4486 pub encoding_type: KeyEncodingType,
4487 pub full_scan_mode: bool,
4488 pub store_index_in_file: bool,
4489}
4490
4491#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4492#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
4493pub enum DBCompressionType {
4494 None = ffi::rocksdb_no_compression as isize,
4495 Snappy = ffi::rocksdb_snappy_compression as isize,
4496 Zlib = ffi::rocksdb_zlib_compression as isize,
4497 Bz2 = ffi::rocksdb_bz2_compression as isize,
4498 Lz4 = ffi::rocksdb_lz4_compression as isize,
4499 Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
4500 Zstd = ffi::rocksdb_zstd_compression as isize,
4501}
4502
4503#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4504#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
4505pub enum DBCompactionStyle {
4506 Level = ffi::rocksdb_level_compaction as isize,
4507 Universal = ffi::rocksdb_universal_compaction as isize,
4508 Fifo = ffi::rocksdb_fifo_compaction as isize,
4509}
4510
4511#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4512#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
4513pub enum DBRecoveryMode {
4514 TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
4515 AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
4516 PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
4517 SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
4518}
4519
4520#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4521#[repr(i32)]
4522pub enum RateLimiterMode {
4523 KReadsOnly = 0,
4524 KWritesOnly = 1,
4525 KAllIo = 2,
4526}
4527
4528#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4529#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
4530pub enum DBCompactionPri {
4531 ByCompensatedSize = ffi::rocksdb_k_by_compensated_size_compaction_pri as isize,
4532 OldestLargestSeqFirst = ffi::rocksdb_k_oldest_largest_seq_first_compaction_pri as isize,
4533 OldestSmallestSeqFirst = ffi::rocksdb_k_oldest_smallest_seq_first_compaction_pri as isize,
4534 MinOverlappingRatio = ffi::rocksdb_k_min_overlapping_ratio_compaction_pri as isize,
4535 RoundRobin = ffi::rocksdb_k_round_robin_compaction_pri as isize,
4536}
4537
4538#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4539#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
4540pub enum BlockBasedPinningTier {
4541 Fallback = ffi::rocksdb_block_based_k_fallback_pinning_tier as isize,
4542 None = ffi::rocksdb_block_based_k_none_pinning_tier as isize,
4543 FlushAndSimilar = ffi::rocksdb_block_based_k_flush_and_similar_pinning_tier as isize,
4544 All = ffi::rocksdb_block_based_k_all_pinning_tier as isize,
4545}
4546
4547pub struct FifoCompactOptions {
4548 pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
4549}
4550
4551impl Default for FifoCompactOptions {
4552 fn default() -> Self {
4553 let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
4554 assert!(
4555 !opts.is_null(),
4556 "Could not create RocksDB Fifo Compaction Options"
4557 );
4558
4559 Self { inner: opts }
4560 }
4561}
4562
4563impl Drop for FifoCompactOptions {
4564 fn drop(&mut self) {
4565 unsafe {
4566 ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
4567 }
4568 }
4569}
4570
4571impl FifoCompactOptions {
4572 /// Sets the max table file size.
4573 ///
4574 /// Once the total sum of table files reaches this, we will delete the oldest
4575 /// table file
4576 ///
4577 /// Default: 1GB
4578 pub fn set_max_table_files_size(&mut self, nbytes: u64) {
4579 unsafe {
4580 ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
4581 }
4582 }
4583}
4584
4585#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4586#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
4587pub enum UniversalCompactionStopStyle {
4588 Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
4589 Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
4590}
4591
4592pub struct UniversalCompactOptions {
4593 pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
4594}
4595
4596impl Default for UniversalCompactOptions {
4597 fn default() -> Self {
4598 let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
4599 assert!(
4600 !opts.is_null(),
4601 "Could not create RocksDB Universal Compaction Options"
4602 );
4603
4604 Self { inner: opts }
4605 }
4606}
4607
4608impl Drop for UniversalCompactOptions {
4609 fn drop(&mut self) {
4610 unsafe {
4611 ffi::rocksdb_universal_compaction_options_destroy(self.inner);
4612 }
4613 }
4614}
4615
4616impl UniversalCompactOptions {
4617 /// Sets the percentage flexibility while comparing file size.
4618 /// If the candidate file(s) size is 1% smaller than the next file's size,
4619 /// then include next file into this candidate set.
4620 ///
4621 /// Default: 1
4622 pub fn set_size_ratio(&mut self, ratio: c_int) {
4623 unsafe {
4624 ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
4625 }
4626 }
4627
4628 /// Sets the minimum number of files in a single compaction run.
4629 ///
4630 /// Default: 2
4631 pub fn set_min_merge_width(&mut self, num: c_int) {
4632 unsafe {
4633 ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
4634 }
4635 }
4636
4637 /// Sets the maximum number of files in a single compaction run.
4638 ///
4639 /// Default: UINT_MAX
4640 pub fn set_max_merge_width(&mut self, num: c_int) {
4641 unsafe {
4642 ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
4643 }
4644 }
4645
4646 /// sets the size amplification.
4647 ///
4648 /// It is defined as the amount (in percentage) of
4649 /// additional storage needed to store a single byte of data in the database.
4650 /// For example, a size amplification of 2% means that a database that
4651 /// contains 100 bytes of user-data may occupy upto 102 bytes of
4652 /// physical storage. By this definition, a fully compacted database has
4653 /// a size amplification of 0%. Rocksdb uses the following heuristic
4654 /// to calculate size amplification: it assumes that all files excluding
4655 /// the earliest file contribute to the size amplification.
4656 ///
4657 /// Default: 200, which means that a 100 byte database could require upto 300 bytes of storage.
4658 pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
4659 unsafe {
4660 ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
4661 self.inner, v,
4662 );
4663 }
4664 }
4665
4666 /// Sets the percentage of compression size.
4667 ///
4668 /// If this option is set to be -1, all the output files
4669 /// will follow compression type specified.
4670 ///
4671 /// If this option is not negative, we will try to make sure compressed
4672 /// size is just above this value. In normal cases, at least this percentage
4673 /// of data will be compressed.
4674 /// When we are compacting to a new file, here is the criteria whether
4675 /// it needs to be compressed: assuming here are the list of files sorted
4676 /// by generation time:
4677 /// A1...An B1...Bm C1...Ct
4678 /// where A1 is the newest and Ct is the oldest, and we are going to compact
4679 /// B1...Bm, we calculate the total size of all the files as total_size, as
4680 /// well as the total size of C1...Ct as total_C, the compaction output file
4681 /// will be compressed iff
4682 /// total_C / total_size < this percentage
4683 ///
4684 /// Default: -1
4685 pub fn set_compression_size_percent(&mut self, v: c_int) {
4686 unsafe {
4687 ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
4688 }
4689 }
4690
4691 /// Sets the algorithm used to stop picking files into a single compaction run.
4692 ///
4693 /// Default: ::Total
4694 pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
4695 unsafe {
4696 ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
4697 }
4698 }
4699}
4700
4701#[derive(Debug, Copy, Clone, PartialEq, Eq)]
4702#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
4703#[repr(u8)]
4704pub enum BottommostLevelCompaction {
4705 /// Skip bottommost level compaction
4706 Skip = 0,
4707 /// Only compact bottommost level if there is a compaction filter
4708 /// This is the default option
4709 IfHaveCompactionFilter,
4710 /// Always compact bottommost level
4711 Force,
4712 /// Always compact bottommost level but in bottommost level avoid
4713 /// double-compacting files created in the same compaction
4714 ForceOptimized,
4715}
4716
4717pub struct CompactOptions {
4718 pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
4719 full_history_ts_low: Option<Vec<u8>>,
4720}
4721
4722impl Default for CompactOptions {
4723 fn default() -> Self {
4724 let opts = unsafe { ffi::rocksdb_compactoptions_create() };
4725 assert!(!opts.is_null(), "Could not create RocksDB Compact Options");
4726
4727 Self {
4728 inner: opts,
4729 full_history_ts_low: None,
4730 }
4731 }
4732}
4733
4734impl Drop for CompactOptions {
4735 fn drop(&mut self) {
4736 unsafe {
4737 ffi::rocksdb_compactoptions_destroy(self.inner);
4738 }
4739 }
4740}
4741
4742impl CompactOptions {
4743 /// If more than one thread calls manual compaction,
4744 /// only one will actually schedule it while the other threads will simply wait
4745 /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
4746 /// is set to true, the call will disable scheduling of automatic compaction jobs
4747 /// and wait for existing automatic compaction jobs to finish.
4748 pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
4749 unsafe {
4750 ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
4751 self.inner,
4752 c_uchar::from(v),
4753 );
4754 }
4755 }
4756
4757 /// Sets bottommost level compaction.
4758 pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
4759 unsafe {
4760 ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
4761 }
4762 }
4763
4764 /// If true, compacted files will be moved to the minimum level capable
4765 /// of holding the data or given level (specified non-negative target_level).
4766 pub fn set_change_level(&mut self, v: bool) {
4767 unsafe {
4768 ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
4769 }
4770 }
4771
4772 /// If change_level is true and target_level have non-negative value, compacted
4773 /// files will be moved to target_level.
4774 pub fn set_target_level(&mut self, lvl: c_int) {
4775 unsafe {
4776 ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
4777 }
4778 }
4779
4780 /// Set user-defined timestamp low bound, the data with older timestamp than
4781 /// low bound maybe GCed by compaction. Default: nullptr
4782 pub fn set_full_history_ts_low<S: Into<Vec<u8>>>(&mut self, ts: S) {
4783 self.set_full_history_ts_low_impl(Some(ts.into()));
4784 }
4785
4786 fn set_full_history_ts_low_impl(&mut self, ts: Option<Vec<u8>>) {
4787 let (ptr, len) = if let Some(ref ts) = ts {
4788 (ts.as_ptr() as *mut c_char, ts.len())
4789 } else if self.full_history_ts_low.is_some() {
4790 (std::ptr::null::<Vec<u8>>() as *mut c_char, 0)
4791 } else {
4792 return;
4793 };
4794 self.full_history_ts_low = ts;
4795 unsafe {
4796 ffi::rocksdb_compactoptions_set_full_history_ts_low(self.inner, ptr, len);
4797 }
4798 }
4799}
4800
4801pub struct WaitForCompactOptions {
4802 pub(crate) inner: *mut ffi::rocksdb_wait_for_compact_options_t,
4803}
4804
4805impl Default for WaitForCompactOptions {
4806 fn default() -> Self {
4807 let opts = unsafe { ffi::rocksdb_wait_for_compact_options_create() };
4808 assert!(
4809 !opts.is_null(),
4810 "Could not create RocksDB Wait For Compact Options"
4811 );
4812
4813 Self { inner: opts }
4814 }
4815}
4816
4817impl Drop for WaitForCompactOptions {
4818 fn drop(&mut self) {
4819 unsafe {
4820 ffi::rocksdb_wait_for_compact_options_destroy(self.inner);
4821 }
4822 }
4823}
4824
4825impl WaitForCompactOptions {
4826 /// If true, abort waiting if background jobs are paused. If false,
4827 /// ContinueBackgroundWork() must be called to resume the background jobs.
4828 /// Otherwise, jobs that were queued, but not scheduled yet may never finish
4829 /// and WaitForCompact() may wait indefinitely (if timeout is set, it will
4830 /// abort after the timeout).
4831 ///
4832 /// Default: false
4833 pub fn set_abort_on_pause(&mut self, v: bool) {
4834 unsafe {
4835 ffi::rocksdb_wait_for_compact_options_set_abort_on_pause(self.inner, c_uchar::from(v));
4836 }
4837 }
4838
4839 /// If true, flush all column families before starting to wait.
4840 ///
4841 /// Default: false
4842 pub fn set_flush(&mut self, v: bool) {
4843 unsafe {
4844 ffi::rocksdb_wait_for_compact_options_set_flush(self.inner, c_uchar::from(v));
4845 }
4846 }
4847
4848 /// Timeout in microseconds for waiting for compaction to complete.
4849 /// when timeout == 0, WaitForCompact() will wait as long as there's background
4850 /// work to finish.
4851 ///
4852 /// Default: 0
4853 pub fn set_timeout(&mut self, microseconds: u64) {
4854 unsafe {
4855 ffi::rocksdb_wait_for_compact_options_set_timeout(self.inner, microseconds);
4856 }
4857 }
4858}
4859
4860/// Represents a path where sst files can be put into
4861pub struct DBPath {
4862 pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
4863}
4864
4865impl DBPath {
4866 /// Create a new path
4867 pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
4868 let p = to_cpath(path.as_ref()).unwrap();
4869 let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
4870 if dbpath.is_null() {
4871 Err(Error::new(format!(
4872 "Could not create path for storing sst files at location: {}",
4873 path.as_ref().display()
4874 )))
4875 } else {
4876 Ok(DBPath { inner: dbpath })
4877 }
4878 }
4879}
4880
4881impl Drop for DBPath {
4882 fn drop(&mut self) {
4883 unsafe {
4884 ffi::rocksdb_dbpath_destroy(self.inner);
4885 }
4886 }
4887}
4888
4889#[cfg(test)]
4890mod tests {
4891 use crate::cache::Cache;
4892 use crate::db_options::WriteBufferManager;
4893 use crate::{MemtableFactory, Options};
4894
4895 #[test]
4896 fn test_enable_statistics() {
4897 let mut opts = Options::default();
4898 opts.enable_statistics();
4899 opts.set_stats_dump_period_sec(60);
4900 assert!(opts.get_statistics().is_some());
4901
4902 let opts = Options::default();
4903 assert!(opts.get_statistics().is_none());
4904 }
4905
4906 #[test]
4907 fn test_set_memtable_factory() {
4908 let mut opts = Options::default();
4909 opts.set_memtable_factory(MemtableFactory::Vector);
4910 opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
4911 opts.set_memtable_factory(MemtableFactory::HashSkipList {
4912 bucket_count: 100,
4913 height: 4,
4914 branching_factor: 4,
4915 });
4916 }
4917
4918 #[test]
4919 fn test_use_fsync() {
4920 let mut opts = Options::default();
4921 assert!(!opts.get_use_fsync());
4922 opts.set_use_fsync(true);
4923 assert!(opts.get_use_fsync());
4924 }
4925
4926 #[test]
4927 fn test_set_stats_persist_period_sec() {
4928 let mut opts = Options::default();
4929 opts.enable_statistics();
4930 opts.set_stats_persist_period_sec(5);
4931 assert!(opts.get_statistics().is_some());
4932
4933 let opts = Options::default();
4934 assert!(opts.get_statistics().is_none());
4935 }
4936
4937 #[test]
4938 fn test_set_write_buffer_manager() {
4939 let mut opts = Options::default();
4940 let lrucache = Cache::new_lru_cache(100);
4941 let write_buffer_manager =
4942 WriteBufferManager::new_write_buffer_manager_with_cache(100, false, lrucache);
4943 assert_eq!(write_buffer_manager.get_buffer_size(), 100);
4944 assert_eq!(write_buffer_manager.get_usage(), 0);
4945 assert!(write_buffer_manager.enabled());
4946
4947 opts.set_write_buffer_manager(&write_buffer_manager);
4948 drop(opts);
4949
4950 // WriteBufferManager outlives options
4951 assert!(write_buffer_manager.enabled());
4952 }
4953}