rust_rocksdb/db_options.rs
1// Copyright 2020 Tyler Neely
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use std::panic::{AssertUnwindSafe, catch_unwind};
16use std::path::Path;
17use std::ptr::{NonNull, null_mut};
18use std::slice;
19use std::sync::Arc;
20
21use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};
22
23use crate::cache::Cache;
24use crate::column_family::ColumnFamilyTtl;
25use crate::event_listener::{EventListener, new_event_listener};
26use crate::ffi_util::from_cstr_and_free;
27use crate::sst_file_manager::SstFileManager;
28use crate::statistics::{Histogram, HistogramData, StatsLevel};
29use crate::write_buffer_manager::WriteBufferManager;
30use crate::{
31 ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
32 compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
33 compaction_filter_factory::{self, CompactionFilterFactory},
34 comparator::{
35 ComparatorCallback, ComparatorWithTsCallback, CompareFn, CompareTsFn, CompareWithoutTsFn,
36 },
37 db::DBAccess,
38 env::Env,
39 ffi,
40 ffi_util::{CStrLike, to_cpath},
41 merge_operator::{
42 self, MergeFn, MergeOperatorCallback, full_merge_callback, partial_merge_callback,
43 },
44 slice_transform::SliceTransform,
45 statistics::Ticker,
46};
47
// Signature for user-supplied log callbacks. Must be Send and Sync because it
// will be called by RocksDB from different (background) threads.
type LogCallbackFn = dyn Fn(LogLevel, &str) + 'static + Send + Sync;
50
51/// Type for log callbacks used by [`Options::set_info_logger`]. Use Box to pass a thin pointer to
52/// the C callback.
53type LoggerCallback = Box<dyn Fn(LogLevel, &str) + Sync + Send>;
54
// Holds a log callback to ensure it outlives any Options and DBs that use it.
struct LogCallback {
    // Boxed so a thin pointer to the callback can be handed across the C FFI.
    callback: Box<LogCallbackFn>,
}
59
/// Options that must outlive the DB, and may be shared between DBs. This is cloned and stored
/// with every DB that is created from the options.
#[derive(Default)]
pub(crate) struct OptionsMustOutliveDB {
    // Custom environment, if one was configured.
    env: Option<Env>,
    // Shared caches, kept alive while any DB references them.
    row_cache: Option<Cache>,
    blob_cache: Option<Cache>,
    // Block-based-table resources (e.g. the block cache).
    block_based: Option<BlockBasedOptionsMustOutliveDB>,
    write_buffer_manager: Option<WriteBufferManager>,
    sst_file_manager: Option<SstFileManager>,
    // Rust-side callback state referenced by the C layer; Arc-shared so clones
    // of these options keep the callbacks alive.
    log_callback: Option<Arc<LogCallback>>,
    // Owned C objects (destroyed by their Drop impls when the last Arc drops).
    comparator: Option<Arc<OwnedComparator>>,
    compaction_filter: Option<Arc<OwnedCompactionFilter>>,
    logger_callback: Option<Arc<LoggerCallback>>,
}
75
76impl OptionsMustOutliveDB {
77 pub(crate) fn clone(&self) -> Self {
78 Self {
79 env: self.env.clone(),
80 row_cache: self.row_cache.clone(),
81 blob_cache: self.blob_cache.clone(),
82 block_based: self
83 .block_based
84 .as_ref()
85 .map(BlockBasedOptionsMustOutliveDB::clone),
86 write_buffer_manager: self.write_buffer_manager.clone(),
87 sst_file_manager: self.sst_file_manager.clone(),
88 log_callback: self.log_callback.clone(),
89 comparator: self.comparator.clone(),
90 compaction_filter: self.compaction_filter.clone(),
91 logger_callback: self.logger_callback.clone(),
92 }
93 }
94}
95
/// Stores a `rocksdb_comparator_t` and destroys it when dropped.
///
/// This has an unsafe implementation of Send and Sync because it wraps a RocksDB pointer that
/// is safe to share between threads.
struct OwnedComparator {
    // Never null; ownership of the comparator is transferred in via `new`.
    inner: NonNull<ffi::rocksdb_comparator_t>,
}

impl OwnedComparator {
    /// Takes ownership of `inner`; the wrapped comparator is destroyed on drop.
    fn new(inner: NonNull<ffi::rocksdb_comparator_t>) -> Self {
        Self { inner }
    }
}

impl Drop for OwnedComparator {
    fn drop(&mut self) {
        // SAFETY: `inner` was handed to `new` with ownership and is destroyed
        // exactly once, here.
        unsafe {
            ffi::rocksdb_comparator_destroy(self.inner.as_ptr());
        }
    }
}
117
/// Stores a `rocksdb_compactionfilter_t` and destroys it when dropped.
///
/// This has an unsafe implementation of Send and Sync because it wraps a RocksDB pointer that
/// is safe to share between threads.
struct OwnedCompactionFilter {
    // Never null; ownership of the filter is transferred in via `new`.
    inner: NonNull<ffi::rocksdb_compactionfilter_t>,
}

impl OwnedCompactionFilter {
    /// Takes ownership of `inner`; the wrapped filter is destroyed on drop.
    fn new(inner: NonNull<ffi::rocksdb_compactionfilter_t>) -> Self {
        Self { inner }
    }
}

impl Drop for OwnedCompactionFilter {
    fn drop(&mut self) {
        // SAFETY: `inner` was handed to `new` with ownership and is destroyed
        // exactly once, here.
        unsafe {
            ffi::rocksdb_compactionfilter_destroy(self.inner.as_ptr());
        }
    }
}
139
// Block-based-table resources that must outlive any DB using them.
#[derive(Default)]
struct BlockBasedOptionsMustOutliveDB {
    // Keeps a configured shared block cache alive while a DB references it.
    block_cache: Option<Cache>,
}

impl BlockBasedOptionsMustOutliveDB {
    // Inherent (not trait) clone, mirroring `OptionsMustOutliveDB::clone`.
    fn clone(&self) -> Self {
        Self {
            block_cache: self.block_cache.clone(),
        }
    }
}
152
/// Database-wide options around performance and behavior.
///
/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
/// and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use rust_rocksdb::{Options, DB};
/// use rust_rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///     let path = "path/for/rocksdb/storageX";
///     let mut opts = Options::default();
///     opts.create_if_missing(true);
///     opts.set_max_open_files(10000);
///     opts.set_use_fsync(false);
///     opts.set_bytes_per_sync(8388608);
///     opts.optimize_for_point_lookup(1024);
///     opts.set_table_cache_num_shard_bits(6);
///     opts.set_max_write_buffer_number(32);
///     opts.set_write_buffer_size(536870912);
///     opts.set_target_file_size_base(1073741824);
///     opts.set_min_write_buffer_number_to_merge(4);
///     opts.set_level_zero_stop_writes_trigger(2000);
///     opts.set_level_zero_slowdown_writes_trigger(0);
///     opts.set_compaction_style(DBCompactionStyle::Universal);
///     opts.set_disable_auto_compactions(true);
///
///     DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    // Raw RocksDB options object; owned here and freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    // Rust-side resources (caches, env, callbacks, …) that must stay alive as
    // long as any DB created from these options.
    pub(crate) outlive: OptionsMustOutliveDB,
}
189
/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use rust_rocksdb::{DB, Options, WriteBatch, WriteOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY1")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY1");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(&batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    // Raw FFI handle; owned here and freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
}
222
/// For configuring an LRU cache.
pub struct LruCacheOptions {
    // Raw FFI handle; owned here and freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_lru_cache_options_t,
}
226
/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use rust_rocksdb::{DB, Options, FlushOptions};
///
/// let tempdir = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY2")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY2");
/// let path = tempdir.path();
/// {
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    // Raw FFI handle; owned here and freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}
254
/// For configuring block-based file storage.
pub struct BlockBasedOptions {
    // Raw FFI handle; owned here and freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    // Rust-side resources (e.g. the block cache) that must outlive any DB
    // built from these options.
    outlive: BlockBasedOptionsMustOutliveDB,
}
260
/// Options controlling an individual read operation.
pub struct ReadOptions {
    // Raw FFI handle; owned here and freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
    // The `ReadOptions` owns a copy of the timestamp and iteration bounds.
    // This is necessary to ensure the pointers we pass over the FFI live as
    // long as the `ReadOptions`. This way, when performing the read operation,
    // the pointers are guaranteed to be valid.
    timestamp: Option<Vec<u8>>,
    iter_start_ts: Option<Vec<u8>>,
    iterate_upper_bound: Option<Vec<u8>>,
    iterate_lower_bound: Option<Vec<u8>>,
}
272
/// Configuration of cuckoo-based storage.
pub struct CuckooTableOptions {
    // Raw FFI handle; owned here and freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}
277
/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use rust_rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// let tempdir = tempfile::Builder::new()
///     .tempdir()
///     .expect("Failed to create temporary folder for the _path_for_sst_file");
/// let path1 = tempdir.path().join("_path_for_sst_file");
/// writer.open(path1.clone()).unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let tempdir2 = tempfile::Builder::new()
///     .prefix("_path_for_rocksdb_storageY3")
///     .tempdir()
///     .expect("Failed to create temporary path for the _path_for_rocksdb_storageY3");
/// let path2 = tempdir2.path();
/// {
///     let db = DB::open_default(&path2).unwrap();
///     let mut ingest_opts = IngestExternalFileOptions::default();
///     ingest_opts.set_move_files(true);
///     db.ingest_external_file_opts(&ingest_opts, vec![path1]).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path2);
/// ```
pub struct IngestExternalFileOptions {
    // Raw FFI handle; owned here and freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}
313
// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for LruCacheOptions {}
unsafe impl Send for FlushOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CompactOptions {}
unsafe impl Send for ImportColumnFamilyOptions {}
unsafe impl Send for OwnedComparator {}
unsafe impl Send for OwnedCompactionFilter {}

// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference, so shared `&self`
// access from multiple threads does not mutate the underlying C object.
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for LruCacheOptions {}
unsafe impl Sync for FlushOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CompactOptions {}
unsafe impl Sync for ImportColumnFamilyOptions {}
unsafe impl Sync for OwnedComparator {}
unsafe impl Sync for OwnedCompactionFilter {}
344
impl Drop for Options {
    fn drop(&mut self) {
        // SAFETY: `inner` is owned by this struct and is destroyed exactly
        // once, here.
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}
352
353impl Clone for Options {
354 fn clone(&self) -> Self {
355 let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
356 assert!(!inner.is_null(), "Could not copy RocksDB options");
357
358 Self {
359 inner,
360 outlive: self.outlive.clone(),
361 }
362 }
363}
364
// Each of the following Drop impls releases the RocksDB C object owned via
// `inner`. Each pointer is owned exclusively by its wrapper and is freed
// exactly once, here.
impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}

impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}

impl Drop for FlushOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}

impl Drop for WriteOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}

impl Drop for LruCacheOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_lru_cache_options_destroy(self.inner);
        }
    }
}

impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_readoptions_destroy(self.inner);
        }
    }
}

impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}
420
impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
    ///
    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
    // NOTE(review): the parameter is an on/off flag; the name `size` is
    // historical and is kept unchanged for API stability.
    pub fn set_partition_filters(&mut self, size: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(self.inner, c_uchar::from(size));
        }
    }

    /// Sets global cache for blocks (user data is stored in a set of blocks, and
    /// a block is the unit of reading from disk).
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        // Keep a clone of the cache so it outlives any DB built from these options.
        self.outlive.block_cache = Some(cache.clone());
    }

    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
        }
    }

    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
    /// policy to reduce disk reads.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// ```
    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
        unsafe {
            let bloom = if block_based {
                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
            } else {
                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
            };

            // NOTE(review): the filter-policy handle is presumably consumed by
            // `set_filter_policy` (no explicit destroy here) — confirm against c.h.
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
        }
    }

    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Whether to put index/filter blocks in the block cache. When false,
    /// each "table reader" object will pre-load index/filter blocks during
    /// table initialization. Index and filter partition blocks always use
    /// block cache regardless of this option.
    ///
    /// Default: false
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If `cache_index_and_filter_blocks` is enabled, cache index and filter
    /// blocks with high priority. Depending on the block cache implementation,
    /// index, filter, and other metadata blocks may be less likely to be
    /// evicted than data blocks when this is set to true.
    ///
    /// Default: true.
    pub fn set_cache_index_and_filter_blocks_with_high_priority(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks_with_high_priority(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Defines the index type to be used for SS-table lookups.
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
    /// ```
    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
        // The enum discriminant maps directly onto the C API's index-type constant.
        let index = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// the top-level index of partitioned filter and index blocks are stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in LSM tree.
    ///
    /// Default: true.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    ///
    /// See full [list](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/table.h#L493-L521)
    /// of the supported versions.
    ///
    /// Default: 6.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }

    /// Use delta encoding to compress keys in blocks.
    /// ReadOptions::pin_data requires this option to be disabled.
    ///
    /// Default: true
    pub fn set_use_delta_encoding(&mut self, enable: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_use_delta_encoding(
                self.inner,
                c_uchar::from(enable),
            );
        }
    }

    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }

    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }

    /// Set the data block index type for point lookups:
    ///  `DataBlockIndexType::BinarySearch` to use binary search within the data block.
    ///  `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
    ///  the normal binary search.
    ///
    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
    ///
    /// Default: `BinarySearch`
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
    /// block_opts.set_data_block_hash_ratio(0.85);
    /// ```
    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
        let index_t = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
        }
    }

    /// Set the data block hash index utilization ratio.
    ///
    /// The smaller the utilization ratio, the less hash collisions happen, and so reduce the risk for a
    /// point lookup to fall back to binary search due to the collisions. A small ratio means faster
    /// lookup at the price of more space overhead.
    ///
    /// Default: 0.75
    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
        }
    }

    /// If false, place only prefixes in the filter, not whole keys.
    ///
    /// Defaults to true.
    pub fn set_whole_key_filtering(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
        }
    }

    /// Use the specified checksum type.
    /// Newly created table files will be protected with this checksum type.
    /// Old table files will still be readable, even though they have different checksum type.
    pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
        unsafe {
            ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
        }
    }

    /// If true, generate Bloom/Ribbon filters that minimize memory internal
    /// fragmentation.
    /// See official [wiki](
    /// https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter#reducing-internal-fragmentation)
    /// for more information.
    ///
    /// Default: true.
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// opts.set_optimize_filters_for_memory(true);
    /// ```
    pub fn set_optimize_filters_for_memory(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_optimize_filters_for_memory(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// The tier of block-based tables whose top-level index into metadata
    /// partitions will be pinned. Currently indexes and filters may be
    /// partitioned.
    ///
    /// Note `cache_index_and_filter_blocks` must be true for this option to have
    /// any effect. Otherwise any top-level index into metadata partitions would be
    /// held in table reader memory, outside the block cache.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_top_level_index_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_top_level_index_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_top_level_index_pinning_tier(
                self.inner,
                tier as c_int,
            );
        }
    }

    /// The tier of block-based tables whose metadata partitions will be pinned.
    /// Currently indexes and filters may be partitioned.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_partition_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_partition_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_pinning_tier(self.inner, tier as c_int);
        }
    }

    /// The tier of block-based tables whose unpartitioned metadata blocks will be
    /// pinned.
    ///
    /// Note `cache_index_and_filter_blocks` must be true for this option to have
    /// any effect. Otherwise the unpartitioned meta-blocks would be held in table
    /// reader memory, outside the block cache.
    ///
    /// Default: `BlockBasedPinningTier::Fallback`
    ///
    /// # Example
    ///
    /// ```
    /// use rust_rocksdb::{BlockBasedOptions, BlockBasedPinningTier, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_unpartitioned_pinning_tier(BlockBasedPinningTier::FlushAndSimilar);
    /// ```
    pub fn set_unpartitioned_pinning_tier(&mut self, tier: BlockBasedPinningTier) {
        unsafe {
            ffi::rocksdb_block_based_options_set_unpartitioned_pinning_tier(
                self.inner,
                tier as c_int,
            );
        }
    }
}
834
835impl Default for BlockBasedOptions {
836 fn default() -> Self {
837 let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
838 assert!(
839 !block_opts.is_null(),
840 "Could not create RocksDB block based options"
841 );
842
843 Self {
844 inner: block_opts,
845 outlive: BlockBasedOptionsMustOutliveDB::default(),
846 }
847 }
848}
849
impl CuckooTableOptions {
    /// Determines the utilization of hash tables. Smaller values
    /// result in larger hash tables with fewer collisions.
    /// Default: 0.9
    pub fn set_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
        }
    }

    /// A property used by builder to determine the depth to go to
    /// in order to search for a path to displace elements in case of
    /// collision. See Builder.MakeSpaceForKey method. Higher
    /// values result in more efficient hash tables with fewer
    /// lookups but take more time to build.
    /// Default: 100
    pub fn set_max_search_depth(&mut self, depth: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
        }
    }

    /// In case of collision while inserting, the builder
    /// attempts to insert in the next cuckoo_block_size
    /// locations before skipping over to the next Cuckoo hash
    /// function. This makes lookups more cache friendly in case
    /// of collisions.
    /// Default: 5
    pub fn set_cuckoo_block_size(&mut self, size: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
        }
    }

    /// If this option is enabled, user key is treated as uint64_t and its value
    /// is used as hash value directly. This option changes builder's behavior.
    /// Readers ignore this option and behave according to what is specified in
    /// the table property.
    /// Default: false
    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
        }
    }

    /// If this option is set to true, a modulo operation is used during hash
    /// calculation. This often yields better space efficiency at the cost of
    /// performance. If this option is set to false, the number of entries in the
    /// table is constrained to be a power of two, and a bitwise AND is used to
    /// calculate the hash, which is faster in general.
    /// (The "module" spelling in the FFI name is RocksDB's historical spelling.)
    /// Default: true
    pub fn set_use_module_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
        }
    }
}
906
907impl Default for CuckooTableOptions {
908 fn default() -> Self {
909 let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
910 assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");
911
912 Self { inner: opts }
913 }
914}
915
// Verbosity of the LOG.
// NOTE(review): the discriminants (Debug = 0, then sequential) presumably
// mirror RocksDB's `InfoLogLevel` values — confirm against the C API before
// reordering variants; `try_from_raw` relies on these `as i32` values.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}
927
928impl LogLevel {
929 pub(crate) fn try_from_raw(raw: i32) -> Option<Self> {
930 match raw {
931 n if n == LogLevel::Debug as i32 => Some(LogLevel::Debug),
932 n if n == LogLevel::Info as i32 => Some(LogLevel::Info),
933 n if n == LogLevel::Warn as i32 => Some(LogLevel::Warn),
934 n if n == LogLevel::Error as i32 => Some(LogLevel::Error),
935 n if n == LogLevel::Fatal as i32 => Some(LogLevel::Fatal),
936 n if n == LogLevel::Header as i32 => Some(LogLevel::Header),
937 _ => None,
938 }
939 }
940}
941
942impl Options {
    /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
    /// latest RocksDB options file stored in the specified rocksdb database.
    ///
    /// *IMPORTANT*:
    /// ROCKSDB DOES NOT STORE cf ttl in the options file. If you have set it via
    /// [`ColumnFamilyDescriptor::new_with_ttl`] then you need to set it again after loading the options file.
    /// Ttl will be set to [`ColumnFamilyTtl::Disabled`] for all column families for your safety.
    pub fn load_latest<P: AsRef<Path>>(
        path: P,
        env: Env,
        ignore_unknown_options: bool,
        cache: Cache,
    ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
        let path = to_cpath(path)?;
        // Out-parameters that rocksdb_load_latest_options fills in below.
        let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
        let mut num_column_families: usize = 0;
        let mut column_family_names: *mut *mut c_char = null_mut();
        let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
        unsafe {
            ffi_try!(ffi::rocksdb_load_latest_options(
                path.as_ptr(),
                env.0.inner,
                ignore_unknown_options,
                cache.0.inner.as_ptr(),
                &raw mut db_options,
                &raw mut num_column_families,
                &raw mut column_family_names,
                &raw mut column_family_options,
            ));
        }
        // Wrap the DB-wide options immediately so the pointer is owned (and
        // thus freed) by the Rust side from here on.
        let options = Options {
            inner: db_options,
            outlive: OptionsMustOutliveDB::default(),
        };
        // read_column_descriptors frees column_family_names and the column_family_options array.
        // We can't call rocksdb_load_latest_options_destroy because it also frees options, and
        // the individual `column_family_options` pointers. We want to return them.
        let column_families = unsafe {
            Options::read_column_descriptors(
                num_column_families,
                column_family_names,
                column_family_options,
            )
        };
        Ok((options, column_families))
    }
989
990 /// Constructs a new `DBOptions` from `self` and a string `opts_str` with the syntax detailed in the blogpost
991 /// [Reading RocksDB options from a file](https://rocksdb.org/blog/2015/02/24/reading-rocksdb-options-from-a-file.html)
992 pub fn get_options_from_string<S: AsRef<str>>(
993 &mut self,
994 opts_str: S,
995 ) -> Result<Options, Error> {
996 // create the rocksdb_options_t and immediately wrap it so we don't forget to free it
997 let options = Options {
998 inner: unsafe { ffi::rocksdb_options_create() },
999 outlive: OptionsMustOutliveDB::default(),
1000 };
1001
1002 let opts_cstr = opts_str.as_ref().into_c_string().map_err(|e| {
1003 Error::new(format!(
1004 "options string must not contain NUL (0x00) bytes: {e}"
1005 ))
1006 })?;
1007 unsafe {
1008 ffi_try!(ffi::rocksdb_get_options_from_string(
1009 self.inner.cast_const(),
1010 opts_cstr.as_ptr(),
1011 options.inner,
1012 ));
1013 }
1014 Ok(options)
1015 }
1016
    /// Reads column descriptors from C pointers. This frees the `column_family_names` and
    /// `column_family_options` arrays, and the strings contained in `column_family_names`. It does
    /// *not* free the `rocksdb_options_t*` pointers contained in `column_family_options`.
    ///
    /// # Safety
    /// `column_family_names` and `column_family_options` must each point to
    /// `num_column_families` valid pointers as produced by
    /// `rocksdb_load_latest_options`, and must not be used by the caller
    /// afterwards (both arrays are freed here).
    #[inline]
    unsafe fn read_column_descriptors(
        num_column_families: usize,
        column_family_names: *mut *mut c_char,
        column_family_options: *mut *mut ffi::rocksdb_options_t,
    ) -> Vec<ColumnFamilyDescriptor> {
        // Convert each C name into an owned Rust String, freeing the C copy.
        let column_family_names_iter = unsafe {
            slice::from_raw_parts(column_family_names, num_column_families)
                .iter()
                .map(|ptr| from_cstr_and_free(*ptr))
        };
        // Take ownership of each per-CF options pointer by wrapping it in `Options`.
        let column_family_options_iter = unsafe {
            slice::from_raw_parts(column_family_options, num_column_families)
                .iter()
                .map(|ptr| Options {
                    inner: *ptr,
                    outlive: OptionsMustOutliveDB::default(),
                })
        };
        // ttl is always Disabled here: RocksDB does not persist cf ttl in the
        // options file, so there is nothing to restore.
        let column_descriptors = column_family_names_iter
            .zip(column_family_options_iter)
            .map(|(name, options)| ColumnFamilyDescriptor {
                name,
                options,
                ttl: ColumnFamilyTtl::Disabled,
            })
            .collect::<Vec<_>>();

        // free the arrays (collect() above has fully consumed both iterators,
        // so nothing still reads through these pointers)
        unsafe {
            // we freed each string in the column_family_names array using from_cstr_and_free
            ffi::rocksdb_free(column_family_names as *mut c_void);
            // we don't want to free the contents of this array because we return it
            ffi::rocksdb_free(column_family_options as *mut c_void);
            column_descriptors
        }
    }
1057
1058 /// By default, RocksDB uses only one background thread for flush and
1059 /// compaction. Calling this function will set it up such that total of
1060 /// `total_threads` is used. Good value for `total_threads` is the number of
1061 /// cores. You almost definitely want to call this function if your system is
1062 /// bottlenecked by RocksDB.
1063 ///
1064 /// # Examples
1065 ///
1066 /// ```
1067 /// use rust_rocksdb::Options;
1068 ///
1069 /// let mut opts = Options::default();
1070 /// opts.increase_parallelism(3);
1071 /// ```
1072 pub fn increase_parallelism(&mut self, parallelism: i32) {
1073 unsafe {
1074 ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
1075 }
1076 }
1077
1078 /// Optimize level style compaction.
1079 ///
1080 /// Default values for some parameters in `Options` are not optimized for heavy
1081 /// workloads and big datasets, which means you might observe write stalls under
1082 /// some conditions.
1083 ///
1084 /// This can be used as one of the starting points for tuning RocksDB options in
1085 /// such cases.
1086 ///
1087 /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
1088 /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
1089 /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override if those
1090 /// parameters were set before.
1091 ///
1092 /// It sets buffer sizes so that memory consumption would be constrained by
1093 /// `memtable_memory_budget`.
1094 pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
1095 unsafe {
1096 ffi::rocksdb_options_optimize_level_style_compaction(
1097 self.inner,
1098 memtable_memory_budget as u64,
1099 );
1100 }
1101 }
1102
1103 /// Optimize universal style compaction.
1104 ///
1105 /// Default values for some parameters in `Options` are not optimized for heavy
1106 /// workloads and big datasets, which means you might observe write stalls under
1107 /// some conditions.
1108 ///
1109 /// This can be used as one of the starting points for tuning RocksDB options in
1110 /// such cases.
1111 ///
1112 /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
1113 /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
1114 /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override if those
1115 /// parameters were set before.
1116 ///
1117 /// It sets buffer sizes so that memory consumption would be constrained by
1118 /// `memtable_memory_budget`.
1119 pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
1120 unsafe {
1121 ffi::rocksdb_options_optimize_universal_style_compaction(
1122 self.inner,
1123 memtable_memory_budget as u64,
1124 );
1125 }
1126 }
1127
1128 /// If true, the database will be created if it is missing.
1129 ///
1130 /// Default: `false`
1131 ///
1132 /// # Examples
1133 ///
1134 /// ```
1135 /// use rust_rocksdb::Options;
1136 ///
1137 /// let mut opts = Options::default();
1138 /// opts.create_if_missing(true);
1139 /// ```
1140 pub fn create_if_missing(&mut self, create_if_missing: bool) {
1141 unsafe {
1142 ffi::rocksdb_options_set_create_if_missing(
1143 self.inner,
1144 c_uchar::from(create_if_missing),
1145 );
1146 }
1147 }
1148
1149 /// If true, any column families that didn't exist when opening the database
1150 /// will be created.
1151 ///
1152 /// Default: `false`
1153 ///
1154 /// # Examples
1155 ///
1156 /// ```
1157 /// use rust_rocksdb::Options;
1158 ///
1159 /// let mut opts = Options::default();
1160 /// opts.create_missing_column_families(true);
1161 /// ```
1162 pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
1163 unsafe {
1164 ffi::rocksdb_options_set_create_missing_column_families(
1165 self.inner,
1166 c_uchar::from(create_missing_cfs),
1167 );
1168 }
1169 }
1170
1171 /// Specifies whether an error should be raised if the database already exists.
1172 ///
1173 /// Default: false
1174 pub fn set_error_if_exists(&mut self, enabled: bool) {
1175 unsafe {
1176 ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
1177 }
1178 }
1179
1180 /// Enable/disable paranoid checks.
1181 ///
1182 /// If true, the implementation will do aggressive checking of the
1183 /// data it is processing and will stop early if it detects any
1184 /// errors. This may have unforeseen ramifications: for example, a
1185 /// corruption of one DB entry may cause a large number of entries to
1186 /// become unreadable or for the entire DB to become unopenable.
1187 /// If any of the writes to the database fails (Put, Delete, Merge, Write),
1188 /// the database will switch to read-only mode and fail all other
1189 /// Write operations.
1190 ///
1191 /// Default: false
1192 pub fn set_paranoid_checks(&mut self, enabled: bool) {
1193 unsafe {
1194 ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
1195 }
1196 }
1197
1198 /// A list of paths where SST files can be put into, with its target size.
1199 /// Newer data is placed into paths specified earlier in the vector while
1200 /// older data gradually moves to paths specified later in the vector.
1201 ///
1202 /// For example, you have a flash device with 10GB allocated for the DB,
1203 /// as well as a hard drive of 2TB, you should config it to be:
1204 /// [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
1205 ///
1206 /// The system will try to guarantee data under each path is close to but
1207 /// not larger than the target size. But current and future file sizes used
1208 /// by determining where to place a file are based on best-effort estimation,
1209 /// which means there is a chance that the actual size under the directory
1210 /// is slightly more than target size under some workloads. User should give
1211 /// some buffer room for those cases.
1212 ///
1213 /// If none of the paths has sufficient room to place a file, the file will
1214 /// be placed to the last path anyway, despite to the target size.
1215 ///
1216 /// Placing newer data to earlier paths is also best-efforts. User should
1217 /// expect user files to be placed in higher levels in some extreme cases.
1218 ///
1219 /// If left empty, only one path will be used, which is `path` passed when
1220 /// opening the DB.
1221 ///
1222 /// Default: empty
1223 pub fn set_db_paths(&mut self, paths: &[DBPath]) {
1224 let mut paths: Vec<_> = paths.iter().map(|path| path.inner.cast_const()).collect();
1225 let num_paths = paths.len();
1226 unsafe {
1227 ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
1228 }
1229 }
1230
1231 /// Use the specified object to interact with the environment,
1232 /// e.g. to read/write files, schedule background work, etc. In the near
1233 /// future, support for doing storage operations such as read/write files
1234 /// through env will be deprecated in favor of file_system.
1235 ///
1236 /// Default: Env::default()
1237 pub fn set_env(&mut self, env: &Env) {
1238 unsafe {
1239 ffi::rocksdb_options_set_env(self.inner, env.0.inner);
1240 }
1241 self.outlive.env = Some(env.clone());
1242 }
1243
1244 /// Sets the compression algorithm that will be used for compressing blocks.
1245 ///
1246 /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
1247 /// snappy feature is not enabled).
1248 ///
1249 /// # Examples
1250 ///
1251 /// ```
1252 /// use rust_rocksdb::{Options, DBCompressionType};
1253 ///
1254 /// let mut opts = Options::default();
1255 /// opts.set_compression_type(DBCompressionType::Snappy);
1256 /// ```
1257 pub fn set_compression_type(&mut self, t: DBCompressionType) {
1258 unsafe {
1259 ffi::rocksdb_options_set_compression(self.inner, t as c_int);
1260 }
1261 }
1262
1263 /// Number of threads for parallel compression.
1264 /// Parallel compression is enabled only if threads > 1.
1265 /// THE FEATURE IS STILL EXPERIMENTAL
1266 ///
1267 /// See [code](https://github.com/facebook/rocksdb/blob/v8.6.7/include/rocksdb/advanced_options.h#L116-L127)
1268 /// for more information.
1269 ///
1270 /// Default: 1
1271 ///
1272 /// Examples
1273 ///
1274 /// ```
1275 /// use rust_rocksdb::{Options, DBCompressionType};
1276 ///
1277 /// let mut opts = Options::default();
1278 /// opts.set_compression_type(DBCompressionType::Zstd);
1279 /// opts.set_compression_options_parallel_threads(3);
1280 /// ```
1281 pub fn set_compression_options_parallel_threads(&mut self, num: i32) {
1282 unsafe {
1283 ffi::rocksdb_options_set_compression_options_parallel_threads(self.inner, num);
1284 }
1285 }
1286
1287 /// Sets the compression algorithm that will be used for compressing WAL.
1288 ///
1289 /// At present, only ZSTD compression is supported!
1290 ///
1291 /// Default: `DBCompressionType::None`
1292 ///
1293 /// # Examples
1294 ///
1295 /// ```
1296 /// use rust_rocksdb::{Options, DBCompressionType};
1297 ///
1298 /// let mut opts = Options::default();
1299 /// opts.set_wal_compression_type(DBCompressionType::Zstd);
1300 /// // Or None to disable it
1301 /// opts.set_wal_compression_type(DBCompressionType::None);
1302 /// ```
1303 pub fn set_wal_compression_type(&mut self, t: DBCompressionType) {
1304 match t {
1305 DBCompressionType::None | DBCompressionType::Zstd => unsafe {
1306 ffi::rocksdb_options_set_wal_compression(self.inner, t as c_int);
1307 },
1308 other => unimplemented!("{:?} is not supported for WAL compression", other),
1309 }
1310 }
1311
1312 /// Sets the bottom-most compression algorithm that will be used for
1313 /// compressing blocks at the bottom-most level.
1314 ///
1315 /// Note that to actually enable bottom-most compression configuration after
1316 /// setting the compression type, it needs to be enabled by calling
1317 /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
1318 /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with `enabled` argument
1319 /// set to `true`.
1320 ///
1321 /// # Examples
1322 ///
1323 /// ```
1324 /// use rust_rocksdb::{Options, DBCompressionType};
1325 ///
1326 /// let mut opts = Options::default();
1327 /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1328 /// opts.set_bottommost_zstd_max_train_bytes(0, true);
1329 /// ```
1330 pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
1331 unsafe {
1332 ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
1333 }
1334 }
1335
1336 /// Different levels can have different compression policies. There
1337 /// are cases where most lower levels would like to use quick compression
1338 /// algorithms while the higher levels (which have more data) use
1339 /// compression algorithms that have better compression but could
1340 /// be slower. This array, if non-empty, should have an entry for
1341 /// each level of the database; these override the value specified in
1342 /// the previous field 'compression'.
1343 ///
1344 /// # Examples
1345 ///
1346 /// ```
1347 /// use rust_rocksdb::{Options, DBCompressionType};
1348 ///
1349 /// let mut opts = Options::default();
1350 /// opts.set_compression_per_level(&[
1351 /// DBCompressionType::None,
1352 /// DBCompressionType::None,
1353 /// DBCompressionType::Snappy,
1354 /// DBCompressionType::Snappy,
1355 /// DBCompressionType::Snappy
1356 /// ]);
1357 /// ```
1358 pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
1359 unsafe {
1360 let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
1361 ffi::rocksdb_options_set_compression_per_level(
1362 self.inner,
1363 level_types.as_mut_ptr(),
1364 level_types.len() as size_t,
1365 );
1366 }
1367 }
1368
1369 /// Maximum size of dictionaries used to prime the compression library.
1370 /// Enabling dictionary can improve compression ratios when there are
1371 /// repetitions across data blocks.
1372 ///
1373 /// The dictionary is created by sampling the SST file data. If
1374 /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
1375 /// dictionary generator. Otherwise, the random samples are used directly as
1376 /// the dictionary.
1377 ///
1378 /// When compression dictionary is disabled, we compress and write each block
1379 /// before buffering data for the next one. When compression dictionary is
1380 /// enabled, we buffer all SST file data in-memory so we can sample it, as data
1381 /// can only be compressed and written after the dictionary has been finalized.
1382 /// So users of this feature may see increased memory usage.
1383 ///
1384 /// Default: `0`
1385 ///
1386 /// # Examples
1387 ///
1388 /// ```
1389 /// use rust_rocksdb::Options;
1390 ///
1391 /// let mut opts = Options::default();
1392 /// opts.set_compression_options(4, 5, 6, 7);
1393 /// ```
1394 pub fn set_compression_options(
1395 &mut self,
1396 w_bits: c_int,
1397 level: c_int,
1398 strategy: c_int,
1399 max_dict_bytes: c_int,
1400 ) {
1401 unsafe {
1402 ffi::rocksdb_options_set_compression_options(
1403 self.inner,
1404 w_bits,
1405 level,
1406 strategy,
1407 max_dict_bytes,
1408 );
1409 }
1410 }
1411
1412 /// Sets compression options for blocks at the bottom-most level. Meaning
1413 /// of all settings is the same as in [`set_compression_options`](#method.set_compression_options) method but
1414 /// affect only the bottom-most compression which is set using
1415 /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
1416 ///
1417 /// # Examples
1418 ///
1419 /// ```
1420 /// use rust_rocksdb::{Options, DBCompressionType};
1421 ///
1422 /// let mut opts = Options::default();
1423 /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
1424 /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
1425 /// ```
1426 pub fn set_bottommost_compression_options(
1427 &mut self,
1428 w_bits: c_int,
1429 level: c_int,
1430 strategy: c_int,
1431 max_dict_bytes: c_int,
1432 enabled: bool,
1433 ) {
1434 unsafe {
1435 ffi::rocksdb_options_set_bottommost_compression_options(
1436 self.inner,
1437 w_bits,
1438 level,
1439 strategy,
1440 max_dict_bytes,
1441 c_uchar::from(enabled),
1442 );
1443 }
1444 }
1445
1446 /// Sets maximum size of training data passed to zstd's dictionary trainer. Using zstd's
1447 /// dictionary trainer can achieve even better compression ratio improvements than using
1448 /// `max_dict_bytes` alone.
1449 ///
1450 /// The training data will be used to generate a dictionary of max_dict_bytes.
1451 ///
1452 /// Default: 0.
1453 pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
1454 unsafe {
1455 ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
1456 }
1457 }
1458
1459 /// Sets maximum size of training data passed to zstd's dictionary trainer
1460 /// when compressing the bottom-most level. Using zstd's dictionary trainer
1461 /// can achieve even better compression ratio improvements than using
1462 /// `max_dict_bytes` alone.
1463 ///
1464 /// The training data will be used to generate a dictionary of
1465 /// `max_dict_bytes`.
1466 ///
1467 /// Default: 0.
1468 pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
1469 unsafe {
1470 ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
1471 self.inner,
1472 value,
1473 c_uchar::from(enabled),
1474 );
1475 }
1476 }
1477
1478 /// If non-zero, we perform bigger reads when doing compaction. If you're
1479 /// running RocksDB on spinning disks, you should set this to at least 2MB.
1480 /// That way RocksDB's compaction is doing sequential instead of random reads.
1481 ///
1482 /// Default: 2 * 1024 * 1024 (2 MB)
1483 pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
1484 unsafe {
1485 ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
1486 }
1487 }
1488
1489 /// Allow RocksDB to pick dynamic base of bytes for levels.
1490 /// With this feature turned on, RocksDB will automatically adjust max bytes for each level.
1491 /// The goal of this feature is to have lower bound on size amplification.
1492 ///
1493 /// Default: false.
1494 pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
1495 unsafe {
1496 ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
1497 self.inner,
1498 c_uchar::from(v),
1499 );
1500 }
1501 }
1502
1503 /// This option has different meanings for different compaction styles:
1504 ///
1505 /// Leveled: files older than `periodic_compaction_seconds` will be picked up
1506 /// for compaction and will be re-written to the same level as they were
1507 /// before if level_compaction_dynamic_level_bytes is disabled. Otherwise,
1508 /// it will rewrite files to the next level except for the last level files
1509 /// to the same level.
1510 ///
1511 /// FIFO: not supported. Setting this option has no effect for FIFO compaction.
1512 ///
1513 /// Universal: when there are files older than `periodic_compaction_seconds`,
1514 /// rocksdb will try to do as large a compaction as possible including the
1515 /// last level. Such compaction is only skipped if only last level is to
1516 /// be compacted and no file in last level is older than
1517 /// `periodic_compaction_seconds`. See more in
1518 /// UniversalCompactionBuilder::PickPeriodicCompaction().
1519 /// For backward compatibility, the effective value of this option takes
1520 /// into account the value of option `ttl`. The logic is as follows:
1521 ///
1522 /// - both options are set to 30 days if they have the default value.
1523 /// - if both options are zero, zero is picked. Otherwise, we take the min
1524 /// value among non-zero options values (i.e. takes the stricter limit).
1525 ///
1526 /// One main use of the feature is to make sure a file goes through compaction
1527 /// filters periodically. Users can also use the feature to clear up SST
1528 /// files using old format.
1529 ///
1530 /// A file's age is computed by looking at file_creation_time or creation_time
1531 /// table properties in order, if they have valid non-zero values; if not, the
1532 /// age is based on the file's last modified time (given by the underlying
1533 /// Env).
1534 ///
1535 /// This option only supports block based table format for any compaction
1536 /// style.
1537 ///
1538 /// unit: seconds. Ex: 7 days = 7 * 24 * 60 * 60
1539 ///
1540 /// Values:
1541 /// 0: Turn off Periodic compactions.
1542 /// UINT64_MAX - 1 (0xfffffffffffffffe) is special flag to allow RocksDB to
1543 /// pick default.
1544 ///
1545 /// Default: 30 days if using block based table format + compaction filter +
1546 /// leveled compaction or block based table format + universal compaction.
1547 /// 0 (disabled) otherwise.
1548 ///
1549 pub fn set_periodic_compaction_seconds(&mut self, secs: u64) {
1550 unsafe {
1551 ffi::rocksdb_options_set_periodic_compaction_seconds(self.inner, secs);
1552 }
1553 }
1554
1555 /// When an iterator scans this number of invisible entries (tombstones or
1556 /// hidden puts) from the active memtable during a single iterator operation,
1557 /// we will attempt to flush the memtable. Currently only forward scans are
1558 /// supported (SeekToFirst(), Seek() and Next()).
1559 /// This option helps to reduce the overhead of scanning through a
1560 /// large number of entries in memtable.
1561 /// Users should consider enable deletion-triggered-compaction (see
1562 /// CompactOnDeletionCollectorFactory) together with this option to compact
1563 /// away tombstones after the memtable is flushed.
1564 ///
1565 /// Default: 0 (disabled)
1566 /// Dynamically changeable through the SetOptions() API.
1567 pub fn set_memtable_op_scan_flush_trigger(&mut self, num: u32) {
1568 unsafe {
1569 ffi::rocksdb_options_set_memtable_op_scan_flush_trigger(self.inner, num);
1570 }
1571 }
1572
1573 /// Similar to `memtable_op_scan_flush_trigger`, but this option applies to
1574 /// Next() calls between Seeks or until iterator destruction. If the average
1575 /// of the number of invisible entries scanned from the active memtable, the
1576 /// memtable will be marked for flush.
1577 /// Note that to avoid the case where the window between Seeks is too small,
1578 /// the option only takes effect if the total number of hidden entries scanned
1579 /// within a window is at least `memtable_op_scan_flush_trigger`. So this
1580 /// option is only effective when `memtable_op_scan_flush_trigger` is set.
1581 ///
1582 /// This option should be set to a lower value than
1583 /// `memtable_op_scan_flush_trigger`. It covers the case where an iterator
1584 /// scans through an expensive key range with many invisible entries from the
1585 /// active memtable, but the number of invisible entries per operation does not
1586 /// exceed `memtable_op_scan_flush_trigger`.
1587 ///
1588 /// Default: 0 (disabled)
1589 /// Dynamically changeable through the SetOptions() API.
1590 pub fn set_memtable_avg_op_scan_flush_trigger(&mut self, num: u32) {
1591 unsafe {
1592 ffi::rocksdb_options_set_memtable_avg_op_scan_flush_trigger(self.inner, num);
1593 }
1594 }
1595
1596 /// This option has different meanings for different compaction styles:
1597 ///
1598 /// Leveled: Non-bottom-level files with all keys older than TTL will go
1599 /// through the compaction process. This usually happens in a cascading
1600 /// way so that those entries will be compacted to bottommost level/file.
1601 /// The feature is used to remove stale entries that have been deleted or
1602 /// updated from the file system.
1603 ///
1604 /// FIFO: Files with all keys older than TTL will be deleted. TTL is only
1605 /// supported if option max_open_files is set to -1.
1606 ///
1607 /// Universal: users should only set the option `periodic_compaction_seconds`
1608 /// instead. For backward compatibility, this option has the same
1609 /// meaning as `periodic_compaction_seconds`. See more in comments for
1610 /// `periodic_compaction_seconds` on the interaction between these two
1611 /// options.
1612 ///
1613 /// This option only supports block based table format for any compaction
1614 /// style.
1615 ///
1616 /// unit: seconds. Ex: 1 day = 1 * 24 * 60 * 60
1617 /// 0 means disabling.
1618 /// UINT64_MAX - 1 (0xfffffffffffffffe) is special flag to allow RocksDB to
1619 /// pick default.
1620 ///
1621 /// Default: 30 days if using block based table. 0 (disable) otherwise.
1622 ///
1623 /// Dynamically changeable
1624 /// Note that dynamically changing this option only works for leveled and FIFO
1625 /// compaction. For universal compaction, dynamically changing this option has
1626 /// no effect, users should dynamically change `periodic_compaction_seconds`
1627 /// instead.
1628 pub fn set_ttl(&mut self, secs: u64) {
1629 unsafe {
1630 ffi::rocksdb_options_set_ttl(self.inner, secs);
1631 }
1632 }
1633
    /// Sets a merge operator whose merge function is associative: the same
    /// `full_merge_fn` is installed for both full and partial merges. Use
    /// [`Options::set_merge_operator`] when partial merges need different logic.
    ///
    /// # Panics
    /// Panics if `name` cannot be converted to a `CString` (e.g. it contains
    /// interior NUL bytes).
    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
        &mut self,
        name: impl CStrLike,
        full_merge_fn: F,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: name.into_c_string().unwrap(),
            full_merge_fn: full_merge_fn.clone(),
            partial_merge_fn: full_merge_fn,
        });

        unsafe {
            // Ownership of `cb` is handed to the C side; it is presumably
            // reclaimed via `destructor_callback` when the operator is dropped.
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, F>),
                Some(full_merge_callback::<F, F>),
                Some(partial_merge_callback::<F, F>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, F>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }
1657
    /// Sets a merge operator with separate callbacks for full merges
    /// (`full_merge_fn`) and partial merges (`partial_merge_fn`).
    ///
    /// # Panics
    /// Panics if `name` cannot be converted to a `CString` (e.g. it contains
    /// interior NUL bytes).
    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
        &mut self,
        name: impl CStrLike,
        full_merge_fn: F,
        partial_merge_fn: PF,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: name.into_c_string().unwrap(),
            full_merge_fn,
            partial_merge_fn,
        });

        unsafe {
            // Ownership of `cb` is handed to the C side; it is presumably
            // reclaimed via `destructor_callback` when the operator is dropped.
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, PF>),
                Some(full_merge_callback::<F, PF>),
                Some(partial_merge_callback::<F, PF>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, PF>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }
1682
    /// Deprecated alias for [`Options::set_merge_operator_associative`].
    #[deprecated(
        since = "0.5.0",
        note = "add_merge_operator has been renamed to set_merge_operator"
    )]
    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
        self.set_merge_operator_associative(name, merge_fn);
    }
1690
    /// Sets a compaction filter used to determine if entries should be kept, changed,
    /// or removed during compaction.
    ///
    /// An example use case is to remove entries with an expired TTL.
    ///
    /// If you take a snapshot of the database, only values written since the last
    /// snapshot will be passed through the compaction filter.
    ///
    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
    /// simultaneously.
    ///
    /// # Panics
    /// Panics if `name` cannot be converted to a `CString`, or if
    /// `rocksdb_compactionfilter_create` returns a null pointer.
    pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
    where
        F: CompactionFilterFn + Send + 'static,
    {
        let cb = Box::new(CompactionFilterCallback {
            name: name.into_c_string().unwrap(),
            filter_fn,
        });

        let filter = unsafe {
            // `cb` is handed to the C side; `destructor_callback` is registered
            // to reclaim it when the filter is destroyed.
            let cf = ffi::rocksdb_compactionfilter_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
            );
            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);

            OwnedCompactionFilter::new(NonNull::new(cf).unwrap())
        };
        // Keep the filter alive for as long as these options outlive the DB.
        self.outlive.compaction_filter = Some(Arc::new(filter));
    }
1723
    /// Registers an [`EventListener`] whose callbacks RocksDB will invoke on
    /// database events.
    pub fn add_event_listener<L: EventListener>(&mut self, l: L) {
        let handle = new_event_listener(l);
        unsafe { ffi::rocksdb_options_add_eventlistener(self.inner, handle.inner) }
    }
1728
    /// This is a factory that provides compaction filter objects which allow
    /// an application to modify/delete a key-value during background compaction.
    ///
    /// A new filter will be created on each compaction run. If multithreaded
    /// compaction is being used, each created CompactionFilter will only be used
    /// from a single thread and so does not need to be thread-safe.
    ///
    /// Default: nullptr
    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
    where
        F: CompactionFilterFactory + 'static,
    {
        let factory = Box::new(factory);

        unsafe {
            // `factory` is handed to the C side; `destructor_callback` is
            // registered to reclaim it when the factory is destroyed.
            let cff = ffi::rocksdb_compactionfilterfactory_create(
                Box::into_raw(factory).cast::<c_void>(),
                Some(compaction_filter_factory::destructor_callback::<F>),
                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
                Some(compaction_filter_factory::name_callback::<F>),
            );

            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
        }
    }
1754
    /// Sets the comparator used to define the order of keys in the table.
    /// Default: a comparator that uses lexicographic byte-wise ordering
    ///
    /// The client must ensure that the comparator supplied here has the same
    /// name and orders keys *exactly* the same as the comparator provided to
    /// previous open calls on the same DB.
    ///
    /// # Panics
    /// Panics if `name` cannot be converted to a `CString`, or if
    /// `rocksdb_comparator_create` returns a null pointer.
    pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
        let cb = Box::new(ComparatorCallback {
            name: name.into_c_string().unwrap(),
            compare_fn,
        });

        let cmp = unsafe {
            // `cb` is handed to the C side; `destructor_callback` is registered
            // to reclaim it when the comparator is destroyed.
            let cmp = ffi::rocksdb_comparator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(ComparatorCallback::destructor_callback),
                Some(ComparatorCallback::compare_callback),
                Some(ComparatorCallback::name_callback),
            );
            ffi::rocksdb_options_set_comparator(self.inner, cmp);
            OwnedComparator::new(NonNull::new(cmp).unwrap())
        };
        // Keep the comparator alive for as long as these options are in use.
        self.outlive.comparator = Some(Arc::new(cmp));
    }
1779
1780 /// Sets the comparator that are timestamp-aware, used to define the order of keys in the table,
1781 /// taking timestamp into consideration.
1782 /// Find more information on timestamp-aware comparator on [here](https://github.com/facebook/rocksdb/wiki/User-defined-Timestamp)
1783 ///
1784 /// The client must ensure that the comparator supplied here has the same
1785 /// name and orders keys *exactly* the same as the comparator provided to
1786 /// previous open calls on the same DB.
1787 pub fn set_comparator_with_ts(
1788 &mut self,
1789 name: impl CStrLike,
1790 timestamp_size: usize,
1791 compare_fn: Box<CompareFn>,
1792 compare_ts_fn: Box<CompareTsFn>,
1793 compare_without_ts_fn: Box<CompareWithoutTsFn>,
1794 ) {
1795 let cb = Box::new(ComparatorWithTsCallback {
1796 name: name.into_c_string().unwrap(),
1797 compare_fn,
1798 compare_ts_fn,
1799 compare_without_ts_fn,
1800 });
1801
1802 let cmp = unsafe {
1803 let cmp = ffi::rocksdb_comparator_with_ts_create(
1804 Box::into_raw(cb).cast::<c_void>(),
1805 Some(ComparatorWithTsCallback::destructor_callback),
1806 Some(ComparatorWithTsCallback::compare_callback),
1807 Some(ComparatorWithTsCallback::compare_ts_callback),
1808 Some(ComparatorWithTsCallback::compare_without_ts_callback),
1809 Some(ComparatorWithTsCallback::name_callback),
1810 timestamp_size,
1811 );
1812 ffi::rocksdb_options_set_comparator(self.inner, cmp);
1813 OwnedComparator::new(NonNull::new(cmp).unwrap())
1814 };
1815 self.outlive.comparator = Some(Arc::new(cmp));
1816 }
1817
1818 pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
1819 unsafe {
1820 ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
1821 }
1822 }
1823
1824 // Use this if you don't need to keep the data sorted, i.e. you'll never use
1825 // an iterator, only Put() and Get() API calls
1826 //
1827 pub fn optimize_for_point_lookup(&mut self, block_cache_size_mb: u64) {
1828 unsafe {
1829 ffi::rocksdb_options_optimize_for_point_lookup(self.inner, block_cache_size_mb);
1830 }
1831 }
1832
1833 /// Sets the optimize_filters_for_hits flag
1834 ///
1835 /// Default: `false`
1836 ///
1837 /// # Examples
1838 ///
1839 /// ```
1840 /// use rust_rocksdb::Options;
1841 ///
1842 /// let mut opts = Options::default();
1843 /// opts.set_optimize_filters_for_hits(true);
1844 /// ```
1845 pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
1846 unsafe {
1847 ffi::rocksdb_options_set_optimize_filters_for_hits(
1848 self.inner,
1849 c_int::from(optimize_for_hits),
1850 );
1851 }
1852 }
1853
1854 /// Sets the periodicity when obsolete files get deleted.
1855 ///
1856 /// The files that get out of scope by compaction
1857 /// process will still get automatically delete on every compaction,
1858 /// regardless of this setting.
1859 ///
1860 /// Default: 6 hours
1861 pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
1862 unsafe {
1863 ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
1864 }
1865 }
1866
1867 /// Prepare the DB for bulk loading.
1868 ///
1869 /// All data will be in level 0 without any automatic compaction.
1870 /// It's recommended to manually call CompactRange(NULL, NULL) before reading
1871 /// from the database, because otherwise the read can be very slow.
1872 pub fn prepare_for_bulk_load(&mut self) {
1873 unsafe {
1874 ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
1875 }
1876 }
1877
1878 /// Sets the number of open files that can be used by the DB. You may need to
1879 /// increase this if your database has a large working set. Value `-1` means
1880 /// files opened are always kept open. You can estimate number of files based
1881 /// on target_file_size_base and target_file_size_multiplier for level-based
1882 /// compaction. For universal-style compaction, you can usually set it to `-1`.
1883 ///
1884 /// Default: `-1`
1885 ///
1886 /// # Examples
1887 ///
1888 /// ```
1889 /// use rust_rocksdb::Options;
1890 ///
1891 /// let mut opts = Options::default();
1892 /// opts.set_max_open_files(10);
1893 /// ```
1894 pub fn set_max_open_files(&mut self, nfiles: c_int) {
1895 unsafe {
1896 ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
1897 }
1898 }
1899
1900 /// If max_open_files is -1, DB will open all files on DB::Open(). You can
1901 /// use this option to increase the number of threads used to open the files.
1902 /// Default: 16
1903 pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
1904 unsafe {
1905 ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
1906 }
1907 }
1908
1909 /// By default, writes to stable storage use fdatasync (on platforms
1910 /// where this function is available). If this option is true,
1911 /// fsync is used instead.
1912 ///
1913 /// fsync and fdatasync are equally safe for our purposes and fdatasync is
1914 /// faster, so it is rarely necessary to set this option. It is provided
1915 /// as a workaround for kernel/filesystem bugs, such as one that affected
1916 /// fdatasync with ext4 in kernel versions prior to 3.7.
1917 ///
1918 /// Default: `false`
1919 ///
1920 /// # Examples
1921 ///
1922 /// ```
1923 /// use rust_rocksdb::Options;
1924 ///
1925 /// let mut opts = Options::default();
1926 /// opts.set_use_fsync(true);
1927 /// ```
1928 pub fn set_use_fsync(&mut self, useit: bool) {
1929 unsafe {
1930 ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
1931 }
1932 }
1933
1934 /// Returns the value of the `use_fsync` option.
1935 pub fn get_use_fsync(&self) -> bool {
1936 let val = unsafe { ffi::rocksdb_options_get_use_fsync(self.inner) };
1937 val != 0
1938 }
1939
1940 /// Specifies the absolute info LOG dir.
1941 ///
1942 /// If it is empty, the log files will be in the same dir as data.
1943 /// If it is non empty, the log files will be in the specified dir,
1944 /// and the db data dir's absolute path will be used as the log file
1945 /// name's prefix.
1946 ///
1947 /// Default: empty
1948 pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
1949 let p = to_cpath(path).unwrap();
1950 unsafe {
1951 ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
1952 }
1953 }
1954
1955 /// Specifies the log level.
1956 /// Consider the `LogLevel` enum for a list of possible levels.
1957 ///
1958 /// Default: Info
1959 ///
1960 /// # Examples
1961 ///
1962 /// ```
1963 /// use rust_rocksdb::{Options, LogLevel};
1964 ///
1965 /// let mut opts = Options::default();
1966 /// opts.set_log_level(LogLevel::Warn);
1967 /// ```
1968 pub fn set_log_level(&mut self, level: LogLevel) {
1969 unsafe {
1970 ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
1971 }
1972 }
1973
1974 /// Allows OS to incrementally sync files to disk while they are being
1975 /// written, asynchronously, in the background. This operation can be used
1976 /// to smooth out write I/Os over time. Users shouldn't rely on it for
1977 /// persistency guarantee.
1978 /// Issue one request for every bytes_per_sync written. `0` turns it off.
1979 ///
1980 /// Default: `0`
1981 ///
1982 /// You may consider using rate_limiter to regulate write rate to device.
1983 /// When rate limiter is enabled, it automatically enables bytes_per_sync
1984 /// to 1MB.
1985 ///
1986 /// This option applies to table files
1987 ///
1988 /// # Examples
1989 ///
1990 /// ```
1991 /// use rust_rocksdb::Options;
1992 ///
1993 /// let mut opts = Options::default();
1994 /// opts.set_bytes_per_sync(1024 * 1024);
1995 /// ```
1996 pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
1997 unsafe {
1998 ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
1999 }
2000 }
2001
2002 /// Same as bytes_per_sync, but applies to WAL files.
2003 ///
2004 /// Default: 0, turned off
2005 ///
2006 /// Dynamically changeable through SetDBOptions() API.
2007 pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
2008 unsafe {
2009 ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
2010 }
2011 }
2012
2013 /// Sets the maximum buffer size that is used by WritableFileWriter.
2014 ///
2015 /// On Windows, we need to maintain an aligned buffer for writes.
2016 /// We allow the buffer to grow until it's size hits the limit in buffered
2017 /// IO and fix the buffer size when using direct IO to ensure alignment of
2018 /// write requests if the logical sector size is unusual
2019 ///
2020 /// Default: 1024 * 1024 (1 MB)
2021 ///
2022 /// Dynamically changeable through SetDBOptions() API.
2023 pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
2024 unsafe {
2025 ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
2026 }
2027 }
2028
2029 /// If true, allow multi-writers to update mem tables in parallel.
2030 /// Only some memtable_factory-s support concurrent writes; currently it
2031 /// is implemented only for SkipListFactory. Concurrent memtable writes
2032 /// are not compatible with inplace_update_support or filter_deletes.
2033 /// It is strongly recommended to set enable_write_thread_adaptive_yield
2034 /// if you are going to use this feature.
2035 ///
2036 /// Default: true
2037 ///
2038 /// # Examples
2039 ///
2040 /// ```
2041 /// use rust_rocksdb::Options;
2042 ///
2043 /// let mut opts = Options::default();
2044 /// opts.set_allow_concurrent_memtable_write(false);
2045 /// ```
2046 pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
2047 unsafe {
2048 ffi::rocksdb_options_set_allow_concurrent_memtable_write(
2049 self.inner,
2050 c_uchar::from(allow),
2051 );
2052 }
2053 }
2054
2055 /// If true, threads synchronizing with the write batch group leader will wait for up to
2056 /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
2057 /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
2058 /// is enabled.
2059 ///
2060 /// Default: true
2061 pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
2062 unsafe {
2063 ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
2064 self.inner,
2065 c_uchar::from(enabled),
2066 );
2067 }
2068 }
2069
2070 /// Specifies whether an iteration->Next() sequentially skips over keys with the same user-key or not.
2071 ///
2072 /// This number specifies the number of keys (with the same userkey)
2073 /// that will be sequentially skipped before a reseek is issued.
2074 ///
2075 /// Default: 8
2076 pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
2077 unsafe {
2078 ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
2079 }
2080 }
2081
2082 /// Enable direct I/O mode for reading
2083 /// they may or may not improve performance depending on the use case
2084 ///
2085 /// Files will be opened in "direct I/O" mode
2086 /// which means that data read from the disk will not be cached or
2087 /// buffered. The hardware buffer of the devices may however still
2088 /// be used. Memory mapped files are not impacted by these parameters.
2089 ///
2090 /// Default: false
2091 ///
2092 /// # Examples
2093 ///
2094 /// ```
2095 /// use rust_rocksdb::Options;
2096 ///
2097 /// let mut opts = Options::default();
2098 /// opts.set_use_direct_reads(true);
2099 /// ```
2100 pub fn set_use_direct_reads(&mut self, enabled: bool) {
2101 unsafe {
2102 ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
2103 }
2104 }
2105
2106 /// Enable direct I/O mode for flush and compaction
2107 ///
2108 /// Files will be opened in "direct I/O" mode
2109 /// which means that data written to the disk will not be cached or
2110 /// buffered. The hardware buffer of the devices may however still
2111 /// be used. Memory mapped files are not impacted by these parameters.
2112 /// they may or may not improve performance depending on the use case
2113 ///
2114 /// Default: false
2115 ///
2116 /// # Examples
2117 ///
2118 /// ```
2119 /// use rust_rocksdb::Options;
2120 ///
2121 /// let mut opts = Options::default();
2122 /// opts.set_use_direct_io_for_flush_and_compaction(true);
2123 /// ```
2124 pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
2125 unsafe {
2126 ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
2127 self.inner,
2128 c_uchar::from(enabled),
2129 );
2130 }
2131 }
2132
2133 /// Enable/disable child process inherit open files.
2134 ///
2135 /// Default: true
2136 pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
2137 unsafe {
2138 ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
2139 }
2140 }
2141
2142 /// Hints to the OS that it should not buffer disk I/O. Enabling this
2143 /// parameter may improve performance but increases pressure on the
2144 /// system cache.
2145 ///
2146 /// The exact behavior of this parameter is platform dependent.
2147 ///
2148 /// On POSIX systems, after RocksDB reads data from disk it will
2149 /// mark the pages as "unneeded". The operating system may or may not
2150 /// evict these pages from memory, reducing pressure on the system
2151 /// cache. If the disk block is requested again this can result in
2152 /// additional disk I/O.
2153 ///
2154 /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
2155 /// which means that data read from the disk will not be cached or
2156 /// bufferized. The hardware buffer of the devices may however still
2157 /// be used. Memory mapped files are not impacted by this parameter.
2158 ///
2159 /// Default: true
2160 ///
2161 /// # Examples
2162 ///
2163 /// ```
2164 /// use rust_rocksdb::Options;
2165 ///
2166 /// let mut opts = Options::default();
2167 /// #[allow(deprecated)]
2168 /// opts.set_allow_os_buffer(false);
2169 /// ```
2170 #[deprecated(
2171 since = "0.7.0",
2172 note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
2173 )]
2174 pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
2175 self.set_use_direct_reads(!is_allow);
2176 self.set_use_direct_io_for_flush_and_compaction(!is_allow);
2177 }
2178
2179 /// Sets the number of shards used for table cache.
2180 ///
2181 /// Default: `6`
2182 ///
2183 /// # Examples
2184 ///
2185 /// ```
2186 /// use rust_rocksdb::Options;
2187 ///
2188 /// let mut opts = Options::default();
2189 /// opts.set_table_cache_num_shard_bits(4);
2190 /// ```
2191 pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
2192 unsafe {
2193 ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
2194 }
2195 }
2196
2197 /// By default target_file_size_multiplier is 1, which means
2198 /// by default files in different levels will have similar size.
2199 ///
2200 /// Dynamically changeable through SetOptions() API
2201 pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
2202 unsafe {
2203 ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
2204 }
2205 }
2206
2207 /// Sets the minimum number of write buffers that will be merged
2208 /// before writing to storage. If set to `1`, then
2209 /// all write buffers are flushed to L0 as individual files and this increases
2210 /// read amplification because a get request has to check in all of these
2211 /// files. Also, an in-memory merge may result in writing lesser
2212 /// data to storage if there are duplicate records in each of these
2213 /// individual write buffers.
2214 ///
2215 /// Default: `1`
2216 ///
2217 /// # Examples
2218 ///
2219 /// ```
2220 /// use rust_rocksdb::Options;
2221 ///
2222 /// let mut opts = Options::default();
2223 /// opts.set_min_write_buffer_number(2);
2224 /// ```
2225 pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
2226 unsafe {
2227 ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
2228 }
2229 }
2230
2231 /// Sets the maximum number of write buffers that are built up in memory.
2232 /// The default and the minimum number is 2, so that when 1 write buffer
2233 /// is being flushed to storage, new writes can continue to the other
2234 /// write buffer.
2235 /// If max_write_buffer_number > 3, writing will be slowed down to
2236 /// options.delayed_write_rate if we are writing to the last write buffer
2237 /// allowed.
2238 ///
2239 /// Default: `2`
2240 ///
2241 /// # Examples
2242 ///
2243 /// ```
2244 /// use rust_rocksdb::Options;
2245 ///
2246 /// let mut opts = Options::default();
2247 /// opts.set_max_write_buffer_number(4);
2248 /// ```
2249 pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
2250 unsafe {
2251 ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
2252 }
2253 }
2254
2255 /// Sets the amount of data to build up in memory (backed by an unsorted log
2256 /// on disk) before converting to a sorted on-disk file.
2257 ///
2258 /// Larger values increase performance, especially during bulk loads.
2259 /// Up to max_write_buffer_number write buffers may be held in memory
2260 /// at the same time,
2261 /// so you may wish to adjust this parameter to control memory usage.
2262 /// Also, a larger write buffer will result in a longer recovery time
2263 /// the next time the database is opened.
2264 ///
2265 /// Note that write_buffer_size is enforced per column family.
2266 /// See db_write_buffer_size for sharing memory across column families.
2267 ///
2268 /// Default: `0x4000000` (64MiB)
2269 ///
2270 /// Dynamically changeable through SetOptions() API
2271 ///
2272 /// # Examples
2273 ///
2274 /// ```
2275 /// use rust_rocksdb::Options;
2276 ///
2277 /// let mut opts = Options::default();
2278 /// opts.set_write_buffer_size(128 * 1024 * 1024);
2279 /// ```
2280 pub fn set_write_buffer_size(&mut self, size: usize) {
2281 unsafe {
2282 ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
2283 }
2284 }
2285
2286 /// Amount of data to build up in memtables across all column
2287 /// families before writing to disk.
2288 ///
2289 /// This is distinct from write_buffer_size, which enforces a limit
2290 /// for a single memtable.
2291 ///
2292 /// This feature is disabled by default. Specify a non-zero value
2293 /// to enable it.
2294 ///
2295 /// Default: 0 (disabled)
2296 ///
2297 /// # Examples
2298 ///
2299 /// ```
2300 /// use rust_rocksdb::Options;
2301 ///
2302 /// let mut opts = Options::default();
2303 /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
2304 /// ```
2305 pub fn set_db_write_buffer_size(&mut self, size: usize) {
2306 unsafe {
2307 ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
2308 }
2309 }
2310
2311 /// Control maximum total data size for a level.
2312 /// max_bytes_for_level_base is the max total for level-1.
2313 /// Maximum number of bytes for level L can be calculated as
2314 /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
2315 /// For example, if max_bytes_for_level_base is 200MB, and if
2316 /// max_bytes_for_level_multiplier is 10, total data size for level-1
2317 /// will be 200MB, total file size for level-2 will be 2GB,
2318 /// and total file size for level-3 will be 20GB.
2319 ///
2320 /// Default: `0x10000000` (256MiB).
2321 ///
2322 /// Dynamically changeable through SetOptions() API
2323 ///
2324 /// # Examples
2325 ///
2326 /// ```
2327 /// use rust_rocksdb::Options;
2328 ///
2329 /// let mut opts = Options::default();
2330 /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
2331 /// ```
2332 pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
2333 unsafe {
2334 ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
2335 }
2336 }
2337
2338 /// Default: `10`
2339 ///
2340 /// # Examples
2341 ///
2342 /// ```
2343 /// use rust_rocksdb::Options;
2344 ///
2345 /// let mut opts = Options::default();
2346 /// opts.set_max_bytes_for_level_multiplier(4.0);
2347 /// ```
2348 pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
2349 unsafe {
2350 ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
2351 }
2352 }
2353
2354 /// The manifest file is rolled over on reaching this limit.
2355 /// The older manifest file be deleted.
2356 /// The default value is MAX_INT so that roll-over does not take place.
2357 ///
2358 /// # Examples
2359 ///
2360 /// ```
2361 /// use rust_rocksdb::Options;
2362 ///
2363 /// let mut opts = Options::default();
2364 /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
2365 /// ```
2366 pub fn set_max_manifest_file_size(&mut self, size: usize) {
2367 unsafe {
2368 ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
2369 }
2370 }
2371
2372 /// Sets the target file size for compaction.
2373 /// target_file_size_base is per-file size for level-1.
2374 /// Target file size for level L can be calculated by
2375 /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
2376 /// For example, if target_file_size_base is 2MB and
2377 /// target_file_size_multiplier is 10, then each file on level-1 will
2378 /// be 2MB, and each file on level 2 will be 20MB,
2379 /// and each file on level-3 will be 200MB.
2380 ///
2381 /// Default: `0x4000000` (64MiB)
2382 ///
2383 /// Dynamically changeable through SetOptions() API
2384 ///
2385 /// # Examples
2386 ///
2387 /// ```
2388 /// use rust_rocksdb::Options;
2389 ///
2390 /// let mut opts = Options::default();
2391 /// opts.set_target_file_size_base(128 * 1024 * 1024);
2392 /// ```
2393 pub fn set_target_file_size_base(&mut self, size: u64) {
2394 unsafe {
2395 ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
2396 }
2397 }
2398
2399 /// Sets the minimum number of write buffers that will be merged together
2400 /// before writing to storage. If set to `1`, then
2401 /// all write buffers are flushed to L0 as individual files and this increases
2402 /// read amplification because a get request has to check in all of these
2403 /// files. Also, an in-memory merge may result in writing lesser
2404 /// data to storage if there are duplicate records in each of these
2405 /// individual write buffers.
2406 ///
2407 /// Default: `1`
2408 ///
2409 /// # Examples
2410 ///
2411 /// ```
2412 /// use rust_rocksdb::Options;
2413 ///
2414 /// let mut opts = Options::default();
2415 /// opts.set_min_write_buffer_number_to_merge(2);
2416 /// ```
2417 pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
2418 unsafe {
2419 ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
2420 }
2421 }
2422
2423 /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
2424 /// level-0 compaction will not be triggered by number of files at all.
2425 ///
2426 /// Default: `4`
2427 ///
2428 /// Dynamically changeable through SetOptions() API
2429 ///
2430 /// # Examples
2431 ///
2432 /// ```
2433 /// use rust_rocksdb::Options;
2434 ///
2435 /// let mut opts = Options::default();
2436 /// opts.set_level_zero_file_num_compaction_trigger(8);
2437 /// ```
2438 pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2439 unsafe {
2440 ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2441 }
2442 }
2443
2444 /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
2445 /// point. A value < `0` means that no writing slowdown will be triggered by
2446 /// number of files in level-0.
2447 ///
2448 /// Default: `20`
2449 ///
2450 /// Dynamically changeable through SetOptions() API
2451 ///
2452 /// # Examples
2453 ///
2454 /// ```
2455 /// use rust_rocksdb::Options;
2456 ///
2457 /// let mut opts = Options::default();
2458 /// opts.set_level_zero_slowdown_writes_trigger(10);
2459 /// ```
2460 pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2461 unsafe {
2462 ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2463 }
2464 }
2465
2466 /// Sets the maximum number of level-0 files. We stop writes at this point.
2467 ///
2468 /// Default: `24`
2469 ///
2470 /// Dynamically changeable through SetOptions() API
2471 ///
2472 /// # Examples
2473 ///
2474 /// ```
2475 /// use rust_rocksdb::Options;
2476 ///
2477 /// let mut opts = Options::default();
2478 /// opts.set_level_zero_stop_writes_trigger(48);
2479 /// ```
2480 pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2481 unsafe {
2482 ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2483 }
2484 }
2485
2486 /// Sets the compaction style.
2487 ///
2488 /// Default: DBCompactionStyle::Level
2489 ///
2490 /// # Examples
2491 ///
2492 /// ```
2493 /// use rust_rocksdb::{Options, DBCompactionStyle};
2494 ///
2495 /// let mut opts = Options::default();
2496 /// opts.set_compaction_style(DBCompactionStyle::Universal);
2497 /// ```
2498 pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2499 unsafe {
2500 ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2501 }
2502 }
2503
2504 /// Sets the options needed to support Universal Style compactions.
2505 pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2506 unsafe {
2507 ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2508 }
2509 }
2510
2511 /// Sets the options for FIFO compaction style.
2512 pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2513 unsafe {
2514 ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2515 }
2516 }
2517
2518 /// Sets unordered_write to true trades higher write throughput with
2519 /// relaxing the immutability guarantee of snapshots. This violates the
2520 /// repeatability one expects from ::Get from a snapshot, as well as
2521 /// ::MultiGet and Iterator's consistent-point-in-time view property.
2522 /// If the application cannot tolerate the relaxed guarantees, it can implement
2523 /// its own mechanisms to work around that and yet benefit from the higher
2524 /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2525 /// two_write_queues=true is one way to achieve immutable snapshots despite
2526 /// unordered_write.
2527 ///
2528 /// By default, i.e., when it is false, rocksdb does not advance the sequence
2529 /// number for new snapshots unless all the writes with lower sequence numbers
2530 /// are already finished. This provides the immutability that we expect from
2531 /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2532 /// snapshots, the snapshot immutability results into Iterator and MultiGet
2533 /// offering consistent-point-in-time view. If set to true, although
2534 /// Read-Your-Own-Write property is still provided, the snapshot immutability
2535 /// property is relaxed: the writes issued after the snapshot is obtained (with
2536 /// larger sequence numbers) will be still not visible to the reads from that
2537 /// snapshot, however, there still might be pending writes (with lower sequence
2538 /// number) that will change the state visible to the snapshot after they are
2539 /// landed to the memtable.
2540 ///
2541 /// Default: false
2542 pub fn set_unordered_write(&mut self, unordered: bool) {
2543 unsafe {
2544 ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2545 }
2546 }
2547
2548 /// Sets maximum number of threads that will
2549 /// concurrently perform a compaction job by breaking it into multiple,
2550 /// smaller ones that are run simultaneously.
2551 ///
2552 /// Default: 1 (i.e. no subcompactions)
2553 pub fn set_max_subcompactions(&mut self, num: u32) {
2554 unsafe {
2555 ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2556 }
2557 }
2558
2559 /// Sets maximum number of concurrent background jobs
2560 /// (compactions and flushes).
2561 ///
2562 /// Default: 2
2563 ///
2564 /// Dynamically changeable through SetDBOptions() API.
2565 pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2566 unsafe {
2567 ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2568 }
2569 }
2570
2571 /// Sets the maximum number of concurrent background compaction jobs, submitted to
2572 /// the default LOW priority thread pool.
2573 /// We first try to schedule compactions based on
2574 /// `base_background_compactions`. If the compaction cannot catch up , we
2575 /// will increase number of compaction threads up to
2576 /// `max_background_compactions`.
2577 ///
2578 /// If you're increasing this, also consider increasing number of threads in
2579 /// LOW priority thread pool. For more information, see
2580 /// Env::SetBackgroundThreads
2581 ///
2582 /// Default: `1`
2583 ///
2584 /// # Examples
2585 ///
2586 /// ```
2587 /// use rust_rocksdb::Options;
2588 ///
2589 /// let mut opts = Options::default();
2590 /// #[allow(deprecated)]
2591 /// opts.set_max_background_compactions(2);
2592 /// ```
2593 #[deprecated(
2594 since = "0.15.0",
2595 note = "RocksDB automatically decides this based on the value of max_background_jobs"
2596 )]
2597 pub fn set_max_background_compactions(&mut self, n: c_int) {
2598 unsafe {
2599 ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2600 }
2601 }
2602
2603 /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2604 /// the HIGH priority thread pool.
2605 ///
2606 /// By default, all background jobs (major compaction and memtable flush) go
2607 /// to the LOW priority pool. If this option is set to a positive number,
2608 /// memtable flush jobs will be submitted to the HIGH priority pool.
2609 /// It is important when the same Env is shared by multiple db instances.
2610 /// Without a separate pool, long running major compaction jobs could
2611 /// potentially block memtable flush jobs of other db instances, leading to
2612 /// unnecessary Put stalls.
2613 ///
2614 /// If you're increasing this, also consider increasing number of threads in
2615 /// HIGH priority thread pool. For more information, see
2616 /// Env::SetBackgroundThreads
2617 ///
2618 /// Default: `1`
2619 ///
2620 /// # Examples
2621 ///
2622 /// ```
2623 /// use rust_rocksdb::Options;
2624 ///
2625 /// let mut opts = Options::default();
2626 /// #[allow(deprecated)]
2627 /// opts.set_max_background_flushes(2);
2628 /// ```
2629 #[deprecated(
2630 since = "0.15.0",
2631 note = "RocksDB automatically decides this based on the value of max_background_jobs"
2632 )]
2633 pub fn set_max_background_flushes(&mut self, n: c_int) {
2634 unsafe {
2635 ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2636 }
2637 }
2638
2639 /// Disables automatic compactions. Manual compactions can still
2640 /// be issued on this column family
2641 ///
2642 /// Default: `false`
2643 ///
2644 /// Dynamically changeable through SetOptions() API
2645 ///
2646 /// # Examples
2647 ///
2648 /// ```
2649 /// use rust_rocksdb::Options;
2650 ///
2651 /// let mut opts = Options::default();
2652 /// opts.set_disable_auto_compactions(true);
2653 /// ```
2654 pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2655 unsafe {
2656 ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2657 }
2658 }
2659
2660 /// SetMemtableHugePageSize sets the page size for huge page for
2661 /// arena used by the memtable.
2662 /// If <=0, it won't allocate from huge page but from malloc.
2663 /// Users are responsible to reserve huge pages for it to be allocated. For
2664 /// example:
2665 /// sysctl -w vm.nr_hugepages=20
2666 /// See linux doc Documentation/vm/hugetlbpage.txt
2667 /// If there isn't enough free huge page available, it will fall back to
2668 /// malloc.
2669 ///
2670 /// Dynamically changeable through SetOptions() API
2671 pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2672 unsafe {
2673 ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2674 }
2675 }
2676
2677 /// Sets the maximum number of successive merge operations on a key in the memtable.
2678 ///
2679 /// When a merge operation is added to the memtable and the maximum number of
2680 /// successive merges is reached, the value of the key will be calculated and
2681 /// inserted into the memtable instead of the merge operation. This will
2682 /// ensure that there are never more than max_successive_merges merge
2683 /// operations in the memtable.
2684 ///
2685 /// Default: 0 (disabled)
2686 pub fn set_max_successive_merges(&mut self, num: usize) {
2687 unsafe {
2688 ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2689 }
2690 }
2691
2692 /// Control locality of bloom filter probes to improve cache miss rate.
2693 /// This option only applies to memtable prefix bloom and plaintable
2694 /// prefix bloom. It essentially limits the max number of cache lines each
2695 /// bloom filter check can touch.
2696 ///
2697 /// This optimization is turned off when set to 0. The number should never
2698 /// be greater than number of probes. This option can boost performance
2699 /// for in-memory workload but should use with care since it can cause
2700 /// higher false positive rate.
2701 ///
2702 /// Default: 0
2703 pub fn set_bloom_locality(&mut self, v: u32) {
2704 unsafe {
2705 ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2706 }
2707 }
2708
2709 /// Enable/disable thread-safe inplace updates.
2710 ///
2711 /// Requires updates if
2712 /// * key exists in current memtable
2713 /// * new sizeof(new_value) <= sizeof(old_value)
2714 /// * old_value for that key is a put i.e. kTypeValue
2715 ///
2716 /// Default: false.
2717 pub fn set_inplace_update_support(&mut self, enabled: bool) {
2718 unsafe {
2719 ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2720 }
2721 }
2722
2723 /// Sets the number of locks used for inplace update.
2724 ///
2725 /// Default: 10000 when inplace_update_support = true, otherwise 0.
2726 pub fn set_inplace_update_locks(&mut self, num: usize) {
2727 unsafe {
2728 ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2729 }
2730 }
2731
2732 /// Different max-size multipliers for different levels.
2733 /// These are multiplied by max_bytes_for_level_multiplier to arrive
2734 /// at the max-size of each level.
2735 ///
2736 /// Default: 1
2737 ///
2738 /// Dynamically changeable through SetOptions() API
2739 pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2740 let count = level_values.len();
2741 unsafe {
2742 ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2743 self.inner,
2744 level_values.as_ptr().cast_mut(),
2745 count,
2746 );
2747 }
2748 }
2749
2750 /// The total maximum size(bytes) of write buffers to maintain in memory
2751 /// including copies of buffers that have already been flushed. This parameter
2752 /// only affects trimming of flushed buffers and does not affect flushing.
2753 /// This controls the maximum amount of write history that will be available
2754 /// in memory for conflict checking when Transactions are used. The actual
2755 /// size of write history (flushed Memtables) might be higher than this limit
2756 /// if further trimming will reduce write history total size below this
2757 /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2758 /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2759 /// Because trimming the next Memtable of size 20MB will reduce total memory
2760 /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2761 ///
2762 /// When using an OptimisticTransactionDB:
2763 /// If this value is too low, some transactions may fail at commit time due
2764 /// to not being able to determine whether there were any write conflicts.
2765 ///
2766 /// When using a TransactionDB:
2767 /// If Transaction::SetSnapshot is used, TransactionDB will read either
2768 /// in-memory write buffers or SST files to do write-conflict checking.
2769 /// Increasing this value can reduce the number of reads to SST files
2770 /// done for conflict detection.
2771 ///
2772 /// Setting this value to 0 will cause write buffers to be freed immediately
2773 /// after they are flushed. If this value is set to -1,
2774 /// 'max_write_buffer_number * write_buffer_size' will be used.
2775 ///
2776 /// Default:
2777 /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2778 /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2779 /// if it is not explicitly set by the user. Otherwise, the default is 0.
2780 pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2781 unsafe {
2782 ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2783 }
2784 }
2785
2786 /// By default, a single write thread queue is maintained. The thread gets
2787 /// to the head of the queue becomes write batch group leader and responsible
2788 /// for writing to WAL and memtable for the batch group.
2789 ///
2790 /// If enable_pipelined_write is true, separate write thread queue is
2791 /// maintained for WAL write and memtable write. A write thread first enter WAL
2792 /// writer queue and then memtable writer queue. Pending thread on the WAL
2793 /// writer queue thus only have to wait for previous writers to finish their
2794 /// WAL writing but not the memtable writing. Enabling the feature may improve
2795 /// write throughput and reduce latency of the prepare phase of two-phase
2796 /// commit.
2797 ///
2798 /// Default: false
2799 pub fn set_enable_pipelined_write(&mut self, value: bool) {
2800 unsafe {
2801 ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
2802 }
2803 }
2804
2805 /// Defines the underlying memtable implementation.
2806 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2807 /// Defaults to using a skiplist.
2808 ///
2809 /// # Examples
2810 ///
2811 /// ```
2812 /// use rust_rocksdb::{Options, MemtableFactory};
2813 /// let mut opts = Options::default();
2814 /// let factory = MemtableFactory::HashSkipList {
2815 /// bucket_count: 1_000_000,
2816 /// height: 4,
2817 /// branching_factor: 4,
2818 /// };
2819 ///
2820 /// opts.set_allow_concurrent_memtable_write(false);
2821 /// opts.set_memtable_factory(factory);
2822 /// ```
2823 pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2824 match factory {
2825 MemtableFactory::Vector => unsafe {
2826 ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2827 },
2828 MemtableFactory::HashSkipList {
2829 bucket_count,
2830 height,
2831 branching_factor,
2832 } => unsafe {
2833 ffi::rocksdb_options_set_hash_skip_list_rep(
2834 self.inner,
2835 bucket_count,
2836 height,
2837 branching_factor,
2838 );
2839 },
2840 MemtableFactory::HashLinkList { bucket_count } => unsafe {
2841 ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2842 },
2843 }
2844 }
2845
2846 pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2847 unsafe {
2848 ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2849 }
2850 self.outlive.block_based = Some(factory.outlive.clone());
2851 }
2852
2853 /// Sets the table factory to a CuckooTableFactory (the default table
2854 /// factory is a block-based table factory that provides a default
2855 /// implementation of TableBuilder and TableReader with default
2856 /// BlockBasedTableOptions).
2857 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2858 /// # Examples
2859 ///
2860 /// ```
2861 /// use rust_rocksdb::{Options, CuckooTableOptions};
2862 ///
2863 /// let mut opts = Options::default();
2864 /// let mut factory_opts = CuckooTableOptions::default();
2865 /// factory_opts.set_hash_ratio(0.8);
2866 /// factory_opts.set_max_search_depth(20);
2867 /// factory_opts.set_cuckoo_block_size(10);
2868 /// factory_opts.set_identity_as_first_hash(true);
2869 /// factory_opts.set_use_module_hash(false);
2870 ///
2871 /// opts.set_cuckoo_table_factory(&factory_opts);
2872 /// ```
2873 pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2874 unsafe {
2875 ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2876 }
2877 }
2878
2879 // This is a factory that provides TableFactory objects.
2880 // Default: a block-based table factory that provides a default
2881 // implementation of TableBuilder and TableReader with default
2882 // BlockBasedTableOptions.
2883 /// Sets the factory as plain table.
2884 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
2885 /// information.
2886 ///
2887 /// # Examples
2888 ///
2889 /// ```
2890 /// use rust_rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
2891 ///
2892 /// let mut opts = Options::default();
2893 /// let factory_opts = PlainTableFactoryOptions {
2894 /// user_key_length: 0,
2895 /// bloom_bits_per_key: 20,
2896 /// hash_table_ratio: 0.75,
2897 /// index_sparseness: 16,
2898 /// huge_page_tlb_size: 0,
2899 /// encoding_type: KeyEncodingType::Plain,
2900 /// full_scan_mode: false,
2901 /// store_index_in_file: false,
2902 /// };
2903 ///
2904 /// opts.set_plain_table_factory(&factory_opts);
2905 /// ```
2906 pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2907 unsafe {
2908 ffi::rocksdb_options_set_plain_table_factory(
2909 self.inner,
2910 options.user_key_length,
2911 options.bloom_bits_per_key,
2912 options.hash_table_ratio,
2913 options.index_sparseness,
2914 options.huge_page_tlb_size,
2915 options.encoding_type as c_char,
2916 c_uchar::from(options.full_scan_mode),
2917 c_uchar::from(options.store_index_in_file),
2918 );
2919 }
2920 }
2921
2922 /// Sets the start level to use compression.
2923 pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2924 unsafe {
2925 ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2926 }
2927 }
2928
2929 /// Measure IO stats in compactions and flushes, if `true`.
2930 ///
2931 /// Default: `false`
2932 ///
2933 /// # Examples
2934 ///
2935 /// ```
2936 /// use rust_rocksdb::Options;
2937 ///
2938 /// let mut opts = Options::default();
2939 /// opts.set_report_bg_io_stats(true);
2940 /// ```
2941 pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2942 unsafe {
2943 ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
2944 }
2945 }
2946
2947 /// Once write-ahead logs exceed this size, we will start forcing the flush of
2948 /// column families whose memtables are backed by the oldest live WAL file
2949 /// (i.e. the ones that are causing all the space amplification).
2950 ///
2951 /// Default: `0`
2952 ///
2953 /// # Examples
2954 ///
2955 /// ```
2956 /// use rust_rocksdb::Options;
2957 ///
2958 /// let mut opts = Options::default();
2959 /// // Set max total wal size to 1G.
2960 /// opts.set_max_total_wal_size(1 << 30);
2961 /// ```
2962 pub fn set_max_total_wal_size(&mut self, size: u64) {
2963 unsafe {
2964 ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2965 }
2966 }
2967
2968 /// Recovery mode to control the consistency while replaying WAL.
2969 ///
2970 /// Default: DBRecoveryMode::PointInTime
2971 ///
2972 /// # Examples
2973 ///
2974 /// ```
2975 /// use rust_rocksdb::{Options, DBRecoveryMode};
2976 ///
2977 /// let mut opts = Options::default();
2978 /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
2979 /// ```
2980 pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
2981 unsafe {
2982 ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
2983 }
2984 }
2985
2986 /// Enables recording RocksDB statistics.
2987 ///
2988 /// The statistics in this Options object are shared between all DB instances.
2989 /// See [`get_statistics`](Self::get_statistics), [`get_ticker_count`](Self::get_ticker_count),
2990 /// and [`get_histogram_data`](Self::get_histogram_data).
2991 pub fn enable_statistics(&mut self) {
2992 unsafe {
2993 ffi::rocksdb_options_enable_statistics(self.inner);
2994 }
2995 }
2996
2997 /// Returns a string containing RocksDB statistics if enabled using
2998 /// [`enable_statistics`](Self::enable_statistics).
2999 pub fn get_statistics(&self) -> Option<String> {
3000 unsafe {
3001 let value = ffi::rocksdb_options_statistics_get_string(self.inner);
3002 if value.is_null() {
3003 return None;
3004 }
3005
3006 // Must have valid UTF-8 format.
3007 Some(from_cstr_and_free(value))
3008 }
3009 }
3010
3011 /// StatsLevel can be used to reduce statistics overhead by skipping certain
3012 /// types of stats in the stats collection process.
3013 ///
3014 /// Only takes effect if stats are enabled first using
3015 /// [`enable_statistics`](Self::enable_statistics).
3016 pub fn set_statistics_level(&self, level: StatsLevel) {
3017 unsafe { ffi::rocksdb_options_set_statistics_level(self.inner, level as c_int) }
3018 }
3019
3020 /// Returns a counter if statistics are enabled using
3021 /// [`enable_statistics`](Self::enable_statistics).
3022 pub fn get_ticker_count(&self, ticker: Ticker) -> u64 {
3023 unsafe { ffi::rocksdb_options_statistics_get_ticker_count(self.inner, ticker as u32) }
3024 }
3025
3026 /// Returns a histogram if statistics are enabled using
3027 /// [`enable_statistics`](Self::enable_statistics).
3028 pub fn get_histogram_data(&self, histogram: Histogram) -> HistogramData {
3029 unsafe {
3030 let data = HistogramData::default();
3031 ffi::rocksdb_options_statistics_get_histogram_data(
3032 self.inner,
3033 histogram as u32,
3034 data.inner,
3035 );
3036 data
3037 }
3038 }
3039
3040 /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
3041 ///
3042 /// Default: `600` (10 mins)
3043 ///
3044 /// # Examples
3045 ///
3046 /// ```
3047 /// use rust_rocksdb::Options;
3048 ///
3049 /// let mut opts = Options::default();
3050 /// opts.set_stats_dump_period_sec(300);
3051 /// ```
3052 pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
3053 unsafe {
3054 ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
3055 }
3056 }
3057
3058 /// If not zero, dump rocksdb.stats to RocksDB to LOG every `stats_persist_period_sec`.
3059 ///
3060 /// Default: `600` (10 mins)
3061 ///
3062 /// # Examples
3063 ///
3064 /// ```
3065 /// use rust_rocksdb::Options;
3066 ///
3067 /// let mut opts = Options::default();
3068 /// opts.set_stats_persist_period_sec(5);
3069 /// ```
3070 pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
3071 unsafe {
3072 ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
3073 }
3074 }
3075
3076 /// When set to true, reading SST files will opt out of the filesystem's
3077 /// readahead. Setting this to false may improve sequential iteration
3078 /// performance.
3079 ///
3080 /// Default: `true`
3081 pub fn set_advise_random_on_open(&mut self, advise: bool) {
3082 unsafe {
3083 ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
3084 }
3085 }
3086
3087 /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
3088 ///
3089 /// This could reduce context switch when the mutex is not
3090 /// heavily contended. However, if the mutex is hot, we could end up
3091 /// wasting spin time.
3092 ///
3093 /// Default: false
3094 pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
3095 unsafe {
3096 ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
3097 }
3098 }
3099
3100 /// Sets the number of levels for this database.
3101 pub fn set_num_levels(&mut self, n: c_int) {
3102 unsafe {
3103 ffi::rocksdb_options_set_num_levels(self.inner, n);
3104 }
3105 }
3106
3107 /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
3108 /// creates a prefix bloom filter for each memtable with the size of
3109 /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
3110 ///
3111 /// Default: `0`
3112 ///
3113 /// # Examples
3114 ///
3115 /// ```
3116 /// use rust_rocksdb::{Options, SliceTransform};
3117 ///
3118 /// let mut opts = Options::default();
3119 /// let transform = SliceTransform::create_fixed_prefix(10);
3120 /// opts.set_prefix_extractor(transform);
3121 /// opts.set_memtable_prefix_bloom_ratio(0.2);
3122 /// ```
3123 pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
3124 unsafe {
3125 ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
3126 }
3127 }
3128
3129 /// Sets the maximum number of bytes in all compacted files.
3130 /// We try to limit number of bytes in one compaction to be lower than this
3131 /// threshold. But it's not guaranteed.
3132 ///
3133 /// Value 0 will be sanitized.
3134 ///
3135 /// Default: target_file_size_base * 25
3136 pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
3137 unsafe {
3138 ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
3139 }
3140 }
3141
3142 /// Specifies the absolute path of the directory the
3143 /// write-ahead log (WAL) should be written to.
3144 ///
3145 /// Default: same directory as the database
3146 ///
3147 /// # Examples
3148 ///
3149 /// ```
3150 /// use rust_rocksdb::Options;
3151 ///
3152 /// let mut opts = Options::default();
3153 /// opts.set_wal_dir("/path/to/dir");
3154 /// ```
3155 pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
3156 let p = to_cpath(path).unwrap();
3157 unsafe {
3158 ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
3159 }
3160 }
3161
3162 /// Sets the WAL ttl in seconds.
3163 ///
3164 /// The following two options affect how archived logs will be deleted.
3165 /// 1. If both set to 0, logs will be deleted asap and will not get into
3166 /// the archive.
3167 /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
3168 /// WAL files will be checked every 10 min and if total size is greater
3169 /// then wal_size_limit_mb, they will be deleted starting with the
3170 /// earliest until size_limit is met. All empty files will be deleted.
3171 /// 3. If wal_ttl_seconds is not 0 and wall_size_limit_mb is 0, then
3172 /// WAL files will be checked every wal_ttl_seconds / 2 and those that
3173 /// are older than wal_ttl_seconds will be deleted.
3174 /// 4. If both are not 0, WAL files will be checked every 10 min and both
3175 /// checks will be performed with ttl being first.
3176 ///
3177 /// Default: 0
3178 pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
3179 unsafe {
3180 ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
3181 }
3182 }
3183
3184 /// Sets the WAL size limit in MB.
3185 ///
3186 /// If total size of WAL files is greater then wal_size_limit_mb,
3187 /// they will be deleted starting with the earliest until size_limit is met.
3188 ///
3189 /// Default: 0
3190 pub fn set_wal_size_limit_mb(&mut self, size: u64) {
3191 unsafe {
3192 ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
3193 }
3194 }
3195
3196 /// Sets the number of bytes to preallocate (via fallocate) the manifest files.
3197 ///
3198 /// Default is 4MB, which is reasonable to reduce random IO
3199 /// as well as prevent overallocation for mounts that preallocate
3200 /// large amounts of data (such as xfs's allocsize option).
3201 pub fn set_manifest_preallocation_size(&mut self, size: usize) {
3202 unsafe {
3203 ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
3204 }
3205 }
3206
3207 /// If true, then DB::Open() will not update the statistics used to optimize
3208 /// compaction decision by loading table properties from many files.
3209 /// Turning off this feature will improve DBOpen time especially in disk environment.
3210 ///
3211 /// Default: false
3212 pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
3213 unsafe {
3214 ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
3215 }
3216 }
3217
3218 /// Specify the maximal number of info log files to be kept.
3219 ///
3220 /// Default: 1000
3221 ///
3222 /// # Examples
3223 ///
3224 /// ```
3225 /// use rust_rocksdb::Options;
3226 ///
3227 /// let mut options = Options::default();
3228 /// options.set_keep_log_file_num(100);
3229 /// ```
3230 pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
3231 unsafe {
3232 ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
3233 }
3234 }
3235
3236 /// Allow the OS to mmap file for writing.
3237 ///
3238 /// Default: false
3239 ///
3240 /// # Examples
3241 ///
3242 /// ```
3243 /// use rust_rocksdb::Options;
3244 ///
3245 /// let mut options = Options::default();
3246 /// options.set_allow_mmap_writes(true);
3247 /// ```
3248 pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
3249 unsafe {
3250 ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
3251 }
3252 }
3253
3254 /// Allow the OS to mmap file for reading sst tables.
3255 ///
3256 /// Default: false
3257 ///
3258 /// # Examples
3259 ///
3260 /// ```
3261 /// use rust_rocksdb::Options;
3262 ///
3263 /// let mut options = Options::default();
3264 /// options.set_allow_mmap_reads(true);
3265 /// ```
3266 pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
3267 unsafe {
3268 ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
3269 }
3270 }
3271
3272 /// If enabled, WAL is not flushed automatically after each write. Instead it
3273 /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
3274 /// to its file.
3275 ///
3276 /// Default: false
3277 ///
3278 /// # Examples
3279 ///
3280 /// ```
3281 /// use rust_rocksdb::Options;
3282 ///
3283 /// let mut options = Options::default();
3284 /// options.set_manual_wal_flush(true);
3285 /// ```
3286 pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
3287 unsafe {
3288 ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
3289 }
3290 }
3291
3292 /// Guarantee that all column families are flushed together atomically.
3293 /// This option applies to both manual flushes (`db.flush()`) and automatic
3294 /// background flushes caused when memtables are filled.
3295 ///
3296 /// Note that this is only useful when the WAL is disabled. When using the
3297 /// WAL, writes are always consistent across column families.
3298 ///
3299 /// Default: false
3300 ///
3301 /// # Examples
3302 ///
3303 /// ```
3304 /// use rust_rocksdb::Options;
3305 ///
3306 /// let mut options = Options::default();
3307 /// options.set_atomic_flush(true);
3308 /// ```
3309 pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
3310 unsafe {
3311 ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
3312 }
3313 }
3314
3315 /// Sets global cache for table-level rows.
3316 ///
3317 /// Default: null (disabled)
3318 /// Not supported in ROCKSDB_LITE mode!
3319 pub fn set_row_cache(&mut self, cache: &Cache) {
3320 unsafe {
3321 ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
3322 }
3323 self.outlive.row_cache = Some(cache.clone());
3324 }
3325
3326 /// Use to control write rate of flush and compaction. Flush has higher
3327 /// priority than compaction.
3328 /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3329 ///
3330 /// Default: disable
3331 ///
3332 /// # Examples
3333 ///
3334 /// ```
3335 /// use rust_rocksdb::Options;
3336 ///
3337 /// let mut options = Options::default();
3338 /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
3339 /// ```
3340 pub fn set_ratelimiter(
3341 &mut self,
3342 rate_bytes_per_sec: i64,
3343 refill_period_us: i64,
3344 fairness: i32,
3345 ) {
3346 unsafe {
3347 let ratelimiter =
3348 ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
3349 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3350 ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3351 }
3352 }
3353
3354 /// Use to control write rate of flush and compaction. Flush has higher
3355 /// priority than compaction.
3356 /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
3357 ///
3358 /// Default: disable
3359 pub fn set_auto_tuned_ratelimiter(
3360 &mut self,
3361 rate_bytes_per_sec: i64,
3362 refill_period_us: i64,
3363 fairness: i32,
3364 ) {
3365 unsafe {
3366 let ratelimiter = ffi::rocksdb_ratelimiter_create_auto_tuned(
3367 rate_bytes_per_sec,
3368 refill_period_us,
3369 fairness,
3370 );
3371 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3372 ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3373 }
3374 }
3375
3376 /// Create a RateLimiter object, which can be shared among RocksDB instances to
3377 /// control write rate of flush and compaction.
3378 ///
3379 /// rate_bytes_per_sec: this is the only parameter you want to set most of the
3380 /// time. It controls the total write rate of compaction and flush in bytes per
3381 /// second. Currently, RocksDB does not enforce rate limit for anything other
3382 /// than flush and compaction, e.g. write to WAL.
3383 ///
3384 /// refill_period_us: this controls how often tokens are refilled. For example,
3385 /// when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
3386 /// 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to
3387 /// burstier writes while smaller value introduces more CPU overhead.
3388 /// The default should work for most cases.
3389 ///
3390 /// fairness: RateLimiter accepts high-pri requests and low-pri requests.
3391 /// A low-pri request is usually blocked in favor of hi-pri request. Currently,
3392 /// RocksDB assigns low-pri to request from compaction and high-pri to request
3393 /// from flush. Low-pri requests can get blocked if flush requests come in
3394 /// continuously. This fairness parameter grants low-pri requests permission by
3395 /// 1/fairness chance even though high-pri requests exist to avoid starvation.
3396 /// You should be good by leaving it at default 10.
3397 ///
3398 /// mode: Mode indicates which types of operations count against the limit.
3399 ///
3400 /// auto_tuned: Enables dynamic adjustment of rate limit within the range
3401 /// `[rate_bytes_per_sec / 20, rate_bytes_per_sec]`, according to
3402 /// the recent demand for background I/O.
3403 pub fn set_ratelimiter_with_mode(
3404 &mut self,
3405 rate_bytes_per_sec: i64,
3406 refill_period_us: i64,
3407 fairness: i32,
3408 mode: RateLimiterMode,
3409 auto_tuned: bool,
3410 ) {
3411 unsafe {
3412 let ratelimiter = ffi::rocksdb_ratelimiter_create_with_mode(
3413 rate_bytes_per_sec,
3414 refill_period_us,
3415 fairness,
3416 mode as c_int,
3417 auto_tuned,
3418 );
3419 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
3420 ffi::rocksdb_ratelimiter_destroy(ratelimiter);
3421 }
3422 }
3423
3424 /// Sets the maximal size of the info log file.
3425 ///
3426 /// If the log file is larger than `max_log_file_size`, a new info log file
3427 /// will be created. If `max_log_file_size` is equal to zero, all logs will
3428 /// be written to one log file.
3429 ///
3430 /// Default: 0
3431 ///
3432 /// # Examples
3433 ///
3434 /// ```
3435 /// use rust_rocksdb::Options;
3436 ///
3437 /// let mut options = Options::default();
3438 /// options.set_max_log_file_size(0);
3439 /// ```
3440 pub fn set_max_log_file_size(&mut self, size: usize) {
3441 unsafe {
3442 ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
3443 }
3444 }
3445
3446 /// Sets the time for the info log file to roll (in seconds).
3447 ///
3448 /// If specified with non-zero value, log file will be rolled
3449 /// if it has been active longer than `log_file_time_to_roll`.
3450 /// Default: 0 (disabled)
3451 pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
3452 unsafe {
3453 ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
3454 }
3455 }
3456
3457 /// Controls the recycling of log files.
3458 ///
3459 /// If non-zero, previously written log files will be reused for new logs,
3460 /// overwriting the old data. The value indicates how many such files we will
3461 /// keep around at any point in time for later use. This is more efficient
3462 /// because the blocks are already allocated and fdatasync does not need to
3463 /// update the inode after each write.
3464 ///
3465 /// Default: 0
3466 ///
3467 /// # Examples
3468 ///
3469 /// ```
3470 /// use rust_rocksdb::Options;
3471 ///
3472 /// let mut options = Options::default();
3473 /// options.set_recycle_log_file_num(5);
3474 /// ```
3475 pub fn set_recycle_log_file_num(&mut self, num: usize) {
3476 unsafe {
3477 ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
3478 }
3479 }
3480
3481 /// Prints logs to stderr for faster debugging
3482 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/Logger) for more information.
3483 pub fn set_stderr_logger(&mut self, log_level: LogLevel, prefix: impl CStrLike) {
3484 let p = prefix.into_c_string().unwrap();
3485
3486 unsafe {
3487 let logger = ffi::rocksdb_logger_create_stderr_logger(log_level as c_int, p.as_ptr());
3488 ffi::rocksdb_options_set_info_log(self.inner, logger);
3489 ffi::rocksdb_logger_destroy(logger);
3490 }
3491 }
3492
    /// Invokes `callback` with RocksDB log messages with level >= `log_level`.
    ///
    /// The callback can be called concurrently by multiple RocksDB threads.
    ///
    /// # Examples
    /// ```
    /// use rust_rocksdb::{LogLevel, Options};
    ///
    /// let mut options = Options::default();
    /// options.set_callback_logger(LogLevel::Debug, move |level, msg| println!("{level:?} {msg}"));
    /// ```
    pub fn set_callback_logger(
        &mut self,
        log_level: LogLevel,
        callback: impl Fn(LogLevel, &str) + 'static + Send + Sync,
    ) {
        // store the closure in an Arc so it can be shared across multiple Option/DBs
        let holder = Arc::new(LogCallback {
            callback: Box::new(callback),
        });
        // Raw pointer to the Arc's payload; handed to C as the opaque
        // user-data argument of the trampoline. It stays valid as long as
        // `holder` (stored in `outlive` below) is alive.
        let holder_ptr = holder.as_ref() as *const LogCallback;
        let holder_cvoid = holder_ptr.cast::<c_void>().cast_mut();

        unsafe {
            let logger = ffi::rocksdb_logger_create_callback_logger(
                log_level as c_int,
                Some(Self::logger_callback),
                holder_cvoid,
            );
            ffi::rocksdb_options_set_info_log(self.inner, logger);
            // The options keep their own shared reference to the logger,
            // so our handle can be released immediately.
            ffi::rocksdb_logger_destroy(logger);
        }

        // Keep the Arc alive for as long as these options (and any DB opened
        // from them) may invoke the callback through the raw pointer above.
        self.outlive.log_callback = Some(holder);
    }
3528
3529 extern "C" fn logger_callback(func: *mut c_void, level: u32, msg: *mut c_char, len: usize) {
3530 use std::{mem, process, str};
3531
3532 let level = unsafe { mem::transmute::<u32, LogLevel>(level) };
3533 let slice = unsafe { slice::from_raw_parts_mut(msg.cast::<u8>(), len) };
3534 let msg = unsafe { str::from_utf8_unchecked(slice) };
3535
3536 let holder = unsafe { &mut *func.cast::<LogCallback>() };
3537 let mut callback_in_catch_unwind = AssertUnwindSafe(&mut holder.callback);
3538 if catch_unwind(move || callback_in_catch_unwind(level, msg)).is_err() {
3539 process::abort();
3540 }
3541 }
3542
3543 /// Sets the threshold at which all writes will be slowed down to at least delayed_write_rate if estimated
3544 /// bytes needed to be compaction exceed this threshold.
3545 ///
3546 /// Default: 64GB
3547 pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
3548 unsafe {
3549 ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
3550 }
3551 }
3552
    /// Sets the bytes threshold at which all writes are stopped if estimated bytes needed to be compaction exceed
    /// this threshold.
    ///
    /// Default: 256GB
    pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
        // Thin wrapper over `rocksdb_options_set_hard_pending_compaction_bytes_limit`.
        unsafe {
            ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
        }
    }
3562
    /// Sets the size of one block in arena memory allocation.
    ///
    /// If <= 0, a proper value is automatically calculated (usually 1/10 of
    /// writer_buffer_size).
    ///
    /// Default: 0
    pub fn set_arena_block_size(&mut self, size: usize) {
        // Thin wrapper over `rocksdb_options_set_arena_block_size`; `size` is in bytes.
        unsafe {
            ffi::rocksdb_options_set_arena_block_size(self.inner, size);
        }
    }
3574
    /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
    ///
    /// Default: false
    pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
        // The C API takes the boolean as an unsigned char (0/1).
        unsafe {
            ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
        }
    }
3583
    /// Enable whole key bloom filter in memtable. Note this will only take effect
    /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
    /// can potentially reduce CPU usage for point-look-ups.
    ///
    /// Default: false (disable)
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
        // The C API takes the boolean as an unsigned char (0/1).
        unsafe {
            ffi::rocksdb_options_set_memtable_whole_key_filtering(
                self.inner,
                c_uchar::from(whole_key_filter),
            );
        }
    }
3599
3600 /// Enable the use of key-value separation.
3601 ///
3602 /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
3603 ///
3604 /// Default: false (disable)
3605 ///
3606 /// Dynamically changeable through SetOptions() API
3607 pub fn set_enable_blob_files(&mut self, val: bool) {
3608 unsafe {
3609 ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
3610 }
3611 }
3612
    /// Sets the minimum threshold value at or above which will be written
    /// to blob files during flush or compaction.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_min_blob_size(&mut self, val: u64) {
        // Thin wrapper over `rocksdb_options_set_min_blob_size`; `val` is in bytes.
        unsafe {
            ffi::rocksdb_options_set_min_blob_size(self.inner, val);
        }
    }
3622
    /// Sets the size limit for blob files.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_file_size(&mut self, val: u64) {
        // Thin wrapper over `rocksdb_options_set_blob_file_size`; `val` is in bytes.
        unsafe {
            ffi::rocksdb_options_set_blob_file_size(self.inner, val);
        }
    }
3631
    /// Sets the blob compression type. All blob files use the same
    /// compression type.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
        // The enum discriminant is passed straight through to the C API.
        unsafe {
            ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
        }
    }
3641
3642 /// If this is set to true RocksDB will actively relocate valid blobs from the oldest blob files
3643 /// as they are encountered during compaction.
3644 ///
3645 /// Dynamically changeable through SetOptions() API
3646 pub fn set_enable_blob_gc(&mut self, val: bool) {
3647 unsafe {
3648 ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
3649 }
3650 }
3651
    /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
    ///
    /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
    /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
    /// the trade-off between write amplification and space amplification.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
        // Thin wrapper over `rocksdb_options_set_blob_gc_age_cutoff`.
        unsafe {
            ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
        }
    }
3664
    /// Sets the blob GC force threshold.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
        // Thin wrapper over `rocksdb_options_set_blob_gc_force_threshold`.
        unsafe {
            ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
        }
    }
3673
    /// Sets the blob compaction read ahead size.
    ///
    /// Dynamically changeable through SetOptions() API
    pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
        // Thin wrapper over `rocksdb_options_set_blob_compaction_readahead_size`;
        // `val` is in bytes.
        unsafe {
            ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
        }
    }
3682
    /// Sets the blob cache.
    ///
    /// Using a dedicated object for blobs and using the same object for the block and blob caches
    /// are both supported. In the latter case, note that blobs are less valuable from a caching
    /// perspective than SST blocks, and some cache implementations have configuration options that
    /// can be used to prioritize items accordingly (see Cache::Priority and
    /// LRUCacheOptions::{high,low}_pri_pool_ratio).
    ///
    /// Default: disabled
    pub fn set_blob_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_options_set_blob_cache(self.inner, cache.0.inner.as_ptr());
        }
        // Keep a clone of the cache handle so it outlives these options (and any DB
        // opened from them); the C side only stores the raw pointer.
        self.outlive.blob_cache = Some(cache.clone());
    }
3698
    /// Set this option to true during creation of database if you want
    /// to be able to ingest behind (call IngestExternalFile() skipping keys
    /// that already exist, rather than overwriting matching keys).
    /// Setting this option to true has the following effects:
    ///
    /// 1. Disable some internal optimizations around SST file compression.
    /// 2. Reserve the last level for ingested files only.
    /// 3. Compaction will not include any file from the last level.
    ///
    /// Note that only Universal Compaction supports allow_ingest_behind.
    /// `num_levels` should be >= 3 if this option is turned on.
    ///
    /// DEFAULT: false
    /// Immutable.
    pub fn set_allow_ingest_behind(&mut self, val: bool) {
        // The C API takes the boolean as an unsigned char (0/1).
        unsafe {
            ffi::rocksdb_options_set_allow_ingest_behind(self.inner, c_uchar::from(val));
        }
    }
3718
    /// A factory of a table property collector that marks an SST
    /// file as need-compaction when it observes at least "D" deletion
    /// entries in any "N" consecutive entries, or the ratio of tombstone
    /// entries >= deletion_ratio.
    ///
    /// `window_size`: is the sliding window size "N"
    /// `num_dels_trigger`: is the deletion trigger "D"
    /// `deletion_ratio`: if <= 0 or > 1, disable triggering compaction based on
    /// deletion ratio.
    pub fn add_compact_on_deletion_collector_factory(
        &mut self,
        window_size: size_t,
        num_dels_trigger: size_t,
        deletion_ratio: f64,
    ) {
        unsafe {
            ffi::rocksdb_options_add_compact_on_deletion_collector_factory_del_ratio(
                self.inner,
                window_size,
                num_dels_trigger,
                deletion_ratio,
            );
        }
    }
3743
    /// Like [`Self::add_compact_on_deletion_collector_factory`], but only triggers
    /// compaction if the SST file size is at least `min_file_size` bytes.
    pub fn add_compact_on_deletion_collector_factory_min_file_size(
        &mut self,
        window_size: size_t,
        num_dels_trigger: size_t,
        deletion_ratio: f64,
        min_file_size: u64,
    ) {
        // Same factory as above but routed through the `_min_file_size` C entry point,
        // which takes the additional size threshold.
        unsafe {
            ffi::rocksdb_options_add_compact_on_deletion_collector_factory_min_file_size(
                self.inner,
                window_size,
                num_dels_trigger,
                deletion_ratio,
                min_file_size,
            );
        }
    }
3763
    /// <https://github.com/facebook/rocksdb/wiki/Write-Buffer-Manager>
    /// Write buffer manager helps users control the total memory used by memtables across multiple column families and/or DB instances.
    /// Users can enable this control by 2 ways:
    ///
    /// 1- Limit the total memtable usage across multiple column families and DBs under a threshold.
    /// 2- Cost the memtable memory usage to block cache so that memory of RocksDB can be capped by the single limit.
    /// The usage of a write buffer manager is similar to rate_limiter and sst_file_manager.
    /// Users can create one write buffer manager object and pass it to all the options of column families or DBs whose memtable size they want to be controlled by this object.
    pub fn set_write_buffer_manager(&mut self, write_buffer_manager: &WriteBufferManager) {
        unsafe {
            ffi::rocksdb_options_set_write_buffer_manager(
                self.inner,
                write_buffer_manager.0.inner.as_ptr(),
            );
        }
        // Keep a clone so the manager outlives these options; the C side only stores
        // the raw pointer.
        self.outlive.write_buffer_manager = Some(write_buffer_manager.clone());
    }
3781
    /// Sets an `SstFileManager` for this `Options`.
    ///
    /// SstFileManager tracks and controls total SST file space usage, enabling
    /// applications to cap disk utilization and throttle deletions.
    pub fn set_sst_file_manager(&mut self, sst_file_manager: &SstFileManager) {
        unsafe {
            ffi::rocksdb_options_set_sst_file_manager(
                self.inner,
                sst_file_manager.0.inner.as_ptr(),
            );
        }
        // Keep a clone so the manager outlives these options; the C side only stores
        // the raw pointer.
        self.outlive.sst_file_manager = Some(sst_file_manager.clone());
    }
3795
3796 /// If true, working thread may avoid doing unnecessary and long-latency
3797 /// operation (such as deleting obsolete files directly or deleting memtable)
3798 /// and will instead schedule a background job to do it.
3799 ///
3800 /// Use it if you're latency-sensitive.
3801 ///
3802 /// Default: false (disabled)
3803 pub fn set_avoid_unnecessary_blocking_io(&mut self, val: bool) {
3804 unsafe {
3805 ffi::rocksdb_options_set_avoid_unnecessary_blocking_io(self.inner, u8::from(val));
3806 }
3807 }
3808
    /// Sets the compaction priority.
    ///
    /// If level compaction_style =
    /// kCompactionStyleLevel, for each level, which files are prioritized to be
    /// picked to compact.
    ///
    /// Default: `DBCompactionPri::MinOverlappingRatio`
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::{Options, DBCompactionPri};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_pri(DBCompactionPri::RoundRobin);
    /// ```
    pub fn set_compaction_pri(&mut self, pri: DBCompactionPri) {
        // The enum discriminant is passed straight through to the C API.
        unsafe {
            ffi::rocksdb_options_set_compaction_pri(self.inner, pri as c_int);
        }
    }
3830
3831 /// If true, the log numbers and sizes of the synced WALs are tracked
3832 /// in MANIFEST. During DB recovery, if a synced WAL is missing
3833 /// from disk, or the WAL's size does not match the recorded size in
3834 /// MANIFEST, an error will be reported and the recovery will be aborted.
3835 ///
3836 /// This is one additional protection against WAL corruption besides the
3837 /// per-WAL-entry checksum.
3838 ///
3839 /// Note that this option does not work with secondary instance.
3840 /// Currently, only syncing closed WALs are tracked. Calling `DB::SyncWAL()`,
3841 /// etc. or writing with `WriteOptions::sync=true` to sync the live WAL is not
3842 /// tracked for performance/efficiency reasons.
3843 ///
3844 /// See: <https://github.com/facebook/rocksdb/wiki/Track-WAL-in-MANIFEST>
3845 ///
3846 /// Default: false (disabled)
3847 pub fn set_track_and_verify_wals_in_manifest(&mut self, val: bool) {
3848 unsafe {
3849 ffi::rocksdb_options_set_track_and_verify_wals_in_manifest(self.inner, u8::from(val));
3850 }
3851 }
3852
3853 /// Returns the value of the `track_and_verify_wals_in_manifest` option.
3854 pub fn get_track_and_verify_wals_in_manifest(&self) -> bool {
3855 let val_u8 =
3856 unsafe { ffi::rocksdb_options_get_track_and_verify_wals_in_manifest(self.inner) };
3857 val_u8 != 0
3858 }
3859
3860 /// The DB unique ID can be saved in the DB manifest (preferred, this option)
3861 /// or an IDENTITY file (historical, deprecated), or both. If this option is
3862 /// set to false (old behavior), then `write_identity_file` must be set to true.
3863 /// The manifest is preferred because
3864 ///
3865 /// 1. The IDENTITY file is not checksummed, so it is not as safe against
3866 /// corruption.
3867 /// 2. The IDENTITY file may or may not be copied with the DB (e.g. not
3868 /// copied by BackupEngine), so is not reliable for the provenance of a DB.
3869 ///
3870 /// This option might eventually be obsolete and removed as Identity files
3871 /// are phased out.
3872 ///
3873 /// Default: true (enabled)
3874 pub fn set_write_dbid_to_manifest(&mut self, val: bool) {
3875 unsafe {
3876 ffi::rocksdb_options_set_write_dbid_to_manifest(self.inner, u8::from(val));
3877 }
3878 }
3879
3880 /// Returns the value of the `write_dbid_to_manifest` option.
3881 pub fn get_write_dbid_to_manifest(&self) -> bool {
3882 let val_u8 = unsafe { ffi::rocksdb_options_get_write_dbid_to_manifest(self.inner) };
3883 val_u8 != 0
3884 }
3885
    /// Sets the logger to use.
    ///
    /// By default `rocksdb` writes its internal logs to a file in the database
    /// directory; this can be changed to a custom callback with the
    /// [`InfoLogger::new_callback_logger`] constructor.
    pub fn set_info_logger(&mut self, mut logger: InfoLogger) {
        // Move the callback so it can be shared across database instances; the
        // options (and any DB opened from them) must keep it alive for as long as
        // the C logger may invoke it.
        self.outlive.logger_callback = logger.callback.take();
        unsafe {
            ffi::rocksdb_options_set_info_log(self.inner, logger.inner);
        }
    }
3898
    /// Returns a reference to the currently configured logger.
    pub fn get_info_logger(&self) -> InfoLogger {
        let raw = unsafe { ffi::rocksdb_options_get_info_log(self.inner) };
        // NOTE(review): the returned `InfoLogger` wraps the same raw pointer the
        // options hold; presumably the C getter hands out a new shared reference —
        // verify that dropping the returned value cannot free the logger the
        // options still use.
        InfoLogger {
            inner: raw,
            callback: self.outlive.logger_callback.clone(),
        }
    }
3907}
3908
3909impl Default for Options {
3910 fn default() -> Self {
3911 unsafe {
3912 let opts = ffi::rocksdb_options_create();
3913 assert!(!opts.is_null(), "Could not create RocksDB options");
3914
3915 Self {
3916 inner: opts,
3917 outlive: OptionsMustOutliveDB::default(),
3918 }
3919 }
3920 }
3921}
3922
impl FlushOptions {
    /// Creates flush options with RocksDB's defaults
    /// (equivalent to [`FlushOptions::default`]).
    pub fn new() -> FlushOptions {
        FlushOptions::default()
    }

    /// Waits until the flush is done.
    ///
    /// Default: true
    ///
    /// # Examples
    ///
    /// ```
    /// use rust_rocksdb::FlushOptions;
    ///
    /// let mut options = FlushOptions::default();
    /// options.set_wait(false);
    /// ```
    pub fn set_wait(&mut self, wait: bool) {
        // The C API takes the boolean as an unsigned char (0/1).
        unsafe {
            ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
        }
    }
}
3946
3947impl Default for FlushOptions {
3948 fn default() -> Self {
3949 let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3950 assert!(
3951 !flush_opts.is_null(),
3952 "Could not create RocksDB flush options"
3953 );
3954
3955 Self { inner: flush_opts }
3956 }
3957}
3958
impl WriteOptions {
    /// Creates write options with RocksDB's defaults
    /// (equivalent to [`WriteOptions::default`]).
    pub fn new() -> WriteOptions {
        WriteOptions::default()
    }

    /// Sets the sync mode. If true, the write will be flushed
    /// from the operating system buffer cache before the write is considered complete.
    /// If this flag is true, writes will be slower.
    ///
    /// Default: false
    pub fn set_sync(&mut self, sync: bool) {
        unsafe {
            ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
        }
    }

    /// Sets whether WAL should be active or not.
    /// If true, writes will not first go to the write ahead log,
    /// and the write may get lost after a crash.
    ///
    /// Default: false
    pub fn disable_wal(&mut self, disable: bool) {
        // Note: this particular C entry point takes the flag as a c_int rather than
        // the unsigned char used by the other boolean setters.
        unsafe {
            ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
        }
    }

    /// If true and if user is trying to write to column families that don't exist (they were dropped),
    /// ignore the write (don't return an error). If there are multiple writes in a WriteBatch,
    /// other writes will succeed.
    ///
    /// Default: false
    pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
        unsafe {
            ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
                self.inner,
                c_uchar::from(ignore),
            );
        }
    }

    /// If true and we need to wait or sleep for the write request, fails
    /// immediately with Status::Incomplete().
    ///
    /// Default: false
    pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
        unsafe {
            ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
        }
    }

    /// If true, this write request is of lower priority if compaction is
    /// behind. In this case, no_slowdown = true, the request will be cancelled
    /// immediately with Status::Incomplete() returned. Otherwise, it will be
    /// slowed down. The slowdown value is determined by RocksDB to guarantee
    /// it introduces minimum impacts to high priority writes.
    ///
    /// Default: false
    pub fn set_low_pri(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
        }
    }

    /// If true, writebatch will maintain the last insert positions of each
    /// memtable as hints in concurrent write. It can improve write performance
    /// in concurrent writes if keys in one writebatch are sequential. In
    /// non-concurrent writes (when concurrent_memtable_writes is false) this
    /// option will be ignored.
    ///
    /// Default: false
    pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
                self.inner,
                c_uchar::from(v),
            );
        }
    }
}
4039
4040impl Default for WriteOptions {
4041 fn default() -> Self {
4042 let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
4043 assert!(
4044 !write_opts.is_null(),
4045 "Could not create RocksDB write options"
4046 );
4047
4048 Self { inner: write_opts }
4049 }
4050}
4051
impl LruCacheOptions {
    /// Capacity of the cache, in the same units as the `charge` of each entry.
    /// This is typically measured in bytes, but can be a different unit if using
    /// kDontChargeCacheMetadata.
    pub fn set_capacity(&mut self, cap: usize) {
        unsafe {
            ffi::rocksdb_lru_cache_options_set_capacity(self.inner, cap);
        }
    }

    /// Cache is sharded into 2^num_shard_bits shards, by hash of key.
    /// If < 0, a good default is chosen based on the capacity and the
    /// implementation. (Mutex-based implementations are much more reliant
    /// on many shards for parallel scalability.)
    pub fn set_num_shard_bits(&mut self, val: c_int) {
        unsafe {
            ffi::rocksdb_lru_cache_options_set_num_shard_bits(self.inner, val);
        }
    }
}
4072
4073impl Default for LruCacheOptions {
4074 fn default() -> Self {
4075 let inner = unsafe { ffi::rocksdb_lru_cache_options_create() };
4076 assert!(
4077 !inner.is_null(),
4078 "Could not create RocksDB LRU cache options"
4079 );
4080
4081 Self { inner }
4082 }
4083}
4084
/// Tier of data a read request is allowed to touch.
///
/// Values are passed through to the C API as a raw integer (see
/// [`ReadOptions::set_read_tier`]), hence `#[repr(i32)]` and the explicit
/// `All = 0` anchor for the subsequent implicit discriminants.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(i32)]
pub enum ReadTier {
    /// Reads data in memtable, block cache, OS cache or storage.
    All = 0,
    /// Reads data in memtable or block cache.
    BlockCache,
    /// Reads persisted data. When WAL is disabled, this option will skip data in memtable.
    Persisted,
    /// Reads data in memtable. Used for memtable only iterators.
    Memtable,
}
4098
4099impl ReadOptions {
4100 // TODO add snapshot setting here
4101 // TODO add snapshot wrapper structs with proper destructors;
4102 // that struct needs an "iterator" impl too.
4103
4104 /// Specify whether the "data block"/"index block"/"filter block"
4105 /// read for this iteration should be cached in memory?
4106 /// Callers may wish to set this field to false for bulk scans.
4107 ///
4108 /// Default: true
4109 pub fn fill_cache(&mut self, v: bool) {
4110 unsafe {
4111 ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
4112 }
4113 }
4114
4115 /// Sets the snapshot which should be used for the read.
4116 /// The snapshot must belong to the DB that is being read and must
4117 /// not have been released.
4118 pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
4119 unsafe {
4120 ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
4121 }
4122 }
4123
4124 /// Sets the lower bound for an iterator.
4125 pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
4126 self.set_lower_bound_impl(Some(key.into()));
4127 }
4128
4129 /// Sets the upper bound for an iterator.
4130 /// The upper bound itself is not included on the iteration result.
4131 pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
4132 self.set_upper_bound_impl(Some(key.into()));
4133 }
4134
4135 /// Sets lower and upper bounds based on the provided range. This is
4136 /// similar to setting lower and upper bounds separately except that it also
4137 /// allows either bound to be reset.
4138 ///
4139 /// The argument can be a regular Rust range, e.g. `lower..upper`. However,
4140 /// since RocksDB upper bound is always excluded (i.e. range can never be
4141 /// fully closed) inclusive ranges (`lower..=upper` and `..=upper`) are not
4142 /// supported. For example:
4143 ///
4144 /// ```
4145 /// let mut options = rust_rocksdb::ReadOptions::default();
4146 /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
4147 /// ```
4148 ///
4149 /// In addition, [`crate::PrefixRange`] can be used to specify a range of
4150 /// keys with a given prefix. In particular, the above example is
4151 /// equivalent to:
4152 ///
4153 /// ```
4154 /// let mut options = rust_rocksdb::ReadOptions::default();
4155 /// options.set_iterate_range(rust_rocksdb::PrefixRange("xy".as_bytes()));
4156 /// ```
4157 ///
4158 /// Note that setting range using this method is separate to using prefix
4159 /// iterators. Prefix iterators use prefix extractor configured for
4160 /// a column family. Setting bounds via [`crate::PrefixRange`] is more akin
4161 /// to using manual prefix.
4162 ///
4163 /// Using this method clears any previously set bounds. In other words, the
4164 /// bounds can be reset by setting the range to `..` as in:
4165 ///
4166 /// ```
4167 /// let mut options = rust_rocksdb::ReadOptions::default();
4168 /// options.set_iterate_range(..);
4169 /// ```
4170 pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
4171 let (lower, upper) = range.into_bounds();
4172 self.set_lower_bound_impl(lower);
4173 self.set_upper_bound_impl(upper);
4174 }
4175
4176 fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4177 let (ptr, len) = if let Some(ref bound) = bound {
4178 (bound.as_ptr() as *const c_char, bound.len())
4179 } else if self.iterate_lower_bound.is_some() {
4180 (std::ptr::null(), 0)
4181 } else {
4182 return;
4183 };
4184 self.iterate_lower_bound = bound;
4185 unsafe {
4186 ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
4187 }
4188 }
4189
4190 fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
4191 let (ptr, len) = if let Some(ref bound) = bound {
4192 (bound.as_ptr() as *const c_char, bound.len())
4193 } else if self.iterate_upper_bound.is_some() {
4194 (std::ptr::null(), 0)
4195 } else {
4196 return;
4197 };
4198 self.iterate_upper_bound = bound;
4199 unsafe {
4200 ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
4201 }
4202 }
4203
4204 /// Specify if this read request should process data that ALREADY
4205 /// resides on a particular cache. If the required data is not
4206 /// found at the specified cache, then Status::Incomplete is returned.
4207 ///
4208 /// Default: ::All
4209 pub fn set_read_tier(&mut self, tier: ReadTier) {
4210 unsafe {
4211 ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
4212 }
4213 }
4214
4215 /// Enforce that the iterator only iterates over the same
4216 /// prefix as the seek.
4217 /// This option is effective only for prefix seeks, i.e. prefix_extractor is
4218 /// non-null for the column family and total_order_seek is false. Unlike
4219 /// iterate_upper_bound, prefix_same_as_start only works within a prefix
4220 /// but in both directions.
4221 ///
4222 /// Default: false
4223 pub fn set_prefix_same_as_start(&mut self, v: bool) {
4224 unsafe {
4225 ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
4226 }
4227 }
4228
4229 /// Enable a total order seek regardless of index format (e.g. hash index)
4230 /// used in the table. Some table format (e.g. plain table) may not support
4231 /// this option.
4232 ///
4233 /// If true when calling Get(), we also skip prefix bloom when reading from
4234 /// block based table. It provides a way to read existing data after
4235 /// changing implementation of prefix extractor.
4236 pub fn set_total_order_seek(&mut self, v: bool) {
4237 unsafe {
4238 ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
4239 }
4240 }
4241
4242 /// Sets a threshold for the number of keys that can be skipped
4243 /// before failing an iterator seek as incomplete. The default value of 0 should be used to
4244 /// never fail a request as incomplete, even on skipping too many keys.
4245 ///
4246 /// Default: 0
4247 pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
4248 unsafe {
4249 ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
4250 }
4251 }
4252
4253 /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we schedule a background job
4254 /// in the flush job queue and delete obsolete files in background.
4255 ///
4256 /// Default: false
4257 pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
4258 unsafe {
4259 ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
4260 self.inner,
4261 c_uchar::from(v),
4262 );
4263 }
4264 }
4265
4266 /// If true, keys deleted using the DeleteRange() API will be visible to
4267 /// readers until they are naturally deleted during compaction.
4268 ///
4269 /// Default: false
4270 #[deprecated(
4271 note = "deprecated in RocksDB 10.2.1: no performance impact if DeleteRange is not used"
4272 )]
4273 pub fn set_ignore_range_deletions(&mut self, v: bool) {
4274 unsafe {
4275 ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
4276 }
4277 }
4278
4279 /// If true, all data read from underlying storage will be
4280 /// verified against corresponding checksums.
4281 ///
4282 /// Default: true
4283 pub fn set_verify_checksums(&mut self, v: bool) {
4284 unsafe {
4285 ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
4286 }
4287 }
4288
4289 /// If non-zero, an iterator will create a new table reader which
4290 /// performs reads of the given size. Using a large size (> 2MB) can
4291 /// improve the performance of forward iteration on spinning disks.
4292 /// Default: 0
4293 ///
4294 /// ```
4295 /// use rust_rocksdb::{ReadOptions};
4296 ///
4297 /// let mut opts = ReadOptions::default();
4298 /// opts.set_readahead_size(4_194_304); // 4mb
4299 /// ```
4300 pub fn set_readahead_size(&mut self, v: usize) {
4301 unsafe {
4302 ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
4303 }
4304 }
4305
4306 /// If auto_readahead_size is set to true, it will auto tune the readahead_size
4307 /// during scans internally.
4308 /// For this feature to be enabled, iterate_upper_bound must also be specified.
4309 ///
4310 /// NOTE: - Recommended for forward Scans only.
4311 /// - If there is a backward scans, this option will be
4312 /// disabled internally and won't be enabled again if the forward scan
4313 /// is issued again.
4314 ///
4315 /// Default: true
4316 pub fn set_auto_readahead_size(&mut self, v: bool) {
4317 unsafe {
4318 ffi::rocksdb_readoptions_set_auto_readahead_size(self.inner, c_uchar::from(v));
4319 }
4320 }
4321
4322 /// If true, create a tailing iterator. Note that tailing iterators
4323 /// only support moving in the forward direction. Iterating in reverse
4324 /// or seek_to_last are not supported.
4325 pub fn set_tailing(&mut self, v: bool) {
4326 unsafe {
4327 ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
4328 }
4329 }
4330
4331 /// Specifies the value of "pin_data". If true, it keeps the blocks
4332 /// loaded by the iterator pinned in memory as long as the iterator is not deleted,
4333 /// If used when reading from tables created with
4334 /// BlockBasedTableOptions::use_delta_encoding = false,
4335 /// Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to
4336 /// return 1.
4337 ///
4338 /// Default: false
4339 pub fn set_pin_data(&mut self, v: bool) {
4340 unsafe {
4341 ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
4342 }
4343 }
4344
4345 /// Asynchronously prefetch some data.
4346 ///
4347 /// Used for sequential reads and internal automatic prefetching.
4348 ///
4349 /// Default: `false`
4350 pub fn set_async_io(&mut self, v: bool) {
4351 unsafe {
4352 ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
4353 }
4354 }
4355
4356 /// Deadline for completing an API call (Get/MultiGet/Seek/Next for now)
4357 /// in microseconds.
4358 /// It should be set to microseconds since epoch, i.e, gettimeofday or
4359 /// equivalent plus allowed duration in microseconds.
4360 /// This is best effort. The call may exceed the deadline if there is IO
4361 /// involved and the file system doesn't support deadlines, or due to
4362 /// checking for deadline periodically rather than for every key if
4363 /// processing a batch
4364 pub fn set_deadline(&mut self, microseconds: u64) {
4365 unsafe {
4366 ffi::rocksdb_readoptions_set_deadline(self.inner, microseconds);
4367 }
4368 }
4369
4370 /// A timeout in microseconds to be passed to the underlying FileSystem for
4371 /// reads. As opposed to deadline, this determines the timeout for each
4372 /// individual file read request. If a MultiGet/Get/Seek/Next etc call
4373 /// results in multiple reads, each read can last up to io_timeout us.
4374 pub fn set_io_timeout(&mut self, microseconds: u64) {
4375 unsafe {
4376 ffi::rocksdb_readoptions_set_io_timeout(self.inner, microseconds);
4377 }
4378 }
4379
4380 /// Timestamp of operation. Read should return the latest data visible to the
4381 /// specified timestamp. All timestamps of the same database must be of the
4382 /// same length and format. The user is responsible for providing a customized
4383 /// compare function via Comparator to order <key, timestamp> tuples.
4384 /// For iterator, iter_start_ts is the lower bound (older) and timestamp
4385 /// serves as the upper bound. Versions of the same record that fall in
4386 /// the timestamp range will be returned. If iter_start_ts is nullptr,
4387 /// only the most recent version visible to timestamp is returned.
4388 /// The user-specified timestamp feature is still under active development,
4389 /// and the API is subject to change.
4390 pub fn set_timestamp<S: Into<Vec<u8>>>(&mut self, ts: S) {
4391 self.set_timestamp_impl(Some(ts.into()));
4392 }
4393
4394 fn set_timestamp_impl(&mut self, ts: Option<Vec<u8>>) {
4395 let (ptr, len) = if let Some(ref ts) = ts {
4396 (ts.as_ptr() as *const c_char, ts.len())
4397 } else if self.timestamp.is_some() {
4398 // The stored timestamp is a `Some` but we're updating it to a `None`.
4399 // This means to cancel a previously set timestamp.
4400 // To do this, use a null pointer and zero length.
4401 (std::ptr::null(), 0)
4402 } else {
4403 return;
4404 };
4405 self.timestamp = ts;
4406 unsafe {
4407 ffi::rocksdb_readoptions_set_timestamp(self.inner, ptr, len);
4408 }
4409 }
4410
4411 /// See `set_timestamp`
4412 pub fn set_iter_start_ts<S: Into<Vec<u8>>>(&mut self, ts: S) {
4413 self.set_iter_start_ts_impl(Some(ts.into()));
4414 }
4415
4416 fn set_iter_start_ts_impl(&mut self, ts: Option<Vec<u8>>) {
4417 let (ptr, len) = if let Some(ref ts) = ts {
4418 (ts.as_ptr() as *const c_char, ts.len())
4419 } else if self.timestamp.is_some() {
4420 (std::ptr::null(), 0)
4421 } else {
4422 return;
4423 };
4424 self.iter_start_ts = ts;
4425 unsafe {
4426 ffi::rocksdb_readoptions_set_iter_start_ts(self.inner, ptr, len);
4427 }
4428 }
4429}
4430
4431impl Default for ReadOptions {
4432 fn default() -> Self {
4433 unsafe {
4434 Self {
4435 inner: ffi::rocksdb_readoptions_create(),
4436 timestamp: None,
4437 iter_start_ts: None,
4438 iterate_upper_bound: None,
4439 iterate_lower_bound: None,
4440 }
4441 }
4442 }
4443}
4444
4445impl IngestExternalFileOptions {
4446 /// Can be set to true to move the files instead of copying them.
4447 pub fn set_move_files(&mut self, v: bool) {
4448 unsafe {
4449 ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
4450 }
4451 }
4452
4453 /// If set to false, an ingested file keys could appear in existing snapshots
4454 /// that where created before the file was ingested.
4455 pub fn set_snapshot_consistency(&mut self, v: bool) {
4456 unsafe {
4457 ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
4458 self.inner,
4459 c_uchar::from(v),
4460 );
4461 }
4462 }
4463
4464 /// If set to false, IngestExternalFile() will fail if the file key range
4465 /// overlaps with existing keys or tombstones in the DB.
4466 pub fn set_allow_global_seqno(&mut self, v: bool) {
4467 unsafe {
4468 ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
4469 self.inner,
4470 c_uchar::from(v),
4471 );
4472 }
4473 }
4474
4475 /// If set to false and the file key range overlaps with the memtable key range
4476 /// (memtable flush required), IngestExternalFile will fail.
4477 pub fn set_allow_blocking_flush(&mut self, v: bool) {
4478 unsafe {
4479 ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
4480 self.inner,
4481 c_uchar::from(v),
4482 );
4483 }
4484 }
4485
4486 /// Set to true if you would like duplicate keys in the file being ingested
4487 /// to be skipped rather than overwriting existing data under that key.
4488 /// Usecase: back-fill of some historical data in the database without
4489 /// over-writing existing newer version of data.
4490 /// This option could only be used if the DB has been running
4491 /// with allow_ingest_behind=true since the dawn of time.
4492 /// All files will be ingested at the bottommost level with seqno=0.
4493 pub fn set_ingest_behind(&mut self, v: bool) {
4494 unsafe {
4495 ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
4496 }
4497 }
4498}
4499
4500impl Default for IngestExternalFileOptions {
4501 fn default() -> Self {
4502 unsafe {
4503 Self {
4504 inner: ffi::rocksdb_ingestexternalfileoptions_create(),
4505 }
4506 }
4507 }
4508}
4509
/// Used by BlockBasedOptions::set_index_type.
///
/// Selects the format of the index block in block-based tables.
pub enum BlockBasedIndexType {
    /// A space efficient index block that is optimized for
    /// binary-search-based index.
    BinarySearch,

    /// The hash index, if enabled, will perform a hash lookup if
    /// a prefix extractor has been provided through Options::set_prefix_extractor.
    HashSearch,

    /// A two-level index implementation. Both levels are binary search indexes.
    TwoLevelIndexSearch,
}
4523
/// Used by BlockBasedOptions::set_data_block_index_type.
///
/// The explicit discriminants are the raw values handed over FFI (hence
/// `#[repr(C)]`).
#[repr(C)]
pub enum DataBlockIndexType {
    /// Use binary search when performing point lookup for keys in data blocks.
    /// This is the default.
    BinarySearch = 0,

    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
    /// compatible with databases created without this feature. Once turned on, existing data will
    /// be gradually converted to the hash index format.
    BinaryAndHash = 1,
}
4536
/// Defines the underlying memtable implementation.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
pub enum MemtableFactory {
    /// Vector-backed memtable; see the wiki above for trade-offs.
    Vector,
    /// Hash table of skip lists; see the wiki for the meaning of the tuning
    /// fields (bucket count, skip-list height, and branching factor).
    HashSkipList {
        bucket_count: usize,
        height: i32,
        branching_factor: i32,
    },
    /// Hash table of linked lists with the given number of buckets.
    HashLinkList {
        bucket_count: usize,
    },
}
4550
/// Used by BlockBasedOptions::set_checksum_type.
///
/// The variant values are the raw values passed over FFI.
pub enum ChecksumType {
    NoChecksum = 0,
    CRC32c = 1,
    XXHash = 2,
    XXHash64 = 3,
    XXH3 = 4, // Supported since RocksDB 6.27
}
4559
/// Used in [`PlainTableFactoryOptions`].
///
/// The variant values are the raw values passed over FFI.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum KeyEncodingType {
    /// Always write full keys.
    #[default]
    Plain = 0,
    /// Find opportunities to write the same prefix for multiple rows.
    Prefix = 1,
}
4569
/// Used with DBOptions::set_plain_table_factory.
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
///
/// Defaults:
/// user_key_length: 0 (variable length)
/// bloom_bits_per_key: 10
/// hash_table_ratio: 0.75
/// index_sparseness: 16
/// huge_page_tlb_size: 0
/// encoding_type: KeyEncodingType::Plain
/// full_scan_mode: false
/// store_index_in_file: false
pub struct PlainTableFactoryOptions {
    /// Fixed user-key length; 0 means variable-length keys (default).
    pub user_key_length: u32,
    /// Bloom filter bits per key (default 10).
    pub bloom_bits_per_key: i32,
    /// Target fill ratio of the hash table (default 0.75).
    pub hash_table_ratio: f64,
    /// Index sparseness (default 16).
    pub index_sparseness: usize,
    /// Huge page TLB size; 0 disables huge pages (default).
    pub huge_page_tlb_size: usize,
    /// Key encoding; see [`KeyEncodingType`] (default `Plain`).
    pub encoding_type: KeyEncodingType,
    /// Whether to open the table in full-scan mode (default false).
    pub full_scan_mode: bool,
    /// Whether to store the index in the SST file itself (default false).
    pub store_index_in_file: bool,
}
4593
/// Compression algorithms usable by RocksDB; the discriminants mirror the
/// constants exposed by the C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompressionType {
    None = ffi::rocksdb_no_compression as isize,
    Snappy = ffi::rocksdb_snappy_compression as isize,
    Zlib = ffi::rocksdb_zlib_compression as isize,
    Bz2 = ffi::rocksdb_bz2_compression as isize,
    Lz4 = ffi::rocksdb_lz4_compression as isize,
    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
    Zstd = ffi::rocksdb_zstd_compression as isize,
}
4605
/// Compaction style; the discriminants mirror the constants exposed by the
/// C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionStyle {
    Level = ffi::rocksdb_level_compaction as isize,
    Universal = ffi::rocksdb_universal_compaction as isize,
    Fifo = ffi::rocksdb_fifo_compaction as isize,
}
4613
/// WAL recovery mode; the discriminants mirror the constants exposed by the
/// C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBRecoveryMode {
    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}
4622
/// Which I/O operations a rate limiter applies to. `#[repr(i32)]` so the
/// discriminants can be handed over FFI as-is.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum RateLimiterMode {
    KReadsOnly = 0,
    KWritesOnly = 1,
    KAllIo = 2,
}
4630
/// Priority used to pick the next file to compact; the discriminants mirror
/// the constants exposed by the C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionPri {
    ByCompensatedSize = ffi::rocksdb_k_by_compensated_size_compaction_pri as isize,
    OldestLargestSeqFirst = ffi::rocksdb_k_oldest_largest_seq_first_compaction_pri as isize,
    OldestSmallestSeqFirst = ffi::rocksdb_k_oldest_smallest_seq_first_compaction_pri as isize,
    MinOverlappingRatio = ffi::rocksdb_k_min_overlapping_ratio_compaction_pri as isize,
    RoundRobin = ffi::rocksdb_k_round_robin_compaction_pri as isize,
}
4640
/// Block-cache pinning tier for block-based tables; the discriminants mirror
/// the constants exposed by the C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum BlockBasedPinningTier {
    Fallback = ffi::rocksdb_block_based_k_fallback_pinning_tier as isize,
    None = ffi::rocksdb_block_based_k_none_pinning_tier as isize,
    FlushAndSimilar = ffi::rocksdb_block_based_k_flush_and_similar_pinning_tier as isize,
    All = ffi::rocksdb_block_based_k_all_pinning_tier as isize,
}
4649
/// FIFO compaction settings, owned through the RocksDB C API.
pub struct FifoCompactOptions {
    // Owned C object; allocated in `Default::default`, freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}
4653
4654impl Default for FifoCompactOptions {
4655 fn default() -> Self {
4656 let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
4657 assert!(
4658 !opts.is_null(),
4659 "Could not create RocksDB Fifo Compaction Options"
4660 );
4661
4662 Self { inner: opts }
4663 }
4664}
4665
impl Drop for FifoCompactOptions {
    fn drop(&mut self) {
        // Frees the C-side object allocated in `Default::default`.
        unsafe {
            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
        }
    }
}
4673
impl FifoCompactOptions {
    /// Sets the max table file size.
    ///
    /// Once the total sum of table files reaches this, we will delete the oldest
    /// table file
    ///
    /// Default: 1GB
    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
        // Forwards straight to the C API.
        unsafe {
            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
        }
    }
}
4687
/// Algorithm used to stop picking files into a single universal compaction
/// run; the discriminants mirror the constants exposed by the C API.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum UniversalCompactionStopStyle {
    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
}
4694
/// Universal-compaction settings, owned through the RocksDB C API.
pub struct UniversalCompactOptions {
    // Owned C object; allocated in `Default::default`, freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
}
4698
4699impl Default for UniversalCompactOptions {
4700 fn default() -> Self {
4701 let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
4702 assert!(
4703 !opts.is_null(),
4704 "Could not create RocksDB Universal Compaction Options"
4705 );
4706
4707 Self { inner: opts }
4708 }
4709}
4710
impl Drop for UniversalCompactOptions {
    fn drop(&mut self) {
        // Frees the C-side object allocated in `Default::default`.
        unsafe {
            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
        }
    }
}
4718
impl UniversalCompactOptions {
    /// Sets the percentage flexibility while comparing file size.
    /// If the candidate file(s) size is 1% smaller than the next file's size,
    /// then include next file into this candidate set.
    ///
    /// Default: 1
    pub fn set_size_ratio(&mut self, ratio: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
        }
    }

    /// Sets the minimum number of files in a single compaction run.
    ///
    /// Default: 2
    pub fn set_min_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
        }
    }

    /// Sets the maximum number of files in a single compaction run.
    ///
    /// Default: UINT_MAX
    pub fn set_max_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
        }
    }

    /// Sets the size amplification.
    ///
    /// It is defined as the amount (in percentage) of
    /// additional storage needed to store a single byte of data in the database.
    /// For example, a size amplification of 2% means that a database that
    /// contains 100 bytes of user-data may occupy up to 102 bytes of
    /// physical storage. By this definition, a fully compacted database has
    /// a size amplification of 0%. Rocksdb uses the following heuristic
    /// to calculate size amplification: it assumes that all files excluding
    /// the earliest file contribute to the size amplification.
    ///
    /// Default: 200, which means that a 100 byte database could require up to 300 bytes of storage.
    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
                self.inner, v,
            );
        }
    }

    /// Sets the percentage of compression size.
    ///
    /// If this option is set to be -1, all the output files
    /// will follow compression type specified.
    ///
    /// If this option is not negative, we will try to make sure compressed
    /// size is just above this value. In normal cases, at least this percentage
    /// of data will be compressed.
    /// When we are compacting to a new file, here is the criteria whether
    /// it needs to be compressed: assuming here are the list of files sorted
    /// by generation time:
    /// A1...An B1...Bm C1...Ct
    /// where A1 is the newest and Ct is the oldest, and we are going to compact
    /// B1...Bm, we calculate the total size of all the files as total_size, as
    /// well as the total size of C1...Ct as total_C, the compaction output file
    /// will be compressed iff
    /// total_C / total_size < this percentage
    ///
    /// Default: -1
    pub fn set_compression_size_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
        }
    }

    /// Sets the algorithm used to stop picking files into a single compaction run.
    ///
    /// Default: `UniversalCompactionStopStyle::Total`
    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
        }
    }
}
4803
/// Controls whether the bottommost level is recompacted during a manual
/// compaction; passed over FFI as a `u8` (hence `#[repr(u8)]`).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum BottommostLevelCompaction {
    /// Skip bottommost level compaction
    Skip = 0,
    /// Only compact bottommost level if there is a compaction filter
    /// This is the default option
    IfHaveCompactionFilter,
    /// Always compact bottommost level
    Force,
    /// Always compact bottommost level but in bottommost level avoid
    /// double-compacting files created in the same compaction
    ForceOptimized,
}
4819
/// Options for manual compactions, owned through the RocksDB C API.
pub struct CompactOptions {
    // Owned C object; allocated in `Default::default`, freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
    // Owns the timestamp bytes whose raw pointer was handed to the C API in
    // `set_full_history_ts_low_impl`, keeping them alive alongside `inner`.
    full_history_ts_low: Option<Vec<u8>>,
}
4824
4825impl Default for CompactOptions {
4826 fn default() -> Self {
4827 let opts = unsafe { ffi::rocksdb_compactoptions_create() };
4828 assert!(!opts.is_null(), "Could not create RocksDB Compact Options");
4829
4830 Self {
4831 inner: opts,
4832 full_history_ts_low: None,
4833 }
4834 }
4835}
4836
impl Drop for CompactOptions {
    fn drop(&mut self) {
        // Frees the C-side object; the `full_history_ts_low` buffer (if any)
        // is dropped afterwards by the compiler-generated drop glue.
        unsafe {
            ffi::rocksdb_compactoptions_destroy(self.inner);
        }
    }
}
4844
4845impl CompactOptions {
4846 /// If more than one thread calls manual compaction,
4847 /// only one will actually schedule it while the other threads will simply wait
4848 /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
4849 /// is set to true, the call will disable scheduling of automatic compaction jobs
4850 /// and wait for existing automatic compaction jobs to finish.
4851 pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
4852 unsafe {
4853 ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
4854 self.inner,
4855 c_uchar::from(v),
4856 );
4857 }
4858 }
4859
4860 /// Sets bottommost level compaction.
4861 pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
4862 unsafe {
4863 ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
4864 }
4865 }
4866
4867 /// If true, compacted files will be moved to the minimum level capable
4868 /// of holding the data or given level (specified non-negative target_level).
4869 pub fn set_change_level(&mut self, v: bool) {
4870 unsafe {
4871 ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
4872 }
4873 }
4874
4875 /// If change_level is true and target_level have non-negative value, compacted
4876 /// files will be moved to target_level.
4877 pub fn set_target_level(&mut self, lvl: c_int) {
4878 unsafe {
4879 ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
4880 }
4881 }
4882
4883 /// Set user-defined timestamp low bound, the data with older timestamp than
4884 /// low bound maybe GCed by compaction. Default: nullptr
4885 pub fn set_full_history_ts_low<S: Into<Vec<u8>>>(&mut self, ts: S) {
4886 self.set_full_history_ts_low_impl(Some(ts.into()));
4887 }
4888
4889 fn set_full_history_ts_low_impl(&mut self, ts: Option<Vec<u8>>) {
4890 let (ptr, len) = if let Some(ref ts) = ts {
4891 (ts.as_ptr() as *mut c_char, ts.len())
4892 } else if self.full_history_ts_low.is_some() {
4893 (std::ptr::null::<Vec<u8>>() as *mut c_char, 0)
4894 } else {
4895 return;
4896 };
4897 self.full_history_ts_low = ts;
4898 unsafe {
4899 ffi::rocksdb_compactoptions_set_full_history_ts_low(self.inner, ptr, len);
4900 }
4901 }
4902}
4903
/// Options for `WaitForCompact`, owned through the RocksDB C API.
pub struct WaitForCompactOptions {
    // Owned C object; allocated in `Default::default`, freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_wait_for_compact_options_t,
}
4907
4908impl Default for WaitForCompactOptions {
4909 fn default() -> Self {
4910 let opts = unsafe { ffi::rocksdb_wait_for_compact_options_create() };
4911 assert!(
4912 !opts.is_null(),
4913 "Could not create RocksDB Wait For Compact Options"
4914 );
4915
4916 Self { inner: opts }
4917 }
4918}
4919
impl Drop for WaitForCompactOptions {
    fn drop(&mut self) {
        // Frees the C-side object allocated in `Default::default`.
        unsafe {
            ffi::rocksdb_wait_for_compact_options_destroy(self.inner);
        }
    }
}
4927
impl WaitForCompactOptions {
    /// If true, abort waiting if background jobs are paused. If false,
    /// ContinueBackgroundWork() must be called to resume the background jobs.
    /// Otherwise, jobs that were queued, but not scheduled yet may never finish
    /// and WaitForCompact() may wait indefinitely (if timeout is set, it will
    /// abort after the timeout).
    ///
    /// Default: false
    pub fn set_abort_on_pause(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_abort_on_pause(self.inner, c_uchar::from(v));
        }
    }

    /// If true, flush all column families before starting to wait.
    ///
    /// Default: false
    pub fn set_flush(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_flush(self.inner, c_uchar::from(v));
        }
    }

    /// Timeout in microseconds for waiting for compaction to complete.
    /// When `timeout == 0`, WaitForCompact() will wait as long as there's
    /// background work to finish.
    ///
    /// Default: 0
    pub fn set_timeout(&mut self, microseconds: u64) {
        unsafe {
            ffi::rocksdb_wait_for_compact_options_set_timeout(self.inner, microseconds);
        }
    }
}
4962
/// Represents a path where sst files can be put into
pub struct DBPath {
    // Owned C object; allocated in `DBPath::new`, freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
}
4967
4968impl DBPath {
4969 /// Create a new path
4970 pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
4971 let p = to_cpath(path.as_ref()).unwrap();
4972 let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
4973 if dbpath.is_null() {
4974 Err(Error::new(format!(
4975 "Could not create path for storing sst files at location: {}",
4976 path.as_ref().display()
4977 )))
4978 } else {
4979 Ok(DBPath { inner: dbpath })
4980 }
4981 }
4982}
4983
impl Drop for DBPath {
    fn drop(&mut self) {
        // Frees the C-side object allocated in `DBPath::new`.
        unsafe {
            ffi::rocksdb_dbpath_destroy(self.inner);
        }
    }
}
4991
/// A RocksDB info logger: either the built-in stderr logger or a logger that
/// forwards messages to a Rust closure.
pub struct InfoLogger {
    // Owned C-side logger handle; released in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_logger_t,
    // Keeps the Rust closure alive while the C side holds a raw pointer to it
    // (registered in `new_callback_logger`); `None` for stderr loggers.
    callback: Option<Arc<LoggerCallback>>,
}
4996
impl InfoLogger {
    /// Creates a new logger that redirects logs to `STDERR` with an optional
    /// prefix.
    ///
    /// # Panics
    /// Panics if `prefix` contains an interior NUL byte.
    pub fn new_stderr_logger<S: AsRef<str>>(log_level: LogLevel, prefix: Option<S>) -> Self {
        let prefix = prefix.map(|s| {
            s.as_ref()
                .into_c_string()
                .expect("cannot have NULL in prefix")
        });
        // The owning `CString` in `prefix` lives until after the FFI call
        // below, so this raw pointer cannot dangle.
        let prefix_ptr = match prefix.as_ref() {
            Some(s) => s.as_ptr(),
            None => std::ptr::null(),
        };
        let inner =
            unsafe { ffi::rocksdb_logger_create_stderr_logger(log_level as i32, prefix_ptr) };
        Self {
            inner,
            // no Rust callback: RocksDB implements this
            callback: None,
        }
    }

    /// Creates a new logger that redirects logs to a custom callback.
    pub fn new_callback_logger<F: Fn(LogLevel, &str) + Sync + Send + 'static>(
        level: LogLevel,
        cb: F,
    ) -> Self {
        // use an Arc<Box<...>> so we can reference count, and still pass a thin pointer to C
        let arc_cb: Arc<LoggerCallback> = Arc::new(Box::new(cb));
        // Raw pointer handed to C; kept valid by storing `arc_cb` in
        // `self.callback` below for the lifetime of this logger.
        let raw_cb: LoggerCallbackPtr = Arc::as_ptr(&arc_cb);
        let inner = unsafe {
            ffi::rocksdb_logger_create_callback_logger(
                level as i32,
                Some(logger_callback),
                raw_cb as *mut c_void,
            )
        };
        Self {
            inner,
            callback: Some(arc_cb),
        }
    }
}
5040
impl Drop for InfoLogger {
    fn drop(&mut self) {
        // Releases our handle on the C-side logger; the `callback` Arc (if
        // any) is dropped afterwards by the compiler-generated drop glue.
        // NOTE(review): this assumes RocksDB no longer invokes the callback
        // once this handle and every Options/DB sharing the logger are gone —
        // confirm against the C API's logger reference counting.
        unsafe {
            ffi::rocksdb_logger_destroy(self.inner);
        }
    }
}
5048
/// Options for importing column families. See
/// [DB::create_column_family_with_import](crate::DB::create_column_family_with_import).
pub struct ImportColumnFamilyOptions {
    // Owned C object; allocated in `new`, freed in `Drop`.
    pub(crate) inner: *mut ffi::rocksdb_import_column_family_options_t,
}
5054
5055impl ImportColumnFamilyOptions {
5056 pub fn new() -> Self {
5057 let inner = unsafe { ffi::rocksdb_import_column_family_options_create() };
5058 ImportColumnFamilyOptions { inner }
5059 }
5060
5061 /// Determines whether to move the provided set of files on import. The default
5062 /// behavior is to copy the external files on import. Setting `move_files` to `true`
5063 /// will move the files instead of copying them. See
5064 /// [DB::create_column_family_with_import](crate::DB::create_column_family_with_import)
5065 /// for more information.
5066 pub fn set_move_files(&mut self, move_files: bool) {
5067 unsafe {
5068 ffi::rocksdb_import_column_family_options_set_move_files(
5069 self.inner,
5070 c_uchar::from(move_files),
5071 );
5072 }
5073 }
5074}
5075
impl Default for ImportColumnFamilyOptions {
    fn default() -> Self {
        // Delegates to `new`, which allocates the C-side object.
        Self::new()
    }
}
5081
impl Drop for ImportColumnFamilyOptions {
    fn drop(&mut self) {
        // Frees the C-side object allocated in `new`.
        unsafe { ffi::rocksdb_import_column_family_options_destroy(self.inner) }
    }
}
5087
/// Ensures the unsafe casts use the same type.
type LoggerCallbackPtr = *const LoggerCallback;

/// C-side trampoline registered via `rocksdb_logger_create_callback_logger`;
/// decodes the raw message and forwards it to the Rust closure.
///
/// # Safety
/// `raw_cb` must be the `LoggerCallbackPtr` registered in
/// `InfoLogger::new_callback_logger` and must still be alive; `msg` must point
/// to `len` readable bytes.
unsafe extern "C" fn logger_callback(
    raw_cb: *mut c_void,
    level: c_uint,
    msg: *mut c_char,
    len: size_t,
) {
    // SAFETY: RocksDB passes back the pointer registered at logger creation;
    // the owning `InfoLogger` keeps the callback alive via its `callback` Arc.
    let rust_callback: &LoggerCallback = unsafe { &*(raw_cb as LoggerCallbackPtr) };
    // SAFETY: per the C API contract, `msg` points to `len` bytes of log text.
    let raw_msg = unsafe { std::slice::from_raw_parts(msg as *const u8, len) };
    // The message is not guaranteed to be UTF-8, so convert lossily.
    let msg = String::from_utf8_lossy(raw_msg);
    let level =
        LogLevel::try_from_raw(level as i32).expect("rocksdb generated an invalid log level");
    (rust_callback)(level, &msg);
}
5104
#[cfg(test)]
mod tests {
    use crate::cache::Cache;
    use crate::db_options::{DBCompactionPri, InfoLogger, WriteBufferManager};
    use crate::{MemtableFactory, Options};

    #[test]
    fn test_enable_statistics() {
        let mut opts = Options::default();
        assert_eq!(None, opts.get_statistics());
        opts.enable_statistics();
        opts.set_stats_dump_period_sec(60);
        assert!(opts.get_statistics().is_some());

        // A freshly created Options must not have statistics enabled.
        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_memtable_factory() {
        // Smoke test: each factory variant must be accepted without panicking.
        let mut opts = Options::default();
        opts.set_memtable_factory(MemtableFactory::Vector);
        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
        opts.set_memtable_factory(MemtableFactory::HashSkipList {
            bucket_count: 100,
            height: 4,
            branching_factor: 4,
        });
    }

    #[test]
    fn test_use_fsync() {
        let mut opts = Options::default();
        assert!(!opts.get_use_fsync());
        opts.set_use_fsync(true);
        assert!(opts.get_use_fsync());
    }

    #[test]
    fn test_set_stats_persist_period_sec() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_persist_period_sec(5);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_write_buffer_manager() {
        let mut opts = Options::default();
        let lrucache = Cache::new_lru_cache(100);
        let write_buffer_manager =
            WriteBufferManager::new_write_buffer_manager_with_cache(100, false, lrucache);
        assert_eq!(write_buffer_manager.get_buffer_size(), 100);
        assert_eq!(write_buffer_manager.get_usage(), 0);
        assert!(write_buffer_manager.enabled());

        opts.set_write_buffer_manager(&write_buffer_manager);
        drop(opts);

        // WriteBufferManager outlives options
        assert!(write_buffer_manager.enabled());
    }

    #[test]
    fn compaction_pri() {
        let mut opts = Options::default();
        opts.set_compaction_pri(DBCompactionPri::RoundRobin);
        opts.create_if_missing(true);
        let tmp = tempfile::tempdir().unwrap();
        let _db = crate::DB::open(&opts, tmp.path()).unwrap();

        // Find the generated OPTIONS file and verify the priority was persisted.
        let options = std::fs::read_dir(tmp.path())
            .unwrap()
            .find_map(|x| {
                let x = x.ok()?;
                x.file_name()
                    .into_string()
                    .unwrap()
                    .contains("OPTIONS")
                    .then_some(x.path())
            })
            .map(std::fs::read_to_string)
            .unwrap()
            .unwrap();

        assert!(options.contains("compaction_pri=kRoundRobin"));
    }

    #[test]
    fn test_callback_logger() {
        let (log_snd, log_rcv) = std::sync::mpsc::channel();
        let callback = move |level, msg: &str| {
            log_snd.send((level, msg.to_string())).ok();
        };

        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_info_logger(InfoLogger::new_callback_logger(
            super::LogLevel::Debug,
            callback,
        ));

        // create 2 DBs with the options then drop the options to ensure it is reference counted
        let tmp = tempfile::tempdir().unwrap();
        let db = crate::DB::open(&opts, tmp.path()).unwrap();
        db.put(b"testkey", b"testvalue").unwrap();
        db.flush().unwrap();
        db.delete(b"testkey").unwrap();
        db.flush().unwrap();
        db.compact_range(Some(b"a"), Some(b"z"));
        assert!(log_rcv.try_recv().is_ok());
        drop(db);

        let tmp2 = tempfile::tempdir().unwrap();
        let db2 = crate::DB::open(&opts, tmp2.path()).unwrap();

        // get the configured logger before dropping the options
        let logger = opts.get_info_logger();
        drop(opts);

        // clear the logs and make sure the callback is called by db2
        while log_rcv.try_recv().is_ok() {}
        assert!(log_rcv.try_recv().is_err());

        db2.put(b"testkey2", b"testvalue2").unwrap();
        db2.flush().unwrap();
        db2.delete(b"testkey2").unwrap();
        db2.flush().unwrap();
        db2.compact_range(Some(b"a"), Some(b"z"));

        drop(db2);
        assert!(log_rcv.try_recv().is_ok());

        // clear the logs
        while log_rcv.try_recv().is_ok() {}
        assert!(log_rcv.try_recv().is_err());

        // create a db with the copied logger to check lifetimes
        let tmp3 = tempfile::tempdir().unwrap();
        let mut opts2 = Options::default();
        opts2.create_if_missing(true);
        opts2.set_info_logger(logger);
        let db3 = crate::DB::open(&opts2, tmp3.path()).unwrap();
        drop(opts2);
        db3.put(b"testkey3", b"testvalue3").unwrap();
        db3.flush().unwrap();
        db3.delete(b"testkey3").unwrap();
        db3.flush().unwrap();
        db3.compact_range(Some(b"a"), Some(b"z"));
        assert!(log_rcv.try_recv().is_ok());
        drop(db3);
    }
}