bijou_rocksdb/db_options.rs
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::ffi::CStr;
use std::path::Path;
use std::ptr::{null_mut, NonNull};
use std::slice;
use std::sync::Arc;

use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};

use crate::{
    compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
    compaction_filter_factory::{self, CompactionFilterFactory},
    comparator::{self, ComparatorCallback, CompareFn},
    db::DBAccess,
    env::Env,
    ffi,
    ffi_util::{from_cstr, to_cpath, CStrLike},
    merge_operator::{
        self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
    },
    slice_transform::SliceTransform,
    ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
};

pub(crate) struct CacheWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_cache_t>,
}

impl Drop for CacheWrapper {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cache_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct Cache(pub(crate) Arc<CacheWrapper>);

impl Cache {
    /// Creates an LRU cache with capacity in bytes.
    pub fn new_lru_cache(capacity: size_t) -> Cache {
        let inner = NonNull::new(unsafe { ffi::rocksdb_cache_create_lru(capacity) }).unwrap();
        Cache(Arc::new(CacheWrapper { inner }))
    }

    /// Creates a HyperClockCache with capacity in bytes.
    ///
    /// `estimated_entry_charge` is an important tuning parameter. The optimal
    /// choice at any given time is
    /// `(cache.get_usage() - 64 * cache.get_table_address_count()) /
    /// cache.get_occupancy_count()`, or approximately `cache.get_usage() /
    /// cache.get_occupancy_count()`.
    ///
    /// However, the value cannot be changed dynamically, so as the cache
    /// composition changes at runtime, the following tradeoffs apply:
    ///
    /// * If the estimate is substantially too high (e.g., 25% higher),
    ///   the cache may have to evict entries to prevent load factors that
    ///   would dramatically affect lookup times.
    /// * If the estimate is substantially too low (e.g., less than half),
    ///   then metadata space overhead is substantially higher.
    ///
    /// The latter is generally preferable, and picking the larger of
    /// block size and metadata block size is a reasonable choice that
    /// errs towards this side.
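    ///
    /// # Examples
    ///
    /// A minimal sketch; the capacity and entry-charge values below are
    /// illustrative placeholders, not tuned recommendations.
    ///
    /// ```
    /// use rocksdb::Cache;
    ///
    /// // 64 MiB cache, assuming entries are roughly one 4 KiB block each.
    /// let cache = Cache::new_hyper_clock_cache(64 * 1024 * 1024, 4 * 1024);
    /// println!("usage: {} bytes", cache.get_usage());
    /// ```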
    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
        Cache(Arc::new(CacheWrapper {
            inner: NonNull::new(unsafe {
                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
            })
            .unwrap(),
        }))
    }

    /// Returns the cache memory usage in bytes.
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
    }

    /// Returns the pinned memory usage in bytes.
    pub fn get_pinned_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_pinned_usage(self.0.inner.as_ptr()) }
    }

    /// Sets cache capacity in bytes.
    pub fn set_capacity(&mut self, capacity: size_t) {
        unsafe {
            ffi::rocksdb_cache_set_capacity(self.0.inner.as_ptr(), capacity);
        }
    }
}

#[derive(Default)]
pub(crate) struct OptionsMustOutliveDB {
    env: Option<Env>,
    row_cache: Option<Cache>,
    block_based: Option<BlockBasedOptionsMustOutliveDB>,
}

impl OptionsMustOutliveDB {
    pub(crate) fn clone(&self) -> Self {
        Self {
            env: self.env.as_ref().map(Env::clone),
            row_cache: self.row_cache.as_ref().map(Cache::clone),
            block_based: self
                .block_based
                .as_ref()
                .map(BlockBasedOptionsMustOutliveDB::clone),
        }
    }
}

#[derive(Default)]
struct BlockBasedOptionsMustOutliveDB {
    block_cache: Option<Cache>,
}

impl BlockBasedOptionsMustOutliveDB {
    fn clone(&self) -> Self {
        Self {
            block_cache: self.block_cache.as_ref().map(Cache::clone),
        }
    }
}

/// Database-wide options around performance and behavior.
///
/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
/// and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use rocksdb::{Options, DB};
/// use rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///     let path = "path/for/rocksdb/storageX";
///     let mut opts = Options::default();
///     opts.create_if_missing(true);
///     opts.set_max_open_files(10000);
///     opts.set_use_fsync(false);
///     opts.set_bytes_per_sync(8388608);
///     opts.optimize_for_point_lookup(1024);
///     opts.set_table_cache_num_shard_bits(6);
///     opts.set_max_write_buffer_number(32);
///     opts.set_write_buffer_size(536870912);
///     opts.set_target_file_size_base(1073741824);
///     opts.set_min_write_buffer_number_to_merge(4);
///     opts.set_level_zero_stop_writes_trigger(2000);
///     opts.set_level_zero_slowdown_writes_trigger(0);
///     opts.set_compaction_style(DBCompactionStyle::Universal);
///     opts.set_disable_auto_compactions(true);
///
///     DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    pub(crate) outlive: OptionsMustOutliveDB,
}

/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use rocksdb::{DB, Options, WriteBatch, WriteOptions};
///
/// let path = "_path_for_rocksdb_storageY1";
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
}

/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use rocksdb::{DB, Options, FlushOptions};
///
/// let path = "_path_for_rocksdb_storageY2";
/// {
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}

/// For configuring block-based file storage.
pub struct BlockBasedOptions {
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    outlive: BlockBasedOptionsMustOutliveDB,
}

pub struct ReadOptions {
    pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
    iterate_upper_bound: Option<Vec<u8>>,
    iterate_lower_bound: Option<Vec<u8>>,
}

/// Configuration of cuckoo-based storage.
pub struct CuckooTableOptions {
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}

/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// writer.open("_path_for_sst_file").unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let path = "_path_for_rocksdb_storageY3";
/// {
///     let db = DB::open_default(&path).unwrap();
///     let mut ingest_opts = IngestExternalFileOptions::default();
///     ingest_opts.set_move_files(true);
///     db.ingest_external_file_opts(&ingest_opts, vec!["_path_for_sst_file"]).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct IngestExternalFileOptions {
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}

// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CacheWrapper {}

// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference.
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CacheWrapper {}

impl Drop for Options {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}

impl Clone for Options {
    fn clone(&self) -> Self {
        let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
        assert!(!inner.is_null(), "Could not copy RocksDB options");

        Self {
            inner,
            outlive: self.outlive.clone(),
        }
    }
}

impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}

impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}

impl Drop for FlushOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}

impl Drop for WriteOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}

impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_readoptions_destroy(self.inner);
        }
    }
}

impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}

impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
    ///
    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
    pub fn set_partition_filters(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(
                self.inner,
                c_uchar::from(enabled),
            );
        }
    }

    /// Sets the global cache for blocks (user data is stored in a set of blocks,
    /// and a block is the unit of reading from disk). The cache must outlive the
    /// DB instance which uses it.
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
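    ///
    /// # Examples
    ///
    /// A minimal sketch of sharing one cache across option sets; wiring the
    /// table options into `Options` via `set_block_based_table_factory` is
    /// assumed to be the usual pattern.
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, Cache, Options};
    ///
    /// let cache = Cache::new_lru_cache(64 * 1024 * 1024);
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_cache(&cache);
    ///
    /// let mut opts = Options::default();
    /// opts.set_block_based_table_factory(&block_opts);
    /// ```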
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        self.outlive.block_cache = Some(cache.clone());
    }

    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
        }
    }

    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
    /// policy to reduce disk reads.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// ```
    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
        unsafe {
            let bloom = if block_based {
                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
            } else {
                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
            };

            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
        }
    }

    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// If cache_index_and_filter_blocks is enabled, cache index and filter blocks with high priority.
    /// If set to true, depending on the implementation of the block cache,
    /// index and filter blocks may be less likely to be evicted than data blocks.
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Defines the index type to be used for SS-table lookups.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
    /// ```
    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
        let index = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
        }
    }

    /// If cache_index_and_filter_blocks is true and this is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If cache_index_and_filter_blocks is true and this is true, then
    /// the top-level index of partitioned filter and index blocks is stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in the LSM tree.
    ///
    /// Default: false.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    ///
    /// See the full [list](https://github.com/facebook/rocksdb/blob/f059c7d9b96300091e07429a60f4ad55dac84859/include/rocksdb/table.h#L249-L274)
    /// of the supported versions.
    ///
    /// Default: 2.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }

    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }

    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }

    /// Set the data block index type for point lookups:
    /// `DataBlockIndexType::BinarySearch` to use binary search within the data block.
    /// `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
    /// the normal binary search.
    ///
    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
    ///
    /// Default: `BinarySearch`
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
    /// block_opts.set_data_block_hash_ratio(0.85);
    /// ```
    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
        let index_t = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
        }
    }

    /// Set the data block hash index utilization ratio.
    ///
    /// The smaller the utilization ratio, the fewer hash collisions happen,
    /// which reduces the risk of a point lookup falling back to binary search
    /// due to collisions. A smaller ratio means faster lookups at the price of
    /// more space overhead.
    ///
    /// Default: 0.75
    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
        }
    }

    /// If false, place only prefixes in the filter, not whole keys.
    ///
    /// Defaults to true.
    pub fn set_whole_key_filtering(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
        }
    }

    /// Use the specified checksum type.
    /// Newly created table files will be protected with this checksum type.
    /// Old table files will still be readable, even though they have a different checksum type.
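    ///
    /// # Examples
    ///
    /// A minimal sketch; `ChecksumType::XXH3` is used purely as an
    /// illustration of picking a non-default checksum, assuming that variant
    /// is available in the `ChecksumType` enum of your build.
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, ChecksumType};
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_checksum_type(ChecksumType::XXH3);
    /// ```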
    pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
        unsafe {
            ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
        }
    }
}

impl Default for BlockBasedOptions {
    fn default() -> Self {
        let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
        assert!(
            !block_opts.is_null(),
            "Could not create RocksDB block based options"
        );

        Self {
            inner: block_opts,
            outlive: BlockBasedOptionsMustOutliveDB::default(),
        }
    }
}

impl CuckooTableOptions {
    /// Determines the utilization of hash tables. Smaller values
    /// result in larger hash tables with fewer collisions.
    /// Default: 0.9
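    ///
    /// # Examples
    ///
    /// A minimal sketch of configuring cuckoo table storage; wiring the
    /// options into `Options` via `set_cuckoo_table_factory` is assumed to be
    /// the usual pattern, and the ratio is illustrative.
    ///
    /// ```
    /// use rocksdb::{CuckooTableOptions, Options};
    ///
    /// let mut cuckoo_opts = CuckooTableOptions::default();
    /// cuckoo_opts.set_hash_ratio(0.8);
    ///
    /// let mut opts = Options::default();
    /// opts.set_cuckoo_table_factory(&cuckoo_opts);
    /// ```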
    pub fn set_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
        }
    }

    /// A property used by the builder to determine the depth to search for
    /// a path to displace elements in case of collision. See the builder's
    /// MakeSpaceForKey method. Higher values result in more efficient hash
    /// tables with fewer lookups but take more time to build.
    /// Default: 100
    pub fn set_max_search_depth(&mut self, depth: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
        }
    }

    /// In case of collision while inserting, the builder
    /// attempts to insert in the next cuckoo_block_size
    /// locations before skipping over to the next Cuckoo hash
    /// function. This makes lookups more cache friendly in case
    /// of collisions.
    /// Default: 5
    pub fn set_cuckoo_block_size(&mut self, size: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
        }
    }

    /// If this option is enabled, the user key is treated as a uint64_t and
    /// its value is used as the hash value directly. This option changes the
    /// builder's behavior. Readers ignore this option and behave according to
    /// what is specified in the table property.
    /// Default: false
    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
        }
    }

    /// If this option is set to true, modulo is used during hash calculation.
    /// This often yields better space efficiency at the cost of performance.
    /// If this option is set to false, the number of entries in the table is
    /// constrained to be a power of two, and bitwise AND is used to calculate
    /// the hash, which is faster in general.
    /// Default: true
    pub fn set_use_module_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
        }
    }
}

impl Default for CuckooTableOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
        assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");

        Self { inner: opts }
    }
}

/// Verbosity of the LOG.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}

impl Options {
    /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
    /// latest RocksDB options file stored in the specified rocksdb database.
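    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a database already exists at `path` (one is
    /// created first here so an options file is present); the path and cache
    /// size are placeholders.
    ///
    /// ```
    /// use rocksdb::{Cache, Env, Options, DB};
    ///
    /// let path = "_path_for_rocksdb_storage_load_latest";
    /// {
    ///     let mut opts = Options::default();
    ///     opts.create_if_missing(true);
    ///     let _db = DB::open(&opts, path).unwrap();
    /// }
    /// let (opts, cfs) = Options::load_latest(
    ///     path,
    ///     Env::new().unwrap(),
    ///     true,
    ///     Cache::new_lru_cache(1024 * 1024),
    /// )
    /// .unwrap();
    /// // At minimum, the "default" column family is described.
    /// assert!(!cfs.is_empty());
    /// let _ = DB::destroy(&opts, path);
    /// ```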
    pub fn load_latest<P: AsRef<Path>>(
        path: P,
        env: Env,
        ignore_unknown_options: bool,
        cache: Cache,
    ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
        let path = to_cpath(path)?;
        let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
        let mut num_column_families: usize = 0;
        let mut column_family_names: *mut *mut c_char = null_mut();
        let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
        unsafe {
            ffi_try!(ffi::rocksdb_load_latest_options(
                path.as_ptr(),
                env.0.inner,
                ignore_unknown_options,
                cache.0.inner.as_ptr(),
                &mut db_options,
                &mut num_column_families,
                &mut column_family_names,
                &mut column_family_options,
            ));
        }
        let options = Options {
            inner: db_options,
            outlive: OptionsMustOutliveDB::default(),
        };
        let column_families = unsafe {
            Options::read_column_descriptors(
                num_column_families,
                column_family_names,
                column_family_options,
            )
        };
        Ok((options, column_families))
    }

    /// Read column descriptors from C pointers.
    #[inline]
    unsafe fn read_column_descriptors(
        num_column_families: usize,
        column_family_names: *mut *mut c_char,
        column_family_options: *mut *mut ffi::rocksdb_options_t,
    ) -> Vec<ColumnFamilyDescriptor> {
        let column_family_names_iter =
            slice::from_raw_parts(column_family_names, num_column_families)
                .iter()
                .map(|ptr| from_cstr(*ptr));
        let column_family_options_iter =
            slice::from_raw_parts(column_family_options, num_column_families)
                .iter()
                .map(|ptr| Options {
                    inner: *ptr,
                    outlive: OptionsMustOutliveDB::default(),
                });
        let column_descriptors = column_family_names_iter
            .zip(column_family_options_iter)
            .map(|(name, options)| ColumnFamilyDescriptor { name, options })
            .collect::<Vec<_>>();
        // Free the C-allocated name and option arrays.
        slice::from_raw_parts(column_family_names, num_column_families)
            .iter()
            .for_each(|ptr| ffi::rocksdb_free(*ptr as *mut c_void));
        ffi::rocksdb_free(column_family_names as *mut c_void);
        ffi::rocksdb_free(column_family_options as *mut c_void);
        column_descriptors
    }

    /// By default, RocksDB uses only one background thread for flush and
    /// compaction. Calling this function will set it up such that a total of
    /// `parallelism` threads is used. A good value for `parallelism` is the
    /// number of cores. You almost definitely want to call this function if
    /// your system is bottlenecked by RocksDB.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.increase_parallelism(3);
    /// ```
    pub fn increase_parallelism(&mut self, parallelism: i32) {
        unsafe {
            ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
        }
    }

    /// Optimize level style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it may overwrite
    /// the values of those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption is constrained by
    /// `memtable_memory_budget`.
    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_level_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// Optimize universal style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it may overwrite
    /// the values of those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption is constrained by
    /// `memtable_memory_budget`.
    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_universal_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// If true, the database will be created if it is missing.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_if_missing(true);
    /// ```
    pub fn create_if_missing(&mut self, create_if_missing: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_if_missing(
                self.inner,
                c_uchar::from(create_if_missing),
            );
        }
    }

    /// If true, any column families that didn't exist when opening the database
    /// will be created.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_missing_column_families(true);
    /// ```
    pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_missing_column_families(
                self.inner,
                c_uchar::from(create_missing_cfs),
            );
        }
    }

    /// Specifies whether an error should be raised if the database already exists.
    ///
    /// Default: false
    pub fn set_error_if_exists(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
        }
    }

    /// Enable/disable paranoid checks.
    ///
    /// If true, the implementation will do aggressive checking of the
    /// data it is processing and will stop early if it detects any
    /// errors. This may have unforeseen ramifications: for example, a
    /// corruption of one DB entry may cause a large number of entries to
    /// become unreadable or for the entire DB to become unopenable.
    /// If any of the writes to the database fails (Put, Delete, Merge, Write),
    /// the database will switch to read-only mode and fail all other
    /// Write operations.
    ///
    /// Default: false
    pub fn set_paranoid_checks(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
        }
    }

    /// A list of paths where SST files can be put into, with their target sizes.
    /// Newer data is placed into paths specified earlier in the vector while
    /// older data gradually moves to paths specified later in the vector.
    ///
    /// For example, if you have a flash device with 10GB allocated for the DB,
    /// as well as a hard drive of 2TB, you should configure it to be:
    /// [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
    ///
    /// The system will try to guarantee data under each path is close to but
    /// not larger than the target size. But current and future file sizes used
    /// in determining where to place a file are based on best-effort estimation,
    /// which means there is a chance that the actual size under the directory
    /// is slightly more than the target size under some workloads. Users should
    /// give some buffer room for those cases.
    ///
    /// If none of the paths has sufficient room to place a file, the file will
    /// be placed in the last path anyway, regardless of the target size.
    ///
    /// Placing newer data in earlier paths is also best-effort. Users should
    /// expect some files to be placed in higher levels in some extreme cases.
    ///
    /// If left empty, only one path will be used, which is `path` passed when
    /// opening the DB.
    ///
    /// Default: empty
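    ///
    /// # Examples
    ///
    /// A minimal sketch; the paths and target sizes are placeholders.
    ///
    /// ```
    /// use rocksdb::{DBPath, Options};
    ///
    /// let mut opts = Options::default();
    /// let flash = DBPath::new("_flash_path", 10 * 1024 * 1024 * 1024).unwrap();
    /// let hdd = DBPath::new("_hard_drive", 2 * 1024 * 1024 * 1024 * 1024).unwrap();
    /// opts.set_db_paths(&[flash, hdd]);
    /// ```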
    pub fn set_db_paths(&mut self, paths: &[DBPath]) {
        let mut paths: Vec<_> = paths
            .iter()
            .map(|path| path.inner as *const ffi::rocksdb_dbpath_t)
            .collect();
        let num_paths = paths.len();
        unsafe {
            ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
        }
    }

    /// Use the specified object to interact with the environment,
    /// e.g. to read/write files, schedule background work, etc. In the near
    /// future, support for doing storage operations such as read/write files
    /// through env will be deprecated in favor of file_system.
    ///
    /// Default: Env::default()
    pub fn set_env(&mut self, env: &Env) {
        unsafe {
            ffi::rocksdb_options_set_env(self.inner, env.0.inner);
        }
        self.outlive.env = Some(env.clone());
    }

    /// Sets the compression algorithm that will be used for compressing blocks.
    ///
    /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
    /// the snappy feature is not enabled).
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Snappy);
    /// ```
    pub fn set_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_compression(self.inner, t as c_int);
        }
    }

    /// Sets the bottom-most compression algorithm that will be used for
    /// compressing blocks at the bottom-most level.
    ///
    /// Note that to actually enable the bottom-most compression configuration
    /// after setting the compression type, it needs to be enabled by calling the
    /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
    /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with the `enabled` argument
    /// set to `true`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_zstd_max_train_bytes(0, true);
    /// ```
    pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
        }
    }

    /// Different levels can have different compression policies. There
    /// are cases where most lower levels would like to use quick compression
    /// algorithms while the higher levels (which have more data) use
    /// compression algorithms that have better compression but could
    /// be slower. This array, if non-empty, should have an entry for
    /// each level of the database; these override the value specified in
    /// the previous field 'compression'.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_per_level(&[
    ///     DBCompressionType::None,
    ///     DBCompressionType::None,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy
    /// ]);
    /// ```
    pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
        unsafe {
            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
            ffi::rocksdb_options_set_compression_per_level(
                self.inner,
                level_types.as_mut_ptr(),
                level_types.len() as size_t,
            );
        }
    }

    /// Maximum size of dictionaries used to prime the compression library.
    /// Enabling a dictionary can improve compression ratios when there are
    /// repetitions across data blocks.
    ///
    /// The dictionary is created by sampling the SST file data. If
    /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
    /// dictionary generator. Otherwise, the random samples are used directly as
    /// the dictionary.
    ///
    /// When the compression dictionary is disabled, we compress and write each block
    /// before buffering data for the next one. When the compression dictionary is
    /// enabled, we buffer all SST file data in-memory so we can sample it, as data
    /// can only be compressed and written after the dictionary has been finalized.
    /// So users of this feature may see increased memory usage.
    ///
    /// Default: `0`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_options(4, 5, 6, 7);
    /// ```
    pub fn set_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
    ) {
        unsafe {
            ffi::rocksdb_options_set_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
            );
        }
    }

    /// Sets compression options for blocks at the bottom-most level. The meaning
    /// of all settings is the same as in the [`set_compression_options`](#method.set_compression_options) method, but
    /// they affect only the bottom-most compression, which is set using the
    /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
    /// ```
    pub fn set_bottommost_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
        enabled: bool,
    ) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
                c_uchar::from(enabled),
            );
        }
    }

    /// Sets the maximum size of training data passed to zstd's dictionary trainer. Using zstd's
    /// dictionary trainer can achieve even better compression ratio improvements than using
    /// `max_dict_bytes` alone.
    ///
    /// The training data will be used to generate a dictionary of `max_dict_bytes`.
    ///
    /// Default: 0.
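    ///
    /// # Examples
    ///
    /// A minimal sketch; the byte counts are illustrative. Training only
    /// matters once zstd compression is selected and `max_dict_bytes` is
    /// non-zero.
    ///
    /// ```
    /// use rocksdb::{DBCompressionType, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Zstd);
    /// // Sample roughly 100x the target dictionary size for training.
    /// opts.set_zstd_max_train_bytes(100 * 16 * 1024);
    /// ```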
    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
        unsafe {
            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
        }
    }

    /// Sets the maximum size of training data passed to zstd's dictionary trainer
    /// when compressing the bottom-most level. Using zstd's dictionary trainer
    /// can achieve even better compression ratio improvements than using
    /// `max_dict_bytes` alone.
    ///
    /// The training data will be used to generate a dictionary of
    /// `max_dict_bytes`.
    ///
    /// Default: 0.
    pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
                self.inner,
                value,
                c_uchar::from(enabled),
            );
        }
    }

    /// If non-zero, we perform bigger reads when doing compaction. If you're
    /// running RocksDB on spinning disks, you should set this to at least 2MB.
    /// That way RocksDB's compaction does sequential instead of random reads.
    ///
    /// When non-zero, we also force new_table_reader_for_compaction_inputs to
    /// true.
    ///
    /// Default: `0`
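    ///
    /// # Examples
    ///
    /// A minimal sketch using the 2MB value suggested above for spinning
    /// disks.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_readahead_size(2 * 1024 * 1024);
    /// ```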
    pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
        unsafe {
            ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
        }
    }

    /// Allow RocksDB to pick a dynamic base of bytes for levels.
    /// With this feature turned on, RocksDB will automatically adjust the max bytes for each level.
    /// The goal of this feature is to have a lower bound on size amplification.
    ///
    /// Default: false.
    pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

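    /// Sets a merge operator whose full-merge and partial-merge behavior are
    /// both provided by the same associative function.
    ///
    /// # Examples
    ///
    /// A minimal sketch of an associative concatenating merge; the operator
    /// name `"concat"` is illustrative, and `MergeOperands::iter` is assumed
    /// to be available as in recent rust-rocksdb releases.
    ///
    /// ```
    /// use rocksdb::{MergeOperands, Options};
    ///
    /// fn concat_merge(
    ///     _new_key: &[u8],
    ///     existing_val: Option<&[u8]>,
    ///     operands: &MergeOperands,
    /// ) -> Option<Vec<u8>> {
    ///     // Start from the existing value, then append each operand in order.
    ///     let mut result: Vec<u8> = existing_val.map(|v| v.to_vec()).unwrap_or_default();
    ///     for op in operands.iter() {
    ///         result.extend_from_slice(op);
    ///     }
    ///     Some(result)
    /// }
    ///
    /// let mut opts = Options::default();
    /// opts.set_merge_operator_associative("concat", concat_merge);
    /// ```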
    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
        &mut self,
        name: impl CStrLike,
        full_merge_fn: F,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: name.into_c_string().unwrap(),
            full_merge_fn: full_merge_fn.clone(),
            partial_merge_fn: full_merge_fn,
        });

        unsafe {
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, F>),
                Some(full_merge_callback::<F, F>),
                Some(partial_merge_callback::<F, F>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, F>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }

    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
        &mut self,
        name: impl CStrLike,
        full_merge_fn: F,
        partial_merge_fn: PF,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: name.into_c_string().unwrap(),
            full_merge_fn,
            partial_merge_fn,
        });

        unsafe {
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, PF>),
                Some(full_merge_callback::<F, PF>),
                Some(partial_merge_callback::<F, PF>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, PF>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }

    #[deprecated(
        since = "0.5.0",
        note = "add_merge_operator has been renamed to set_merge_operator"
    )]
    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
        self.set_merge_operator_associative(name, merge_fn);
    }

    /// Sets a compaction filter used to determine if entries should be kept, changed,
    /// or removed during compaction.
    ///
    /// An example use case is to remove entries with an expired TTL.
    ///
    /// If you take a snapshot of the database, only values written since the last
    /// snapshot will be passed through the compaction filter.
    ///
    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
    /// simultaneously.
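    ///
    /// # Examples
    ///
    /// A minimal sketch that drops every value carrying a (hypothetical)
    /// `expired:` prefix and keeps everything else.
    ///
    /// ```
    /// use rocksdb::{CompactionDecision, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_filter("ttl_filter", |_level, _key, value| {
    ///     if value.starts_with(b"expired:") {
    ///         CompactionDecision::Remove
    ///     } else {
    ///         CompactionDecision::Keep
    ///     }
    /// });
    /// ```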
    pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
    where
        F: CompactionFilterFn + Send + 'static,
    {
        let cb = Box::new(CompactionFilterCallback {
            name: name.into_c_string().unwrap(),
            filter_fn,
        });

        unsafe {
            let cf = ffi::rocksdb_compactionfilter_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
            );
            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
        }
    }

    /// This is a factory that provides compaction filter objects which allow
    /// an application to modify/delete a key-value during background compaction.
    ///
    /// A new filter will be created on each compaction run. If multithreaded
    /// compaction is being used, each created CompactionFilter will only be used
    /// from a single thread and so does not need to be thread-safe.
    ///
    /// Default: nullptr
    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
    where
        F: CompactionFilterFactory + 'static,
    {
        let factory = Box::new(factory);

        unsafe {
            let cff = ffi::rocksdb_compactionfilterfactory_create(
                Box::into_raw(factory).cast::<c_void>(),
                Some(compaction_filter_factory::destructor_callback::<F>),
                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
                Some(compaction_filter_factory::name_callback::<F>),
            );

            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
        }
    }

    /// Sets the comparator used to define the order of keys in the table.
    /// Default: a comparator that uses lexicographic byte-wise ordering
    ///
    /// The client must ensure that the comparator supplied here has the same
    /// name and orders keys *exactly* the same as the comparator provided to
    /// previous open calls on the same DB.
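    ///
    /// # Examples
    ///
    /// A minimal sketch of a reverse byte-wise comparator; the name
    /// `"reverse_bytewise"` is illustrative, but as noted above it must stay
    /// stable across opens of the same DB.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_comparator("reverse_bytewise", Box::new(|a: &[u8], b: &[u8]| b.cmp(a)));
    /// ```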
    pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
        let cb = Box::new(ComparatorCallback {
            name: name.into_c_string().unwrap(),
            f: compare_fn,
        });

        unsafe {
            let cmp = ffi::rocksdb_comparator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(comparator::destructor_callback),
                Some(comparator::compare_callback),
                Some(comparator::name_callback),
            );
            ffi::rocksdb_options_set_comparator(self.inner, cmp);
        }
    }

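    /// Sets the prefix extractor used for prefix-based iteration and prefix
    /// bloom filters.
    ///
    /// # Examples
    ///
    /// A minimal sketch; the 3-byte fixed prefix is illustrative and should
    /// match how your keys are actually structured.
    ///
    /// ```
    /// use rocksdb::{Options, SliceTransform};
    ///
    /// let mut opts = Options::default();
    /// opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(3));
    /// ```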
    pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
        unsafe {
            ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
        }
    }

    pub fn optimize_for_point_lookup(&mut self, cache_size: u64) {
        unsafe {
            ffi::rocksdb_options_optimize_for_point_lookup(self.inner, cache_size);
        }
    }

    /// Sets the optimize_filters_for_hits flag
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_optimize_filters_for_hits(true);
    /// ```
    pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
        unsafe {
            ffi::rocksdb_options_set_optimize_filters_for_hits(
                self.inner,
                c_int::from(optimize_for_hits),
            );
        }
    }

    /// Sets the periodicity when obsolete files get deleted.
    ///
    /// Files that fall out of scope through the compaction process will still
    /// get automatically deleted on every compaction, regardless of this
    /// setting.
    ///
    /// Default: 6 hours
    pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
        unsafe {
            ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
        }
    }

    /// Prepare the DB for bulk loading.
    ///
    /// All data will be in level 0 without any automatic compaction.
    /// It's recommended to manually call CompactRange(NULL, NULL) before reading
    /// from the database, because otherwise the read can be very slow.
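    ///
    /// # Examples
    ///
    /// A minimal sketch of the bulk-load-then-compact flow described above;
    /// the path is a placeholder.
    ///
    /// ```
    /// use rocksdb::{DB, Options};
    ///
    /// let path = "_path_for_rocksdb_bulk_load";
    /// {
    ///     let mut opts = Options::default();
    ///     opts.create_if_missing(true);
    ///     opts.prepare_for_bulk_load();
    ///     let db = DB::open(&opts, path).unwrap();
    ///     db.put(b"k1", b"v1").unwrap();
    ///     // Compact the full key range before switching to read traffic.
    ///     db.compact_range(None::<&[u8]>, None::<&[u8]>);
    /// }
    /// let _ = DB::destroy(&Options::default(), path);
    /// ```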
1400 pub fn prepare_for_bulk_load(&mut self) {
1401 unsafe {
1402 ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
1403 }
1404 }
1405
1406 /// Sets the number of open files that can be used by the DB. You may need to
1407 /// increase this if your database has a large working set. Value `-1` means
1408 /// files opened are always kept open. You can estimate number of files based
1409 /// on target_file_size_base and target_file_size_multiplier for level-based
1410 /// compaction. For universal-style compaction, you can usually set it to `-1`.
1411 ///
1412 /// Default: `-1`
1413 ///
1414 /// # Examples
1415 ///
1416 /// ```
1417 /// use rocksdb::Options;
1418 ///
1419 /// let mut opts = Options::default();
1420 /// opts.set_max_open_files(10);
1421 /// ```
1422 pub fn set_max_open_files(&mut self, nfiles: c_int) {
1423 unsafe {
1424 ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
1425 }
1426 }
1427
1428 /// If max_open_files is -1, DB will open all files on DB::Open(). You can
1429 /// use this option to increase the number of threads used to open the files.
1430 /// Default: 16
1431 pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
1432 unsafe {
1433 ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
1434 }
1435 }
1436
1437 /// By default, writes to stable storage use fdatasync (on platforms
1438 /// where this function is available). If this option is true,
1439 /// fsync is used instead.
1440 ///
1441 /// fsync and fdatasync are equally safe for our purposes and fdatasync is
1442 /// faster, so it is rarely necessary to set this option. It is provided
1443 /// as a workaround for kernel/filesystem bugs, such as one that affected
1444 /// fdatasync with ext4 in kernel versions prior to 3.7.
1445 ///
1446 /// Default: `false`
1447 ///
1448 /// # Examples
1449 ///
1450 /// ```
1451 /// use rocksdb::Options;
1452 ///
1453 /// let mut opts = Options::default();
1454 /// opts.set_use_fsync(true);
1455 /// ```
1456 pub fn set_use_fsync(&mut self, useit: bool) {
1457 unsafe {
1458 ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
1459 }
1460 }
1461
1462 /// Specifies the absolute info LOG dir.
1463 ///
1464 /// If it is empty, the log files will be in the same dir as data.
1465 /// If it is non empty, the log files will be in the specified dir,
1466 /// and the db data dir's absolute path will be used as the log file
1467 /// name's prefix.
1468 ///
1469 /// Default: empty
1470 pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
1471 let p = to_cpath(path).unwrap();
1472 unsafe {
1473 ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
1474 }
1475 }
1476
1477 /// Specifies the log level.
1478 /// Consider the `LogLevel` enum for a list of possible levels.
1479 ///
1480 /// Default: Info
1481 ///
1482 /// # Examples
1483 ///
1484 /// ```
1485 /// use rocksdb::{Options, LogLevel};
1486 ///
1487 /// let mut opts = Options::default();
1488 /// opts.set_log_level(LogLevel::Warn);
1489 /// ```
1490 pub fn set_log_level(&mut self, level: LogLevel) {
1491 unsafe {
1492 ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
1493 }
1494 }
1495
1496 /// Allows OS to incrementally sync files to disk while they are being
1497 /// written, asynchronously, in the background. This operation can be used
1498 /// to smooth out write I/Os over time. Users shouldn't rely on it for
1499 /// persistency guarantee.
1500 /// Issue one request for every bytes_per_sync written. `0` turns it off.
1501 ///
1502 /// Default: `0`
1503 ///
1504 /// You may consider using rate_limiter to regulate write rate to device.
1505 /// When rate limiter is enabled, it automatically enables bytes_per_sync
1506 /// to 1MB.
1507 ///
1508 /// This option applies to table files
1509 ///
1510 /// # Examples
1511 ///
1512 /// ```
1513 /// use rocksdb::Options;
1514 ///
1515 /// let mut opts = Options::default();
1516 /// opts.set_bytes_per_sync(1024 * 1024);
1517 /// ```
1518 pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
1519 unsafe {
1520 ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
1521 }
1522 }
1523
1524 /// Same as bytes_per_sync, but applies to WAL files.
1525 ///
1526 /// Default: 0, turned off
1527 ///
1528 /// Dynamically changeable through SetDBOptions() API.
1529 pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
1530 unsafe {
1531 ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
1532 }
1533 }
1534
1535 /// Sets the maximum buffer size that is used by WritableFileWriter.
1536 ///
1537 /// On Windows, we need to maintain an aligned buffer for writes.
1538 /// We allow the buffer to grow until it's size hits the limit in buffered
1539 /// IO and fix the buffer size when using direct IO to ensure alignment of
1540 /// write requests if the logical sector size is unusual
1541 ///
1542 /// Default: 1024 * 1024 (1 MB)
1543 ///
1544 /// Dynamically changeable through SetDBOptions() API.
1545 pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
1546 unsafe {
1547 ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
1548 }
1549 }
1550
1551 /// If true, allow multi-writers to update mem tables in parallel.
1552 /// Only some memtable_factory-s support concurrent writes; currently it
1553 /// is implemented only for SkipListFactory. Concurrent memtable writes
1554 /// are not compatible with inplace_update_support or filter_deletes.
1555 /// It is strongly recommended to set enable_write_thread_adaptive_yield
1556 /// if you are going to use this feature.
1557 ///
1558 /// Default: true
1559 ///
1560 /// # Examples
1561 ///
1562 /// ```
1563 /// use rocksdb::Options;
1564 ///
1565 /// let mut opts = Options::default();
1566 /// opts.set_allow_concurrent_memtable_write(false);
1567 /// ```
1568 pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
1569 unsafe {
1570 ffi::rocksdb_options_set_allow_concurrent_memtable_write(
1571 self.inner,
1572 c_uchar::from(allow),
1573 );
1574 }
1575 }
1576
1577 /// If true, threads synchronizing with the write batch group leader will wait for up to
1578 /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
1579 /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
1580 /// is enabled.
1581 ///
1582 /// Default: true
1583 pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
1584 unsafe {
1585 ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
1586 self.inner,
1587 c_uchar::from(enabled),
1588 );
1589 }
1590 }
1591
1592 /// Specifies whether an iteration->Next() sequentially skips over keys with the same user-key or not.
1593 ///
1594 /// This number specifies the number of keys (with the same userkey)
1595 /// that will be sequentially skipped before a reseek is issued.
1596 ///
1597 /// Default: 8
1598 pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
1599 unsafe {
1600 ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
1601 }
1602 }
1603
1604 /// Enable direct I/O mode for reading
1605 /// they may or may not improve performance depending on the use case
1606 ///
1607 /// Files will be opened in "direct I/O" mode
1608 /// which means that data read from the disk will not be cached or
1609 /// buffered. The hardware buffer of the devices may however still
1610 /// be used. Memory mapped files are not impacted by these parameters.
1611 ///
1612 /// Default: false
1613 ///
1614 /// # Examples
1615 ///
1616 /// ```
1617 /// use rocksdb::Options;
1618 ///
1619 /// let mut opts = Options::default();
1620 /// opts.set_use_direct_reads(true);
1621 /// ```
1622 pub fn set_use_direct_reads(&mut self, enabled: bool) {
1623 unsafe {
1624 ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
1625 }
1626 }
1627
1628 /// Enable direct I/O mode for flush and compaction
1629 ///
1630 /// Files will be opened in "direct I/O" mode
1631 /// which means that data written to the disk will not be cached or
1632 /// buffered. The hardware buffer of the devices may however still
1633 /// be used. Memory mapped files are not impacted by these parameters.
1634 /// they may or may not improve performance depending on the use case
1635 ///
1636 /// Default: false
1637 ///
1638 /// # Examples
1639 ///
1640 /// ```
1641 /// use rocksdb::Options;
1642 ///
1643 /// let mut opts = Options::default();
1644 /// opts.set_use_direct_io_for_flush_and_compaction(true);
1645 /// ```
1646 pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
1647 unsafe {
1648 ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
1649 self.inner,
1650 c_uchar::from(enabled),
1651 );
1652 }
1653 }
1654
/// Enable/disable marking open files close-on-exec, so that child
/// processes do not inherit them.
1656 ///
1657 /// Default: true
1658 pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
1659 unsafe {
1660 ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
1661 }
1662 }
1663
1664 /// Hints to the OS that it should not buffer disk I/O. Enabling this
1665 /// parameter may improve performance but increases pressure on the
1666 /// system cache.
1667 ///
1668 /// The exact behavior of this parameter is platform dependent.
1669 ///
1670 /// On POSIX systems, after RocksDB reads data from disk it will
1671 /// mark the pages as "unneeded". The operating system may - or may not
1672 /// - evict these pages from memory, reducing pressure on the system
1673 /// cache. If the disk block is requested again this can result in
1674 /// additional disk I/O.
1675 ///
1676 /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
1677 /// which means that data read from the disk will not be cached or
/// buffered. The hardware buffer of the devices may however still
1679 /// be used. Memory mapped files are not impacted by this parameter.
1680 ///
1681 /// Default: true
1682 ///
1683 /// # Examples
1684 ///
1685 /// ```
1686 /// use rocksdb::Options;
1687 ///
1688 /// let mut opts = Options::default();
1689 /// #[allow(deprecated)]
1690 /// opts.set_allow_os_buffer(false);
1691 /// ```
1692 #[deprecated(
1693 since = "0.7.0",
1694 note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
1695 )]
1696 pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
1697 self.set_use_direct_reads(!is_allow);
1698 self.set_use_direct_io_for_flush_and_compaction(!is_allow);
1699 }
1700
1701 /// Sets the number of shards used for table cache.
1702 ///
1703 /// Default: `6`
1704 ///
1705 /// # Examples
1706 ///
1707 /// ```
1708 /// use rocksdb::Options;
1709 ///
1710 /// let mut opts = Options::default();
1711 /// opts.set_table_cache_num_shard_bits(4);
1712 /// ```
1713 pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
1714 unsafe {
1715 ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
1716 }
1717 }
1718
/// By default, target_file_size_multiplier is 1, which means
/// files in different levels will have similar sizes.
1721 ///
1722 /// Dynamically changeable through SetOptions() API
1723 pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
1724 unsafe {
1725 ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
1726 }
1727 }
1728
1729 /// Sets the minimum number of write buffers that will be merged together
1730 /// before writing to storage. If set to `1`, then
1731 /// all write buffers are flushed to L0 as individual files and this increases
1732 /// read amplification because a get request has to check in all of these
/// files. Also, an in-memory merge may result in writing less
1734 /// data to storage if there are duplicate records in each of these
1735 /// individual write buffers.
1736 ///
1737 /// Default: `1`
1738 ///
1739 /// # Examples
1740 ///
1741 /// ```
1742 /// use rocksdb::Options;
1743 ///
1744 /// let mut opts = Options::default();
1745 /// opts.set_min_write_buffer_number(2);
1746 /// ```
1747 pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
1748 unsafe {
1749 ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
1750 }
1751 }
1752
1753 /// Sets the maximum number of write buffers that are built up in memory.
1754 /// The default and the minimum number is 2, so that when 1 write buffer
1755 /// is being flushed to storage, new writes can continue to the other
1756 /// write buffer.
1757 /// If max_write_buffer_number > 3, writing will be slowed down to
1758 /// options.delayed_write_rate if we are writing to the last write buffer
1759 /// allowed.
1760 ///
1761 /// Default: `2`
1762 ///
1763 /// # Examples
1764 ///
1765 /// ```
1766 /// use rocksdb::Options;
1767 ///
1768 /// let mut opts = Options::default();
1769 /// opts.set_max_write_buffer_number(4);
1770 /// ```
1771 pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
1772 unsafe {
1773 ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
1774 }
1775 }
1776
1777 /// Sets the amount of data to build up in memory (backed by an unsorted log
1778 /// on disk) before converting to a sorted on-disk file.
1779 ///
1780 /// Larger values increase performance, especially during bulk loads.
1781 /// Up to max_write_buffer_number write buffers may be held in memory
1782 /// at the same time,
1783 /// so you may wish to adjust this parameter to control memory usage.
1784 /// Also, a larger write buffer will result in a longer recovery time
1785 /// the next time the database is opened.
1786 ///
1787 /// Note that write_buffer_size is enforced per column family.
1788 /// See db_write_buffer_size for sharing memory across column families.
1789 ///
1790 /// Default: `0x4000000` (64MiB)
1791 ///
1792 /// Dynamically changeable through SetOptions() API
1793 ///
1794 /// # Examples
1795 ///
1796 /// ```
1797 /// use rocksdb::Options;
1798 ///
1799 /// let mut opts = Options::default();
1800 /// opts.set_write_buffer_size(128 * 1024 * 1024);
1801 /// ```
1802 pub fn set_write_buffer_size(&mut self, size: usize) {
1803 unsafe {
1804 ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
1805 }
1806 }
1807
1808 /// Amount of data to build up in memtables across all column
1809 /// families before writing to disk.
1810 ///
1811 /// This is distinct from write_buffer_size, which enforces a limit
1812 /// for a single memtable.
1813 ///
1814 /// This feature is disabled by default. Specify a non-zero value
1815 /// to enable it.
1816 ///
1817 /// Default: 0 (disabled)
1818 ///
1819 /// # Examples
1820 ///
1821 /// ```
1822 /// use rocksdb::Options;
1823 ///
1824 /// let mut opts = Options::default();
1825 /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
1826 /// ```
1827 pub fn set_db_write_buffer_size(&mut self, size: usize) {
1828 unsafe {
1829 ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
1830 }
1831 }
1832
1833 /// Control maximum total data size for a level.
1834 /// max_bytes_for_level_base is the max total for level-1.
1835 /// Maximum number of bytes for level L can be calculated as
1836 /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
1837 /// For example, if max_bytes_for_level_base is 200MB, and if
1838 /// max_bytes_for_level_multiplier is 10, total data size for level-1
1839 /// will be 200MB, total file size for level-2 will be 2GB,
1840 /// and total file size for level-3 will be 20GB.
1841 ///
1842 /// Default: `0x10000000` (256MiB).
1843 ///
1844 /// Dynamically changeable through SetOptions() API
1845 ///
1846 /// # Examples
1847 ///
1848 /// ```
1849 /// use rocksdb::Options;
1850 ///
1851 /// let mut opts = Options::default();
1852 /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
1853 /// ```
1854 pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
1855 unsafe {
1856 ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
1857 }
1858 }
1859
/// Sets the multiplier for total bytes between adjacent levels
/// (see `set_max_bytes_for_level_base`).
///
/// Default: `10`
1861 ///
1862 /// # Examples
1863 ///
1864 /// ```
1865 /// use rocksdb::Options;
1866 ///
1867 /// let mut opts = Options::default();
1868 /// opts.set_max_bytes_for_level_multiplier(4.0);
1869 /// ```
1870 pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
1871 unsafe {
1872 ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
1873 }
1874 }
1875
1876 /// The manifest file is rolled over on reaching this limit.
/// The older manifest file will be deleted.
1878 /// The default value is MAX_INT so that roll-over does not take place.
1879 ///
1880 /// # Examples
1881 ///
1882 /// ```
1883 /// use rocksdb::Options;
1884 ///
1885 /// let mut opts = Options::default();
1886 /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
1887 /// ```
1888 pub fn set_max_manifest_file_size(&mut self, size: usize) {
1889 unsafe {
1890 ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
1891 }
1892 }
1893
1894 /// Sets the target file size for compaction.
1895 /// target_file_size_base is per-file size for level-1.
1896 /// Target file size for level L can be calculated by
1897 /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
1898 /// For example, if target_file_size_base is 2MB and
1899 /// target_file_size_multiplier is 10, then each file on level-1 will
1900 /// be 2MB, and each file on level 2 will be 20MB,
1901 /// and each file on level-3 will be 200MB.
1902 ///
1903 /// Default: `0x4000000` (64MiB)
1904 ///
1905 /// Dynamically changeable through SetOptions() API
1906 ///
1907 /// # Examples
1908 ///
1909 /// ```
1910 /// use rocksdb::Options;
1911 ///
1912 /// let mut opts = Options::default();
1913 /// opts.set_target_file_size_base(128 * 1024 * 1024);
1914 /// ```
1915 pub fn set_target_file_size_base(&mut self, size: u64) {
1916 unsafe {
1917 ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
1918 }
1919 }
1920
1921 /// Sets the minimum number of write buffers that will be merged together
1922 /// before writing to storage. If set to `1`, then
1923 /// all write buffers are flushed to L0 as individual files and this increases
1924 /// read amplification because a get request has to check in all of these
/// files. Also, an in-memory merge may result in writing less
1926 /// data to storage if there are duplicate records in each of these
1927 /// individual write buffers.
1928 ///
1929 /// Default: `1`
1930 ///
1931 /// # Examples
1932 ///
1933 /// ```
1934 /// use rocksdb::Options;
1935 ///
1936 /// let mut opts = Options::default();
1937 /// opts.set_min_write_buffer_number_to_merge(2);
1938 /// ```
1939 pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
1940 unsafe {
1941 ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
1942 }
1943 }
1944
1945 /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
/// level-0 compaction will not be triggered by the number of files at all.
1947 ///
1948 /// Default: `4`
1949 ///
1950 /// Dynamically changeable through SetOptions() API
1951 ///
1952 /// # Examples
1953 ///
1954 /// ```
1955 /// use rocksdb::Options;
1956 ///
1957 /// let mut opts = Options::default();
1958 /// opts.set_level_zero_file_num_compaction_trigger(8);
1959 /// ```
1960 pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
1961 unsafe {
1962 ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
1963 }
1964 }
1965
1966 /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
/// point. A value < `0` means that no write slowdown will be triggered by
/// the number of files in level-0.
1969 ///
1970 /// Default: `20`
1971 ///
1972 /// Dynamically changeable through SetOptions() API
1973 ///
1974 /// # Examples
1975 ///
1976 /// ```
1977 /// use rocksdb::Options;
1978 ///
1979 /// let mut opts = Options::default();
1980 /// opts.set_level_zero_slowdown_writes_trigger(10);
1981 /// ```
1982 pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
1983 unsafe {
1984 ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
1985 }
1986 }
1987
1988 /// Sets the maximum number of level-0 files. We stop writes at this point.
1989 ///
1990 /// Default: `24`
1991 ///
1992 /// Dynamically changeable through SetOptions() API
1993 ///
1994 /// # Examples
1995 ///
1996 /// ```
1997 /// use rocksdb::Options;
1998 ///
1999 /// let mut opts = Options::default();
2000 /// opts.set_level_zero_stop_writes_trigger(48);
2001 /// ```
2002 pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2003 unsafe {
2004 ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2005 }
2006 }
2007
2008 /// Sets the compaction style.
2009 ///
2010 /// Default: DBCompactionStyle::Level
2011 ///
2012 /// # Examples
2013 ///
2014 /// ```
2015 /// use rocksdb::{Options, DBCompactionStyle};
2016 ///
2017 /// let mut opts = Options::default();
2018 /// opts.set_compaction_style(DBCompactionStyle::Universal);
2019 /// ```
2020 pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2021 unsafe {
2022 ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2023 }
2024 }
2025
2026 /// Sets the options needed to support Universal Style compactions.
2027 pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2028 unsafe {
2029 ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2030 }
2031 }
2032
2033 /// Sets the options for FIFO compaction style.
2034 pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2035 unsafe {
2036 ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2037 }
2038 }
2039
/// Setting unordered_write to true trades higher write throughput for
/// relaxing the immutability guarantee of snapshots. This violates the
/// repeatability one expects from ::Get from a snapshot, as well as
/// ::MultiGet and Iterator's consistent-point-in-time view property.
/// If the application cannot tolerate the relaxed guarantees, it can implement
/// its own mechanisms to work around that and yet benefit from the higher
/// throughput. Using TransactionDB with WRITE_PREPARED write policy and
/// two_write_queues=true is one way to achieve immutable snapshots despite
/// unordered_write.
///
/// By default, i.e., when it is false, rocksdb does not advance the sequence
/// number for new snapshots unless all the writes with lower sequence numbers
/// are already finished. This provides the immutability that we expect from
/// snapshots. Moreover, since Iterator and MultiGet internally depend on
/// snapshots, the snapshot immutability results in Iterator and MultiGet
/// offering a consistent-point-in-time view. If set to true, although the
/// Read-Your-Own-Write property is still provided, the snapshot immutability
/// property is relaxed: writes issued after the snapshot is obtained (with
/// larger sequence numbers) will still not be visible to reads from that
/// snapshot; however, there might still be pending writes (with lower
/// sequence numbers) that will change the state visible to the snapshot
/// after they land in the memtable.
2062 ///
2063 /// Default: false
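///
/// # Examples
///
/// A minimal sketch; only suitable if the application can tolerate the
/// relaxed snapshot guarantees described above:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_unordered_write(true);
/// ```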
2064 pub fn set_unordered_write(&mut self, unordered: bool) {
2065 unsafe {
2066 ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2067 }
2068 }
2069
2070 /// Sets maximum number of threads that will
2071 /// concurrently perform a compaction job by breaking it into multiple,
2072 /// smaller ones that are run simultaneously.
2073 ///
2074 /// Default: 1 (i.e. no subcompactions)
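///
/// # Examples
///
/// A minimal sketch splitting a compaction job across up to 4 threads:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_max_subcompactions(4);
/// ```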
2075 pub fn set_max_subcompactions(&mut self, num: u32) {
2076 unsafe {
2077 ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2078 }
2079 }
2080
2081 /// Sets maximum number of concurrent background jobs
2082 /// (compactions and flushes).
2083 ///
2084 /// Default: 2
2085 ///
2086 /// Dynamically changeable through SetDBOptions() API.
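///
/// # Examples
///
/// A minimal sketch allowing 4 concurrent background jobs:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_max_background_jobs(4);
/// ```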
2087 pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2088 unsafe {
2089 ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2090 }
2091 }
2092
2093 /// Sets the maximum number of concurrent background compaction jobs, submitted to
2094 /// the default LOW priority thread pool.
2095 /// We first try to schedule compactions based on
/// `base_background_compactions`. If compaction cannot catch up, we
/// will increase the number of compaction threads up to
2098 /// `max_background_compactions`.
2099 ///
2100 /// If you're increasing this, also consider increasing number of threads in
2101 /// LOW priority thread pool. For more information, see
2102 /// Env::SetBackgroundThreads
2103 ///
2104 /// Default: `1`
2105 ///
2106 /// # Examples
2107 ///
2108 /// ```
2109 /// use rocksdb::Options;
2110 ///
2111 /// let mut opts = Options::default();
2112 /// #[allow(deprecated)]
2113 /// opts.set_max_background_compactions(2);
2114 /// ```
2115 #[deprecated(
2116 since = "0.15.0",
2117 note = "RocksDB automatically decides this based on the value of max_background_jobs"
2118 )]
2119 pub fn set_max_background_compactions(&mut self, n: c_int) {
2120 unsafe {
2121 ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2122 }
2123 }
2124
2125 /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2126 /// the HIGH priority thread pool.
2127 ///
2128 /// By default, all background jobs (major compaction and memtable flush) go
2129 /// to the LOW priority pool. If this option is set to a positive number,
2130 /// memtable flush jobs will be submitted to the HIGH priority pool.
2131 /// It is important when the same Env is shared by multiple db instances.
2132 /// Without a separate pool, long running major compaction jobs could
2133 /// potentially block memtable flush jobs of other db instances, leading to
2134 /// unnecessary Put stalls.
2135 ///
2136 /// If you're increasing this, also consider increasing number of threads in
2137 /// HIGH priority thread pool. For more information, see
2138 /// Env::SetBackgroundThreads
2139 ///
2140 /// Default: `1`
2141 ///
2142 /// # Examples
2143 ///
2144 /// ```
2145 /// use rocksdb::Options;
2146 ///
2147 /// let mut opts = Options::default();
2148 /// #[allow(deprecated)]
2149 /// opts.set_max_background_flushes(2);
2150 /// ```
2151 #[deprecated(
2152 since = "0.15.0",
2153 note = "RocksDB automatically decides this based on the value of max_background_jobs"
2154 )]
2155 pub fn set_max_background_flushes(&mut self, n: c_int) {
2156 unsafe {
2157 ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2158 }
2159 }
2160
2161 /// Disables automatic compactions. Manual compactions can still
2162 /// be issued on this column family
2163 ///
2164 /// Default: `false`
2165 ///
2166 /// Dynamically changeable through SetOptions() API
2167 ///
2168 /// # Examples
2169 ///
2170 /// ```
2171 /// use rocksdb::Options;
2172 ///
2173 /// let mut opts = Options::default();
2174 /// opts.set_disable_auto_compactions(true);
2175 /// ```
2176 pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2177 unsafe {
2178 ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2179 }
2180 }
2181
/// Sets the page size for huge pages used by the arena backing the
/// memtable.
/// If <= 0, allocations come from malloc rather than huge pages.
/// Users are responsible for reserving huge pages so they can be
/// allocated, for example:
/// sysctl -w vm.nr_hugepages=20
/// See the Linux doc Documentation/vm/hugetlbpage.txt.
/// If there aren't enough free huge pages available, allocation falls
/// back to malloc.
2191 ///
2192 /// Dynamically changeable through SetOptions() API
2193 pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2194 unsafe {
2195 ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2196 }
2197 }
2198
2199 /// Sets the maximum number of successive merge operations on a key in the memtable.
2200 ///
2201 /// When a merge operation is added to the memtable and the maximum number of
2202 /// successive merges is reached, the value of the key will be calculated and
2203 /// inserted into the memtable instead of the merge operation. This will
2204 /// ensure that there are never more than max_successive_merges merge
2205 /// operations in the memtable.
2206 ///
2207 /// Default: 0 (disabled)
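///
/// # Examples
///
/// A minimal sketch capping successive merge operands at 5; this only
/// matters when a merge operator is configured:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_max_successive_merges(5);
/// ```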
2208 pub fn set_max_successive_merges(&mut self, num: usize) {
2209 unsafe {
2210 ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2211 }
2212 }
2213
2214 /// Control locality of bloom filter probes to improve cache miss rate.
2215 /// This option only applies to memtable prefix bloom and plaintable
2216 /// prefix bloom. It essentially limits the max number of cache lines each
2217 /// bloom filter check can touch.
2218 ///
2219 /// This optimization is turned off when set to 0. The number should never
2220 /// be greater than number of probes. This option can boost performance
/// for in-memory workloads but should be used with care since it can cause
/// a higher false positive rate.
2223 ///
2224 /// Default: 0
2225 pub fn set_bloom_locality(&mut self, v: u32) {
2226 unsafe {
2227 ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2228 }
2229 }
2230
2231 /// Enable/disable thread-safe inplace updates.
2232 ///
/// Updates are performed in place only if:
/// * the key exists in the current memtable
/// * sizeof(new_value) <= sizeof(old_value)
/// * old_value for that key is a put, i.e. kTypeValue
2237 ///
2238 /// Default: false.
2239 pub fn set_inplace_update_support(&mut self, enabled: bool) {
2240 unsafe {
2241 ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2242 }
2243 }
2244
2245 /// Sets the number of locks used for inplace update.
2246 ///
2247 /// Default: 10000 when inplace_update_support = true, otherwise 0.
2248 pub fn set_inplace_update_locks(&mut self, num: usize) {
2249 unsafe {
2250 ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2251 }
2252 }
2253
2254 /// Different max-size multipliers for different levels.
2255 /// These are multiplied by max_bytes_for_level_multiplier to arrive
2256 /// at the max-size of each level.
2257 ///
2258 /// Default: 1
2259 ///
2260 /// Dynamically changeable through SetOptions() API
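///
/// # Examples
///
/// A minimal sketch, assuming one multiplier entry per configured level:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_num_levels(3);
/// opts.set_max_bytes_for_level_multiplier_additional(&[1, 2, 4]);
/// ```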
2261 pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2262 let count = level_values.len();
2263 unsafe {
2264 ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2265 self.inner,
2266 level_values.as_ptr() as *mut c_int,
2267 count,
2268 );
2269 }
2270 }
2271
2272 /// If true, then DB::Open() will not fetch and check sizes of all sst files.
2273 /// This may significantly speed up startup if there are many sst files,
2274 /// especially when using non-default Env with expensive GetFileSize().
2275 /// We'll still check that all required sst files exist.
2276 /// If paranoid_checks is false, this option is ignored, and sst files are
2277 /// not checked at all.
2278 ///
2279 /// Default: false
2280 pub fn set_skip_checking_sst_file_sizes_on_db_open(&mut self, value: bool) {
2281 unsafe {
2282 ffi::rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
2283 self.inner,
2284 c_uchar::from(value),
2285 );
2286 }
2287 }
2288
/// The total maximum size (in bytes) of write buffers to maintain in memory,
2290 /// including copies of buffers that have already been flushed. This parameter
2291 /// only affects trimming of flushed buffers and does not affect flushing.
2292 /// This controls the maximum amount of write history that will be available
2293 /// in memory for conflict checking when Transactions are used. The actual
2294 /// size of write history (flushed Memtables) might be higher than this limit
2295 /// if further trimming will reduce write history total size below this
2296 /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2297 /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2298 /// Because trimming the next Memtable of size 20MB will reduce total memory
2299 /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2300 ///
2301 /// When using an OptimisticTransactionDB:
2302 /// If this value is too low, some transactions may fail at commit time due
2303 /// to not being able to determine whether there were any write conflicts.
2304 ///
2305 /// When using a TransactionDB:
2306 /// If Transaction::SetSnapshot is used, TransactionDB will read either
2307 /// in-memory write buffers or SST files to do write-conflict checking.
2308 /// Increasing this value can reduce the number of reads to SST files
2309 /// done for conflict detection.
2310 ///
2311 /// Setting this value to 0 will cause write buffers to be freed immediately
2312 /// after they are flushed. If this value is set to -1,
2313 /// 'max_write_buffer_number * write_buffer_size' will be used.
2314 ///
2315 /// Default:
2316 /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2317 /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2318 /// if it is not explicitly set by the user. Otherwise, the default is 0.
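///
/// # Examples
///
/// A minimal sketch retaining roughly 64MiB of flushed write history for
/// conflict checking:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_max_write_buffer_size_to_maintain(64 * 1024 * 1024);
/// ```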
2319 pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2320 unsafe {
2321 ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2322 }
2323 }
2324
/// By default, a single write thread queue is maintained. The thread that
/// reaches the head of the queue becomes the write batch group leader and is
/// responsible for writing to the WAL and the memtable for the batch group.
///
/// If enable_pipelined_write is true, separate write thread queues are
/// maintained for WAL writes and memtable writes. A write thread first enters
/// the WAL writer queue and then the memtable writer queue. Pending threads
/// on the WAL writer queue thus only have to wait for previous writers to
/// finish their WAL writing but not the memtable writing. Enabling the
/// feature may improve write throughput and reduce latency of the prepare
/// phase of two-phase commit.
2336 ///
2337 /// Default: false
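///
/// # Examples
///
/// A minimal sketch:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_enable_pipelined_write(true);
/// ```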
2338 pub fn set_enable_pipelined_write(&mut self, value: bool) {
2339 unsafe {
2340 ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
2341 }
2342 }
2343
2344 /// Defines the underlying memtable implementation.
2345 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2346 /// Defaults to using a skiplist.
2347 ///
2348 /// # Examples
2349 ///
2350 /// ```
2351 /// use rocksdb::{Options, MemtableFactory};
2352 /// let mut opts = Options::default();
2353 /// let factory = MemtableFactory::HashSkipList {
2354 /// bucket_count: 1_000_000,
2355 /// height: 4,
2356 /// branching_factor: 4,
2357 /// };
2358 ///
2359 /// opts.set_allow_concurrent_memtable_write(false);
2360 /// opts.set_memtable_factory(factory);
2361 /// ```
2362 pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2363 match factory {
2364 MemtableFactory::Vector => unsafe {
2365 ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2366 },
2367 MemtableFactory::HashSkipList {
2368 bucket_count,
2369 height,
2370 branching_factor,
2371 } => unsafe {
2372 ffi::rocksdb_options_set_hash_skip_list_rep(
2373 self.inner,
2374 bucket_count,
2375 height,
2376 branching_factor,
2377 );
2378 },
2379 MemtableFactory::HashLinkList { bucket_count } => unsafe {
2380 ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2381 },
2382 };
2383 }
2384
2385 pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2386 unsafe {
2387 ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2388 }
2389 self.outlive.block_based = Some(factory.outlive.clone());
2390 }
2391
2392 /// Sets the table factory to a CuckooTableFactory (the default table
2393 /// factory is a block-based table factory that provides a default
2394 /// implementation of TableBuilder and TableReader with default
2395 /// BlockBasedTableOptions).
2396 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2397 /// # Examples
2398 ///
2399 /// ```
2400 /// use rocksdb::{Options, CuckooTableOptions};
2401 ///
2402 /// let mut opts = Options::default();
2403 /// let mut factory_opts = CuckooTableOptions::default();
2404 /// factory_opts.set_hash_ratio(0.8);
2405 /// factory_opts.set_max_search_depth(20);
2406 /// factory_opts.set_cuckoo_block_size(10);
2407 /// factory_opts.set_identity_as_first_hash(true);
2408 /// factory_opts.set_use_module_hash(false);
2409 ///
2410 /// opts.set_cuckoo_table_factory(&factory_opts);
2411 /// ```
2412 pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2413 unsafe {
2414 ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2415 }
2416 }
2417
/// Sets the factory as plain table (the default table factory is a
/// block-based table factory that provides a default implementation of
/// TableBuilder and TableReader with default BlockBasedTableOptions).
/// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
2425 ///
2426 /// # Examples
2427 ///
2428 /// ```
2429 /// use rocksdb::{Options, PlainTableFactoryOptions};
2430 ///
2431 /// let mut opts = Options::default();
2432 /// let factory_opts = PlainTableFactoryOptions {
2433 /// user_key_length: 0,
2434 /// bloom_bits_per_key: 20,
2435 /// hash_table_ratio: 0.75,
2436 /// index_sparseness: 16,
2437 /// };
2438 ///
2439 /// opts.set_plain_table_factory(&factory_opts);
2440 /// ```
2441 pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2442 unsafe {
2443 ffi::rocksdb_options_set_plain_table_factory(
2444 self.inner,
2445 options.user_key_length,
2446 options.bloom_bits_per_key,
2447 options.hash_table_ratio,
2448 options.index_sparseness,
2449 );
2450 }
2451 }
2452
2453 /// Sets the start level to use compression.
2454 pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2455 unsafe {
2456 ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2457 }
2458 }
2459
2460 /// Measure IO stats in compactions and flushes, if `true`.
2461 ///
2462 /// Default: `false`
2463 ///
2464 /// # Examples
2465 ///
2466 /// ```
2467 /// use rocksdb::Options;
2468 ///
2469 /// let mut opts = Options::default();
2470 /// opts.set_report_bg_io_stats(true);
2471 /// ```
2472 pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2473 unsafe {
2474 ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
2475 }
2476 }
2477
2478 /// Once write-ahead logs exceed this size, we will start forcing the flush of
2479 /// column families whose memtables are backed by the oldest live WAL file
2480 /// (i.e. the ones that are causing all the space amplification).
2481 ///
2482 /// Default: `0`
2483 ///
2484 /// # Examples
2485 ///
2486 /// ```
2487 /// use rocksdb::Options;
2488 ///
2489 /// let mut opts = Options::default();
2490 /// // Set max total wal size to 1G.
2491 /// opts.set_max_total_wal_size(1 << 30);
2492 /// ```
2493 pub fn set_max_total_wal_size(&mut self, size: u64) {
2494 unsafe {
2495 ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2496 }
2497 }
2498
2499 /// Recovery mode to control the consistency while replaying WAL.
2500 ///
2501 /// Default: DBRecoveryMode::PointInTime
2502 ///
2503 /// # Examples
2504 ///
2505 /// ```
2506 /// use rocksdb::{Options, DBRecoveryMode};
2507 ///
2508 /// let mut opts = Options::default();
2509 /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
2510 /// ```
2511 pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
2512 unsafe {
2513 ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
2514 }
2515 }
2516
2517 pub fn enable_statistics(&mut self) {
2518 unsafe {
2519 ffi::rocksdb_options_enable_statistics(self.inner);
2520 }
2521 }
2522
2523 pub fn get_statistics(&self) -> Option<String> {
2524 unsafe {
2525 let value = ffi::rocksdb_options_statistics_get_string(self.inner);
2526 if value.is_null() {
2527 return None;
2528 }
2529
2530 // Must have valid UTF-8 format.
2531 let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
2532 ffi::rocksdb_free(value as *mut c_void);
2533 Some(s)
2534 }
2535 }
2536
2537 /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
2538 ///
2539 /// Default: `600` (10 mins)
2540 ///
2541 /// # Examples
2542 ///
2543 /// ```
2544 /// use rocksdb::Options;
2545 ///
2546 /// let mut opts = Options::default();
2547 /// opts.set_stats_dump_period_sec(300);
2548 /// ```
2549 pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
2550 unsafe {
2551 ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
2552 }
2553 }
2554
/// If not zero, persist `rocksdb.stats` to RocksDB every `stats_persist_period_sec` seconds.
2556 ///
2557 /// Default: `600` (10 mins)
2558 ///
2559 /// # Examples
2560 ///
2561 /// ```
2562 /// use rocksdb::Options;
2563 ///
2564 /// let mut opts = Options::default();
2565 /// opts.set_stats_persist_period_sec(5);
2566 /// ```
2567 pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
2568 unsafe {
2569 ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
2570 }
2571 }
2572
2573 /// When set to true, reading SST files will opt out of the filesystem's
2574 /// readahead. Setting this to false may improve sequential iteration
2575 /// performance.
2576 ///
2577 /// Default: `true`
2578 pub fn set_advise_random_on_open(&mut self, advise: bool) {
2579 unsafe {
2580 ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
2581 }
2582 }
2583
2584 /// Specifies the file access pattern once a compaction is started.
2585 ///
2586 /// It will be applied to all input files of a compaction.
2587 ///
2588 /// Default: Normal
2589 pub fn set_access_hint_on_compaction_start(&mut self, pattern: AccessHint) {
2590 unsafe {
2591 ffi::rocksdb_options_set_access_hint_on_compaction_start(self.inner, pattern as c_int);
2592 }
2593 }
2594
2595 /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
2596 ///
2597 /// This could reduce context switch when the mutex is not
2598 /// heavily contended. However, if the mutex is hot, we could end up
2599 /// wasting spin time.
2600 ///
2601 /// Default: false
2602 pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
2603 unsafe {
2604 ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
2605 }
2606 }
2607
2608 /// Sets the number of levels for this database.
2609 pub fn set_num_levels(&mut self, n: c_int) {
2610 unsafe {
2611 ffi::rocksdb_options_set_num_levels(self.inner, n);
2612 }
2613 }
2614
2615 /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
2616 /// creates a prefix bloom filter for each memtable with the size of
2617 /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
2618 ///
2619 /// Default: `0`
2620 ///
2621 /// # Examples
2622 ///
2623 /// ```
2624 /// use rocksdb::{Options, SliceTransform};
2625 ///
2626 /// let mut opts = Options::default();
2627 /// let transform = SliceTransform::create_fixed_prefix(10);
2628 /// opts.set_prefix_extractor(transform);
2629 /// opts.set_memtable_prefix_bloom_ratio(0.2);
2630 /// ```
2631 pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
2632 unsafe {
2633 ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
2634 }
2635 }
2636
2637 /// Sets the maximum number of bytes in all compacted files.
/// We try to limit the number of bytes in one compaction to be lower than this
2639 /// threshold. But it's not guaranteed.
2640 ///
2641 /// Value 0 will be sanitized.
2642 ///
2643 /// Default: target_file_size_base * 25
2644 pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
2645 unsafe {
2646 ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
2647 }
2648 }
2649
2650 /// Specifies the absolute path of the directory the
2651 /// write-ahead log (WAL) should be written to.
2652 ///
2653 /// Default: same directory as the database
2654 ///
2655 /// # Examples
2656 ///
2657 /// ```
2658 /// use rocksdb::Options;
2659 ///
2660 /// let mut opts = Options::default();
2661 /// opts.set_wal_dir("/path/to/dir");
2662 /// ```
2663 pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
2664 let p = to_cpath(path).unwrap();
2665 unsafe {
2666 ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
2667 }
2668 }
2669
2670 /// Sets the WAL ttl in seconds.
2671 ///
/// The following two options affect how archived logs will be deleted.
/// 1. If both are set to 0, logs will be deleted asap and will not get into
/// the archive.
/// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
/// WAL files will be checked every 10 min and if total size is greater
/// than wal_size_limit_mb, they will be deleted starting with the
/// earliest until size_limit is met. All empty files will be deleted.
/// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
/// WAL files will be checked every wal_ttl_seconds / 2 and those that
/// are older than wal_ttl_seconds will be deleted.
/// 4. If both are not 0, WAL files will be checked every 10 min and both
/// checks will be performed with ttl being first.
2684 ///
2685 /// Default: 0
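///
/// # Examples
///
/// A minimal sketch keeping archived WAL files for one hour (case 3 above):
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_wal_ttl_seconds(3600);
/// ```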
2686 pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
2687 unsafe {
2688 ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
2689 }
2690 }
2691
2692 /// Sets the WAL size limit in MB.
2693 ///
/// If the total size of WAL files is greater than wal_size_limit_mb,
2695 /// they will be deleted starting with the earliest until size_limit is met.
2696 ///
2697 /// Default: 0
2698 pub fn set_wal_size_limit_mb(&mut self, size: u64) {
2699 unsafe {
2700 ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
2701 }
2702 }
2703
2704 /// Sets the number of bytes to preallocate (via fallocate) the manifest files.
2705 ///
2706 /// Default is 4MB, which is reasonable to reduce random IO
2707 /// as well as prevent overallocation for mounts that preallocate
2708 /// large amounts of data (such as xfs's allocsize option).
2709 pub fn set_manifest_preallocation_size(&mut self, size: usize) {
2710 unsafe {
2711 ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
2712 }
2713 }
2714
2715 /// If true, then DB::Open() will not update the statistics used to optimize
2716 /// compaction decision by loading table properties from many files.
/// Skipping this work will improve DB::Open time, especially in disk-bound environments.
2718 ///
2719 /// Default: false
2720 pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
2721 unsafe {
2722 ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
2723 }
2724 }
2725
2726 /// Specify the maximal number of info log files to be kept.
2727 ///
2728 /// Default: 1000
2729 ///
2730 /// # Examples
2731 ///
2732 /// ```
2733 /// use rocksdb::Options;
2734 ///
2735 /// let mut options = Options::default();
2736 /// options.set_keep_log_file_num(100);
2737 /// ```
2738 pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
2739 unsafe {
2740 ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
2741 }
2742 }
2743
2744 /// Allow the OS to mmap file for writing.
2745 ///
2746 /// Default: false
2747 ///
2748 /// # Examples
2749 ///
2750 /// ```
2751 /// use rocksdb::Options;
2752 ///
2753 /// let mut options = Options::default();
2754 /// options.set_allow_mmap_writes(true);
2755 /// ```
2756 pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
2757 unsafe {
2758 ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
2759 }
2760 }
2761
2762 /// Allow the OS to mmap file for reading sst tables.
2763 ///
2764 /// Default: false
2765 ///
2766 /// # Examples
2767 ///
2768 /// ```
2769 /// use rocksdb::Options;
2770 ///
2771 /// let mut options = Options::default();
2772 /// options.set_allow_mmap_reads(true);
2773 /// ```
2774 pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
2775 unsafe {
2776 ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
2777 }
2778 }
2779
2780 /// If enabled, WAL is not flushed automatically after each write. Instead it
2781 /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
2782 /// to its file.
2783 ///
2784 /// Default: false
2785 ///
2786 /// # Examples
2787 ///
2788 /// ```
2789 /// use rocksdb::Options;
2790 ///
2791 /// let mut options = Options::default();
2792 /// options.set_manual_wal_flush(true);
2793 /// ```
2794 pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
2795 unsafe {
2796 ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
2797 }
2798 }
2799
2800 /// Guarantee that all column families are flushed together atomically.
2801 /// This option applies to both manual flushes (`db.flush()`) and automatic
2802 /// background flushes caused when memtables are filled.
2803 ///
2804 /// Note that this is only useful when the WAL is disabled. When using the
2805 /// WAL, writes are always consistent across column families.
2806 ///
2807 /// Default: false
2808 ///
2809 /// # Examples
2810 ///
2811 /// ```
2812 /// use rocksdb::Options;
2813 ///
2814 /// let mut options = Options::default();
2815 /// options.set_atomic_flush(true);
2816 /// ```
2817 pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
2818 unsafe {
2819 ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
2820 }
2821 }
2822
/// Sets the global cache for table-level rows. The cache must outlive the DB instance that uses it.
2824 ///
2825 /// Default: null (disabled)
2826 /// Not supported in ROCKSDB_LITE mode!
2827 pub fn set_row_cache(&mut self, cache: &Cache) {
2828 unsafe {
2829 ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
2830 }
2831 self.outlive.row_cache = Some(cache.clone());
2832 }
2833
/// Used to control the write rate of flush and compaction. Flush has higher
2835 /// priority than compaction.
2836 /// If rate limiter is enabled, bytes_per_sync is set to 1MB by default.
2837 ///
2838 /// Default: disable
2839 ///
2840 /// # Examples
2841 ///
2842 /// ```
2843 /// use rocksdb::Options;
2844 ///
2845 /// let mut options = Options::default();
2846 /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
2847 /// ```
2848 pub fn set_ratelimiter(
2849 &mut self,
2850 rate_bytes_per_sec: i64,
2851 refill_period_us: i64,
2852 fairness: i32,
2853 ) {
2854 unsafe {
2855 let ratelimiter =
2856 ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
2857 // Since limiter is wrapped in shared_ptr, we don't need to
2858 // call rocksdb_ratelimiter_destroy explicitly.
2859 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
2860 }
2861 }
2862
2863 /// Sets the maximal size of the info log file.
2864 ///
2865 /// If the log file is larger than `max_log_file_size`, a new info log file
2866 /// will be created. If `max_log_file_size` is equal to zero, all logs will
2867 /// be written to one log file.
2868 ///
2869 /// Default: 0
2870 ///
2871 /// # Examples
2872 ///
2873 /// ```
2874 /// use rocksdb::Options;
2875 ///
2876 /// let mut options = Options::default();
2877 /// options.set_max_log_file_size(0);
2878 /// ```
2879 pub fn set_max_log_file_size(&mut self, size: usize) {
2880 unsafe {
2881 ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
2882 }
2883 }
2884
2885 /// Sets the time for the info log file to roll (in seconds).
2886 ///
2887 /// If specified with non-zero value, log file will be rolled
2888 /// if it has been active longer than `log_file_time_to_roll`.
2889 /// Default: 0 (disabled)
2890 pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
2891 unsafe {
2892 ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
2893 }
2894 }
2895
2896 /// Controls the recycling of log files.
2897 ///
2898 /// If non-zero, previously written log files will be reused for new logs,
2899 /// overwriting the old data. The value indicates how many such files we will
2900 /// keep around at any point in time for later use. This is more efficient
2901 /// because the blocks are already allocated and fdatasync does not need to
2902 /// update the inode after each write.
2903 ///
2904 /// Default: 0
2905 ///
2906 /// # Examples
2907 ///
2908 /// ```
2909 /// use rocksdb::Options;
2910 ///
2911 /// let mut options = Options::default();
2912 /// options.set_recycle_log_file_num(5);
2913 /// ```
2914 pub fn set_recycle_log_file_num(&mut self, num: usize) {
2915 unsafe {
2916 ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
2917 }
2918 }
2919
/// Sets the threshold at which all writes will be slowed down to at least delayed_write_rate if the
/// estimated bytes needing compaction exceed this threshold.
2922 ///
2923 /// Default: 64GB
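///
/// # Examples
///
/// A minimal sketch; the 128GiB value is illustrative, not a recommendation:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_soft_pending_compaction_bytes_limit(128 * 1024 * 1024 * 1024);
/// ```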
2924 pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
2925 unsafe {
2926 ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
2927 }
2928 }
2929
/// Sets the bytes threshold at which all writes are stopped if the estimated
/// bytes needing compaction exceed this threshold.
2932 ///
2933 /// Default: 256GB
2934 pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
2935 unsafe {
2936 ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
2937 }
2938 }
2939
2940 /// Sets the size of one block in arena memory allocation.
2941 ///
2942 /// If <= 0, a proper value is automatically calculated (usually 1/10 of
/// write_buffer_size).
2944 ///
2945 /// Default: 0
2946 pub fn set_arena_block_size(&mut self, size: usize) {
2947 unsafe {
2948 ffi::rocksdb_options_set_arena_block_size(self.inner, size);
2949 }
2950 }
2951
2952 /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
2953 ///
2954 /// Default: false
2955 pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
2956 unsafe {
2957 ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
2958 }
2959 }
2960
2961 /// Enable whole key bloom filter in memtable. Note this will only take effect
2962 /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
/// can potentially reduce CPU usage for point lookups.
2964 ///
2965 /// Default: false (disable)
2966 ///
2967 /// Dynamically changeable through SetOptions() API
2968 pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
2969 unsafe {
2970 ffi::rocksdb_options_set_memtable_whole_key_filtering(
2971 self.inner,
2972 c_uchar::from(whole_key_filter),
2973 );
2974 }
2975 }
2976
2977 /// Enable the use of key-value separation.
2978 ///
2979 /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
2980 ///
2981 /// Default: false (disable)
2982 ///
2983 /// Dynamically changeable through SetOptions() API
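///
/// # Examples
///
/// A minimal sketch sending large values to blob files; the 4KiB threshold
/// here is illustrative, not a recommendation:
///
/// ```
/// use rocksdb::Options;
///
/// let mut opts = Options::default();
/// opts.set_enable_blob_files(true);
/// opts.set_min_blob_size(4096);
/// ```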
2984 pub fn set_enable_blob_files(&mut self, val: bool) {
2985 unsafe {
2986 ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
2987 }
2988 }
2989
/// Sets the size threshold at or above which values will be written
/// to blob files during flush or compaction.
2992 ///
2993 /// Dynamically changeable through SetOptions() API
2994 pub fn set_min_blob_size(&mut self, val: u64) {
2995 unsafe {
2996 ffi::rocksdb_options_set_min_blob_size(self.inner, val);
2997 }
2998 }
2999
3000 /// Sets the size limit for blob files.
3001 ///
3002 /// Dynamically changeable through SetOptions() API
3003 pub fn set_blob_file_size(&mut self, val: u64) {
3004 unsafe {
3005 ffi::rocksdb_options_set_blob_file_size(self.inner, val);
3006 }
3007 }
3008
3009 /// Sets the blob compression type. All blob files use the same
3010 /// compression type.
3011 ///
3012 /// Dynamically changeable through SetOptions() API
3013 pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
3014 unsafe {
3015 ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
3016 }
3017 }
3018
3019 /// If this is set to true RocksDB will actively relocate valid blobs from the oldest blob files
3020 /// as they are encountered during compaction.
3021 ///
3022 /// Dynamically changeable through SetOptions() API
3023 pub fn set_enable_blob_gc(&mut self, val: bool) {
3024 unsafe {
3025 ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
3026 }
3027 }
3028
3029 /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
3030 ///
3031 /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
3032 /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
3033 /// the trade-off between write amplification and space amplification.
3034 ///
3035 /// Dynamically changeable through SetOptions() API
3036 pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
3037 unsafe {
3038 ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
3039 }
3040 }
3041
3042 /// Sets the blob GC force threshold.
3043 ///
3044 /// Dynamically changeable through SetOptions() API
3045 pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
3046 unsafe {
3047 ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
3048 }
3049 }
3050
3051 /// Sets the blob compaction read ahead size.
3052 ///
3053 /// Dynamically changeable through SetOptions() API
3054 pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
3055 unsafe {
3056 ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
3057 }
3058 }
3059
/// Set this option to true during creation of the database if you want
3061 /// to be able to ingest behind (call IngestExternalFile() skipping keys
3062 /// that already exist, rather than overwriting matching keys).
3063 /// Setting this option to true has the following effects:
3064 /// 1) Disable some internal optimizations around SST file compression.
3065 /// 2) Reserve the last level for ingested files only.
3066 /// 3) Compaction will not include any file from the last level.
3067 /// Note that only Universal Compaction supports allow_ingest_behind.
3068 /// `num_levels` should be >= 3 if this option is turned on.
3069 ///
3070 /// DEFAULT: false
3071 /// Immutable.
3072 pub fn set_allow_ingest_behind(&mut self, val: bool) {
3073 unsafe {
3074 ffi::rocksdb_options_set_allow_ingest_behind(self.inner, c_uchar::from(val));
3075 }
3076 }
3077}
3078
3079impl Default for Options {
3080 fn default() -> Self {
3081 unsafe {
3082 let opts = ffi::rocksdb_options_create();
3083 assert!(!opts.is_null(), "Could not create RocksDB options");
3084
3085 Self {
3086 inner: opts,
3087 outlive: OptionsMustOutliveDB::default(),
3088 }
3089 }
3090 }
3091}
3092
3093impl FlushOptions {
3094 pub fn new() -> FlushOptions {
3095 FlushOptions::default()
3096 }
3097
3098 /// Waits until the flush is done.
3099 ///
3100 /// Default: true
3101 ///
3102 /// # Examples
3103 ///
3104 /// ```
3105 /// use rocksdb::FlushOptions;
3106 ///
3107 /// let mut options = FlushOptions::default();
3108 /// options.set_wait(false);
3109 /// ```
3110 pub fn set_wait(&mut self, wait: bool) {
3111 unsafe {
3112 ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
3113 }
3114 }
3115}
3116
3117impl Default for FlushOptions {
3118 fn default() -> Self {
3119 let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3120 assert!(
3121 !flush_opts.is_null(),
3122 "Could not create RocksDB flush options"
3123 );
3124
3125 Self { inner: flush_opts }
3126 }
3127}
3128
3129impl WriteOptions {
3130 pub fn new() -> WriteOptions {
3131 WriteOptions::default()
3132 }
3133
3134 /// Sets the sync mode. If true, the write will be flushed
3135 /// from the operating system buffer cache before the write is considered complete.
3136 /// If this flag is true, writes will be slower.
3137 ///
3138 /// Default: false
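///
/// # Examples
///
/// A minimal sketch trading write latency for durability:
///
/// ```
/// use rocksdb::WriteOptions;
///
/// let mut write_opts = WriteOptions::default();
/// write_opts.set_sync(true);
/// ```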
3139 pub fn set_sync(&mut self, sync: bool) {
3140 unsafe {
3141 ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
3142 }
3143 }
3144
3145 /// Sets whether WAL should be active or not.
3146 /// If true, writes will not first go to the write ahead log,
/// and the write may be lost after a crash.
3148 ///
3149 /// Default: false
3150 pub fn disable_wal(&mut self, disable: bool) {
3151 unsafe {
3152 ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
3153 }
3154 }
3155
/// If true and the user is trying to write to column families that don't exist (they were dropped),
3157 /// ignore the write (don't return an error). If there are multiple writes in a WriteBatch,
3158 /// other writes will succeed.
3159 ///
3160 /// Default: false
3161 pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
3162 unsafe {
3163 ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
3164 self.inner,
3165 c_uchar::from(ignore),
3166 );
3167 }
3168 }
3169
/// If true and we would need to wait or sleep for the write request, it fails
3171 /// immediately with Status::Incomplete().
3172 ///
3173 /// Default: false
3174 pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
3175 unsafe {
3176 ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
3177 }
3178 }
3179
3180 /// If true, this write request is of lower priority if compaction is
/// behind. In this case, if no_slowdown = true, the request will be cancelled
/// immediately with Status::Incomplete() returned. Otherwise, it will be
/// slowed down. The slowdown value is determined by RocksDB to guarantee
/// it introduces minimal impact on high priority writes.
3185 ///
3186 /// Default: false
3187 pub fn set_low_pri(&mut self, v: bool) {
3188 unsafe {
3189 ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
3190 }
3191 }
3192
3193 /// If true, writebatch will maintain the last insert positions of each
3194 /// memtable as hints in concurrent write. It can improve write performance
3195 /// in concurrent writes if keys in one writebatch are sequential. In
3196 /// non-concurrent writes (when concurrent_memtable_writes is false) this
3197 /// option will be ignored.
3198 ///
3199 /// Default: false
3200 pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
3201 unsafe {
3202 ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
3203 self.inner,
3204 c_uchar::from(v),
3205 );
3206 }
3207 }
3208}
3209
3210impl Default for WriteOptions {
3211 fn default() -> Self {
3212 let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
3213 assert!(
3214 !write_opts.is_null(),
3215 "Could not create RocksDB write options"
3216 );
3217
3218 Self { inner: write_opts }
3219 }
3220}
3221
3222#[derive(Debug, Copy, Clone, PartialEq, Eq)]
3223#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
3224#[repr(i32)]
3225pub enum ReadTier {
3226 /// Reads data in memtable, block cache, OS cache or storage.
3227 All = 0,
3228 /// Reads data in memtable or block cache.
3229 BlockCache,
3230}
3231
3232impl ReadOptions {
3233 // TODO add snapshot setting here
3234 // TODO add snapshot wrapper structs with proper destructors;
3235 // that struct needs an "iterator" impl too.
3236
3237 /// Specify whether the "data block"/"index block"/"filter block"
/// read for this iteration should be cached in memory.
3239 /// Callers may wish to set this field to false for bulk scans.
3240 ///
3241 /// Default: true
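///
/// # Examples
///
/// A minimal sketch for a bulk scan that should not pollute the block cache:
///
/// ```
/// let mut options = rocksdb::ReadOptions::default();
/// options.fill_cache(false);
/// ```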
3242 pub fn fill_cache(&mut self, v: bool) {
3243 unsafe {
3244 ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
3245 }
3246 }
3247
3248 /// Sets the snapshot which should be used for the read.
3249 /// The snapshot must belong to the DB that is being read and must
3250 /// not have been released.
3251 pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
3252 unsafe {
3253 ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
3254 }
3255 }
3256
3257 /// Sets the lower bound for an iterator.
3258 pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
3259 self.set_lower_bound_impl(Some(key.into()));
3260 }
3261
3262 /// Sets the upper bound for an iterator.
/// The upper bound itself is not included in the iteration result.
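///
/// # Examples
///
/// A minimal sketch; keys greater than or equal to "xz" are excluded:
///
/// ```
/// let mut options = rocksdb::ReadOptions::default();
/// options.set_iterate_lower_bound("xy".as_bytes());
/// options.set_iterate_upper_bound("xz".as_bytes());
/// ```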
    pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
        self.set_upper_bound_impl(Some(key.into()));
    }

    /// Sets lower and upper bounds based on the provided range. This is
    /// similar to setting lower and upper bounds separately except that it
    /// also allows either bound to be reset.
    ///
    /// The argument can be a regular Rust range, e.g. `lower..upper`. However,
    /// since the RocksDB upper bound is always excluded (i.e. the range can
    /// never be fully closed), inclusive ranges (`lower..=upper` and
    /// `..=upper`) are not supported. For example:
    ///
    /// ```
    /// let mut options = rocksdb::ReadOptions::default();
    /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
    /// ```
    ///
    /// In addition, [`crate::PrefixRange`] can be used to specify a range of
    /// keys with a given prefix. In particular, the above example is
    /// equivalent to:
    ///
    /// ```
    /// let mut options = rocksdb::ReadOptions::default();
    /// options.set_iterate_range(rocksdb::PrefixRange("xy".as_bytes()));
    /// ```
    ///
    /// Note that setting a range with this method is separate from using
    /// prefix iterators. Prefix iterators use the prefix extractor configured
    /// for a column family. Setting bounds via [`crate::PrefixRange`] is more
    /// akin to using a manual prefix.
    ///
    /// Using this method clears any previously set bounds. In other words,
    /// the bounds can be reset by setting the range to `..` as in:
    ///
    /// ```
    /// let mut options = rocksdb::ReadOptions::default();
    /// options.set_iterate_range(..);
    /// ```
    pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
        let (lower, upper) = range.into_bounds();
        self.set_lower_bound_impl(lower);
        self.set_upper_bound_impl(upper);
    }

    fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref bound) = bound {
            (bound.as_ptr() as *const c_char, bound.len())
        } else if self.iterate_lower_bound.is_some() {
            (std::ptr::null(), 0)
        } else {
            return;
        };
        self.iterate_lower_bound = bound;
        unsafe {
            ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
        }
    }

    fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref bound) = bound {
            (bound.as_ptr() as *const c_char, bound.len())
        } else if self.iterate_upper_bound.is_some() {
            (std::ptr::null(), 0)
        } else {
            return;
        };
        self.iterate_upper_bound = bound;
        unsafe {
            ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
        }
    }

    /// Specify if this read request should process data that ALREADY
    /// resides on a particular cache. If the required data is not
    /// found in the specified cache, then Status::Incomplete is returned.
    ///
    /// Default: `ReadTier::All`
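    ///
    /// For example, to fail reads that would have to touch storage (a minimal
    /// sketch; this assumes `ReadTier` is re-exported at the crate root
    /// alongside `ReadOptions`):
    ///
    /// ```
    /// let mut opts = rocksdb::ReadOptions::default();
    /// opts.set_read_tier(rocksdb::ReadTier::BlockCache);
    /// ```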
    pub fn set_read_tier(&mut self, tier: ReadTier) {
        unsafe {
            ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
        }
    }

    /// Enforce that the iterator only iterates over the same
    /// prefix as the seek.
    /// This option is effective only for prefix seeks, i.e. prefix_extractor is
    /// non-null for the column family and total_order_seek is false. Unlike
    /// iterate_upper_bound, prefix_same_as_start only works within a prefix
    /// but in both directions.
    ///
    /// Default: false
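    ///
    /// A minimal sketch (only meaningful when the column family has a prefix
    /// extractor configured):
    ///
    /// ```
    /// let mut opts = rocksdb::ReadOptions::default();
    /// opts.set_prefix_same_as_start(true);
    /// ```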
    pub fn set_prefix_same_as_start(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
        }
    }

    /// Enable a total order seek regardless of the index format (e.g. hash
    /// index) used in the table. Some table formats (e.g. plain table) may
    /// not support this option.
    ///
    /// If true when calling Get(), we also skip the prefix bloom filter when
    /// reading from a block based table. It provides a way to read existing
    /// data after changing the implementation of the prefix extractor.
    pub fn set_total_order_seek(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
        }
    }

    /// Sets a threshold for the number of keys that can be skipped
    /// before failing an iterator seek as incomplete. With the default value
    /// of 0, a request never fails as incomplete, no matter how many keys are
    /// skipped.
    ///
    /// Default: 0
    pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
        unsafe {
            ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
        }
    }

    /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
    /// schedule a background job in the flush job queue and delete obsolete
    /// files in the background.
    ///
    /// Default: false
    pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If true, keys deleted using the DeleteRange() API will be visible to
    /// readers until they are naturally deleted during compaction. This improves
    /// read performance in DBs with many range deletions.
    ///
    /// Default: false
    pub fn set_ignore_range_deletions(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
        }
    }

    /// If true, all data read from underlying storage will be
    /// verified against corresponding checksums.
    ///
    /// Default: true
    pub fn set_verify_checksums(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
        }
    }

    /// If non-zero, an iterator will create a new table reader which
    /// performs reads of the given size. Using a large size (> 2MB) can
    /// improve the performance of forward iteration on spinning disks.
    ///
    /// Default: 0
    ///
    /// ```
    /// use rocksdb::ReadOptions;
    ///
    /// let mut opts = ReadOptions::default();
    /// opts.set_readahead_size(4_194_304); // 4 MiB
    /// ```
    pub fn set_readahead_size(&mut self, v: usize) {
        unsafe {
            ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
        }
    }

    /// If true, create a tailing iterator. Note that tailing iterators
    /// only support moving in the forward direction. Iterating in reverse
    /// or seeking to the last entry are not supported.
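    ///
    /// A minimal sketch; the iterator itself is created through the DB
    /// handle, this only configures the read options:
    ///
    /// ```
    /// let mut opts = rocksdb::ReadOptions::default();
    /// opts.set_tailing(true);
    /// ```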
    pub fn set_tailing(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
        }
    }

    /// Specifies the value of "pin_data". If true, it keeps the blocks
    /// loaded by the iterator pinned in memory as long as the iterator is not
    /// deleted. If used when reading from tables created with
    /// BlockBasedTableOptions::use_delta_encoding = false, the iterator's
    /// property "rocksdb.iterator.is-key-pinned" is guaranteed to return 1.
    ///
    /// Default: false
    pub fn set_pin_data(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
        }
    }

    /// Asynchronously prefetch some data.
    ///
    /// Used for sequential reads and internal automatic prefetching.
    ///
    /// Default: `false`
    pub fn set_async_io(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
        }
    }
}

impl Default for ReadOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_readoptions_create(),
                iterate_upper_bound: None,
                iterate_lower_bound: None,
            }
        }
    }
}

impl IngestExternalFileOptions {
    /// Can be set to true to move the files instead of copying them.
    pub fn set_move_files(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
        }
    }

    /// If set to false, keys from an ingested file could appear in existing
    /// snapshots that were created before the file was ingested.
    pub fn set_snapshot_consistency(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If set to false, IngestExternalFile() will fail if the file key range
    /// overlaps with existing keys or tombstones in the DB.
    pub fn set_allow_global_seqno(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If set to false and the file key range overlaps with the memtable key
    /// range (a memtable flush would be required), IngestExternalFile will
    /// fail.
    pub fn set_allow_blocking_flush(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Set to true if you would like duplicate keys in the file being ingested
    /// to be skipped rather than overwriting existing data under that key.
    /// Use case: back-filling historical data into the database without
    /// overwriting existing newer versions of that data.
    /// This option can only be used if the DB has been running
    /// with allow_ingest_behind=true since the dawn of time.
    /// All files will be ingested at the bottommost level with seqno=0.
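    ///
    /// A minimal sketch (this assumes `IngestExternalFileOptions` is
    /// re-exported at the crate root the same way `ReadOptions` is):
    ///
    /// ```
    /// let mut opts = rocksdb::IngestExternalFileOptions::default();
    /// opts.set_ingest_behind(true);
    /// ```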
    pub fn set_ingest_behind(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
        }
    }
}

impl Default for IngestExternalFileOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_ingestexternalfileoptions_create(),
            }
        }
    }
}

/// Used by BlockBasedOptions::set_index_type.
pub enum BlockBasedIndexType {
    /// A space efficient index block that is optimized for
    /// binary-search-based index.
    BinarySearch,

    /// The hash index, if enabled, will perform a hash lookup if
    /// a prefix extractor has been provided through Options::set_prefix_extractor.
    HashSearch,

    /// A two-level index implementation. Both levels are binary search indexes.
    TwoLevelIndexSearch,
}

/// Used by BlockBasedOptions::set_data_block_index_type.
#[repr(C)]
pub enum DataBlockIndexType {
    /// Use binary search when performing point lookup for keys in data blocks.
    /// This is the default.
    BinarySearch = 0,

    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
    /// compatible with databases created without this feature. Once turned on, existing data will
    /// be gradually converted to the hash index format.
    BinaryAndHash = 1,
}

/// Defines the underlying memtable implementation.
/// See the official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
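///
/// For example (mirroring the test at the bottom of this module; this assumes
/// `MemtableFactory` and `Options` are re-exported at the crate root):
///
/// ```
/// let mut opts = rocksdb::Options::default();
/// opts.set_memtable_factory(rocksdb::MemtableFactory::HashLinkList { bucket_count: 100 });
/// ```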
pub enum MemtableFactory {
    Vector,
    HashSkipList {
        bucket_count: usize,
        height: i32,
        branching_factor: i32,
    },
    HashLinkList {
        bucket_count: usize,
    },
}

/// Used by BlockBasedOptions::set_checksum_type.
pub enum ChecksumType {
    NoChecksum = 0,
    CRC32c = 1,
    XXHash = 2,
    XXHash64 = 3,
    XXH3 = 4, // Supported since RocksDB 6.27
}

/// Used with DBOptions::set_plain_table_factory.
/// See the official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
///
/// Defaults:
/// * user_key_length: 0 (variable length)
/// * bloom_bits_per_key: 10
/// * hash_table_ratio: 0.75
/// * index_sparseness: 16
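///
/// A minimal sketch constructing the documented defaults (this assumes the
/// struct is re-exported at the crate root):
///
/// ```
/// let opts = rocksdb::PlainTableFactoryOptions {
///     user_key_length: 0,
///     bloom_bits_per_key: 10,
///     hash_table_ratio: 0.75,
///     index_sparseness: 16,
/// };
/// ```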
pub struct PlainTableFactoryOptions {
    pub user_key_length: u32,
    pub bloom_bits_per_key: i32,
    pub hash_table_ratio: f64,
    pub index_sparseness: usize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompressionType {
    None = ffi::rocksdb_no_compression as isize,
    Snappy = ffi::rocksdb_snappy_compression as isize,
    Zlib = ffi::rocksdb_zlib_compression as isize,
    Bz2 = ffi::rocksdb_bz2_compression as isize,
    Lz4 = ffi::rocksdb_lz4_compression as isize,
    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
    Zstd = ffi::rocksdb_zstd_compression as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionStyle {
    Level = ffi::rocksdb_level_compaction as isize,
    Universal = ffi::rocksdb_universal_compaction as isize,
    Fifo = ffi::rocksdb_fifo_compaction as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBRecoveryMode {
    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}

/// File access pattern once a compaction has started.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(i32)]
pub enum AccessHint {
    None = 0,
    Normal,
    Sequential,
    WillNeed,
}

pub struct FifoCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}

impl Default for FifoCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Fifo Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for FifoCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
        }
    }
}

impl FifoCompactOptions {
    /// Sets the max table file size.
    ///
    /// Once the total sum of table files reaches this, we will delete the
    /// oldest table file.
    ///
    /// Default: 1GB
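    ///
    /// A minimal sketch (this assumes `FifoCompactOptions` is re-exported at
    /// the crate root; the size is an arbitrary illustration):
    ///
    /// ```
    /// let mut opts = rocksdb::FifoCompactOptions::default();
    /// opts.set_max_table_files_size(8 * 1024 * 1024 * 1024); // 8 GiB
    /// ```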
    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum UniversalCompactionStopStyle {
    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
}

pub struct UniversalCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
}

impl Default for UniversalCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Universal Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for UniversalCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
        }
    }
}

impl UniversalCompactOptions {
    /// Sets the percentage flexibility while comparing file size.
    /// If the candidate file(s) size is 1% smaller than the next file's size,
    /// then include the next file in this candidate set.
    ///
    /// Default: 1
    pub fn set_size_ratio(&mut self, ratio: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
        }
    }

    /// Sets the minimum number of files in a single compaction run.
    ///
    /// Default: 2
    pub fn set_min_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
        }
    }

    /// Sets the maximum number of files in a single compaction run.
    ///
    /// Default: UINT_MAX
    pub fn set_max_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
        }
    }

    /// Sets the size amplification.
    ///
    /// It is defined as the amount (in percentage) of
    /// additional storage needed to store a single byte of data in the database.
    /// For example, a size amplification of 2% means that a database that
    /// contains 100 bytes of user data may occupy up to 102 bytes of
    /// physical storage. By this definition, a fully compacted database has
    /// a size amplification of 0%. RocksDB uses the following heuristic
    /// to calculate size amplification: it assumes that all files excluding
    /// the earliest file contribute to the size amplification.
    ///
    /// Default: 200, which means that a 100 byte database could require up to
    /// 300 bytes of storage.
    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
                self.inner, v,
            );
        }
    }

    /// Sets the percentage of compression size.
    ///
    /// If this option is set to -1, all the output files
    /// will follow the compression type specified.
    ///
    /// If this option is not negative, we will try to make sure the compressed
    /// size is just above this value. In normal cases, at least this percentage
    /// of data will be compressed.
    ///
    /// When we are compacting to a new file, the criterion for whether the
    /// output needs to be compressed is as follows. Assume the list of files
    /// sorted by generation time is `A1...An B1...Bm C1...Ct`, where A1 is
    /// the newest and Ct is the oldest, and we are going to compact B1...Bm.
    /// We calculate the total size of all the files as total_size, as well as
    /// the total size of C1...Ct as total_C; the compaction output file will
    /// be compressed iff `total_C / total_size < this percentage`.
    ///
    /// Default: -1
    pub fn set_compression_size_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
        }
    }

    /// Sets the algorithm used to stop picking files for a single compaction
    /// run.
    ///
    /// Default: `UniversalCompactionStopStyle::Total`
    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum BottommostLevelCompaction {
    /// Skip bottommost level compaction.
    Skip = 0,
    /// Only compact bottommost level if there is a compaction filter.
    /// This is the default option.
    IfHaveCompactionFilter,
    /// Always compact bottommost level.
    Force,
    /// Always compact bottommost level but in bottommost level avoid
    /// double-compacting files created in the same compaction.
    ForceOptimized,
}

pub struct CompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
}

impl Default for CompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_compactoptions_create() };
        assert!(!opts.is_null(), "Could not create RocksDB Compact Options");

        Self { inner: opts }
    }
}

impl Drop for CompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_compactoptions_destroy(self.inner);
        }
    }
}

impl CompactOptions {
    /// If more than one thread calls manual compaction,
    /// only one will actually schedule it while the other threads will simply
    /// wait for the scheduled manual compaction to complete. If
    /// exclusive_manual_compaction is set to true, the call will disable
    /// scheduling of automatic compaction jobs and wait for existing automatic
    /// compaction jobs to finish.
    pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Sets bottommost level compaction.
    pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
        unsafe {
            ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
        }
    }

    /// If true, compacted files will be moved to the minimum level capable
    /// of holding the data, or to the given level (when target_level is
    /// non-negative).
    pub fn set_change_level(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
        }
    }

    /// If change_level is true and target_level has a non-negative value,
    /// compacted files will be moved to target_level.
    pub fn set_target_level(&mut self, lvl: c_int) {
        unsafe {
            ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
        }
    }
}

/// Represents a path where SST files can be placed.
pub struct DBPath {
    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
}

impl DBPath {
    /// Creates a new path.
    pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
        let p = to_cpath(path.as_ref()).unwrap();
        let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
        if dbpath.is_null() {
            Err(Error::new(format!(
                "Could not create path for storing sst files at location: {}",
                path.as_ref().display()
            )))
        } else {
            Ok(DBPath { inner: dbpath })
        }
    }
}

impl Drop for DBPath {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_dbpath_destroy(self.inner);
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::{MemtableFactory, Options};

    #[test]
    fn test_enable_statistics() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_dump_period_sec(60);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_memtable_factory() {
        let mut opts = Options::default();
        opts.set_memtable_factory(MemtableFactory::Vector);
        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
        opts.set_memtable_factory(MemtableFactory::HashSkipList {
            bucket_count: 100,
            height: 4,
            branching_factor: 4,
        });
    }

    #[test]
    fn test_set_stats_persist_period_sec() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_persist_period_sec(5);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }
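
    // The following smoke tests are additions: minimal sketches that exercise
    // the option builders defined above purely through their own setters, so
    // no DB instance is required.
    #[test]
    fn test_universal_compact_options() {
        use super::{UniversalCompactOptions, UniversalCompactionStopStyle};

        // Drive each setter once, mostly with its documented default value
        // (max_merge_width defaults to UINT_MAX, which does not fit c_int,
        // so i32::MAX stands in as an illustrative large value).
        let mut opts = UniversalCompactOptions::default();
        opts.set_size_ratio(1);
        opts.set_min_merge_width(2);
        opts.set_max_merge_width(i32::MAX);
        opts.set_max_size_amplification_percent(200);
        opts.set_compression_size_percent(-1);
        opts.set_stop_style(UniversalCompactionStopStyle::Total);
    }

    #[test]
    fn test_fifo_compact_options() {
        use super::FifoCompactOptions;

        // An arbitrary illustrative size; the documented default is 1 GB.
        let mut opts = FifoCompactOptions::default();
        opts.set_max_table_files_size(8 * 1024 * 1024 * 1024);
    }

    #[test]
    fn test_compact_options() {
        use super::{BottommostLevelCompaction, CompactOptions};

        let mut opts = CompactOptions::default();
        opts.set_exclusive_manual_compaction(true);
        opts.set_bottommost_level_compaction(BottommostLevelCompaction::ForceOptimized);
        opts.set_change_level(true);
        opts.set_target_level(2);
    }

    #[test]
    fn test_dbpath() {
        use super::DBPath;

        // rocksdb_dbpath_create only records the path and target size; it
        // does not touch the filesystem, so a scratch path is fine here.
        let path = DBPath::new("_rust_rocksdb_dbpath_test", 1024 * 1024).unwrap();
        drop(path);
    }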
}