haizhi_rocksdb/db_options.rs
// Copyright 2020 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::ffi::CStr;
use std::path::Path;
use std::ptr::{null_mut, NonNull};
use std::slice;
use std::sync::Arc;

use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};

use crate::{
    compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn},
    compaction_filter_factory::{self, CompactionFilterFactory},
    comparator::{self, ComparatorCallback, CompareFn},
    db::DBAccess,
    env::Env,
    event_listener::{self, EventListener},
    ffi,
    ffi_util::{from_cstr, to_cpath, CStrLike},
    merge_operator::{
        self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
    },
    slice_transform::SliceTransform,
    ColumnFamilyDescriptor, Error, SnapshotWithThreadMode,
};

pub(crate) struct CacheWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_cache_t>,
}

impl Drop for CacheWrapper {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cache_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct Cache(pub(crate) Arc<CacheWrapper>);

impl Cache {
    /// Creates an LRU cache with capacity in bytes.
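    ///
    /// # Examples
    ///
    /// A minimal sketch creating a 64 MiB cache:
    ///
    /// ```
    /// use rocksdb::Cache;
    ///
    /// let cache = Cache::new_lru_cache(64 * 1024 * 1024).unwrap();
    /// // A freshly created cache holds no blocks yet.
    /// assert!(cache.get_usage() <= 64 * 1024 * 1024);
    /// ```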
    pub fn new_lru_cache(capacity: size_t) -> Result<Cache, Error> {
        let inner = NonNull::new(unsafe { ffi::rocksdb_cache_create_lru(capacity) })
            .ok_or(Error::new("Could not create Cache".to_owned()))?;
        Ok(Cache(Arc::new(CacheWrapper { inner })))
    }

    /// Creates a HyperClockCache with capacity in bytes.
    ///
    /// `estimated_entry_charge` is an important tuning parameter. The optimal
    /// choice at any given time is
    /// `(cache.get_usage() - 64 * cache.get_table_address_count()) /
    /// cache.get_occupancy_count()`, or approximately `cache.get_usage() /
    /// cache.get_occupancy_count()`.
    ///
    /// However, the value cannot be changed dynamically, so as the cache
    /// composition changes at runtime, the following tradeoffs apply:
    ///
    /// * If the estimate is substantially too high (e.g., 25% higher),
    ///   the cache may have to evict entries to prevent load factors that
    ///   would dramatically affect lookup times.
    /// * If the estimate is substantially too low (e.g., less than half),
    ///   then metadata space overhead is substantially higher.
    ///
    /// The latter is generally preferable, and picking the larger of
    /// block size and metadata block size is a reasonable choice that
    /// errs towards this side.
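    ///
    /// # Examples
    ///
    /// A minimal sketch; the entry-charge estimate here is illustrative:
    ///
    /// ```
    /// use rocksdb::Cache;
    ///
    /// // 64 MiB cache, assuming ~4 KiB blocks as the estimated entry charge.
    /// let cache = Cache::new_hyper_clock_cache(64 * 1024 * 1024, 4 * 1024);
    /// ```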
    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
        Cache(Arc::new(CacheWrapper {
            inner: NonNull::new(unsafe {
                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
            })
            .unwrap(),
        }))
    }

    /// Returns the cache memory usage in bytes.
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
    }

    /// Returns the pinned memory usage in bytes.
    pub fn get_pinned_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_pinned_usage(self.0.inner.as_ptr()) }
    }

    /// Sets cache capacity in bytes.
    pub fn set_capacity(&mut self, capacity: size_t) {
        unsafe {
            ffi::rocksdb_cache_set_capacity(self.0.inner.as_ptr(), capacity);
        }
    }
}

#[derive(Default)]
pub(crate) struct OptionsMustOutliveDB {
    env: Option<Env>,
    row_cache: Option<Cache>,
    block_based: Option<BlockBasedOptionsMustOutliveDB>,
}

impl OptionsMustOutliveDB {
    pub(crate) fn clone(&self) -> Self {
        Self {
            env: self.env.as_ref().map(Env::clone),
            row_cache: self.row_cache.as_ref().map(Cache::clone),
            block_based: self
                .block_based
                .as_ref()
                .map(BlockBasedOptionsMustOutliveDB::clone),
        }
    }
}

#[derive(Default)]
struct BlockBasedOptionsMustOutliveDB {
    block_cache: Option<Cache>,
}

impl BlockBasedOptionsMustOutliveDB {
    fn clone(&self) -> Self {
        Self {
            block_cache: self.block_cache.as_ref().map(Cache::clone),
        }
    }
}

/// Database-wide options around performance and behavior.
///
/// Please read the official tuning [guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide)
/// and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use rocksdb::{Options, DB};
/// use rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///     let path = "path/for/rocksdb/storageX";
///     let mut opts = Options::default();
///     opts.create_if_missing(true);
///     opts.set_max_open_files(10000);
///     opts.set_use_fsync(false);
///     opts.set_bytes_per_sync(8388608);
///     opts.optimize_for_point_lookup(1024);
///     opts.set_table_cache_num_shard_bits(6);
///     opts.set_max_write_buffer_number(32);
///     opts.set_write_buffer_size(536870912);
///     opts.set_target_file_size_base(1073741824);
///     opts.set_min_write_buffer_number_to_merge(4);
///     opts.set_level_zero_stop_writes_trigger(2000);
///     opts.set_level_zero_slowdown_writes_trigger(0);
///     opts.set_compaction_style(DBCompactionStyle::Universal);
///     opts.set_disable_auto_compactions(true);
///
///     DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    pub(crate) outlive: OptionsMustOutliveDB,
}

/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use rocksdb::{DB, Options, WriteBatch, WriteOptions};
///
/// let path = "_path_for_rocksdb_storageY1";
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    pub(crate) inner: *mut ffi::rocksdb_writeoptions_t,
}

/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use rocksdb::{DB, Options, FlushOptions};
///
/// let path = "_path_for_rocksdb_storageY2";
/// {
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}

/// For configuring block-based file storage.
pub struct BlockBasedOptions {
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    outlive: BlockBasedOptionsMustOutliveDB,
}

pub struct ReadOptions {
    pub(crate) inner: *mut ffi::rocksdb_readoptions_t,
    iterate_upper_bound: Option<Vec<u8>>,
    iterate_lower_bound: Option<Vec<u8>>,
}

/// Configuration of cuckoo-based storage.
pub struct CuckooTableOptions {
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}

/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use rocksdb::{DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// writer.open("_path_for_sst_file").unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let path = "_path_for_rocksdb_storageY3";
/// {
///     let db = DB::open_default(&path).unwrap();
///     let mut ingest_opts = IngestExternalFileOptions::default();
///     ingest_opts.set_move_files(true);
///     db.ingest_external_file_opts(&ingest_opts, vec!["_path_for_sst_file"]).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct IngestExternalFileOptions {
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}

// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CacheWrapper {}

// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference.
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CacheWrapper {}

impl Drop for Options {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}

impl Clone for Options {
    fn clone(&self) -> Self {
        let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
        assert!(!inner.is_null(), "Could not copy RocksDB options");

        Self {
            inner,
            outlive: self.outlive.clone(),
        }
    }
}

impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}

impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}

impl Drop for FlushOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}

impl Drop for WriteOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}

impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_readoptions_destroy(self.inner);
        }
    }
}

impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}

impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
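    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_block_size(16 * 1024); // 16 KiB of uncompressed user data per block
    /// ```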
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
    ///
    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
    pub fn set_partition_filters(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(
                self.inner,
                c_uchar::from(enabled),
            );
        }
    }

    /// Sets the global cache for blocks (user data is stored in a set of blocks,
    /// and a block is the unit of reading from disk). The cache must outlive the
    /// DB instance which uses it.
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
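    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, Cache};
    ///
    /// let cache = Cache::new_lru_cache(64 * 1024 * 1024).unwrap();
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_block_cache(&cache);
    /// ```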
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        self.outlive.block_cache = Some(cache.clone());
    }

    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, c_uchar::from(true));
        }
    }

    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
    /// policy to reduce disk reads.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// ```
    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
        unsafe {
            let bloom = if block_based {
                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key as _)
            } else {
                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key as _)
            };

            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
        }
    }

    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// If cache_index_and_filter_blocks is enabled, cache index and filter blocks with high priority.
    /// If set to true, depending on implementation of block cache,
    /// index and filter blocks may be less likely to be evicted than data blocks.
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Defines the index type to be used for SS-table lookups.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
    /// ```
    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
        let index = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// the top-level index of partitioned filter and index blocks are stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in LSM tree.
    ///
    /// Default: false.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    ///
    /// See the full [list](https://github.com/facebook/rocksdb/blob/f059c7d9b96300091e07429a60f4ad55dac84859/include/rocksdb/table.h#L249-L274)
    /// of the supported versions.
    ///
    /// Default: 2.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }

    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }

    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }

    /// Set the data block index type for point lookups:
    /// `DataBlockIndexType::BinarySearch` to use binary search within the data block.
    /// `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
    /// the normal binary search.
    ///
    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
    ///
    /// Default: `BinarySearch`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
    /// block_opts.set_data_block_hash_ratio(0.85);
    /// ```
    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
        let index_t = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
        }
    }

    /// Set the data block hash index utilization ratio.
    ///
    /// The smaller the utilization ratio, the fewer hash collisions occur, reducing
    /// the risk of a point lookup falling back to binary search due to collisions.
    /// A small ratio thus means faster lookups at the price of more space overhead.
    ///
    /// Default: 0.75
    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
        }
    }

    /// If false, place only prefixes in the filter, not whole keys.
    ///
    /// Defaults to true.
    pub fn set_whole_key_filtering(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, c_uchar::from(v));
        }
    }

    /// Use the specified checksum type.
    /// Newly created table files will be protected with this checksum type.
    /// Old table files will still be readable, even though they have a different checksum type.
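    ///
    /// # Examples
    ///
    /// A sketch, assuming the accompanying `ChecksumType` enum exposes an
    /// `XXH3` variant as in upstream rust-rocksdb:
    ///
    /// ```
    /// use rocksdb::{BlockBasedOptions, ChecksumType};
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_checksum_type(ChecksumType::XXH3);
    /// ```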
    pub fn set_checksum_type(&mut self, checksum_type: ChecksumType) {
        unsafe {
            ffi::rocksdb_block_based_options_set_checksum(self.inner, checksum_type as c_char);
        }
    }
}

impl Default for BlockBasedOptions {
    fn default() -> Self {
        let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
        assert!(
            !block_opts.is_null(),
            "Could not create RocksDB block based options"
        );

        Self {
            inner: block_opts,
            outlive: BlockBasedOptionsMustOutliveDB::default(),
        }
    }
}

impl CuckooTableOptions {
    /// Determines the utilization of hash tables. Smaller values
    /// result in larger hash tables with fewer collisions.
    /// Default: 0.9
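    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::CuckooTableOptions;
    ///
    /// let mut opts = CuckooTableOptions::default();
    /// opts.set_hash_ratio(0.8); // larger tables, fewer collisions than the 0.9 default
    /// ```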
    pub fn set_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
        }
    }

    /// A property used by the builder to determine the depth to go to
    /// when searching for a path to displace elements in case of
    /// collision. See the Builder.MakeSpaceForKey method. Higher
    /// values result in more efficient hash tables with fewer
    /// lookups but take more time to build.
    /// Default: 100
    pub fn set_max_search_depth(&mut self, depth: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
        }
    }

    /// In case of collision while inserting, the builder
    /// attempts to insert in the next cuckoo_block_size
    /// locations before skipping over to the next Cuckoo hash
    /// function. This makes lookups more cache friendly in case
    /// of collisions.
    /// Default: 5
    pub fn set_cuckoo_block_size(&mut self, size: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
        }
    }

    /// If this option is enabled, the user key is treated as uint64_t and its
    /// value is used as the hash value directly. This option changes the
    /// builder's behavior. Readers ignore this option and behave according to
    /// what is specified in the table property.
    /// Default: false
    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, c_uchar::from(flag));
        }
    }

    /// If this option is set to true, modulo is used during hash calculation.
    /// This often yields better space efficiency at the cost of performance.
    /// If this option is set to false, the number of entries in the table is
    /// constrained to be a power of two, and bitwise AND is used to calculate
    /// the hash, which is faster in general.
    /// Default: true
    pub fn set_use_module_hash(&mut self, flag: bool) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, c_uchar::from(flag));
        }
    }
}

impl Default for CuckooTableOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
        assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");

        Self { inner: opts }
    }
}

/// Verbosity of the LOG.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}

pub(crate) struct RateLimiterInner {
    pub(crate) inner: NonNull<ffi::rocksdb_ratelimiter_t>,
}

impl Drop for RateLimiterInner {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ratelimiter_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct RateLimiter(pub(crate) Arc<RateLimiterInner>);

impl RateLimiter {
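    /// Creates a rate limiter that caps the rate of background I/O to
    /// `rate_bytes_per_sec`, refilling its token bucket every
    /// `refill_period_us` microseconds. `fairness` controls how often
    /// low-priority requests may be serviced ahead of high-priority ones
    /// (RocksDB's default is 10).
    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming the type is re-exported at the crate root
    /// like the other option types in this file):
    ///
    /// ```no_run
    /// use rocksdb::RateLimiter;
    ///
    /// // 16 MiB/s, 100 ms refill period, default fairness.
    /// let limiter = RateLimiter::new(16 * 1024 * 1024, 100_000, 10).unwrap();
    /// ```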
    pub fn new(
        rate_bytes_per_sec: i64,
        refill_period_us: i64,
        fairness: i32,
    ) -> Result<RateLimiter, Error> {
        let inner = NonNull::new(unsafe {
            ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness)
        })
        .ok_or(Error::new("Could not create RateLimiter".to_owned()))?;
        Ok(RateLimiter(Arc::new(RateLimiterInner { inner })))
    }
}

pub(crate) struct SstFileManagerInner {
    pub(crate) inner: NonNull<ffi::rocksdb_sstfilemanager_t>,
}

impl Drop for SstFileManagerInner {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_sstfilemanager_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct SstFileManager(pub(crate) Arc<SstFileManagerInner>);

impl SstFileManager {
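    /// Creates an SST file manager with default settings by wrapping this
    /// crate's `rocksdb_sstfilemanager_create` FFI binding. In RocksDB, an
    /// SST file manager tracks SST files and can be used to control their
    /// deletion rate.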
    pub fn new() -> Result<SstFileManager, Error> {
        let inner = NonNull::new(unsafe { ffi::rocksdb_sstfilemanager_create() })
            .ok_or(Error::new("Could not create SstFileManager".to_owned()))?;
        Ok(SstFileManager(Arc::new(SstFileManagerInner { inner })))
    }
}

impl Options {
    /// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
    /// latest RocksDB options file stored in the specified rocksdb database.
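    ///
    /// # Examples
    ///
    /// A sketch, marked `no_run` because it needs an existing database at
    /// the given path:
    ///
    /// ```no_run
    /// use rocksdb::{Cache, Env, Options};
    ///
    /// let cache = Cache::new_lru_cache(1024 * 1024).unwrap();
    /// let (_db_opts, _cf_descriptors) = Options::load_latest(
    ///     "_path_for_rocksdb_storage",
    ///     Env::new().unwrap(),
    ///     true, // ignore unknown options in the file
    ///     cache,
    /// ).unwrap();
    /// ```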
    pub fn load_latest<P: AsRef<Path>>(
        path: P,
        env: Env,
        ignore_unknown_options: bool,
        cache: Cache,
    ) -> Result<(Options, Vec<ColumnFamilyDescriptor>), Error> {
        let path = to_cpath(path)?;
        let mut db_options: *mut ffi::rocksdb_options_t = null_mut();
        let mut num_column_families: usize = 0;
        let mut column_family_names: *mut *mut c_char = null_mut();
        let mut column_family_options: *mut *mut ffi::rocksdb_options_t = null_mut();
        unsafe {
            ffi_try!(ffi::rocksdb_load_latest_options(
                path.as_ptr(),
                env.0.inner,
                ignore_unknown_options,
                cache.0.inner.as_ptr(),
                &mut db_options,
                &mut num_column_families,
                &mut column_family_names,
                &mut column_family_options,
            ));
        }
        let options = Options {
            inner: db_options,
            outlive: OptionsMustOutliveDB::default(),
        };
        let column_families = unsafe {
            Options::read_column_descriptors(
                num_column_families,
                column_family_names,
                column_family_options,
            )
        };
        Ok((options, column_families))
    }

    /// Reads column family descriptors from the C pointers returned by
    /// `rocksdb_load_latest_options`, then frees those pointers.
    #[inline]
    unsafe fn read_column_descriptors(
        num_column_families: usize,
        column_family_names: *mut *mut c_char,
        column_family_options: *mut *mut ffi::rocksdb_options_t,
    ) -> Vec<ColumnFamilyDescriptor> {
        let column_family_names_iter =
            slice::from_raw_parts(column_family_names, num_column_families)
                .iter()
                .map(|ptr| from_cstr(*ptr));
        let column_family_options_iter =
            slice::from_raw_parts(column_family_options, num_column_families)
                .iter()
                .map(|ptr| Options {
                    inner: *ptr,
                    outlive: OptionsMustOutliveDB::default(),
                });
        let column_descriptors = column_family_names_iter
            .zip(column_family_options_iter)
            .map(|(name, options)| ColumnFamilyDescriptor { name, options })
            .collect::<Vec<_>>();
        // Free the C-allocated arrays now that their contents have been copied.
        slice::from_raw_parts(column_family_names, num_column_families)
            .iter()
            .for_each(|ptr| ffi::rocksdb_free(*ptr as *mut c_void));
        ffi::rocksdb_free(column_family_names as *mut c_void);
        ffi::rocksdb_free(column_family_options as *mut c_void);
        column_descriptors
    }

    /// By default, RocksDB uses only one background thread for flush and
    /// compaction. Calling this function will set it up such that a total of
    /// `parallelism` threads is used. A good value is the number of cores.
    /// You almost definitely want to call this function if your system is
    /// bottlenecked by RocksDB.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.increase_parallelism(3);
    /// ```
    pub fn increase_parallelism(&mut self, parallelism: i32) {
        unsafe {
            ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
        }
    }

    /// Optimize level style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it can override
    /// those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption would be constrained by
    /// `memtable_memory_budget`.
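    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Constrain memtable memory to roughly 512 MiB.
    /// opts.optimize_level_style_compaction(512 * 1024 * 1024);
    /// ```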
    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_level_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// Optimize universal style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, and `max_bytes_for_level_base`, so it can override
    /// those parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption would be constrained by
    /// `memtable_memory_budget`.
    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_universal_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

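    /// Registers an `EventListener` whose `on_flush_begin` and
    /// `on_flush_completed` callbacks are invoked from RocksDB's background
    /// threads. The listener is boxed and ownership of it is handed to the
    /// C layer.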
    pub fn add_event_listener<L: EventListener>(&mut self, l: L) {
        let listener = Box::new(l);

        unsafe {
            let listener = ffi::rocksdb_event_listener_create(
                Box::into_raw(listener).cast::<c_void>(),
                Some(event_listener::on_flush_begin::<L>),
                Some(event_listener::on_flush_completed::<L>),
            );

            ffi::rocksdb_options_add_event_listener(self.inner, listener);
        }
    }

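    /// Sets `periodic_compaction_seconds`: SST files older than this many
    /// seconds become candidates for compaction, which ensures that old data
    /// eventually flows through any configured compaction filter.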
    pub fn set_periodic_compaction_seconds(&mut self, sec: u64) {
        unsafe {
            ffi::rocksdb_options_set_periodic_compaction_seconds(self.inner, sec);
        }
    }

    /// If true, the database will be created if it is missing.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_if_missing(true);
    /// ```
    pub fn create_if_missing(&mut self, create_if_missing: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_if_missing(
                self.inner,
                c_uchar::from(create_if_missing),
            );
        }
    }

    /// If true, any column families that didn't exist when opening the database
    /// will be created.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_missing_column_families(true);
    /// ```
    pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_missing_column_families(
                self.inner,
                c_uchar::from(create_missing_cfs),
            );
        }
    }

    /// Specifies whether an error should be raised if the database already exists.
    ///
    /// Default: false
    pub fn set_error_if_exists(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_error_if_exists(self.inner, c_uchar::from(enabled));
        }
    }

    /// Enable/disable paranoid checks.
    ///
    /// If true, the implementation will do aggressive checking of the
    /// data it is processing and will stop early if it detects any
    /// errors. This may have unforeseen ramifications: for example, a
    /// corruption of one DB entry may cause a large number of entries to
    /// become unreadable or for the entire DB to become unopenable.
    /// If any of the writes to the database fails (Put, Delete, Merge, Write),
    /// the database will switch to read-only mode and fail all other
    /// Write operations.
    ///
    /// Default: false
    pub fn set_paranoid_checks(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_paranoid_checks(self.inner, c_uchar::from(enabled));
        }
    }

    /// A list of paths where SST files can be put into, each with its target size.
    /// Newer data is placed into paths specified earlier in the vector while
    /// older data gradually moves to paths specified later in the vector.
    ///
    /// For example, if you have a flash device with 10GB allocated for the DB,
    /// as well as a hard drive of 2TB, you should configure it to be:
    /// [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
    ///
    /// The system will try to guarantee data under each path is close to but
    /// not larger than the target size. But current and future file sizes used
    /// in determining where to place a file are based on best-effort estimation,
    /// which means there is a chance that the actual size under the directory
    /// is slightly more than the target size under some workloads. Users should
    /// give some buffer room for those cases.
    ///
    /// If none of the paths has sufficient room to place a file, the file will
    /// be placed in the last path anyway, regardless of the target size.
    ///
    /// Placing newer data in earlier paths is also best-effort. Users should
    /// expect user files to be placed in higher levels in some extreme cases.
    ///
    /// If left empty, only one path will be used, which is the `path` passed
    /// when opening the DB.
    ///
    /// Default: empty
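    ///
    /// # Examples
    ///
    /// A sketch, assuming the crate exposes `DBPath::new(path, target_size)`
    /// as upstream rust-rocksdb does:
    ///
    /// ```no_run
    /// use rocksdb::{DBPath, Options};
    ///
    /// let flash = DBPath::new("/flash_path", 10 * 1024 * 1024 * 1024).unwrap();
    /// let hdd = DBPath::new("/hard_drive", 2u64 * 1024 * 1024 * 1024 * 1024).unwrap();
    /// let mut opts = Options::default();
    /// opts.set_db_paths(&[flash, hdd]);
    /// ```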
    pub fn set_db_paths(&mut self, paths: &[DBPath]) {
        let mut paths: Vec<_> = paths
            .iter()
            .map(|path| path.inner as *const ffi::rocksdb_dbpath_t)
            .collect();
        let num_paths = paths.len();
        unsafe {
            ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
        }
    }

    /// Use the specified object to interact with the environment,
    /// e.g. to read/write files, schedule background work, etc. In the near
    /// future, support for doing storage operations such as read/write files
    /// through env will be deprecated in favor of file_system.
    ///
    /// Default: Env::default()
    pub fn set_env(&mut self, env: &Env) {
        unsafe {
            ffi::rocksdb_options_set_env(self.inner, env.0.inner);
        }
        self.outlive.env = Some(env.clone());
    }

    /// Sets the compression algorithm that will be used for compressing blocks.
    ///
    /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
    /// snappy feature is not enabled).
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Snappy);
    /// ```
    pub fn set_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_compression(self.inner, t as c_int);
        }
    }

    /// Sets the bottom-most compression algorithm that will be used for
    /// compressing blocks at the bottom-most level.
    ///
    /// Note that to actually enable the bottom-most compression configuration
    /// after setting the compression type, it needs to be enabled by calling the
    /// [`set_bottommost_compression_options`](#method.set_bottommost_compression_options) or
    /// [`set_bottommost_zstd_max_train_bytes`](#method.set_bottommost_zstd_max_train_bytes) method with the `enabled` argument
    /// set to `true`.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_zstd_max_train_bytes(0, true);
    /// ```
    pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
        }
    }

    /// Different levels can have different compression policies. There
    /// are cases where most lower levels would like to use quick compression
    /// algorithms while the higher levels (which have more data) use
    /// compression algorithms that have better compression but could
    /// be slower. This array, if non-empty, should have an entry for
    /// each level of the database; these override the value specified in
    /// the previous field 'compression'.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_per_level(&[
    ///     DBCompressionType::None,
    ///     DBCompressionType::None,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy
    /// ]);
    /// ```
    pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
        unsafe {
            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
            ffi::rocksdb_options_set_compression_per_level(
                self.inner,
                level_types.as_mut_ptr(),
                level_types.len() as size_t,
            );
        }
    }

    /// Maximum size of dictionaries used to prime the compression library.
    /// Enabling a dictionary can improve compression ratios when there are
    /// repetitions across data blocks.
    ///
    /// The dictionary is created by sampling the SST file data. If
    /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
    /// dictionary generator. Otherwise, the random samples are used directly as
    /// the dictionary.
    ///
    /// When the compression dictionary is disabled, we compress and write each block
    /// before buffering data for the next one. When the compression dictionary is
    /// enabled, we buffer all SST file data in-memory so we can sample it, as data
    /// can only be compressed and written after the dictionary has been finalized.
    /// So users of this feature may see increased memory usage.
    ///
    /// Default: `0`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_options(4, 5, 6, 7);
    /// ```
    pub fn set_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
    ) {
        unsafe {
            ffi::rocksdb_options_set_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
            );
        }
    }

    /// Sets compression options for blocks at the bottom-most level. The meaning
    /// of all settings is the same as in the [`set_compression_options`](#method.set_compression_options) method, but
    /// they affect only the bottom-most compression, which is set using the
    /// [`set_bottommost_compression_type`](#method.set_bottommost_compression_type) method.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
    /// ```
    pub fn set_bottommost_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
        enabled: bool,
    ) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
                c_uchar::from(enabled),
            );
        }
    }

    /// Sets the maximum size of training data passed to zstd's dictionary trainer.
    /// Using zstd's dictionary trainer can achieve even better compression ratio
    /// improvements than using `max_dict_bytes` alone.
    ///
    /// The training data will be used to generate a dictionary of `max_dict_bytes`.
    ///
    /// Default: 0.
    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
        unsafe {
            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
        }
    }

    /// Sets the maximum size of training data passed to zstd's dictionary trainer
    /// when compressing the bottom-most level. Using zstd's dictionary trainer
    /// can achieve even better compression ratio improvements than using
    /// `max_dict_bytes` alone.
    ///
    /// The training data will be used to generate a dictionary of
    /// `max_dict_bytes`.
    ///
    /// Default: 0.
    pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
                self.inner,
                value,
                c_uchar::from(enabled),
            );
        }
    }

    /// If non-zero, we perform bigger reads when doing compaction. If you're
    /// running RocksDB on spinning disks, you should set this to at least 2MB.
    /// That way RocksDB's compaction is doing sequential instead of random reads.
    ///
    /// When non-zero, we also force new_table_reader_for_compaction_inputs to
    /// true.
    ///
    /// Default: `0`
    pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
        unsafe {
            ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
        }
    }

    /// Allow RocksDB to pick a dynamic base of bytes for levels.
    /// With this feature turned on, RocksDB will automatically adjust the max bytes
    /// for each level. The goal of this feature is to have a lower bound on size
    /// amplification.
    ///
    /// Default: false.
    pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

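    /// Sets a merge operator whose partial and full merges both use the same
    /// associative `full_merge_fn`.
    ///
    /// # Examples
    ///
    /// A sketch, assuming the crate's `MergeFn` matches the upstream
    /// rust-rocksdb signature
    /// (`Fn(&[u8], Option<&[u8]>, &MergeOperands) -> Option<Vec<u8>>`):
    ///
    /// ```no_run
    /// use rocksdb::{MergeOperands, Options};
    ///
    /// fn concat_merge(
    ///     _key: &[u8],
    ///     existing: Option<&[u8]>,
    ///     operands: &MergeOperands,
    /// ) -> Option<Vec<u8>> {
    ///     // Start from the existing value, then append every operand.
    ///     let mut result = existing.map(<[u8]>::to_vec).unwrap_or_default();
    ///     for op in operands {
    ///         result.extend_from_slice(op);
    ///     }
    ///     Some(result)
    /// }
    ///
    /// let mut opts = Options::default();
    /// opts.set_merge_operator_associative("concat", concat_merge);
    /// ```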
    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
        &mut self,
        name: impl CStrLike,
        full_merge_fn: F,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: name.into_c_string().unwrap(),
            full_merge_fn: full_merge_fn.clone(),
            partial_merge_fn: full_merge_fn,
        });

        unsafe {
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, F>),
                Some(full_merge_callback::<F, F>),
                Some(partial_merge_callback::<F, F>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, F>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }

    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
        &mut self,
        name: impl CStrLike,
        full_merge_fn: F,
        partial_merge_fn: PF,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: name.into_c_string().unwrap(),
            full_merge_fn,
            partial_merge_fn,
        });

        unsafe {
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, PF>),
                Some(full_merge_callback::<F, PF>),
                Some(partial_merge_callback::<F, PF>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, PF>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }

    #[deprecated(
        since = "0.5.0",
        note = "add_merge_operator has been renamed to set_merge_operator"
    )]
    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
        self.set_merge_operator_associative(name, merge_fn);
    }

    /// Sets a compaction filter used to determine if entries should be kept, changed,
    /// or removed during compaction.
    ///
    /// An example use case is to remove entries with an expired TTL.
    ///
    /// If you take a snapshot of the database, only values written since the last
    /// snapshot will be passed through the compaction filter.
    ///
    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
    /// simultaneously.
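    ///
    /// # Examples
    ///
    /// A sketch, assuming the crate's `CompactionFilterFn` takes
    /// `(level, key, value)` and returns a `compaction_filter::Decision`, as
    /// in upstream rust-rocksdb:
    ///
    /// ```no_run
    /// use rocksdb::compaction_filter::Decision;
    /// use rocksdb::Options;
    ///
    /// fn ttl_filter(_level: u32, _key: &[u8], value: &[u8]) -> Decision {
    ///     // Drop entries whose value carries an "expired" marker.
    ///     if value.starts_with(b"expired:") {
    ///         Decision::Remove
    ///     } else {
    ///         Decision::Keep
    ///     }
    /// }
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_filter("ttl_filter", ttl_filter);
    /// ```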
    pub fn set_compaction_filter<F>(&mut self, name: impl CStrLike, filter_fn: F)
    where
        F: CompactionFilterFn + Send + 'static,
    {
        let cb = Box::new(CompactionFilterCallback {
            name: name.into_c_string().unwrap(),
            filter_fn,
        });

        unsafe {
            let cf = ffi::rocksdb_compactionfilter_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
            );
            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
        }
    }

    /// This is a factory that provides compaction filter objects which allow
    /// an application to modify/delete a key-value during background compaction.
    ///
    /// A new filter will be created on each compaction run. If multithreaded
    /// compaction is being used, each created CompactionFilter will only be used
    /// from a single thread and so does not need to be thread-safe.
    ///
    /// Default: nullptr
    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
    where
        F: CompactionFilterFactory + 'static,
    {
        let factory = Box::new(factory);

        unsafe {
            let cff = ffi::rocksdb_compactionfilterfactory_create(
                Box::into_raw(factory).cast::<c_void>(),
                Some(compaction_filter_factory::destructor_callback::<F>),
                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
                Some(compaction_filter_factory::name_callback::<F>),
            );

            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
        }
    }

    /// Sets the comparator used to define the order of keys in the table.
    /// Default: a comparator that uses lexicographic byte-wise ordering
    ///
    /// The client must ensure that the comparator supplied here has the same
    /// name and orders keys *exactly* the same as the comparator provided to
    /// previous open calls on the same DB.
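    ///
    /// # Examples
    ///
    /// A sketch of a reverse byte-wise comparator, assuming `CompareFn` is the
    /// byte-slice comparison signature used by upstream rust-rocksdb:
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_comparator("reverse_bytewise", Box::new(|a: &[u8], b: &[u8]| b.cmp(a)));
    /// ```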
    pub fn set_comparator(&mut self, name: impl CStrLike, compare_fn: Box<CompareFn>) {
        let cb = Box::new(ComparatorCallback {
            name: name.into_c_string().unwrap(),
            f: compare_fn,
        });

        unsafe {
            let cmp = ffi::rocksdb_comparator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(comparator::destructor_callback),
                Some(comparator::compare_callback),
                Some(comparator::name_callback),
            );
            ffi::rocksdb_options_set_comparator(self.inner, cmp);
        }
    }

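    /// Sets the prefix extractor used for prefix seeks and prefix bloom
    /// filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, SliceTransform};
    ///
    /// let mut opts = Options::default();
    /// // Treat the first 3 bytes of each key as its prefix.
    /// opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(3));
    /// ```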
    pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
        unsafe {
            ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
        }
    }

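    /// Optimizes the options for point lookups (Get) rather than range scans;
    /// `cache_size` is the size of the block cache to use, in megabytes, per
    /// upstream RocksDB's `OptimizeForPointLookup`.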
    pub fn optimize_for_point_lookup(&mut self, cache_size: u64) {
        unsafe {
            ffi::rocksdb_options_optimize_for_point_lookup(self.inner, cache_size);
        }
    }

    /// Sets the optimize_filters_for_hits flag.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_optimize_filters_for_hits(true);
    /// ```
    pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
        unsafe {
            ffi::rocksdb_options_set_optimize_filters_for_hits(
                self.inner,
                c_int::from(optimize_for_hits),
            );
        }
    }

    /// Sets the periodicity with which obsolete files get deleted.
    ///
    /// The files that go out of scope through the compaction process will
    /// still get automatically deleted on every compaction, regardless of
    /// this setting.
    ///
    /// Default: 6 hours
    pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
        unsafe {
            ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
        }
    }

    /// Prepare the DB for bulk loading.
    ///
    /// All data will be in level 0 without any automatic compaction.
    /// It's recommended to manually call CompactRange(NULL, NULL) before reading
    /// from the database, because otherwise the read can be very slow.
    pub fn prepare_for_bulk_load(&mut self) {
        unsafe {
            ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
        }
    }

    /// Sets the number of open files that can be used by the DB. You may need to
    /// increase this if your database has a large working set. Value `-1` means
    /// files opened are always kept open. You can estimate the number of files based
    /// on target_file_size_base and target_file_size_multiplier for level-based
    /// compaction. For universal-style compaction, you can usually set it to `-1`.
    ///
    /// Default: `-1`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_open_files(10);
    /// ```
    pub fn set_max_open_files(&mut self, nfiles: c_int) {
        unsafe {
            ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
        }
    }

    /// If max_open_files is -1, the DB will open all files on DB::Open(). You can
    /// use this option to increase the number of threads used to open the files.
    ///
    /// Default: 16
    pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
        unsafe {
            ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
        }
    }

    /// By default, writes to stable storage use fdatasync (on platforms
    /// where this function is available). If this option is true,
    /// fsync is used instead.
    ///
    /// fsync and fdatasync are equally safe for our purposes and fdatasync is
    /// faster, so it is rarely necessary to set this option. It is provided
    /// as a workaround for kernel/filesystem bugs, such as one that affected
    /// fdatasync with ext4 in kernel versions prior to 3.7.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_use_fsync(true);
    /// ```
    pub fn set_use_fsync(&mut self, useit: bool) {
        unsafe {
            ffi::rocksdb_options_set_use_fsync(self.inner, c_int::from(useit));
        }
    }

    /// Specifies the absolute info LOG dir.
    ///
    /// If it is empty, the log files will be in the same dir as data.
    /// If it is non empty, the log files will be in the specified dir,
    /// and the db data dir's absolute path will be used as the log file
    /// name's prefix.
    ///
    /// Default: empty
    pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
        let p = to_cpath(path).unwrap();
        unsafe {
            ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
        }
    }

    /// Specifies the log level.
    /// Consider the `LogLevel` enum for a list of possible levels.
    ///
    /// Default: Info
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::{Options, LogLevel};
    ///
    /// let mut opts = Options::default();
    /// opts.set_log_level(LogLevel::Warn);
    /// ```
    pub fn set_log_level(&mut self, level: LogLevel) {
        unsafe {
            ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
        }
    }

    /// Allows the OS to incrementally sync files to disk while they are being
    /// written, asynchronously, in the background. This operation can be used
    /// to smooth out write I/Os over time. Users shouldn't rely on it for
    /// persistency guarantees.
    /// Issues one request for every `bytes_per_sync` written. `0` turns it off.
    ///
    /// Default: `0`
    ///
    /// You may consider using rate_limiter to regulate the write rate to the
    /// device. When the rate limiter is enabled, it automatically sets
    /// bytes_per_sync to 1MB.
    ///
    /// This option applies to table files.
    ///
    /// # Examples
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_bytes_per_sync(1024 * 1024);
    /// ```
    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
        unsafe {
            ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
        }
    }

    /// Same as bytes_per_sync, but applies to WAL files.
    ///
    /// Default: 0, turned off
    ///
    /// Dynamically changeable through the SetDBOptions() API.
    pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
        unsafe {
            ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
        }
    }

    /// Sets the maximum buffer size that is used by WritableFileWriter.
    ///
    /// On Windows, we need to maintain an aligned buffer for writes.
    /// We allow the buffer to grow until its size hits the limit in buffered
    /// IO, and fix the buffer size when using direct IO to ensure alignment of
    /// write requests if the logical sector size is unusual.
    ///
    /// Default: 1024 * 1024 (1 MB)
    ///
    /// Dynamically changeable through the SetDBOptions() API.
    pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
        unsafe {
            ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
        }
    }
1624
1625 /// If true, allow multi-writers to update mem tables in parallel.
1626 /// Only some memtable_factory-s support concurrent writes; currently it
1627 /// is implemented only for SkipListFactory. Concurrent memtable writes
1628 /// are not compatible with inplace_update_support or filter_deletes.
1629 /// It is strongly recommended to set enable_write_thread_adaptive_yield
1630 /// if you are going to use this feature.
1631 ///
1632 /// Default: true
1633 ///
1634 /// # Examples
1635 ///
1636 /// ```
1637 /// use rocksdb::Options;
1638 ///
1639 /// let mut opts = Options::default();
1640 /// opts.set_allow_concurrent_memtable_write(false);
1641 /// ```
1642 pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
1643 unsafe {
1644 ffi::rocksdb_options_set_allow_concurrent_memtable_write(
1645 self.inner,
1646 c_uchar::from(allow),
1647 );
1648 }
1649 }
1650
1651 /// If true, threads synchronizing with the write batch group leader will wait for up to
1652 /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
1653 /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
1654 /// is enabled.
1655 ///
1656 /// Default: true
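    ///
    /// # Examples
    ///
    /// A minimal sketch pairing this option with concurrent memtable writes,
    /// as recommended in `set_allow_concurrent_memtable_write`.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_allow_concurrent_memtable_write(true);
    /// opts.set_enable_write_thread_adaptive_yield(true);
    /// ```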
1657 pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
1658 unsafe {
1659 ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
1660 self.inner,
1661 c_uchar::from(enabled),
1662 );
1663 }
1664 }
1665
    /// Specifies the number of keys (with the same user key) that an
    /// iterator's Next() will sequentially skip over before a reseek is issued.
1670 ///
1671 /// Default: 8
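    ///
    /// # Examples
    ///
    /// A minimal sketch; doubling the default skip count is illustrative only.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_sequential_skip_in_iterations(16);
    /// ```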
1672 pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
1673 unsafe {
1674 ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
1675 }
1676 }
1677
    /// Enables direct I/O mode for reads. This may or may not improve
    /// performance, depending on the use case.
1680 ///
1681 /// Files will be opened in "direct I/O" mode
1682 /// which means that data read from the disk will not be cached or
1683 /// buffered. The hardware buffer of the devices may however still
1684 /// be used. Memory mapped files are not impacted by these parameters.
1685 ///
1686 /// Default: false
1687 ///
1688 /// # Examples
1689 ///
1690 /// ```
1691 /// use rocksdb::Options;
1692 ///
1693 /// let mut opts = Options::default();
1694 /// opts.set_use_direct_reads(true);
1695 /// ```
1696 pub fn set_use_direct_reads(&mut self, enabled: bool) {
1697 unsafe {
1698 ffi::rocksdb_options_set_use_direct_reads(self.inner, c_uchar::from(enabled));
1699 }
1700 }
1701
1702 /// Enable direct I/O mode for flush and compaction
1703 ///
1704 /// Files will be opened in "direct I/O" mode
1705 /// which means that data written to the disk will not be cached or
1706 /// buffered. The hardware buffer of the devices may however still
1707 /// be used. Memory mapped files are not impacted by these parameters.
    /// This may or may not improve performance, depending on the use case.
1709 ///
1710 /// Default: false
1711 ///
1712 /// # Examples
1713 ///
1714 /// ```
1715 /// use rocksdb::Options;
1716 ///
1717 /// let mut opts = Options::default();
1718 /// opts.set_use_direct_io_for_flush_and_compaction(true);
1719 /// ```
1720 pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
1721 unsafe {
1722 ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
1723 self.inner,
1724 c_uchar::from(enabled),
1725 );
1726 }
1727 }
1728
    /// If true, opened files are marked close-on-exec, preventing child processes from inheriting them.
1730 ///
1731 /// Default: true
1732 pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
1733 unsafe {
1734 ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, c_uchar::from(enabled));
1735 }
1736 }
1737
1738 /// Hints to the OS that it should not buffer disk I/O. Enabling this
1739 /// parameter may improve performance but increases pressure on the
1740 /// system cache.
1741 ///
1742 /// The exact behavior of this parameter is platform dependent.
1743 ///
1744 /// On POSIX systems, after RocksDB reads data from disk it will
1745 /// mark the pages as "unneeded". The operating system may - or may not
1746 /// - evict these pages from memory, reducing pressure on the system
1747 /// cache. If the disk block is requested again this can result in
1748 /// additional disk I/O.
1749 ///
1750 /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
1751 /// which means that data read from the disk will not be cached or
    /// buffered. The hardware buffer of the devices may however still
1753 /// be used. Memory mapped files are not impacted by this parameter.
1754 ///
1755 /// Default: true
1756 ///
1757 /// # Examples
1758 ///
1759 /// ```
1760 /// use rocksdb::Options;
1761 ///
1762 /// let mut opts = Options::default();
1763 /// #[allow(deprecated)]
1764 /// opts.set_allow_os_buffer(false);
1765 /// ```
1766 #[deprecated(
1767 since = "0.7.0",
1768 note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
1769 )]
1770 pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
1771 self.set_use_direct_reads(!is_allow);
1772 self.set_use_direct_io_for_flush_and_compaction(!is_allow);
1773 }
1774
1775 /// Sets the number of shards used for table cache.
1776 ///
1777 /// Default: `6`
1778 ///
1779 /// # Examples
1780 ///
1781 /// ```
1782 /// use rocksdb::Options;
1783 ///
1784 /// let mut opts = Options::default();
1785 /// opts.set_table_cache_num_shard_bits(4);
1786 /// ```
1787 pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
1788 unsafe {
1789 ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
1790 }
1791 }
1792
    /// By default, target_file_size_multiplier is 1, which means
    /// files in different levels will have similar sizes.
1795 ///
1796 /// Dynamically changeable through SetOptions() API
1797 pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
1798 unsafe {
1799 ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
1800 }
1801 }
1802
1803 /// Sets the minimum number of write buffers that will be merged together
1804 /// before writing to storage. If set to `1`, then
1805 /// all write buffers are flushed to L0 as individual files and this increases
1806 /// read amplification because a get request has to check in all of these
1807 /// files. Also, an in-memory merge may result in writing lesser
    /// files. Also, an in-memory merge may result in writing less
1809 /// individual write buffers.
1810 ///
1811 /// Default: `1`
1812 ///
1813 /// # Examples
1814 ///
1815 /// ```
1816 /// use rocksdb::Options;
1817 ///
1818 /// let mut opts = Options::default();
1819 /// opts.set_min_write_buffer_number(2);
1820 /// ```
1821 pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
1822 unsafe {
1823 ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
1824 }
1825 }
1826
1827 /// Sets the maximum number of write buffers that are built up in memory.
1828 /// The default and the minimum number is 2, so that when 1 write buffer
1829 /// is being flushed to storage, new writes can continue to the other
1830 /// write buffer.
1831 /// If max_write_buffer_number > 3, writing will be slowed down to
1832 /// options.delayed_write_rate if we are writing to the last write buffer
1833 /// allowed.
1834 ///
1835 /// Default: `2`
1836 ///
1837 /// # Examples
1838 ///
1839 /// ```
1840 /// use rocksdb::Options;
1841 ///
1842 /// let mut opts = Options::default();
1843 /// opts.set_max_write_buffer_number(4);
1844 /// ```
1845 pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
1846 unsafe {
1847 ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
1848 }
1849 }
1850
1851 /// Sets the amount of data to build up in memory (backed by an unsorted log
1852 /// on disk) before converting to a sorted on-disk file.
1853 ///
1854 /// Larger values increase performance, especially during bulk loads.
1855 /// Up to max_write_buffer_number write buffers may be held in memory
1856 /// at the same time,
1857 /// so you may wish to adjust this parameter to control memory usage.
1858 /// Also, a larger write buffer will result in a longer recovery time
1859 /// the next time the database is opened.
1860 ///
1861 /// Note that write_buffer_size is enforced per column family.
1862 /// See db_write_buffer_size for sharing memory across column families.
1863 ///
1864 /// Default: `0x4000000` (64MiB)
1865 ///
1866 /// Dynamically changeable through SetOptions() API
1867 ///
1868 /// # Examples
1869 ///
1870 /// ```
1871 /// use rocksdb::Options;
1872 ///
1873 /// let mut opts = Options::default();
1874 /// opts.set_write_buffer_size(128 * 1024 * 1024);
1875 /// ```
1876 pub fn set_write_buffer_size(&mut self, size: usize) {
1877 unsafe {
1878 ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
1879 }
1880 }
1881
1882 /// Amount of data to build up in memtables across all column
1883 /// families before writing to disk.
1884 ///
1885 /// This is distinct from write_buffer_size, which enforces a limit
1886 /// for a single memtable.
1887 ///
1888 /// This feature is disabled by default. Specify a non-zero value
1889 /// to enable it.
1890 ///
1891 /// Default: 0 (disabled)
1892 ///
1893 /// # Examples
1894 ///
1895 /// ```
1896 /// use rocksdb::Options;
1897 ///
1898 /// let mut opts = Options::default();
1899 /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
1900 /// ```
1901 pub fn set_db_write_buffer_size(&mut self, size: usize) {
1902 unsafe {
1903 ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
1904 }
1905 }
1906
1907 /// Control maximum total data size for a level.
1908 /// max_bytes_for_level_base is the max total for level-1.
1909 /// Maximum number of bytes for level L can be calculated as
1910 /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
1911 /// For example, if max_bytes_for_level_base is 200MB, and if
1912 /// max_bytes_for_level_multiplier is 10, total data size for level-1
1913 /// will be 200MB, total file size for level-2 will be 2GB,
1914 /// and total file size for level-3 will be 20GB.
1915 ///
1916 /// Default: `0x10000000` (256MiB).
1917 ///
1918 /// Dynamically changeable through SetOptions() API
1919 ///
1920 /// # Examples
1921 ///
1922 /// ```
1923 /// use rocksdb::Options;
1924 ///
1925 /// let mut opts = Options::default();
1926 /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
1927 /// ```
1928 pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
1929 unsafe {
1930 ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
1931 }
1932 }
1933
1934 /// Default: `10`
1935 ///
1936 /// # Examples
1937 ///
1938 /// ```
1939 /// use rocksdb::Options;
1940 ///
1941 /// let mut opts = Options::default();
1942 /// opts.set_max_bytes_for_level_multiplier(4.0);
1943 /// ```
1944 pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
1945 unsafe {
1946 ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
1947 }
1948 }
1949
1950 /// The manifest file is rolled over on reaching this limit.
    /// The older manifest file will be deleted.
1952 /// The default value is MAX_INT so that roll-over does not take place.
1953 ///
1954 /// # Examples
1955 ///
1956 /// ```
1957 /// use rocksdb::Options;
1958 ///
1959 /// let mut opts = Options::default();
1960 /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
1961 /// ```
1962 pub fn set_max_manifest_file_size(&mut self, size: usize) {
1963 unsafe {
1964 ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
1965 }
1966 }
1967
1968 /// Sets the target file size for compaction.
1969 /// target_file_size_base is per-file size for level-1.
1970 /// Target file size for level L can be calculated by
1971 /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
1972 /// For example, if target_file_size_base is 2MB and
1973 /// target_file_size_multiplier is 10, then each file on level-1 will
1974 /// be 2MB, and each file on level 2 will be 20MB,
1975 /// and each file on level-3 will be 200MB.
1976 ///
1977 /// Default: `0x4000000` (64MiB)
1978 ///
1979 /// Dynamically changeable through SetOptions() API
1980 ///
1981 /// # Examples
1982 ///
1983 /// ```
1984 /// use rocksdb::Options;
1985 ///
1986 /// let mut opts = Options::default();
1987 /// opts.set_target_file_size_base(128 * 1024 * 1024);
1988 /// ```
1989 pub fn set_target_file_size_base(&mut self, size: u64) {
1990 unsafe {
1991 ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
1992 }
1993 }
1994
1995 /// Sets the minimum number of write buffers that will be merged together
1996 /// before writing to storage. If set to `1`, then
1997 /// all write buffers are flushed to L0 as individual files and this increases
1998 /// read amplification because a get request has to check in all of these
    /// files. Also, an in-memory merge may result in writing less
2000 /// data to storage if there are duplicate records in each of these
2001 /// individual write buffers.
2002 ///
2003 /// Default: `1`
2004 ///
2005 /// # Examples
2006 ///
2007 /// ```
2008 /// use rocksdb::Options;
2009 ///
2010 /// let mut opts = Options::default();
2011 /// opts.set_min_write_buffer_number_to_merge(2);
2012 /// ```
2013 pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
2014 unsafe {
2015 ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
2016 }
2017 }
2018
2019 /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
2020 /// level-0 compaction will not be triggered by number of files at all.
2021 ///
2022 /// Default: `4`
2023 ///
2024 /// Dynamically changeable through SetOptions() API
2025 ///
2026 /// # Examples
2027 ///
2028 /// ```
2029 /// use rocksdb::Options;
2030 ///
2031 /// let mut opts = Options::default();
2032 /// opts.set_level_zero_file_num_compaction_trigger(8);
2033 /// ```
2034 pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2035 unsafe {
2036 ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2037 }
2038 }
2039
2040 /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
    /// point. A value < `0` means that no write slowdown will be triggered by
2042 /// number of files in level-0.
2043 ///
2044 /// Default: `20`
2045 ///
2046 /// Dynamically changeable through SetOptions() API
2047 ///
2048 /// # Examples
2049 ///
2050 /// ```
2051 /// use rocksdb::Options;
2052 ///
2053 /// let mut opts = Options::default();
2054 /// opts.set_level_zero_slowdown_writes_trigger(10);
2055 /// ```
2056 pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2057 unsafe {
2058 ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2059 }
2060 }
2061
2062 /// Sets the maximum number of level-0 files. We stop writes at this point.
2063 ///
2064 /// Default: `24`
2065 ///
2066 /// Dynamically changeable through SetOptions() API
2067 ///
2068 /// # Examples
2069 ///
2070 /// ```
2071 /// use rocksdb::Options;
2072 ///
2073 /// let mut opts = Options::default();
2074 /// opts.set_level_zero_stop_writes_trigger(48);
2075 /// ```
2076 pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2077 unsafe {
2078 ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2079 }
2080 }
2081
2082 /// Sets the compaction style.
2083 ///
2084 /// Default: DBCompactionStyle::Level
2085 ///
2086 /// # Examples
2087 ///
2088 /// ```
2089 /// use rocksdb::{Options, DBCompactionStyle};
2090 ///
2091 /// let mut opts = Options::default();
2092 /// opts.set_compaction_style(DBCompactionStyle::Universal);
2093 /// ```
2094 pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2095 unsafe {
2096 ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2097 }
2098 }
2099
2100 /// Sets the options needed to support Universal Style compactions.
2101 pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2102 unsafe {
2103 ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2104 }
2105 }
2106
2107 /// Sets the options for FIFO compaction style.
2108 pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2109 unsafe {
2110 ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2111 }
2112 }
2113
    /// Setting unordered_write to true trades the immutability guarantee of
    /// snapshots for higher write throughput. This violates the
2116 /// repeatability one expects from ::Get from a snapshot, as well as
2117 /// ::MultiGet and Iterator's consistent-point-in-time view property.
2118 /// If the application cannot tolerate the relaxed guarantees, it can implement
2119 /// its own mechanisms to work around that and yet benefit from the higher
2120 /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2121 /// two_write_queues=true is one way to achieve immutable snapshots despite
2122 /// unordered_write.
2123 ///
2124 /// By default, i.e., when it is false, rocksdb does not advance the sequence
2125 /// number for new snapshots unless all the writes with lower sequence numbers
    /// are already finished. This provides the immutability that we expect from
2127 /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2128 /// snapshots, the snapshot immutability results into Iterator and MultiGet
2129 /// offering consistent-point-in-time view. If set to true, although
2130 /// Read-Your-Own-Write property is still provided, the snapshot immutability
2131 /// property is relaxed: the writes issued after the snapshot is obtained (with
    /// larger sequence numbers) will still not be visible to reads from that
    /// snapshot; however, there might still be pending writes (with lower
    /// sequence numbers) that will change the state visible to the snapshot
    /// after they land in the memtable.
2136 ///
2137 /// Default: false
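    ///
    /// # Examples
    ///
    /// A minimal sketch; only enable this if the relaxed snapshot guarantees
    /// described above are acceptable for your workload.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_unordered_write(true);
    /// ```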
2138 pub fn set_unordered_write(&mut self, unordered: bool) {
2139 unsafe {
2140 ffi::rocksdb_options_set_unordered_write(self.inner, c_uchar::from(unordered));
2141 }
2142 }
2143
2144 /// Sets maximum number of threads that will
2145 /// concurrently perform a compaction job by breaking it into multiple,
2146 /// smaller ones that are run simultaneously.
2147 ///
2148 /// Default: 1 (i.e. no subcompactions)
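    ///
    /// # Examples
    ///
    /// A minimal sketch; the value 4 is illustrative, not a recommendation.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_subcompactions(4);
    /// ```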
2149 pub fn set_max_subcompactions(&mut self, num: u32) {
2150 unsafe {
2151 ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2152 }
2153 }
2154
2155 /// Sets maximum number of concurrent background jobs
2156 /// (compactions and flushes).
2157 ///
2158 /// Default: 2
2159 ///
2160 /// Dynamically changeable through SetDBOptions() API.
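    ///
    /// # Examples
    ///
    /// A minimal sketch; doubling the default job count is illustrative only.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_background_jobs(4);
    /// ```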
2161 pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2162 unsafe {
2163 ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2164 }
2165 }
2166
2167 /// Sets the maximum number of concurrent background compaction jobs, submitted to
2168 /// the default LOW priority thread pool.
2169 /// We first try to schedule compactions based on
    /// `base_background_compactions`. If the compaction cannot catch up, we
2171 /// will increase number of compaction threads up to
2172 /// `max_background_compactions`.
2173 ///
2174 /// If you're increasing this, also consider increasing number of threads in
2175 /// LOW priority thread pool. For more information, see
2176 /// Env::SetBackgroundThreads
2177 ///
2178 /// Default: `1`
2179 ///
2180 /// # Examples
2181 ///
2182 /// ```
2183 /// use rocksdb::Options;
2184 ///
2185 /// let mut opts = Options::default();
2186 /// #[allow(deprecated)]
2187 /// opts.set_max_background_compactions(2);
2188 /// ```
2189 #[deprecated(
2190 since = "0.15.0",
2191 note = "RocksDB automatically decides this based on the value of max_background_jobs"
2192 )]
2193 pub fn set_max_background_compactions(&mut self, n: c_int) {
2194 unsafe {
2195 ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2196 }
2197 }
2198
2199 /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2200 /// the HIGH priority thread pool.
2201 ///
2202 /// By default, all background jobs (major compaction and memtable flush) go
2203 /// to the LOW priority pool. If this option is set to a positive number,
2204 /// memtable flush jobs will be submitted to the HIGH priority pool.
2205 /// It is important when the same Env is shared by multiple db instances.
2206 /// Without a separate pool, long running major compaction jobs could
2207 /// potentially block memtable flush jobs of other db instances, leading to
2208 /// unnecessary Put stalls.
2209 ///
2210 /// If you're increasing this, also consider increasing number of threads in
2211 /// HIGH priority thread pool. For more information, see
2212 /// Env::SetBackgroundThreads
2213 ///
2214 /// Default: `1`
2215 ///
2216 /// # Examples
2217 ///
2218 /// ```
2219 /// use rocksdb::Options;
2220 ///
2221 /// let mut opts = Options::default();
2222 /// #[allow(deprecated)]
2223 /// opts.set_max_background_flushes(2);
2224 /// ```
2225 #[deprecated(
2226 since = "0.15.0",
2227 note = "RocksDB automatically decides this based on the value of max_background_jobs"
2228 )]
2229 pub fn set_max_background_flushes(&mut self, n: c_int) {
2230 unsafe {
2231 ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2232 }
2233 }
2234
2235 /// Disables automatic compactions. Manual compactions can still
2236 /// be issued on this column family
2237 ///
2238 /// Default: `false`
2239 ///
2240 /// Dynamically changeable through SetOptions() API
2241 ///
2242 /// # Examples
2243 ///
2244 /// ```
2245 /// use rocksdb::Options;
2246 ///
2247 /// let mut opts = Options::default();
2248 /// opts.set_disable_auto_compactions(true);
2249 /// ```
2250 pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2251 unsafe {
2252 ffi::rocksdb_options_set_disable_auto_compactions(self.inner, c_int::from(disable));
2253 }
2254 }
2255
    /// Sets the page size for huge pages used by the memtable arena.
    /// If <= 0, it won't allocate from huge pages but from malloc.
    /// Users are responsible for reserving huge pages in advance, for
    /// example:
    /// sysctl -w vm.nr_hugepages=20
    /// See the Linux doc Documentation/vm/hugetlbpage.txt
    /// If there aren't enough free huge pages available, it will fall back to
    /// malloc.
2265 ///
2266 /// Dynamically changeable through SetOptions() API
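    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes 2MiB huge pages have been reserved
    /// beforehand as described above.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Assumes huge pages were reserved, e.g. via vm.nr_hugepages.
    /// opts.set_memtable_huge_page_size(2 * 1024 * 1024);
    /// ```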
2267 pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2268 unsafe {
2269 ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2270 }
2271 }
2272
2273 /// Sets the maximum number of successive merge operations on a key in the memtable.
2274 ///
2275 /// When a merge operation is added to the memtable and the maximum number of
2276 /// successive merges is reached, the value of the key will be calculated and
2277 /// inserted into the memtable instead of the merge operation. This will
2278 /// ensure that there are never more than max_successive_merges merge
2279 /// operations in the memtable.
2280 ///
2281 /// Default: 0 (disabled)
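    ///
    /// # Examples
    ///
    /// A minimal sketch; capping successive merges at 100 is illustrative.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_successive_merges(100);
    /// ```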
2282 pub fn set_max_successive_merges(&mut self, num: usize) {
2283 unsafe {
2284 ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2285 }
2286 }
2287
2288 /// Control locality of bloom filter probes to improve cache miss rate.
2289 /// This option only applies to memtable prefix bloom and plaintable
2290 /// prefix bloom. It essentially limits the max number of cache lines each
2291 /// bloom filter check can touch.
2292 ///
2293 /// This optimization is turned off when set to 0. The number should never
    /// be greater than the number of probes. This option can boost performance
    /// for in-memory workloads but should be used with care since it can cause
    /// a higher false positive rate.
2297 ///
2298 /// Default: 0
2299 pub fn set_bloom_locality(&mut self, v: u32) {
2300 unsafe {
2301 ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2302 }
2303 }
2304
2305 /// Enable/disable thread-safe inplace updates.
2306 ///
    /// Updates are performed in place only if:
    /// * the key exists in the current memtable
    /// * sizeof(new_value) <= sizeof(old_value)
    /// * old_value for that key is a put, i.e. kTypeValue
2311 ///
2312 /// Default: false.
2313 pub fn set_inplace_update_support(&mut self, enabled: bool) {
2314 unsafe {
2315 ffi::rocksdb_options_set_inplace_update_support(self.inner, c_uchar::from(enabled));
2316 }
2317 }
2318
2319 /// Sets the number of locks used for inplace update.
2320 ///
2321 /// Default: 10000 when inplace_update_support = true, otherwise 0.
2322 pub fn set_inplace_update_locks(&mut self, num: usize) {
2323 unsafe {
2324 ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2325 }
2326 }
2327
2328 /// Different max-size multipliers for different levels.
2329 /// These are multiplied by max_bytes_for_level_multiplier to arrive
2330 /// at the max-size of each level.
2331 ///
2332 /// Default: 1
2333 ///
2334 /// Dynamically changeable through SetOptions() API
2335 pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2336 let count = level_values.len();
2337 unsafe {
2338 ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2339 self.inner,
2340 level_values.as_ptr() as *mut c_int,
2341 count,
2342 );
2343 }
2344 }
2345
2346 /// If true, then DB::Open() will not fetch and check sizes of all sst files.
2347 /// This may significantly speed up startup if there are many sst files,
2348 /// especially when using non-default Env with expensive GetFileSize().
2349 /// We'll still check that all required sst files exist.
2350 /// If paranoid_checks is false, this option is ignored, and sst files are
2351 /// not checked at all.
2352 ///
2353 /// Default: false
2354 pub fn set_skip_checking_sst_file_sizes_on_db_open(&mut self, value: bool) {
2355 unsafe {
2356 ffi::rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
2357 self.inner,
2358 c_uchar::from(value),
2359 );
2360 }
2361 }
2362
    /// The total maximum size (in bytes) of write buffers to maintain in memory
2364 /// including copies of buffers that have already been flushed. This parameter
2365 /// only affects trimming of flushed buffers and does not affect flushing.
2366 /// This controls the maximum amount of write history that will be available
2367 /// in memory for conflict checking when Transactions are used. The actual
2368 /// size of write history (flushed Memtables) might be higher than this limit
2369 /// if further trimming will reduce write history total size below this
2370 /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2371 /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2372 /// Because trimming the next Memtable of size 20MB will reduce total memory
2373 /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2374 ///
2375 /// When using an OptimisticTransactionDB:
2376 /// If this value is too low, some transactions may fail at commit time due
2377 /// to not being able to determine whether there were any write conflicts.
2378 ///
2379 /// When using a TransactionDB:
2380 /// If Transaction::SetSnapshot is used, TransactionDB will read either
2381 /// in-memory write buffers or SST files to do write-conflict checking.
2382 /// Increasing this value can reduce the number of reads to SST files
2383 /// done for conflict detection.
2384 ///
2385 /// Setting this value to 0 will cause write buffers to be freed immediately
2386 /// after they are flushed. If this value is set to -1,
2387 /// 'max_write_buffer_number * write_buffer_size' will be used.
2388 ///
2389 /// Default:
2390 /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2391 /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2392 /// if it is not explicitly set by the user. Otherwise, the default is 0.
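    ///
    /// # Examples
    ///
    /// A minimal sketch; 64MiB mirrors the worked example above and is not a
    /// tuning recommendation.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Keep up to 64MiB of flushed memtables around for conflict checking.
    /// opts.set_max_write_buffer_size_to_maintain(64 * 1024 * 1024);
    /// ```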
2393 pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2394 unsafe {
2395 ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2396 }
2397 }
2398
    /// By default, a single write thread queue is maintained. The thread that
    /// reaches the head of the queue becomes the write batch group leader and
    /// is responsible for writing to the WAL and the memtable for the batch
    /// group.
    ///
    /// If enable_pipelined_write is true, separate write thread queues are
    /// maintained for WAL writes and memtable writes. A write thread first
    /// enters the WAL writer queue and then the memtable writer queue. A thread
    /// pending on the WAL writer queue thus only has to wait for previous
    /// writers to finish their WAL writing, not their memtable writing.
    /// Enabling this feature may improve write throughput and reduce the
    /// latency of the prepare phase of two-phase commit.
2410 ///
2411 /// Default: false
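    ///
    /// # Examples
    ///
    /// A minimal sketch of enabling pipelined WAL/memtable writes.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_enable_pipelined_write(true);
    /// ```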
2412 pub fn set_enable_pipelined_write(&mut self, value: bool) {
2413 unsafe {
2414 ffi::rocksdb_options_set_enable_pipelined_write(self.inner, c_uchar::from(value));
2415 }
2416 }
2417
2418 /// Defines the underlying memtable implementation.
2419 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2420 /// Defaults to using a skiplist.
2421 ///
2422 /// # Examples
2423 ///
2424 /// ```
2425 /// use rocksdb::{Options, MemtableFactory};
2426 /// let mut opts = Options::default();
2427 /// let factory = MemtableFactory::HashSkipList {
2428 /// bucket_count: 1_000_000,
2429 /// height: 4,
2430 /// branching_factor: 4,
2431 /// };
2432 ///
2433 /// opts.set_allow_concurrent_memtable_write(false);
2434 /// opts.set_memtable_factory(factory);
2435 /// ```
2436 pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2437 match factory {
2438 MemtableFactory::Vector => unsafe {
2439 ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2440 },
2441 MemtableFactory::HashSkipList {
2442 bucket_count,
2443 height,
2444 branching_factor,
2445 } => unsafe {
2446 ffi::rocksdb_options_set_hash_skip_list_rep(
2447 self.inner,
2448 bucket_count,
2449 height,
2450 branching_factor,
2451 );
2452 },
2453 MemtableFactory::HashLinkList { bucket_count } => unsafe {
2454 ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2455 },
2456 };
2457 }
2458
2459 pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2460 unsafe {
2461 ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2462 }
2463 self.outlive.block_based = Some(factory.outlive.clone());
2464 }
2465
2466 /// Sets the table factory to a CuckooTableFactory (the default table
2467 /// factory is a block-based table factory that provides a default
2468 /// implementation of TableBuilder and TableReader with default
2469 /// BlockBasedTableOptions).
2470 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2471 /// # Examples
2472 ///
2473 /// ```
2474 /// use rocksdb::{Options, CuckooTableOptions};
2475 ///
2476 /// let mut opts = Options::default();
2477 /// let mut factory_opts = CuckooTableOptions::default();
2478 /// factory_opts.set_hash_ratio(0.8);
2479 /// factory_opts.set_max_search_depth(20);
2480 /// factory_opts.set_cuckoo_block_size(10);
2481 /// factory_opts.set_identity_as_first_hash(true);
2482 /// factory_opts.set_use_module_hash(false);
2483 ///
2484 /// opts.set_cuckoo_table_factory(&factory_opts);
2485 /// ```
2486 pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2487 unsafe {
2488 ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2489 }
2490 }
2491
2492 // This is a factory that provides TableFactory objects.
2493 // Default: a block-based table factory that provides a default
2494 // implementation of TableBuilder and TableReader with default
2495 // BlockBasedTableOptions.
2496 /// Sets the factory as plain table.
2497 /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
2498 /// information.
2499 ///
2500 /// # Examples
2501 ///
2502 /// ```
2503 /// use rocksdb::{Options, PlainTableFactoryOptions};
2504 ///
2505 /// let mut opts = Options::default();
2506 /// let factory_opts = PlainTableFactoryOptions {
2507 /// user_key_length: 0,
2508 /// bloom_bits_per_key: 20,
2509 /// hash_table_ratio: 0.75,
2510 /// index_sparseness: 16,
2511 /// };
2512 ///
2513 /// opts.set_plain_table_factory(&factory_opts);
2514 /// ```
2515 pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2516 unsafe {
2517 ffi::rocksdb_options_set_plain_table_factory(
2518 self.inner,
2519 options.user_key_length,
2520 options.bloom_bits_per_key,
2521 options.hash_table_ratio,
2522 options.index_sparseness,
2523 );
2524 }
2525 }
2526
2527 /// Sets the start level to use compression.
2528 pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2529 unsafe {
2530 ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2531 }
2532 }
2533
2534 /// Measure IO stats in compactions and flushes, if `true`.
2535 ///
2536 /// Default: `false`
2537 ///
2538 /// # Examples
2539 ///
2540 /// ```
2541 /// use rocksdb::Options;
2542 ///
2543 /// let mut opts = Options::default();
2544 /// opts.set_report_bg_io_stats(true);
2545 /// ```
2546 pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2547 unsafe {
2548 ffi::rocksdb_options_set_report_bg_io_stats(self.inner, c_int::from(enable));
2549 }
2550 }
2551
2552 /// Once write-ahead logs exceed this size, we will start forcing the flush of
2553 /// column families whose memtables are backed by the oldest live WAL file
2554 /// (i.e. the ones that are causing all the space amplification).
2555 ///
2556 /// Default: `0`
2557 ///
2558 /// # Examples
2559 ///
2560 /// ```
2561 /// use rocksdb::Options;
2562 ///
2563 /// let mut opts = Options::default();
2564 /// // Set max total wal size to 1G.
2565 /// opts.set_max_total_wal_size(1 << 30);
2566 /// ```
2567 pub fn set_max_total_wal_size(&mut self, size: u64) {
2568 unsafe {
2569 ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2570 }
2571 }
2572
2573 /// Recovery mode to control the consistency while replaying WAL.
2574 ///
2575 /// Default: DBRecoveryMode::PointInTime
2576 ///
2577 /// # Examples
2578 ///
2579 /// ```
2580 /// use rocksdb::{Options, DBRecoveryMode};
2581 ///
2582 /// let mut opts = Options::default();
2583 /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
2584 /// ```
2585 pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
2586 unsafe {
2587 ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
2588 }
2589 }
2590
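    /// Enables the collection of internal performance statistics, which can
    /// then be retrieved as a formatted string via `get_statistics`.
    ///
    /// # Examples
    ///
    /// A minimal sketch of enabling statistics and reading them back.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.enable_statistics();
    /// assert!(opts.get_statistics().is_some());
    /// ```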
2591 pub fn enable_statistics(&mut self) {
2592 unsafe {
2593 ffi::rocksdb_options_enable_statistics(self.inner);
2594 }
2595 }
2596
2597 pub fn get_statistics(&self) -> Option<String> {
2598 unsafe {
2599 let value = ffi::rocksdb_options_statistics_get_string(self.inner);
2600 if value.is_null() {
2601 return None;
2602 }
2603
2604 // Must have valid UTF-8 format.
2605 let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
2606 libc::free(value as *mut c_void);
2607 Some(s)
2608 }
2609 }
2610
    /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec` seconds.
2612 ///
2613 /// Default: `600` (10 mins)
2614 ///
2615 /// # Examples
2616 ///
2617 /// ```
2618 /// use rocksdb::Options;
2619 ///
2620 /// let mut opts = Options::default();
2621 /// opts.set_stats_dump_period_sec(300);
2622 /// ```
2623 pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
2624 unsafe {
2625 ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
2626 }
2627 }
2628
    /// If not zero, persist `rocksdb.stats` internally every `stats_persist_period_sec` seconds.
2630 ///
2631 /// Default: `600` (10 mins)
2632 ///
2633 /// # Examples
2634 ///
2635 /// ```
2636 /// use rocksdb::Options;
2637 ///
2638 /// let mut opts = Options::default();
2639 /// opts.set_stats_persist_period_sec(5);
2640 /// ```
2641 pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
2642 unsafe {
2643 ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
2644 }
2645 }
2646
2647 /// When set to true, reading SST files will opt out of the filesystem's
2648 /// readahead. Setting this to false may improve sequential iteration
2649 /// performance.
2650 ///
2651 /// Default: `true`
2652 pub fn set_advise_random_on_open(&mut self, advise: bool) {
2653 unsafe {
2654 ffi::rocksdb_options_set_advise_random_on_open(self.inner, c_uchar::from(advise));
2655 }
2656 }
2657
2658 /// Specifies the file access pattern once a compaction is started.
2659 ///
2660 /// It will be applied to all input files of a compaction.
2661 ///
2662 /// Default: Normal
2663 pub fn set_access_hint_on_compaction_start(&mut self, pattern: AccessHint) {
2664 unsafe {
2665 ffi::rocksdb_options_set_access_hint_on_compaction_start(self.inner, pattern as c_int);
2666 }
2667 }
2668
    /// Enable/disable adaptive mutex, which spins in user space before resorting to the kernel.
2670 ///
2671 /// This could reduce context switch when the mutex is not
2672 /// heavily contended. However, if the mutex is hot, we could end up
2673 /// wasting spin time.
2674 ///
2675 /// Default: false
2676 pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
2677 unsafe {
2678 ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, c_uchar::from(enabled));
2679 }
2680 }
2681
2682 /// Sets the number of levels for this database.
2683 pub fn set_num_levels(&mut self, n: c_int) {
2684 unsafe {
2685 ffi::rocksdb_options_set_num_levels(self.inner, n);
2686 }
2687 }
2688
2689 /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
2690 /// creates a prefix bloom filter for each memtable with the size of
2691 /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
2692 ///
2693 /// Default: `0`
2694 ///
2695 /// # Examples
2696 ///
2697 /// ```
2698 /// use rocksdb::{Options, SliceTransform};
2699 ///
2700 /// let mut opts = Options::default();
2701 /// let transform = SliceTransform::create_fixed_prefix(10);
2702 /// opts.set_prefix_extractor(transform);
2703 /// opts.set_memtable_prefix_bloom_ratio(0.2);
2704 /// ```
2705 pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
2706 unsafe {
2707 ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
2708 }
2709 }
2710
2711 /// Sets the maximum number of bytes in all compacted files.
    /// We try to limit the number of bytes in one compaction to be lower than
    /// this threshold, but this is not guaranteed.
2714 ///
2715 /// Value 0 will be sanitized.
2716 ///
2717 /// Default: target_file_size_base * 25
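    ///
    /// # Examples
    ///
    /// A minimal sketch; the value mirrors the default formula for a 64MiB
    /// target file size and is purely illustrative.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_compaction_bytes(25 * 64 * 1024 * 1024);
    /// ```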
2718 pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
2719 unsafe {
2720 ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
2721 }
2722 }
2723
2724 /// Specifies the absolute path of the directory the
2725 /// write-ahead log (WAL) should be written to.
2726 ///
2727 /// Default: same directory as the database
2728 ///
2729 /// # Examples
2730 ///
2731 /// ```
2732 /// use rocksdb::Options;
2733 ///
2734 /// let mut opts = Options::default();
2735 /// opts.set_wal_dir("/path/to/dir");
2736 /// ```
2737 pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
2738 let p = to_cpath(path).unwrap();
2739 unsafe {
2740 ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
2741 }
2742 }
2743
2744 /// Sets the WAL ttl in seconds.
2745 ///
2746 /// The following two options affect how archived logs will be deleted.
    /// 1. If both are set to 0, logs will be deleted asap and will not get into
    /// the archive.
    /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
    /// WAL files will be checked every 10 min, and if the total size is greater
    /// than wal_size_limit_mb, they will be deleted starting with the
    /// earliest until size_limit is met. All empty files will be deleted.
    /// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
2754 /// WAL files will be checked every wal_ttl_seconds / 2 and those that
2755 /// are older than wal_ttl_seconds will be deleted.
2756 /// 4. If both are not 0, WAL files will be checked every 10 min and both
2757 /// checks will be performed with ttl being first.
2758 ///
2759 /// Default: 0
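    ///
    /// # Examples
    ///
    /// A minimal sketch; a one-day TTL is illustrative only.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // Keep archived WAL files for up to one day.
    /// opts.set_wal_ttl_seconds(24 * 60 * 60);
    /// ```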
2760 pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
2761 unsafe {
2762 ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
2763 }
2764 }
2765
2766 /// Sets the WAL size limit in MB.
2767 ///
    /// If the total size of WAL files is greater than wal_size_limit_mb,
2769 /// they will be deleted starting with the earliest until size_limit is met.
2770 ///
2771 /// Default: 0
2772 pub fn set_wal_size_limit_mb(&mut self, size: u64) {
2773 unsafe {
2774 ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
2775 }
2776 }
2777
2778 /// Sets the number of bytes to preallocate (via fallocate) the manifest files.
2779 ///
2780 /// Default is 4MB, which is reasonable to reduce random IO
2781 /// as well as prevent overallocation for mounts that preallocate
2782 /// large amounts of data (such as xfs's allocsize option).
2783 pub fn set_manifest_preallocation_size(&mut self, size: usize) {
2784 unsafe {
2785 ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
2786 }
2787 }
2788
2789 /// If true, then DB::Open() will not update the statistics used to optimize
2790 /// compaction decision by loading table properties from many files.
    /// Turning off this feature will improve DBOpen time, especially in a disk environment.
2792 ///
2793 /// Default: false
2794 pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
2795 unsafe {
2796 ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, c_uchar::from(skip));
2797 }
2798 }
2799
2800 /// Specify the maximal number of info log files to be kept.
2801 ///
2802 /// Default: 1000
2803 ///
2804 /// # Examples
2805 ///
2806 /// ```
2807 /// use rocksdb::Options;
2808 ///
2809 /// let mut options = Options::default();
2810 /// options.set_keep_log_file_num(100);
2811 /// ```
2812 pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
2813 unsafe {
2814 ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
2815 }
2816 }
2817
2818 /// Allow the OS to mmap file for writing.
2819 ///
2820 /// Default: false
2821 ///
2822 /// # Examples
2823 ///
2824 /// ```
2825 /// use rocksdb::Options;
2826 ///
2827 /// let mut options = Options::default();
2828 /// options.set_allow_mmap_writes(true);
2829 /// ```
2830 pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
2831 unsafe {
2832 ffi::rocksdb_options_set_allow_mmap_writes(self.inner, c_uchar::from(is_enabled));
2833 }
2834 }
2835
2836 /// Allow the OS to mmap file for reading sst tables.
2837 ///
2838 /// Default: false
2839 ///
2840 /// # Examples
2841 ///
2842 /// ```
2843 /// use rocksdb::Options;
2844 ///
2845 /// let mut options = Options::default();
2846 /// options.set_allow_mmap_reads(true);
2847 /// ```
2848 pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
2849 unsafe {
2850 ffi::rocksdb_options_set_allow_mmap_reads(self.inner, c_uchar::from(is_enabled));
2851 }
2852 }
2853
2854 /// If enabled, WAL is not flushed automatically after each write. Instead it
2855 /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
2856 /// to its file.
2857 ///
2858 /// Default: false
2859 ///
2860 /// # Examples
2861 ///
2862 /// ```
2863 /// use rocksdb::Options;
2864 ///
2865 /// let mut options = Options::default();
2866 /// options.set_manual_wal_flush(true);
2867 /// ```
2868 pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
2869 unsafe {
2870 ffi::rocksdb_options_set_manual_wal_flush(self.inner, c_uchar::from(is_enabled));
2871 }
2872 }
2873
2874 /// Guarantee that all column families are flushed together atomically.
2875 /// This option applies to both manual flushes (`db.flush()`) and automatic
2876 /// background flushes caused when memtables are filled.
2877 ///
2878 /// Note that this is only useful when the WAL is disabled. When using the
2879 /// WAL, writes are always consistent across column families.
2880 ///
2881 /// Default: false
2882 ///
2883 /// # Examples
2884 ///
2885 /// ```
2886 /// use rocksdb::Options;
2887 ///
2888 /// let mut options = Options::default();
2889 /// options.set_atomic_flush(true);
2890 /// ```
2891 pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
2892 unsafe {
2893 ffi::rocksdb_options_set_atomic_flush(self.inner, c_uchar::from(atomic_flush));
2894 }
2895 }
2896
    /// Sets the global cache for table-level rows. The cache must outlive the DB instance that uses it.
2898 ///
2899 /// Default: null (disabled)
2900 /// Not supported in ROCKSDB_LITE mode!
2901 pub fn set_row_cache(&mut self, cache: &Cache) {
2902 unsafe {
2903 ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
2904 }
2905 self.outlive.row_cache = Some(cache.clone());
2906 }
2907
    /// Used to control the write rate of flushes and compactions. Flush has
    /// higher priority than compaction.
    /// If the rate limiter is enabled, bytes_per_sync is set to 1MB by default.
2911 ///
2912 /// Default: disable
2913 ///
2914 /// # Examples
2915 ///
2916 /// ```
2917 /// use rocksdb::Options;
2918 ///
2919 /// let mut options = Options::default();
2920 /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
2921 /// ```
2922 pub fn set_ratelimiter(
2923 &mut self,
2924 rate_bytes_per_sec: i64,
2925 refill_period_us: i64,
2926 fairness: i32,
2927 ) {
2928 unsafe {
2929 let ratelimiter =
2930 ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
2931 // Since limiter is wrapped in shared_ptr, we don't need to
2932 // call rocksdb_ratelimiter_destroy explicitly.
2933 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
2934 }
2935 }
2936
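    /// Sets an externally created rate limiter ([`RateLimter`]) on these
    /// options, instead of creating one internally as `set_ratelimiter` does.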
2937 pub fn set_ratelimiter_by_self(&mut self, ratelimiter: &RateLimter) {
2938 unsafe {
2939 // Since limiter is wrapped in shared_ptr, we don't need to
2940 // call rocksdb_ratelimiter_destroy explicitly.
2941 ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter.0.inner.as_ptr());
2942 }
2943 }
2944
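    /// Sets an [`SstFileManager`] on these options; in RocksDB this can be
    /// used to track and limit the disk space used by SST files.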
2945 pub fn set_sstfilemanager(&mut self, sstfilemanager: &SstFileManager) {
2946 unsafe {
2947 ffi::rocksdb_options_set_sstfilemanager(self.inner, sstfilemanager.0.inner.as_ptr());
2948 }
2949 }
2950
2951 /// Sets the maximal size of the info log file.
2952 ///
2953 /// If the log file is larger than `max_log_file_size`, a new info log file
2954 /// will be created. If `max_log_file_size` is equal to zero, all logs will
2955 /// be written to one log file.
2956 ///
2957 /// Default: 0
2958 ///
2959 /// # Examples
2960 ///
2961 /// ```
2962 /// use rocksdb::Options;
2963 ///
2964 /// let mut options = Options::default();
2965 /// options.set_max_log_file_size(0);
2966 /// ```
2967 pub fn set_max_log_file_size(&mut self, size: usize) {
2968 unsafe {
2969 ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
2970 }
2971 }
2972
2973 /// Sets the time for the info log file to roll (in seconds).
2974 ///
2975 /// If specified with non-zero value, log file will be rolled
2976 /// if it has been active longer than `log_file_time_to_roll`.
2977 /// Default: 0 (disabled)
2978 pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
2979 unsafe {
2980 ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
2981 }
2982 }
2983
2984 /// Controls the recycling of log files.
2985 ///
2986 /// If non-zero, previously written log files will be reused for new logs,
2987 /// overwriting the old data. The value indicates how many such files we will
2988 /// keep around at any point in time for later use. This is more efficient
2989 /// because the blocks are already allocated and fdatasync does not need to
2990 /// update the inode after each write.
2991 ///
2992 /// Default: 0
2993 ///
2994 /// # Examples
2995 ///
2996 /// ```
2997 /// use rocksdb::Options;
2998 ///
2999 /// let mut options = Options::default();
3000 /// options.set_recycle_log_file_num(5);
3001 /// ```
3002 pub fn set_recycle_log_file_num(&mut self, num: usize) {
3003 unsafe {
3004 ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
3005 }
3006 }
3007
    /// Sets the threshold at which all writes will be slowed down to at least
    /// delayed_write_rate if the estimated bytes needed for compaction exceed
    /// this threshold.
3010 ///
3011 /// Default: 64GB
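    ///
    /// # Examples
    ///
    /// A minimal sketch; 64GiB simply restates the default and is not a
    /// tuning recommendation.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_soft_pending_compaction_bytes_limit(64 * 1024 * 1024 * 1024);
    /// ```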
3012 pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
3013 unsafe {
3014 ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
3015 }
3016 }
3017
    /// Sets the bytes threshold at which all writes are stopped if the
    /// estimated bytes needed for compaction exceed this threshold.
3020 ///
3021 /// Default: 256GB
3022 pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
3023 unsafe {
3024 ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
3025 }
3026 }
3027
3028 /// Sets the size of one block in arena memory allocation.
3029 ///
3030 /// If <= 0, a proper value is automatically calculated (usually 1/10 of
    /// write_buffer_size).
3032 ///
3033 /// Default: 0
3034 pub fn set_arena_block_size(&mut self, size: usize) {
3035 unsafe {
3036 ffi::rocksdb_options_set_arena_block_size(self.inner, size);
3037 }
3038 }
3039
3040 /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
3041 ///
3042 /// Default: false
3043 pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
3044 unsafe {
3045 ffi::rocksdb_options_set_dump_malloc_stats(self.inner, c_uchar::from(enabled));
3046 }
3047 }
3048
3049 /// Enable whole key bloom filter in memtable. Note this will only take effect
3050 /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole key filtering
    /// can potentially reduce CPU usage for point lookups.
3052 ///
3053 /// Default: false (disable)
3054 ///
3055 /// Dynamically changeable through SetOptions() API
3056 pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
3057 unsafe {
3058 ffi::rocksdb_options_set_memtable_whole_key_filtering(
3059 self.inner,
3060 c_uchar::from(whole_key_filter),
3061 );
3062 }
3063 }
3064
3065 /// Enable the use of key-value separation.
3066 ///
3067 /// More details can be found here: [Integrated BlobDB](http://rocksdb.org/blog/2021/05/26/integrated-blob-db.html).
3068 ///
3069 /// Default: false (disable)
3070 ///
3071 /// Dynamically changeable through SetOptions() API
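    ///
    /// # Examples
    ///
    /// A minimal sketch of turning on key-value separation; the 4KiB
    /// threshold is illustrative, not a recommendation.
    ///
    /// ```
    /// use rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_enable_blob_files(true);
    /// // Only values of at least 4KiB are stored in blob files.
    /// opts.set_min_blob_size(4096);
    /// ```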
3072 pub fn set_enable_blob_files(&mut self, val: bool) {
3073 unsafe {
3074 ffi::rocksdb_options_set_enable_blob_files(self.inner, u8::from(val));
3075 }
3076 }
3077
    /// Sets the size threshold at or above which values will be written
    /// to blob files during flush or compaction.
3080 ///
3081 /// Dynamically changeable through SetOptions() API
3082 pub fn set_min_blob_size(&mut self, val: u64) {
3083 unsafe {
3084 ffi::rocksdb_options_set_min_blob_size(self.inner, val);
3085 }
3086 }
3087
3088 /// Sets the size limit for blob files.
3089 ///
3090 /// Dynamically changeable through SetOptions() API
3091 pub fn set_blob_file_size(&mut self, val: u64) {
3092 unsafe {
3093 ffi::rocksdb_options_set_blob_file_size(self.inner, val);
3094 }
3095 }
3096
3097 /// Sets the blob compression type. All blob files use the same
3098 /// compression type.
3099 ///
3100 /// Dynamically changeable through SetOptions() API
3101 pub fn set_blob_compression_type(&mut self, val: DBCompressionType) {
3102 unsafe {
3103 ffi::rocksdb_options_set_blob_compression_type(self.inner, val as _);
3104 }
3105 }
3106
3107 /// If this is set to true RocksDB will actively relocate valid blobs from the oldest blob files
3108 /// as they are encountered during compaction.
3109 ///
3110 /// Dynamically changeable through SetOptions() API
3111 pub fn set_enable_blob_gc(&mut self, val: bool) {
3112 unsafe {
3113 ffi::rocksdb_options_set_enable_blob_gc(self.inner, u8::from(val));
3114 }
3115 }
3116
3117 /// Sets the threshold that the GC logic uses to determine which blob files should be considered “old.”
3118 ///
3119 /// For example, the default value of 0.25 signals to RocksDB that blobs residing in the
3120 /// oldest 25% of blob files should be relocated by GC. This parameter can be tuned to adjust
3121 /// the trade-off between write amplification and space amplification.
3122 ///
3123 /// Dynamically changeable through SetOptions() API
3124 pub fn set_blob_gc_age_cutoff(&mut self, val: c_double) {
3125 unsafe {
3126 ffi::rocksdb_options_set_blob_gc_age_cutoff(self.inner, val);
3127 }
3128 }
3129
3130 /// Sets the blob GC force threshold.
3131 ///
3132 /// Dynamically changeable through SetOptions() API
3133 pub fn set_blob_gc_force_threshold(&mut self, val: c_double) {
3134 unsafe {
3135 ffi::rocksdb_options_set_blob_gc_force_threshold(self.inner, val);
3136 }
3137 }
3138
3139 /// Sets the blob compaction read ahead size.
3140 ///
3141 /// Dynamically changeable through SetOptions() API
3142 pub fn set_blob_compaction_readahead_size(&mut self, val: u64) {
3143 unsafe {
3144 ffi::rocksdb_options_set_blob_compaction_readahead_size(self.inner, val);
3145 }
3146 }
3147}
3148
3149impl Default for Options {
3150 fn default() -> Self {
3151 unsafe {
3152 let opts = ffi::rocksdb_options_create();
3153 assert!(!opts.is_null(), "Could not create RocksDB options");
3154
3155 Self {
3156 inner: opts,
3157 outlive: OptionsMustOutliveDB::default(),
3158 }
3159 }
3160 }
3161}
3162
3163impl FlushOptions {
3164 pub fn new() -> FlushOptions {
3165 FlushOptions::default()
3166 }
3167
3168 /// Waits until the flush is done.
3169 ///
3170 /// Default: true
3171 ///
3172 /// # Examples
3173 ///
3174 /// ```
3175 /// use rocksdb::FlushOptions;
3176 ///
3177 /// let mut options = FlushOptions::default();
3178 /// options.set_wait(false);
3179 /// ```
3180 pub fn set_wait(&mut self, wait: bool) {
3181 unsafe {
3182 ffi::rocksdb_flushoptions_set_wait(self.inner, c_uchar::from(wait));
3183 }
3184 }
3185}
3186
3187impl Default for FlushOptions {
3188 fn default() -> Self {
3189 let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3190 assert!(
3191 !flush_opts.is_null(),
3192 "Could not create RocksDB flush options"
3193 );
3194
3195 Self { inner: flush_opts }
3196 }
3197}
3198
3199impl WriteOptions {
3200 pub fn new() -> WriteOptions {
3201 WriteOptions::default()
3202 }
3203
3204 /// Sets the sync mode. If true, the write will be flushed
3205 /// from the operating system buffer cache before the write is considered complete.
3206 /// If this flag is true, writes will be slower.
3207 ///
3208 /// Default: false
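    ///
    /// # Examples
    ///
    /// A minimal sketch; syncing every write is the safest but slowest choice.
    ///
    /// ```
    /// use rocksdb::WriteOptions;
    ///
    /// let mut write_opts = WriteOptions::default();
    /// write_opts.set_sync(true);
    /// ```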
3209 pub fn set_sync(&mut self, sync: bool) {
3210 unsafe {
3211 ffi::rocksdb_writeoptions_set_sync(self.inner, c_uchar::from(sync));
3212 }
3213 }
3214
3215 /// Sets whether WAL should be active or not.
3216 /// If true, writes will not first go to the write ahead log,
    /// and the write may be lost after a crash.
3218 ///
3219 /// Default: false
3220 pub fn disable_wal(&mut self, disable: bool) {
3221 unsafe {
3222 ffi::rocksdb_writeoptions_disable_WAL(self.inner, c_int::from(disable));
3223 }
3224 }
3225
    /// If true and the user is trying to write to column families that don't exist (because they were dropped),
3227 /// ignore the write (don't return an error). If there are multiple writes in a WriteBatch,
3228 /// other writes will succeed.
3229 ///
3230 /// Default: false
3231 pub fn set_ignore_missing_column_families(&mut self, ignore: bool) {
3232 unsafe {
3233 ffi::rocksdb_writeoptions_set_ignore_missing_column_families(
3234 self.inner,
3235 c_uchar::from(ignore),
3236 );
3237 }
3238 }
3239
3240 /// If true and we need to wait or sleep for the write request, fails
3241 /// immediately with Status::Incomplete().
3242 ///
3243 /// Default: false
3244 pub fn set_no_slowdown(&mut self, no_slowdown: bool) {
3245 unsafe {
3246 ffi::rocksdb_writeoptions_set_no_slowdown(self.inner, c_uchar::from(no_slowdown));
3247 }
3248 }
3249
3250 /// If true, this write request is of lower priority if compaction is
    /// behind. In this case, if no_slowdown is also true, the request will be
    /// cancelled immediately with Status::Incomplete() returned. Otherwise, it
    /// will be slowed down. The slowdown value is determined by RocksDB to
    /// guarantee that it introduces minimum impact on high-priority writes.
3255 ///
3256 /// Default: false
3257 pub fn set_low_pri(&mut self, v: bool) {
3258 unsafe {
3259 ffi::rocksdb_writeoptions_set_low_pri(self.inner, c_uchar::from(v));
3260 }
3261 }
3262
3263 /// If true, writebatch will maintain the last insert positions of each
3264 /// memtable as hints in concurrent write. It can improve write performance
3265 /// in concurrent writes if keys in one writebatch are sequential. In
3266 /// non-concurrent writes (when concurrent_memtable_writes is false) this
3267 /// option will be ignored.
3268 ///
3269 /// Default: false
3270 pub fn set_memtable_insert_hint_per_batch(&mut self, v: bool) {
3271 unsafe {
3272 ffi::rocksdb_writeoptions_set_memtable_insert_hint_per_batch(
3273 self.inner,
3274 c_uchar::from(v),
3275 );
3276 }
3277 }
3278}
3279
3280impl Default for WriteOptions {
3281 fn default() -> Self {
3282 let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
3283 assert!(
3284 !write_opts.is_null(),
3285 "Could not create RocksDB write options"
3286 );
3287
3288 Self { inner: write_opts }
3289 }
3290}
3291
3292#[derive(Debug, Copy, Clone, PartialEq, Eq)]
3293#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
3294#[repr(i32)]
3295pub enum ReadTier {
3296 /// Reads data in memtable, block cache, OS cache or storage.
3297 All = 0,
3298 /// Reads data in memtable or block cache.
3299 BlockCache,
3300}
3301
3302impl ReadOptions {
3303 // TODO add snapshot setting here
3304 // TODO add snapshot wrapper structs with proper destructors;
3305 // that struct needs an "iterator" impl too.
3306
3307 /// Specify whether the "data block"/"index block"/"filter block"
3308 /// read for this iteration should be cached in memory?
3309 /// Callers may wish to set this field to false for bulk scans.
3310 ///
3311 /// Default: true
3312 pub fn fill_cache(&mut self, v: bool) {
3313 unsafe {
3314 ffi::rocksdb_readoptions_set_fill_cache(self.inner, c_uchar::from(v));
3315 }
3316 }
3317
3318 /// Sets the snapshot which should be used for the read.
3319 /// The snapshot must belong to the DB that is being read and must
3320 /// not have been released.
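    ///
    /// A minimal sketch (not run here since it opens a database on disk; the
    /// path is illustrative):
    ///
    /// ```no_run
    /// use rocksdb::{DB, ReadOptions};
    ///
    /// let db = DB::open_default("/tmp/rocksdb-snapshot-example").unwrap();
    /// let snapshot = db.snapshot();
    /// let mut read_opts = ReadOptions::default();
    /// read_opts.set_snapshot(&snapshot); // reads see the DB as of the snapshot
    /// ```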
    pub fn set_snapshot<D: DBAccess>(&mut self, snapshot: &SnapshotWithThreadMode<D>) {
        unsafe {
            ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
        }
    }

    /// Sets the lower bound for an iterator.
    pub fn set_iterate_lower_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
        self.set_lower_bound_impl(Some(key.into()));
    }

    /// Sets the upper bound for an iterator.
    /// The upper bound itself is not included in the iteration result.
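    ///
    /// A minimal example bounding a scan to keys in `["a", "k")`:
    ///
    /// ```
    /// use rocksdb::ReadOptions;
    ///
    /// let mut read_opts = ReadOptions::default();
    /// read_opts.set_iterate_lower_bound(b"a".to_vec()); // inclusive
    /// read_opts.set_iterate_upper_bound(b"k".to_vec()); // exclusive
    /// ```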
    pub fn set_iterate_upper_bound<K: Into<Vec<u8>>>(&mut self, key: K) {
        self.set_upper_bound_impl(Some(key.into()));
    }

    /// Sets lower and upper bounds based on the provided range. This is
    /// similar to setting lower and upper bounds separately except that it also
    /// allows either bound to be reset.
    ///
    /// The argument can be a regular Rust range, e.g. `lower..upper`. However,
    /// since the RocksDB upper bound is always excluded (i.e. the range can never
    /// be fully closed), inclusive ranges (`lower..=upper` and `..=upper`) are not
    /// supported. For example:
    ///
    /// ```
    /// let mut options = rocksdb::ReadOptions::default();
    /// options.set_iterate_range("xy".as_bytes().."xz".as_bytes());
    /// ```
    ///
    /// In addition, [`crate::PrefixRange`] can be used to specify a range of
    /// keys with a given prefix. In particular, the above example is
    /// equivalent to:
    ///
    /// ```
    /// let mut options = rocksdb::ReadOptions::default();
    /// options.set_iterate_range(rocksdb::PrefixRange("xy".as_bytes()));
    /// ```
    ///
    /// Note that setting a range using this method is separate from using prefix
    /// iterators. Prefix iterators use the prefix extractor configured for
    /// a column family. Setting bounds via [`crate::PrefixRange`] is more akin
    /// to using a manual prefix.
    ///
    /// Using this method clears any previously set bounds. In other words, the
    /// bounds can be reset by setting the range to `..` as in:
    ///
    /// ```
    /// let mut options = rocksdb::ReadOptions::default();
    /// options.set_iterate_range(..);
    /// ```
    pub fn set_iterate_range(&mut self, range: impl crate::IterateBounds) {
        let (lower, upper) = range.into_bounds();
        self.set_lower_bound_impl(lower);
        self.set_upper_bound_impl(upper);
    }

    fn set_lower_bound_impl(&mut self, bound: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref bound) = bound {
            (bound.as_ptr() as *const c_char, bound.len())
        } else if self.iterate_lower_bound.is_some() {
            (std::ptr::null(), 0)
        } else {
            return;
        };
        self.iterate_lower_bound = bound;
        unsafe {
            ffi::rocksdb_readoptions_set_iterate_lower_bound(self.inner, ptr, len);
        }
    }

    fn set_upper_bound_impl(&mut self, bound: Option<Vec<u8>>) {
        let (ptr, len) = if let Some(ref bound) = bound {
            (bound.as_ptr() as *const c_char, bound.len())
        } else if self.iterate_upper_bound.is_some() {
            (std::ptr::null(), 0)
        } else {
            return;
        };
        self.iterate_upper_bound = bound;
        unsafe {
            ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner, ptr, len);
        }
    }

    /// Specifies whether this read request should only process data that
    /// ALREADY resides in a particular cache. If the required data is not
    /// found in the specified cache, Status::Incomplete is returned.
    ///
    /// Default: ::All
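    ///
    /// A minimal example restricting a read to the block cache, assuming
    /// `ReadTier` is re-exported at the crate root like the other option types:
    ///
    /// ```
    /// use rocksdb::{ReadOptions, ReadTier};
    ///
    /// let mut read_opts = ReadOptions::default();
    /// read_opts.set_read_tier(ReadTier::BlockCache); // never touch storage
    /// ```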
    pub fn set_read_tier(&mut self, tier: ReadTier) {
        unsafe {
            ffi::rocksdb_readoptions_set_read_tier(self.inner, tier as c_int);
        }
    }

    /// Enforces that the iterator only iterates over the same
    /// prefix as the seek.
    /// This option is effective only for prefix seeks, i.e. prefix_extractor is
    /// non-null for the column family and total_order_seek is false. Unlike
    /// iterate_upper_bound, prefix_same_as_start only works within a prefix
    /// but in both directions.
    ///
    /// Default: false
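    ///
    /// A sketch of the required setup (the column family must have been opened
    /// with a prefix extractor; the fixed-prefix length of 3 is illustrative):
    ///
    /// ```
    /// use rocksdb::{Options, ReadOptions, SliceTransform};
    ///
    /// let mut db_opts = Options::default();
    /// db_opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(3));
    ///
    /// let mut read_opts = ReadOptions::default();
    /// read_opts.set_prefix_same_as_start(true);
    /// ```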
    pub fn set_prefix_same_as_start(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, c_uchar::from(v));
        }
    }

    /// Enables a total order seek regardless of the index format (e.g. hash index)
    /// used in the table. Some table formats (e.g. plain table) may not support
    /// this option.
    ///
    /// If true when calling Get(), we also skip the prefix bloom filter when
    /// reading from a block-based table. This provides a way to read existing
    /// data after changing the prefix extractor implementation.
    pub fn set_total_order_seek(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_total_order_seek(self.inner, c_uchar::from(v));
        }
    }

    /// Sets a threshold for the number of keys that can be skipped
    /// before failing an iterator seek as incomplete. A value of 0 (the default)
    /// means a request is never failed as incomplete, no matter how many keys
    /// are skipped.
    ///
    /// Default: 0
    pub fn set_max_skippable_internal_keys(&mut self, num: u64) {
        unsafe {
            ffi::rocksdb_readoptions_set_max_skippable_internal_keys(self.inner, num);
        }
    }

    /// If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
    /// schedule a background job in the flush job queue and delete obsolete
    /// files in the background.
    ///
    /// Default: false
    pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_background_purge_on_iterator_cleanup(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If true, keys deleted using the DeleteRange() API will be visible to
    /// readers until they are naturally deleted during compaction. This improves
    /// read performance in DBs with many range deletions.
    ///
    /// Default: false
    pub fn set_ignore_range_deletions(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_ignore_range_deletions(self.inner, c_uchar::from(v));
        }
    }

    /// If true, all data read from underlying storage will be
    /// verified against corresponding checksums.
    ///
    /// Default: true
    pub fn set_verify_checksums(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_verify_checksums(self.inner, c_uchar::from(v));
        }
    }

    /// If non-zero, an iterator will create a new table reader which
    /// performs reads of the given size. Using a large size (> 2 MB) can
    /// improve the performance of forward iteration on spinning disks.
    ///
    /// Default: 0
    ///
    /// ```
    /// use rocksdb::ReadOptions;
    ///
    /// let mut opts = ReadOptions::default();
    /// opts.set_readahead_size(4_194_304); // 4 MB
    /// ```
    pub fn set_readahead_size(&mut self, v: usize) {
        unsafe {
            ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
        }
    }

    /// If true, creates a tailing iterator. Note that tailing iterators
    /// only support moving in the forward direction. Iterating in reverse
    /// or seek_to_last are not supported.
    pub fn set_tailing(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_tailing(self.inner, c_uchar::from(v));
        }
    }

    /// Specifies the value of "pin_data". If true, it keeps the blocks
    /// loaded by the iterator pinned in memory as long as the iterator is not
    /// deleted. If used when reading from tables created with
    /// BlockBasedTableOptions::use_delta_encoding = false, the iterator's
    /// property "rocksdb.iterator.is-key-pinned" is guaranteed to return 1.
    ///
    /// Default: false
    pub fn set_pin_data(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_pin_data(self.inner, c_uchar::from(v));
        }
    }

    /// Asynchronously prefetch some data.
    ///
    /// Used for sequential reads and internal automatic prefetching.
    ///
    /// Default: `false`
    pub fn set_async_io(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
        }
    }
}

impl Default for ReadOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_readoptions_create(),
                iterate_upper_bound: None,
                iterate_lower_bound: None,
            }
        }
    }
}

impl IngestExternalFileOptions {
    /// Can be set to true to move the files instead of copying them.
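    ///
    /// A minimal sketch (not run here since it opens a database; the paths are
    /// illustrative, and `ingest_external_file_opts` is assumed to be available
    /// on the DB type as in the upstream rust-rocksdb API):
    ///
    /// ```no_run
    /// use rocksdb::{DB, IngestExternalFileOptions};
    ///
    /// let db = DB::open_default("/tmp/rocksdb-ingest-example").unwrap();
    /// let mut ingest_opts = IngestExternalFileOptions::default();
    /// ingest_opts.set_move_files(true); // move the SST files instead of copying
    /// db.ingest_external_file_opts(&ingest_opts, vec!["/tmp/file.sst"]).unwrap();
    /// ```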
    pub fn set_move_files(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, c_uchar::from(v));
        }
    }

    /// If set to false, keys from an ingested file could appear in existing
    /// snapshots that were created before the file was ingested.
    pub fn set_snapshot_consistency(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If set to false, IngestExternalFile() will fail if the file key range
    /// overlaps with existing keys or tombstones in the DB.
    pub fn set_allow_global_seqno(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// If set to false and the file key range overlaps with the memtable key range
    /// (a memtable flush would be required), IngestExternalFile will fail.
    pub fn set_allow_blocking_flush(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Set to true if you would like duplicate keys in the file being ingested
    /// to be skipped rather than overwriting existing data under that key.
    /// Use case: back-filling historical data into the database without
    /// overwriting an existing newer version of the data.
    /// This option can only be used if the DB has been running
    /// with allow_ingest_behind=true since the dawn of time.
    /// All files will be ingested at the bottommost level with seqno=0.
    pub fn set_ingest_behind(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, c_uchar::from(v));
        }
    }
}

impl Default for IngestExternalFileOptions {
    fn default() -> Self {
        unsafe {
            Self {
                inner: ffi::rocksdb_ingestexternalfileoptions_create(),
            }
        }
    }
}

/// Used by BlockBasedOptions::set_index_type.
pub enum BlockBasedIndexType {
    /// A space-efficient index block that is optimized for
    /// binary-search-based index.
    BinarySearch,

    /// The hash index, if enabled, will perform a hash lookup if
    /// a prefix extractor has been provided through Options::set_prefix_extractor.
    HashSearch,

    /// A two-level index implementation. Both levels are binary search indexes.
    TwoLevelIndexSearch,
}

/// Used by BlockBasedOptions::set_data_block_index_type.
#[repr(C)]
pub enum DataBlockIndexType {
    /// Use binary search when performing point lookup for keys in data blocks.
    /// This is the default.
    BinarySearch = 0,

    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
    /// compatible with databases created without this feature. Once turned on, existing data will
    /// be gradually converted to the hash index format.
    BinaryAndHash = 1,
}

/// Defines the underlying memtable implementation.
/// See the official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
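///
/// A minimal example, assuming `Options::set_memtable_factory` is available
/// as elsewhere in this crate:
///
/// ```
/// use rocksdb::{MemtableFactory, Options};
///
/// let mut opts = Options::default();
/// opts.set_memtable_factory(MemtableFactory::HashSkipList {
///     bucket_count: 1_000_000,
///     height: 4,
///     branching_factor: 4,
/// });
/// ```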
pub enum MemtableFactory {
    Vector,
    HashSkipList {
        bucket_count: usize,
        height: i32,
        branching_factor: i32,
    },
    HashLinkList {
        bucket_count: usize,
    },
}

/// Used by BlockBasedOptions::set_checksum_type.
pub enum ChecksumType {
    NoChecksum = 0,
    CRC32c = 1,
    XXHash = 2,
    XXHash64 = 3,
    XXH3 = 4, // Supported since RocksDB 6.27
}

/// Used with DBOptions::set_plain_table_factory.
/// See the official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
/// information.
///
/// Defaults:
/// * user_key_length: 0 (variable length)
/// * bloom_bits_per_key: 10
/// * hash_table_ratio: 0.75
/// * index_sparseness: 16
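///
/// A minimal example constructing the documented defaults, assuming
/// `Options::set_plain_table_factory` is available as elsewhere in this crate:
///
/// ```
/// use rocksdb::{Options, PlainTableFactoryOptions};
///
/// let mut opts = Options::default();
/// opts.set_plain_table_factory(&PlainTableFactoryOptions {
///     user_key_length: 0, // variable-length keys
///     bloom_bits_per_key: 10,
///     hash_table_ratio: 0.75,
///     index_sparseness: 16,
/// });
/// ```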
pub struct PlainTableFactoryOptions {
    pub user_key_length: u32,
    pub bloom_bits_per_key: i32,
    pub hash_table_ratio: f64,
    pub index_sparseness: usize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompressionType {
    None = ffi::rocksdb_no_compression as isize,
    Snappy = ffi::rocksdb_snappy_compression as isize,
    Zlib = ffi::rocksdb_zlib_compression as isize,
    Bz2 = ffi::rocksdb_bz2_compression as isize,
    Lz4 = ffi::rocksdb_lz4_compression as isize,
    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
    Zstd = ffi::rocksdb_zstd_compression as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBCompactionStyle {
    Level = ffi::rocksdb_level_compaction as isize,
    Universal = ffi::rocksdb_universal_compaction as isize,
    Fifo = ffi::rocksdb_fifo_compaction as isize,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum DBRecoveryMode {
    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
}

/// File access pattern once a compaction has started.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(i32)]
pub enum AccessHint {
    None = 0,
    Normal,
    Sequential,
    WillNeed,
}

pub struct FifoCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
}

impl Default for FifoCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Fifo Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for FifoCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
        }
    }
}

impl FifoCompactOptions {
    /// Sets the max table file size.
    ///
    /// Once the total sum of table file sizes reaches this, we will delete the
    /// oldest table file.
    ///
    /// Default: 1GB
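    ///
    /// A minimal example capping FIFO data at roughly 4 GB, assuming
    /// `Options::set_fifo_compaction_options` is available as elsewhere in
    /// this crate:
    ///
    /// ```
    /// use rocksdb::{DBCompactionStyle, FifoCompactOptions, Options};
    ///
    /// let mut fifo_opts = FifoCompactOptions::default();
    /// fifo_opts.set_max_table_files_size(4 * 1024 * 1024 * 1024); // 4 GB
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Fifo);
    /// opts.set_fifo_compaction_options(&fifo_opts);
    /// ```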
    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
        unsafe {
            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub enum UniversalCompactionStopStyle {
    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
}

pub struct UniversalCompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
}

impl Default for UniversalCompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
        assert!(
            !opts.is_null(),
            "Could not create RocksDB Universal Compaction Options"
        );

        Self { inner: opts }
    }
}

impl Drop for UniversalCompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
        }
    }
}

impl UniversalCompactOptions {
    /// Sets the percentage flexibility while comparing file sizes.
    /// If the candidate file(s) size is 1% smaller than the next file's size,
    /// the next file is included in the candidate set.
    ///
    /// Default: 1
    pub fn set_size_ratio(&mut self, ratio: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
        }
    }

    /// Sets the minimum number of files in a single compaction run.
    ///
    /// Default: 2
    pub fn set_min_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
        }
    }

    /// Sets the maximum number of files in a single compaction run.
    ///
    /// Default: UINT_MAX
    pub fn set_max_merge_width(&mut self, num: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
        }
    }

    /// Sets the size amplification.
    ///
    /// It is defined as the amount (in percentage) of
    /// additional storage needed to store a single byte of data in the database.
    /// For example, a size amplification of 2% means that a database that
    /// contains 100 bytes of user data may occupy up to 102 bytes of
    /// physical storage. By this definition, a fully compacted database has
    /// a size amplification of 0%. RocksDB uses the following heuristic
    /// to calculate size amplification: it assumes that all files excluding
    /// the earliest file contribute to the size amplification.
    ///
    /// Default: 200, which means that a 100-byte database could require up to
    /// 300 bytes of storage.
    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
                self.inner, v,
            );
        }
    }

    /// Sets the percentage of compression size.
    ///
    /// If this option is set to -1, all the output files
    /// will follow the compression type specified.
    ///
    /// If this option is non-negative, we will try to make sure the compressed
    /// size is just above this value. In normal cases, at least this percentage
    /// of data will be compressed.
    /// When we are compacting to a new file, the criterion for whether it needs
    /// to be compressed is as follows: assuming the list of files, sorted by
    /// generation time, is
    /// A1...An B1...Bm C1...Ct,
    /// where A1 is the newest and Ct is the oldest, and we are going to compact
    /// B1...Bm, we calculate the total size of all the files as total_size and
    /// the total size of C1...Ct as total_C; the compaction output file
    /// will be compressed iff
    /// total_C / total_size < this percentage.
    ///
    /// Default: -1
    pub fn set_compression_size_percent(&mut self, v: c_int) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
        }
    }

    /// Sets the algorithm used to stop picking files into a single compaction run.
    ///
    /// Default: ::Total
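    ///
    /// A minimal example wiring these options into an `Options` value, assuming
    /// `Options::set_universal_compaction_options` is available as elsewhere in
    /// this crate:
    ///
    /// ```
    /// use rocksdb::{
    ///     DBCompactionStyle, Options, UniversalCompactOptions, UniversalCompactionStopStyle,
    /// };
    ///
    /// let mut uni_opts = UniversalCompactOptions::default();
    /// uni_opts.set_size_ratio(1);
    /// uni_opts.set_max_size_amplification_percent(200);
    /// uni_opts.set_stop_style(UniversalCompactionStopStyle::Total);
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_style(DBCompactionStyle::Universal);
    /// opts.set_universal_compaction_options(&uni_opts);
    /// ```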
    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
        unsafe {
            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
        }
    }
}

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum BottommostLevelCompaction {
    /// Skip bottommost level compaction.
    Skip = 0,
    /// Only compact the bottommost level if there is a compaction filter.
    /// This is the default option.
    IfHaveCompactionFilter,
    /// Always compact the bottommost level.
    Force,
    /// Always compact the bottommost level, but in the bottommost level avoid
    /// double-compacting files created in the same compaction.
    ForceOptimized,
}

pub struct CompactOptions {
    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
}

impl Default for CompactOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_compactoptions_create() };
        assert!(!opts.is_null(), "Could not create RocksDB Compact Options");

        Self { inner: opts }
    }
}

impl Drop for CompactOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_compactoptions_destroy(self.inner);
        }
    }
}

impl CompactOptions {
    /// If more than one thread calls manual compaction,
    /// only one will actually schedule it while the other threads will simply wait
    /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
    /// is set to true, the call will disable scheduling of automatic compaction jobs
    /// and wait for existing automatic compaction jobs to finish.
    pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(
                self.inner,
                c_uchar::from(v),
            );
        }
    }

    /// Sets bottommost level compaction.
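    ///
    /// A minimal sketch (not run here since it opens a database; the path is
    /// illustrative, and `compact_range_opt` is assumed to be available on the
    /// DB type as in the upstream rust-rocksdb API):
    ///
    /// ```no_run
    /// use rocksdb::{BottommostLevelCompaction, CompactOptions, DB};
    ///
    /// let db = DB::open_default("/tmp/rocksdb-compact-example").unwrap();
    /// let mut compact_opts = CompactOptions::default();
    /// compact_opts.set_bottommost_level_compaction(BottommostLevelCompaction::Force);
    /// db.compact_range_opt(None::<&[u8]>, None::<&[u8]>, &compact_opts);
    /// ```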
    pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
        unsafe {
            ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
        }
    }

    /// If true, compacted files will be moved to the minimum level capable
    /// of holding the data, or to the given level if a non-negative
    /// target_level is specified.
    pub fn set_change_level(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_compactoptions_set_change_level(self.inner, c_uchar::from(v));
        }
    }

    /// If change_level is true and target_level has a non-negative value,
    /// compacted files will be moved to target_level.
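    ///
    /// A minimal example moving all compacted data to level 1:
    ///
    /// ```
    /// use rocksdb::CompactOptions;
    ///
    /// let mut compact_opts = CompactOptions::default();
    /// compact_opts.set_change_level(true);
    /// compact_opts.set_target_level(1);
    /// ```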
    pub fn set_target_level(&mut self, lvl: c_int) {
        unsafe {
            ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
        }
    }
}

/// Represents a path where SST files can be placed.
pub struct DBPath {
    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
}

impl DBPath {
    /// Creates a new path.
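    ///
    /// A minimal sketch (the path and 500 MB target size are illustrative, and
    /// `Options::set_db_paths` is assumed to be available as elsewhere in this
    /// crate):
    ///
    /// ```
    /// use rocksdb::{DBPath, Options};
    ///
    /// let path = DBPath::new("/tmp/rocksdb-fast-ssd", 500 * 1024 * 1024).unwrap();
    /// let mut opts = Options::default();
    /// opts.set_db_paths(&[path]);
    /// ```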
    pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
        let p = to_cpath(path.as_ref())?;
        let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
        if dbpath.is_null() {
            Err(Error::new(format!(
                "Could not create path for storing sst files at location: {}",
                path.as_ref().display()
            )))
        } else {
            Ok(DBPath { inner: dbpath })
        }
    }
}

impl Drop for DBPath {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_dbpath_destroy(self.inner);
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::{MemtableFactory, Options};

    #[test]
    fn test_enable_statistics() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_dump_period_sec(60);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }

    #[test]
    fn test_set_memtable_factory() {
        let mut opts = Options::default();
        opts.set_memtable_factory(MemtableFactory::Vector);
        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
        opts.set_memtable_factory(MemtableFactory::HashSkipList {
            bucket_count: 100,
            height: 4,
            branching_factor: 4,
        });
    }

    #[test]
    fn test_set_stats_persist_period_sec() {
        let mut opts = Options::default();
        opts.enable_statistics();
        opts.set_stats_persist_period_sec(5);
        assert!(opts.get_statistics().is_some());

        let opts = Options::default();
        assert!(opts.get_statistics().is_none());
    }
}