//! Universal style of compaction.

use std::os::raw::c_uint;
use std::mem;

use rocks_sys as ll;

use crate::to_raw::ToRaw;

/// Algorithm used to make a compaction request stop picking new files
/// into a single compaction run.
#[repr(C)]
pub enum CompactionStopStyle {
    /// Pick files of similar size.
    SimilarSize,
    /// Total size of picked files > next file.
    TotalSize,
}

/// Options for the Universal style of compaction.
pub struct CompactionOptionsUniversal {
    raw: *mut ll::rocks_universal_compaction_options_t,
}

impl Default for CompactionOptionsUniversal {
    fn default() -> Self {
        CompactionOptionsUniversal { raw: unsafe { ll::rocks_universal_compaction_options_create() } }
    }
}

impl ToRaw<ll::rocks_universal_compaction_options_t> for CompactionOptionsUniversal {
    fn raw(&self) -> *mut ll::rocks_universal_compaction_options_t {
        self.raw
    }
}

impl Drop for CompactionOptionsUniversal {
    fn drop(&mut self) {
        unsafe { ll::rocks_universal_compaction_options_destroy(self.raw) }
    }
}

impl CompactionOptionsUniversal {
    /// Percentage flexibility while comparing file size. If the candidate file(s)
    /// size is 1% smaller than the next file's size, then include the next file
    /// into this candidate set.
    ///
    /// Default: 1
    pub fn size_ratio(self, val: u32) -> Self {
        unsafe {
            ll::rocks_universal_compaction_options_set_size_ratio(self.raw, val as c_uint);
        }
        self
    }

    /// The minimum number of files in a single compaction run.
    ///
    /// Default: 2
    pub fn min_merge_width(self, val: u32) -> Self {
        unsafe {
            ll::rocks_universal_compaction_options_set_min_merge_width(self.raw, val as c_uint);
        }
        self
    }

    /// The maximum number of files in a single compaction run. Default: UINT_MAX
    pub fn max_merge_width(self, val: u32) -> Self {
        unsafe {
            ll::rocks_universal_compaction_options_set_max_merge_width(self.raw, val as c_uint);
        }
        self
    }

    /// The size amplification is defined as the amount (in percentage) of
    /// additional storage needed to store a single byte of data in the database.
    /// For example, a size amplification of 2% means that a database that
    /// contains 100 bytes of user data may occupy up to 102 bytes of
    /// physical storage. By this definition, a fully compacted database has
    /// a size amplification of 0%. RocksDB uses the following heuristic
    /// to calculate size amplification: it assumes that all files excluding
    /// the earliest file contribute to the size amplification.
    ///
    /// Default: 200, which means that a 100 byte database could require up to
    /// 300 bytes of storage.
    pub fn max_size_amplification_percent(self, val: u32) -> Self {
        unsafe {
            ll::rocks_universal_compaction_options_set_max_size_amplification_percent(self.raw, val);
        }
        self
    }

    /// If this option is set to -1 (the default value), all the output files
    /// will follow the compression type specified.
    ///
    /// If this option is not negative, we will try to make sure compressed
    /// size is just above this value. In normal cases, at least this percentage
    /// of data will be compressed.
    ///
    /// When we are compacting to a new file, here is the criteria whether
    /// it needs to be compressed: assuming here are the list of files sorted
    /// by generation time:
    ///
    /// > `A1...An B1...Bm C1...Ct`
    ///
    /// where A1 is the newest and Ct is the oldest, and we are going to compact
    /// B1...Bm, we calculate the total size of all the files as `total_size`, as
    /// well as the total size of C1...Ct as `total_C`; the compaction output file
    /// will be compressed iff
    ///
    /// > `total_C / total_size < this percentage`
    ///
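    /// For example, if this option is 50 and the total size of C1...Ct is 40%
    /// of `total_size`, then `40 < 50` holds and the output of compacting
    /// B1...Bm will be compressed.
    ///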
    /// Default: -1
    pub fn compression_size_percent(self, val: i32) -> Self {
        unsafe {
            ll::rocks_universal_compaction_options_set_compression_size_percent(self.raw, val);
        }
        self
    }

    /// The algorithm used to stop picking files into a single compaction run.
    ///
    /// Default: `CompactionStopStyle::TotalSize`
    pub fn stop_style(self, val: CompactionStopStyle) -> Self {
        unsafe {
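            // `CompactionStopStyle` is `#[repr(C)]`, so this transmute is assumed
            // to yield the integer value the underlying C API expects for the
            // stop style.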
            ll::rocks_universal_compaction_options_set_stop_style(self.raw, mem::transmute(val));
        }
        self
    }

    /// Option to optimize the universal multi-level compaction by enabling
    /// trivial move for non-overlapping files.
    ///
    /// Default: false
    pub fn allow_trivial_move(self, val: bool) -> Self {
        unsafe {
            ll::rocks_universal_compaction_options_set_allow_trivial_move(self.raw, val as u8);
        }
        self
    }
}
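
// A minimal usage sketch of the builder-style API above, kept as a unit test.
// It assumes nothing beyond the setters defined in this module and only checks
// that the underlying C object is allocated and that the setters chain.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn universal_compaction_options_builder() {
        // Chain a few illustrative values through the by-value setters.
        let opts = CompactionOptionsUniversal::default()
            .size_ratio(10)
            .min_merge_width(2)
            .max_merge_width(16)
            .max_size_amplification_percent(200)
            .compression_size_percent(-1)
            .stop_style(CompactionStopStyle::TotalSize)
            .allow_trivial_move(false);

        // `Default` allocates the underlying FFI handle, so the raw pointer
        // should never be null.
        assert!(!opts.raw().is_null());
    }
}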