1use libc::{c_int, c_uchar};
16use std::marker::PhantomData;
17
18use crate::cache::Cache;
19use crate::ffi_util::from_cstr_and_free;
20use crate::{DB, DBCommon, ThreadMode, TransactionDB};
21use crate::{Error, db::DBInner, ffi};
22
/// How much detail the RocksDB perf counters collect.
///
/// Passed to [`set_perf_stats`]; `#[repr(i32)]` with an explicit `= 0`
/// start so the discriminants line up with the C API's perf-level values
/// (the value is forwarded as a `c_int`).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(i32)]
pub enum PerfStatsLevel {
    /// Level has not been set yet.
    Uninitialized = 0,
    /// Collect no perf stats.
    Disable,
    /// Increment counters only; time metrics stay at zero
    /// (the test below relies on this for `SeekInternalSeekTime`).
    EnableCount,
    /// Counters plus timing, excluding mutex-related timers.
    EnableTimeExceptForMutex,
    /// Counters plus wall-clock and CPU timing, excluding mutex timers.
    EnableTimeAndCPUTimeExceptForMutex,
    /// Collect all counters and timers.
    EnableTime,
    /// Sentinel marking the end of the valid range; not a usable level.
    OutOfBound,
}
42
43include!("perf_enum.rs");
45
/// Sets the perf-stats collection level via `rocksdb_set_perf_level`.
///
/// NOTE(review): RocksDB's perf level is commonly documented as
/// thread-local — confirm before relying on it across threads.
pub fn set_perf_stats(lvl: PerfStatsLevel) {
    unsafe {
        ffi::rocksdb_set_perf_level(lvl as c_int);
    }
}
52
/// Owned handle to a RocksDB perf context; the underlying C object is
/// created in `Default` and destroyed in `Drop`.
pub struct PerfContext {
    // Raw C handle; `Default` asserts it is non-null on creation.
    pub(crate) inner: *mut ffi::rocksdb_perfcontext_t,
}
58
59impl Default for PerfContext {
60 fn default() -> Self {
61 let ctx = unsafe { ffi::rocksdb_perfcontext_create() };
62 assert!(!ctx.is_null(), "Could not create Perf Context");
63
64 Self { inner: ctx }
65 }
66}
67
impl Drop for PerfContext {
    fn drop(&mut self) {
        // SAFETY: `inner` is the non-null pointer obtained in `Default`
        // and is owned exclusively by this struct.
        unsafe {
            ffi::rocksdb_perfcontext_destroy(self.inner);
        }
    }
}
75
impl PerfContext {
    /// Resets all counters in this context to zero
    /// (the test below checks metrics read 0 after a reset).
    pub fn reset(&mut self) {
        unsafe {
            ffi::rocksdb_perfcontext_reset(self.inner);
        }
    }

    /// Returns a textual report of the collected counters.
    ///
    /// `exclude_zero_counters` is forwarded to the C API as 0/1;
    /// presumably a true value omits counters whose value is zero.
    pub fn report(&self, exclude_zero_counters: bool) -> String {
        unsafe {
            let ptr =
                ffi::rocksdb_perfcontext_report(self.inner, c_uchar::from(exclude_zero_counters));
            // The C string is allocated by RocksDB; this helper copies it
            // into a Rust `String` and frees the original buffer.
            from_cstr_and_free(ptr)
        }
    }

    /// Returns the current value of a single perf counter.
    pub fn metric(&self, id: PerfMetric) -> u64 {
        unsafe { ffi::rocksdb_perfcontext_metric(self.inner, id as c_int) }
    }
}
98
/// Snapshot of the approximate memory-usage figures returned by
/// [`get_memory_usage_stats`].
pub struct MemoryUsageStats {
    /// Approximate memory used by all mem-tables.
    pub mem_table_total: u64,
    /// Approximate memory used by unflushed mem-tables.
    pub mem_table_unflushed: u64,
    /// Approximate memory used by mem-table readers.
    pub mem_table_readers_total: u64,
    /// Approximate memory used by the caches.
    pub cache_total: u64,
}
110
/// Result of a memory-usage query; wraps the C-side usage object,
/// which is freed in `Drop`.
pub struct MemoryUsage {
    // Raw C handle created by `rocksdb_approximate_memory_usage_create`.
    inner: *mut ffi::rocksdb_memory_usage_t,
}
115
impl Drop for MemoryUsage {
    fn drop(&mut self) {
        // SAFETY: `inner` was produced by the C API and is owned
        // exclusively by this struct.
        unsafe {
            ffi::rocksdb_approximate_memory_usage_destroy(self.inner);
        }
    }
}
123
impl MemoryUsage {
    /// Approximate memory used by all mem-tables.
    pub fn approximate_mem_table_total(&self) -> u64 {
        unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_total(self.inner) }
    }

    /// Approximate memory used by unflushed mem-tables.
    pub fn approximate_mem_table_unflushed(&self) -> u64 {
        unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_unflushed(self.inner) }
    }

    /// Approximate memory used by mem-table readers.
    pub fn approximate_mem_table_readers_total(&self) -> u64 {
        unsafe { ffi::rocksdb_approximate_memory_usage_get_mem_table_readers_total(self.inner) }
    }

    /// Approximate memory used by the caches.
    pub fn approximate_cache_total(&self) -> u64 {
        unsafe { ffi::rocksdb_approximate_memory_usage_get_cache_total(self.inner) }
    }
}
145
/// Builder that accumulates the DBs and caches whose memory usage should
/// be measured together.
pub struct MemoryUsageBuilder<'a> {
    // C-side list of memory consumers; destroyed in `Drop`.
    inner: *mut ffi::rocksdb_memory_consumers_t,
    // Base-DB handles obtained from `TransactionDB`s in `add_tx_db`;
    // each must be closed again in `Drop`.
    base_dbs: Vec<*mut ffi::rocksdb_t>,
    // Ties the builder's lifetime to the DBs/caches borrowed via `add_*`,
    // so they cannot be dropped while still registered here.
    _marker: PhantomData<&'a ()>,
}
155
impl Drop for MemoryUsageBuilder<'_> {
    fn drop(&mut self) {
        // SAFETY: `inner` was created in `new` and is owned exclusively
        // by this builder. Destroy the consumer list first, then release
        // the base-DB handles it referenced.
        unsafe {
            ffi::rocksdb_memory_consumers_destroy(self.inner);
        }
        // Close every base-DB handle acquired in `add_tx_db`.
        for base_db in &self.base_dbs {
            unsafe {
                ffi::rocksdb_transactiondb_close_base_db(*base_db);
            }
        }
    }
}
168
169impl<'a> MemoryUsageBuilder<'a> {
170 pub fn new() -> Result<Self, Error> {
172 let mc = unsafe { ffi::rocksdb_memory_consumers_create() };
173 if mc.is_null() {
174 Err(Error::new(
175 "Could not create MemoryUsage builder".to_owned(),
176 ))
177 } else {
178 Ok(Self {
179 inner: mc,
180 base_dbs: Vec::new(),
181 _marker: PhantomData,
182 })
183 }
184 }
185
186 pub fn add_tx_db<T: ThreadMode>(&mut self, db: &'a TransactionDB<T>) {
188 unsafe {
189 let base_db = ffi::rocksdb_transactiondb_get_base_db(db.inner);
190 ffi::rocksdb_memory_consumers_add_db(self.inner, base_db);
191 self.base_dbs.push(base_db);
193 }
194 }
195
196 pub fn add_db<T: ThreadMode, D: DBInner>(&mut self, db: &'a DBCommon<T, D>) {
198 unsafe {
199 ffi::rocksdb_memory_consumers_add_db(self.inner, db.inner.inner());
200 }
201 }
202
203 pub fn add_cache(&mut self, cache: &'a Cache) {
205 unsafe {
206 ffi::rocksdb_memory_consumers_add_cache(self.inner, cache.0.inner.as_ptr());
207 }
208 }
209
210 pub fn build(&self) -> Result<MemoryUsage, Error> {
212 unsafe {
213 let mu = ffi_try!(ffi::rocksdb_approximate_memory_usage_create(self.inner));
214 Ok(MemoryUsage { inner: mu })
215 }
216 }
217}
218
219pub fn get_memory_usage_stats(
221 dbs: Option<&[&DB]>,
222 caches: Option<&[&Cache]>,
223) -> Result<MemoryUsageStats, Error> {
224 let mut builder = MemoryUsageBuilder::new()?;
225 if let Some(dbs_) = dbs {
226 for db in dbs_ {
227 builder.add_db(db);
228 }
229 }
230 if let Some(caches_) = caches {
231 for cache in caches_ {
232 builder.add_cache(cache);
233 }
234 }
235
236 let mu = builder.build()?;
237 Ok(MemoryUsageStats {
238 mem_table_total: mu.approximate_mem_table_total(),
239 mem_table_unflushed: mu.approximate_mem_table_unflushed(),
240 mem_table_readers_total: mu.approximate_mem_table_readers_total(),
241 cache_total: mu.approximate_cache_total(),
242 })
243}
244
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{DB, Options};
    use tempfile::TempDir;

    /// End-to-end check of the perf-context skip counters: writes and
    /// deletes keys, iterates forward and backward, and verifies counter
    /// behavior at different perf levels (including after a reset).
    #[test]
    fn test_perf_context_with_db_operations() {
        let temp_dir = TempDir::new().unwrap();
        let mut opts = Options::default();
        opts.create_if_missing(true);
        let db = DB::open(&opts, temp_dir.path()).unwrap();

        // Write keys 0..n, then delete the even-numbered ones so the
        // iterator has tombstones to skip over.
        let n = 10;
        for i in 0..n {
            let k = vec![i as u8];
            db.put(&k, &k).unwrap();
            if i % 2 == 0 {
                db.delete(&k).unwrap();
            }
        }

        // Count-only level: counters increment, time metrics stay zero.
        set_perf_stats(PerfStatsLevel::EnableCount);
        let mut ctx = PerfContext::default();

        // Forward scan over the surviving (odd-numbered) keys.
        let mut iter = db.raw_iterator();
        iter.seek_to_first();
        let mut valid_count = 0;
        while iter.valid() {
            valid_count += 1;
            iter.next();
        }

        assert_eq!(
            valid_count, 5,
            "Iterator should find 5 valid entries (odd numbers)"
        );

        let internal_key_skipped = ctx.metric(PerfMetric::InternalKeySkippedCount);
        let internal_delete_skipped = ctx.metric(PerfMetric::InternalDeleteSkippedCount);

        // The scan must have skipped at least the n/2 deleted entries
        // (>= rather than == because compaction state may add skips).
        assert!(
            internal_key_skipped >= (n / 2) as u64,
            "internal_key_skipped ({}) should be >= {} (deletions)",
            internal_key_skipped,
            n / 2
        );
        assert_eq!(
            internal_delete_skipped,
            (n / 2) as u64,
            "internal_delete_skipped ({internal_delete_skipped}) should equal {} (deleted entries)",
            n / 2
        );
        assert_eq!(
            ctx.metric(PerfMetric::SeekInternalSeekTime),
            0,
            "Time metrics should be 0 with EnableCount"
        );

        // A reset must zero the counters in this context.
        ctx.reset();
        assert_eq!(ctx.metric(PerfMetric::InternalKeySkippedCount), 0);
        assert_eq!(ctx.metric(PerfMetric::InternalDeleteSkippedCount), 0);

        // Switch to the most detailed level and iterate backward.
        set_perf_stats(PerfStatsLevel::EnableTime);

        let mut iter = db.raw_iterator();
        iter.seek_to_last();
        let mut backward_count = 0;
        while iter.valid() {
            backward_count += 1;
            iter.prev();
        }
        assert_eq!(
            backward_count, 5,
            "Backward iteration should also find 5 valid entries"
        );

        // Counters accumulated again after the reset during the second scan.
        let key_skipped_after = ctx.metric(PerfMetric::InternalKeySkippedCount);
        let delete_skipped_after = ctx.metric(PerfMetric::InternalDeleteSkippedCount);

        assert!(
            key_skipped_after >= internal_key_skipped,
            "After second iteration, internal_key_skipped ({key_skipped_after}) should be >= first iteration ({internal_key_skipped})",
        );
        assert_eq!(
            delete_skipped_after,
            (n / 2) as u64,
            "internal_delete_skipped should still be {} after second iteration",
            n / 2
        );

        // Turn collection back off so other tests are unaffected.
        set_perf_stats(PerfStatsLevel::Disable);
    }
}