libblobd_lite/
metrics.rs

1#[cfg(test)]
2use crate::test_util::device::TestSeekableAsyncFile as SeekableAsyncFile;
3#[cfg(test)]
4use crate::test_util::journal::TestTransaction as Txn;
5use off64::int::create_u64_be;
6use off64::int::Off64AsyncReadInt;
7use off64::usz;
8#[cfg(not(test))]
9use seekable_async_file::SeekableAsyncFile;
10use serde::Serialize;
11use std::sync::atomic::AtomicU64;
12use std::sync::atomic::Ordering::Relaxed;
13#[cfg(not(test))]
14use write_journal::Transaction as Txn;
15
// Byte offsets of each persisted metric within the on-device metrics region.
// Every metric occupies one 8-byte big-endian u64 slot; the slots are laid out
// contiguously, so each offset is the previous offset plus 8.
const OFFSETOF_ALLOCATED_BLOCK_COUNT: u64 = 0;
const OFFSETOF_ALLOCATED_PAGE_COUNT: u64 = OFFSETOF_ALLOCATED_BLOCK_COUNT + 8;
const OFFSETOF_DELETED_OBJECT_COUNT: u64 = OFFSETOF_ALLOCATED_PAGE_COUNT + 8;
const OFFSETOF_INCOMPLETE_OBJECT_COUNT: u64 = OFFSETOF_DELETED_OBJECT_COUNT + 8;
const OFFSETOF_OBJECT_COUNT: u64 = OFFSETOF_INCOMPLETE_OBJECT_COUNT + 8;
const OFFSETOF_OBJECT_DATA_BYTES: u64 = OFFSETOF_OBJECT_COUNT + 8;
const OFFSETOF_OBJECT_METADATA_BYTES: u64 = OFFSETOF_OBJECT_DATA_BYTES + 8;
const OFFSETOF_USED_BYTES: u64 = OFFSETOF_OBJECT_METADATA_BYTES + 8;

// Total size reserved on the device for the metrics region (4 MiB). Far larger
// than the handful of slots currently used; the slack is deliberate headroom so
// new metrics can be appended without reformatting the device.
// Reserve more space for future proofing.
pub(crate) const METRICS_STATE_SIZE: u64 = 1024 * 1024 * 4;
27
/// In-memory counters describing blobd state, mirrored on the device as
/// big-endian u64 slots at `dev_offset + OFFSETOF_*` (see the offset constants
/// above). The atomics are updated first and the new value is then written
/// through a journal transaction, so the in-memory values lead the persisted
/// ones between commits.
#[derive(Serialize)]
pub struct BlobdMetrics {
  // Device byte offset of the metrics region; bookkeeping, not a metric, hence
  // excluded from serialized output.
  #[serde(skip_serializing)]
  dev_offset: u64,
  // This can only increase.
  allocated_block_count: AtomicU64,
  // This does not include metadata lpages.
  allocated_page_count: AtomicU64,
  deleted_object_count: AtomicU64,
  incomplete_object_count: AtomicU64,
  // This includes incomplete and deleted objects.
  object_count: AtomicU64,
  // To determine internal fragmentation, take `used_bytes` and subtract `object_data_bytes + object_metadata_bytes`. This is easier than attempting to track internal fragmentation directly.
  object_data_bytes: AtomicU64,
  object_metadata_bytes: AtomicU64,
  // Sum of non-free pages and block metadata lpages.
  used_bytes: AtomicU64,
}
46
47#[rustfmt::skip]
48impl BlobdMetrics {
49  #[cfg(test)]
50  pub(crate) fn for_testing(dev_offset: u64) -> Self {
51    Self {
52      dev_offset,
53      allocated_block_count: AtomicU64::new(0),
54      allocated_page_count: AtomicU64::new(0),
55      deleted_object_count: AtomicU64::new(0),
56      incomplete_object_count: AtomicU64::new(0),
57      object_count: AtomicU64::new(0),
58      object_data_bytes: AtomicU64::new(0),
59      object_metadata_bytes: AtomicU64::new(0),
60      used_bytes: AtomicU64::new(0),
61    }
62  }
63
64  pub(crate) async fn load_from_device(dev: &SeekableAsyncFile, dev_offset: u64) -> Self {
65    Self {
66      dev_offset,
67      allocated_block_count: dev.read_u64_be_at(dev_offset + OFFSETOF_ALLOCATED_BLOCK_COUNT).await.into(),
68      allocated_page_count: dev.read_u64_be_at(dev_offset + OFFSETOF_ALLOCATED_PAGE_COUNT).await.into(),
69      deleted_object_count: dev.read_u64_be_at(dev_offset + OFFSETOF_DELETED_OBJECT_COUNT).await.into(),
70      incomplete_object_count: dev.read_u64_be_at(dev_offset + OFFSETOF_INCOMPLETE_OBJECT_COUNT).await.into(),
71      object_count: dev.read_u64_be_at(dev_offset + OFFSETOF_OBJECT_COUNT).await.into(),
72      object_data_bytes: dev.read_u64_be_at(dev_offset + OFFSETOF_OBJECT_DATA_BYTES).await.into(),
73      object_metadata_bytes: dev.read_u64_be_at(dev_offset + OFFSETOF_OBJECT_METADATA_BYTES).await.into(),
74      used_bytes: dev.read_u64_be_at(dev_offset + OFFSETOF_USED_BYTES).await.into(),
75    }
76  }
77
78  pub(crate) async fn format_device(dev: &SeekableAsyncFile, dev_offset: u64) {
79    dev.write_at(dev_offset, vec![0u8; usz!(METRICS_STATE_SIZE)]).await;
80  }
81
82  pub fn allocated_block_count(&self) -> u64 { self.allocated_block_count.load(Relaxed) }
83  pub fn allocated_page_count(&self) -> u64 { self.allocated_page_count.load(Relaxed) }
84  pub fn deleted_object_count(&self) -> u64 { self.deleted_object_count.load(Relaxed) }
85  pub fn incomplete_object_count(&self) -> u64 { self.incomplete_object_count.load(Relaxed) }
86  pub fn object_count(&self) -> u64 { self.object_count.load(Relaxed) }
87  pub fn object_data_bytes(&self) -> u64 { self.object_data_bytes.load(Relaxed) }
88  pub fn object_metadata_bytes(&self) -> u64 { self.object_metadata_bytes.load(Relaxed) }
89  pub fn used_bytes(&self) -> u64 { self.used_bytes.load(Relaxed) }
90
91  pub(crate) fn incr_allocated_block_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_ALLOCATED_BLOCK_COUNT, create_u64_be(self.allocated_block_count.fetch_add(d, Relaxed) + d)); }
92  pub(crate) fn incr_allocated_page_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_ALLOCATED_PAGE_COUNT, create_u64_be(self.allocated_page_count.fetch_add(d, Relaxed) + d)); }
93  pub(crate) fn incr_deleted_object_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_DELETED_OBJECT_COUNT, create_u64_be(self.deleted_object_count.fetch_add(d, Relaxed) + d)); }
94  pub(crate) fn incr_incomplete_object_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_INCOMPLETE_OBJECT_COUNT, create_u64_be(self.incomplete_object_count.fetch_add(d, Relaxed) + d)); }
95  pub(crate) fn incr_object_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_OBJECT_COUNT, create_u64_be(self.object_count.fetch_add(d, Relaxed) + d)); }
96  pub(crate) fn incr_object_data_bytes(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_OBJECT_DATA_BYTES, create_u64_be(self.object_data_bytes.fetch_add(d, Relaxed) + d)); }
97  pub(crate) fn incr_object_metadata_bytes(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_OBJECT_METADATA_BYTES, create_u64_be(self.object_metadata_bytes.fetch_add(d, Relaxed) + d)); }
98  pub(crate) fn incr_used_bytes(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_USED_BYTES, create_u64_be(self.used_bytes.fetch_add(d, Relaxed) + d)); }
99
100  // We don't have a decrement method for `allocated_block_count` as it should never decrement.
101  pub(crate) fn decr_allocated_page_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_ALLOCATED_PAGE_COUNT, create_u64_be(self.allocated_page_count.fetch_sub(d, Relaxed) - d)); }
102  pub(crate) fn decr_deleted_object_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_DELETED_OBJECT_COUNT, create_u64_be(self.deleted_object_count.fetch_sub(d, Relaxed) - d)); }
103  pub(crate) fn decr_incomplete_object_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_INCOMPLETE_OBJECT_COUNT, create_u64_be(self.incomplete_object_count.fetch_sub(d, Relaxed) - d)); }
104  pub(crate) fn decr_object_count(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_OBJECT_COUNT, create_u64_be(self.object_count.fetch_sub(d, Relaxed) - d)); }
105  pub(crate) fn decr_object_data_bytes(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_OBJECT_DATA_BYTES, create_u64_be(self.object_data_bytes.fetch_sub(d, Relaxed) - d)); }
106  pub(crate) fn decr_object_metadata_bytes(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_OBJECT_METADATA_BYTES, create_u64_be(self.object_metadata_bytes.fetch_sub(d, Relaxed) - d)); }
107  pub(crate) fn decr_used_bytes(&self, txn: &mut Txn, d: u64) { txn.write(self.dev_offset + OFFSETOF_USED_BYTES, create_u64_be(self.used_bytes.fetch_sub(d, Relaxed) - d)); }
108}