//! Value log configuration.
use crate::{blob_cache::BlobCache, CompressionType};
use std::sync::Arc;
/// Value log configuration
/// Value log configuration
///
/// Built by starting from [`Config::default`] and chaining the
/// builder-style setters (`segment_size_bytes`, `blob_cache`,
/// `use_compression`).
#[derive(Debug)]
pub struct Config {
/// Target size of vLog segments, in bytes (default: 256 MiB)
pub(crate) segment_size_bytes: u64,
/// Blob cache to use; behind an `Arc`, so a single cache may be
/// shared between multiple value logs to cap global memory usage
pub(crate) blob_cache: Arc<BlobCache>,
/// Compression to use for stored blobs (default: none)
pub(crate) compression: CompressionType,
}
impl Default for Config {
/// Returns the default configuration: 256 MiB segments, a fresh
/// 16 MiB blob cache, and no compression.
fn default() -> Self {
Self {
// 256 MiB target segment size
segment_size_bytes: 256 * 1_024 * 1_024,
// Per-value-log 16 MiB cache; replace via `Config::blob_cache`
// to share one cache across value logs
blob_cache: Arc::new(BlobCache::with_capacity_bytes(16 * 1_024 * 1_024)),
// No compression by default; opt in via `Config::use_compression`
compression: CompressionType::None,
}
}
}
impl Config {
/// Sets the compression type to use.
///
/// Using compression is recommended, see [`CompressionType`].
///
/// Default = none
#[must_use]
pub fn use_compression(self, compression: CompressionType) -> Self {
Self { compression, ..self }
}

/// Sets the blob cache.
///
/// You can create a global [`BlobCache`] and share it between multiple
/// value logs to cap global cache memory usage.
///
/// Defaults to a blob cache with 16 MiB of capacity *per value log*.
#[must_use]
pub fn blob_cache(self, blob_cache: Arc<BlobCache>) -> Self {
Self { blob_cache, ..self }
}

/// Sets the maximum size of value log segments.
///
/// This heavily influences space amplification, as
/// space reclamation works on a per-segment basis.
///
/// Like `blob_file_size` in `RocksDB`.
///
/// Default = 256 MiB
#[must_use]
pub fn segment_size_bytes(self, bytes: u64) -> Self {
Self {
segment_size_bytes: bytes,
..self
}
}
}