use std::time::Duration;
use serde::{Deserialize, Serialize};
/// Named sizing tiers used to select a complete [`ScalabilityConfig`] bundle.
///
/// Serialized in snake_case (e.g. `"development"`, `"enterprise"`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ScalabilityPreset {
/// Local development tier (up to ~100 connections; batching disabled).
Development,
/// Small deployment tier (up to ~1,000 connections).
Small,
/// Baseline tier and the default (up to ~10,000 connections).
Medium,
/// Large deployment tier (up to ~50,000 connections).
Large,
/// Largest built-in tier (up to ~100,000 connections).
Enterprise,
/// Hand-tuned configuration; builders fall back to the medium baseline.
Custom,
}
impl Default for ScalabilityPreset {
fn default() -> Self {
Self::Medium
}
}
/// Top-level scalability configuration: one preset tag plus the four
/// tunable sub-sections that the preset expands into.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScalabilityConfig {
/// The preset this configuration was built from.
pub preset: ScalabilityPreset,
/// Connection-pool sizing, timeouts, and health checking.
pub connection_pool: ConnectionPoolConfig,
/// Request batching and coalescing behavior.
pub batch: BatchProcessorConfig,
/// Memory / connection / rate limits and the backpressure strategy.
pub resources: ResourceLimiterConfig,
/// Latency/memory/CPU profiling and sampling settings.
pub profiler: ProfilerConfig,
}
impl Default for ScalabilityConfig {
fn default() -> Self {
Self::for_preset(ScalabilityPreset::Medium)
}
}
impl ScalabilityConfig {
pub fn for_preset(preset: ScalabilityPreset) -> Self {
match preset {
ScalabilityPreset::Development => Self::development(),
ScalabilityPreset::Small => Self::small(),
ScalabilityPreset::Medium => Self::medium(),
ScalabilityPreset::Large => Self::large(),
ScalabilityPreset::Enterprise => Self::enterprise(),
ScalabilityPreset::Custom => Self::medium(), }
}
pub fn for_connections(max_connections: usize) -> Self {
if max_connections <= 100 {
Self::development()
} else if max_connections <= 1_000 {
Self::small()
} else if max_connections <= 10_000 {
Self::medium()
} else if max_connections <= 50_000 {
Self::large()
} else {
Self::enterprise()
}
}
pub fn development() -> Self {
Self {
preset: ScalabilityPreset::Development,
connection_pool: ConnectionPoolConfig {
max_connections: 100,
shard_count: 4,
connections_per_shard: 32,
idle_timeout: Duration::from_secs(60),
health_check_interval: Duration::from_secs(30),
enable_metrics: true,
},
batch: BatchProcessorConfig {
enabled: false,
batch_size: 10,
batch_timeout: Duration::from_millis(5),
max_pending: 100,
coalescing: CoalescingConfig::disabled(),
},
resources: ResourceLimiterConfig {
max_memory_bytes: 256 * 1024 * 1024, max_connections: 100,
max_requests_per_second: 1_000,
backpressure_threshold: 0.8,
strategy: BackpressureStrategyConfig::Adaptive,
},
profiler: ProfilerConfig {
enabled: true,
sample_rate: 1.0, histogram_buckets: default_latency_buckets(),
report_interval: Duration::from_secs(10),
track_memory: true,
track_cpu: false,
},
}
}
pub fn small() -> Self {
Self {
preset: ScalabilityPreset::Small,
connection_pool: ConnectionPoolConfig {
max_connections: 1_000,
shard_count: 8,
connections_per_shard: 128,
idle_timeout: Duration::from_secs(120),
health_check_interval: Duration::from_secs(30),
enable_metrics: true,
},
batch: BatchProcessorConfig {
enabled: true,
batch_size: 50,
batch_timeout: Duration::from_millis(2),
max_pending: 1_000,
coalescing: CoalescingConfig::default(),
},
resources: ResourceLimiterConfig {
max_memory_bytes: 512 * 1024 * 1024, max_connections: 1_000,
max_requests_per_second: 10_000,
backpressure_threshold: 0.8,
strategy: BackpressureStrategyConfig::Adaptive,
},
profiler: ProfilerConfig {
enabled: true,
sample_rate: 0.1, histogram_buckets: default_latency_buckets(),
report_interval: Duration::from_secs(30),
track_memory: true,
track_cpu: true,
},
}
}
pub fn medium() -> Self {
Self {
preset: ScalabilityPreset::Medium,
connection_pool: ConnectionPoolConfig {
max_connections: 10_000,
shard_count: 64,
connections_per_shard: 256,
idle_timeout: Duration::from_secs(300),
health_check_interval: Duration::from_secs(60),
enable_metrics: true,
},
batch: BatchProcessorConfig {
enabled: true,
batch_size: 100,
batch_timeout: Duration::from_millis(1),
max_pending: 10_000,
coalescing: CoalescingConfig {
enabled: true,
window: Duration::from_micros(500),
max_coalesce: 16,
},
},
resources: ResourceLimiterConfig {
max_memory_bytes: 2 * 1024 * 1024 * 1024, max_connections: 10_000,
max_requests_per_second: 100_000,
backpressure_threshold: 0.75,
strategy: BackpressureStrategyConfig::Adaptive,
},
profiler: ProfilerConfig {
enabled: true,
sample_rate: 0.01, histogram_buckets: default_latency_buckets(),
report_interval: Duration::from_secs(60),
track_memory: true,
track_cpu: true,
},
}
}
pub fn large() -> Self {
Self {
preset: ScalabilityPreset::Large,
connection_pool: ConnectionPoolConfig {
max_connections: 50_000,
shard_count: 128,
connections_per_shard: 512,
idle_timeout: Duration::from_secs(300),
health_check_interval: Duration::from_secs(120),
enable_metrics: true,
},
batch: BatchProcessorConfig {
enabled: true,
batch_size: 200,
batch_timeout: Duration::from_micros(500),
max_pending: 50_000,
coalescing: CoalescingConfig {
enabled: true,
window: Duration::from_micros(250),
max_coalesce: 32,
},
},
resources: ResourceLimiterConfig {
max_memory_bytes: 8 * 1024 * 1024 * 1024, max_connections: 50_000,
max_requests_per_second: 500_000,
backpressure_threshold: 0.7,
strategy: BackpressureStrategyConfig::Aggressive,
},
profiler: ProfilerConfig {
enabled: true,
sample_rate: 0.001, histogram_buckets: default_latency_buckets(),
report_interval: Duration::from_secs(120),
track_memory: true,
track_cpu: true,
},
}
}
pub fn enterprise() -> Self {
Self {
preset: ScalabilityPreset::Enterprise,
connection_pool: ConnectionPoolConfig {
max_connections: 100_000,
shard_count: 256,
connections_per_shard: 512,
idle_timeout: Duration::from_secs(600),
health_check_interval: Duration::from_secs(180),
enable_metrics: true,
},
batch: BatchProcessorConfig {
enabled: true,
batch_size: 500,
batch_timeout: Duration::from_micros(250),
max_pending: 100_000,
coalescing: CoalescingConfig {
enabled: true,
window: Duration::from_micros(100),
max_coalesce: 64,
},
},
resources: ResourceLimiterConfig {
max_memory_bytes: 16 * 1024 * 1024 * 1024, max_connections: 100_000,
max_requests_per_second: 1_000_000,
backpressure_threshold: 0.65,
strategy: BackpressureStrategyConfig::Aggressive,
},
profiler: ProfilerConfig {
enabled: true,
sample_rate: 0.0001, histogram_buckets: default_latency_buckets(),
report_interval: Duration::from_secs(300),
track_memory: true,
track_cpu: true,
},
}
}
pub fn validate(&self) -> Result<(), ConfigError> {
self.connection_pool.validate()?;
self.batch.validate()?;
self.resources.validate()?;
self.profiler.validate()?;
Ok(())
}
}
/// Sizing and maintenance parameters for the sharded connection pool.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConnectionPoolConfig {
/// Upper bound on pooled connections; must be non-zero (see `validate`).
pub max_connections: usize,
/// Number of pool shards; must be non-zero, and `validate` warns if it
/// is not a power of two (hash distribution works best with one).
pub shard_count: usize,
/// Per-shard connection capacity — presumably max_connections spread
/// across shards; confirm against the pool implementation.
pub connections_per_shard: usize,
/// How long a connection may sit idle — presumably before being reaped.
pub idle_timeout: Duration,
/// Interval between health checks — semantics defined by the pool impl.
pub health_check_interval: Duration,
/// Whether pool metrics collection is enabled.
pub enable_metrics: bool,
}
impl Default for ConnectionPoolConfig {
fn default() -> Self {
ScalabilityConfig::medium().connection_pool
}
}
impl ConnectionPoolConfig {
    /// Checks pool sizing invariants.
    ///
    /// # Errors
    /// Returns [`ConfigError::InvalidValue`] when `max_connections` or
    /// `shard_count` is zero. A non-power-of-two shard count is accepted
    /// but logged, since hashed shard selection is suboptimal then.
    pub fn validate(&self) -> Result<(), ConfigError> {
        if self.max_connections == 0 {
            return Err(ConfigError::InvalidValue {
                field: "max_connections".into(),
                message: "must be greater than 0".into(),
            });
        }
        if self.shard_count == 0 {
            return Err(ConfigError::InvalidValue {
                field: "shard_count".into(),
                message: "must be greater than 0".into(),
            });
        }
        if !self.shard_count.is_power_of_two() {
            tracing::warn!(
                shard_count = self.shard_count,
                "shard_count is not a power of 2, hashing may be suboptimal"
            );
        }
        Ok(())
    }

    /// Suggests a shard count for `max_connections`: targets ~256
    /// connections per shard, rounds up to a power of two, and clamps the
    /// result to `[4, 256]`.
    ///
    /// Uses integer `next_power_of_two` instead of the previous
    /// `f64::log2().ceil()` round-trip, which is exact for all inputs
    /// (the float path risks mis-rounding near power-of-two boundaries
    /// for very large counts).
    pub fn optimal_shard_count(max_connections: usize) -> usize {
        const TARGET_PER_SHARD: usize = 256;
        // Ceiling division; `max_connections == 0` yields 0 shards, which
        // `next_power_of_two` maps to 1 and the clamp lifts to 4 — same
        // floor the original produced.
        let shards_needed = (max_connections + TARGET_PER_SHARD - 1) / TARGET_PER_SHARD;
        shards_needed.next_power_of_two().clamp(4, 256)
    }
}
/// Parameters for request batching; validated only when `enabled` is true.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchProcessorConfig {
/// Master switch; when false, the other fields are not validated.
pub enabled: bool,
/// Maximum items per batch; must be non-zero when batching is enabled.
pub batch_size: usize,
/// Maximum wait before a batch is flushed — presumably even when not
/// full; must be non-zero when batching is enabled.
pub batch_timeout: Duration,
/// Cap on queued items awaiting batching — TODO confirm overflow
/// behavior with the processor implementation.
pub max_pending: usize,
/// Sub-config for merging duplicate/adjacent operations.
pub coalescing: CoalescingConfig,
}
impl Default for BatchProcessorConfig {
fn default() -> Self {
ScalabilityConfig::medium().batch
}
}
impl BatchProcessorConfig {
    /// Validates the batching parameters.
    ///
    /// # Errors
    /// When batching is enabled, `batch_size` and `batch_timeout` must
    /// both be non-zero. A disabled batcher accepts any values.
    pub fn validate(&self) -> Result<(), ConfigError> {
        // Nothing to check while batching is switched off.
        if !self.enabled {
            return Ok(());
        }
        // Both failures share the same message; build the error once.
        let invalid = |field: &str| ConfigError::InvalidValue {
            field: field.into(),
            message: "must be greater than 0 when batching is enabled".into(),
        };
        if self.batch_size == 0 {
            return Err(invalid("batch_size"));
        }
        if self.batch_timeout.is_zero() {
            return Err(invalid("batch_timeout"));
        }
        Ok(())
    }
}
/// Settings for coalescing (merging) operations within a short window.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CoalescingConfig {
/// Whether coalescing is active at all.
pub enabled: bool,
/// Time window within which operations may be merged.
pub window: Duration,
/// Maximum number of operations merged together — presumably per
/// window; confirm with the batch processor.
pub max_coalesce: usize,
}
impl Default for CoalescingConfig {
fn default() -> Self {
Self {
enabled: true,
window: Duration::from_micros(500),
max_coalesce: 16,
}
}
}
impl CoalescingConfig {
pub fn disabled() -> Self {
Self {
enabled: false,
window: Duration::ZERO,
max_coalesce: 0,
}
}
}
/// Hard resource limits plus the backpressure behavior once limits near.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceLimiterConfig {
/// Memory ceiling in bytes.
pub max_memory_bytes: u64,
/// Connection ceiling — note the presets keep this equal to
/// `ConnectionPoolConfig::max_connections`.
pub max_connections: usize,
/// Request-rate ceiling (requests per second).
pub max_requests_per_second: u64,
/// Fraction of a limit at which backpressure kicks in; must be in
/// (0.0, 1.0] per `validate`.
pub backpressure_threshold: f64,
/// Which backpressure strategy to apply once the threshold is crossed.
pub strategy: BackpressureStrategyConfig,
}
impl Default for ResourceLimiterConfig {
fn default() -> Self {
ScalabilityConfig::medium().resources
}
}
impl ResourceLimiterConfig {
    /// Checks that the limiter values are usable.
    ///
    /// # Errors
    /// Returns [`ConfigError::InvalidValue`] unless
    /// `backpressure_threshold` lies in the half-open range (0.0, 1.0].
    /// The check is written as a negated conjunction so NaN is rejected
    /// too — the previous `<= 0.0 || > 1.0` form let NaN pass, since NaN
    /// fails every ordered comparison.
    pub fn validate(&self) -> Result<(), ConfigError> {
        let t = self.backpressure_threshold;
        if !(t > 0.0 && t <= 1.0) {
            return Err(ConfigError::InvalidValue {
                field: "backpressure_threshold".into(),
                message: "must be between 0.0 (exclusive) and 1.0 (inclusive)".into(),
            });
        }
        Ok(())
    }
}
/// Strategy applied when resource usage crosses the backpressure
/// threshold. Exact semantics of each variant live in the limiter
/// implementation — the names here only select the behavior.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum BackpressureStrategyConfig {
/// No backpressure applied.
None,
/// Presumably a gradual ramp-up of pushback — confirm with the limiter.
Gradual,
/// Adaptive pushback; the default, used by the small/medium presets.
Adaptive,
/// Strongest pushback; used by the large/enterprise presets.
Aggressive,
}
impl Default for BackpressureStrategyConfig {
fn default() -> Self {
Self::Adaptive
}
}
/// Profiler settings: sampling rate, histogram layout, and what to track.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProfilerConfig {
/// Master switch for profiling.
pub enabled: bool,
/// Fraction of operations sampled; must be in [0.0, 1.0] per `validate`.
pub sample_rate: f64,
/// Histogram bucket upper bounds (see `default_latency_buckets`);
/// must be non-empty when profiling is enabled.
pub histogram_buckets: Vec<u64>,
/// How often profiling reports are emitted.
pub report_interval: Duration,
/// Whether memory usage is tracked.
pub track_memory: bool,
/// Whether CPU usage is tracked.
pub track_cpu: bool,
}
impl Default for ProfilerConfig {
fn default() -> Self {
ScalabilityConfig::medium().profiler
}
}
impl ProfilerConfig {
    /// Checks the profiler parameters.
    ///
    /// # Errors
    /// - `sample_rate` must lie in [0.0, 1.0]. The range-`contains` check
    ///   also rejects NaN, which the previous `< 0.0 || > 1.0` comparison
    ///   silently accepted (NaN fails every ordered comparison).
    /// - When profiling is enabled, at least one histogram bucket is
    ///   required.
    pub fn validate(&self) -> Result<(), ConfigError> {
        if !(0.0..=1.0).contains(&self.sample_rate) {
            return Err(ConfigError::InvalidValue {
                field: "sample_rate".into(),
                message: "must be between 0.0 and 1.0".into(),
            });
        }
        if self.enabled && self.histogram_buckets.is_empty() {
            return Err(ConfigError::InvalidValue {
                field: "histogram_buckets".into(),
                message: "must have at least one bucket when profiling is enabled".into(),
            });
        }
        Ok(())
    }
}
/// Default latency histogram bucket upper bounds, strictly increasing
/// from 50 to 100,000 on a rough 1-2.5-5 progression (unit presumably
/// microseconds — confirm with the profiler implementation).
fn default_latency_buckets() -> Vec<u64> {
    [
        50, 100, 250, 500, 1_000, 2_500, 5_000, 10_000, 25_000, 50_000, 100_000,
    ]
    .to_vec()
}
/// Errors produced by the `validate` methods in this module.
///
/// Note: only `InvalidValue` is constructed in this file; `MissingField`
/// and `Inconsistent` are presumably used elsewhere — confirm before
/// removing.
#[derive(Debug, Clone, thiserror::Error)]
pub enum ConfigError {
/// A field holds an out-of-range or zero value.
#[error("Invalid value for {field}: {message}")]
InvalidValue { field: String, message: String },
/// A required field was absent.
#[error("Missing required field: {field}")]
MissingField { field: String },
/// Two or more fields contradict each other.
#[error("Inconsistent configuration: {message}")]
Inconsistent { message: String },
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The default config is the medium tier and passes validation.
    #[test]
    fn test_default_config() {
        let cfg = ScalabilityConfig::default();
        assert_eq!(cfg.preset, ScalabilityPreset::Medium);
        assert_eq!(cfg.connection_pool.max_connections, 10_000);
        assert!(cfg.validate().is_ok());
    }

    /// Every named (non-Custom) preset produces a valid configuration.
    #[test]
    fn test_presets() {
        use ScalabilityPreset::*;
        for preset in [Development, Small, Medium, Large, Enterprise] {
            let cfg = ScalabilityConfig::for_preset(preset);
            assert!(
                cfg.validate().is_ok(),
                "Preset {:?} failed validation",
                preset
            );
        }
    }

    /// Connection counts map onto the expected preset tiers.
    #[test]
    fn test_for_connections() {
        let cases = [
            (50, ScalabilityPreset::Development),
            (500, ScalabilityPreset::Small),
            (5_000, ScalabilityPreset::Medium),
            (30_000, ScalabilityPreset::Large),
            (100_000, ScalabilityPreset::Enterprise),
        ];
        for (conns, expected) in cases {
            assert_eq!(ScalabilityConfig::for_connections(conns).preset, expected);
        }
    }

    /// Shard counts round up to powers of two and clamp to [4, 256].
    #[test]
    fn test_optimal_shard_count() {
        let cases = [
            (100, 4),
            (1_000, 4),
            (10_000, 64),
            (50_000, 256),
            (100_000, 256),
        ];
        for (conns, expected) in cases {
            assert_eq!(ConnectionPoolConfig::optimal_shard_count(conns), expected);
        }
    }

    /// Each invalid field is rejected by its section's validator.
    #[test]
    fn test_validation_errors() {
        let mut cfg = ScalabilityConfig::default();
        cfg.connection_pool.max_connections = 0;
        assert!(cfg.connection_pool.validate().is_err());

        let mut cfg = ScalabilityConfig::default();
        cfg.resources.backpressure_threshold = 1.5;
        assert!(cfg.resources.validate().is_err());

        let mut cfg = ScalabilityConfig::default();
        cfg.profiler.sample_rate = -0.1;
        assert!(cfg.profiler.validate().is_err());
    }
}