1#![doc = include_str!("../README.md")]
2#![doc(html_root_url = "https://docs.rs/poolsim-core/0.1.0")]
3#![cfg_attr(docsrs, feature(doc_cfg))]
4
5#![deny(missing_docs)]
6
7pub mod distribution;
9pub mod erlang;
11pub mod error;
13pub mod monte_carlo;
15pub mod optimizer;
17pub mod sensitivity;
19pub mod types;
21
22use distribution::LatencyDistribution;
23use error::PoolsimError;
24use optimizer::find_optimal;
25use types::{
26 EvaluationResult, PoolConfig, SaturationLevel, SensitivityRow, SimulationOptions, SimulationReport,
27 StepLoadResult, WorkloadConfig,
28};
29
30pub use types::DistributionModel;
32pub use types::QueueModel;
34pub use types::RiskLevel;
36
/// Floor on Monte-Carlo iterations enforced by [`simulate`]; requests below
/// this count are raised to it (with a warning in the report).
pub const MIN_FULL_SIMULATION_ITERATIONS: u32 = 10_000;
/// Message printed to stderr by [`emit_performance_contract_warning`].
/// NOTE(review): the 200 ms figure is baked into this text even though the
/// comparison threshold is caller-supplied — confirm the contract is fixed.
pub const PERFORMANCE_CONTRACT_WARNING: &str = "performance contract not met: expected <= 200ms";
41
42pub fn emit_performance_contract_warning(elapsed_ms: u128, threshold_ms: u128) {
44 if elapsed_ms > threshold_ms {
45 eprintln!("{PERFORMANCE_CONTRACT_WARNING}");
46 }
47}
48
/// Runs the full simulation pipeline: validates the inputs, fits a latency
/// distribution, finds the optimal pool size, performs a sensitivity sweep,
/// estimates a cold-start pool size, and (when the workload carries a
/// step-load profile) evaluates each load step at the recommended size.
///
/// Iteration counts below [`MIN_FULL_SIMULATION_ITERATIONS`] are raised to
/// that floor; the override is reported via the returned `warnings`.
///
/// # Errors
///
/// Returns a [`PoolsimError`] if any input fails validation or any pipeline
/// stage (fitting, optimisation, sweeping, step analysis) fails.
pub fn simulate(
    workload: &WorkloadConfig,
    pool: &PoolConfig,
    opts: &SimulationOptions,
) -> Result<SimulationReport, PoolsimError> {
    // Validate everything up front so later stages can assume sane inputs.
    workload.validate()?;
    pool.validate()?;
    opts.validate()?;

    // Work on a copy of the options: the iteration count may be bumped below,
    // and a warning records the override so callers aren't surprised.
    let mut effective_opts = opts.clone();
    let mut warnings = Vec::new();
    if effective_opts.iterations < MIN_FULL_SIMULATION_ITERATIONS {
        effective_opts.iterations = MIN_FULL_SIMULATION_ITERATIONS;
        warnings.push(format!(
            "iterations increased to {} for full simulation fidelity",
            MIN_FULL_SIMULATION_ITERATIONS
        ));
    }

    let dist = LatencyDistribution::fit(workload, effective_opts.distribution)?;
    let optimal = find_optimal(workload, pool, &dist, &effective_opts)?;
    let sensitivity = sensitivity::sweep_with_options(workload, pool, &effective_opts)?;
    // Cold-start sizing is advisory only, so it is infallible (no `?`).
    let cold_start_min_pool_size =
        recommend_cold_start_pool_size(workload, pool, &dist, &effective_opts, optimal.pool_size);

    // Step-load analysis runs `evaluate` once per profile point, so trim the
    // per-step iteration count (bounded to [1_500, 5_000]) to keep it quick.
    let mut step_opts = effective_opts.clone();
    if workload.step_load_profile.is_some() {
        let reduced = (effective_opts.iterations / 4).clamp(1_500, 5_000);
        if reduced < effective_opts.iterations {
            step_opts.iterations = reduced;
            warnings.push(format!(
                "step-load analysis used {} iterations per step for responsiveness",
                reduced
            ));
        }
    }
    let step_load_analysis = build_step_load_analysis(workload, optimal.pool_size, &step_opts)?;

    // Merge the optimiser's warnings, then flag high utilisation at the
    // recommended size.
    let saturation = SaturationLevel::from_rho(optimal.utilisation_rho);
    warnings.extend(optimal.warnings);
    if saturation != SaturationLevel::Ok {
        warnings.push(format!(
            "System utilisation is high at the recommended size (rho={:.3})",
            optimal.utilisation_rho
        ));
    }

    Ok(SimulationReport {
        optimal_pool_size: optimal.pool_size,
        confidence_interval: optimal.confidence_interval,
        cold_start_min_pool_size,
        utilisation_rho: optimal.utilisation_rho,
        mean_queue_wait_ms: optimal.mean_queue_wait_ms,
        p99_queue_wait_ms: optimal.p99_queue_wait_ms,
        saturation,
        sensitivity,
        step_load_analysis,
        warnings,
    })
}
115
116pub fn evaluate(
122 workload: &WorkloadConfig,
123 pool_size: u32,
124 opts: &SimulationOptions,
125) -> Result<EvaluationResult, PoolsimError> {
126 workload.validate()?;
127 opts.validate()?;
128
129 if pool_size == 0 {
130 return Err(PoolsimError::invalid_input(
131 "INVALID_POOL_SIZE",
132 "pool_size must be greater than 0",
133 None,
134 ));
135 }
136
137 let dist = LatencyDistribution::fit(workload, opts.distribution)?;
138 let mc = monte_carlo::run(workload, pool_size, &dist, opts)?;
139
140 let lambda = workload.requests_per_second;
141 let mu = 1_000.0 / dist.mean_ms();
142 let rho = erlang::utilisation(lambda, mu, pool_size);
143 let mean_wait = match opts.queue_model {
144 QueueModel::MMC => erlang::mean_queue_wait_ms(lambda, mu, pool_size).unwrap_or(mc.mean),
145 QueueModel::MDC => mc.mean,
146 };
147
148 let saturation = SaturationLevel::from_rho(rho);
149 let mut warnings = Vec::new();
150 if saturation != SaturationLevel::Ok {
151 warnings.push(format!("utilisation is elevated (rho={:.3})", rho));
152 }
153
154 Ok(EvaluationResult {
155 pool_size,
156 utilisation_rho: rho,
157 mean_queue_wait_ms: mean_wait,
158 p99_queue_wait_ms: mc.p99,
159 saturation,
160 warnings,
161 })
162}
163
164pub fn sweep(
170 workload: &WorkloadConfig,
171 pool: &PoolConfig,
172) -> Result<Vec<SensitivityRow>, PoolsimError> {
173 sweep_with_options(workload, pool, &SimulationOptions::default())
174}
175
/// Runs a sensitivity sweep with caller-supplied [`SimulationOptions`],
/// validating all inputs before delegating to [`sensitivity::sweep_with_options`].
///
/// # Errors
///
/// Returns a [`PoolsimError`] if any input fails validation or the sweep fails.
pub fn sweep_with_options(
    workload: &WorkloadConfig,
    pool: &PoolConfig,
    opts: &SimulationOptions,
) -> Result<Vec<SensitivityRow>, PoolsimError> {
    // Same validation order as `simulate`: workload, pool, then options.
    workload.validate()?;
    pool.validate()?;
    opts.validate()?;
    sensitivity::sweep_with_options(workload, pool, opts)
}
191
192fn recommend_cold_start_pool_size(
193 workload: &WorkloadConfig,
194 pool: &PoolConfig,
195 dist: &LatencyDistribution,
196 opts: &SimulationOptions,
197 recommended_pool_size: u32,
198) -> u32 {
199 let peak_rps = workload
200 .step_load_profile
201 .as_ref()
202 .and_then(|profile| {
203 profile
204 .iter()
205 .map(|point| point.requests_per_second)
206 .max_by(|a, b| a.total_cmp(b))
207 })
208 .map(|peak| peak.max(workload.requests_per_second))
209 .unwrap_or(workload.requests_per_second);
210
211 let mu = 1_000.0 / (dist.mean_ms() + pool.connection_overhead_ms);
212 if !mu.is_finite() || mu <= 0.0 {
213 return pool.min_pool_size.min(recommended_pool_size);
214 }
215
216 let warm_rho_target = opts.max_acceptable_rho.min(0.70).max(0.35);
217 let required = (peak_rps / (mu * warm_rho_target)).ceil().max(1.0) as u32;
218 required
219 .max(pool.min_pool_size)
220 .min(recommended_pool_size)
221}
222
223fn build_step_load_analysis(
224 workload: &WorkloadConfig,
225 pool_size: u32,
226 opts: &SimulationOptions,
227) -> Result<Vec<StepLoadResult>, PoolsimError> {
228 let Some(profile) = &workload.step_load_profile else {
229 return Ok(Vec::new());
230 };
231
232 let mut rows = Vec::with_capacity(profile.len());
233 for point in profile {
234 let mut step_workload = workload.clone();
235 step_workload.requests_per_second = point.requests_per_second;
236 step_workload.step_load_profile = None;
237
238 let step = evaluate(&step_workload, pool_size, opts)?;
239 rows.push(StepLoadResult {
240 time_s: point.time_s,
241 requests_per_second: point.requests_per_second,
242 utilisation_rho: step.utilisation_rho,
243 p99_queue_wait_ms: step.p99_queue_wait_ms,
244 saturation: step.saturation,
245 });
246 }
247
248 Ok(rows)
249}