canic_core/policy/placement/scaling.rs

//! Policy layer for scaling worker pools.
//!
//! Scaling builds on top of the scaling registry and configuration entries
//! under `[canisters.<type>.scaling]`. This module is PURE policy:
//! - reads config
//! - reads registry
//! - computes decisions
//!
//! No IC calls. No async. No side effects.
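//!
//! A minimal usage sketch (pool name and time source are illustrative; the
//! caller is expected to supply the current time in seconds):
//!
//! ```ignore
//! let plan = ScalingPolicy::plan_create_worker("workers", now_secs)?;
//! if plan.should_spawn {
//!     // `plan.worker_entry` holds the WorkerEntryView describing the worker
//!     // to create; acting on it (IC calls) is left to the caller.
//! }
//! ```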

use crate::{
    Error, ThisError,
    cdk::types::BoundedString64,
    config::schema::ScalePool,
    dto::placement::WorkerEntryView,
    ops::{config::ConfigOps, storage::scaling::ScalingRegistryOps},
};

///
/// ScalingPolicyError
/// Errors raised during scaling policy evaluation
///

#[derive(Debug, ThisError)]
pub enum ScalingPolicyError {
    #[error("scaling capability disabled for this canister")]
    ScalingDisabled,

    #[error("scaling pool '{0}' not found")]
    PoolNotFound(String),
}

impl From<ScalingPolicyError> for Error {
    fn from(err: ScalingPolicyError) -> Self {
        Self::OpsError(err.to_string())
    }
}

///
/// ScalingPlan
/// Result of a dry-run evaluation for scaling decisions
///

#[derive(Clone, Debug)]
pub struct ScalingPlan {
    /// Whether a new worker should be spawned for the pool.
    pub should_spawn: bool,
    /// Human-readable explanation of the decision.
    pub reason: String,
    /// Entry describing the worker to create; `Some` only when `should_spawn` is true.
    pub worker_entry: Option<WorkerEntryView>,
}

///
/// ScalingPolicy
/// Pure decision logic for scaling worker pools; no IC calls or side effects
///

pub struct ScalingPolicy;

impl ScalingPolicy {
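    /// Dry-run evaluation: compares the pool's current worker count against the
    /// configured `min_workers`/`max_workers` bounds and returns a [`ScalingPlan`]
    /// describing whether a worker should be spawned and why.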
    #[allow(clippy::cast_possible_truncation)]
    pub fn plan_create_worker(pool: &str, created_at_secs: u64) -> Result<ScalingPlan, Error> {
        let pool_cfg = Self::get_scaling_pool_cfg(pool)?;
        let policy = pool_cfg.policy;
        let worker_count = ScalingRegistryOps::count_by_pool(pool);

        // Max bound check (max_workers == 0 disables the upper bound)
        if policy.max_workers > 0 && worker_count >= policy.max_workers {
            return Ok(ScalingPlan {
                should_spawn: false,
                reason: format!(
                    "pool '{pool}' at max_workers ({}/{})",
                    worker_count, policy.max_workers
                ),
                worker_entry: None,
            });
        }

        // Min bound check
        if worker_count < policy.min_workers {
            let entry = WorkerEntryView {
                pool: BoundedString64::new(pool),
                canister_role: pool_cfg.canister_role,
                created_at_secs,
            };

            return Ok(ScalingPlan {
                should_spawn: true,
                reason: format!(
                    "pool '{pool}' below min_workers (current {worker_count}, min {})",
                    policy.min_workers
                ),
                worker_entry: Some(entry),
            });
        }

        Ok(ScalingPlan {
            should_spawn: false,
            reason: format!(
                "pool '{pool}' within policy bounds (current {worker_count}, min {}, max {})",
                policy.min_workers, policy.max_workers
            ),
            worker_entry: None,
        })
    }

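    /// Convenience wrapper around [`Self::plan_create_worker`] that returns only
    /// the spawn decision.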
    pub fn should_spawn_worker(pool: &str, now_secs: u64) -> Result<bool, Error> {
        Ok(Self::plan_create_worker(pool, now_secs)?.should_spawn)
    }

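    /// Looks up the `ScalePool` configuration for `pool`, returning
    /// `ScalingDisabled` if no scaling config is present and `PoolNotFound`
    /// if the pool is not configured.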
    fn get_scaling_pool_cfg(pool: &str) -> Result<ScalePool, Error> {
        let Some(scaling) = ConfigOps::current_scaling_config()? else {
            return Err(ScalingPolicyError::ScalingDisabled.into());
        };

        let Some(pool_cfg) = scaling.pools.get(pool) else {
            return Err(ScalingPolicyError::PoolNotFound(pool.to_string()).into());
        };

        Ok(pool_cfg.clone())
    }
}