canic_core/config/schema/
subnet.rs

1use crate::{
2    config::schema::{ConfigSchemaError, Validate},
3    ids::CanisterRole,
4};
5use canic_types::{Cycles, TC};
6use serde::{Deserialize, Serialize};
7use std::collections::{BTreeMap, BTreeSet};
8
mod defaults {
    use super::Cycles;

    /// Default initial cycle balance for a newly created canister (5T cycles).
    /// Used via serde when a canister config omits `initial_cycles`.
    pub fn initial_cycles() -> Cycles {
        Cycles::new(5_000_000_000_000)
    }
}
16
///
/// SubnetConfig
/// Declarative description of a single subnet: the canisters it hosts, which
/// roles are auto-created or published in the subnet directory, and reserve
/// sizing. Cross-references are checked by the `Validate` impl below.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct SubnetConfig {
    /// Per-role canister configuration, keyed by canister role.
    #[serde(default)]
    pub canisters: BTreeMap<CanisterRole, CanisterConfig>,

    /// Roles to auto-create; validation requires each to exist in `canisters`.
    #[serde(default)]
    pub auto_create: BTreeSet<CanisterRole>,

    /// Roles listed in the subnet directory; validation requires each to
    /// exist in `canisters`.
    #[serde(default)]
    pub subnet_directory: BTreeSet<CanisterRole>,

    /// Canister reserve sizing (see [`CanisterReserve`]).
    #[serde(default)]
    pub reserve: CanisterReserve,
}
36
37impl SubnetConfig {
38    /// Returns the directory canisters for this subnet.
39    #[must_use]
40    pub fn directory_canisters(&self) -> Vec<CanisterRole> {
41        self.subnet_directory.iter().cloned().collect()
42    }
43
44    /// Get a canister configuration by type.
45    #[must_use]
46    pub fn get_canister(&self, ty: &CanisterRole) -> Option<CanisterConfig> {
47        self.canisters.get(ty).cloned()
48    }
49}
50
51impl Validate for SubnetConfig {
52    fn validate(&self) -> Result<(), ConfigSchemaError> {
53        // --- 1. Validate directory entries ---
54        for canister_ty in &self.subnet_directory {
55            if !self.canisters.contains_key(canister_ty) {
56                return Err(ConfigSchemaError::ValidationError(format!(
57                    "subnet directory canister '{canister_ty}' is not defined in subnet",
58                )));
59            }
60        }
61
62        // --- 2. Validate auto-create entries ---
63        for canister_ty in &self.auto_create {
64            if !self.canisters.contains_key(canister_ty) {
65                return Err(ConfigSchemaError::ValidationError(format!(
66                    "auto-create canister '{canister_ty}' is not defined in subnet",
67                )));
68            }
69        }
70
71        // --- 3. Validate canister configurations ---
72        for (parent_ty, cfg) in &self.canisters {
73            // Sharding pools
74            if let Some(sharding) = &cfg.sharding {
75                for (pool_name, pool) in &sharding.pools {
76                    if !self.canisters.contains_key(&pool.canister_type) {
77                        return Err(ConfigSchemaError::ValidationError(format!(
78                            "canister '{parent_ty}' sharding pool '{pool_name}' references unknown canister type '{ty}'",
79                            ty = pool.canister_type
80                        )));
81                    }
82
83                    if pool.policy.capacity == 0 {
84                        return Err(ConfigSchemaError::ValidationError(format!(
85                            "canister '{parent_ty}' sharding pool '{pool_name}' has zero capacity; must be > 0",
86                        )));
87                    }
88
89                    if pool.policy.max_shards == 0 {
90                        return Err(ConfigSchemaError::ValidationError(format!(
91                            "canister '{parent_ty}' sharding pool '{pool_name}' has max_shards of 0; must be > 0",
92                        )));
93                    }
94                }
95            }
96
97            // Scaling pools
98            if let Some(scaling) = &cfg.scaling {
99                for (pool_name, pool) in &scaling.pools {
100                    if !self.canisters.contains_key(&pool.canister_type) {
101                        return Err(ConfigSchemaError::ValidationError(format!(
102                            "canister '{parent_ty}' scaling pool '{pool_name}' references unknown canister type '{ty}'",
103                            ty = pool.canister_type
104                        )));
105                    }
106
107                    if pool.policy.max_workers != 0
108                        && pool.policy.max_workers < pool.policy.min_workers
109                    {
110                        return Err(ConfigSchemaError::ValidationError(format!(
111                            "canister '{parent_ty}' scaling pool '{pool_name}' has max_workers < min_workers (min {}, max {})",
112                            pool.policy.min_workers, pool.policy.max_workers
113                        )));
114                    }
115                }
116            }
117        }
118
119        Ok(())
120    }
121}
122
///
/// CanisterReserve
/// Sizing for the subnet's canister reserve.
/// Defaults to a minimum size of 0 (via `Default` on `u8`).
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterReserve {
    // Minimum number of canisters to keep in reserve.
    pub minimum_size: u8,
}
133
///
/// CanisterConfig
/// Per-canister settings: initial funding, optional top-up policy, and
/// optional scaling (stateless) / sharding (stateful) pool definitions.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterConfig {
    /// Cycles granted at creation; defaults to 5T via `defaults::initial_cycles`.
    #[serde(
        default = "defaults::initial_cycles",
        deserialize_with = "Cycles::from_config"
    )]
    pub initial_cycles: Cycles,

    /// Optional cycle top-up policy; no policy is configured when unset.
    #[serde(default)]
    pub topup: Option<CanisterTopup>,

    /// Optional stateless worker pools (see [`ScalingConfig`]).
    #[serde(default)]
    pub scaling: Option<ScalingConfig>,

    /// Optional stateful shard pools (see [`ShardingConfig`]).
    #[serde(default)]
    pub sharding: Option<ShardingConfig>,
}
156
///
/// CanisterTopup
/// Top-up parameters for a canister. Presumably a top-up of `amount` is
/// triggered once the balance drops below `threshold` — enforced by the
/// runtime, not by this schema; confirm against the top-up code.
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterTopup {
    /// Balance threshold, in cycles.
    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub threshold: Cycles,

    /// Top-up amount, in cycles.
    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub amount: Cycles,
}
170
171impl Default for CanisterTopup {
172    fn default() -> Self {
173        Self {
174            threshold: Cycles::new(10 * TC),
175            amount: Cycles::new(5 * TC),
176        }
177    }
178}
179
///
/// ScalingConfig
/// (stateless, scaling)
///
/// * Organizes canisters into **worker groups** (e.g. "oracle").
/// * Workers are interchangeable and handle transient tasks (no tenant assignment).
/// * Scaling is about throughput, not capacity.
/// * Hence: `WorkerManager → pools → WorkerSpec → WorkerPolicy`.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalingConfig {
    /// Worker pools, keyed by pool name (e.g. "oracle").
    #[serde(default)]
    pub pools: BTreeMap<String, ScalePool>,
}
196
///
/// ScalePool
/// One stateless worker group (e.g. "oracle").
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalePool {
    /// Role of the worker canisters; must be defined in the subnet's
    /// `canisters` map (checked by `SubnetConfig::validate`).
    pub canister_type: CanisterRole,

    /// Worker-count bounds for this pool.
    #[serde(default)]
    pub policy: ScalePoolPolicy,
}
210
///
/// ScalePoolPolicy
/// Validation requires `max_workers >= min_workers` whenever `max_workers`
/// is non-zero; a `max_workers` of 0 skips that check (unbounded).
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ScalePoolPolicy {
    /// Minimum number of worker canisters to keep alive
    pub min_workers: u32,

    /// Maximum number of worker canisters to allow
    /// (0 means no upper bound is enforced by validation)
    pub max_workers: u32,
}
224
225impl Default for ScalePoolPolicy {
226    fn default() -> Self {
227        Self {
228            min_workers: 1,
229            max_workers: 32,
230        }
231    }
232}
233
///
/// ShardingConfig
/// (stateful, partitioned)
///
/// * Organizes canisters into named **pools**.
/// * Each pool manages a set of **shards**, and each shard owns a partition of state.
/// * Tenants are assigned to shards via HRW and stay there.
/// * Hence: `ShardManager → pools → ShardPoolSpec → ShardPoolPolicy`.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardingConfig {
    /// Shard pools, keyed by pool name.
    #[serde(default)]
    pub pools: BTreeMap<String, ShardPool>,
}
250
///
/// ShardPool
/// One stateful shard group within a sharding configuration.
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardPool {
    /// Role of the shard canisters; must be defined in the subnet's
    /// `canisters` map (checked by `SubnetConfig::validate`).
    pub canister_type: CanisterRole,

    /// Capacity and shard-count limits for this pool.
    #[serde(default)]
    pub policy: ShardPoolPolicy,
}
263
///
/// ShardPoolPolicy
/// Both fields must be > 0 (enforced by `SubnetConfig::validate`).
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ShardPoolPolicy {
    /// Capacity per shard; must be > 0. (Unit is presumably tenants per the
    /// HRW tenant-assignment note on `ShardingConfig` — confirm at call site.)
    pub capacity: u32,
    /// Maximum number of shards in the pool; must be > 0.
    pub max_shards: u32,
}
274
275impl Default for ShardPoolPolicy {
276    fn default() -> Self {
277        Self {
278            capacity: 1_000,
279            max_shards: 4,
280        }
281    }
282}
283
284///
285/// TESTS
286///
287
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::{BTreeMap, BTreeSet};

    #[test]
    fn auto_create_entries_must_exist_in_subnet() {
        // An auto-create entry with no matching canister definition is rejected.
        let subnet = SubnetConfig {
            auto_create: BTreeSet::from([CanisterRole::from("missing_auto_canister")]),
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing auto-create type to fail");
    }

    #[test]
    fn sharding_pool_references_must_exist_in_subnet() {
        // A sharding pool pointing at an undefined canister type is rejected.
        let hub: CanisterRole = "shard_hub".into();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_type: CanisterRole::from("missing_shard_worker"),
                policy: ShardPoolPolicy::default(),
            },
        );

        let subnet = SubnetConfig {
            canisters: BTreeMap::from([(
                hub,
                CanisterConfig {
                    sharding: Some(sharding),
                    ..Default::default()
                },
            )]),
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing worker type to fail");
    }

    #[test]
    fn sharding_pool_policy_requires_positive_capacity_and_shards() {
        // Zero capacity and zero max_shards are both invalid.
        let hub: CanisterRole = "shard_hub".into();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_type: hub.clone(),
                policy: ShardPoolPolicy {
                    capacity: 0,
                    max_shards: 0,
                },
            },
        );

        let subnet = SubnetConfig {
            canisters: BTreeMap::from([(
                hub,
                CanisterConfig {
                    sharding: Some(sharding),
                    ..Default::default()
                },
            )]),
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid sharding policy to fail");
    }

    #[test]
    fn scaling_pool_policy_requires_max_ge_min_when_bounded() {
        // A bounded pool (max != 0) with max_workers < min_workers is rejected.
        let mut scaling = ScalingConfig::default();
        scaling.pools.insert(
            "worker".into(),
            ScalePool {
                canister_type: CanisterRole::from("worker"),
                policy: ScalePoolPolicy {
                    min_workers: 5,
                    max_workers: 3,
                },
            },
        );

        let subnet = SubnetConfig {
            canisters: BTreeMap::from([
                (CanisterRole::from("worker"), CanisterConfig::default()),
                (
                    CanisterRole::from("manager"),
                    CanisterConfig {
                        scaling: Some(scaling),
                        ..Default::default()
                    },
                ),
            ]),
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid scaling policy to fail");
    }
}