canic_core/config/schema/subnet.rs

use crate::{
    config::schema::{ConfigSchemaError, Validate},
    ids::CanisterRole,
    types::{Cycles, TC},
};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};

mod defaults {
    use super::Cycles;

    pub fn initial_cycles() -> Cycles {
        Cycles::new(5_000_000_000_000)
    }
}

///
/// SubnetConfig
///
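/// A minimal, illustrative TOML sketch: role names are placeholders,
/// `CanisterRole` is assumed to be written as a plain string in TOML, and
/// `initial_cycles` is omitted because its accepted notation depends on
/// `Cycles::from_config`.
///
/// ```rust,ignore
/// let subnet: SubnetConfig = toml::from_str(r#"
///     auto_create = ["ledger"]
///     subnet_directory = ["ledger"]
///
///     [reserve]
///     minimum_size = 1
///
///     [canisters.ledger]
/// "#).unwrap();
/// subnet.validate().unwrap();
/// ```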

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct SubnetConfig {
    #[serde(default)]
    pub canisters: BTreeMap<CanisterRole, CanisterConfig>,

    #[serde(default)]
    pub auto_create: BTreeSet<CanisterRole>,

    #[serde(default)]
    pub subnet_directory: BTreeSet<CanisterRole>,

    #[serde(default)]
    pub reserve: CanisterReserve,
}

impl SubnetConfig {
    /// Returns the directory canisters for this subnet.
    #[must_use]
    pub fn directory_canisters(&self) -> Vec<CanisterRole> {
        self.subnet_directory.iter().cloned().collect()
    }

    /// Returns the canister configuration for the given role, if defined.
    #[must_use]
    pub fn get_canister(&self, ty: &CanisterRole) -> Option<CanisterConfig> {
        self.canisters.get(ty).cloned()
    }
}

impl Validate for SubnetConfig {
    fn validate(&self) -> Result<(), ConfigSchemaError> {
        // --- 1. Validate directory entries ---
        for canister_ty in &self.subnet_directory {
            if !self.canisters.contains_key(canister_ty) {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "subnet directory canister '{canister_ty}' is not defined in subnet",
                )));
            }
        }

        // --- 2. Validate auto-create entries ---
        for canister_ty in &self.auto_create {
            if !self.canisters.contains_key(canister_ty) {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "auto-create canister '{canister_ty}' is not defined in subnet",
                )));
            }
        }

        // --- 3. Validate canister configurations ---
        for (parent_ty, cfg) in &self.canisters {
            if cfg.randomness.enabled && cfg.randomness.reseed_interval_secs == 0 {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "canister '{parent_ty}' randomness reseed_interval_secs must be > 0",
                )));
            }

            // Sharding pools
            if let Some(sharding) = &cfg.sharding {
                for (pool_name, pool) in &sharding.pools {
                    if !self.canisters.contains_key(&pool.canister_type) {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' sharding pool '{pool_name}' references unknown canister type '{ty}'",
                            ty = pool.canister_type
                        )));
                    }

                    if pool.policy.capacity == 0 {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' sharding pool '{pool_name}' has zero capacity; must be > 0",
                        )));
                    }

                    if pool.policy.max_shards == 0 {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' sharding pool '{pool_name}' has max_shards of 0; must be > 0",
                        )));
                    }
                }
            }

            // Scaling pools
            if let Some(scaling) = &cfg.scaling {
                for (pool_name, pool) in &scaling.pools {
                    if !self.canisters.contains_key(&pool.canister_type) {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' scaling pool '{pool_name}' references unknown canister type '{ty}'",
                            ty = pool.canister_type
                        )));
                    }

                    if pool.policy.max_workers != 0
                        && pool.policy.max_workers < pool.policy.min_workers
                    {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' scaling pool '{pool_name}' has max_workers < min_workers (min {}, max {})",
                            pool.policy.min_workers, pool.policy.max_workers
                        )));
                    }
                }
            }
        }

        Ok(())
    }
}

///
/// CanisterReserve
/// defaults to a minimum size of 0
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterReserve {
    pub minimum_size: u8,
}

///
/// CanisterConfig
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterConfig {
    #[serde(
        default = "defaults::initial_cycles",
        deserialize_with = "Cycles::from_config"
    )]
    pub initial_cycles: Cycles,

    #[serde(default)]
    pub topup: Option<CanisterTopup>,

    #[serde(default)]
    pub randomness: RandomnessConfig,

    #[serde(default)]
    pub scaling: Option<ScalingConfig>,

    #[serde(default)]
    pub sharding: Option<ShardingConfig>,
}

///
/// CanisterTopup
///
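/// `CanisterTopup::default()` uses a `10 * TC` threshold and a `5 * TC` amount
/// (see the `Default` impl below). Note that fields omitted in config fall back
/// to `Cycles::default()` via the field-level `#[serde(default)]`, not to this
/// impl.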

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterTopup {
    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub threshold: Cycles,

    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub amount: Cycles,
}

impl Default for CanisterTopup {
    fn default() -> Self {
        Self {
            threshold: Cycles::new(10 * TC),
            amount: Cycles::new(5 * TC),
        }
    }
}

///
/// RandomnessConfig
///
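/// All fields are optional in config and fall back to the `Default` impl below
/// (container-level `#[serde(default)]`); `source` accepts `"ic"` or `"time"`.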

#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct RandomnessConfig {
    pub enabled: bool,
    pub reseed_interval_secs: u64,
    pub source: RandomnessSource,
}

impl Default for RandomnessConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            reseed_interval_secs: 3600,
            source: RandomnessSource::Ic,
        }
    }
}

///
/// RandomnessSource
///

#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum RandomnessSource {
    Ic,
    Time,
}

impl Default for RandomnessSource {
    fn default() -> Self {
        Self::Ic
    }
}

///
/// ScalingConfig
/// (stateless, scaling)
///
/// * Organizes canisters into **worker groups** (e.g. "oracle").
/// * Workers are interchangeable and handle transient tasks (no tenant assignment).
/// * Scaling is about throughput, not capacity.
/// * Hence: `ScalingConfig → pools → ScalePool → ScalePoolPolicy`.
///
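/// An illustrative sketch of a `scaling` block inside a canister entry (role
/// names are placeholders; `CanisterRole` is assumed to be written as a plain
/// string in TOML):
///
/// ```rust,ignore
/// let cfg: CanisterConfig = toml::from_str(r#"
///     [scaling.pools.oracle]
///     canister_type = "oracle"
///
///     [scaling.pools.oracle.policy]
///     min_workers = 1
///     max_workers = 8
/// "#).unwrap();
/// ```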

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalingConfig {
    #[serde(default)]
    pub pools: BTreeMap<String, ScalePool>,
}

///
/// ScalePool
/// One stateless worker group (e.g. "oracle").
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalePool {
    pub canister_type: CanisterRole,

    #[serde(default)]
    pub policy: ScalePoolPolicy,
}

///
/// ScalePoolPolicy
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ScalePoolPolicy {
    /// Minimum number of worker canisters to keep alive
    pub min_workers: u32,

    /// Maximum number of worker canisters to allow (0 is treated as unbounded)
    pub max_workers: u32,
}

impl Default for ScalePoolPolicy {
    fn default() -> Self {
        Self {
            min_workers: 1,
            max_workers: 32,
        }
    }
}

///
/// ShardingConfig
/// (stateful, partitioned)
///
/// * Organizes canisters into named **pools**.
/// * Each pool manages a set of **shards**, and each shard owns a partition of state.
/// * Tenants are assigned to shards via HRW and stay there.
/// * Hence: `ShardingConfig → pools → ShardPool → ShardPoolPolicy`.
///
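/// An illustrative sketch of a `sharding` block inside a canister entry (role
/// names are placeholders; `CanisterRole` is assumed to be written as a plain
/// string in TOML):
///
/// ```rust,ignore
/// let cfg: CanisterConfig = toml::from_str(r#"
///     [sharding.pools.primary]
///     canister_type = "user_shard"
///
///     [sharding.pools.primary.policy]
///     capacity = 1_000
///     max_shards = 4
/// "#).unwrap();
/// ```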

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardingConfig {
    #[serde(default)]
    pub pools: BTreeMap<String, ShardPool>,
}

///
/// ShardPool
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardPool {
    pub canister_type: CanisterRole,

    #[serde(default)]
    pub policy: ShardPoolPolicy,
}

///
/// ShardPoolPolicy
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ShardPoolPolicy {
    pub capacity: u32,
    pub max_shards: u32,
}

impl Default for ShardPoolPolicy {
    fn default() -> Self {
        Self {
            capacity: 1_000,
            max_shards: 4,
        }
    }
}

///
/// TESTS
///

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::{BTreeMap, BTreeSet};

    #[test]
    fn randomness_defaults_to_ic() {
        let cfg = RandomnessConfig::default();

        assert!(cfg.enabled);
        assert_eq!(cfg.reseed_interval_secs, 3600);
        assert_eq!(cfg.source, RandomnessSource::Ic);
    }

    #[test]
    fn randomness_source_parses_ic_and_time() {
        let cfg: RandomnessConfig = toml::from_str("source = \"ic\"").unwrap();
        assert_eq!(cfg.source, RandomnessSource::Ic);

        let cfg: RandomnessConfig = toml::from_str("source = \"time\"").unwrap();
        assert_eq!(cfg.source, RandomnessSource::Time);
    }

    #[test]
    fn auto_create_entries_must_exist_in_subnet() {
        let mut auto_create = BTreeSet::new();
        auto_create.insert(CanisterRole::from("missing_auto_canister"));

        let subnet = SubnetConfig {
            auto_create,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing auto-create type to fail");
    }

    #[test]
    fn sharding_pool_references_must_exist_in_subnet() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_type: CanisterRole::from("missing_shard_worker"),
                policy: ShardPoolPolicy::default(),
            },
        );

        let manager_cfg = CanisterConfig {
            sharding: Some(sharding),
            ..Default::default()
        };

        canisters.insert(managing_role, manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing worker type to fail");
    }

    #[test]
    fn sharding_pool_policy_requires_positive_capacity_and_shards() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_type: managing_role.clone(),
                policy: ShardPoolPolicy {
                    capacity: 0,
                    max_shards: 0,
                },
            },
        );

        canisters.insert(
            managing_role,
            CanisterConfig {
                sharding: Some(sharding),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid sharding policy to fail");
    }

    #[test]
    fn scaling_pool_policy_requires_max_ge_min_when_bounded() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "worker".into(),
            ScalePool {
                canister_type: CanisterRole::from("worker"),
                policy: ScalePoolPolicy {
                    min_workers: 5,
                    max_workers: 3,
                },
            },
        );

        canisters.insert(CanisterRole::from("worker"), CanisterConfig::default());

        let manager_cfg = CanisterConfig {
            scaling: Some(ScalingConfig { pools }),
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("manager"), manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid scaling policy to fail");
    }

    #[test]
    fn randomness_interval_requires_positive_value() {
        let mut canisters = BTreeMap::new();

        let cfg = CanisterConfig {
            randomness: RandomnessConfig {
                enabled: true,
                reseed_interval_secs: 0,
                ..Default::default()
            },
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("app"), cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid randomness interval to fail");
    }
}