canic_core/config/schema/subnet.rs

use crate::{
    config::schema::{ConfigSchemaError, NAME_MAX_BYTES, Validate},
    ids::CanisterRole,
    types::{Cycles, TC},
};
use candid::Principal;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};

mod defaults {
    use super::Cycles;

    pub fn initial_cycles() -> Cycles {
        Cycles::new(5_000_000_000_000)
    }
}

fn validate_role_len(role: &CanisterRole, context: &str) -> Result<(), ConfigSchemaError> {
    if role.as_ref().len() > NAME_MAX_BYTES {
        return Err(ConfigSchemaError::ValidationError(format!(
            "{context} '{role}' exceeds {NAME_MAX_BYTES} bytes",
        )));
    }

    Ok(())
}

///
/// SubnetConfig
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct SubnetConfig {
    #[serde(default)]
    pub canisters: BTreeMap<CanisterRole, CanisterConfig>,

    #[serde(default)]
    pub auto_create: BTreeSet<CanisterRole>,

    #[serde(default)]
    pub subnet_directory: BTreeSet<CanisterRole>,

    #[serde(default)]
    pub pool: CanisterPool,
}

impl SubnetConfig {
    /// Returns the directory canisters for this subnet.
    #[must_use]
    pub fn directory_canisters(&self) -> Vec<CanisterRole> {
        self.subnet_directory.iter().cloned().collect()
    }

    /// Returns the canister configuration for `role`, if one is defined.
    #[must_use]
    pub fn get_canister(&self, role: &CanisterRole) -> Option<CanisterConfig> {
        self.canisters.get(role).cloned()
    }
}

impl Validate for SubnetConfig {
    fn validate(&self) -> Result<(), ConfigSchemaError> {
        // --- 1. Validate directory entries ---
        for canister_role in &self.subnet_directory {
            validate_role_len(canister_role, "subnet directory canister")?;
            if !self.canisters.contains_key(canister_role) {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "subnet directory canister '{canister_role}' is not defined in subnet",
                )));
            }
        }

        // --- 2. Validate auto-create entries ---
        for canister_role in &self.auto_create {
            validate_role_len(canister_role, "auto-create canister")?;
            if !self.canisters.contains_key(canister_role) {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "auto-create canister '{canister_role}' is not defined in subnet",
                )));
            }
        }

        // --- 3. Validate canister configurations ---
        for (parent_role, cfg) in &self.canisters {
            validate_role_len(parent_role, "canister")?;
            if cfg.randomness.enabled && cfg.randomness.reseed_interval_secs == 0 {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "canister '{parent_role}' randomness reseed_interval_secs must be > 0",
                )));
            }

            // Sharding pools
            if let Some(sharding) = &cfg.sharding {
                for (pool_name, pool) in &sharding.pools {
                    if pool_name.len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' name exceeds {NAME_MAX_BYTES} bytes",
                        )));
                    }

                    if pool.canister_role.as_ref().len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' canister role '{role}' exceeds {NAME_MAX_BYTES} bytes",
                            role = pool.canister_role
                        )));
                    }

                    if !self.canisters.contains_key(&pool.canister_role) {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' references unknown canister role '{role}'",
                            role = pool.canister_role
                        )));
                    }

                    if pool.policy.capacity == 0 {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' has zero capacity; must be > 0",
                        )));
                    }

                    if pool.policy.max_shards == 0 {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' has max_shards of 0; must be > 0",
                        )));
                    }
                }
            }

            // Scaling pools
            if let Some(scaling) = &cfg.scaling {
                for (pool_name, pool) in &scaling.pools {
                    if pool_name.len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' scaling pool '{pool_name}' name exceeds {NAME_MAX_BYTES} bytes",
                        )));
                    }

                    if pool.canister_role.as_ref().len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' scaling pool '{pool_name}' canister role '{role}' exceeds {NAME_MAX_BYTES} bytes",
                            role = pool.canister_role
                        )));
                    }

                    if !self.canisters.contains_key(&pool.canister_role) {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' scaling pool '{pool_name}' references unknown canister role '{role}'",
                            role = pool.canister_role
                        )));
                    }

                    if pool.policy.max_workers != 0
                        && pool.policy.max_workers < pool.policy.min_workers
                    {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' scaling pool '{pool_name}' has max_workers < min_workers (min {}, max {})",
                            pool.policy.min_workers, pool.policy.max_workers
                        )));
                    }
                }
            }
        }

        Ok(())
    }
}

///
/// PoolImport
/// Per-environment import lists for canister pools.
///
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct PoolImport {
    #[serde(default)]
    pub local: Vec<Principal>,
    #[serde(default)]
    pub ic: Vec<Principal>,
}

///
/// CanisterPool
/// Defaults to a minimum size of 0.
///
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterPool {
    pub minimum_size: u8,
    #[serde(default)]
    pub import: PoolImport,
}

///
/// CanisterConfig
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterConfig {
    #[serde(
        default = "defaults::initial_cycles",
        deserialize_with = "Cycles::from_config"
    )]
    pub initial_cycles: Cycles,

    #[serde(default)]
    pub topup: Option<CanisterTopup>,

    #[serde(default)]
    pub randomness: RandomnessConfig,

    #[serde(default)]
    pub scaling: Option<ScalingConfig>,

    #[serde(default)]
    pub sharding: Option<ShardingConfig>,
}

///
/// CanisterTopup
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterTopup {
    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub threshold: Cycles,

    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub amount: Cycles,
}

impl Default for CanisterTopup {
    fn default() -> Self {
        Self {
            threshold: Cycles::new(10 * TC),
            amount: Cycles::new(5 * TC),
        }
    }
}

///
/// RandomnessConfig
///

#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct RandomnessConfig {
    pub enabled: bool,
    pub reseed_interval_secs: u64,
    pub source: RandomnessSource,
}

impl Default for RandomnessConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            reseed_interval_secs: 3600,
            source: RandomnessSource::Ic,
        }
    }
}

///
/// RandomnessSource
///

#[derive(Clone, Copy, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum RandomnessSource {
    #[default]
    Ic,
    Time,
}

///
/// ScalingConfig
/// (stateless, scaling)
///
/// * Organizes canisters into **worker groups** (e.g. "oracle").
/// * Workers are interchangeable and handle transient tasks (no tenant assignment).
/// * Scaling is about throughput, not capacity.
/// * Hence: `WorkerManager → pools → WorkerSpec → WorkerPolicy`.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalingConfig {
    #[serde(default)]
    pub pools: BTreeMap<String, ScalePool>,
}

///
/// ScalePool
/// One stateless worker group (e.g. "oracle").
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalePool {
    pub canister_role: CanisterRole,

    #[serde(default)]
    pub policy: ScalePoolPolicy,
}

///
/// ScalePoolPolicy
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ScalePoolPolicy {
    /// Minimum number of worker canisters to keep alive
    pub min_workers: u32,

    /// Maximum number of worker canisters to allow
    pub max_workers: u32,
}

impl Default for ScalePoolPolicy {
    fn default() -> Self {
        Self {
            min_workers: 1,
            max_workers: 32,
        }
    }
}

///
/// ShardingConfig
/// (stateful, partitioned)
///
/// * Organizes canisters into named **pools**.
/// * Each pool manages a set of **shards**, and each shard owns a partition of state.
/// * Tenants are assigned to shards via HRW (highest random weight) hashing and stay there.
/// * Hence: `ShardManager → pools → ShardPoolSpec → ShardPoolPolicy`.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardingConfig {
    #[serde(default)]
    pub pools: BTreeMap<String, ShardPool>,
}

///
/// ShardPool
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardPool {
    pub canister_role: CanisterRole,

    #[serde(default)]
    pub policy: ShardPoolPolicy,
}

///
/// ShardPoolPolicy
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ShardPoolPolicy {
    pub capacity: u32,
    pub max_shards: u32,
}

impl Default for ShardPoolPolicy {
    fn default() -> Self {
        Self {
            capacity: 1_000,
            max_shards: 4,
        }
    }
}

///
/// TESTS
///

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::{BTreeMap, BTreeSet};

    #[test]
    fn randomness_defaults_to_ic() {
        let cfg = RandomnessConfig::default();

        assert!(cfg.enabled);
        assert_eq!(cfg.reseed_interval_secs, 3600);
        assert_eq!(cfg.source, RandomnessSource::Ic);
    }

    #[test]
    fn randomness_source_parses_ic_and_time() {
        let cfg: RandomnessConfig = toml::from_str("source = \"ic\"").unwrap();
        assert_eq!(cfg.source, RandomnessSource::Ic);

        let cfg: RandomnessConfig = toml::from_str("source = \"time\"").unwrap();
        assert_eq!(cfg.source, RandomnessSource::Time);
    }
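
    // Added sketch: the empty default config has no directory, auto-create, or
    // canister entries to cross-check, so validation is expected to pass.
    #[test]
    fn default_subnet_config_validates() {
        assert!(SubnetConfig::default().validate().is_ok());
    }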

    #[test]
    fn auto_create_entries_must_exist_in_subnet() {
        let mut auto_create = BTreeSet::new();
        auto_create.insert(CanisterRole::from("missing_auto_canister"));

        let subnet = SubnetConfig {
            auto_create,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing auto-create role to fail");
    }
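
    // Added companion to the auto-create check above: a subnet_directory entry
    // naming a role that is not defined in the subnet should be rejected too.
    #[test]
    fn subnet_directory_entries_must_exist_in_subnet() {
        let mut subnet_directory = BTreeSet::new();
        subnet_directory.insert(CanisterRole::from("missing_dir_canister"));

        let subnet = SubnetConfig {
            subnet_directory,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing directory role to fail");
    }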

    #[test]
    fn sharding_pool_references_must_exist_in_subnet() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_role: CanisterRole::from("missing_shard_worker"),
                policy: ShardPoolPolicy::default(),
            },
        );

        let manager_cfg = CanisterConfig {
            sharding: Some(sharding),
            ..Default::default()
        };

        canisters.insert(managing_role, manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing worker role to fail");
    }

    #[test]
    fn sharding_pool_policy_requires_positive_capacity_and_shards() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_role: managing_role.clone(),
                policy: ShardPoolPolicy {
                    capacity: 0,
                    max_shards: 0,
                },
            },
        );

        canisters.insert(
            managing_role,
            CanisterConfig {
                sharding: Some(sharding),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid sharding policy to fail");
    }
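
    // Added happy-path sketch: a manager canister whose default-policy sharding and
    // scaling pools reference a defined worker role, with the manager also listed in
    // the directory and auto-create sets. Every cross-reference resolves and every
    // policy value is non-zero, so validation is expected to succeed.
    #[test]
    fn fully_wired_subnet_config_validates() {
        let worker_role = CanisterRole::from("worker");
        let manager_role = CanisterRole::from("manager");

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_role: worker_role.clone(),
                policy: ShardPoolPolicy::default(),
            },
        );

        let mut scaling = ScalingConfig::default();
        scaling.pools.insert(
            "oracle".into(),
            ScalePool {
                canister_role: worker_role.clone(),
                policy: ScalePoolPolicy::default(),
            },
        );

        let mut canisters = BTreeMap::new();
        canisters.insert(worker_role, CanisterConfig::default());
        canisters.insert(
            manager_role.clone(),
            CanisterConfig {
                sharding: Some(sharding),
                scaling: Some(scaling),
                ..Default::default()
            },
        );

        let mut auto_create = BTreeSet::new();
        auto_create.insert(manager_role.clone());

        let mut subnet_directory = BTreeSet::new();
        subnet_directory.insert(manager_role);

        let subnet = SubnetConfig {
            canisters,
            auto_create,
            subnet_directory,
            ..Default::default()
        };

        assert!(subnet.validate().is_ok());
    }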

    #[test]
    fn canister_role_name_must_fit_bound() {
        let long_role = "a".repeat(NAME_MAX_BYTES + 1);
        let mut canisters = BTreeMap::new();
        canisters.insert(CanisterRole::from(long_role), CanisterConfig::default());

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected canister role length to fail");
    }

    #[test]
    fn sharding_pool_name_must_fit_bound() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "a".repeat(NAME_MAX_BYTES + 1),
            ShardPool {
                canister_role: managing_role.clone(),
                policy: ShardPoolPolicy::default(),
            },
        );

        canisters.insert(
            managing_role,
            CanisterConfig {
                sharding: Some(sharding),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected sharding pool name length to fail");
    }
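
    // Added mirror of the sharding reference test: a scaling pool pointing at a role
    // that is not defined in the subnet should also be rejected.
    #[test]
    fn scaling_pool_references_must_exist_in_subnet() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "oracle".into(),
            ScalePool {
                canister_role: CanisterRole::from("missing_scale_worker"),
                policy: ScalePoolPolicy::default(),
            },
        );

        canisters.insert(
            CanisterRole::from("manager"),
            CanisterConfig {
                scaling: Some(ScalingConfig { pools }),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing scaling worker role to fail");
    }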

    #[test]
    fn scaling_pool_policy_requires_max_ge_min_when_bounded() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "worker".into(),
            ScalePool {
                canister_role: CanisterRole::from("worker"),
                policy: ScalePoolPolicy {
                    min_workers: 5,
                    max_workers: 3,
                },
            },
        );

        canisters.insert(CanisterRole::from("worker"), CanisterConfig::default());

        let manager_cfg = CanisterConfig {
            scaling: Some(ScalingConfig { pools }),
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("manager"), manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid scaling policy to fail");
    }
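
    // Added sketch of the unbounded case implied by the check above: a max_workers of
    // 0 skips the max >= min comparison, so a pool with only a minimum bound should
    // still validate.
    #[test]
    fn scaling_pool_policy_allows_unbounded_max_workers() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "worker".into(),
            ScalePool {
                canister_role: CanisterRole::from("worker"),
                policy: ScalePoolPolicy {
                    min_workers: 5,
                    max_workers: 0,
                },
            },
        );

        canisters.insert(CanisterRole::from("worker"), CanisterConfig::default());
        canisters.insert(
            CanisterRole::from("manager"),
            CanisterConfig {
                scaling: Some(ScalingConfig { pools }),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        assert!(subnet.validate().is_ok());
    }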

    #[test]
    fn scaling_pool_name_must_fit_bound() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "a".repeat(NAME_MAX_BYTES + 1),
            ScalePool {
                canister_role: CanisterRole::from("worker"),
                policy: ScalePoolPolicy::default(),
            },
        );

        canisters.insert(CanisterRole::from("worker"), CanisterConfig::default());

        let manager_cfg = CanisterConfig {
            scaling: Some(ScalingConfig { pools }),
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("manager"), manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected scaling pool name length to fail");
    }
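
    // Added parsing sketch: `ScalePoolPolicy` carries a container-level
    // `#[serde(default)]`, so fields omitted from the TOML should fall back to the
    // policy defaults (max_workers = 32 here).
    #[test]
    fn scale_pool_policy_fills_missing_fields_from_defaults() {
        let policy: ScalePoolPolicy = toml::from_str("min_workers = 2").unwrap();

        assert_eq!(policy.min_workers, 2);
        assert_eq!(policy.max_workers, 32);
    }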

    #[test]
    fn randomness_interval_requires_positive_value() {
        let mut canisters = BTreeMap::new();

        let cfg = CanisterConfig {
            randomness: RandomnessConfig {
                enabled: true,
                reseed_interval_secs: 0,
                ..Default::default()
            },
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("app"), cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid randomness interval to fail");
    }
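
    // Added counterpart to the check above: when randomness is disabled, the reseed
    // interval is not inspected, so a zero value should be accepted.
    #[test]
    fn randomness_interval_ignored_when_disabled() {
        let mut canisters = BTreeMap::new();
        canisters.insert(
            CanisterRole::from("app"),
            CanisterConfig {
                randomness: RandomnessConfig {
                    enabled: false,
                    reseed_interval_secs: 0,
                    ..Default::default()
                },
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        assert!(subnet.validate().is_ok());
    }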
}