// canic_core/config/schema/subnet.rs

use crate::{
    config::schema::{ConfigSchemaError, NAME_MAX_BYTES, Validate},
    ids::CanisterRole,
    types::{Cycles, TC},
};
use candid::Principal;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};

mod defaults {
    use super::Cycles;

    pub fn initial_cycles() -> Cycles {
        Cycles::new(5_000_000_000_000)
    }
}

fn validate_role_len(role: &CanisterRole, context: &str) -> Result<(), ConfigSchemaError> {
    if role.as_ref().len() > NAME_MAX_BYTES {
        return Err(ConfigSchemaError::ValidationError(format!(
            "{context} '{role}' exceeds {NAME_MAX_BYTES} bytes",
        )));
    }

    Ok(())
}

///
/// SubnetConfig
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct SubnetConfig {
    #[serde(default)]
    pub canisters: BTreeMap<CanisterRole, CanisterConfig>,

    #[serde(default)]
    pub auto_create: BTreeSet<CanisterRole>,

    #[serde(default)]
    pub subnet_directory: BTreeSet<CanisterRole>,

    #[serde(default)]
    pub pool: CanisterPool,
}

impl SubnetConfig {
    /// Returns the directory canisters for this subnet.
    #[must_use]
    pub fn directory_canisters(&self) -> Vec<CanisterRole> {
        self.subnet_directory.iter().cloned().collect()
    }

    /// Get a canister configuration by role.
    #[must_use]
    pub fn get_canister(&self, role: &CanisterRole) -> Option<CanisterConfig> {
        self.canisters.get(role).cloned()
    }
}

impl Validate for SubnetConfig {
    fn validate(&self) -> Result<(), ConfigSchemaError> {
        // --- 1. Validate directory entries ---
        for canister_role in &self.subnet_directory {
            validate_role_len(canister_role, "subnet directory canister")?;
            if !self.canisters.contains_key(canister_role) {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "subnet directory canister '{canister_role}' is not defined in subnet",
                )));
            }
        }

        // --- 2. Validate auto-create entries ---
        for canister_role in &self.auto_create {
            validate_role_len(canister_role, "auto-create canister")?;
            if !self.canisters.contains_key(canister_role) {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "auto-create canister '{canister_role}' is not defined in subnet",
                )));
            }
        }

        // --- 3. Validate canister configurations ---
        for (parent_role, cfg) in &self.canisters {
            validate_role_len(parent_role, "canister")?;
            if cfg.randomness.enabled && cfg.randomness.reseed_interval_secs == 0 {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "canister '{parent_role}' randomness reseed_interval_secs must be > 0",
                )));
            }

            // Sharding pools
            if let Some(sharding) = &cfg.sharding {
                for (pool_name, pool) in &sharding.pools {
                    if pool_name.len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' name exceeds {NAME_MAX_BYTES} bytes",
                        )));
                    }

                    if pool.canister_role.as_ref().len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' canister role '{role}' exceeds {NAME_MAX_BYTES} bytes",
                            role = pool.canister_role
                        )));
                    }

                    if !self.canisters.contains_key(&pool.canister_role) {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' references unknown canister role '{role}'",
                            role = pool.canister_role
                        )));
                    }

                    if pool.policy.capacity == 0 {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' has zero capacity; must be > 0",
                        )));
                    }

                    if pool.policy.max_shards == 0 {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' sharding pool '{pool_name}' has max_shards of 0; must be > 0",
                        )));
                    }
                }
            }

            // Scaling pools
            if let Some(scaling) = &cfg.scaling {
                for (pool_name, pool) in &scaling.pools {
                    if pool_name.len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' scaling pool '{pool_name}' name exceeds {NAME_MAX_BYTES} bytes",
                        )));
                    }

                    if pool.canister_role.as_ref().len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' scaling pool '{pool_name}' canister role '{role}' exceeds {NAME_MAX_BYTES} bytes",
                            role = pool.canister_role
                        )));
                    }

                    if !self.canisters.contains_key(&pool.canister_role) {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' scaling pool '{pool_name}' references unknown canister role '{role}'",
                            role = pool.canister_role
                        )));
                    }

                    if pool.policy.max_workers != 0
                        && pool.policy.max_workers < pool.policy.min_workers
                    {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_role}' scaling pool '{pool_name}' has max_workers < min_workers (min {}, max {})",
                            pool.policy.min_workers, pool.policy.max_workers
                        )));
                    }
                }
            }
        }

        Ok(())
    }
}
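
// Illustrative TOML sketch of a `SubnetConfig` table that passes the checks
// above (role names are hypothetical, and this assumes roles deserialize from
// plain strings; `initial_cycles` is omitted because its accepted format is
// determined by `Cycles::from_config`):
//
// auto_create = ["app"]
// subnet_directory = ["app"]
//
// [pool]
// minimum_size = 0
//
// [canisters.app]
//
// [canisters.app.randomness]
// enabled = true
// reseed_interval_secs = 3600
// source = "ic"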

///
/// CanisterPool
/// Defaults to a minimum pool size of 0.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterPool {
    /// Minimum number of canisters to keep in the pool.
    pub minimum_size: u8,

    /// Principals of existing canisters to import into the pool.
    #[serde(default)]
    pub import: Vec<Principal>,
}

///
/// CanisterConfig
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterConfig {
    #[serde(
        default = "defaults::initial_cycles",
        deserialize_with = "Cycles::from_config"
    )]
    pub initial_cycles: Cycles,

    #[serde(default)]
    pub topup: Option<CanisterTopup>,

    #[serde(default)]
    pub randomness: RandomnessConfig,

    #[serde(default)]
    pub scaling: Option<ScalingConfig>,

    #[serde(default)]
    pub sharding: Option<ShardingConfig>,
}

///
/// CanisterTopup
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterTopup {
    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub threshold: Cycles,

    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub amount: Cycles,
}

impl Default for CanisterTopup {
    fn default() -> Self {
        Self {
            threshold: Cycles::new(10 * TC),
            amount: Cycles::new(5 * TC),
        }
    }
}

///
/// RandomnessConfig
///

#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct RandomnessConfig {
    /// Whether randomness is enabled for this canister.
    pub enabled: bool,

    /// Interval between reseeds, in seconds; must be > 0 when enabled.
    pub reseed_interval_secs: u64,

    /// Entropy source used for reseeding.
    pub source: RandomnessSource,
}

impl Default for RandomnessConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            reseed_interval_secs: 3600,
            source: RandomnessSource::Ic,
        }
    }
}

///
/// RandomnessSource
///

#[derive(Clone, Copy, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum RandomnessSource {
    #[default]
    Ic,
    Time,
}

///
/// ScalingConfig
/// (stateless, scaling)
///
/// * Organizes canisters into **worker groups** (e.g. "oracle").
/// * Workers are interchangeable and handle transient tasks (no tenant assignment).
/// * Scaling is about throughput, not capacity.
/// * Hence: `WorkerManager → pools → WorkerSpec → WorkerPolicy`.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalingConfig {
    #[serde(default)]
    pub pools: BTreeMap<String, ScalePool>,
}

///
/// ScalePool
/// One stateless worker group (e.g. "oracle").
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalePool {
    /// Role of the worker canisters in this pool; must be defined in the subnet.
    pub canister_role: CanisterRole,

    #[serde(default)]
    pub policy: ScalePoolPolicy,
}

///
/// ScalePoolPolicy
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ScalePoolPolicy {
    /// Minimum number of worker canisters to keep alive.
    pub min_workers: u32,

    /// Maximum number of worker canisters to allow (0 = unbounded).
    pub max_workers: u32,
}

impl Default for ScalePoolPolicy {
    fn default() -> Self {
        Self {
            min_workers: 1,
            max_workers: 32,
        }
    }
}
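
// Illustrative TOML sketch of a scaling section (role and pool names are
// hypothetical; validation also requires a matching `[canisters.worker]`
// entry in the same subnet):
//
// [canisters.manager.scaling.pools.oracle]
// canister_role = "worker"
//
// [canisters.manager.scaling.pools.oracle.policy]
// min_workers = 1
// max_workers = 32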

///
/// ShardingConfig
/// (stateful, partitioned)
///
/// * Organizes canisters into named **pools**.
/// * Each pool manages a set of **shards**, and each shard owns a partition of state.
/// * Tenants are assigned to shards via HRW (highest-random-weight hashing) and stay there.
/// * Hence: `ShardManager → pools → ShardPoolSpec → ShardPoolPolicy`.
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardingConfig {
    #[serde(default)]
    pub pools: BTreeMap<String, ShardPool>,
}

///
/// ShardPool
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardPool {
    /// Role of the shard canisters in this pool; must be defined in the subnet.
    pub canister_role: CanisterRole,

    #[serde(default)]
    pub policy: ShardPoolPolicy,
}

///
/// ShardPoolPolicy
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ShardPoolPolicy {
    /// Capacity of each shard; must be > 0.
    pub capacity: u32,

    /// Maximum number of shards to allow; must be > 0.
    pub max_shards: u32,
}

impl Default for ShardPoolPolicy {
    fn default() -> Self {
        Self {
            capacity: 1_000,
            max_shards: 4,
        }
    }
}
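
// Illustrative TOML sketch of a sharding section (role and pool names are
// hypothetical; validation also requires a matching `[canisters.shard_worker]`
// entry in the same subnet):
//
// [canisters.shard_hub.sharding.pools.primary]
// canister_role = "shard_worker"
//
// [canisters.shard_hub.sharding.pools.primary.policy]
// capacity = 1_000
// max_shards = 4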

///
/// TESTS
///

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::{BTreeMap, BTreeSet};

    #[test]
    fn randomness_defaults_to_ic() {
        let cfg = RandomnessConfig::default();

        assert!(cfg.enabled);
        assert_eq!(cfg.reseed_interval_secs, 3600);
        assert_eq!(cfg.source, RandomnessSource::Ic);
    }

    #[test]
    fn randomness_source_parses_ic_and_time() {
        let cfg: RandomnessConfig = toml::from_str("source = \"ic\"").unwrap();
        assert_eq!(cfg.source, RandomnessSource::Ic);

        let cfg: RandomnessConfig = toml::from_str("source = \"time\"").unwrap();
        assert_eq!(cfg.source, RandomnessSource::Time);
    }

    #[test]
    fn auto_create_entries_must_exist_in_subnet() {
        let mut auto_create = BTreeSet::new();
        auto_create.insert(CanisterRole::from("missing_auto_canister"));

        let subnet = SubnetConfig {
            auto_create,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing auto-create role to fail");
    }
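
    // Added test mirroring the auto-create check above: directory entries must
    // also reference roles that are defined in the subnet.
    #[test]
    fn subnet_directory_entries_must_exist_in_subnet() {
        let mut subnet_directory = BTreeSet::new();
        subnet_directory.insert(CanisterRole::from("missing_directory_canister"));

        let subnet = SubnetConfig {
            subnet_directory,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing directory role to fail");
    }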

    #[test]
    fn sharding_pool_references_must_exist_in_subnet() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_role: CanisterRole::from("missing_shard_worker"),
                policy: ShardPoolPolicy::default(),
            },
        );

        let manager_cfg = CanisterConfig {
            sharding: Some(sharding),
            ..Default::default()
        };

        canisters.insert(managing_role, manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing worker role to fail");
    }
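
    // Added counterpart of the sharding test above: scaling pool references
    // must also point at roles defined in the subnet.
    #[test]
    fn scaling_pool_references_must_exist_in_subnet() {
        let managing_role: CanisterRole = "scale_hub".into();
        let mut canisters = BTreeMap::new();

        let mut scaling = ScalingConfig::default();
        scaling.pools.insert(
            "oracle".into(),
            ScalePool {
                canister_role: CanisterRole::from("missing_scale_worker"),
                policy: ScalePoolPolicy::default(),
            },
        );

        canisters.insert(
            managing_role,
            CanisterConfig {
                scaling: Some(scaling),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing worker role to fail");
    }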

    #[test]
    fn sharding_pool_policy_requires_positive_capacity_and_shards() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_role: managing_role.clone(),
                policy: ShardPoolPolicy {
                    capacity: 0,
                    max_shards: 0,
                },
            },
        );

        canisters.insert(
            managing_role,
            CanisterConfig {
                sharding: Some(sharding),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid sharding policy to fail");
    }

    #[test]
    fn canister_role_name_must_fit_bound() {
        let long_role = "a".repeat(NAME_MAX_BYTES + 1);
        let mut canisters = BTreeMap::new();
        canisters.insert(CanisterRole::from(long_role), CanisterConfig::default());

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected canister role length to fail");
    }

    #[test]
    fn sharding_pool_name_must_fit_bound() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "a".repeat(NAME_MAX_BYTES + 1),
            ShardPool {
                canister_role: managing_role.clone(),
                policy: ShardPoolPolicy::default(),
            },
        );

        canisters.insert(
            managing_role,
            CanisterConfig {
                sharding: Some(sharding),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected sharding pool name length to fail");
    }

    #[test]
    fn scaling_pool_policy_requires_max_ge_min_when_bounded() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "worker".into(),
            ScalePool {
                canister_role: CanisterRole::from("worker"),
                policy: ScalePoolPolicy {
                    min_workers: 5,
                    max_workers: 3,
                },
            },
        );

        canisters.insert(CanisterRole::from("worker"), CanisterConfig::default());

        let manager_cfg = CanisterConfig {
            scaling: Some(ScalingConfig { pools }),
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("manager"), manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid scaling policy to fail");
    }

    #[test]
    fn scaling_pool_name_must_fit_bound() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "a".repeat(NAME_MAX_BYTES + 1),
            ScalePool {
                canister_role: CanisterRole::from("worker"),
                policy: ScalePoolPolicy::default(),
            },
        );

        canisters.insert(CanisterRole::from("worker"), CanisterConfig::default());

        let manager_cfg = CanisterConfig {
            scaling: Some(ScalingConfig { pools }),
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("manager"), manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected scaling pool name length to fail");
    }

    #[test]
    fn randomness_interval_requires_positive_value() {
        let mut canisters = BTreeMap::new();

        let cfg = CanisterConfig {
            randomness: RandomnessConfig {
                enabled: true,
                reseed_interval_secs: 0,
                ..Default::default()
            },
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("app"), cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid randomness interval to fail");
    }
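
    // Added happy-path test: a subnet whose directory and auto-create entries
    // reference a defined canister validates cleanly, and the accessors
    // resolve that canister.
    #[test]
    fn valid_subnet_passes_validation_and_accessors_resolve() {
        let role = CanisterRole::from("app");

        let mut canisters = BTreeMap::new();
        canisters.insert(role.clone(), CanisterConfig::default());

        let mut auto_create = BTreeSet::new();
        auto_create.insert(role.clone());

        let mut subnet_directory = BTreeSet::new();
        subnet_directory.insert(role.clone());

        let subnet = SubnetConfig {
            canisters,
            auto_create,
            subnet_directory,
            ..Default::default()
        };

        assert!(subnet.validate().is_ok());
        assert!(subnet.get_canister(&role).is_some());
        assert!(subnet.directory_canisters().contains(&role));
    }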
}