canic_core/config/schema/subnet.rs

use crate::{
    config::schema::{ConfigSchemaError, NAME_MAX_BYTES, Validate},
    ids::CanisterRole,
    types::{Cycles, TC},
};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};

mod defaults {
    use super::Cycles;

    pub fn initial_cycles() -> Cycles {
        Cycles::new(5_000_000_000_000)
    }
}

fn validate_role_len(role: &CanisterRole, context: &str) -> Result<(), ConfigSchemaError> {
    if role.as_ref().len() > NAME_MAX_BYTES {
        return Err(ConfigSchemaError::ValidationError(format!(
            "{context} '{role}' exceeds {NAME_MAX_BYTES} bytes",
        )));
    }

    Ok(())
}

///
/// SubnetConfig
///
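/// A minimal illustrative sketch of a subnet that passes `validate` (the
/// "app" role name is hypothetical, not part of the schema):
///
/// ```ignore
/// let mut canisters = BTreeMap::new();
/// canisters.insert(CanisterRole::from("app"), CanisterConfig::default());
///
/// let subnet = SubnetConfig {
///     canisters,
///     ..Default::default()
/// };
/// assert!(subnet.validate().is_ok());
/// ```
///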

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct SubnetConfig {
    #[serde(default)]
    pub canisters: BTreeMap<CanisterRole, CanisterConfig>,

    #[serde(default)]
    pub auto_create: BTreeSet<CanisterRole>,

    #[serde(default)]
    pub subnet_directory: BTreeSet<CanisterRole>,

    #[serde(default)]
    pub pool: CanisterPool,
}

impl SubnetConfig {
    /// Returns the directory canisters for this subnet.
    #[must_use]
    pub fn directory_canisters(&self) -> Vec<CanisterRole> {
        self.subnet_directory.iter().cloned().collect()
    }

    /// Get a canister configuration by type.
    #[must_use]
    pub fn get_canister(&self, ty: &CanisterRole) -> Option<CanisterConfig> {
        self.canisters.get(ty).cloned()
    }
}

impl Validate for SubnetConfig {
    fn validate(&self) -> Result<(), ConfigSchemaError> {
        // --- 1. Validate directory entries ---
        for canister_ty in &self.subnet_directory {
            validate_role_len(canister_ty, "subnet directory canister")?;
            if !self.canisters.contains_key(canister_ty) {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "subnet directory canister '{canister_ty}' is not defined in subnet",
                )));
            }
        }

        // --- 2. Validate auto-create entries ---
        for canister_ty in &self.auto_create {
            validate_role_len(canister_ty, "auto-create canister")?;
            if !self.canisters.contains_key(canister_ty) {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "auto-create canister '{canister_ty}' is not defined in subnet",
                )));
            }
        }

        // --- 3. Validate canister configurations ---
        for (parent_ty, cfg) in &self.canisters {
            validate_role_len(parent_ty, "canister")?;
            if cfg.randomness.enabled && cfg.randomness.reseed_interval_secs == 0 {
                return Err(ConfigSchemaError::ValidationError(format!(
                    "canister '{parent_ty}' randomness reseed_interval_secs must be > 0",
                )));
            }

            // Sharding pools
            if let Some(sharding) = &cfg.sharding {
                for (pool_name, pool) in &sharding.pools {
                    if pool_name.len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' sharding pool '{pool_name}' name exceeds {NAME_MAX_BYTES} bytes",
                        )));
                    }

                    if pool.canister_type.as_ref().len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' sharding pool '{pool_name}' canister type '{ty}' exceeds {NAME_MAX_BYTES} bytes",
                            ty = pool.canister_type
                        )));
                    }

                    if !self.canisters.contains_key(&pool.canister_type) {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' sharding pool '{pool_name}' references unknown canister type '{ty}'",
                            ty = pool.canister_type
                        )));
                    }

                    if pool.policy.capacity == 0 {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' sharding pool '{pool_name}' has zero capacity; must be > 0",
                        )));
                    }

                    if pool.policy.max_shards == 0 {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' sharding pool '{pool_name}' has max_shards of 0; must be > 0",
                        )));
                    }
                }
            }

            // Scaling pools
            if let Some(scaling) = &cfg.scaling {
                for (pool_name, pool) in &scaling.pools {
                    if pool_name.len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' scaling pool '{pool_name}' name exceeds {NAME_MAX_BYTES} bytes",
                        )));
                    }

                    if pool.canister_type.as_ref().len() > NAME_MAX_BYTES {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' scaling pool '{pool_name}' canister type '{ty}' exceeds {NAME_MAX_BYTES} bytes",
                            ty = pool.canister_type
                        )));
                    }

                    if !self.canisters.contains_key(&pool.canister_type) {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' scaling pool '{pool_name}' references unknown canister type '{ty}'",
                            ty = pool.canister_type
                        )));
                    }

                    if pool.policy.max_workers != 0
                        && pool.policy.max_workers < pool.policy.min_workers
                    {
                        return Err(ConfigSchemaError::ValidationError(format!(
                            "canister '{parent_ty}' scaling pool '{pool_name}' has max_workers < min_workers (min {}, max {})",
                            pool.policy.min_workers, pool.policy.max_workers
                        )));
                    }
                }
            }
        }

        Ok(())
    }
}

///
/// CanisterPool
/// defaults to a minimum size of 0
///

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterPool {
    pub minimum_size: u8,
}

///
/// CanisterConfig
///
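/// A hedged sketch of a per-canister override; the values are illustrative
/// only, with the remaining fields left at their defaults:
///
/// ```ignore
/// let cfg = CanisterConfig {
///     initial_cycles: Cycles::new(10 * TC),
///     topup: Some(CanisterTopup::default()),
///     ..Default::default()
/// };
/// ```
///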

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterConfig {
    #[serde(
        default = "defaults::initial_cycles",
        deserialize_with = "Cycles::from_config"
    )]
    pub initial_cycles: Cycles,

    #[serde(default)]
    pub topup: Option<CanisterTopup>,

    #[serde(default)]
    pub randomness: RandomnessConfig,

    #[serde(default)]
    pub scaling: Option<ScalingConfig>,

    #[serde(default)]
    pub sharding: Option<ShardingConfig>,
}

///
/// CanisterTopup
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct CanisterTopup {
    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub threshold: Cycles,

    #[serde(default, deserialize_with = "Cycles::from_config")]
    pub amount: Cycles,
}

impl Default for CanisterTopup {
    fn default() -> Self {
        Self {
            threshold: Cycles::new(10 * TC),
            amount: Cycles::new(5 * TC),
        }
    }
}

///
/// RandomnessConfig
///

#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct RandomnessConfig {
    pub enabled: bool,
    pub reseed_interval_secs: u64,
    pub source: RandomnessSource,
}

impl Default for RandomnessConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            reseed_interval_secs: 3600,
            source: RandomnessSource::Ic,
        }
    }
}

///
/// RandomnessSource
///

#[derive(Clone, Copy, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum RandomnessSource {
    #[default]
    Ic,
    Time,
}

///
/// ScalingConfig
/// (stateless, scaling)
///
/// * Organizes canisters into **worker groups** (e.g. "oracle").
/// * Workers are interchangeable and handle transient tasks (no tenant assignment).
/// * Scaling is about throughput, not capacity.
/// * Hence: `WorkerManager → pools → WorkerSpec → WorkerPolicy`.
///
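/// A hedged sketch of one worker group; the "oracle" pool name and
/// "oracle_worker" role are hypothetical, not part of the schema:
///
/// ```ignore
/// let mut pools = BTreeMap::new();
/// pools.insert(
///     "oracle".to_string(),
///     ScalePool {
///         canister_type: CanisterRole::from("oracle_worker"),
///         policy: ScalePoolPolicy { min_workers: 1, max_workers: 8 },
///     },
/// );
/// let scaling = ScalingConfig { pools };
/// ```
///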

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalingConfig {
    #[serde(default)]
    pub pools: BTreeMap<String, ScalePool>,
}

///
/// ScalePool
/// One stateless worker group (e.g. "oracle").
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ScalePool {
    pub canister_type: CanisterRole,

    #[serde(default)]
    pub policy: ScalePoolPolicy,
}

///
/// ScalePoolPolicy
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ScalePoolPolicy {
    /// Minimum number of worker canisters to keep alive
    pub min_workers: u32,

    /// Maximum number of worker canisters to allow (0 disables the upper bound)
    pub max_workers: u32,
}

impl Default for ScalePoolPolicy {
    fn default() -> Self {
        Self {
            min_workers: 1,
            max_workers: 32,
        }
    }
}

///
/// ShardingConfig
/// (stateful, partitioned)
///
/// * Organizes canisters into named **pools**.
/// * Each pool manages a set of **shards**, and each shard owns a partition of state.
/// * Tenants are assigned to shards via HRW (highest-random-weight hashing) and stay there.
/// * Hence: `ShardManager → pools → ShardPoolSpec → ShardPoolPolicy`.
///
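/// A hedged sketch of one shard pool; the "tenants" pool name and
/// "tenant_shard" role are hypothetical, not part of the schema:
///
/// ```ignore
/// let mut pools = BTreeMap::new();
/// pools.insert(
///     "tenants".to_string(),
///     ShardPool {
///         canister_type: CanisterRole::from("tenant_shard"),
///         policy: ShardPoolPolicy { capacity: 10_000, max_shards: 8 },
///     },
/// );
/// let sharding = ShardingConfig { pools };
/// ```
///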

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardingConfig {
    #[serde(default)]
    pub pools: BTreeMap<String, ShardPool>,
}

///
/// ShardPool
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields)]
pub struct ShardPool {
    pub canister_type: CanisterRole,

    #[serde(default)]
    pub policy: ShardPoolPolicy,
}

///
/// ShardPoolPolicy
///

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(deny_unknown_fields, default)]
pub struct ShardPoolPolicy {
    pub capacity: u32,
    pub max_shards: u32,
}

impl Default for ShardPoolPolicy {
    fn default() -> Self {
        Self {
            capacity: 1_000,
            max_shards: 4,
        }
    }
}

///
/// TESTS
///

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::{BTreeMap, BTreeSet};

    #[test]
    fn randomness_defaults_to_ic() {
        let cfg = RandomnessConfig::default();

        assert!(cfg.enabled);
        assert_eq!(cfg.reseed_interval_secs, 3600);
        assert_eq!(cfg.source, RandomnessSource::Ic);
    }

    #[test]
    fn randomness_source_parses_ic_and_time() {
        let cfg: RandomnessConfig = toml::from_str("source = \"ic\"").unwrap();
        assert_eq!(cfg.source, RandomnessSource::Ic);

        let cfg: RandomnessConfig = toml::from_str("source = \"time\"").unwrap();
        assert_eq!(cfg.source, RandomnessSource::Time);
    }

    #[test]
    fn auto_create_entries_must_exist_in_subnet() {
        let mut auto_create = BTreeSet::new();
        auto_create.insert(CanisterRole::from("missing_auto_canister"));

        let subnet = SubnetConfig {
            auto_create,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing auto-create type to fail");
    }

    #[test]
    fn sharding_pool_references_must_exist_in_subnet() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_type: CanisterRole::from("missing_shard_worker"),
                policy: ShardPoolPolicy::default(),
            },
        );

        let manager_cfg = CanisterConfig {
            sharding: Some(sharding),
            ..Default::default()
        };

        canisters.insert(managing_role, manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected missing worker type to fail");
    }

    #[test]
    fn sharding_pool_policy_requires_positive_capacity_and_shards() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "primary".into(),
            ShardPool {
                canister_type: managing_role.clone(),
                policy: ShardPoolPolicy {
                    capacity: 0,
                    max_shards: 0,
                },
            },
        );

        canisters.insert(
            managing_role,
            CanisterConfig {
                sharding: Some(sharding),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid sharding policy to fail");
    }

    #[test]
    fn canister_role_name_must_fit_bound() {
        let long_role = "a".repeat(NAME_MAX_BYTES + 1);
        let mut canisters = BTreeMap::new();
        canisters.insert(CanisterRole::from(long_role), CanisterConfig::default());

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected canister role length to fail");
    }

    #[test]
    fn sharding_pool_name_must_fit_bound() {
        let managing_role: CanisterRole = "shard_hub".into();
        let mut canisters = BTreeMap::new();

        let mut sharding = ShardingConfig::default();
        sharding.pools.insert(
            "a".repeat(NAME_MAX_BYTES + 1),
            ShardPool {
                canister_type: managing_role.clone(),
                policy: ShardPoolPolicy::default(),
            },
        );

        canisters.insert(
            managing_role,
            CanisterConfig {
                sharding: Some(sharding),
                ..Default::default()
            },
        );

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected sharding pool name length to fail");
    }

    #[test]
    fn scaling_pool_policy_requires_max_ge_min_when_bounded() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "worker".into(),
            ScalePool {
                canister_type: CanisterRole::from("worker"),
                policy: ScalePoolPolicy {
                    min_workers: 5,
                    max_workers: 3,
                },
            },
        );

        canisters.insert(CanisterRole::from("worker"), CanisterConfig::default());

        let manager_cfg = CanisterConfig {
            scaling: Some(ScalingConfig { pools }),
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("manager"), manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid scaling policy to fail");
    }

    #[test]
    fn scaling_pool_name_must_fit_bound() {
        let mut canisters = BTreeMap::new();
        let mut pools = BTreeMap::new();
        pools.insert(
            "a".repeat(NAME_MAX_BYTES + 1),
            ScalePool {
                canister_type: CanisterRole::from("worker"),
                policy: ScalePoolPolicy::default(),
            },
        );

        canisters.insert(CanisterRole::from("worker"), CanisterConfig::default());

        let manager_cfg = CanisterConfig {
            scaling: Some(ScalingConfig { pools }),
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("manager"), manager_cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected scaling pool name length to fail");
    }

    #[test]
    fn randomness_interval_requires_positive_value() {
        let mut canisters = BTreeMap::new();

        let cfg = CanisterConfig {
            randomness: RandomnessConfig {
                enabled: true,
                reseed_interval_secs: 0,
                ..Default::default()
            },
            ..Default::default()
        };

        canisters.insert(CanisterRole::from("app"), cfg);

        let subnet = SubnetConfig {
            canisters,
            ..Default::default()
        };

        subnet
            .validate()
            .expect_err("expected invalid randomness interval to fail");
    }
}