mod ai;
mod cluster;
mod service;

use std::path::Path;

use crate::error::{OrcaError, Result};

pub use crate::backup::{BackupConfig, BackupTarget};
pub use ai::{AiAlertConfig, AiConfig, AlertDeliveryChannels, AutoRemediateConfig};
pub use cluster::NetworkConfig;
pub use cluster::{
    AlertChannelConfig, ClusterConfig, ClusterMeta, NodeConfig, NodeGpuConfig, ObservabilityConfig,
};
pub use service::{ProbeConfig, ServiceConfig, ServicesConfig};

impl ClusterConfig {
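    /// Loads a cluster configuration from the TOML file at `path`,
    /// mapping both read and parse failures to `OrcaError::Config`.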
    pub fn load(path: &Path) -> Result<Self> {
        let content = std::fs::read_to_string(path)
            .map_err(|e| OrcaError::Config(format!("failed to read {}: {e}", path.display())))?;
        toml::from_str(&content)
            .map_err(|e| OrcaError::Config(format!("failed to parse {}: {e}", path.display())))
    }
}

impl ServicesConfig {
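    /// Loads service definitions from the TOML file at `path`,
    /// mapping both read and parse failures to `OrcaError::Config`.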
    pub fn load(path: &Path) -> Result<Self> {
        let content = std::fs::read_to_string(path)
            .map_err(|e| OrcaError::Config(format!("failed to read {}: {e}", path.display())))?;
        toml::from_str(&content)
            .map_err(|e| OrcaError::Config(format!("failed to parse {}: {e}", path.display())))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::RuntimeKind;

    #[test]
    fn parse_cluster_config() {
        let toml = r#"
[cluster]
name = "test"
domain = "example.com"
acme_email = "ops@example.com"

[[node]]
address = "10.0.0.1"
labels = { zone = "eu-1" }

[[node]]
address = "10.0.0.2"
"#;
        let config: ClusterConfig = toml::from_str(toml).unwrap();
        assert_eq!(config.cluster.name, "test");
        assert_eq!(config.node.len(), 2);
        assert_eq!(config.cluster.api_port, 6880);
    }

    #[test]
    fn parse_services_config() {
        let toml = r#"
[[service]]
name = "api"
image = "ghcr.io/myorg/api:latest"
replicas = 3
port = 8080
health = "/healthz"
domain = "api.example.com"

[service.env]
DATABASE_URL = "postgres://localhost/db"

[[service]]
name = "edge"
runtime = "wasm"
module = "./modules/edge.wasm"
triggers = ["http:/api/edge/*"]
"#;
        let config: ServicesConfig = toml::from_str(toml).unwrap();
        assert_eq!(config.service.len(), 2);
        assert_eq!(config.service[0].name, "api");
        assert_eq!(config.service[1].runtime, RuntimeKind::Wasm);
    }

    #[test]
    fn parse_gpu_node_config() {
        let toml = r#"
[cluster]
name = "gpu-cluster"

[[node]]
address = "10.0.0.1"
labels = { role = "gpu" }

[[node.gpus]]
vendor = "nvidia"
count = 2
model = "A100"
"#;
        let config: ClusterConfig = toml::from_str(toml).unwrap();
        assert_eq!(config.node[0].gpus.len(), 1);
        assert_eq!(config.node[0].gpus[0].vendor, "nvidia");
        assert_eq!(config.node[0].gpus[0].count, 2);
    }

    #[test]
    fn parse_gpu_service_config() {
        let toml = r#"
[[service]]
name = "llm-inference"
image = "vllm/vllm-openai:latest"
port = 8000

[service.resources]
memory = "32Gi"
cpu = 8.0

[service.resources.gpu]
count = 1
vendor = "nvidia"
vram_min = 40000
"#;
        let config: ServicesConfig = toml::from_str(toml).unwrap();
        let gpu = config.service[0]
            .resources
            .as_ref()
            .unwrap()
            .gpu
            .as_ref()
            .unwrap();
        assert_eq!(gpu.count, 1);
        assert_eq!(gpu.vendor.as_deref(), Some("nvidia"));
    }

    #[test]
    fn parse_ai_config() {
        let toml = r#"
[cluster]
name = "test"

[ai]
provider = "litellm"
endpoint = "https://llm.example.com"
model = "qwen3-30b"

[ai.alerts]
enabled = true
analysis_interval_secs = 30

[ai.auto_remediate]
restart_crashed = true
scale_on_pressure = false
"#;
        let config: ClusterConfig = toml::from_str(toml).unwrap();
        let ai = config.ai.as_ref().unwrap();
        assert_eq!(ai.provider, "litellm");
        assert_eq!(ai.alerts.as_ref().unwrap().analysis_interval_secs, 30);
        assert!(ai.auto_remediate.as_ref().unwrap().restart_crashed);
    }
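
    // Not in the original module: a minimal error-path sketch. It assumes the
    // path below does not exist on the test machine, and checks that a missing
    // file surfaces as `OrcaError::Config` (per the `map_err` in `load`)
    // rather than panicking.
    #[test]
    fn load_missing_file_is_config_error() {
        let result = ClusterConfig::load(Path::new("/nonexistent/orca/cluster.toml"));
        assert!(matches!(result, Err(OrcaError::Config(_))));
    }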
}