1mod ai;
2mod cluster;
3mod service;
4
5use std::path::Path;
6
7use crate::error::{OrcaError, Result};
8
9pub use crate::backup::{BackupConfig, BackupTarget};
12pub use ai::{AiAlertConfig, AiConfig, AlertDeliveryChannels, AutoRemediateConfig};
13pub use cluster::{
14 AlertChannelConfig, ClusterConfig, ClusterMeta, NodeConfig, NodeGpuConfig, ObservabilityConfig,
15};
16pub use service::{ServiceConfig, ServicesConfig};
17
18impl ClusterConfig {
21 pub fn load(path: &Path) -> Result<Self> {
22 let content = std::fs::read_to_string(path)
23 .map_err(|e| OrcaError::Config(format!("failed to read {}: {e}", path.display())))?;
24 toml::from_str(&content)
25 .map_err(|e| OrcaError::Config(format!("failed to parse {}: {e}", path.display())))
26 }
27}
28
29impl ServicesConfig {
30 pub fn load(path: &Path) -> Result<Self> {
31 let content = std::fs::read_to_string(path)
32 .map_err(|e| OrcaError::Config(format!("failed to read {}: {e}", path.display())))?;
33 toml::from_str(&content)
34 .map_err(|e| OrcaError::Config(format!("failed to parse {}: {e}", path.display())))
35 }
36}
37
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::RuntimeKind;

    #[test]
    fn parse_cluster_config() {
        // Two-node cluster with api_port omitted; the final assertion pins
        // the default value (6880) filled in by deserialization.
        // Local renamed from `toml` to avoid shadowing the `toml` crate name.
        let raw = r#"
[cluster]
name = "test"
domain = "example.com"
acme_email = "ops@example.com"

[[node]]
address = "10.0.0.1"
labels = { zone = "eu-1" }

[[node]]
address = "10.0.0.2"
"#;
        let cfg: ClusterConfig = toml::from_str(raw).unwrap();
        assert_eq!(cfg.cluster.name, "test");
        assert_eq!(cfg.node.len(), 2);
        assert_eq!(cfg.cluster.api_port, 6880);
    }

    #[test]
    fn parse_services_config() {
        // One container service (with env table) plus one wasm service;
        // checks that the runtime kind round-trips from the string "wasm".
        let raw = r#"
[[service]]
name = "api"
image = "ghcr.io/myorg/api:latest"
replicas = 3
port = 8080
health = "/healthz"
domain = "api.example.com"

[service.env]
DATABASE_URL = "postgres://localhost/db"

[[service]]
name = "edge"
runtime = "wasm"
module = "./modules/edge.wasm"
triggers = ["http:/api/edge/*"]
"#;
        let cfg: ServicesConfig = toml::from_str(raw).unwrap();
        assert_eq!(cfg.service.len(), 2);
        assert_eq!(cfg.service[0].name, "api");
        assert_eq!(cfg.service[1].runtime, RuntimeKind::Wasm);
    }

    #[test]
    fn parse_gpu_node_config() {
        // A node may declare GPU inventory via nested [[node.gpus]] tables.
        let raw = r#"
[cluster]
name = "gpu-cluster"

[[node]]
address = "10.0.0.1"
labels = { role = "gpu" }

[[node.gpus]]
vendor = "nvidia"
count = 2
model = "A100"
"#;
        let cfg: ClusterConfig = toml::from_str(raw).unwrap();
        let node = &cfg.node[0];
        assert_eq!(node.gpus.len(), 1);
        assert_eq!(node.gpus[0].vendor, "nvidia");
        assert_eq!(node.gpus[0].count, 2);
    }

    #[test]
    fn parse_gpu_service_config() {
        // GPU requirements nest under [service.resources.gpu]; both the
        // resources table and the gpu table are optional, hence the unwraps.
        let raw = r#"
[[service]]
name = "llm-inference"
image = "vllm/vllm-openai:latest"
port = 8000

[service.resources]
memory = "32Gi"
cpu = 8.0

[service.resources.gpu]
count = 1
vendor = "nvidia"
vram_min = 40000
"#;
        let cfg: ServicesConfig = toml::from_str(raw).unwrap();
        let resources = cfg.service[0].resources.as_ref().unwrap();
        let gpu = resources.gpu.as_ref().unwrap();
        assert_eq!(gpu.count, 1);
        assert_eq!(gpu.vendor.as_deref(), Some("nvidia"));
    }

    #[test]
    fn parse_ai_config() {
        // Optional [ai] section with nested alert and auto-remediation tables.
        let raw = r#"
[cluster]
name = "test"

[ai]
provider = "litellm"
endpoint = "https://llm.example.com"
model = "qwen3-30b"

[ai.alerts]
enabled = true
analysis_interval_secs = 30

[ai.auto_remediate]
restart_crashed = true
scale_on_pressure = false
"#;
        let cfg: ClusterConfig = toml::from_str(raw).unwrap();
        let ai = cfg.ai.as_ref().unwrap();
        assert_eq!(ai.provider, "litellm");
        assert_eq!(ai.alerts.as_ref().unwrap().analysis_interval_secs, 30);
        assert!(ai.auto_remediate.as_ref().unwrap().restart_crashed);
    }
}