1mod ai;
2mod cluster;
3mod service;
4
5use std::path::Path;
6
7use crate::error::{OrcaError, Result};
8
9pub use ai::{AiAlertConfig, AiConfig, AlertDeliveryChannels, AutoRemediateConfig};
12pub use cluster::{
13 AlertChannelConfig, ClusterConfig, ClusterMeta, NodeConfig, NodeGpuConfig, ObservabilityConfig,
14};
15pub use service::{ServiceConfig, ServicesConfig};
16
17impl ClusterConfig {
20 pub fn load(path: &Path) -> Result<Self> {
21 let content = std::fs::read_to_string(path)
22 .map_err(|e| OrcaError::Config(format!("failed to read {}: {e}", path.display())))?;
23 toml::from_str(&content)
24 .map_err(|e| OrcaError::Config(format!("failed to parse {}: {e}", path.display())))
25 }
26}
27
28impl ServicesConfig {
29 pub fn load(path: &Path) -> Result<Self> {
30 let content = std::fs::read_to_string(path)
31 .map_err(|e| OrcaError::Config(format!("failed to read {}: {e}", path.display())))?;
32 toml::from_str(&content)
33 .map_err(|e| OrcaError::Config(format!("failed to parse {}: {e}", path.display())))
34 }
35}
36
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::RuntimeKind;

    // A cluster file with metadata and two nodes, one carrying labels.
    // `api_port` is absent from the TOML, so the 6880 assertion exercises
    // whatever default the deserializer applies.
    #[test]
    fn parse_cluster_config() {
        let raw = r#"
[cluster]
name = "test"
domain = "example.com"
acme_email = "ops@example.com"

[[node]]
address = "10.0.0.1"
labels = { zone = "eu-1" }

[[node]]
address = "10.0.0.2"
"#;
        let parsed: ClusterConfig = toml::from_str(raw).unwrap();
        assert_eq!(parsed.cluster.name, "test");
        assert_eq!(parsed.node.len(), 2);
        assert_eq!(parsed.cluster.api_port, 6880);
    }

    // A services file mixing a container-image service (with an env table)
    // and a wasm-runtime service with module path and triggers.
    #[test]
    fn parse_services_config() {
        let raw = r#"
[[service]]
name = "api"
image = "ghcr.io/myorg/api:latest"
replicas = 3
port = 8080
health = "/healthz"
domain = "api.example.com"

[service.env]
DATABASE_URL = "postgres://localhost/db"

[[service]]
name = "edge"
runtime = "wasm"
module = "./modules/edge.wasm"
triggers = ["http:/api/edge/*"]
"#;
        let parsed: ServicesConfig = toml::from_str(raw).unwrap();
        assert_eq!(parsed.service.len(), 2);
        assert_eq!(parsed.service[0].name, "api");
        assert_eq!(parsed.service[1].runtime, RuntimeKind::Wasm);
    }

    // Node-level GPU inventory declared via an [[node.gpus]] array of tables.
    #[test]
    fn parse_gpu_node_config() {
        let raw = r#"
[cluster]
name = "gpu-cluster"

[[node]]
address = "10.0.0.1"
labels = { role = "gpu" }

[[node.gpus]]
vendor = "nvidia"
count = 2
model = "A100"
"#;
        let parsed: ClusterConfig = toml::from_str(raw).unwrap();
        assert_eq!(parsed.node[0].gpus.len(), 1);
        assert_eq!(parsed.node[0].gpus[0].vendor, "nvidia");
        assert_eq!(parsed.node[0].gpus[0].count, 2);
    }

    // Service-level GPU request nested under [service.resources.gpu];
    // both `resources` and `gpu` are optional, hence the as_ref().unwrap() chain.
    #[test]
    fn parse_gpu_service_config() {
        let raw = r#"
[[service]]
name = "llm-inference"
image = "vllm/vllm-openai:latest"
port = 8000

[service.resources]
memory = "32Gi"
cpu = 8.0

[service.resources.gpu]
count = 1
vendor = "nvidia"
vram_min = 40000
"#;
        let parsed: ServicesConfig = toml::from_str(raw).unwrap();
        let gpu_req = parsed.service[0]
            .resources
            .as_ref()
            .unwrap()
            .gpu
            .as_ref()
            .unwrap();
        assert_eq!(gpu_req.count, 1);
        assert_eq!(gpu_req.vendor.as_deref(), Some("nvidia"));
    }

    // Optional [ai] section with nested alert and auto-remediation tables.
    #[test]
    fn parse_ai_config() {
        let raw = r#"
[cluster]
name = "test"

[ai]
provider = "litellm"
endpoint = "https://llm.example.com"
model = "qwen3-30b"

[ai.alerts]
enabled = true
analysis_interval_secs = 30

[ai.auto_remediate]
restart_crashed = true
scale_on_pressure = false
"#;
        let parsed: ClusterConfig = toml::from_str(raw).unwrap();
        let ai_cfg = parsed.ai.as_ref().unwrap();
        assert_eq!(ai_cfg.provider, "litellm");
        assert_eq!(ai_cfg.alerts.as_ref().unwrap().analysis_interval_secs, 30);
        assert!(ai_cfg.auto_remediate.as_ref().unwrap().restart_crashed);
    }
}
164}