// hadb_io/config.rs
//! Shared configuration types for the hadb ecosystem.
//!
//! These types are used across walrust, graphstream, haqlite, hakuzu, etc.
//! Engine-specific config (checkpoint intervals, WAL thresholds) stays in each product.

use anyhow::{anyhow, Result};
use serde::{Deserialize, Serialize};

9/// S3 storage configuration.
10#[derive(Debug, Clone, Deserialize, Serialize, Default)]
11pub struct S3Config {
12    /// S3 bucket URL (e.g., "s3://backups/prod").
13    pub bucket: Option<String>,
14    /// S3 endpoint URL (for Tigris/MinIO).
15    pub endpoint: Option<String>,
16}
17
18/// Webhook configuration for failure notifications.
19#[derive(Debug, Clone, Deserialize, Serialize)]
20pub struct WebhookConfig {
21    /// URL to POST notifications to.
22    pub url: String,
23    /// Events to notify on (default: all).
24    #[serde(default = "default_webhook_events")]
25    pub events: Vec<String>,
26    /// Optional secret for HMAC signing (header: X-Hadb-Signature).
27    pub secret: Option<String>,
28}
29
/// Default event set for webhook notifications: every failure category.
fn default_webhook_events() -> Vec<String> {
    [
        "upload_failed",
        "auth_failure",
        "corruption_detected",
        "circuit_breaker_open",
    ]
    .into_iter()
    .map(String::from)
    .collect()
}

39/// Cache configuration for disk-based upload queue.
40///
41/// When enabled, data files are written to disk cache before uploading to S3.
42/// Provides crash recovery, decouples encoding from uploads, and enables fast local restore.
43#[derive(Debug, Clone, Deserialize, Serialize)]
44pub struct CacheConfig {
45    /// Enable disk cache (default: false).
46    #[serde(default)]
47    pub enabled: bool,
48    /// How long to keep uploaded files in cache (default: "24h").
49    /// Supports: "1h", "24h", "7d", etc.
50    #[serde(default = "default_cache_retention")]
51    pub retention: String,
52    /// Maximum cache size in bytes before cleanup (default: 5GB).
53    #[serde(default = "default_cache_max_size")]
54    pub max_size: u64,
55    /// Override default cache location.
56    pub path: Option<String>,
57    /// Max concurrent S3 uploads per database (default: 4).
58    #[serde(default = "default_uploader_concurrency")]
59    pub uploader_concurrency: usize,
60}
61
62impl Default for CacheConfig {
63    fn default() -> Self {
64        Self {
65            enabled: false,
66            retention: "24h".to_string(),
67            max_size: 5 * 1024 * 1024 * 1024, // 5GB
68            path: None,
69            uploader_concurrency: 4,
70        }
71    }
72}
73
/// Default retention window for uploaded files kept in the cache.
fn default_cache_retention() -> String {
    String::from("24h")
}

/// Default cache size ceiling before cleanup: 5 GiB, in bytes.
fn default_cache_max_size() -> u64 {
    5 << 30 // 5 GiB
}

/// Default number of concurrent S3 uploads per database.
fn default_uploader_concurrency() -> usize {
    4
}

86/// Parse duration string like "24h", "7d", "30m", "60s" into chrono::Duration.
87pub fn parse_duration_string(s: &str) -> Result<chrono::Duration> {
88    let s = s.trim();
89    if s.is_empty() {
90        return Err(anyhow!("Empty duration string"));
91    }
92
93    let (num_str, unit) = if let Some(n) = s.strip_suffix('h') {
94        (n, 'h')
95    } else if let Some(n) = s.strip_suffix('d') {
96        (n, 'd')
97    } else if let Some(n) = s.strip_suffix('m') {
98        (n, 'm')
99    } else if let Some(n) = s.strip_suffix('s') {
100        (n, 's')
101    } else {
102        return Err(anyhow!(
103            "Invalid duration format '{}': must end with h, d, m, or s",
104            s
105        ));
106    };
107
108    let num: i64 = num_str
109        .parse()
110        .map_err(|_| anyhow!("Invalid duration number '{}' in '{}'", num_str, s))?;
111
112    match unit {
113        'h' => Ok(chrono::Duration::hours(num)),
114        'd' => Ok(chrono::Duration::days(num)),
115        'm' => Ok(chrono::Duration::minutes(num)),
116        's' => Ok(chrono::Duration::seconds(num)),
117        _ => unreachable!(),
118    }
119}
120
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_s3_config_default() {
        let cfg = S3Config::default();
        assert_eq!(cfg.bucket, None);
        assert_eq!(cfg.endpoint, None);
    }

    #[test]
    fn test_s3_config_serde() {
        let raw = r#"{"bucket": "s3://test", "endpoint": "https://fly.storage.tigris.dev"}"#;
        let cfg: S3Config = serde_json::from_str(raw).unwrap();
        assert_eq!(cfg.bucket.as_deref(), Some("s3://test"));
        assert_eq!(cfg.endpoint.as_deref(), Some("https://fly.storage.tigris.dev"));
    }

    #[test]
    fn test_webhook_config_defaults() {
        let cfg: WebhookConfig =
            serde_json::from_str(r#"{"url": "https://example.com/hook"}"#).unwrap();
        assert_eq!(cfg.url, "https://example.com/hook");
        assert_eq!(cfg.events.len(), 4);
        assert!(cfg.events.iter().any(|e| e == "upload_failed"));
        assert!(cfg.secret.is_none());
    }

    #[test]
    fn test_webhook_config_with_secret() {
        let raw =
            r#"{"url": "https://example.com", "events": ["auth_failure"], "secret": "s3cret"}"#;
        let cfg: WebhookConfig = serde_json::from_str(raw).unwrap();
        assert_eq!(cfg.events, vec!["auth_failure"]);
        assert_eq!(cfg.secret.as_deref(), Some("s3cret"));
    }

    #[test]
    fn test_cache_config_defaults() {
        let cfg = CacheConfig::default();
        assert!(!cfg.enabled);
        assert_eq!(cfg.retention, "24h");
        assert_eq!(cfg.max_size, 5 * 1024 * 1024 * 1024);
        assert_eq!(cfg.path, None);
        assert_eq!(cfg.uploader_concurrency, 4);
    }

    #[test]
    fn test_cache_config_serde() {
        let raw = r#"{"enabled": true, "retention": "7d", "max_size": 1073741824, "uploader_concurrency": 8}"#;
        let cfg: CacheConfig = serde_json::from_str(raw).unwrap();
        assert!(cfg.enabled);
        assert_eq!(cfg.retention, "7d");
        assert_eq!(cfg.max_size, 1073741824);
        assert_eq!(cfg.uploader_concurrency, 8);
    }

    #[test]
    fn test_parse_duration_hours() {
        assert_eq!(parse_duration_string("24h").unwrap().num_hours(), 24);
    }

    #[test]
    fn test_parse_duration_days() {
        assert_eq!(parse_duration_string("7d").unwrap().num_days(), 7);
    }

    #[test]
    fn test_parse_duration_minutes() {
        assert_eq!(parse_duration_string("30m").unwrap().num_minutes(), 30);
    }

    #[test]
    fn test_parse_duration_seconds() {
        assert_eq!(parse_duration_string("60s").unwrap().num_seconds(), 60);
    }

    #[test]
    fn test_parse_duration_whitespace() {
        assert_eq!(parse_duration_string("  12h  ").unwrap().num_hours(), 12);
    }

    #[test]
    fn test_parse_duration_invalid() {
        // Missing unit, non-numeric, and unknown-unit inputs must all fail.
        for bad in ["", "24", "abc", "24x"] {
            assert!(
                parse_duration_string(bad).is_err(),
                "expected error for {bad:?}"
            );
        }
    }

    #[test]
    fn test_parse_duration_invalid_number() {
        assert!(parse_duration_string("abch").is_err());
    }
}