oxirs-cluster 0.2.4

Raft-backed distributed dataset for high availability and horizontal scaling
//! # Health Monitoring and Failure Detection
//!
//! Comprehensive health monitoring for cluster nodes, including heartbeat
//! monitoring, failure detection, and recovery event reporting.
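//!
//! A minimal usage sketch (the import path and node IDs are illustrative;
//! the example is `ignore`d because the public path to this module may differ):
//!
//! ```ignore
//! use oxirs_cluster::health::{HealthEvent, HealthMonitor, HealthMonitorConfig};
//!
//! #[tokio::main]
//! async fn main() -> anyhow::Result<()> {
//!     let monitor = HealthMonitor::new(HealthMonitorConfig::default());
//!     monitor.start().await?;
//!
//!     // Track a node, then feed it heartbeats as they arrive.
//!     monitor.register_node(1).await?;
//!     monitor.record_heartbeat(1).await?;
//!
//!     // React to health transitions.
//!     while let Some(event) = monitor.next_event().await {
//!         if let HealthEvent::NodeFailed(id) = event {
//!             eprintln!("node {id} has failed");
//!         }
//!     }
//!     Ok(())
//! }
//! ```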

use crate::raft::OxirsNodeId;
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::sync::{mpsc, RwLock};
use tokio::time::interval;
use tracing::{debug, info};

/// Health status of a cluster node
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeHealth {
    /// Current health status
    pub status: NodeHealthLevel,
    /// System metrics
    pub system_metrics: SystemMetrics,
    /// Response time for health checks
    pub response_time: Duration,
    /// Last health check timestamp (milliseconds since epoch)
    pub last_checked: u64,
}

impl Default for NodeHealth {
    fn default() -> Self {
        Self {
            status: NodeHealthLevel::Unknown,
            system_metrics: SystemMetrics::default(),
            response_time: Duration::from_millis(0),
            last_checked: 0,
        }
    }
}

/// Health status enumeration
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum NodeHealthLevel {
    /// Node is healthy and responsive
    Healthy,
    /// Node is experiencing degraded performance
    Degraded,
    /// Node is suspected to be failed
    Suspected,
    /// Node is confirmed failed
    Failed,
    /// Node status is unknown
    Unknown,
}

/// Complete health status tracking for a node
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct NodeHealthStatus {
    /// Node identifier
    pub node_id: OxirsNodeId,
    /// Overall health status
    pub health: NodeHealth,
    /// Last heartbeat timestamp (milliseconds since epoch)
    pub last_heartbeat: u64,
    /// Number of consecutive failures
    pub failure_count: u32,
    /// System metrics
    pub system_metrics: SystemMetrics,
    /// Raft-specific metrics
    pub raft_metrics: Option<RaftMetrics>,
    /// Last failure timestamp (milliseconds since epoch)
    pub last_failure: Option<u64>,
    /// Custom health check results
    pub custom_checks: HashMap<String, bool>,
}

/// System metrics for health assessment
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemMetrics {
    /// CPU usage percentage (0.0-1.0)
    pub cpu_usage: f64,
    /// Memory usage percentage (0.0-1.0)
    pub memory_usage: f64,
    /// Disk I/O rate (MB/s)
    pub disk_io_rate: f64,
    /// Network throughput (MB/s)
    pub network_throughput: f64,
    /// Number of active connections
    pub connection_count: u32,
    /// Error rate (errors per second)
    pub error_rate: f64,
    /// Last update timestamp (seconds since epoch)
    pub timestamp: u64,
}

impl Default for SystemMetrics {
    fn default() -> Self {
        Self {
            cpu_usage: 0.0,
            memory_usage: 0.0,
            disk_io_rate: 0.0,
            network_throughput: 0.0,
            connection_count: 0,
            error_rate: 0.0,
            timestamp: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .expect("SystemTime should be after UNIX_EPOCH")
                .as_secs(),
        }
    }
}

/// Raft-specific health metrics
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RaftMetrics {
    /// Leader election frequency (elections per hour)
    pub election_frequency: f64,
    /// Log replication lag (milliseconds)
    pub replication_lag_ms: u64,
    /// Commitment delay (milliseconds)
    pub commitment_delay_ms: u64,
    /// Number of network partitions detected
    pub partition_count: u32,
    /// Vote request rate (requests per second)
    pub vote_request_rate: f64,
    /// Heartbeat interval variance (milliseconds)
    pub heartbeat_variance_ms: u64,
    /// Last update timestamp (seconds since epoch)
    pub timestamp: u64,
}

impl Default for RaftMetrics {
    fn default() -> Self {
        Self {
            election_frequency: 0.0,
            replication_lag_ms: 0,
            commitment_delay_ms: 0,
            partition_count: 0,
            vote_request_rate: 0.0,
            heartbeat_variance_ms: 0,
            timestamp: SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .expect("SystemTime should be after UNIX_EPOCH")
                .as_secs(),
        }
    }
}

/// Comprehensive node health information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeHealthInfo {
    /// Node identifier
    pub node_id: OxirsNodeId,
    /// Overall health status
    pub health: NodeHealth,
    /// Last heartbeat timestamp (milliseconds since epoch)
    pub last_heartbeat: u64,
    /// System metrics
    pub system_metrics: SystemMetrics,
    /// Raft-specific metrics
    pub raft_metrics: RaftMetrics,
    /// Number of consecutive failures
    pub failure_count: u32,
    /// Last failure timestamp (milliseconds since epoch)
    pub last_failure: Option<u64>,
    /// Custom health check results
    pub custom_checks: HashMap<String, bool>,
}

impl NodeHealthStatus {
    pub fn new(node_id: OxirsNodeId) -> Self {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("SystemTime should be after UNIX_EPOCH")
            .as_millis() as u64;

        Self {
            node_id,
            health: NodeHealth {
                status: NodeHealthLevel::Unknown,
                system_metrics: SystemMetrics::default(),
                response_time: Duration::from_millis(0),
                last_checked: now,
            },
            last_heartbeat: now,
            system_metrics: SystemMetrics::default(),
            raft_metrics: Some(RaftMetrics::default()),
            failure_count: 0,
            last_failure: None,
            custom_checks: HashMap::new(),
        }
    }

    /// Update health status based on current metrics
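    ///
    /// Checks run in order: `Failed` when the last heartbeat is older than
    /// 30s; `Degraded` when CPU > 90%, memory > 95%, or the error rate
    /// exceeds 10/s; `Suspected` when the heartbeat is older than 10s or
    /// more than 3 consecutive failures were recorded; `Degraded` when any
    /// custom check reports false; otherwise `Healthy`. Note that these
    /// thresholds are hardcoded here rather than read from
    /// `HealthMonitorConfig`.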
    pub fn update_health(&mut self) -> NodeHealth {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("SystemTime should be after UNIX_EPOCH")
            .as_millis() as u64;
        let heartbeat_age = Duration::from_millis(now.saturating_sub(self.last_heartbeat));

        // Check if node is unresponsive
        if heartbeat_age > Duration::from_secs(30) {
            self.health.status = NodeHealthLevel::Failed;
            self.health.last_checked = now;
            return self.health.clone();
        }

        // Check system metrics for degradation
        if self.system_metrics.cpu_usage > 0.90
            || self.system_metrics.memory_usage > 0.95
            || self.system_metrics.error_rate > 10.0
        {
            self.health.status = NodeHealthLevel::Degraded;
            self.health.last_checked = now;
            return self.health.clone();
        }

        // Check whether the node should be suspected (stale heartbeat or repeated failures)
        if heartbeat_age > Duration::from_secs(10) || self.failure_count > 3 {
            self.health.status = NodeHealthLevel::Suspected;
            self.health.last_checked = now;
            return self.health.clone();
        }

        // Check custom health checks
        if self.custom_checks.values().any(|&check| !check) {
            self.health.status = NodeHealthLevel::Degraded;
            self.health.last_checked = now;
            return self.health.clone();
        }

        self.health.status = NodeHealthLevel::Healthy;
        self.health.last_checked = now;
        self.health.clone()
    }
}

/// Health monitoring configuration
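///
/// The `Default` values below are sensible starting points. A sketch of
/// overriding individual fields (the values shown are illustrative):
///
/// ```ignore
/// use std::time::Duration;
///
/// let config = HealthMonitorConfig {
///     heartbeat_interval: Duration::from_secs(2),
///     failure_timeout: Duration::from_secs(15),
///     ..Default::default()
/// };
/// ```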
#[derive(Debug, Clone)]
pub struct HealthMonitorConfig {
    /// Heartbeat interval
    pub heartbeat_interval: Duration,
    /// Failure detection timeout
    pub failure_timeout: Duration,
    /// Number of consecutive failures before marking as failed
    pub failure_threshold: u32,
    /// Health check interval
    pub health_check_interval: Duration,
    /// Enable system metrics collection
    pub enable_system_metrics: bool,
    /// Enable Raft metrics collection
    pub enable_raft_metrics: bool,
    /// Names of custom health checks to run
    pub custom_checks: Vec<String>,
}

impl Default for HealthMonitorConfig {
    fn default() -> Self {
        Self {
            heartbeat_interval: Duration::from_secs(5),
            failure_timeout: Duration::from_secs(30),
            failure_threshold: 3,
            health_check_interval: Duration::from_secs(10),
            enable_system_metrics: true,
            enable_raft_metrics: true,
            custom_checks: Vec::new(),
        }
    }
}

/// Event types for health monitoring
#[derive(Debug, Clone)]
pub enum HealthEvent {
    /// Node became healthy
    NodeHealthy(OxirsNodeId),
    /// Node became degraded
    NodeDegraded(OxirsNodeId, String),
    /// Node is suspected to be failed
    NodeSuspected(OxirsNodeId),
    /// Node failed
    NodeFailed(OxirsNodeId),
    /// Node recovered from failure
    NodeRecovered(OxirsNodeId),
    /// Cluster partition detected
    PartitionDetected(Vec<OxirsNodeId>),
    /// Cluster partition healed
    PartitionHealed,
}

/// Health monitoring and failure detection system
pub struct HealthMonitor {
    /// Configuration
    config: HealthMonitorConfig,
    /// Node health statuses
    node_statuses: Arc<RwLock<HashMap<OxirsNodeId, NodeHealthStatus>>>,
    /// Event channel sender
    event_sender: mpsc::UnboundedSender<HealthEvent>,
    /// Event channel receiver
    event_receiver: Arc<RwLock<mpsc::UnboundedReceiver<HealthEvent>>>,
    /// Running flag
    running: Arc<RwLock<bool>>,
}

impl HealthMonitor {
    /// Create a new health monitor
    pub fn new(config: HealthMonitorConfig) -> Self {
        let (event_sender, event_receiver) = mpsc::unbounded_channel();

        Self {
            config,
            node_statuses: Arc::new(RwLock::new(HashMap::new())),
            event_sender,
            event_receiver: Arc::new(RwLock::new(event_receiver)),
            running: Arc::new(RwLock::new(false)),
        }
    }

    /// Start health monitoring
    pub async fn start(&self) -> Result<()> {
        let mut running = self.running.write().await;
        if *running {
            return Ok(());
        }
        *running = true;

        info!("Starting health monitor");

        // Start heartbeat monitoring task
        self.start_heartbeat_monitoring().await;

        // Start health checking task
        self.start_health_checking().await;

        // Start metrics collection if enabled
        if self.config.enable_system_metrics {
            self.start_system_metrics_collection().await;
        }

        if self.config.enable_raft_metrics {
            self.start_raft_metrics_collection().await;
        }

        Ok(())
    }

    /// Stop health monitoring
    pub async fn stop(&self) {
        let mut running = self.running.write().await;
        *running = false;
        info!("Health monitor stopped");
    }

    /// Register a node for monitoring
    pub async fn register_node(&self, node_id: OxirsNodeId) -> Result<()> {
        let mut statuses = self.node_statuses.write().await;
        statuses.insert(node_id, NodeHealthStatus::new(node_id));
        info!("Registered node {} for health monitoring", node_id);
        Ok(())
    }

    /// Unregister a node from monitoring
    pub async fn unregister_node(&self, node_id: OxirsNodeId) -> Result<()> {
        let mut statuses = self.node_statuses.write().await;
        statuses.remove(&node_id);
        info!("Unregistered node {} from health monitoring", node_id);
        Ok(())
    }

    /// Record a heartbeat from a node
    pub async fn record_heartbeat(&self, node_id: OxirsNodeId) -> Result<()> {
        let mut statuses = self.node_statuses.write().await;
        if let Some(status) = statuses.get_mut(&node_id) {
            let old_health = status.health.clone();
            status.last_heartbeat = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .expect("SystemTime should be after UNIX_EPOCH")
                .as_millis() as u64;
            status.failure_count = 0;
            let new_health = status.update_health();

            // Send health event if status changed
            if old_health.status != new_health.status
                && new_health.status == NodeHealthLevel::Healthy
            {
                let _ = self.event_sender.send(HealthEvent::NodeRecovered(node_id));
            }
        }
        Ok(())
    }

    /// Update system metrics for a node
    pub async fn update_system_metrics(
        &self,
        node_id: OxirsNodeId,
        metrics: SystemMetrics,
    ) -> Result<()> {
        let mut statuses = self.node_statuses.write().await;
        if let Some(status) = statuses.get_mut(&node_id) {
            status.system_metrics = metrics;
            status.update_health();
        }
        Ok(())
    }

    /// Update Raft metrics for a node
    pub async fn update_raft_metrics(
        &self,
        node_id: OxirsNodeId,
        metrics: RaftMetrics,
    ) -> Result<()> {
        let mut statuses = self.node_statuses.write().await;
        if let Some(status) = statuses.get_mut(&node_id) {
            status.raft_metrics = Some(metrics);
        }
        Ok(())
    }

    /// Get current health status of all nodes
    pub async fn get_cluster_health(&self) -> HashMap<OxirsNodeId, NodeHealthStatus> {
        let statuses = self.node_statuses.read().await;
        statuses.clone()
    }

    /// Get health status of a specific node
    pub async fn get_node_health(&self, node_id: OxirsNodeId) -> Option<NodeHealthStatus> {
        let statuses = self.node_statuses.read().await;
        statuses.get(&node_id).cloned()
    }

    /// Get next health event
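    ///
    /// Returns `None` only once the internal event channel has closed. A
    /// typical consumption loop (a sketch; the match arms are illustrative):
    ///
    /// ```ignore
    /// while let Some(event) = monitor.next_event().await {
    ///     match event {
    ///         HealthEvent::NodeFailed(id) => eprintln!("node {id} failed"),
    ///         HealthEvent::NodeRecovered(id) => println!("node {id} recovered"),
    ///         _ => {}
    ///     }
    /// }
    /// ```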
    pub async fn next_event(&self) -> Option<HealthEvent> {
        let mut receiver = self.event_receiver.write().await;
        receiver.recv().await
    }

    /// Check if the cluster is healthy: a strict majority of registered
    /// nodes must currently report `Healthy`.
    pub async fn is_cluster_healthy(&self) -> bool {
        let statuses = self.node_statuses.read().await;
        let total_nodes = statuses.len();
        if total_nodes == 0 {
            return false;
        }

        let healthy_nodes = statuses
            .values()
            .filter(|status| matches!(status.health.status, NodeHealthLevel::Healthy))
            .count();

        // Require majority of nodes to be healthy
        healthy_nodes > total_nodes / 2
    }

    /// Start heartbeat monitoring task
    async fn start_heartbeat_monitoring(&self) {
        let node_statuses = self.node_statuses.clone();
        let event_sender = self.event_sender.clone();
        let failure_timeout = self.config.failure_timeout;
        let running = self.running.clone();

        tokio::spawn(async move {
            let mut interval = interval(Duration::from_secs(1));

            while *running.read().await {
                interval.tick().await;

                let mut statuses = node_statuses.write().await;
                let now = SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .expect("SystemTime should be after UNIX_EPOCH")
                    .as_millis() as u64;

                for (node_id, status) in statuses.iter_mut() {
                    let old_health = status.health.clone();
                    let heartbeat_age =
                        Duration::from_millis(now.saturating_sub(status.last_heartbeat));

                    if heartbeat_age > failure_timeout {
                        status.failure_count += 1;
                        if status.failure_count == 1 {
                            status.last_failure = Some(now);
                        }
                    }

                    let new_health = status.update_health();

                    // Send health events on status change
                    if old_health.status != new_health.status {
                        let event = match new_health.status {
                            NodeHealthLevel::Healthy => HealthEvent::NodeHealthy(*node_id),
                            NodeHealthLevel::Degraded => HealthEvent::NodeDegraded(
                                *node_id,
                                "System metrics degraded".to_string(),
                            ),
                            NodeHealthLevel::Suspected => HealthEvent::NodeSuspected(*node_id),
                            NodeHealthLevel::Failed => HealthEvent::NodeFailed(*node_id),
                            NodeHealthLevel::Unknown => continue,
                        };
                        let _ = event_sender.send(event);
                    }
                }
            }
        });
    }

    /// Start health checking task
    async fn start_health_checking(&self) {
        let health_check_interval = self.config.health_check_interval;
        let running = self.running.clone();

        tokio::spawn(async move {
            let mut interval = interval(health_check_interval);

            while *running.read().await {
                interval.tick().await;
                // Perform custom health checks here
                debug!("Performing health checks");
            }
        });
    }

    /// Start system metrics collection task
    async fn start_system_metrics_collection(&self) {
        let running = self.running.clone();

        tokio::spawn(async move {
            let mut interval = interval(Duration::from_secs(30));

            while *running.read().await {
                interval.tick().await;
                // Collect system metrics here
                debug!("Collecting system metrics");
            }
        });
    }

    /// Start Raft metrics collection task
    async fn start_raft_metrics_collection(&self) {
        let running = self.running.clone();

        tokio::spawn(async move {
            let mut interval = interval(Duration::from_secs(10));

            while *running.read().await {
                interval.tick().await;
                // Collect Raft metrics here
                debug!("Collecting Raft metrics");
            }
        });
    }

    /// Start monitoring a specific node
    pub async fn start_monitoring(&self, node_id: OxirsNodeId, _address: String) {
        let mut statuses = self.node_statuses.write().await;
        if let std::collections::hash_map::Entry::Vacant(e) = statuses.entry(node_id) {
            let status = NodeHealthStatus::new(node_id);
            e.insert(status);
            info!("Started monitoring node {}", node_id);
        }
    }

    /// Stop monitoring a specific node
    pub async fn stop_monitoring(&self, node_id: OxirsNodeId) {
        let mut statuses = self.node_statuses.write().await;
        statuses.remove(&node_id);
        info!("Stopped monitoring node {}", node_id);
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_health_monitor_creation() {
        let config = HealthMonitorConfig::default();
        let monitor = HealthMonitor::new(config);
        assert!(!*monitor.running.read().await);
    }

    #[tokio::test]
    async fn test_node_registration() {
        let config = HealthMonitorConfig::default();
        let monitor = HealthMonitor::new(config);

        monitor.register_node(1).await.unwrap();
        let health = monitor.get_node_health(1).await.unwrap();
        assert_eq!(health.node_id, 1);
    }

    #[tokio::test]
    async fn test_heartbeat_recording() {
        let config = HealthMonitorConfig::default();
        let monitor = HealthMonitor::new(config);

        monitor.register_node(1).await.unwrap();
        monitor.record_heartbeat(1).await.unwrap();

        let health = monitor.get_node_health(1).await.unwrap();
        assert_eq!(health.failure_count, 0);
    }

    #[test]
    fn test_health_status_update() {
        let mut status = NodeHealthStatus::new(1);

        // Test healthy status
        status.last_heartbeat = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_millis() as u64;
        assert_eq!(status.update_health().status, NodeHealthLevel::Healthy);

        // Test degraded status
        status.system_metrics.cpu_usage = 0.95;
        assert_eq!(status.update_health().status, NodeHealthLevel::Degraded);

        // Test failed status
        status.last_heartbeat = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_millis() as u64
            - Duration::from_secs(60).as_millis() as u64;
        assert_eq!(status.update_health().status, NodeHealthLevel::Failed);
    }

    #[tokio::test]
    async fn test_cluster_health() {
        let config = HealthMonitorConfig::default();
        let monitor = HealthMonitor::new(config);

        // Empty cluster should be unhealthy
        assert!(!monitor.is_cluster_healthy().await);

        // Add healthy nodes
        monitor.register_node(1).await.unwrap();
        monitor.register_node(2).await.unwrap();
        monitor.register_node(3).await.unwrap();

        monitor.record_heartbeat(1).await.unwrap();
        monitor.record_heartbeat(2).await.unwrap();
        monitor.record_heartbeat(3).await.unwrap();

        assert!(monitor.is_cluster_healthy().await);
    }
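
    // Additional sketches exercising `update_health` and the strict-majority
    // rule in `is_cluster_healthy`; values mirror the hardcoded thresholds above.
    #[test]
    fn test_custom_check_failure_degrades_health() {
        let mut status = NodeHealthStatus::new(1);
        // A fresh status is otherwise healthy; a single failing custom
        // check should push it to Degraded.
        status.custom_checks.insert("disk_space".to_string(), false);
        assert_eq!(status.update_health().status, NodeHealthLevel::Degraded);
    }

    #[tokio::test]
    async fn test_cluster_unhealthy_without_majority() {
        let monitor = HealthMonitor::new(HealthMonitorConfig::default());
        monitor.register_node(1).await.unwrap();
        monitor.register_node(2).await.unwrap();
        monitor.register_node(3).await.unwrap();

        // Only one of three nodes reports a heartbeat; the other two stay
        // Unknown, so fewer than a majority are Healthy.
        monitor.record_heartbeat(1).await.unwrap();
        assert!(!monitor.is_cluster_healthy().await);
    }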
}