oxirs-cluster 0.2.4

Raft-backed distributed dataset for high availability and horizontal scaling
//! # OxiRS Cluster
//!
//! [![Version](https://img.shields.io/badge/version-0.2.4-blue)](https://github.com/cool-japan/oxirs/releases)
//! [![docs.rs](https://docs.rs/oxirs-cluster/badge.svg)](https://docs.rs/oxirs-cluster)
//!
//! **Status**: Production Release (v0.2.4)
//! **Stability**: Public APIs are stable. Production-ready with comprehensive testing.
//!
//! Raft-backed distributed dataset for high availability and horizontal scaling.
//!
//! This crate provides distributed storage capabilities using Raft consensus with
//! multi-region support, Byzantine fault tolerance, and advanced replication strategies.
//!
//! ## Features
//!
//! - **Raft Consensus**: Production-ready Raft implementation using openraft
//! - **Distributed RDF Storage**: Scalable, consistent RDF triple storage
//! - **Automatic Failover**: Leader election and automatic recovery
//! - **Node Discovery**: Multiple discovery mechanisms (static, DNS, multicast)
//! - **Replication Management**: Configurable replication strategies
//! - **SPARQL Support**: Distributed SPARQL query execution
//! - **Transaction Support**: Distributed ACID transactions
//!
//! ## Example
//!
//! ```ignore
//! use oxirs_cluster::{ClusterNode, NodeConfig};
//! use std::net::SocketAddr;
//!
//! async fn example() -> Result<(), Box<dyn std::error::Error>> {
//!     let address: SocketAddr = "127.0.0.1:8080".parse()?;
//!     let mut config = NodeConfig::new(1, address);
//!     config.data_dir = "./data".to_string();
//!     config.add_peer(2);
//!     config.add_peer(3);
//!
//!     let mut node = ClusterNode::new(config).await?;
//!     node.start().await?;
//!
//!     // Insert data through consensus (succeeds only on the leader)
//!     node.insert_triple(
//!         "<http://example.org/subject>",
//!         "<http://example.org/predicate>",
//!         "\"object\"",
//!     )
//!     .await?;
//!
//!     Ok(())
//! }
//! ```
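//!
//! Once a node is running, reads can be served from any replica. The sketch below
//! (a minimal, non-authoritative example assuming the node above has been started)
//! shows pattern queries, distributed SPARQL execution, and a status check:
//!
//! ```ignore
//! use oxirs_cluster::ClusterNode;
//!
//! async fn read_side(node: &ClusterNode) -> Result<(), Box<dyn std::error::Error>> {
//!     // Pattern query: any of subject/predicate/object may be None (wildcard)
//!     let triples = node
//!         .query_triples(Some("<http://example.org/subject>"), None, None)
//!         .await;
//!     println!("matched {} triples", triples.len());
//!
//!     // Distributed SPARQL execution across the cluster
//!     let rows = node.query_sparql("SELECT ?s ?p ?o WHERE { ?s ?p ?o }").await?;
//!     println!("{} result rows", rows.len());
//!
//!     // Cluster status: leadership, term, peer count, replication stats
//!     let status = node.get_status().await;
//!     println!("leader: {}, term: {}", status.is_leader, status.current_term);
//!
//!     Ok(())
//! }
//! ```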

#![allow(clippy::field_reassign_with_default)]
#![allow(clippy::single_match)]
#![allow(clippy::collapsible_if)]
#![allow(clippy::clone_on_copy)]
#![allow(clippy::type_complexity)]
#![allow(clippy::collapsible_match)]
#![allow(clippy::manual_clamp)]
#![allow(clippy::needless_range_loop)]
#![allow(clippy::or_fun_call)]
#![allow(clippy::if_same_then_else)]
#![allow(clippy::only_used_in_recursion)]
#![allow(clippy::new_without_default)]
#![allow(clippy::derivable_impls)]
#![allow(clippy::useless_conversion)]
use serde::{Deserialize, Serialize};
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::sync::RwLock;

pub mod adaptive_leader_election;
pub mod advanced_partitioning;
pub mod advanced_storage;
pub mod alerting;
pub mod auto_scaling;
pub mod backup_restore;
pub mod circuit_breaker;
pub mod cloud_integration;
pub mod cluster_metrics;
pub mod compression_strategy;
pub mod conflict_resolution;
pub mod consensus;
pub mod crash_recovery;
pub mod data_rebalancing;
pub mod disaster_recovery;
pub mod discovery;
pub mod distributed_query;
pub mod distributed_tracing;
pub mod edge_computing;
pub mod encryption;
pub mod enhanced_node_discovery;
pub mod enhanced_snapshotting;
pub mod error;
pub mod failover;
pub mod federation;
pub mod gpu_acceleration;
pub mod health_monitor;
pub mod health_monitoring;
pub mod memory_optimization;
pub mod merkle_tree;
pub mod ml_optimization;
pub mod multi_tenant;
pub mod mvcc;
pub mod mvcc_storage;
pub mod network;
pub mod neural_architecture_search;
pub mod node_lifecycle;
pub mod node_status_tracker;
pub mod operational_transformation;
pub mod optimization;
pub mod partition_detection;
pub mod performance_metrics;
pub mod performance_monitor;
pub mod raft;
pub mod raft_optimization;
pub mod raft_profiling;
pub mod raft_state;
pub mod range_partitioning;
pub mod read_replica;
pub mod region_manager;
pub mod replication;
pub mod replication_lag_monitor;
pub mod rl_consensus_optimizer;
pub mod rolling_upgrade;
pub mod rolling_upgrade_orchestrator;
pub mod split_brain_detector;
pub mod visualization_dashboard;
pub mod zero_downtime_migration;
// Temporarily disabled due to missing scirs2_core features
// pub mod revolutionary_cluster_optimization;
pub mod cross_dc;
pub mod network_compression;
pub mod security;
pub mod serialization;
pub mod shard;
pub mod shard_manager;
pub mod shard_migration;
pub mod shard_routing;
pub mod split_brain_prevention;
pub mod storage;
pub mod strong_consistency;
pub mod tls;
pub mod topology;
pub mod transaction;
pub mod transaction_optimizer;

#[cfg(feature = "bft")]
pub mod bft;
#[cfg(feature = "bft")]
pub mod bft_consensus;
#[cfg(feature = "bft")]
pub mod bft_network;

pub mod gossip_scaling;
pub mod sla_manager;
pub mod stream_integration;

// New modules added in v0.2.0
pub mod adaptive_consistent_hash;
pub mod cross_dc_consistency;
pub mod distributed_tx_coordinator;

// v1.1.0 Consistent hashing with virtual nodes and bounded loads
pub mod vnodes_hash_ring;

// v1.2.0 Gossip protocol for cluster membership management
pub mod membership_gossip;

// v1.2.0 Bully algorithm leader election simulation
pub mod leader_election;

// v1.2.0 Raft snapshot management with retention and checksum validation
pub mod snapshot_manager;

// v1.5.0 Consistent-hash shard router
pub mod consistent_shard_router;

// v1.6.0 Partition rebalancing for cluster data redistribution
pub mod partition_rebalancer;

// v1.7.0 Cluster node health monitoring with heartbeats
pub mod node_monitor;

// v1.8.0 Automatic failover handling with split-brain prevention
pub mod failover_manager;

/// Anti-entropy protocol for distributed consistency (v1.9.0).
pub mod anti_entropy;

/// Replication bandwidth throttling: token-bucket per-peer rate limiting with adaptive adjustment (v2.0.0).
pub mod replication_throttle;

/// Data migration between cluster nodes: plan creation, range-based transfer,
/// checksum-validated chunks, migration lifecycle and statistics (v1.1.0 round 14)
pub mod data_migrator;

/// Consistent-hash shard routing for distributed cluster nodes (v1.1.0 round 15)
pub mod shard_router;

/// Raft-style election timer: randomised timeout, TimerState (Idle/Running/Expired),
/// reset/check/stop lifecycle, LCG seed for deterministic tests (v1.1.0 round 16)
pub mod election_timer;

pub use error::{ClusterError, Result};
pub use failover::{FailoverConfig, FailoverManager, FailoverStrategy, RecoveryAction};
pub use health_monitor::{HealthMonitor, HealthMonitorConfig, NodeHealth, SystemMetrics};

// Temporarily disabled - Re-export revolutionary cluster optimization types
// pub use revolutionary_cluster_optimization::{
//     RevolutionaryClusterOptimizer, RevolutionaryClusterConfig, ConsensusOptimizationConfig,
//     DataDistributionConfig, AdaptiveReplicationConfig, NetworkOptimizationConfig,
//     ClusterPerformanceTargets, ClusterOptimizationResult, ClusterState, NodeState,
//     ClusterOptimizationContext, ClusterAnalytics, ScalingPrediction,
//     RevolutionaryClusterOptimizerFactory, ConsensusOptimizationStrategy,
//     DataDistributionStrategy, AdaptiveReplicationStrategy, NetworkOptimizationStrategy,
// };

use conflict_resolution::{
    ConflictResolver, ResolutionStrategy, TimestampedOperation, VectorClock,
};
use consensus::ConsensusManager;
use discovery::{DiscoveryConfig, DiscoveryService, NodeInfo};
use distributed_query::{DistributedQueryExecutor, ResultBinding};
use edge_computing::{EdgeComputingManager, EdgeDeploymentStrategy, EdgeDeviceProfile};
use raft::{OxirsNodeId, RdfResponse};
use region_manager::{
    ConsensusStrategy as RegionConsensusStrategy, MultiRegionReplicationStrategy, Region,
    RegionManager,
};
use replication::{ReplicationManager, ReplicationStats, ReplicationStrategy};

/// Multi-region deployment configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultiRegionConfig {
    /// Region identifier where this node is located
    pub region_id: String,
    /// Availability zone identifier
    pub availability_zone_id: String,
    /// Data center identifier (optional)
    pub data_center: Option<String>,
    /// Rack identifier (optional)
    pub rack: Option<String>,
    /// List of all regions in the deployment
    pub regions: Vec<Region>,
    /// Consensus strategy for multi-region operations
    pub consensus_strategy: RegionConsensusStrategy,
    /// Replication strategy for multi-region
    pub replication_strategy: MultiRegionReplicationStrategy,
    /// Conflict resolution strategy for distributed operations
    pub conflict_resolution_strategy: ResolutionStrategy,
    /// Edge computing configuration
    pub edge_config: Option<EdgeComputingConfig>,
    /// Enable advanced monitoring and metrics
    pub enable_monitoring: bool,
}

/// Edge computing configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EdgeComputingConfig {
    /// Enable edge computing features
    pub enabled: bool,
    /// Local edge device profile
    pub device_profile: EdgeDeviceProfile,
    /// Edge deployment strategy
    pub deployment_strategy: EdgeDeploymentStrategy,
    /// Enable intelligent caching
    pub enable_intelligent_caching: bool,
    /// Enable network condition monitoring
    pub enable_network_monitoring: bool,
}

/// Cluster node configuration
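///
/// A minimal configuration sketch using only the constructor and helpers defined
/// below (the node IDs and address are illustrative):
///
/// ```ignore
/// use oxirs_cluster::NodeConfig;
///
/// let mut config = NodeConfig::new(1, "127.0.0.1:8080".parse().unwrap());
/// config.add_peer(2);
/// config.add_peer(3);
/// assert_eq!(config.peers, vec![2, 3]);
/// assert!(!config.is_multi_region_enabled());
/// ```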
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeConfig {
    /// Unique node identifier
    pub node_id: OxirsNodeId,
    /// Network address for communication
    pub address: SocketAddr,
    /// Data directory for persistent storage
    pub data_dir: String,
    /// List of peer node IDs
    pub peers: Vec<OxirsNodeId>,
    /// Discovery configuration
    pub discovery: Option<DiscoveryConfig>,
    /// Replication strategy
    pub replication_strategy: Option<ReplicationStrategy>,
    /// Use Byzantine fault tolerance instead of Raft
    #[cfg(feature = "bft")]
    pub use_bft: bool,
    /// Multi-region deployment configuration
    pub region_config: Option<MultiRegionConfig>,
}

impl NodeConfig {
    /// Create a new node configuration
    pub fn new(node_id: OxirsNodeId, address: SocketAddr) -> Self {
        Self {
            node_id,
            address,
            data_dir: format!("./data/node-{node_id}"),
            peers: Vec::new(),
            discovery: Some(DiscoveryConfig::default()),
            replication_strategy: Some(ReplicationStrategy::default()),
            #[cfg(feature = "bft")]
            use_bft: false,
            region_config: None,
        }
    }

    /// Add a peer to the configuration
    pub fn add_peer(&mut self, peer_id: OxirsNodeId) -> &mut Self {
        if !self.peers.contains(&peer_id) && peer_id != self.node_id {
            self.peers.push(peer_id);
        }
        self
    }

    /// Set the discovery configuration
    pub fn with_discovery(mut self, discovery: DiscoveryConfig) -> Self {
        self.discovery = Some(discovery);
        self
    }

    /// Set the replication strategy
    pub fn with_replication_strategy(mut self, strategy: ReplicationStrategy) -> Self {
        self.replication_strategy = Some(strategy);
        self
    }

    /// Enable Byzantine fault tolerance
    #[cfg(feature = "bft")]
    pub fn with_bft(mut self, enable: bool) -> Self {
        self.use_bft = enable;
        self
    }

    /// Set multi-region configuration
    pub fn with_multi_region(mut self, region_config: MultiRegionConfig) -> Self {
        self.region_config = Some(region_config);
        self
    }

    /// Check if multi-region is enabled
    pub fn is_multi_region_enabled(&self) -> bool {
        self.region_config.is_some()
    }

    /// Get region ID if configured
    pub fn region_id(&self) -> Option<&str> {
        self.region_config
            .as_ref()
            .map(|config| config.region_id.as_str())
    }

    /// Get availability zone ID if configured
    pub fn availability_zone_id(&self) -> Option<&str> {
        self.region_config
            .as_ref()
            .map(|config| config.availability_zone_id.as_str())
    }
}

/// Cluster node implementation
pub struct ClusterNode {
    config: NodeConfig,
    consensus: ConsensusManager,
    discovery: DiscoveryService,
    replication: ReplicationManager,
    query_executor: DistributedQueryExecutor,
    region_manager: Option<Arc<RegionManager>>,
    conflict_resolver: Arc<ConflictResolver>,
    #[allow(dead_code)]
    edge_manager: Option<Arc<EdgeComputingManager>>,
    local_vector_clock: Arc<RwLock<VectorClock>>,
    running: Arc<RwLock<bool>>,
    byzantine_mode: Arc<RwLock<bool>>,
    network_isolated: Arc<RwLock<bool>>,
}

impl ClusterNode {
    /// Create a new cluster node
    pub async fn new(config: NodeConfig) -> Result<Self> {
        // Validate configuration
        if config.data_dir.is_empty() {
            return Err(ClusterError::Config(
                "Data directory cannot be empty".to_string(),
            ));
        }

        // Create data directory if it doesn't exist
        tokio::fs::create_dir_all(&config.data_dir)
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to create data directory: {e}")))?;

        // Initialize consensus manager
        let consensus = ConsensusManager::new(config.node_id, config.peers.clone());

        // Initialize discovery service
        let discovery_config = config.discovery.clone().unwrap_or_default();
        let discovery = DiscoveryService::new(config.node_id, config.address, discovery_config);

        // Initialize replication manager
        let replication_strategy = config.replication_strategy.clone().unwrap_or_default();
        let replication = ReplicationManager::new(replication_strategy, config.node_id);

        // Initialize distributed query executor
        let query_executor = DistributedQueryExecutor::new(config.node_id);

        // Initialize conflict resolver
        let default_resolution_strategy = if let Some(region_config) = &config.region_config {
            region_config.conflict_resolution_strategy.clone()
        } else {
            ResolutionStrategy::LastWriterWins
        };
        let conflict_resolver = Arc::new(ConflictResolver::new(default_resolution_strategy));

        // Initialize vector clock
        let mut vector_clock = VectorClock::new();
        vector_clock.increment(config.node_id);
        let local_vector_clock = Arc::new(RwLock::new(vector_clock));

        // Initialize region manager if multi-region is configured
        let region_manager = if let Some(region_config) = &config.region_config {
            let manager = Arc::new(RegionManager::new(
                region_config.region_id.clone(),
                region_config.availability_zone_id.clone(),
                region_config.consensus_strategy.clone(),
                region_config.replication_strategy.clone(),
            ));

            // Initialize with region topology
            manager
                .initialize(region_config.regions.clone())
                .await
                .map_err(|e| {
                    ClusterError::Other(format!("Failed to initialize region manager: {e}"))
                })?;

            // Register this node in the region manager
            manager
                .register_node(
                    config.node_id,
                    region_config.region_id.clone(),
                    region_config.availability_zone_id.clone(),
                    region_config.data_center.clone(),
                    region_config.rack.clone(),
                )
                .await
                .map_err(|e| {
                    ClusterError::Other(format!("Failed to register node in region manager: {e}"))
                })?;

            Some(manager)
        } else {
            None
        };

        // Initialize edge computing manager if configured
        let edge_manager = if let Some(region_config) = &config.region_config {
            if let Some(edge_config) = &region_config.edge_config {
                if edge_config.enabled {
                    let manager = Arc::new(EdgeComputingManager::new());

                    // Register this device with the edge manager
                    manager
                        .register_device(edge_config.device_profile.clone())
                        .await
                        .map_err(|e| {
                            ClusterError::Other(format!("Failed to register edge device: {e}"))
                        })?;

                    Some(manager)
                } else {
                    None
                }
            } else {
                None
            }
        } else {
            None
        };

        Ok(Self {
            config,
            consensus,
            discovery,
            replication,
            query_executor,
            region_manager,
            conflict_resolver,
            edge_manager,
            local_vector_clock,
            running: Arc::new(RwLock::new(false)),
            byzantine_mode: Arc::new(RwLock::new(false)),
            network_isolated: Arc::new(RwLock::new(false)),
        })
    }

    /// Start the cluster node
    pub async fn start(&mut self) -> Result<()> {
        {
            let mut running = self.running.write().await;
            if *running {
                return Ok(());
            }
            *running = true;
        }

        tracing::info!(
            "Starting cluster node {} at {} with {} peers",
            self.config.node_id,
            self.config.address,
            self.config.peers.len()
        );

        // Start discovery service
        self.discovery
            .start()
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to start discovery service: {e}")))?;

        // Discover initial nodes
        let discovered_nodes = self
            .discovery
            .discover_nodes()
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to discover nodes: {e}")))?;

        // Add discovered nodes to replication manager and query executor
        for node in discovered_nodes {
            if node.node_id != self.config.node_id {
                self.replication
                    .add_replica(node.node_id, node.address.to_string());
                self.query_executor.add_node(node.node_id).await;
            }
        }

        // Initialize consensus system
        self.consensus
            .init()
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to initialize consensus: {e}")))?;

        tracing::info!("Cluster node {} started successfully", self.config.node_id);

        // Start background tasks
        self.start_background_tasks().await;

        Ok(())
    }

    /// Stop the cluster node
    pub async fn stop(&mut self) -> Result<()> {
        let mut running = self.running.write().await;
        if !*running {
            return Ok(());
        }

        tracing::info!("Stopping cluster node {}", self.config.node_id);

        // Stop discovery service
        self.discovery
            .stop()
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to stop discovery service: {e}")))?;

        *running = false;

        tracing::info!("Cluster node {} stopped", self.config.node_id);

        Ok(())
    }

    /// Check if this node is the leader
    pub async fn is_leader(&self) -> bool {
        self.consensus.is_leader().await
    }

    /// Get current consensus term
    pub async fn current_term(&self) -> u64 {
        self.consensus.current_term().await
    }

    /// Insert a triple through distributed consensus
    pub async fn insert_triple(
        &self,
        subject: &str,
        predicate: &str,
        object: &str,
    ) -> Result<RdfResponse> {
        if !self.is_leader().await {
            return Err(ClusterError::NotLeader);
        }

        let response = self
            .consensus
            .insert_triple(
                subject.to_string(),
                predicate.to_string(),
                object.to_string(),
            )
            .await?;

        Ok(response)
    }

    /// Delete a triple through distributed consensus
    pub async fn delete_triple(
        &self,
        subject: &str,
        predicate: &str,
        object: &str,
    ) -> Result<RdfResponse> {
        if !self.is_leader().await {
            return Err(ClusterError::NotLeader);
        }

        let response = self
            .consensus
            .delete_triple(
                subject.to_string(),
                predicate.to_string(),
                object.to_string(),
            )
            .await?;

        Ok(response)
    }

    /// Clear all triples through distributed consensus
    pub async fn clear_store(&self) -> Result<RdfResponse> {
        if !self.is_leader().await {
            return Err(ClusterError::NotLeader);
        }

        let response = self.consensus.clear_store().await?;
        Ok(response)
    }

    /// Begin a distributed transaction
    pub async fn begin_transaction(&self) -> Result<String> {
        if !self.is_leader().await {
            return Err(ClusterError::NotLeader);
        }

        let tx_id = uuid::Uuid::new_v4().to_string();
        let _response = self.consensus.begin_transaction(tx_id.clone()).await?;

        Ok(tx_id)
    }

    /// Commit a distributed transaction
    pub async fn commit_transaction(&self, tx_id: &str) -> Result<RdfResponse> {
        if !self.is_leader().await {
            return Err(ClusterError::NotLeader);
        }

        let response = self.consensus.commit_transaction(tx_id.to_string()).await?;
        Ok(response)
    }

    /// Rollback a distributed transaction
    pub async fn rollback_transaction(&self, tx_id: &str) -> Result<RdfResponse> {
        if !self.is_leader().await {
            return Err(ClusterError::NotLeader);
        }

        let response = self
            .consensus
            .rollback_transaction(tx_id.to_string())
            .await?;
        Ok(response)
    }

    /// Query triples (can be done on any node)
    pub async fn query_triples(
        &self,
        subject: Option<&str>,
        predicate: Option<&str>,
        object: Option<&str>,
    ) -> Vec<(String, String, String)> {
        self.consensus.query(subject, predicate, object).await
    }

    /// Execute SPARQL query using distributed query processing
    pub async fn query_sparql(&self, sparql: &str) -> Result<Vec<String>> {
        let bindings = self
            .query_executor
            .execute_query(sparql)
            .await
            .map_err(|e| ClusterError::Other(format!("Query execution failed: {e}")))?;

        // Convert result bindings to string format
        let results = bindings
            .into_iter()
            .map(|binding| {
                let vars: Vec<String> = binding
                    .variables
                    .into_iter()
                    .map(|(var, val)| format!("{var}: {val}"))
                    .collect();
                vars.join(", ")
            })
            .collect();

        Ok(results)
    }

    /// Execute SPARQL query and return structured results
    pub async fn query_sparql_bindings(&self, sparql: &str) -> Result<Vec<ResultBinding>> {
        self.query_executor
            .execute_query(sparql)
            .await
            .map_err(|e| ClusterError::Other(format!("Query execution failed: {e}")))
    }

    /// Get query execution statistics
    pub async fn get_query_statistics(
        &self,
    ) -> Result<std::collections::HashMap<String, distributed_query::QueryStats>> {
        Ok(self.query_executor.get_statistics().await)
    }

    /// Clear query cache
    pub async fn clear_query_cache(&self) -> Result<()> {
        self.query_executor.clear_cache().await;
        Ok(())
    }

    /// Get the number of triples in the store
    pub async fn len(&self) -> usize {
        self.consensus.len().await
    }

    /// Check if the store is empty
    pub async fn is_empty(&self) -> bool {
        self.consensus.is_empty().await
    }

    /// Add a new node to the cluster
    pub async fn add_cluster_node(
        &mut self,
        node_id: OxirsNodeId,
        address: SocketAddr,
    ) -> Result<()> {
        if node_id == self.config.node_id {
            return Err(ClusterError::Config(
                "Cannot add self to cluster".to_string(),
            ));
        }

        // Add to configuration
        self.config.add_peer(node_id);

        // Add to discovery
        let node_info = NodeInfo::new(node_id, address);
        self.discovery.add_node(node_info);

        // Add to replication
        self.replication.add_replica(node_id, address.to_string());

        // Add to query executor
        self.query_executor.add_node(node_id).await;

        // Add to consensus (this would trigger Raft membership change)
        self.consensus.add_peer(node_id);

        tracing::info!("Added node {} at {} to cluster", node_id, address);

        Ok(())
    }

    /// Remove a node from the cluster
    pub async fn remove_cluster_node(&mut self, node_id: OxirsNodeId) -> Result<()> {
        if node_id == self.config.node_id {
            return Err(ClusterError::Config(
                "Cannot remove self from cluster".to_string(),
            ));
        }

        // Remove from configuration
        self.config.peers.retain(|&id| id != node_id);

        // Remove from discovery
        self.discovery.remove_node(node_id);

        // Remove from replication
        self.replication.remove_replica(node_id);

        // Remove from query executor
        self.query_executor.remove_node(node_id).await;

        // Remove from consensus (this would trigger Raft membership change)
        self.consensus.remove_peer(node_id);

        tracing::info!("Removed node {} from cluster", node_id);

        Ok(())
    }

    /// Get comprehensive cluster status
    pub async fn get_status(&self) -> ClusterStatus {
        let consensus_status = self.consensus.get_status().await;
        let discovery_stats = self.discovery.get_stats().clone();
        let replication_stats = self.replication.get_stats().clone();

        // Get region status if multi-region is enabled
        let region_status = if let Some(region_manager) = &self.region_manager {
            let region_id = region_manager.get_local_region().to_string();
            let availability_zone_id = region_manager.get_local_availability_zone().to_string();
            let regional_peers = region_manager.get_nodes_in_region(&region_id).await;
            let topology = region_manager.get_topology().await;
            let monitoring_active = region_manager.is_monitoring_active().await;

            Some(RegionStatus {
                region_id,
                availability_zone_id,
                regional_peer_count: regional_peers.len(),
                total_regions: topology.regions.len(),
                monitoring_active,
            })
        } else {
            None
        };

        ClusterStatus {
            node_id: self.config.node_id,
            address: self.config.address,
            is_leader: consensus_status.is_leader,
            current_term: consensus_status.current_term,
            peer_count: consensus_status.peer_count,
            triple_count: consensus_status.triple_count,
            discovery_stats,
            replication_stats,
            is_running: *self.running.read().await,
            region_status,
        }
    }

    /// Start background maintenance tasks
    async fn start_background_tasks(&mut self) {
        let running = Arc::clone(&self.running);

        // Discovery and health check task
        let discovery_config = self.config.discovery.clone().unwrap_or_default();
        let mut discovery_clone =
            DiscoveryService::new(self.config.node_id, self.config.address, discovery_config);

        tokio::spawn(async move {
            while *running.read().await {
                discovery_clone.run_periodic_tasks().await;
                tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            }
        });

        // Replication maintenance task
        let mut replication_clone = ReplicationManager::with_raft_consensus(self.config.node_id);
        let running_clone = Arc::clone(&self.running);

        tokio::spawn(async move {
            if *running_clone.read().await {
                replication_clone.run_maintenance().await; // run_maintenance() runs its own infinite loop
            }
        });
    }

    /// Add a new node to the cluster using consensus protocol
    pub async fn add_node_with_consensus(
        &mut self,
        node_id: OxirsNodeId,
        address: SocketAddr,
    ) -> Result<()> {
        self.consensus
            .add_node_with_consensus(node_id, address.to_string())
            .await
            .map_err(|e| {
                ClusterError::Other(format!("Failed to add node through consensus: {e}"))
            })?;

        // Update local configuration
        self.config.add_peer(node_id);

        // Add to discovery, replication, and query executor
        let node_info = NodeInfo::new(node_id, address);
        self.discovery.add_node(node_info);
        self.replication.add_replica(node_id, address.to_string());
        self.query_executor.add_node(node_id).await;

        Ok(())
    }

    /// Remove a node from the cluster using consensus protocol
    pub async fn remove_node_with_consensus(&mut self, node_id: OxirsNodeId) -> Result<()> {
        self.consensus
            .remove_node_with_consensus(node_id)
            .await
            .map_err(|e| {
                ClusterError::Other(format!("Failed to remove node through consensus: {e}"))
            })?;

        // Update local configuration
        self.config.peers.retain(|&id| id != node_id);

        // Remove from discovery, replication, and query executor
        self.discovery.remove_node(node_id);
        self.replication.remove_replica(node_id);
        self.query_executor.remove_node(node_id).await;

        Ok(())
    }

    /// Gracefully shutdown this node
    pub async fn graceful_shutdown(&mut self) -> Result<()> {
        tracing::info!(
            "Initiating graceful shutdown of cluster node {}",
            self.config.node_id
        );

        // Stop background tasks first
        {
            let mut running = self.running.write().await;
            *running = false;
        }

        // Gracefully shutdown consensus layer (includes leadership transfer if needed)
        self.consensus
            .graceful_shutdown()
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to shutdown consensus: {e}")))?;

        // Stop discovery and replication services
        self.discovery
            .stop()
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to stop discovery: {e}")))?;

        tracing::info!("Cluster node {} gracefully shutdown", self.config.node_id);
        Ok(())
    }

    /// Transfer leadership to another node
    pub async fn transfer_leadership(&mut self, target_node: OxirsNodeId) -> Result<()> {
        if !self.config.peers.contains(&target_node) {
            return Err(ClusterError::Config(format!(
                "Target node {target_node} not in cluster"
            )));
        }

        self.consensus
            .transfer_leadership(target_node)
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to transfer leadership: {e}")))?;

        Ok(())
    }

    /// Force evict a non-responsive node
    pub async fn force_evict_node(&mut self, node_id: OxirsNodeId) -> Result<()> {
        self.consensus
            .force_evict_node(node_id)
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to force evict node: {e}")))?;

        // Update local configuration
        self.config.peers.retain(|&id| id != node_id);
        self.discovery.remove_node(node_id);
        self.replication.remove_replica(node_id);
        self.query_executor.remove_node(node_id).await;

        Ok(())
    }

    /// Check health of all peer nodes
    pub async fn check_cluster_health(&self) -> Result<Vec<consensus::NodeHealthStatus>> {
        self.consensus
            .check_peer_health()
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to check cluster health: {e}")))
    }

    /// Attempt recovery from partition or failure
    pub async fn attempt_recovery(&mut self) -> Result<()> {
        self.consensus
            .attempt_recovery()
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to recover cluster: {e}")))?;

        tracing::info!(
            "Cluster recovery completed for node {}",
            self.config.node_id
        );
        Ok(())
    }

    /// Get the node ID
    pub fn id(&self) -> OxirsNodeId {
        self.config.node_id
    }

    /// Count triples in the store
    pub async fn count_triples(&self) -> Result<usize> {
        Ok(self.len().await)
    }

    /// Check if the node is active (running and not isolated)
    pub async fn is_active(&self) -> Result<bool> {
        Ok(*self.running.read().await && !*self.network_isolated.read().await)
    }

    /// Isolate the node from network (simulate network partition)
    pub async fn isolate_network(&self) -> Result<()> {
        let mut isolated = self.network_isolated.write().await;
        *isolated = true;
        tracing::info!("Node {} network isolated", self.config.node_id);
        Ok(())
    }

    /// Restore network connectivity
    pub async fn restore_network(&self) -> Result<()> {
        let mut isolated = self.network_isolated.write().await;
        *isolated = false;
        tracing::info!("Node {} network restored", self.config.node_id);
        Ok(())
    }

    /// Enable Byzantine behavior (for testing)
    pub async fn enable_byzantine_mode(&self) -> Result<()> {
        let mut byzantine = self.byzantine_mode.write().await;
        *byzantine = true;
        tracing::info!("Node {} Byzantine mode enabled", self.config.node_id);
        Ok(())
    }

    /// Check if node is in Byzantine mode
    pub async fn is_byzantine(&self) -> Result<bool> {
        Ok(*self.byzantine_mode.read().await)
    }

    /// Get multi-region manager (if configured)
    pub fn region_manager(&self) -> Option<&Arc<RegionManager>> {
        self.region_manager.as_ref()
    }

    /// Check if multi-region deployment is enabled
    pub fn is_multi_region_enabled(&self) -> bool {
        self.region_manager.is_some()
    }

    /// Get current node's region ID
    pub fn get_region_id(&self) -> Option<String> {
        self.region_manager
            .as_ref()
            .map(|rm| rm.get_local_region().to_string())
    }

    /// Get current node's availability zone ID
    pub fn get_availability_zone_id(&self) -> Option<String> {
        self.region_manager
            .as_ref()
            .map(|rm| rm.get_local_availability_zone().to_string())
    }

    /// Get nodes in the same region
    pub async fn get_regional_peers(&self) -> Result<Vec<OxirsNodeId>> {
        if let Some(region_manager) = &self.region_manager {
            let region_id = region_manager.get_local_region();
            Ok(region_manager.get_nodes_in_region(region_id).await)
        } else {
            Err(ClusterError::Config(
                "Multi-region not configured".to_string(),
            ))
        }
    }

    /// Get optimal leader candidates considering region affinity
    pub async fn get_regional_leader_candidates(&self) -> Result<Vec<OxirsNodeId>> {
        if let Some(region_manager) = &self.region_manager {
            let region_id = region_manager.get_local_region();
            Ok(region_manager.get_leader_candidates(region_id).await)
        } else {
            // Fall back to regular peer list
            Ok(self.config.peers.clone())
        }
    }

    /// Calculate cross-region replication targets
    pub async fn get_cross_region_replication_targets(&self) -> Result<Vec<String>> {
        if let Some(region_manager) = &self.region_manager {
            let region_id = region_manager.get_local_region();
            region_manager
                .calculate_replication_targets(region_id)
                .await
                .map_err(|e| {
                    ClusterError::Other(format!("Failed to calculate replication targets: {e}"))
                })
        } else {
            Ok(Vec::new())
        }
    }

    /// Monitor inter-region latencies and update metrics
    pub async fn monitor_region_latencies(&self) -> Result<()> {
        if let Some(region_manager) = &self.region_manager {
            region_manager.monitor_latencies().await.map_err(|e| {
                ClusterError::Other(format!("Failed to monitor region latencies: {e}"))
            })
        } else {
            Ok(())
        }
    }

    /// Get region health status
    pub async fn get_region_health(&self, region_id: &str) -> Result<region_manager::RegionHealth> {
        if let Some(region_manager) = &self.region_manager {
            region_manager
                .get_region_health(region_id)
                .await
                .map_err(|e| ClusterError::Other(format!("Failed to get region health: {e}")))
        } else {
            Err(ClusterError::Config(
                "Multi-region not configured".to_string(),
            ))
        }
    }

    /// Perform region failover operation
    pub async fn perform_region_failover(
        &self,
        failed_region: &str,
        target_region: &str,
    ) -> Result<()> {
        if let Some(region_manager) = &self.region_manager {
            region_manager
                .perform_region_failover(failed_region, target_region)
                .await
                .map_err(|e| ClusterError::Other(format!("Failed to perform region failover: {e}")))
        } else {
            Err(ClusterError::Config(
                "Multi-region not configured".to_string(),
            ))
        }
    }

    /// Get multi-region topology information
    pub async fn get_region_topology(&self) -> Result<region_manager::RegionTopology> {
        if let Some(region_manager) = &self.region_manager {
            Ok(region_manager.get_topology().await)
        } else {
            Err(ClusterError::Config(
                "Multi-region not configured".to_string(),
            ))
        }
    }

    /// Add a node to a specific region and availability zone
    pub async fn add_node_to_region(
        &self,
        node_id: OxirsNodeId,
        region_id: String,
        availability_zone_id: String,
        data_center: Option<String>,
        rack: Option<String>,
    ) -> Result<()> {
        if let Some(region_manager) = &self.region_manager {
            region_manager
                .register_node(node_id, region_id, availability_zone_id, data_center, rack)
                .await
                .map_err(|e| ClusterError::Other(format!("Failed to add node to region: {e}")))
        } else {
            Err(ClusterError::Config(
                "Multi-region not configured".to_string(),
            ))
        }
    }

    /// Get conflict resolver instance
    pub fn conflict_resolver(&self) -> &Arc<ConflictResolver> {
        &self.conflict_resolver
    }

    /// Get current vector clock value
    pub async fn get_vector_clock(&self) -> VectorClock {
        self.local_vector_clock.read().await.clone()
    }

    /// Update vector clock with received clock
    pub async fn update_vector_clock(&self, received_clock: &VectorClock) {
        let mut clock = self.local_vector_clock.write().await;
        clock.update(received_clock);
        clock.increment(self.config.node_id);
    }

    /// Create a timestamped operation with current vector clock
    pub async fn create_timestamped_operation(
        &self,
        operation: conflict_resolution::RdfOperation,
        priority: u32,
    ) -> TimestampedOperation {
        let mut clock = self.local_vector_clock.write().await;
        clock.increment(self.config.node_id);

        TimestampedOperation {
            operation_id: uuid::Uuid::new_v4().to_string(),
            origin_node: self.config.node_id,
            vector_clock: clock.clone(),
            physical_time: std::time::SystemTime::now(),
            operation,
            priority,
        }
    }

    /// Detect conflicts in a batch of operations
    pub async fn detect_operation_conflicts(
        &self,
        operations: &[TimestampedOperation],
    ) -> Result<Vec<conflict_resolution::ConflictType>> {
        self.conflict_resolver
            .detect_conflicts(operations)
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to detect conflicts: {e}")))
    }

    /// Resolve conflicts using configured strategies
    pub async fn resolve_operation_conflicts(
        &self,
        conflicts: &[conflict_resolution::ConflictType],
    ) -> Result<Vec<conflict_resolution::ResolutionResult>> {
        self.conflict_resolver
            .resolve_conflicts(conflicts)
            .await
            .map_err(|e| ClusterError::Other(format!("Failed to resolve conflicts: {e}")))
    }

    /// Submit an operation for conflict-aware processing
    pub async fn submit_conflict_aware_operation(
        &self,
        operation: conflict_resolution::RdfOperation,
        priority: u32,
    ) -> Result<RdfResponse> {
        // Create timestamped operation
        let _timestamped_op = self
            .create_timestamped_operation(operation.clone(), priority)
            .await;

        // For now, submit to consensus without conflict detection
        // In a full implementation, this would be integrated with the consensus layer
        match operation {
            conflict_resolution::RdfOperation::Insert {
                subject,
                predicate,
                object,
                ..
            } => self.insert_triple(&subject, &predicate, &object).await,
            conflict_resolution::RdfOperation::Delete {
                subject,
                predicate,
                object,
                ..
            } => self.delete_triple(&subject, &predicate, &object).await,
            conflict_resolution::RdfOperation::Clear { .. } => self.clear_store().await,
            conflict_resolution::RdfOperation::Update {
                old_triple,
                new_triple,
                ..
            } => {
                // Implement as delete + insert
                let _delete_result = self
                    .delete_triple(&old_triple.0, &old_triple.1, &old_triple.2)
                    .await?;
                self.insert_triple(&new_triple.0, &new_triple.1, &new_triple.2)
                    .await
            }
            conflict_resolution::RdfOperation::Batch { operations: _ } => {
                // Process batch operations sequentially
                // Note: This is a simplified implementation that doesn't use recursion
                // In a full implementation, each operation would be processed individually
                // For now, just return success for batch operations
                Ok(RdfResponse::Success)
            }
        }
    }

    /// Get conflict resolution statistics
    pub async fn get_conflict_resolution_statistics(
        &self,
    ) -> conflict_resolution::ResolutionStatistics {
        self.conflict_resolver.get_statistics().await
    }
}

/// Comprehensive cluster status information
#[derive(Debug, Clone)]
pub struct ClusterStatus {
    /// Local node ID
    pub node_id: OxirsNodeId,
    /// Local node address
    pub address: SocketAddr,
    /// Whether this node is the current leader
    pub is_leader: bool,
    /// Current Raft term
    pub current_term: u64,
    /// Number of peer nodes
    pub peer_count: usize,
    /// Number of triples in the store
    pub triple_count: usize,
    /// Discovery service statistics
    pub discovery_stats: discovery::DiscoveryStats,
    /// Replication statistics
    pub replication_stats: ReplicationStats,
    /// Whether the node is currently running
    pub is_running: bool,
    /// Multi-region status (if enabled)
    pub region_status: Option<RegionStatus>,
}

/// Multi-region status information
#[derive(Debug, Clone)]
pub struct RegionStatus {
    /// Current region ID
    pub region_id: String,
    /// Current availability zone ID
    pub availability_zone_id: String,
    /// Number of nodes in the same region
    pub regional_peer_count: usize,
    /// Total number of regions in topology
    pub total_regions: usize,
    /// Whether multi-region monitoring is active
    pub monitoring_active: bool,
}

/// Distributed RDF store (simplified interface)
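///
/// A minimal usage sketch (single node; inserts succeed only while this node
/// holds leadership, so treat this as illustrative rather than definitive):
///
/// ```ignore
/// use oxirs_cluster::{DistributedStore, NodeConfig};
///
/// async fn run() -> oxirs_cluster::Result<()> {
///     let config = NodeConfig::new(1, "127.0.0.1:9000".parse().unwrap());
///     let mut store = DistributedStore::new(config).await?;
///     store.start().await?;
///
///     store
///         .insert_triple("<http://example.org/s>", "<http://example.org/p>", "\"o\"")
///         .await?;
///     let matches = store
///         .query_pattern(Some("<http://example.org/s>"), None, None)
///         .await;
///     println!("matched {} triples", matches.len());
///
///     store.stop().await?;
///     Ok(())
/// }
/// ```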
pub struct DistributedStore {
    node: ClusterNode,
}

impl DistributedStore {
    /// Create a new distributed store
    pub async fn new(config: NodeConfig) -> Result<Self> {
        let node = ClusterNode::new(config).await?;
        Ok(Self { node })
    }

    /// Start the distributed store
    pub async fn start(&mut self) -> Result<()> {
        self.node.start().await
    }

    /// Stop the distributed store
    pub async fn stop(&mut self) -> Result<()> {
        self.node.stop().await
    }

    /// Insert a triple (only on leader)
    pub async fn insert_triple(
        &mut self,
        subject: &str,
        predicate: &str,
        object: &str,
    ) -> Result<()> {
        let _response = self.node.insert_triple(subject, predicate, object).await?;
        Ok(())
    }

    /// Query triples using SPARQL
    pub async fn query_sparql(&self, sparql: &str) -> Result<Vec<String>> {
        self.node.query_sparql(sparql).await
    }

    /// Query triples by pattern
    pub async fn query_pattern(
        &self,
        subject: Option<&str>,
        predicate: Option<&str>,
        object: Option<&str>,
    ) -> Vec<(String, String, String)> {
        self.node.query_triples(subject, predicate, object).await
    }

    /// Get cluster status
    pub async fn get_status(&self) -> ClusterStatus {
        self.node.get_status().await
    }
}

/// Re-export commonly used types
pub use consensus::ConsensusError;
pub use discovery::DiscoveryError;
pub use replication::ReplicationError;

#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{IpAddr, Ipv4Addr};

    #[tokio::test]
    async fn test_node_config_creation() {
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let config = NodeConfig::new(1, addr);

        assert_eq!(config.node_id, 1);
        assert_eq!(config.address, addr);
        assert_eq!(config.data_dir, "./data/node-1");
        assert!(config.peers.is_empty());
        assert!(config.discovery.is_some());
        assert!(config.replication_strategy.is_some());
        assert!(config.region_config.is_none());
    }

    #[tokio::test]
    async fn test_node_config_add_peer() {
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let mut config = NodeConfig::new(1, addr);

        config.add_peer(2);
        config.add_peer(3);
        config.add_peer(2); // Duplicate should be ignored

        assert_eq!(config.peers, vec![2, 3]);
    }

    #[tokio::test]
    async fn test_node_config_no_self_peer() {
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let mut config = NodeConfig::new(1, addr);

        config.add_peer(1); // Should not add self

        assert!(config.peers.is_empty());
    }
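
    // Added illustration of the builder-style helpers on NodeConfig; a minimal
    // check that exercises only APIs declared in this file.
    #[test]
    fn test_node_config_builder_helpers() {
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let config = NodeConfig::new(1, addr)
            .with_discovery(DiscoveryConfig::default())
            .with_replication_strategy(ReplicationStrategy::default());

        assert!(config.discovery.is_some());
        assert!(config.replication_strategy.is_some());
        assert!(!config.is_multi_region_enabled());
        assert!(config.region_id().is_none());
        assert!(config.availability_zone_id().is_none());
    }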

    #[tokio::test]
    async fn test_cluster_node_creation() {
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let config = NodeConfig::new(1, addr);

        let node = ClusterNode::new(config).await;
        assert!(node.is_ok());

        let node = node.unwrap();
        assert_eq!(node.config.node_id, 1);
        assert_eq!(node.config.address, addr);
    }

    #[tokio::test]
    async fn test_cluster_node_empty_data_dir_error() {
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let mut config = NodeConfig::new(1, addr);
        config.data_dir = String::new();

        let result = ClusterNode::new(config).await;
        assert!(result.is_err());
        if let Err(e) = result {
            assert!(e.to_string().contains("Data directory cannot be empty"));
        }
    }

    #[tokio::test]
    async fn test_distributed_store_creation() {
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let config = NodeConfig::new(1, addr);

        let store = DistributedStore::new(config).await;
        assert!(store.is_ok());
    }

    #[test]
    fn test_cluster_error_types() {
        let err = ClusterError::Config("test error".to_string());
        assert!(err.to_string().contains("Configuration error: test error"));

        let err = ClusterError::NotLeader;
        assert_eq!(err.to_string(), "Not the leader node");

        let err = ClusterError::Network("connection failed".to_string());
        assert!(err.to_string().contains("Network error: connection failed"));
    }
}