1use std::{
10 collections::{HashMap, VecDeque},
11 net::{IpAddr, SocketAddr},
12 sync::{Arc, Mutex, RwLock},
13 time::{Duration, Instant},
14};
15
16use tokio::time::timeout;
17
18use tracing::{debug, info, warn};
19
20use tokio::time::sleep;
21
22use crate::{
23 candidate_discovery::NetworkInterface,
24 connection::nat_traversal::{CandidateSource, CandidateState},
25 nat_traversal_api::{CandidateAddress, PeerId},
26};
27
/// Coordinates candidate discovery across multiple network interfaces.
#[derive(Debug)]
pub struct ParallelDiscoveryCoordinator {
    /// Per-interface discovery tasks, keyed by interface name.
    active_discoveries: Arc<RwLock<HashMap<String, DiscoveryTask>>>,
    /// Tuning knobs for parallelism, timeouts and prioritization.
    config: ParallelDiscoveryConfig,
    /// Aggregate counters updated by the coordination loop.
    stats: Arc<Mutex<ParallelDiscoveryStats>>,
    /// Handle of the background coordination task, once started.
    coordination_handle: Option<tokio::task::JoinHandle<()>>,
}
40
/// Configuration for the parallel discovery coordinator.
#[derive(Debug, Clone)]
pub struct ParallelDiscoveryConfig {
    /// Upper bound on simultaneously started discovery tasks.
    pub max_concurrent_tasks: usize,
    /// Per-interface discovery deadline.
    pub interface_timeout: Duration,
    /// When true, interfaces are ordered by preference before starting.
    pub enable_prioritization: bool,
    /// Interface types in descending order of preference.
    pub preferred_interface_types: Vec<InterfaceType>,
    /// When true, the task limit is scaled by a load factor instead of
    /// using `max_concurrent_tasks` directly.
    pub enable_adaptive_parallelism: bool,
}
55
/// Coarse classification of a network interface, inferred from its name.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InterfaceType {
    Ethernet,
    WiFi,
    Cellular,
    Loopback,
    VPN,
    /// Name did not match any known pattern.
    Unknown,
}
66
/// Bookkeeping for one in-flight (or finished) per-interface discovery.
#[derive(Debug)]
struct DiscoveryTask {
    interface_name: String,
    interface_type: InterfaceType,
    // Used to compute discovery duration once the task completes.
    started_at: Instant,
    status: TaskStatus,
    // Populated only when `status` reaches `Completed`.
    discovered_candidates: Vec<CandidateAddress>,
    // Static priority derived from the interface type.
    priority: u32,
}
77
/// Lifecycle of a discovery task. `Completed`, `Failed` and `Timeout`
/// are terminal.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum TaskStatus {
    Pending,
    Running,
    Completed,
    Failed,
    Timeout,
}
87
/// Aggregate statistics for parallel discovery runs.
#[derive(Debug, Default, Clone)]
pub struct ParallelDiscoveryStats {
    pub tasks_started: u64,
    pub tasks_completed: u64,
    pub tasks_failed: u64,
    /// Mean wall-clock duration of completed tasks.
    pub avg_discovery_time: Duration,
    /// Total candidates produced by completed tasks.
    pub total_candidates: u64,
    /// Ratio of completed to started tasks.
    pub parallelism_efficiency: f64,
}
104
/// Derives per-operation timeouts from observed network conditions.
#[derive(Debug)]
pub struct AdaptiveTimeoutManager {
    /// Rolling RTT / loss / bandwidth estimates.
    network_conditions: Arc<RwLock<NetworkConditions>>,
    /// Static timeout policy per operation type.
    timeout_configs: HashMap<OperationType, AdaptiveTimeoutConfig>,
    stats: Arc<Mutex<AdaptiveTimeoutStats>>,
    /// Handle of the background condition-maintenance task, once started.
    monitoring_handle: Option<tokio::task::JoinHandle<()>>,
}
117
/// Rolling estimates of current network health.
#[derive(Debug, Clone)]
pub struct NetworkConditions {
    // Bounded window of recent RTT samples.
    rtt_samples: VecDeque<Duration>,
    // EWMA in [0, 1].
    packet_loss_rate: f64,
    // Units presumably bytes/sec — TODO confirm against producers.
    bandwidth_estimate: u64,
    // 1.0 = perfect link, derived from RTT and loss.
    quality_score: f64,
    // 0.0 = no congestion, capped at 1.0.
    congestion_level: f64,
    last_measurement: Instant,
}
134
/// NAT-traversal operations that receive individually tuned timeouts.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum OperationType {
    CandidateDiscovery,
    PathValidation,
    CoordinationRequest,
    HolePunching,
    ConnectionEstablishment,
}
144
/// Timeout policy for one operation type.
#[derive(Debug, Clone)]
struct AdaptiveTimeoutConfig {
    // Used when no RTT samples are available yet.
    base_timeout: Duration,
    // Hard lower clamp for the computed timeout.
    min_timeout: Duration,
    // Hard upper clamp for the computed timeout.
    max_timeout: Duration,
    // Average RTT is multiplied by this to form the base value.
    rtt_multiplier: f64,
    // Weight of (1 - quality_score) in the final adjustment.
    quality_factor: f64,
    // Weight of congestion_level in the final adjustment.
    congestion_factor: f64,
}
161
/// Statistics for adaptive timeout calculation.
#[derive(Debug, Default, Clone)]
pub struct AdaptiveTimeoutStats {
    pub adjustments_made: u64,
    /// Most recently computed timeout per operation (despite the name,
    /// the map stores the latest value, not a running average).
    pub avg_timeouts: HashMap<OperationType, Duration>,
    pub timeout_effectiveness: f64,
    pub condition_accuracy: f64,
}
174
/// Runs path-validation sessions while respecting bandwidth budgets.
#[derive(Debug)]
pub struct BandwidthAwareValidator {
    /// One session per validation target address.
    active_validations: Arc<RwLock<HashMap<SocketAddr, ValidationSession>>>,
    /// Rolling bandwidth samples and current estimate.
    bandwidth_monitor: Arc<Mutex<BandwidthMonitor>>,
    config: BandwidthValidationConfig,
    stats: Arc<Mutex<BandwidthValidationStats>>,
}
187
/// Configuration for bandwidth-aware validation.
#[derive(Debug, Clone)]
pub struct BandwidthValidationConfig {
    /// Cap on concurrent validation sessions.
    pub max_concurrent_validations: usize,
    /// Bandwidth budget; units presumably bytes/sec — TODO confirm.
    pub bandwidth_threshold: u64,
    /// When true, admission also considers current bandwidth utilization.
    pub enable_adaptive_validation: bool,
    /// Size of each validation probe packet, in bytes.
    pub validation_packet_size: usize,
    /// Maximum validation rate (validations per second).
    pub max_validation_rate: f64,
}
202
/// Rolling-window estimator of available bandwidth.
#[derive(Debug)]
struct BandwidthMonitor {
    // Bounded window (most recent 100 samples).
    bandwidth_samples: VecDeque<BandwidthSample>,
    // Current estimate; units presumably bytes/sec — TODO confirm.
    current_bandwidth: u64,
    utilization: f64,
    last_measurement: Instant,
}
215
/// One bandwidth measurement interval.
#[derive(Debug, Clone)]
struct BandwidthSample {
    timestamp: Instant,
    bytes_transferred: u64,
    // Interval since the previous measurement.
    duration: Duration,
    // bytes_transferred / duration, precomputed at sampling time.
    bandwidth: u64,
}
224
/// Accounting state for one in-flight path validation.
#[derive(Debug)]
struct ValidationSession {
    target_address: SocketAddr,
    started_at: Instant,
    packets_sent: u32,
    packets_received: u32,
    total_bytes: u64,
    rtt_samples: Vec<Duration>,
    // Bytes this session has charged against the bandwidth budget.
    bandwidth_usage: u64,
    priority: ValidationPriority,
}
237
/// Relative importance of a validation session; `Ord` follows declaration
/// order, so `Low < Normal < High < Critical`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum ValidationPriority {
    Low,
    Normal,
    High,
    Critical,
}
246
/// Statistics for bandwidth-aware validation.
#[derive(Debug, Default, Clone)]
pub struct BandwidthValidationStats {
    pub validations_started: u64,
    pub validations_completed: u64,
    /// Total bytes consumed by all sessions, successful or not.
    pub total_bandwidth_used: u64,
    pub avg_validation_time: Duration,
    /// Completions per unit of bandwidth (scaled by 1000 at computation).
    pub bandwidth_efficiency: f64,
}
261
/// Gates connection migrations on observed congestion state.
#[derive(Debug)]
pub struct CongestionControlIntegrator {
    /// Migration sessions keyed by peer.
    active_migrations: Arc<RwLock<HashMap<PeerId, MigrationSession>>>,
    /// Shared congestion-control view (cwnd, RTTs, events).
    congestion_state: Arc<Mutex<CongestionState>>,
    config: CongestionIntegrationConfig,
    stats: Arc<Mutex<CongestionIntegrationStats>>,
}
274
/// Configuration for congestion-aware migration.
#[derive(Debug, Clone)]
pub struct CongestionIntegrationConfig {
    /// When true, migrations are refused above `congestion_threshold`.
    pub enable_congestion_awareness: bool,
    /// Congestion level (0..=1) above which migrations are deferred.
    pub congestion_threshold: f64,
    pub max_migrations_per_second: f64,
    pub enable_bandwidth_estimation: bool,
    /// Factor applied to the current cwnd when seeding a migration.
    pub cwnd_scaling_factor: f64,
}
289
/// State of one path migration for a peer.
#[derive(Debug)]
struct MigrationSession {
    peer_id: PeerId,
    old_path: SocketAddr,
    new_path: SocketAddr,
    started_at: Instant,
    migration_state: MigrationState,
    // Seeded from the shared congestion window, scaled down.
    congestion_window: u32,
    rtt_estimate: Duration,
    bandwidth_estimate: u64,
}
302
/// Lifecycle of a path migration. `Completed` and `Failed` are terminal.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MigrationState {
    Initiated,
    PathValidating,
    CongestionProbing,
    Migrating,
    Completed,
    Failed,
}
313
/// Shared congestion-control view used to gate migrations.
#[derive(Debug)]
struct CongestionState {
    // Current congestion window; units presumably packets — TODO confirm.
    congestion_window: u32,
    // Slow-start threshold.
    ssthresh: u32,
    // Bounded window of recent RTT measurements.
    rtt_measurements: VecDeque<Duration>,
    // Bounded log of recent congestion events.
    congestion_events: VecDeque<CongestionEvent>,
    // Aggregate level in [0, 1].
    congestion_level: f64,
}
328
/// One observed congestion signal.
#[derive(Debug, Clone)]
struct CongestionEvent {
    timestamp: Instant,
    event_type: CongestionEventType,
    // Relative severity; consumers average this over recent events.
    severity: f64,
}
336
/// Kinds of congestion signals that can be recorded.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CongestionEventType {
    PacketLoss,
    Timeout,
    /// Explicit Congestion Notification mark.
    ECNMark,
    RTTIncrease,
}
345
/// Statistics for congestion-aware migration.
#[derive(Debug, Default, Clone)]
pub struct CongestionIntegrationStats {
    pub migrations_attempted: u64,
    pub migrations_successful: u64,
    pub avg_migration_time: Duration,
    /// Migrations refused because congestion exceeded the threshold.
    pub congestion_avoided_migrations: u64,
    pub bandwidth_utilization_efficiency: f64,
}
360
361impl Default for ParallelDiscoveryConfig {
362 fn default() -> Self {
363 Self {
364 max_concurrent_tasks: 8,
365 interface_timeout: Duration::from_secs(5),
366 enable_prioritization: true,
367 preferred_interface_types: vec![
368 InterfaceType::Ethernet,
369 InterfaceType::WiFi,
370 InterfaceType::Cellular,
371 ],
372 enable_adaptive_parallelism: true,
373 }
374 }
375}
376
377impl Default for BandwidthValidationConfig {
378 fn default() -> Self {
379 Self {
380 max_concurrent_validations: 16,
381 bandwidth_threshold: 1_000_000, enable_adaptive_validation: true,
383 validation_packet_size: 64,
384 max_validation_rate: 100.0, }
386 }
387}
388
389impl Default for CongestionIntegrationConfig {
390 fn default() -> Self {
391 Self {
392 enable_congestion_awareness: true,
393 congestion_threshold: 0.7, max_migrations_per_second: 10.0,
395 enable_bandwidth_estimation: true,
396 cwnd_scaling_factor: 0.8,
397 }
398 }
399}
400
401impl ParallelDiscoveryCoordinator {
402 pub fn new(config: ParallelDiscoveryConfig) -> Self {
404 Self {
405 active_discoveries: Arc::new(RwLock::new(HashMap::new())),
406 config,
407 stats: Arc::new(Mutex::new(ParallelDiscoveryStats::default())),
408 coordination_handle: None,
409 }
410 }
411
    /// Kicks off candidate discovery on up to `max_concurrent_tasks`
    /// interfaces and then starts the background coordination task.
    ///
    /// NOTE(review): despite the name, each `start_interface_discovery`
    /// call is awaited inside the loop, so the per-interface discoveries
    /// run sequentially — confirm whether they should be spawned instead.
    pub async fn start_parallel_discovery(
        &mut self,
        interfaces: Vec<NetworkInterface>,
        peer_id: PeerId,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        info!(
            "Starting parallel discovery across {} interfaces for peer {:?}",
            interfaces.len(),
            peer_id
        );

        // Optionally order interfaces by preference before applying the cap.
        let prioritized_interfaces = if self.config.enable_prioritization {
            self.prioritize_interfaces(interfaces)
        } else {
            interfaces
        };

        // Either a load-scaled limit or the static configured maximum.
        let max_tasks = if self.config.enable_adaptive_parallelism {
            self.calculate_adaptive_parallelism().await
        } else {
            self.config.max_concurrent_tasks
        };

        // Interfaces beyond the cap are dropped, not queued.
        let tasks_to_start = prioritized_interfaces
            .into_iter()
            .take(max_tasks)
            .collect::<Vec<_>>();

        for interface in tasks_to_start {
            self.start_interface_discovery(interface, peer_id).await?;
        }

        // Background task that aggregates stats until all tasks finish.
        self.start_coordination_task().await?;

        Ok(())
    }
453
454 fn prioritize_interfaces(
456 &self,
457 mut interfaces: Vec<NetworkInterface>,
458 ) -> Vec<NetworkInterface> {
459 interfaces.sort_by_key(|interface| {
460 let interface_type = self.classify_interface_type(&interface.name);
461 let type_priority = self
462 .config
463 .preferred_interface_types
464 .iter()
465 .position(|&t| t == interface_type)
466 .unwrap_or(999);
467
468 (type_priority, interface.addresses.len())
470 });
471
472 interfaces
473 }
474
475 fn classify_interface_type(&self, name: &str) -> InterfaceType {
477 let name_lower = name.to_lowercase();
478
479 if name_lower.contains("eth") || name_lower.contains("en") {
480 InterfaceType::Ethernet
481 } else if name_lower.contains("wlan")
482 || name_lower.contains("wifi")
483 || name_lower.contains("wl")
484 {
485 InterfaceType::WiFi
486 } else if name_lower.contains("cell")
487 || name_lower.contains("wwan")
488 || name_lower.contains("ppp")
489 {
490 InterfaceType::Cellular
491 } else if name_lower.contains("lo") || name_lower.contains("loopback") {
492 InterfaceType::Loopback
493 } else if name_lower.contains("vpn")
494 || name_lower.contains("tun")
495 || name_lower.contains("tap")
496 {
497 InterfaceType::VPN
498 } else {
499 InterfaceType::Unknown
500 }
501 }
502
503 async fn calculate_adaptive_parallelism(&self) -> usize {
505 let base_parallelism = self.config.max_concurrent_tasks;
513 let system_load_factor = 0.8; ((base_parallelism as f64) * system_load_factor) as usize
516 }
517
518 async fn start_interface_discovery(
520 &self,
521 interface: NetworkInterface,
522 _peer_id: PeerId,
523 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
524 let interface_type = self.classify_interface_type(&interface.name);
525 let priority = self.calculate_interface_priority(interface_type);
526
527 let task = DiscoveryTask {
528 interface_name: interface.name.clone(),
529 interface_type,
530 started_at: Instant::now(),
531 status: TaskStatus::Pending,
532 discovered_candidates: Vec::new(),
533 priority,
534 };
535
536 {
538 let mut discoveries = self.active_discoveries.write().unwrap();
539 discoveries.insert(interface.name.clone(), task);
540 }
541
542 {
544 let mut stats = self.stats.lock().unwrap();
545 stats.tasks_started += 1;
546 }
547
548 self.perform_interface_discovery(interface).await?;
550
551 Ok(())
552 }
553
554 fn calculate_interface_priority(&self, interface_type: InterfaceType) -> u32 {
556 match interface_type {
557 InterfaceType::Ethernet => 100,
558 InterfaceType::WiFi => 80,
559 InterfaceType::Cellular => 60,
560 InterfaceType::VPN => 40,
561 InterfaceType::Loopback => 20,
562 InterfaceType::Unknown => 10,
563 }
564 }
565
566 async fn perform_interface_discovery(
568 &self,
569 interface: NetworkInterface,
570 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
571 let interface_name = interface.name.clone();
572
573 {
575 let mut discoveries = self.active_discoveries.write().unwrap();
576 if let Some(task) = discoveries.get_mut(&interface_name) {
577 task.status = TaskStatus::Running;
578 }
579 }
580
581 let discovery_result = timeout(
583 self.config.interface_timeout,
584 self.discover_candidates_for_interface(interface),
585 )
586 .await;
587
588 match discovery_result {
589 Ok(Ok(candidates)) => {
590 {
592 let mut discoveries = self.active_discoveries.write().unwrap();
593 if let Some(task) = discoveries.get_mut(&interface_name) {
594 task.status = TaskStatus::Completed;
595 task.discovered_candidates = candidates;
596 }
597 }
598
599 {
601 let mut stats = self.stats.lock().unwrap();
602 stats.tasks_completed += 1;
603 }
604
605 debug!("Interface discovery completed for {}", interface_name);
606 }
607 Ok(Err(_)) => {
608 {
610 let mut discoveries = self.active_discoveries.write().unwrap();
611 if let Some(task) = discoveries.get_mut(&interface_name) {
612 task.status = TaskStatus::Failed;
613 }
614 }
615
616 {
618 let mut stats = self.stats.lock().unwrap();
619 stats.tasks_failed += 1;
620 }
621
622 warn!("Interface discovery failed for {}", interface_name);
623 }
624 Err(_) => {
625 {
627 let mut discoveries = self.active_discoveries.write().unwrap();
628 if let Some(task) = discoveries.get_mut(&interface_name) {
629 task.status = TaskStatus::Timeout;
630 }
631 }
632
633 {
635 let mut stats = self.stats.lock().unwrap();
636 stats.tasks_failed += 1;
637 }
638
639 warn!("Interface discovery timeout for {}", interface_name);
640 }
641 }
642
643 Ok(())
644 }
645
646 async fn discover_candidates_for_interface(
648 &self,
649 interface: NetworkInterface,
650 ) -> Result<Vec<CandidateAddress>, Box<dyn std::error::Error + Send + Sync>> {
651 let mut candidates = Vec::new();
652
653 for address in &interface.addresses {
654 if self.is_valid_candidate_address(&address) {
656 let candidate = CandidateAddress {
657 address: *address,
658 priority: self.calculate_candidate_priority(&address, &interface),
659 source: CandidateSource::Local,
660 state: CandidateState::New,
661 };
662
663 candidates.push(candidate);
664 }
665 }
666
667 sleep(Duration::from_millis(100)).await;
669
670 Ok(candidates)
671 }
672
673 fn is_valid_candidate_address(&self, address: &SocketAddr) -> bool {
675 match address.ip() {
676 IpAddr::V4(ipv4) => {
677 !ipv4.is_loopback() && !ipv4.is_link_local() && !ipv4.is_broadcast()
678 }
679 IpAddr::V6(ipv6) => !ipv6.is_loopback() && !ipv6.is_unspecified(),
680 }
681 }
682
683 fn calculate_candidate_priority(
685 &self,
686 address: &SocketAddr,
687 interface: &NetworkInterface,
688 ) -> u32 {
689 let mut priority = 1000u32;
690
691 if address.is_ipv4() {
693 priority += 100;
694 }
695
696 if !self.is_private_address(address) {
698 priority += 200;
699 }
700
701 let interface_type = self.classify_interface_type(&interface.name);
703 priority += self.calculate_interface_priority(interface_type);
704
705 priority
706 }
707
708 fn is_private_address(&self, address: &SocketAddr) -> bool {
710 match address.ip() {
711 IpAddr::V4(ipv4) => ipv4.is_private(),
712 IpAddr::V6(ipv6) => {
713 let segments = ipv6.segments();
715 (segments[0] & 0xfe00) == 0xfc00
716 }
717 }
718 }
719
    /// Spawns a background task that periodically aggregates discovery
    /// results and exits once every registered task has reached a terminal
    /// state.
    async fn start_coordination_task(
        &mut self,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let discoveries = Arc::clone(&self.active_discoveries);
        let stats = Arc::clone(&self.stats);
        let config = self.config.clone();

        let coordination_handle = tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_millis(500));

            loop {
                interval.tick().await;
                Self::coordinate_discoveries(&discoveries, &stats, &config).await;

                // Terminal states: Completed, Failed, Timeout. The read
                // guard is confined to this block so it is NOT held across
                // the next `tick().await`.
                // NOTE(review): `all()` over an empty map is true, so the
                // loop also exits immediately if no tasks were registered.
                let all_complete = {
                    let discoveries_read = discoveries.read().unwrap();
                    discoveries_read.values().all(|task| {
                        matches!(
                            task.status,
                            TaskStatus::Completed | TaskStatus::Failed | TaskStatus::Timeout
                        )
                    })
                };

                if all_complete {
                    break;
                }
            }
        });

        self.coordination_handle = Some(coordination_handle);
        Ok(())
    }
755
756 async fn coordinate_discoveries(
758 discoveries: &Arc<RwLock<HashMap<String, DiscoveryTask>>>,
759 stats: &Arc<Mutex<ParallelDiscoveryStats>>,
760 _config: &ParallelDiscoveryConfig,
761 ) {
762 let mut total_candidates = 0u64;
763 let mut completed_tasks = 0u64;
764 let mut total_discovery_time = Duration::ZERO;
765
766 {
767 let discoveries_read = discoveries.read().unwrap();
768 for task in discoveries_read.values() {
769 if task.status == TaskStatus::Completed {
770 total_candidates += task.discovered_candidates.len() as u64;
771 completed_tasks += 1;
772 total_discovery_time += task.started_at.elapsed();
773 }
774 }
775 }
776
777 {
779 let mut stats_guard = stats.lock().unwrap();
780 stats_guard.total_candidates = total_candidates;
781 stats_guard.tasks_completed = completed_tasks;
782
783 if completed_tasks > 0 {
784 stats_guard.avg_discovery_time = total_discovery_time / completed_tasks as u32;
785 stats_guard.parallelism_efficiency =
786 completed_tasks as f64 / stats_guard.tasks_started as f64;
787 }
788 }
789 }
790
791 pub async fn get_all_candidates(&self) -> Vec<CandidateAddress> {
793 let mut all_candidates = Vec::new();
794
795 let discoveries = self.active_discoveries.read().unwrap();
796 for task in discoveries.values() {
797 if task.status == TaskStatus::Completed {
798 all_candidates.extend(task.discovered_candidates.clone());
799 }
800 }
801
802 all_candidates.sort_by(|a, b| b.priority.cmp(&a.priority));
804
805 all_candidates
806 }
807
808 pub async fn get_stats(&self) -> ParallelDiscoveryStats {
810 self.stats.lock().unwrap().clone()
811 }
812
813 pub async fn shutdown(&mut self) {
815 if let Some(handle) = self.coordination_handle.take() {
816 handle.abort();
817 }
818
819 {
821 let mut discoveries = self.active_discoveries.write().unwrap();
822 discoveries.clear();
823 }
824
825 info!("Parallel discovery coordinator shutdown complete");
826 }
827}
828
829impl AdaptiveTimeoutManager {
    /// Builds a manager with per-operation timeout policies and optimistic
    /// initial network conditions.
    pub fn new() -> Self {
        let mut timeout_configs = HashMap::new();

        // Each policy: a base used when no RTT samples exist, hard min/max
        // clamps, and weights for RTT, link quality and congestion.
        timeout_configs.insert(
            OperationType::CandidateDiscovery,
            AdaptiveTimeoutConfig {
                base_timeout: Duration::from_secs(5),
                min_timeout: Duration::from_millis(500),
                max_timeout: Duration::from_secs(30),
                rtt_multiplier: 4.0,
                quality_factor: 0.5,
                congestion_factor: 0.3,
            },
        );

        timeout_configs.insert(
            OperationType::PathValidation,
            AdaptiveTimeoutConfig {
                base_timeout: Duration::from_secs(3),
                min_timeout: Duration::from_millis(200),
                max_timeout: Duration::from_secs(15),
                rtt_multiplier: 3.0,
                quality_factor: 0.4,
                congestion_factor: 0.4,
            },
        );

        timeout_configs.insert(
            OperationType::CoordinationRequest,
            AdaptiveTimeoutConfig {
                base_timeout: Duration::from_secs(10),
                min_timeout: Duration::from_secs(1),
                max_timeout: Duration::from_secs(60),
                rtt_multiplier: 5.0,
                quality_factor: 0.6,
                congestion_factor: 0.2,
            },
        );

        // Hole punching gets the tightest budget: it is retried frequently.
        timeout_configs.insert(
            OperationType::HolePunching,
            AdaptiveTimeoutConfig {
                base_timeout: Duration::from_secs(2),
                min_timeout: Duration::from_millis(100),
                max_timeout: Duration::from_secs(10),
                rtt_multiplier: 2.0,
                quality_factor: 0.3,
                congestion_factor: 0.5,
            },
        );

        timeout_configs.insert(
            OperationType::ConnectionEstablishment,
            AdaptiveTimeoutConfig {
                base_timeout: Duration::from_secs(15),
                min_timeout: Duration::from_secs(2),
                max_timeout: Duration::from_secs(120),
                rtt_multiplier: 6.0,
                quality_factor: 0.7,
                congestion_factor: 0.1,
            },
        );

        Self {
            network_conditions: Arc::new(RwLock::new(NetworkConditions {
                rtt_samples: VecDeque::new(),
                packet_loss_rate: 0.0,
                // Optimistic defaults until measurements arrive; bandwidth
                // units are presumably bytes/sec — TODO confirm.
                bandwidth_estimate: 1_000_000,
                quality_score: 0.8,
                congestion_level: 0.2,
                last_measurement: Instant::now(),
            })),
            timeout_configs,
            stats: Arc::new(Mutex::new(AdaptiveTimeoutStats::default())),
            monitoring_handle: None,
        }
    }
909
910 pub async fn start(&mut self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
912 let network_conditions = Arc::clone(&self.network_conditions);
913 let stats = Arc::clone(&self.stats);
914
915 let monitoring_handle = tokio::spawn(async move {
916 let mut interval = tokio::time::interval(Duration::from_secs(1));
917
918 loop {
919 interval.tick().await;
920 Self::update_network_conditions(&network_conditions, &stats).await;
921 }
922 });
923
924 self.monitoring_handle = Some(monitoring_handle);
925 info!("Adaptive timeout manager started");
926 Ok(())
927 }
928
    /// Computes the timeout for `operation` from current conditions:
    /// RTT-scaled base, inflated by poor quality and congestion, clamped
    /// to the policy's min/max.
    pub async fn calculate_timeout(&self, operation: OperationType) -> Duration {
        // Fall back to a generic policy for unknown operation types.
        let config = self
            .timeout_configs
            .get(&operation)
            .cloned()
            .unwrap_or_else(|| AdaptiveTimeoutConfig {
                base_timeout: Duration::from_secs(5),
                min_timeout: Duration::from_millis(500),
                max_timeout: Duration::from_secs(30),
                rtt_multiplier: 4.0,
                quality_factor: 0.5,
                congestion_factor: 0.3,
            });

        let conditions = self.network_conditions.read().unwrap();

        // Prefer measured RTT (scaled) over the static base timeout.
        let rtt_based_timeout =
            if let Some(avg_rtt) = self.calculate_average_rtt(&conditions.rtt_samples) {
                Duration::from_millis((avg_rtt.as_millis() as f64 * config.rtt_multiplier) as u64)
            } else {
                config.base_timeout
            };

        // Both adjustments are >= 1.0 for scores in [0, 1]: bad quality or
        // congestion only ever lengthens the timeout.
        let quality_adjustment = 1.0 + (1.0 - conditions.quality_score) * config.quality_factor;

        let congestion_adjustment = 1.0 + conditions.congestion_level * config.congestion_factor;

        let adjusted_timeout = Duration::from_millis(
            (rtt_based_timeout.as_millis() as f64 * quality_adjustment * congestion_adjustment)
                as u64,
        );

        // Clamp into the policy's allowed range.
        let final_timeout = adjusted_timeout
            .max(config.min_timeout)
            .min(config.max_timeout);

        // NOTE(review): the stats mutex is acquired while the conditions
        // read lock is still held — keep other code from taking these two
        // locks in the opposite order.
        {
            let mut stats = self.stats.lock().unwrap();
            stats.adjustments_made += 1;
            stats.avg_timeouts.insert(operation, final_timeout);
        }

        debug!(
            "Calculated adaptive timeout for {:?}: {:?} (quality: {:.2}, congestion: {:.2})",
            operation, final_timeout, conditions.quality_score, conditions.congestion_level
        );

        final_timeout
    }
985
986 pub async fn record_measurement(
988 &self,
989 rtt: Duration,
990 packet_loss: bool,
991 bandwidth: Option<u64>,
992 ) {
993 let mut conditions = self.network_conditions.write().unwrap();
994
995 conditions.rtt_samples.push_back(rtt);
997 if conditions.rtt_samples.len() > 50 {
998 conditions.rtt_samples.pop_front();
999 }
1000
1001 let loss_sample = if packet_loss { 1.0 } else { 0.0 };
1003 conditions.packet_loss_rate = conditions.packet_loss_rate * 0.9 + loss_sample * 0.1;
1004
1005 if let Some(bw) = bandwidth {
1007 conditions.bandwidth_estimate =
1008 (conditions.bandwidth_estimate as f64 * 0.8 + bw as f64 * 0.2) as u64;
1009 }
1010
1011 let rtt_quality = 1.0 - (rtt.as_millis() as f64 / 1000.0).min(1.0);
1013 let loss_quality = 1.0 - conditions.packet_loss_rate;
1014 conditions.quality_score = (rtt_quality + loss_quality) / 2.0;
1015
1016 let rtt_variance = self.calculate_rtt_variance(&conditions.rtt_samples);
1018 conditions.congestion_level = (conditions.packet_loss_rate + rtt_variance).min(1.0);
1019
1020 conditions.last_measurement = Instant::now();
1021 }
1022
1023 fn calculate_average_rtt(&self, samples: &VecDeque<Duration>) -> Option<Duration> {
1025 if samples.is_empty() {
1026 return None;
1027 }
1028
1029 let total_ms: u64 = samples.iter().map(|d| d.as_millis() as u64).sum();
1030 Some(Duration::from_millis(total_ms / samples.len() as u64))
1031 }
1032
1033 fn calculate_rtt_variance(&self, samples: &VecDeque<Duration>) -> f64 {
1035 if samples.len() < 2 {
1036 return 0.0;
1037 }
1038
1039 let avg = self.calculate_average_rtt(samples).unwrap().as_millis() as f64;
1040 let variance: f64 = samples
1041 .iter()
1042 .map(|d| {
1043 let diff = d.as_millis() as f64 - avg;
1044 diff * diff
1045 })
1046 .sum::<f64>()
1047 / samples.len() as f64;
1048
1049 (variance.sqrt() / avg).min(1.0)
1050 }
1051
1052 async fn update_network_conditions(
1054 network_conditions: &Arc<RwLock<NetworkConditions>>,
1055 _stats: &Arc<Mutex<AdaptiveTimeoutStats>>,
1056 ) {
1057 let mut conditions = network_conditions.write().unwrap();
1065
1066 while conditions.rtt_samples.len() > 100 {
1068 conditions.rtt_samples.pop_front();
1069 }
1070
1071 conditions.packet_loss_rate *= 0.99;
1073
1074 if conditions.last_measurement.elapsed() > Duration::from_secs(10) {
1076 conditions.quality_score *= 0.95;
1078 }
1079 }
1080
1081 pub async fn get_network_conditions(&self) -> NetworkConditions {
1083 self.network_conditions.read().unwrap().clone()
1084 }
1085
1086 pub async fn get_stats(&self) -> AdaptiveTimeoutStats {
1088 self.stats.lock().unwrap().clone()
1089 }
1090
1091 pub async fn shutdown(&mut self) {
1093 if let Some(handle) = self.monitoring_handle.take() {
1094 handle.abort();
1095 }
1096
1097 info!("Adaptive timeout manager shutdown complete");
1098 }
1099}
1100
1101impl BandwidthAwareValidator {
1102 pub fn new(config: BandwidthValidationConfig) -> Self {
1104 Self {
1105 active_validations: Arc::new(RwLock::new(HashMap::new())),
1106 bandwidth_monitor: Arc::new(Mutex::new(BandwidthMonitor {
1107 bandwidth_samples: VecDeque::new(),
1108 current_bandwidth: 1_000_000, utilization: 0.0,
1110 last_measurement: Instant::now(),
1111 })),
1112 config,
1113 stats: Arc::new(Mutex::new(BandwidthValidationStats::default())),
1114 }
1115 }
1116
1117 pub async fn start_validation(
1119 &self,
1120 target_address: SocketAddr,
1121 priority: ValidationPriority,
1122 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
1123 if !self.can_start_validation().await {
1125 return Err("Bandwidth limit reached, cannot start validation".into());
1126 }
1127
1128 let session = ValidationSession {
1129 target_address,
1130 started_at: Instant::now(),
1131 packets_sent: 0,
1132 packets_received: 0,
1133 total_bytes: 0,
1134 rtt_samples: Vec::new(),
1135 bandwidth_usage: 0,
1136 priority,
1137 };
1138
1139 {
1141 let mut validations = self.active_validations.write().unwrap();
1142 validations.insert(target_address, session);
1143 }
1144
1145 {
1147 let mut stats = self.stats.lock().unwrap();
1148 stats.validations_started += 1;
1149 }
1150
1151 debug!("Started bandwidth-aware validation for {}", target_address);
1152 Ok(())
1153 }
1154
    /// Admission check for a new validation session.
    ///
    /// Rejects when the concurrent-session cap is reached or (in adaptive
    /// mode) when active sessions already claim more than 80% of the
    /// estimated bandwidth.
    async fn can_start_validation(&self) -> bool {
        // Two locks are taken here (validations read, then monitor mutex);
        // keep that order consistent elsewhere to avoid deadlock.
        let validations = self.active_validations.read().unwrap();
        let bandwidth_monitor = self.bandwidth_monitor.lock().unwrap();

        if validations.len() >= self.config.max_concurrent_validations {
            return false;
        }

        if self.config.enable_adaptive_validation {
            let current_usage: u64 = validations
                .values()
                .map(|session| session.bandwidth_usage)
                .sum();

            let available_bandwidth = bandwidth_monitor.current_bandwidth;
            // If `available_bandwidth` is 0 this is inf (rejects) or, for
            // 0/0, NaN (admits, since NaN > 0.8 is false).
            // NOTE(review): consider an explicit zero guard.
            let utilization = current_usage as f64 / available_bandwidth as f64;

            // Hard-coded 80% ceiling; `config.bandwidth_threshold` is not
            // consulted here — presumably intentional, TODO confirm.
            if utilization > 0.8 {
                return false;
            }
        }

        true
    }
1183
1184 pub async fn record_packet_sent(
1186 &self,
1187 target_address: SocketAddr,
1188 packet_size: usize,
1189 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
1190 let mut validations = self.active_validations.write().unwrap();
1191
1192 if let Some(session) = validations.get_mut(&target_address) {
1193 session.packets_sent += 1;
1194 session.total_bytes += packet_size as u64;
1195 session.bandwidth_usage += packet_size as u64;
1196 }
1197
1198 self.update_bandwidth_usage(packet_size as u64).await;
1200
1201 Ok(())
1202 }
1203
1204 pub async fn record_packet_received(
1206 &self,
1207 target_address: SocketAddr,
1208 rtt: Duration,
1209 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
1210 let mut validations = self.active_validations.write().unwrap();
1211
1212 if let Some(session) = validations.get_mut(&target_address) {
1213 session.packets_received += 1;
1214 session.rtt_samples.push(rtt);
1215 }
1216
1217 Ok(())
1218 }
1219
1220 async fn update_bandwidth_usage(&self, bytes_used: u64) {
1222 let mut monitor = self.bandwidth_monitor.lock().unwrap();
1223
1224 let now = Instant::now();
1225 let sample = BandwidthSample {
1226 timestamp: now,
1227 bytes_transferred: bytes_used,
1228 duration: now.duration_since(monitor.last_measurement),
1229 bandwidth: if monitor.last_measurement.elapsed().as_secs() > 0 {
1230 bytes_used / monitor.last_measurement.elapsed().as_secs()
1231 } else {
1232 0
1233 },
1234 };
1235
1236 monitor.bandwidth_samples.push_back(sample);
1237 if monitor.bandwidth_samples.len() > 100 {
1238 monitor.bandwidth_samples.pop_front();
1239 }
1240
1241 if !monitor.bandwidth_samples.is_empty() {
1243 let total_bytes: u64 = monitor
1244 .bandwidth_samples
1245 .iter()
1246 .map(|s| s.bytes_transferred)
1247 .sum();
1248 let total_time: Duration = monitor.bandwidth_samples.iter().map(|s| s.duration).sum();
1249
1250 if total_time.as_secs() > 0 {
1251 monitor.current_bandwidth = total_bytes / total_time.as_secs();
1252 }
1253 }
1254
1255 monitor.last_measurement = now;
1256 }
1257
1258 pub async fn complete_validation(
1260 &self,
1261 target_address: SocketAddr,
1262 success: bool,
1263 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
1264 let session = {
1265 let mut validations = self.active_validations.write().unwrap();
1266 validations.remove(&target_address)
1267 };
1268
1269 if let Some(session) = session {
1270 let duration = session.started_at.elapsed();
1271
1272 {
1274 let mut stats = self.stats.lock().unwrap();
1275 if success {
1276 stats.validations_completed += 1;
1277 }
1278 stats.total_bandwidth_used += session.bandwidth_usage;
1279 stats.avg_validation_time = if stats.validations_completed > 0 {
1280 Duration::from_millis(
1281 (stats.avg_validation_time.as_millis() as u64
1282 * (stats.validations_completed - 1)
1283 + duration.as_millis() as u64)
1284 / stats.validations_completed,
1285 )
1286 } else {
1287 duration
1288 };
1289
1290 if stats.total_bandwidth_used > 0 {
1291 stats.bandwidth_efficiency = stats.validations_completed as f64
1292 / stats.total_bandwidth_used as f64
1293 * 1000.0; }
1295 }
1296
1297 debug!(
1298 "Completed validation for {} in {:?} (success: {})",
1299 target_address, duration, success
1300 );
1301 }
1302
1303 Ok(())
1304 }
1305
1306 pub async fn get_stats(&self) -> BandwidthValidationStats {
1308 self.stats.lock().unwrap().clone()
1309 }
1310}
1311
1312impl CongestionControlIntegrator {
1313 pub fn new(config: CongestionIntegrationConfig) -> Self {
1315 Self {
1316 active_migrations: Arc::new(RwLock::new(HashMap::new())),
1317 congestion_state: Arc::new(Mutex::new(CongestionState {
1318 congestion_window: 10, ssthresh: 65535,
1320 rtt_measurements: VecDeque::new(),
1321 congestion_events: VecDeque::new(),
1322 congestion_level: 0.0,
1323 })),
1324 config,
1325 stats: Arc::new(Mutex::new(CongestionIntegrationStats::default())),
1326 }
1327 }
1328
1329 pub async fn start_migration(
1331 &self,
1332 peer_id: PeerId,
1333 old_path: SocketAddr,
1334 new_path: SocketAddr,
1335 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
1336 if self.config.enable_congestion_awareness {
1338 let congestion_state = self.congestion_state.lock().unwrap();
1339 if congestion_state.congestion_level > self.config.congestion_threshold {
1340 return Err("Migration delayed due to high congestion".into());
1341 }
1342 }
1343
1344 let session = MigrationSession {
1345 peer_id,
1346 old_path,
1347 new_path,
1348 started_at: Instant::now(),
1349 migration_state: MigrationState::Initiated,
1350 congestion_window: {
1351 let state = self.congestion_state.lock().unwrap();
1352 (state.congestion_window as f64 * self.config.cwnd_scaling_factor) as u32
1353 },
1354 rtt_estimate: Duration::from_millis(100), bandwidth_estimate: 1_000_000, };
1357
1358 {
1360 let mut migrations = self.active_migrations.write().unwrap();
1361 migrations.insert(peer_id, session);
1362 }
1363
1364 {
1366 let mut stats = self.stats.lock().unwrap();
1367 stats.migrations_attempted += 1;
1368 }
1369
1370 info!(
1371 "Started congestion-aware migration for peer {:?}: {} -> {}",
1372 peer_id, old_path, new_path
1373 );
1374 Ok(())
1375 }
1376
1377 pub async fn update_migration_state(
1379 &self,
1380 peer_id: PeerId,
1381 new_state: MigrationState,
1382 rtt: Option<Duration>,
1383 bandwidth: Option<u64>,
1384 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
1385 let mut migrations = self.active_migrations.write().unwrap();
1386
1387 if let Some(session) = migrations.get_mut(&peer_id) {
1388 session.migration_state = new_state;
1389
1390 if let Some(rtt) = rtt {
1391 session.rtt_estimate = rtt;
1392
1393 let mut congestion_state = self.congestion_state.lock().unwrap();
1395 congestion_state.rtt_measurements.push_back(rtt);
1396 if congestion_state.rtt_measurements.len() > 50 {
1397 congestion_state.rtt_measurements.pop_front();
1398 }
1399 }
1400
1401 if let Some(bw) = bandwidth {
1402 session.bandwidth_estimate = bw;
1403 }
1404
1405 if matches!(new_state, MigrationState::Completed) {
1407 let duration = session.started_at.elapsed();
1408
1409 let mut stats = self.stats.lock().unwrap();
1411 stats.migrations_successful += 1;
1412 stats.avg_migration_time = if stats.migrations_successful > 0 {
1413 Duration::from_millis(
1414 (stats.avg_migration_time.as_millis() as u64
1415 * (stats.migrations_successful - 1)
1416 + duration.as_millis() as u64)
1417 / stats.migrations_successful,
1418 )
1419 } else {
1420 duration
1421 };
1422
1423 debug!(
1424 "Migration completed for peer {:?} in {:?}",
1425 peer_id, duration
1426 );
1427 }
1428 }
1429
1430 Ok(())
1431 }
1432
1433 pub async fn record_congestion_event(&self, event_type: CongestionEventType, severity: f64) {
1435 let event = CongestionEvent {
1436 timestamp: Instant::now(),
1437 event_type,
1438 severity,
1439 };
1440
1441 let mut congestion_state = self.congestion_state.lock().unwrap();
1442 congestion_state.congestion_events.push_back(event);
1443
1444 if congestion_state.congestion_events.len() > 100 {
1446 congestion_state.congestion_events.pop_front();
1447 }
1448
1449 let recent_events: Vec<_> = congestion_state
1451 .congestion_events
1452 .iter()
1453 .filter(|e| e.timestamp.elapsed() < Duration::from_secs(10))
1454 .collect();
1455
1456 if !recent_events.is_empty() {
1457 let avg_severity: f64 =
1458 recent_events.iter().map(|e| e.severity).sum::<f64>() / recent_events.len() as f64;
1459
1460 congestion_state.congestion_level = avg_severity;
1461 }
1462
1463 match event_type {
1465 CongestionEventType::PacketLoss | CongestionEventType::Timeout => {
1466 congestion_state.ssthresh = congestion_state.congestion_window / 2;
1467 congestion_state.congestion_window = congestion_state.ssthresh;
1468 }
1469 CongestionEventType::ECNMark => {
1470 congestion_state.congestion_window =
1471 (congestion_state.congestion_window as f64 * 0.8) as u32;
1472 }
1473 CongestionEventType::RTTIncrease => {
1474 congestion_state.congestion_window =
1476 (congestion_state.congestion_window as f64 * 0.95) as u32;
1477 }
1478 }
1479
1480 debug!(
1481 "Recorded congestion event: {:?} (severity: {:.2}, new cwnd: {})",
1482 event_type, severity, congestion_state.congestion_window
1483 );
1484 }
1485
1486 pub async fn get_stats(&self) -> CongestionIntegrationStats {
1488 self.stats.lock().unwrap().clone()
1489 }
1490}
1491
/// Facade that bundles the four network-efficiency subsystems behind a
/// single lifecycle (`start` / `shutdown`) and a combined stats snapshot.
#[derive(Debug)]
pub struct NetworkEfficiencyManager {
    /// Coordinates per-interface parallel candidate discovery.
    parallel_discovery: ParallelDiscoveryCoordinator,
    /// Adjusts operation timeouts to observed network conditions.
    adaptive_timeout: AdaptiveTimeoutManager,
    /// Validates candidates with bandwidth awareness.
    bandwidth_validator: BandwidthAwareValidator,
    /// Congestion-control-aware path migration (see `CongestionControlIntegrator`).
    congestion_integrator: CongestionControlIntegrator,
    /// True between a successful `start()` and the matching `shutdown()`;
    /// both methods are no-ops when the flag already has the target value.
    is_running: bool,
}
1501
1502impl NetworkEfficiencyManager {
1503 pub fn new() -> Self {
1505 Self {
1506 parallel_discovery: ParallelDiscoveryCoordinator::new(
1507 ParallelDiscoveryConfig::default(),
1508 ),
1509 adaptive_timeout: AdaptiveTimeoutManager::new(),
1510 bandwidth_validator: BandwidthAwareValidator::new(BandwidthValidationConfig::default()),
1511 congestion_integrator: CongestionControlIntegrator::new(
1512 CongestionIntegrationConfig::default(),
1513 ),
1514 is_running: false,
1515 }
1516 }
1517
1518 pub fn with_configs(
1520 discovery_config: ParallelDiscoveryConfig,
1521 validation_config: BandwidthValidationConfig,
1522 congestion_config: CongestionIntegrationConfig,
1523 ) -> Self {
1524 Self {
1525 parallel_discovery: ParallelDiscoveryCoordinator::new(discovery_config),
1526 adaptive_timeout: AdaptiveTimeoutManager::new(),
1527 bandwidth_validator: BandwidthAwareValidator::new(validation_config),
1528 congestion_integrator: CongestionControlIntegrator::new(congestion_config),
1529 is_running: false,
1530 }
1531 }
1532
1533 pub async fn start(&mut self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
1535 if self.is_running {
1536 return Ok(());
1537 }
1538
1539 self.adaptive_timeout.start().await?;
1540
1541 self.is_running = true;
1542 info!("Network efficiency manager started");
1543 Ok(())
1544 }
1545
1546 pub fn parallel_discovery(&mut self) -> &mut ParallelDiscoveryCoordinator {
1548 &mut self.parallel_discovery
1549 }
1550
1551 pub fn adaptive_timeout(&self) -> &AdaptiveTimeoutManager {
1553 &self.adaptive_timeout
1554 }
1555
1556 pub fn bandwidth_validator(&self) -> &BandwidthAwareValidator {
1558 &self.bandwidth_validator
1559 }
1560
1561 pub fn congestion_integrator(&self) -> &CongestionControlIntegrator {
1563 &self.congestion_integrator
1564 }
1565
1566 pub async fn get_comprehensive_stats(&self) -> NetworkEfficiencyStats {
1568 NetworkEfficiencyStats {
1569 parallel_discovery: self.parallel_discovery.get_stats().await,
1570 adaptive_timeout: self.adaptive_timeout.get_stats().await,
1571 bandwidth_validation: self.bandwidth_validator.get_stats().await,
1572 congestion_integration: self.congestion_integrator.get_stats().await,
1573 }
1574 }
1575
1576 pub async fn shutdown(&mut self) {
1578 if !self.is_running {
1579 return;
1580 }
1581
1582 self.parallel_discovery.shutdown().await;
1583 self.adaptive_timeout.shutdown().await;
1584
1585 self.is_running = false;
1586 info!("Network efficiency manager shutdown complete");
1587 }
1588}
1589
/// Point-in-time snapshot of the statistics from all four network-efficiency
/// subsystems, as assembled by `NetworkEfficiencyManager::get_comprehensive_stats`.
#[derive(Debug, Clone)]
pub struct NetworkEfficiencyStats {
    /// Parallel interface-discovery statistics.
    pub parallel_discovery: ParallelDiscoveryStats,
    /// Adaptive timeout statistics.
    pub adaptive_timeout: AdaptiveTimeoutStats,
    /// Bandwidth-aware validation statistics.
    pub bandwidth_validation: BandwidthValidationStats,
    /// Congestion-control integration statistics.
    pub congestion_integration: CongestionIntegrationStats,
}
1598
1599impl Default for NetworkEfficiencyManager {
1600 fn default() -> Self {
1601 Self::new()
1602 }
1603}