1use scirs2_core::ndarray::Array1;
8use scirs2_core::numeric::{Float, FromPrimitive};
9use std::collections::HashMap;
10use std::fmt::Debug;
11
12use crate::error::Result;
13
/// Schedules queued distributed tasks onto a pool of compute nodes
/// according to a configurable [`SchedulingStrategy`].
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct DistributedTaskScheduler<F: Float + Debug> {
    // Tasks awaiting placement, in arrival order.
    task_queue: Vec<DistributedTask<F>>,
    // Ids of the nodes that may receive work.
    available_nodes: Vec<usize>,
    // Policy applied by `schedule_tasks`.
    scheduling_strategy: SchedulingStrategy,
}
22
/// A unit of work to be placed on a node by the scheduler.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct DistributedTask<F: Float + Debug> {
    // Unique identifier used in schedules and allocation tables.
    task_id: usize,
    // Category of work; drives `estimate_execution_time`.
    task_type: TaskType,
    // Higher value = scheduled earlier under `SchedulingStrategy::Priority`.
    priority: F,
    // Resources the task needs before it can be placed on a node.
    resource_requirements: ResourceRequirements<F>,
    // Current lifecycle state.
    completion_status: TaskStatus,
}
33
/// Category of work a task performs; used by
/// `DistributedTask::estimate_execution_time` to pick a time estimate.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum TaskType {
    /// General-purpose numeric computation.
    Computation,
    /// Data transformation / processing work.
    DataProcessing,
    /// Model training or inference.
    MachineLearning,
    /// Work targeted at a quantum backend.
    QuantumComputation,
    /// Analytical / reporting work.
    Analysis,
}
49
/// Resources a task must be granted before it can run on a node.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ResourceRequirements<F: Float + Debug> {
    // Minimum CPU core count.
    cpu_cores: usize,
    // Required memory, in gigabytes.
    memory_gb: F,
    // Required storage, in gigabytes.
    storage_gb: F,
    // Required network bandwidth (units unspecified in this file).
    network_bandwidth: F,
    // Whether the node must have at least one GPU.
    gpu_required: bool,
}
60
/// Lifecycle state of a [`DistributedTask`].
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum TaskStatus {
    /// Queued, not yet started.
    Pending,
    /// Currently executing on a node.
    Running,
    /// Finished successfully.
    Completed,
    /// Finished with an error.
    Failed,
    /// Withdrawn before completion.
    Cancelled,
}
76
/// Placement policy used by `DistributedTaskScheduler::schedule_tasks`.
/// Only `RoundRobin`, `Priority` and `LoadBalancing` have dedicated
/// handlers there; the remaining variants currently fall back to FCFS.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum SchedulingStrategy {
    /// First-come-first-served.
    FCFS,
    /// Cycle through nodes, one task per turn.
    RoundRobin,
    /// Highest-priority tasks placed first.
    Priority,
    /// Each task goes to the currently least-loaded node.
    LoadBalancing,
    /// Quantum-optimal placement (not yet implemented; falls back to FCFS).
    QuantumOptimal,
}
92
/// Tracks per-node resource inventory and which tasks are allocated where.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct DistributedResourceManager<F: Float + Debug> {
    // Resource inventory keyed by node id.
    available_resources: HashMap<usize, NodeResources<F>>,
    // Task ids allocated to each node.
    resource_allocation: HashMap<usize, Vec<usize>>,
    // Balancer used to propose load redistribution across nodes.
    load_balancer: LoadBalancer<F>,
}
101
/// Capacity and current availability of a single compute node.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct NodeResources<F: Float + Debug> {
    // Identifier of the node this inventory describes.
    node_id: usize,
    // Total CPU cores on the node.
    cpu_cores: usize,
    // Memory still free (GB); decremented as tasks are allocated.
    available_memory: F,
    // Total installed memory (GB).
    total_memory: F,
    // Storage capacity (GB).
    storage_capacity: F,
    // Network bandwidth capacity (units unspecified in this file).
    network_bandwidth: F,
    // Number of GPUs on the node.
    gpu_count: usize,
    // Fraction of total memory in use (recomputed on allocation).
    utilization: F,
}
115
/// Proposes task migrations between overloaded and underloaded nodes.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct LoadBalancer<F: Float + Debug> {
    // Algorithm selection (informational; `balance_load` uses one policy).
    balancing_algorithm: LoadBalancingAlgorithm,
    // Most recent metric per node (at most one entry per node id).
    load_metrics: Vec<LoadMetric<F>>,
    // Load above this value marks a node as overloaded.
    rebalancing_threshold: F,
}
124
/// Available load-balancing policies.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum LoadBalancingAlgorithm {
    /// Rotate through nodes evenly.
    RoundRobin,
    /// Round-robin weighted by node capacity.
    WeightedRoundRobin,
    /// Prefer the node with the fewest active connections.
    LeastConnections,
    /// Prefer the node with the lowest CPU utilization.
    CpuBased,
    /// Prefer the node with the lowest memory utilization.
    MemoryBased,
}
140
/// Point-in-time load observation for one node.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct LoadMetric<F: Float + Debug> {
    // Node this sample describes.
    node_id: usize,
    // CPU utilization (presumably a 0..=1 fraction — confirm with producers).
    cpu_utilization: F,
    // Memory utilization.
    memory_utilization: F,
    // Network utilization.
    network_utilization: F,
    // Observed response time (units unspecified in this file).
    response_time: F,
    // Number of tasks on the node at sample time.
    task_count: usize,
}
152
/// Top-level facade wiring together scheduling, resource management,
/// communication and fault tolerance for distributed processing.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct DistributedIntelligenceCoordinator<F: Float + Debug> {
    // Places tasks onto nodes.
    task_scheduler: DistributedTaskScheduler<F>,
    // Tracks node capacities and allocations.
    resource_manager: DistributedResourceManager<F>,
    // Message passing between nodes.
    communication_layer: CommunicationLayer<F>,
    // Failure detection and recovery.
    fault_tolerance: FaultToleranceSystem<F>,
}
162
/// In-memory message transport between nodes.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct CommunicationLayer<F: Float + Debug> {
    // Wire protocol selection (informational in this implementation).
    communication_protocol: CommunicationProtocol,
    // Pending messages; drained by `receive_messages`.
    message_queue: Vec<DistributedMessage<F>>,
    // Logical interconnect topology.
    network_topology: NetworkTopology,
    // Bandwidth reserved per node id.
    bandwidth_allocation: HashMap<usize, F>,
}
172
/// Transport protocol used by the communication layer.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum CommunicationProtocol {
    /// Reliable stream transport.
    TCP,
    /// Unreliable datagram transport.
    UDP,
    /// Message Passing Interface.
    MPI,
    /// Remote procedure calls.
    RPC,
    /// Publish/subscribe messaging.
    PubSub,
}
188
/// A message exchanged between two nodes via the communication layer.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct DistributedMessage<F: Float + Debug> {
    // Unique message identifier.
    message_id: usize,
    // Originating node.
    sender_id: usize,
    // Destination node.
    receiver_id: usize,
    // Semantic category of the message.
    message_type: MessageType,
    // Numeric payload elements.
    payload: Vec<F>,
    // Creation timestamp; `new` initializes it to zero.
    timestamp: F,
    // Delivery priority; `new` defaults to `Normal`.
    priority: MessagePriority,
}
201
/// Semantic category of a [`DistributedMessage`].
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum MessageType {
    /// Assign a task to a node.
    TaskAssignment,
    /// Carry a computation result back.
    Result,
    /// Report task/node status.
    StatusUpdate,
    /// Control-plane command.
    Control,
    /// Liveness probe.
    Heartbeat,
}
217
/// Delivery priority of a [`DistributedMessage`], lowest to highest.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum MessagePriority {
    /// Background traffic.
    Low,
    /// Default priority (used by `DistributedMessage::new`).
    Normal,
    /// Expedited delivery.
    High,
    /// Must be delivered before anything else.
    Critical,
}
231
/// Logical interconnect topology of the node network.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum NetworkTopology {
    /// All nodes connect to one hub.
    Star,
    /// Each node connects to two neighbours in a cycle.
    Ring,
    /// Partial point-to-point connectivity.
    Mesh,
    /// Hierarchical parent/child links.
    Tree,
    /// Every node connects to every other node.
    FullyConnected,
}
247
/// Failure detection plus a set of recovery mechanisms and checkpointing.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct FaultToleranceSystem<F: Float + Debug> {
    // Number of replicas kept per piece of work/data.
    replication_factor: usize,
    // Time between checkpoints (units unspecified in this file).
    checkpoint_interval: F,
    // Detects unresponsive nodes.
    failure_detection: FailureDetection<F>,
    // Candidate mechanisms consulted by `handle_failure`.
    recovery_mechanisms: Vec<RecoveryMechanism<F>>,
}
257
/// Configuration and algorithms for detecting node failures.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct FailureDetection<F: Float + Debug> {
    // Detection algorithms in use.
    detection_algorithms: Vec<DetectionAlgorithm<F>>,
    // Interval between heartbeat probes.
    heartbeat_interval: F,
    // Silence longer than this marks a node as failed.
    timeout_threshold: F,
    // Assumed baseline probability of node failure.
    failure_probability: F,
}
267
/// Characteristics of one failure-detection algorithm.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct DetectionAlgorithm<F: Float + Debug> {
    // Human-readable algorithm name.
    algorithm_name: String,
    // Fraction of real failures detected.
    detection_accuracy: F,
    // Fraction of healthy nodes wrongly flagged.
    false_positive_rate: F,
    // Time from failure to detection.
    detection_latency: F,
}
277
/// One way of recovering from a node failure, with fixed characteristics
/// assigned by `RecoveryMechanism::new` based on the recovery type.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct RecoveryMechanism<F: Float + Debug> {
    // Which recovery action this mechanism performs.
    mechanism_type: RecoveryType,
    // Expected time to recover.
    recovery_time: F,
    // Probability the recovery succeeds.
    success_rate: F,
    // Extra resource cost of keeping this mechanism available.
    resource_overhead: F,
}
287
/// Kinds of failure-recovery actions.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum RecoveryType {
    /// Restart the failed component in place.
    Restart,
    /// Fail over to a standby node.
    Failover,
    /// Redistribute the failed node's work across survivors.
    Redistribution,
    /// Roll back to the last checkpoint.
    CheckpointRecovery,
    /// Recover from a live replica.
    ReplicationRecovery,
}
303
304impl<F: Float + Debug + Clone + FromPrimitive> DistributedTaskScheduler<F> {
305 pub fn new(strategy: SchedulingStrategy) -> Self {
307 DistributedTaskScheduler {
308 task_queue: Vec::new(),
309 available_nodes: vec![0, 1, 2, 3], scheduling_strategy: strategy,
311 }
312 }
313
314 pub fn add_task(&mut self, task: DistributedTask<F>) {
316 self.task_queue.push(task);
317 }
318
319 pub fn schedule_tasks(&mut self) -> Result<HashMap<usize, Vec<usize>>> {
321 let mut schedule = HashMap::new();
322
323 match self.scheduling_strategy {
324 SchedulingStrategy::RoundRobin => {
325 self.round_robin_scheduling(&mut schedule)?;
326 }
327 SchedulingStrategy::Priority => {
328 self.priority_scheduling(&mut schedule)?;
329 }
330 SchedulingStrategy::LoadBalancing => {
331 self.load_balancing_scheduling(&mut schedule)?;
332 }
333 _ => {
334 self.fcfs_scheduling(&mut schedule)?;
335 }
336 }
337
338 Ok(schedule)
339 }
340
341 fn fcfs_scheduling(&mut self, schedule: &mut HashMap<usize, Vec<usize>>) -> Result<()> {
343 let mut node_index = 0;
344
345 for task in &self.task_queue {
346 let node_id = self.available_nodes[node_index % self.available_nodes.len()];
347 let task_list = schedule.entry(node_id).or_default();
348 task_list.push(task.task_id);
349 node_index += 1;
350 }
351
352 Ok(())
353 }
354
355 fn round_robin_scheduling(&mut self, schedule: &mut HashMap<usize, Vec<usize>>) -> Result<()> {
357 for (i, task) in self.task_queue.iter().enumerate() {
358 let node_id = self.available_nodes[i % self.available_nodes.len()];
359 let task_list = schedule.entry(node_id).or_default();
360 task_list.push(task.task_id);
361 }
362
363 Ok(())
364 }
365
366 fn priority_scheduling(&mut self, schedule: &mut HashMap<usize, Vec<usize>>) -> Result<()> {
368 self.task_queue.sort_by(|a, b| {
370 b.priority
371 .partial_cmp(&a.priority)
372 .expect("Operation failed")
373 });
374
375 let mut node_index = 0;
377 for task in &self.task_queue {
378 let node_id = self.available_nodes[node_index % self.available_nodes.len()];
379 let task_list = schedule.entry(node_id).or_default();
380 task_list.push(task.task_id);
381 node_index += 1;
382 }
383
384 Ok(())
385 }
386
387 fn load_balancing_scheduling(
389 &mut self,
390 schedule: &mut HashMap<usize, Vec<usize>>,
391 ) -> Result<()> {
392 for task in &self.task_queue {
394 let least_loaded_node = self
395 .available_nodes
396 .iter()
397 .min_by_key(|&&node_id| {
398 schedule.get(&node_id).map(|tasks| tasks.len()).unwrap_or(0)
399 })
400 .copied()
401 .unwrap_or(self.available_nodes[0]);
402
403 let task_list = schedule.entry(least_loaded_node).or_default();
404 task_list.push(task.task_id);
405 }
406
407 Ok(())
408 }
409
410 pub fn get_scheduling_stats(&self) -> HashMap<String, usize> {
412 let mut stats = HashMap::new();
413 stats.insert("total_tasks".to_string(), self.task_queue.len());
414 stats.insert("available_nodes".to_string(), self.available_nodes.len());
415
416 let mut pending_count = 0;
418 let mut running_count = 0;
419 let mut completed_count = 0;
420
421 for task in &self.task_queue {
422 match task.completion_status {
423 TaskStatus::Pending => pending_count += 1,
424 TaskStatus::Running => running_count += 1,
425 TaskStatus::Completed => completed_count += 1,
426 _ => {}
427 }
428 }
429
430 stats.insert("pending_tasks".to_string(), pending_count);
431 stats.insert("running_tasks".to_string(), running_count);
432 stats.insert("completed_tasks".to_string(), completed_count);
433
434 stats
435 }
436}
437
438impl<F: Float + Debug + Clone + FromPrimitive> DistributedTask<F> {
439 pub fn new(task_id: usize, task_type: TaskType, priority: F) -> Self {
441 DistributedTask {
442 task_id,
443 task_type,
444 priority,
445 resource_requirements: ResourceRequirements::default(),
446 completion_status: TaskStatus::Pending,
447 }
448 }
449
450 pub fn update_status(&mut self, new_status: TaskStatus) {
452 self.completion_status = new_status;
453 }
454
455 pub fn is_complete(&self) -> bool {
457 matches!(self.completion_status, TaskStatus::Completed)
458 }
459
460 pub fn estimate_execution_time(&self) -> F {
462 match self.task_type {
463 TaskType::Computation => F::from_f64(10.0).expect("Operation failed"),
464 TaskType::DataProcessing => F::from_f64(15.0).expect("Operation failed"),
465 TaskType::MachineLearning => F::from_f64(30.0).expect("Operation failed"),
466 TaskType::QuantumComputation => F::from_f64(5.0).expect("Operation failed"),
467 TaskType::Analysis => F::from_f64(20.0).expect("Operation failed"),
468 }
469 }
470}
471
472impl<F: Float + Debug + Clone + FromPrimitive> Default for ResourceRequirements<F> {
473 fn default() -> Self {
474 ResourceRequirements {
475 cpu_cores: 2,
476 memory_gb: F::from_f64(4.0).expect("Operation failed"),
477 storage_gb: F::from_f64(10.0).expect("Operation failed"),
478 network_bandwidth: F::from_f64(100.0).expect("Operation failed"), gpu_required: false,
480 }
481 }
482}
483
impl<F: Float + Debug + Clone + FromPrimitive> Default for DistributedResourceManager<F> {
    /// Same as [`DistributedResourceManager::new`].
    fn default() -> Self {
        Self::new()
    }
}
489
490impl<F: Float + Debug + Clone + FromPrimitive> DistributedResourceManager<F> {
491 pub fn new() -> Self {
493 let mut available_resources = HashMap::new();
494
495 for i in 0..4 {
497 let node = NodeResources {
498 node_id: i,
499 cpu_cores: 8,
500 available_memory: F::from_f64(16.0).expect("Operation failed"),
501 total_memory: F::from_f64(16.0).expect("Operation failed"),
502 storage_capacity: F::from_f64(1000.0).expect("Operation failed"),
503 network_bandwidth: F::from_f64(1000.0).expect("Operation failed"),
504 gpu_count: 1,
505 utilization: F::zero(),
506 };
507 available_resources.insert(i, node);
508 }
509
510 DistributedResourceManager {
511 available_resources,
512 resource_allocation: HashMap::new(),
513 load_balancer: LoadBalancer::new(),
514 }
515 }
516
517 pub fn allocate_resources(&mut self, task: &DistributedTask<F>) -> Result<Option<usize>> {
519 let node_ids: Vec<usize> = self.available_resources.keys().cloned().collect();
521 for node_id in node_ids {
522 let node_resources = self
523 .available_resources
524 .get(&node_id)
525 .expect("Operation failed");
526 if self.can_accommodate_task(node_resources, task) {
527 self.allocate_task_to_node(node_id, task.task_id)?;
529 self.update_node_utilization(node_id, task)?;
530 return Ok(Some(node_id));
531 }
532 }
533
534 Ok(None) }
536
537 fn can_accommodate_task(&self, node: &NodeResources<F>, task: &DistributedTask<F>) -> bool {
539 node.cpu_cores >= task.resource_requirements.cpu_cores
540 && node.available_memory >= task.resource_requirements.memory_gb
541 && (!task.resource_requirements.gpu_required || node.gpu_count > 0)
542 }
543
544 fn allocate_task_to_node(&mut self, node_id: usize, task_id: usize) -> Result<()> {
546 let task_list = self.resource_allocation.entry(node_id).or_default();
547 task_list.push(task_id);
548 Ok(())
549 }
550
551 fn update_node_utilization(&mut self, node_id: usize, task: &DistributedTask<F>) -> Result<()> {
553 if let Some(node) = self.available_resources.get_mut(&node_id) {
554 node.available_memory = node.available_memory - task.resource_requirements.memory_gb;
555
556 let memory_utilization =
558 (node.total_memory - node.available_memory) / node.total_memory;
559 node.utilization = memory_utilization;
560 }
561 Ok(())
562 }
563
564 pub fn get_utilization_stats(&self) -> HashMap<usize, F> {
566 self.available_resources
567 .iter()
568 .map(|(&node_id, node)| (node_id, node.utilization))
569 .collect()
570 }
571}
572
impl<F: Float + Debug + Clone + FromPrimitive> Default for LoadBalancer<F> {
    /// Same as [`LoadBalancer::new`].
    fn default() -> Self {
        Self::new()
    }
}
578
579impl<F: Float + Debug + Clone + FromPrimitive> LoadBalancer<F> {
580 pub fn new() -> Self {
582 LoadBalancer {
583 balancing_algorithm: LoadBalancingAlgorithm::RoundRobin,
584 load_metrics: Vec::new(),
585 rebalancing_threshold: F::from_f64(0.8).expect("Operation failed"),
586 }
587 }
588
589 pub fn balance_load(&mut self, node_loads: &HashMap<usize, F>) -> Result<Vec<(usize, usize)>> {
591 let mut rebalancing_actions = Vec::new();
592
593 let avg_load = node_loads.values().fold(F::zero(), |acc, &load| acc + load)
595 / F::from_usize(node_loads.len()).expect("Operation failed");
596
597 let mut overloaded_nodes = Vec::new();
598 let mut underloaded_nodes = Vec::new();
599
600 for (&node_id, &load) in node_loads {
601 if load > self.rebalancing_threshold {
602 overloaded_nodes.push(node_id);
603 } else if load < avg_load * F::from_f64(0.5).expect("Operation failed") {
604 underloaded_nodes.push(node_id);
605 }
606 }
607
608 for &overloaded_node in &overloaded_nodes {
610 if let Some(&underloaded_node) = underloaded_nodes.first() {
611 rebalancing_actions.push((overloaded_node, underloaded_node));
612 }
613 }
614
615 Ok(rebalancing_actions)
616 }
617
618 pub fn update_metrics(&mut self, node_id: usize, metrics: LoadMetric<F>) {
620 self.load_metrics.retain(|m| m.node_id != node_id);
622 self.load_metrics.push(metrics);
624 }
625}
626
impl<F: Float + Debug + Clone + FromPrimitive> Default for DistributedIntelligenceCoordinator<F> {
    /// Same as [`DistributedIntelligenceCoordinator::new`].
    fn default() -> Self {
        Self::new()
    }
}
632
633impl<F: Float + Debug + Clone + FromPrimitive> DistributedIntelligenceCoordinator<F> {
634 pub fn new() -> Self {
636 DistributedIntelligenceCoordinator {
637 task_scheduler: DistributedTaskScheduler::new(SchedulingStrategy::LoadBalancing),
638 resource_manager: DistributedResourceManager::new(),
639 communication_layer: CommunicationLayer::new(),
640 fault_tolerance: FaultToleranceSystem::new(),
641 }
642 }
643
644 pub fn coordinate_processing(&mut self, data: &Array1<F>) -> Result<Array1<F>> {
646 let tasks = self.create_tasks_from_data(data)?;
648
649 for task in tasks {
651 self.task_scheduler.add_task(task);
652 }
653
654 let schedule = self.task_scheduler.schedule_tasks()?;
655
656 for (node_id, task_ids) in schedule {
658 for task_id in task_ids {
659 let result = self.simulate_task_execution(task_id, node_id)?;
661 }
663 }
664
665 Ok(data.clone())
667 }
668
669 fn create_tasks_from_data(&self, data: &Array1<F>) -> Result<Vec<DistributedTask<F>>> {
671 let mut tasks = Vec::new();
672
673 let chunk_size = (data.len() / 4).max(1); for (i, chunk) in data
677 .axis_chunks_iter(scirs2_core::ndarray::Axis(0), chunk_size)
678 .enumerate()
679 {
680 let task = DistributedTask::new(
681 i,
682 TaskType::DataProcessing,
683 F::from_f64(1.0).expect("Operation failed"),
684 );
685 tasks.push(task);
686 }
687
688 Ok(tasks)
689 }
690
691 fn simulate_task_execution(&mut self, task_id: usize, nodeid: usize) -> Result<Array1<F>> {
693 let execution_time = F::from_f64(0.1).expect("Operation failed"); let result = Array1::from_elem(
698 10,
699 F::from_f64(scirs2_core::random::random::<f64>()).expect("Operation failed"),
700 );
701
702 Ok(result)
703 }
704}
705
impl<F: Float + Debug + Clone + FromPrimitive> Default for CommunicationLayer<F> {
    /// Same as [`CommunicationLayer::new`].
    fn default() -> Self {
        Self::new()
    }
}
711
712impl<F: Float + Debug + Clone + FromPrimitive> CommunicationLayer<F> {
713 pub fn new() -> Self {
715 CommunicationLayer {
716 communication_protocol: CommunicationProtocol::TCP,
717 message_queue: Vec::new(),
718 network_topology: NetworkTopology::Mesh,
719 bandwidth_allocation: HashMap::new(),
720 }
721 }
722
723 pub fn send_message(&mut self, message: DistributedMessage<F>) -> Result<()> {
725 self.message_queue.push(message);
727
728 Ok(())
730 }
731
732 pub fn receive_messages(&mut self) -> Vec<DistributedMessage<F>> {
734 let messages = self.message_queue.clone();
735 self.message_queue.clear();
736 messages
737 }
738
739 pub fn allocate_bandwidth(&mut self, node_id: usize, bandwidth: F) {
741 self.bandwidth_allocation.insert(node_id, bandwidth);
742 }
743}
744
impl<F: Float + Debug + Clone + FromPrimitive> Default for FaultToleranceSystem<F> {
    /// Same as [`FaultToleranceSystem::new`].
    fn default() -> Self {
        Self::new()
    }
}
750
751impl<F: Float + Debug + Clone + FromPrimitive> FaultToleranceSystem<F> {
752 pub fn new() -> Self {
754 FaultToleranceSystem {
755 replication_factor: 3,
756 checkpoint_interval: F::from_f64(60.0).expect("Operation failed"), failure_detection: FailureDetection::new(),
758 recovery_mechanisms: vec![
759 RecoveryMechanism::new(RecoveryType::Restart),
760 RecoveryMechanism::new(RecoveryType::Failover),
761 ],
762 }
763 }
764
765 pub fn handle_failure(&mut self, failed_nodeid: usize) -> Result<RecoveryType> {
767 for mechanism in &self.recovery_mechanisms {
769 if mechanism.success_rate > F::from_f64(0.8).expect("Operation failed") {
770 return Ok(mechanism.mechanism_type.clone());
771 }
772 }
773
774 Ok(RecoveryType::Restart) }
776
777 pub fn create_checkpoint(&self, data: &Array1<F>) -> Result<Array1<F>> {
779 Ok(data.clone())
781 }
782}
783
impl<F: Float + Debug + Clone + FromPrimitive> Default for FailureDetection<F> {
    /// Same as [`FailureDetection::new`].
    fn default() -> Self {
        Self::new()
    }
}
789
790impl<F: Float + Debug + Clone + FromPrimitive> FailureDetection<F> {
791 pub fn new() -> Self {
793 FailureDetection {
794 detection_algorithms: vec![DetectionAlgorithm {
795 algorithm_name: "heartbeat_monitor".to_string(),
796 detection_accuracy: F::from_f64(0.95).expect("Operation failed"),
797 false_positive_rate: F::from_f64(0.05).expect("Operation failed"),
798 detection_latency: F::from_f64(5.0).expect("Operation failed"),
799 }],
800 heartbeat_interval: F::from_f64(1.0).expect("Operation failed"), timeout_threshold: F::from_f64(5.0).expect("Operation failed"), failure_probability: F::from_f64(0.01).expect("Operation failed"),
803 }
804 }
805
806 pub fn detect_failures(&mut self, node_statuses: &HashMap<usize, bool>) -> Result<Vec<usize>> {
808 let mut failed_nodes = Vec::new();
809
810 for (&node_id, &is_responsive) in node_statuses {
811 if !is_responsive {
812 failed_nodes.push(node_id);
813 }
814 }
815
816 Ok(failed_nodes)
817 }
818}
819
820impl<F: Float + Debug + Clone + FromPrimitive> RecoveryMechanism<F> {
821 pub fn new(mechanism_type: RecoveryType) -> Self {
823 let (recovery_time, success_rate, resource_overhead) = match mechanism_type {
824 RecoveryType::Restart => (
825 F::from_f64(10.0).expect("Operation failed"),
826 F::from_f64(0.9).expect("Operation failed"),
827 F::from_f64(0.1).expect("Operation failed"),
828 ),
829 RecoveryType::Failover => (
830 F::from_f64(5.0).expect("Operation failed"),
831 F::from_f64(0.95).expect("Operation failed"),
832 F::from_f64(0.2).expect("Operation failed"),
833 ),
834 RecoveryType::Redistribution => (
835 F::from_f64(15.0).expect("Operation failed"),
836 F::from_f64(0.85).expect("Operation failed"),
837 F::from_f64(0.15).expect("Operation failed"),
838 ),
839 RecoveryType::CheckpointRecovery => (
840 F::from_f64(20.0).expect("Operation failed"),
841 F::from_f64(0.8).expect("Operation failed"),
842 F::from_f64(0.05).expect("Operation failed"),
843 ),
844 RecoveryType::ReplicationRecovery => (
845 F::from_f64(8.0).expect("Operation failed"),
846 F::from_f64(0.92).expect("Operation failed"),
847 F::from_f64(0.3).expect("Operation failed"),
848 ),
849 };
850
851 RecoveryMechanism {
852 mechanism_type,
853 recovery_time,
854 success_rate,
855 resource_overhead,
856 }
857 }
858
859 pub fn apply_recovery(&self, failedtasks: &[usize]) -> Result<bool> {
861 let recovery_success = self.success_rate
863 > F::from_f64(scirs2_core::random::random::<f64>()).expect("Operation failed");
864
865 if recovery_success {
866 Ok(true)
868 } else {
869 Ok(false)
871 }
872 }
873}
874
875impl<F: Float + Debug + Clone + FromPrimitive> DistributedMessage<F> {
876 pub fn new(
878 message_id: usize,
879 sender_id: usize,
880 receiver_id: usize,
881 message_type: MessageType,
882 payload: Vec<F>,
883 ) -> Self {
884 DistributedMessage {
885 message_id,
886 sender_id,
887 receiver_id,
888 message_type,
889 payload,
890 timestamp: F::from_f64(0.0).expect("Operation failed"), priority: MessagePriority::Normal,
892 }
893 }
894
895 pub fn with_priority(mut self, priority: MessagePriority) -> Self {
897 self.priority = priority;
898 self
899 }
900
901 pub fn size(&self) -> usize {
903 self.payload.len()
904 }
905}