use crate::error::{OptimError, Result};
use scirs2_core::ndarray::{Array, Dimension, ScalarOperand, Zip};
use scirs2_core::numeric::Float;
use std::collections::HashMap;
use std::fmt::Debug;

/// Strategy used to combine parameters from multiple nodes.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum AveragingStrategy {
    /// Simple arithmetic mean over all participating nodes
    Arithmetic,
    /// Weighted mean using per-node weights (e.g. local dataset size)
    WeightedByData,
    /// Weighted mean using per-node weights (e.g. local training time)
    WeightedByTime,
    /// Federated averaging (implemented as a weighted mean, see `federated_average`)
    Federated,
    /// Momentum-smoothed averaging
    Momentum {
        /// Momentum coefficient in [0, 1)
        momentum: f64,
    },
    /// Exponential moving average of the per-round means
    ExponentialMovingAverage {
        /// Decay factor in [0, 1)
        decay: f64,
    },
}

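/// Combines parameter sets from multiple nodes into a single averaged set.
///
/// A minimal usage sketch (illustrative only, assuming `f64` parameters on
/// 1-D arrays; not compiled as a doctest):
///
/// ```rust,ignore
/// use scirs2_core::ndarray::Array1;
///
/// let mut averager: ParameterAverager<f64, scirs2_core::ndarray::Ix1> =
///     ParameterAverager::new(AveragingStrategy::Arithmetic, 2);
/// let node_parameters = vec![
///     (0, vec![Array1::from_vec(vec![1.0, 2.0])]),
///     (1, vec![Array1::from_vec(vec![3.0, 4.0])]),
/// ];
/// // Initializes lazily from the first update, then averages to [2.0, 3.0].
/// averager.average_parameters(&node_parameters).unwrap();
/// let avg = averager.get_averaged_parameters();
/// ```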
#[derive(Debug)]
pub struct ParameterAverager<A: Float, D: Dimension> {
    /// Current averaged parameters
    averaged_params: Vec<Array<A, D>>,
    /// Averaging strategy
    strategy: AveragingStrategy,
    /// Per-node weights used by the weighted strategies
    node_weights: HashMap<usize, A>,
    /// Total number of participating nodes
    num_nodes: usize,
    /// Momentum buffer (only allocated for `AveragingStrategy::Momentum`)
    momentum_buffer: Option<Vec<Array<A, D>>>,
    /// Number of averaging steps performed
    step_count: usize,
    /// Whether `initialize` has been called
    initialized: bool,
}

impl<A: Float + ScalarOperand + Debug + Send + Sync, D: Dimension + Send + Sync>
    ParameterAverager<A, D>
{
    /// Creates a new averager for `num_nodes` nodes.
    pub fn new(strategy: AveragingStrategy, num_nodes: usize) -> Self {
        Self {
            averaged_params: Vec::new(),
            strategy,
            node_weights: HashMap::new(),
            num_nodes,
            momentum_buffer: None,
            step_count: 0,
            initialized: false,
        }
    }

    /// Initializes the averager from a template parameter set.
    pub fn initialize(&mut self, params: &[Array<A, D>]) -> Result<()> {
        if self.initialized {
            return Err(OptimError::InvalidConfig(
                "Parameter averager already initialized".to_string(),
            ));
        }

        self.averaged_params = params.to_vec();

        if matches!(self.strategy, AveragingStrategy::Momentum { .. }) {
            self.momentum_buffer = Some(params.iter().map(|p| Array::zeros(p.raw_dim())).collect());
        }

        // Start with uniform weights; callers can override via `set_node_weight`.
        let uniform_weight = A::one() / A::from(self.num_nodes).unwrap();
        for node_id in 0..self.num_nodes {
            self.node_weights.insert(node_id, uniform_weight);
        }

        self.initialized = true;
        Ok(())
    }

    /// Sets the weight used for `node_id` by the weighted strategies.
    pub fn set_node_weight(&mut self, node_id: usize, weight: A) -> Result<()> {
        if node_id >= self.num_nodes {
            return Err(OptimError::InvalidConfig(format!(
                "Node ID {} exceeds number of nodes {}",
                node_id, self.num_nodes
            )));
        }
        self.node_weights.insert(node_id, weight);
        Ok(())
    }

    /// Averages the given per-node parameter sets according to the strategy.
    pub fn average_parameters(
        &mut self,
        node_parameters: &[(usize, Vec<Array<A, D>>)],
    ) -> Result<()> {
        // Lazily initialize from the first submitted parameter set.
        if !self.initialized {
            if let Some((_, first_params)) = node_parameters.first() {
                self.initialize(first_params)?;
            } else {
                return Err(OptimError::InvalidConfig(
                    "No parameters provided for initialization".to_string(),
                ));
            }
        }

        // Validate node IDs and parameter counts before touching any state.
        for (node_id, params) in node_parameters {
            if *node_id >= self.num_nodes {
                return Err(OptimError::InvalidConfig(format!(
                    "Node ID {} exceeds number of nodes {}",
                    node_id, self.num_nodes
                )));
            }
            if params.len() != self.averaged_params.len() {
                return Err(OptimError::DimensionMismatch(format!(
                    "Expected {} parameter arrays, got {}",
                    self.averaged_params.len(),
                    params.len()
                )));
            }
        }

        self.step_count += 1;

        match self.strategy {
            AveragingStrategy::Arithmetic => {
                self.arithmetic_average(node_parameters)?;
            }
            AveragingStrategy::WeightedByData | AveragingStrategy::WeightedByTime => {
                self.weighted_average(node_parameters)?;
            }
            AveragingStrategy::Federated => {
                self.federated_average(node_parameters)?;
            }
            AveragingStrategy::Momentum { momentum } => {
                self.momentum_average(node_parameters, momentum)?;
            }
            AveragingStrategy::ExponentialMovingAverage { decay } => {
                self.ema_average(node_parameters, decay)?;
            }
        }

        Ok(())
    }

    fn arithmetic_average(&mut self, node_parameters: &[(usize, Vec<Array<A, D>>)]) -> Result<()> {
        for param in &mut self.averaged_params {
            param.fill(A::zero());
        }

        let num_nodes = A::from(node_parameters.len()).unwrap();

        for (_node_id, params) in node_parameters {
            for (avg_param, param) in self.averaged_params.iter_mut().zip(params.iter()) {
                Zip::from(avg_param).and(param).for_each(|avg, &p| {
                    *avg = *avg + p;
                });
            }
        }

        for param in &mut self.averaged_params {
            param.mapv_inplace(|x| x / num_nodes);
        }

        Ok(())
    }

    fn weighted_average(&mut self, node_parameters: &[(usize, Vec<Array<A, D>>)]) -> Result<()> {
        for param in &mut self.averaged_params {
            param.fill(A::zero());
        }

        // Normalize the weights of the participating nodes to sum to one.
        let total_weight: A = node_parameters
            .iter()
            .map(|(node_id, _)| self.node_weights.get(node_id).copied().unwrap_or(A::zero()))
            .fold(A::zero(), |acc, w| acc + w);

        if total_weight <= A::zero() {
            return Err(OptimError::InvalidConfig(
                "Total node weights must be > 0".to_string(),
            ));
        }

        for (node_id, params) in node_parameters {
            let weight =
                self.node_weights.get(node_id).copied().unwrap_or(A::zero()) / total_weight;

            for (avg_param, param) in self.averaged_params.iter_mut().zip(params.iter()) {
                Zip::from(avg_param).and(param).for_each(|avg, &p| {
                    *avg = *avg + weight * p;
                });
            }
        }

        Ok(())
    }

    fn federated_average(&mut self, node_parameters: &[(usize, Vec<Array<A, D>>)]) -> Result<()> {
        // FedAvg-style aggregation: a weighted average, with weights
        // typically set proportional to each node's local data size.
        self.weighted_average(node_parameters)
    }

    fn momentum_average(
        &mut self,
        node_parameters: &[(usize, Vec<Array<A, D>>)],
        momentum: f64,
    ) -> Result<()> {
        let momentum_factor = A::from(momentum).unwrap();
        let one_minus_momentum = A::one() - momentum_factor;

        // First compute the plain arithmetic mean of this round's updates.
        let mut current_average: Vec<Array<A, D>> = self
            .averaged_params
            .iter()
            .map(|param| Array::zeros(param.raw_dim()))
            .collect();

        let num_nodes = A::from(node_parameters.len()).unwrap();
        for (_node_id, params) in node_parameters {
            for (avg_param, param) in current_average.iter_mut().zip(params.iter()) {
                Zip::from(avg_param).and(param).for_each(|avg, &p| {
                    *avg = *avg + p / num_nodes;
                });
            }
        }

        // Then blend it into the momentum buffer:
        // m_t = momentum * m_{t-1} + (1 - momentum) * mean_t
        if let Some(ref mut momentum_buf) = self.momentum_buffer {
            for ((avg_param, current_param), momentum_param) in self
                .averaged_params
                .iter_mut()
                .zip(current_average.iter())
                .zip(momentum_buf.iter_mut())
            {
                Zip::from(&mut *momentum_param)
                    .and(current_param)
                    .for_each(|mom, &curr| {
                        *mom = momentum_factor * *mom + one_minus_momentum * curr;
                    });

                avg_param.assign(&*momentum_param);
            }
        }

        Ok(())
    }

    fn ema_average(
        &mut self,
        node_parameters: &[(usize, Vec<Array<A, D>>)],
        decay: f64,
    ) -> Result<()> {
        let decay_factor = A::from(decay).unwrap();
        let one_minus_decay = A::one() - decay_factor;

        let mut current_average: Vec<Array<A, D>> = self
            .averaged_params
            .iter()
            .map(|param| Array::zeros(param.raw_dim()))
            .collect();

        let num_nodes = A::from(node_parameters.len()).unwrap();
        for (_node_id, params) in node_parameters {
            for (avg_param, param) in current_average.iter_mut().zip(params.iter()) {
                Zip::from(avg_param).and(param).for_each(|avg, &p| {
                    *avg = *avg + p / num_nodes;
                });
            }
        }

        // avg_t = decay * avg_{t-1} + (1 - decay) * mean_t
        for (avg_param, current_param) in
            self.averaged_params.iter_mut().zip(current_average.iter())
        {
            Zip::from(avg_param)
                .and(current_param)
                .for_each(|avg, &curr| {
                    *avg = decay_factor * *avg + one_minus_decay * curr;
                });
        }

        Ok(())
    }

    /// Returns the current averaged parameters.
    pub fn get_averaged_parameters(&self) -> &[Array<A, D>] {
        &self.averaged_params
    }

    /// Returns a clone of the current averaged parameters.
    pub fn get_averaged_parameters_cloned(&self) -> Vec<Array<A, D>> {
        self.averaged_params.clone()
    }

    /// Resets the averaged parameters, momentum state, and step counter.
    pub fn reset(&mut self) {
        self.step_count = 0;
        for param in &mut self.averaged_params {
            param.fill(A::zero());
        }
        if let Some(ref mut momentum_buf) = self.momentum_buffer {
            for buf in momentum_buf {
                buf.fill(A::zero());
            }
        }
    }

    /// Number of averaging steps performed so far.
    pub fn step_count(&self) -> usize {
        self.step_count
    }

    /// Number of participating nodes.
    pub fn num_nodes(&self) -> usize {
        self.num_nodes
    }

    /// The configured averaging strategy.
    pub fn strategy(&self) -> AveragingStrategy {
        self.strategy
    }

    /// Whether the averager has been initialized.
    pub fn is_initialized(&self) -> bool {
        self.initialized
    }
}

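/// Central parameter server that collects per-node updates and aggregates
/// them into a global parameter set once enough updates have arrived.
///
/// A minimal usage sketch (illustrative only, assuming `f64` parameters on
/// 1-D arrays; not compiled as a doctest):
///
/// ```rust,ignore
/// use scirs2_core::ndarray::Array1;
///
/// let mut server = ParameterServer::new(AveragingStrategy::Arithmetic, 2, 2);
/// server.initialize(&[Array1::from_vec(vec![0.0, 0.0])]).unwrap();
///
/// // Aggregation triggers once `expected_updates_per_round` updates arrive.
/// server.submit_update(0, vec![Array1::from_vec(vec![1.0, 2.0])]).unwrap();
/// let ready = server.submit_update(1, vec![Array1::from_vec(vec![3.0, 4.0])]).unwrap();
/// assert!(ready);
/// let global = server.get_global_parameters(); // [2.0, 3.0]
/// ```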
#[derive(Debug)]
pub struct ParameterServer<A: Float, D: Dimension> {
    /// Underlying parameter averager
    averager: ParameterAverager<A, D>,
    /// Current global parameters
    global_parameters: Vec<Array<A, D>>,
    /// Total number of updates received per node
    update_counts: HashMap<usize, usize>,
    /// Number of pending updates that triggers aggregation
    expected_updates_per_round: usize,
    /// Number of completed aggregation rounds
    current_round: usize,
    /// Updates received but not yet aggregated, keyed by node ID
    pending_updates: HashMap<usize, Vec<Array<A, D>>>,
}

impl<A: Float + ScalarOperand + Debug + Send + Sync, D: Dimension + Send + Sync>
    ParameterServer<A, D>
{
    /// Creates a new parameter server.
    pub fn new(
        strategy: AveragingStrategy,
        num_nodes: usize,
        expected_updates_per_round: usize,
    ) -> Self {
        Self {
            averager: ParameterAverager::new(strategy, num_nodes),
            global_parameters: Vec::new(),
            update_counts: HashMap::new(),
            expected_updates_per_round,
            current_round: 0,
            pending_updates: HashMap::new(),
        }
    }

    /// Initializes the server (and its averager) with the starting parameters.
    pub fn initialize(&mut self, initial_params: &[Array<A, D>]) -> Result<()> {
        self.averager.initialize(initial_params)?;
        self.global_parameters = initial_params.to_vec();

        for node_id in 0..self.averager.num_nodes() {
            self.update_counts.insert(node_id, 0);
        }

        Ok(())
    }

    /// Submits an update from a node. Returns `true` if the update triggered
    /// an aggregation round.
    pub fn submit_update(&mut self, node_id: usize, parameters: Vec<Array<A, D>>) -> Result<bool> {
        if node_id >= self.averager.num_nodes() {
            return Err(OptimError::InvalidConfig(format!(
                "Node ID {} exceeds number of nodes {}",
                node_id,
                self.averager.num_nodes()
            )));
        }

        // A newer update from the same node replaces its pending one.
        self.pending_updates.insert(node_id, parameters);
        *self.update_counts.entry(node_id).or_insert(0) += 1;

        let ready_for_aggregation = self.pending_updates.len() >= self.expected_updates_per_round;

        if ready_for_aggregation {
            self.aggregate_and_update()?;
        }

        Ok(ready_for_aggregation)
    }

    /// Forces aggregation of whatever updates are currently pending.
    pub fn force_aggregation(&mut self) -> Result<()> {
        if !self.pending_updates.is_empty() {
            self.aggregate_and_update()?;
        }
        Ok(())
    }

    fn aggregate_and_update(&mut self) -> Result<()> {
        let node_params: Vec<(usize, Vec<Array<A, D>>)> = self.pending_updates.drain().collect();

        self.averager.average_parameters(&node_params)?;

        self.global_parameters = self.averager.get_averaged_parameters_cloned();

        self.current_round += 1;

        Ok(())
    }

    /// Returns the current global parameters.
    pub fn get_global_parameters(&self) -> &[Array<A, D>] {
        &self.global_parameters
    }

    /// Returns a clone of the current global parameters.
    pub fn get_global_parameters_cloned(&self) -> Vec<Array<A, D>> {
        self.global_parameters.clone()
    }

    /// Number of completed aggregation rounds.
    pub fn current_round(&self) -> usize {
        self.current_round
    }

    /// Total number of updates received from `node_id`.
    pub fn get_update_count(&self, node_id: usize) -> usize {
        self.update_counts.get(&node_id).copied().unwrap_or(0)
    }

    /// Number of updates waiting for the next aggregation.
    pub fn pending_updates_count(&self) -> usize {
        self.pending_updates.len()
    }

    /// Sets the averaging weight for `node_id`.
    pub fn set_node_weight(&mut self, node_id: usize, weight: A) -> Result<()> {
        self.averager.set_node_weight(node_id, weight)
    }

    /// Resets the server, keeping the configuration but clearing all state.
    pub fn reset(&mut self) {
        self.averager.reset();
        self.update_counts.clear();
        self.pending_updates.clear();
        self.current_round = 0;

        for node_id in 0..self.averager.num_nodes() {
            self.update_counts.insert(node_id, 0);
        }
    }
}

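/// Drives communication rounds against a parameter server and tracks a
/// relative-change convergence metric across rounds.
///
/// A minimal usage sketch (illustrative only; not compiled as a doctest):
///
/// ```rust,ignore
/// use scirs2_core::ndarray::Array1;
///
/// let mut coordinator = DistributedCoordinator::new(
///     AveragingStrategy::Arithmetic,
///     2,  // num_nodes
///     2,  // expected_updates_per_round
///     10, // max_rounds
/// );
/// coordinator.initialize(&[Array1::from_vec(vec![0.0])]).unwrap();
///
/// let updates = vec![
///     (0, vec![Array1::from_vec(vec![1.0])]),
///     (1, vec![Array1::from_vec(vec![2.0])]),
/// ];
/// let result = coordinator.communication_round(updates).unwrap();
/// assert_eq!(result.round, 1);
/// ```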
#[derive(Debug)]
pub struct DistributedCoordinator<A: Float, D: Dimension> {
    /// Parameter server used for aggregation
    parameter_server: ParameterServer<A, D>,
    /// Number of completed communication rounds
    communication_rounds: usize,
    /// Relative parameter change below which training counts as converged
    convergence_threshold: A,
    /// Maximum number of communication rounds
    max_rounds: usize,
    /// Per-round training statistics
    training_stats: TrainingStats<A>,
}

impl<A: Float + ScalarOperand + Debug + Send + Sync, D: Dimension + Send + Sync>
    DistributedCoordinator<A, D>
{
    /// Creates a new coordinator.
    pub fn new(
        strategy: AveragingStrategy,
        num_nodes: usize,
        expected_updates_per_round: usize,
        max_rounds: usize,
    ) -> Self {
        Self {
            parameter_server: ParameterServer::new(strategy, num_nodes, expected_updates_per_round),
            communication_rounds: 0,
            convergence_threshold: A::from(1e-6).unwrap(),
            max_rounds,
            training_stats: TrainingStats::new(),
        }
    }

    /// Initializes the coordinator with the starting parameters.
    pub fn initialize(&mut self, initial_params: &[Array<A, D>]) -> Result<()> {
        self.parameter_server.initialize(initial_params)?;
        self.training_stats
            .record_round(0, A::zero(), initial_params);
        Ok(())
    }

    /// Runs one communication round: submits all node updates, forces
    /// aggregation if needed, and evaluates convergence.
    pub fn communication_round(
        &mut self,
        node_updates: Vec<(usize, Vec<Array<A, D>>)>,
    ) -> Result<CommunicationResult<A, D>> {
        let mut aggregated = false;

        for (node_id, params) in node_updates {
            aggregated = self.parameter_server.submit_update(node_id, params)? || aggregated;
        }

        // If the submissions alone did not trigger aggregation, force one so
        // that every call to this method completes a round.
        if !aggregated {
            self.parameter_server.force_aggregation()?;
            aggregated = true;
        }

        if aggregated {
            self.communication_rounds += 1;

            let current_params = self.parameter_server.get_global_parameters();
            let convergence_metric = self.compute_convergence_metric(current_params);

            self.training_stats.record_round(
                self.communication_rounds,
                convergence_metric,
                current_params,
            );

            let converged = convergence_metric < self.convergence_threshold;
            let max_rounds_reached = self.communication_rounds >= self.max_rounds;

            Ok(CommunicationResult {
                round: self.communication_rounds,
                global_parameters: self.parameter_server.get_global_parameters_cloned(),
                converged,
                should_continue: !converged && !max_rounds_reached,
                convergence_metric,
                stats: self.training_stats.clone(),
            })
        } else {
            // Unreachable after the forced aggregation above; kept as a
            // conservative fallback.
            Ok(CommunicationResult {
                round: self.communication_rounds,
                global_parameters: self.parameter_server.get_global_parameters_cloned(),
                converged: false,
                should_continue: true,
                convergence_metric: A::infinity(),
                stats: self.training_stats.clone(),
            })
        }
    }

    /// Sets the convergence threshold on the relative parameter change.
    pub fn set_convergence_threshold(&mut self, threshold: A) {
        self.convergence_threshold = threshold;
    }

    /// Read-only access to the underlying parameter server.
    pub fn parameter_server(&self) -> &ParameterServer<A, D> {
        &self.parameter_server
    }

    /// Mutable access to the underlying parameter server.
    pub fn parameter_server_mut(&mut self) -> &mut ParameterServer<A, D> {
        &mut self.parameter_server
    }

    /// Relative parameter change between the current and previous round,
    /// sqrt(sum (curr - prev)^2 / sum curr^2). Returns infinity when no
    /// previous parameters are available.
    fn compute_convergence_metric(&self, current_params: &[Array<A, D>]) -> A {
        if let Some(prev_params) = self.training_stats.get_previous_parameters::<D>() {
            let mut total_change = A::zero();
            let mut total_norm = A::zero();

            for (curr, prev) in current_params.iter().zip(prev_params.iter()) {
                for (&c, &p) in curr.iter().zip(prev.iter()) {
                    let diff = c - p;
                    total_change = total_change + diff * diff;
                    total_norm = total_norm + c * c;
                }
            }

            if total_norm > A::zero() {
                (total_change / total_norm).sqrt()
            } else {
                A::zero()
            }
        } else {
            A::infinity()
        }
    }
}

/// Outcome of a single communication round.
#[derive(Debug, Clone)]
pub struct CommunicationResult<A: Float, D: Dimension> {
    /// Round index (1-based)
    pub round: usize,
    /// Global parameters after aggregation
    pub global_parameters: Vec<Array<A, D>>,
    /// Whether the convergence metric fell below the threshold
    pub converged: bool,
    /// Whether training should continue (not converged and rounds remain)
    pub should_continue: bool,
    /// Relative parameter change for this round
    pub convergence_metric: A,
    /// Snapshot of the training statistics so far
    pub stats: TrainingStats<A>,
}

/// Per-round training statistics.
#[derive(Debug, Clone)]
pub struct TrainingStats<A: Float> {
    /// Convergence metric recorded for each round
    convergence_history: Vec<A>,
    /// Round index recorded for each entry
    round_times: Vec<usize>,
    /// Placeholder for a serialized snapshot of the previous parameters.
    /// Storing typed arrays here would make the struct generic over the
    /// dimension type; the current implementation only records a size marker.
    previous_parameters: Option<Vec<u8>>,
}

impl<A: Float + Send + Sync> TrainingStats<A> {
    pub fn new() -> Self {
        Self {
            convergence_history: Vec::new(),
            round_times: Vec::new(),
            previous_parameters: None,
        }
    }

    /// Records the metric and parameters for a completed round.
    pub fn record_round<D: Dimension>(
        &mut self,
        round: usize,
        convergence_metric: A,
        parameters: &[Array<A, D>],
    ) {
        self.convergence_history.push(convergence_metric);
        self.round_times.push(round);

        // Placeholder: only the number of parameter arrays is recorded, not
        // their contents (see `previous_parameters` above).
        self.previous_parameters = Some(vec![0u8; parameters.len()]);
    }

    /// Full history of convergence metrics.
    pub fn convergence_history(&self) -> &[A] {
        &self.convergence_history
    }

    /// The most recently recorded convergence metric, if any.
    pub fn latest_convergence(&self) -> Option<A> {
        self.convergence_history.last().copied()
    }

    /// Number of recorded rounds.
    pub fn num_rounds(&self) -> usize {
        self.round_times.len()
    }

    /// Previous parameters cannot be reconstructed from the placeholder
    /// snapshot, so this always returns `None` (and the convergence metric
    /// therefore stays at infinity).
    fn get_previous_parameters<D: Dimension>(&self) -> Option<Vec<Array<A, D>>> {
        None
    }
}

impl<A: Float + Send + Sync> Default for TrainingStats<A> {
    fn default() -> Self {
        Self::new()
    }
}

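/// Gradient compression strategies for reducing communication volume.
///
/// Strategies can be composed; a minimal sketch of error feedback wrapped
/// around top-k sparsification (illustrative only; not compiled as a
/// doctest):
///
/// ```rust,ignore
/// let strategy = CompressionStrategy::ErrorFeedback {
///     base_strategy: Box::new(CompressionStrategy::TopK { k: 2 }),
///     error_compensation: true,
/// };
/// let mut compressor: GradientCompressor<f64, scirs2_core::ndarray::Ix1> =
///     GradientCompressor::new(strategy);
/// ```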
#[derive(Debug, Clone, PartialEq)]
pub enum CompressionStrategy {
    /// No compression; values are serialized as-is
    None,
    /// Keep only the `k` largest-magnitude entries per gradient
    TopK {
        /// Number of entries to keep
        k: usize,
    },
    /// Keep `k` pseudo-randomly selected entries per gradient
    RandomK {
        /// Number of entries to keep
        k: usize,
    },
    /// Keep only entries whose magnitude exceeds `threshold`
    Threshold {
        /// Magnitude cutoff
        threshold: f64,
    },
    /// Uniform quantization to `bits` bits per value
    Quantization {
        /// Bits per value (1..=32)
        bits: u8,
    },
    /// Wrap a base strategy and feed the compression error back into the
    /// next round's gradients
    ErrorFeedback {
        /// Strategy used for the actual compression
        base_strategy: Box<CompressionStrategy>,
        /// Whether error compensation is enabled
        error_compensation: bool,
    },
    /// Clip gradient values before applying a base strategy
    ClippedCompression {
        /// Strategy used after clipping
        base_strategy: Box<CompressionStrategy>,
        /// Values are clipped into [-clip_value, clip_value]
        clip_value: f64,
    },
}

/// A compressed gradient payload plus the metadata needed to decompress it.
#[derive(Debug, Clone)]
pub struct CompressedGradient<A: Float> {
    /// Serialized gradient data
    pub data: Vec<u8>,
    /// Compression metadata
    pub metadata: CompressionMetadata<A>,
    /// Original shape of each gradient array
    pub shapes: Vec<Vec<usize>>,
}

/// Metadata describing how a gradient was compressed.
#[derive(Debug, Clone)]
pub struct CompressionMetadata<A: Float> {
    /// Strategy used to produce the payload
    pub strategy: CompressionStrategy,
    /// Compressed size divided by original size (lower is better)
    pub compression_ratio: f64,
    /// Number of non-zero (retained) values
    pub nnz_count: usize,
    /// Per-gradient scale factors (used by quantization)
    pub scale_factors: Vec<A>,
    /// Strategy-specific extra data
    pub extra_data: Vec<u8>,
}

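/// Compresses and decompresses gradients according to a
/// [`CompressionStrategy`].
///
/// A minimal round-trip sketch (illustrative only; not compiled as a
/// doctest):
///
/// ```rust,ignore
/// use scirs2_core::ndarray::Array1;
///
/// let mut compressor = GradientCompressor::new(CompressionStrategy::TopK { k: 2 });
/// let gradients = vec![Array1::from_vec(vec![0.1, 3.0, 0.2, 4.0])];
///
/// let compressed = compressor.compress(&gradients).unwrap();
/// // Only the two largest-magnitude entries (3.0 and 4.0) survive.
/// let decompressed = compressor.decompress(&compressed).unwrap();
/// ```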
#[derive(Debug)]
pub struct GradientCompressor<A: Float, D: Dimension> {
    /// Compression strategy
    strategy: CompressionStrategy,
    /// Accumulated compression error (used by error feedback)
    error_state: Option<Vec<Array<A, D>>>,
    /// Running compression statistics
    stats: CompressionStats,
}

impl<A: Float + ScalarOperand + Debug + Send + Sync, D: Dimension + Send + Sync>
    GradientCompressor<A, D>
{
    /// Creates a new compressor for the given strategy.
    pub fn new(strategy: CompressionStrategy) -> Self {
        Self {
            strategy,
            error_state: None,
            stats: CompressionStats::new(),
        }
    }

    /// Allocates error-feedback buffers matching the given gradient shapes.
    pub fn initialize_error_state(&mut self, gradient_shapes: &[Array<A, D>]) {
        self.error_state = Some(
            gradient_shapes
                .iter()
                .map(|g| Array::zeros(g.raw_dim()))
                .collect(),
        );
    }

    /// Compresses the given gradients according to the configured strategy.
    pub fn compress(&mut self, gradients: &[Array<A, D>]) -> Result<CompressedGradient<A>> {
        // With error feedback enabled, add the accumulated compression error
        // back into the gradients before compressing.
        let mut working_gradients: Vec<Array<A, D>> =
            if let Some(ref error_state) = self.error_state {
                gradients
                    .iter()
                    .zip(error_state.iter())
                    .map(|(grad, error)| grad + error)
                    .collect()
            } else {
                gradients.to_vec()
            };

        let (compressed_data, metadata) = match &self.strategy {
            CompressionStrategy::None => self.compress_none(&working_gradients)?,
            CompressionStrategy::TopK { k } => self.compress_topk(&working_gradients, *k)?,
            CompressionStrategy::RandomK { k } => self.compress_randomk(&working_gradients, *k)?,
            CompressionStrategy::Threshold { threshold } => {
                self.compress_threshold(&working_gradients, A::from(*threshold).unwrap())?
            }
            CompressionStrategy::Quantization { bits } => {
                self.compress_quantization(&working_gradients, *bits)?
            }
            CompressionStrategy::ErrorFeedback { base_strategy, .. } => {
                // Compress with the base strategy, then record what was lost
                // so it can be compensated in the next round.
                let mut temp_compressor = GradientCompressor::new((**base_strategy).clone());
                let compressed = temp_compressor.compress(&working_gradients)?;
                let decompressed = temp_compressor.decompress(&compressed)?;

                if let Some(ref mut error_state) = self.error_state {
                    // The residual is measured against the compensated
                    // gradient (gradient + carried error), which is the
                    // quantity the next round needs to recover.
                    for ((compensated, decompressed), error) in working_gradients
                        .iter()
                        .zip(decompressed.iter())
                        .zip(error_state.iter_mut())
                    {
                        *error = compensated - decompressed;
                    }
                }

                (compressed.data, compressed.metadata)
            }
            CompressionStrategy::ClippedCompression {
                base_strategy,
                clip_value,
            } => {
                // Clip values into [-clip_value, clip_value] first.
                let clip_val = A::from(*clip_value).unwrap();
                for grad in &mut working_gradients {
                    grad.mapv_inplace(|x| {
                        if x > clip_val {
                            clip_val
                        } else if x < -clip_val {
                            -clip_val
                        } else {
                            x
                        }
                    });
                }

                let mut temp_compressor = GradientCompressor::new((**base_strategy).clone());
                let compressed = temp_compressor.compress(&working_gradients)?;
                (compressed.data, compressed.metadata)
            }
        };

        let shapes = gradients.iter().map(|g| g.shape().to_vec()).collect();

        let result = CompressedGradient {
            data: compressed_data,
            metadata,
            shapes,
        };

        let original_size = self.calculate_size(gradients);
        let compressed_size = result.data.len();
        self.stats
            .record_compression(original_size, compressed_size);

        Ok(result)
    }

    /// Reconstructs gradients from a compressed payload.
    pub fn decompress(&self, compressed: &CompressedGradient<A>) -> Result<Vec<Array<A, D>>> {
        match &compressed.metadata.strategy {
            CompressionStrategy::None => self.decompress_none(compressed),
            CompressionStrategy::TopK { .. } => self.decompress_sparse(compressed),
            CompressionStrategy::RandomK { .. } => self.decompress_sparse(compressed),
            CompressionStrategy::Threshold { .. } => self.decompress_sparse(compressed),
            CompressionStrategy::Quantization { bits } => {
                self.decompress_quantization(compressed, *bits)
            }
            CompressionStrategy::ErrorFeedback { base_strategy, .. } => {
                let temp_compressor = GradientCompressor::new((**base_strategy).clone());
                temp_compressor.decompress(compressed)
            }
            CompressionStrategy::ClippedCompression { base_strategy, .. } => {
                let temp_compressor = GradientCompressor::new((**base_strategy).clone());
                temp_compressor.decompress(compressed)
            }
        }
    }

    fn compress_none(
        &self,
        gradients: &[Array<A, D>],
    ) -> Result<(Vec<u8>, CompressionMetadata<A>)> {
        let mut data = Vec::new();

        // Serialize every value as a little-endian f64.
        for grad in gradients {
            for &val in grad.iter() {
                data.extend_from_slice(&val.to_f64().unwrap().to_le_bytes());
            }
        }

        let metadata = CompressionMetadata {
            strategy: CompressionStrategy::None,
            compression_ratio: 1.0,
            nnz_count: gradients.iter().map(|g| g.len()).sum(),
            scale_factors: Vec::new(),
            extra_data: Vec::new(),
        };

        Ok((data, metadata))
    }

    fn compress_topk(
        &self,
        gradients: &[Array<A, D>],
        k: usize,
    ) -> Result<(Vec<u8>, CompressionMetadata<A>)> {
        let mut indices = Vec::new();
        let mut values = Vec::new();
        let mut total_elements = 0;

        for (grad_idx, grad) in gradients.iter().enumerate() {
            total_elements += grad.len();

            // Rank entries by magnitude and keep the k largest per gradient.
            let mut value_indices: Vec<(A, usize)> = grad
                .iter()
                .enumerate()
                .map(|(i, &val)| (val.abs(), i))
                .collect();

            value_indices.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());

            let k_local = k.min(value_indices.len());
            for (_, orig_idx) in value_indices.iter().take(k_local) {
                indices.push((grad_idx as u32, *orig_idx as u32));
                values.push(grad.iter().nth(*orig_idx).copied().unwrap());
            }
        }

        // Sparse format: u32 entry count, then (u32 grad_idx, u32 elem_idx,
        // f64 value) triples, all little-endian.
        let mut data = Vec::new();

        data.extend_from_slice(&(indices.len() as u32).to_le_bytes());

        for ((grad_idx, elem_idx), value) in indices.iter().zip(values.iter()) {
            data.extend_from_slice(&grad_idx.to_le_bytes());
            data.extend_from_slice(&elem_idx.to_le_bytes());
            data.extend_from_slice(&value.to_f64().unwrap().to_le_bytes());
        }

        let metadata = CompressionMetadata {
            strategy: CompressionStrategy::TopK { k },
            compression_ratio: data.len() as f64 / (total_elements * 8) as f64,
            nnz_count: indices.len(),
            scale_factors: Vec::new(),
            extra_data: Vec::new(),
        };

        Ok((data, metadata))
    }

    fn compress_randomk(
        &self,
        gradients: &[Array<A, D>],
        k: usize,
    ) -> Result<(Vec<u8>, CompressionMetadata<A>)> {
        let mut indices = Vec::new();
        let mut values = Vec::new();
        let mut total_elements = 0;

        for (grad_idx, grad) in gradients.iter().enumerate() {
            total_elements += grad.len();

            let k_local = k.min(grad.len());
            let mut selected_indices: Vec<usize> = (0..grad.len()).collect();

            // Deterministic partial shuffle used as a cheap stand-in for
            // random selection (avoids an RNG dependency).
            for i in 0..k_local {
                let swap_idx = i + ((grad_idx + i) % (grad.len() - i));
                selected_indices.swap(i, swap_idx);
            }

            for &idx in selected_indices.iter().take(k_local) {
                indices.push((grad_idx as u32, idx as u32));
                values.push(grad.iter().nth(idx).copied().unwrap());
            }
        }

        // Same sparse format as top-k.
        let mut data = Vec::new();
        data.extend_from_slice(&(indices.len() as u32).to_le_bytes());

        for ((grad_idx, elem_idx), value) in indices.iter().zip(values.iter()) {
            data.extend_from_slice(&grad_idx.to_le_bytes());
            data.extend_from_slice(&elem_idx.to_le_bytes());
            data.extend_from_slice(&value.to_f64().unwrap().to_le_bytes());
        }

        let metadata = CompressionMetadata {
            strategy: CompressionStrategy::RandomK { k },
            compression_ratio: data.len() as f64 / (total_elements * 8) as f64,
            nnz_count: indices.len(),
            scale_factors: Vec::new(),
            extra_data: Vec::new(),
        };

        Ok((data, metadata))
    }

    fn compress_threshold(
        &self,
        gradients: &[Array<A, D>],
        threshold: A,
    ) -> Result<(Vec<u8>, CompressionMetadata<A>)> {
        let mut indices = Vec::new();
        let mut values = Vec::new();
        let mut total_elements = 0;

        for (grad_idx, grad) in gradients.iter().enumerate() {
            total_elements += grad.len();

            for (elem_idx, &val) in grad.iter().enumerate() {
                if val.abs() > threshold {
                    indices.push((grad_idx as u32, elem_idx as u32));
                    values.push(val);
                }
            }
        }

        // Same sparse format as top-k.
        let mut data = Vec::new();
        data.extend_from_slice(&(indices.len() as u32).to_le_bytes());

        for ((grad_idx, elem_idx), value) in indices.iter().zip(values.iter()) {
            data.extend_from_slice(&grad_idx.to_le_bytes());
            data.extend_from_slice(&elem_idx.to_le_bytes());
            data.extend_from_slice(&value.to_f64().unwrap().to_le_bytes());
        }

        let metadata = CompressionMetadata {
            strategy: CompressionStrategy::Threshold {
                threshold: threshold.to_f64().unwrap(),
            },
            compression_ratio: data.len() as f64 / (total_elements * 8) as f64,
            nnz_count: indices.len(),
            scale_factors: Vec::new(),
            extra_data: Vec::new(),
        };

        Ok((data, metadata))
    }

    fn compress_quantization(
        &self,
        gradients: &[Array<A, D>],
        bits: u8,
    ) -> Result<(Vec<u8>, CompressionMetadata<A>)> {
        if bits == 0 || bits > 32 {
            return Err(OptimError::InvalidConfig(
                "Quantization bits must be between 1 and 32".to_string(),
            ));
        }

        let mut data = Vec::new();
        let mut scale_factors = Vec::new();
        let levels = (1u64 << bits) - 1;

        for grad in gradients {
            // Per-gradient affine quantization: map [min, max] onto
            // [0, levels] with q = floor((val - min) / scale).
            let min_val = grad.iter().fold(A::infinity(), |acc, &x| acc.min(x));
            let max_val = grad.iter().fold(A::neg_infinity(), |acc, &x| acc.max(x));

            let range = max_val - min_val;
            let scale = if range > A::zero() {
                range / A::from(levels).unwrap()
            } else {
                A::one()
            };

            scale_factors.push(scale);

            for &val in grad.iter() {
                let normalized = (val - min_val) / scale;
                let quantized = normalized.to_u64().unwrap().min(levels) as u32;

                // Use the smallest integer width that holds `bits`.
                match bits {
                    1..=8 => data.push(quantized as u8),
                    9..=16 => data.extend_from_slice(&(quantized as u16).to_le_bytes()),
                    17..=32 => data.extend_from_slice(&quantized.to_le_bytes()),
                    _ => unreachable!(),
                }
            }

            // Store the per-gradient minimum after its values.
            data.extend_from_slice(&min_val.to_f64().unwrap().to_le_bytes());
        }

        let total_elements: usize = gradients.iter().map(|g| g.len()).sum();
        let metadata = CompressionMetadata {
            strategy: CompressionStrategy::Quantization { bits },
            compression_ratio: data.len() as f64 / (total_elements * 8) as f64,
            nnz_count: total_elements,
            scale_factors,
            extra_data: Vec::new(),
        };

        Ok((data, metadata))
    }

    fn decompress_none(&self, compressed: &CompressedGradient<A>) -> Result<Vec<Array<A, D>>> {
        let mut result = Vec::new();
        let mut data_offset = 0;

        for shape in &compressed.shapes {
            let num_elements: usize = shape.iter().product();
            let mut values = Vec::with_capacity(num_elements);

            for _ in 0..num_elements {
                if data_offset + 8 > compressed.data.len() {
                    return Err(OptimError::InvalidConfig(
                        "Insufficient data for decompression".to_string(),
                    ));
                }

                let bytes = &compressed.data[data_offset..data_offset + 8];
                let value = f64::from_le_bytes(bytes.try_into().unwrap());
                values.push(A::from(value).unwrap());
                data_offset += 8;
            }

            let dynamic_array = Array::from_shape_vec(shape.as_slice(), values).map_err(|_| {
                OptimError::InvalidConfig("Invalid shape for reconstruction".to_string())
            })?;
            let array = dynamic_array.into_dimensionality::<D>().map_err(|_| {
                OptimError::InvalidConfig("Dimension conversion failed".to_string())
            })?;
            result.push(array);
        }

        Ok(result)
    }

    fn decompress_sparse(&self, compressed: &CompressedGradient<A>) -> Result<Vec<Array<A, D>>> {
        let mut result = Vec::new();

        // Start from zero arrays and scatter the stored entries into them.
        for shape in &compressed.shapes {
            let dynamic_array = Array::zeros(shape.as_slice());
            let array = dynamic_array.into_dimensionality::<D>().map_err(|_| {
                OptimError::InvalidConfig("Dimension conversion failed for zero array".to_string())
            })?;
            result.push(array);
        }

        if compressed.data.len() < 4 {
            return Err(OptimError::InvalidConfig(
                "Invalid compressed data format".to_string(),
            ));
        }

        let num_elements = u32::from_le_bytes(compressed.data[0..4].try_into().unwrap()) as usize;
        let mut data_offset = 4;

        // Each entry is (u32 grad_idx, u32 elem_idx, f64 value) = 16 bytes.
        for _ in 0..num_elements {
            if data_offset + 16 > compressed.data.len() {
                return Err(OptimError::InvalidConfig(
                    "Insufficient data for sparse decompression".to_string(),
                ));
            }

            let grad_idx = u32::from_le_bytes(
                compressed.data[data_offset..data_offset + 4]
                    .try_into()
                    .unwrap(),
            ) as usize;
            let elem_idx = u32::from_le_bytes(
                compressed.data[data_offset + 4..data_offset + 8]
                    .try_into()
                    .unwrap(),
            ) as usize;
            let value_bytes = &compressed.data[data_offset + 8..data_offset + 16];
            let value = A::from(f64::from_le_bytes(value_bytes.try_into().unwrap())).unwrap();

            data_offset += 16;

            if grad_idx >= result.len() {
                return Err(OptimError::InvalidConfig(
                    "Invalid gradient index in compressed data".to_string(),
                ));
            }

            if let Some(elem) = result[grad_idx].iter_mut().nth(elem_idx) {
                *elem = value;
            } else {
                return Err(OptimError::InvalidConfig(
                    "Invalid element index in compressed data".to_string(),
                ));
            }
        }

        Ok(result)
    }

    fn decompress_quantization(
        &self,
        compressed: &CompressedGradient<A>,
        bits: u8,
    ) -> Result<Vec<Array<A, D>>> {
        let mut result = Vec::new();
        let mut data_offset = 0;

        for (grad_idx, shape) in compressed.shapes.iter().enumerate() {
            let num_elements: usize = shape.iter().product();
            let mut values = Vec::with_capacity(num_elements);

            // Read the quantized values at the width used by `bits`.
            for _ in 0..num_elements {
                let quantized = match bits {
                    1..=8 => {
                        if data_offset >= compressed.data.len() {
                            return Err(OptimError::InvalidConfig(
                                "Insufficient quantized data".to_string(),
                            ));
                        }
                        let val = compressed.data[data_offset] as u32;
                        data_offset += 1;
                        val
                    }
                    9..=16 => {
                        if data_offset + 2 > compressed.data.len() {
                            return Err(OptimError::InvalidConfig(
                                "Insufficient quantized data".to_string(),
                            ));
                        }
                        let val = u16::from_le_bytes(
                            compressed.data[data_offset..data_offset + 2]
                                .try_into()
                                .unwrap(),
                        ) as u32;
                        data_offset += 2;
                        val
                    }
                    17..=32 => {
                        if data_offset + 4 > compressed.data.len() {
                            return Err(OptimError::InvalidConfig(
                                "Insufficient quantized data".to_string(),
                            ));
                        }
                        let val = u32::from_le_bytes(
                            compressed.data[data_offset..data_offset + 4]
                                .try_into()
                                .unwrap(),
                        );
                        data_offset += 4;
                        val
                    }
                    _ => {
                        return Err(OptimError::InvalidConfig(
                            "Invalid quantization bits".to_string(),
                        ))
                    }
                };

                values.push(quantized);
            }

            // The per-gradient minimum follows the quantized values.
            if data_offset + 8 > compressed.data.len() {
                return Err(OptimError::InvalidConfig(
                    "Missing min value for quantization".to_string(),
                ));
            }
            let min_bytes = &compressed.data[data_offset..data_offset + 8];
            let min_val = A::from(f64::from_le_bytes(min_bytes.try_into().unwrap())).unwrap();
            data_offset += 8;

            let scale = if grad_idx < compressed.metadata.scale_factors.len() {
                compressed.metadata.scale_factors[grad_idx]
            } else {
                return Err(OptimError::InvalidConfig(
                    "Missing scale factor for quantization".to_string(),
                ));
            };

            // Dequantize: val = min + q * scale.
            let dequantized_values: Vec<A> = values
                .into_iter()
                .map(|q| min_val + A::from(q).unwrap() * scale)
                .collect();

            let dynamic_array = Array::from_shape_vec(shape.as_slice(), dequantized_values)
                .map_err(|_| {
                    OptimError::InvalidConfig(
                        "Invalid shape for quantized reconstruction".to_string(),
                    )
                })?;
            let array = dynamic_array.into_dimensionality::<D>().map_err(|_| {
                OptimError::InvalidConfig(
                    "Dimension conversion failed for quantized array".to_string(),
                )
            })?;
            result.push(array);
        }

        Ok(result)
    }

    /// Size of the uncompressed gradients in bytes.
    fn calculate_size(&self, gradients: &[Array<A, D>]) -> usize {
        gradients
            .iter()
            .map(|g| g.len() * std::mem::size_of::<A>())
            .sum()
    }

    /// Running compression statistics.
    pub fn stats(&self) -> &CompressionStats {
        &self.stats
    }

    /// Resets the compression statistics.
    pub fn reset_stats(&mut self) {
        self.stats = CompressionStats::new();
    }
}

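/// Running statistics over all compressions performed by a
/// [`GradientCompressor`]. Ratios are compressed size divided by original
/// size, so smaller is better; for example, recording (1000, 500) and then
/// (1000, 250) yields an overall ratio of 750 / 2000 = 0.375 and 62.5%
/// bandwidth savings.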
#[derive(Debug, Clone)]
pub struct CompressionStats {
    /// Number of compressions performed
    pub compressions_count: usize,
    /// Total bytes before compression
    pub total_original_bytes: usize,
    /// Total bytes after compression
    pub total_compressed_bytes: usize,
    /// Overall ratio of compressed to original bytes
    pub average_compression_ratio: f64,
    /// Smallest (best) per-compression ratio seen
    pub best_compression_ratio: f64,
    /// Largest (worst) per-compression ratio seen
    pub worst_compression_ratio: f64,
}

impl CompressionStats {
    pub fn new() -> Self {
        Self {
            compressions_count: 0,
            total_original_bytes: 0,
            total_compressed_bytes: 0,
            average_compression_ratio: 0.0,
            best_compression_ratio: f64::INFINITY,
            worst_compression_ratio: 0.0,
        }
    }

    /// Records one compression of `original_bytes` down to `compressed_bytes`.
    pub fn record_compression(&mut self, original_bytes: usize, compressed_bytes: usize) {
        self.compressions_count += 1;
        self.total_original_bytes += original_bytes;
        self.total_compressed_bytes += compressed_bytes;

        let ratio = if original_bytes > 0 {
            compressed_bytes as f64 / original_bytes as f64
        } else {
            1.0
        };

        self.best_compression_ratio = self.best_compression_ratio.min(ratio);
        self.worst_compression_ratio = self.worst_compression_ratio.max(ratio);

        // The "average" is byte-weighted: total compressed / total original.
        self.average_compression_ratio = if self.total_original_bytes > 0 {
            self.total_compressed_bytes as f64 / self.total_original_bytes as f64
        } else {
            0.0
        };
    }

    /// Byte-weighted compression ratio over all recorded compressions.
    pub fn overall_compression_ratio(&self) -> f64 {
        self.average_compression_ratio
    }

    /// Bandwidth saved, as a percentage of the original volume.
    pub fn bandwidth_savings(&self) -> f64 {
        (1.0 - self.average_compression_ratio) * 100.0
    }
}

impl Default for CompressionStats {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use scirs2_core::ndarray::Array1;

    #[test]
    fn test_arithmetic_averaging() {
        let mut averager: ParameterAverager<f64, scirs2_core::ndarray::Ix1> =
            ParameterAverager::new(AveragingStrategy::Arithmetic, 3);

        let params1 = vec![Array1::from_vec(vec![1.0, 2.0])];
        let params2 = vec![Array1::from_vec(vec![3.0, 4.0])];
        let params3 = vec![Array1::from_vec(vec![5.0, 6.0])];

        let node_parameters = vec![(0, params1), (1, params2), (2, params3)];

        averager.average_parameters(&node_parameters).unwrap();

        let result = averager.get_averaged_parameters();
        assert_relative_eq!(result[0][0], 3.0, epsilon = 1e-6); // (1 + 3 + 5) / 3
        assert_relative_eq!(result[0][1], 4.0, epsilon = 1e-6); // (2 + 4 + 6) / 3
    }

    #[test]
    fn test_weighted_averaging() {
        let mut averager: ParameterAverager<f64, scirs2_core::ndarray::Ix1> =
            ParameterAverager::new(AveragingStrategy::WeightedByData, 2);

        let params1 = vec![Array1::from_vec(vec![2.0])];
        let params2 = vec![Array1::from_vec(vec![6.0])];
        let node_parameters = vec![(0, params1.clone()), (1, params2.clone())];
        averager.initialize(&params1).unwrap();

        averager.set_node_weight(0, 0.75).unwrap();
        averager.set_node_weight(1, 0.25).unwrap();
        averager.average_parameters(&node_parameters).unwrap();

        let result = averager.get_averaged_parameters();
        // 0.75 * 2.0 + 0.25 * 6.0 = 3.0
        assert_relative_eq!(result[0][0], 3.0, epsilon = 1e-6);
    }

    #[test]
    fn test_momentum_averaging() {
        let mut averager: ParameterAverager<f64, scirs2_core::ndarray::Ix1> =
            ParameterAverager::new(AveragingStrategy::Momentum { momentum: 0.9 }, 2);

        let params1 = vec![Array1::from_vec(vec![1.0])];
        let params2 = vec![Array1::from_vec(vec![3.0])];

        let node_parameters1 = vec![(0, params1.clone()), (1, params2.clone())];
        averager.average_parameters(&node_parameters1).unwrap();

        // First step: m_1 = 0.9 * 0 + 0.1 * mean(1, 3) = 0.2
        let result1 = averager.get_averaged_parameters();
        assert!(result1[0][0] >= 0.0 && result1[0][0] <= 0.5);

        // Repeated identical updates drive the buffer toward the mean of 2.0.
        for _ in 0..10 {
            let node_parameters = vec![(0, params1.clone()), (1, params2.clone())];
            averager.average_parameters(&node_parameters).unwrap();
        }

        let final_result = averager.get_averaged_parameters();
        assert!(final_result[0][0] > 0.5 && final_result[0][0] < 2.5);
    }

    #[test]
    fn test_parameter_server() {
        let mut server = ParameterServer::new(AveragingStrategy::Arithmetic, 2, 2);

        let initial_params = vec![Array1::from_vec(vec![0.0, 0.0])];
        server.initialize(&initial_params).unwrap();

        let update1 = vec![Array1::from_vec(vec![1.0, 2.0])];
        let update2 = vec![Array1::from_vec(vec![3.0, 4.0])];

        let ready1 = server.submit_update(0, update1).unwrap();
        assert!(!ready1); // first update alone does not trigger aggregation

        let ready2 = server.submit_update(1, update2).unwrap();
        assert!(ready2); // second update completes the round

        let global_params = server.get_global_parameters();
        assert_relative_eq!(global_params[0][0], 2.0, epsilon = 1e-6); // (1 + 3) / 2
        assert_relative_eq!(global_params[0][1], 3.0, epsilon = 1e-6); // (2 + 4) / 2
        assert_eq!(server.current_round(), 1);
    }

    #[test]
    fn test_distributed_coordinator() {
        let mut coordinator = DistributedCoordinator::new(
            AveragingStrategy::Arithmetic,
            2,  // num_nodes
            2,  // expected_updates_per_round
            10, // max_rounds
        );

        let initial_params = vec![Array1::from_vec(vec![0.0])];
        coordinator.initialize(&initial_params).unwrap();

        for round in 1..=3 {
            let update1 = vec![Array1::from_vec(vec![round as f64])];
            let update2 = vec![Array1::from_vec(vec![(round * 2) as f64])];

            let node_updates = vec![(0, update1), (1, update2)];

            let result = coordinator.communication_round(node_updates).unwrap();

            assert_eq!(result.round, round);
            assert!(result.should_continue);
            // The convergence metric stays at infinity without stored
            // previous parameters, so the run never reports convergence.
            assert!(!result.converged);
            assert!(result.global_parameters[0][0] > 0.0);
        }
    }

    #[test]
    fn test_averaging_strategies() {
        // Strategies whose first result is a direct average of the inputs.
        let simple_strategies = vec![
            AveragingStrategy::Arithmetic,
            AveragingStrategy::WeightedByData,
            AveragingStrategy::Federated,
        ];

        for strategy in simple_strategies {
            let mut averager: ParameterAverager<f64, scirs2_core::ndarray::Ix1> =
                ParameterAverager::new(strategy, 2);

            let params1 = vec![Array1::from_vec(vec![1.0])];
            let params2 = vec![Array1::from_vec(vec![3.0])];

            let node_parameters = vec![(0, params1), (1, params2)];

            averager.average_parameters(&node_parameters).unwrap();
            let result = averager.get_averaged_parameters();
            assert!(result[0][0] >= 1.0 && result[0][0] <= 3.0);
        }

        // Stateful strategies only blend toward the average over many steps.
        let stateful_strategies = vec![
            AveragingStrategy::Momentum { momentum: 0.9 },
            AveragingStrategy::ExponentialMovingAverage { decay: 0.9 },
        ];

        for strategy in stateful_strategies {
            let mut averager: ParameterAverager<f64, scirs2_core::ndarray::Ix1> =
                ParameterAverager::new(strategy, 2);

            let params1 = vec![Array1::from_vec(vec![1.0])];
            let params2 = vec![Array1::from_vec(vec![3.0])];

            let node_parameters = vec![(0, params1), (1, params2)];

            averager.average_parameters(&node_parameters).unwrap();
            let result = averager.get_averaged_parameters();
            assert!(result[0][0] >= 0.0 && result[0][0] <= 3.0);
        }
    }

    #[test]
    fn test_node_weight_validation() {
        let mut averager: ParameterAverager<f64, scirs2_core::ndarray::Ix1> =
            ParameterAverager::new(AveragingStrategy::WeightedByData, 2);

        assert!(averager.set_node_weight(0, 0.5).is_ok());
        assert!(averager.set_node_weight(1, 0.5).is_ok());

        // Node IDs beyond num_nodes are rejected.
        assert!(averager.set_node_weight(2, 0.5).is_err());
    }

    #[test]
    fn test_parameter_dimension_validation() {
        let mut averager: ParameterAverager<f64, scirs2_core::ndarray::Ix1> =
            ParameterAverager::new(AveragingStrategy::Arithmetic, 2);

        let params1 = vec![Array1::from_vec(vec![1.0, 2.0])];
        let params2 = vec![Array1::from_vec(vec![3.0])]; // mismatched length

        let node_parameters = vec![(0, params1), (1, params2)];

        // Mismatched shapes should surface as an error (or, at worst, a
        // panic inside the element-wise zip), never as a silent wrong result.
        let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
            averager.average_parameters(&node_parameters)
        }));

        assert!(result.is_err() || (result.is_ok() && result.unwrap().is_err()));
    }

    #[test]
    fn test_training_stats() {
        let mut stats = TrainingStats::new();

        assert_eq!(stats.num_rounds(), 0);
        assert!(stats.latest_convergence().is_none());

        let params = vec![Array1::from_vec(vec![1.0])];
        stats.record_round(1, 0.5, &params);

        assert_eq!(stats.num_rounds(), 1);
        assert_eq!(stats.latest_convergence(), Some(0.5));
        assert_eq!(stats.convergence_history(), &[0.5]);
    }

    #[test]
    fn test_gradient_compression_none() {
        let mut compressor = GradientCompressor::new(CompressionStrategy::None);

        let gradients = vec![
            Array1::from_vec(vec![1.0, 2.0, 3.0]),
            Array1::from_vec(vec![4.0, 5.0]),
        ];

        let compressed = compressor.compress(&gradients).unwrap();
        assert_eq!(compressed.metadata.strategy, CompressionStrategy::None);
        assert_eq!(compressed.metadata.compression_ratio, 1.0);

        let decompressed = compressor.decompress(&compressed).unwrap();
        assert_eq!(decompressed.len(), 2);
        assert_eq!(decompressed[0].as_slice().unwrap(), &[1.0, 2.0, 3.0]);
        assert_eq!(decompressed[1].as_slice().unwrap(), &[4.0, 5.0]);
    }

    #[test]
    fn test_gradient_compression_topk() {
        let mut compressor = GradientCompressor::new(CompressionStrategy::TopK { k: 2 });

        let gradients = vec![Array1::from_vec(vec![0.1, 3.0, 0.2, 4.0, 0.05])];

        let compressed = compressor.compress(&gradients).unwrap();
        assert!(compressed.metadata.compression_ratio < 1.0);
        assert_eq!(compressed.metadata.nnz_count, 2); // only the top-2 survive

        let decompressed = compressor.decompress(&compressed).unwrap();
        assert_eq!(decompressed.len(), 1);

        let result = &decompressed[0];
        assert_eq!(result[1], 3.0); // kept
        assert_eq!(result[3], 4.0); // kept
        assert_eq!(result[0], 0.0); // dropped
        assert_eq!(result[2], 0.0); // dropped
        assert_eq!(result[4], 0.0); // dropped
    }

    #[test]
    fn test_gradient_compression_threshold() {
        let mut compressor =
            GradientCompressor::new(CompressionStrategy::Threshold { threshold: 1.0 });

        let gradients = vec![Array1::from_vec(vec![0.5, 2.0, 0.8, 3.0, 0.3])];

        let compressed = compressor.compress(&gradients).unwrap();
        assert!(compressed.metadata.compression_ratio < 1.0);
        assert_eq!(compressed.metadata.nnz_count, 2); // only |x| > 1.0 survives

        let decompressed = compressor.decompress(&compressed).unwrap();
        let result = &decompressed[0];

        assert_eq!(result[0], 0.0); // below threshold
        assert_eq!(result[1], 2.0); // kept
        assert_eq!(result[2], 0.0); // below threshold
        assert_eq!(result[3], 3.0); // kept
        assert_eq!(result[4], 0.0); // below threshold
    }

    #[test]
    fn test_gradient_compression_quantization() {
        let mut compressor =
            GradientCompressor::new(CompressionStrategy::Quantization { bits: 8 });

        let gradients = vec![Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0])];

        let compressed = compressor.compress(&gradients).unwrap();
        assert!(compressed.metadata.compression_ratio < 1.0); // ~1 byte per value

        let decompressed = compressor.decompress(&compressed).unwrap();
        let result = &decompressed[0];

        // 8-bit quantization over the range [1, 4] loses at most one step.
        assert!((result[0] - 1.0).abs() < 0.1);
        assert!((result[1] - 2.0).abs() < 0.1);
        assert!((result[2] - 3.0).abs() < 0.1);
        assert!((result[3] - 4.0).abs() < 0.1);
    }

    #[test]
    fn test_gradient_compression_randomk() {
        let mut compressor = GradientCompressor::new(CompressionStrategy::RandomK { k: 3 });

        let gradients = vec![Array1::from_vec(vec![
            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0,
        ])];

        let compressed = compressor.compress(&gradients).unwrap();
        assert!(compressed.metadata.compression_ratio < 1.0);
        assert_eq!(compressed.metadata.nnz_count, 3); // exactly k entries kept

        let decompressed = compressor.decompress(&compressed).unwrap();
        let result = &decompressed[0];

        // All input values are non-zero, so exactly k survive the round trip.
        let non_zero_count = result.iter().filter(|&&x| x != 0.0).count();
        assert_eq!(non_zero_count, 3);
    }

    #[test]
    fn test_gradient_compression_error_feedback() {
        let base_strategy = CompressionStrategy::TopK { k: 2 };
        let strategy = CompressionStrategy::ErrorFeedback {
            base_strategy: Box::new(base_strategy),
            error_compensation: true,
        };

        let mut compressor = GradientCompressor::new(strategy);

        let gradients = vec![Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0])];

        // Error feedback requires explicitly allocated error buffers.
        compressor.initialize_error_state(&gradients);

        let compressed1 = compressor.compress(&gradients).unwrap();
        let decompressed1 = compressor.decompress(&compressed1).unwrap();

        // The second compression sees the accumulated error from the first.
        let compressed2 = compressor.compress(&gradients).unwrap();
        let decompressed2 = compressor.decompress(&compressed2).unwrap();

        assert_eq!(decompressed1.len(), 1);
        assert_eq!(decompressed2.len(), 1);
    }

    #[test]
    fn test_gradient_compression_clipped() {
        let base_strategy = CompressionStrategy::TopK { k: 3 };
        let strategy = CompressionStrategy::ClippedCompression {
            base_strategy: Box::new(base_strategy),
            clip_value: 2.5,
        };

        let mut compressor = GradientCompressor::new(strategy);

        let gradients = vec![Array1::from_vec(vec![1.0, 5.0, -3.0, 2.0])];

        let compressed = compressor.compress(&gradients).unwrap();
        let decompressed = compressor.decompress(&compressed).unwrap();

        let result = &decompressed[0];

        // Every surviving value must have been clipped into [-2.5, 2.5].
        for &val in result.iter() {
            if val != 0.0 {
                assert!((-2.5..=2.5).contains(&val));
            }
        }
    }

    #[test]
    fn test_compression_stats() {
        let mut stats = CompressionStats::new();

        assert_eq!(stats.compressions_count, 0);
        assert_eq!(stats.overall_compression_ratio(), 0.0);

        stats.record_compression(1000, 500); // ratio 0.5
        assert_eq!(stats.compressions_count, 1);
        assert_relative_eq!(stats.overall_compression_ratio(), 0.5, epsilon = 1e-6);
        assert_relative_eq!(stats.bandwidth_savings(), 50.0, epsilon = 1e-6);

        stats.record_compression(1000, 250); // ratio 0.25
        assert_eq!(stats.compressions_count, 2);
        // Byte-weighted overall ratio: (500 + 250) / (1000 + 1000) = 0.375
        assert_relative_eq!(stats.overall_compression_ratio(), 0.375, epsilon = 1e-6);
        assert_relative_eq!(stats.bandwidth_savings(), 62.5, epsilon = 1e-6);

        assert_relative_eq!(stats.best_compression_ratio, 0.25, epsilon = 1e-6);
        assert_relative_eq!(stats.worst_compression_ratio, 0.5, epsilon = 1e-6);
    }

    #[test]
    fn test_compression_roundtrip() {
        let strategies = vec![
            CompressionStrategy::None,
            CompressionStrategy::TopK { k: 2 },
            CompressionStrategy::RandomK { k: 2 },
            CompressionStrategy::Threshold { threshold: 1.5 },
            CompressionStrategy::Quantization { bits: 4 },
        ];

        let gradients = vec![
            Array1::from_vec(vec![1.0, 2.5, 0.5, 3.0]),
            Array1::from_vec(vec![0.1, 4.0]),
        ];

        for strategy in strategies {
            let mut compressor = GradientCompressor::new(strategy.clone());

            let compressed = compressor.compress(&gradients).unwrap();
            let decompressed = compressor.decompress(&compressed).unwrap();

            // Shapes always survive the round trip.
            assert_eq!(decompressed.len(), gradients.len());

            for (orig, decomp) in gradients.iter().zip(decompressed.iter()) {
                assert_eq!(orig.shape(), decomp.shape());
            }

            match strategy {
                // Only the identity strategy is lossless.
                CompressionStrategy::None => {
                    for (orig, decomp) in gradients.iter().zip(decompressed.iter()) {
                        for (&o, &d) in orig.iter().zip(decomp.iter()) {
                            assert_relative_eq!(o, d, epsilon = 1e-10);
                        }
                    }
                }
                // Lossy strategies must still produce finite values.
                _ => {
                    for decomp in &decompressed {
                        assert!(decomp.iter().all(|&x| x.is_finite()));
                    }
                }
            }
        }
    }

    #[test]
    fn test_compression_invalid_configs() {
        // More than 32 quantization bits is rejected.
        let strategy = CompressionStrategy::Quantization { bits: 64 };
        let mut compressor = GradientCompressor::new(strategy);

        let gradients = vec![Array1::from_vec(vec![1.0, 2.0])];
        assert!(compressor.compress(&gradients).is_err());

        // A payload that is too short for the declared shapes is rejected.
        let valid_compressor: GradientCompressor<f64, scirs2_core::ndarray::Ix1> =
            GradientCompressor::new(CompressionStrategy::None);
        let invalid_compressed = CompressedGradient {
            data: vec![1, 2, 3], // far too short for two f64 values
            metadata: CompressionMetadata {
                strategy: CompressionStrategy::None,
                compression_ratio: 1.0,
                nnz_count: 1,
                scale_factors: vec![],
                extra_data: vec![],
            },
            shapes: vec![vec![2]],
        };

        assert!(valid_compressor.decompress(&invalid_compressed).is_err());
    }

    #[test]
    fn test_distributed_with_compression() {
        let mut server = ParameterServer::new(AveragingStrategy::Arithmetic, 2, 2);
        let initial_params = vec![Array1::from_vec(vec![0.0, 0.0])];
        server.initialize(&initial_params).unwrap();

        let mut compressor = GradientCompressor::new(CompressionStrategy::TopK { k: 1 });

        let gradients1 = vec![Array1::from_vec(vec![1.0, 3.0])]; // top-1 keeps 3.0
        let gradients2 = vec![Array1::from_vec(vec![2.0, 1.0])]; // top-1 keeps 2.0

        let compressed1 = compressor.compress(&gradients1).unwrap();
        let compressed2 = compressor.compress(&gradients2).unwrap();

        let decompressed1 = compressor.decompress(&compressed1).unwrap();
        let decompressed2 = compressor.decompress(&compressed2).unwrap();

        server.submit_update(0, decompressed1).unwrap();
        server.submit_update(1, decompressed2).unwrap();

        let global_params = server.get_global_parameters();

        // Averages of the sparsified updates: [(0 + 2) / 2, (3 + 0) / 2].
        assert_relative_eq!(global_params[0][0], 1.0, epsilon = 1e-6);
        assert_relative_eq!(global_params[0][1], 1.5, epsilon = 1e-6);
    }
}