use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};

use axonml_autograd::Variable;
use axonml_tensor::Tensor;
use parking_lot::RwLock;

use crate::init::{ones, zeros};
use crate::module::Module;
use crate::parameter::Parameter;

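/// 1D batch normalization over a 2D `(N, C)` or 3D `(N, C, L)` input.
///
/// Normalizes each feature channel as `y = (x - mean) / sqrt(var + eps) * weight + bias`,
/// using batch statistics during training and (optionally) tracked running
/// statistics during evaluation.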
pub struct BatchNorm1d {
    pub weight: Parameter,
    pub bias: Parameter,
    running_mean: RwLock<Tensor<f32>>,
    running_var: RwLock<Tensor<f32>>,
    num_features: usize,
    eps: f32,
    momentum: f32,
    track_running_stats: bool,
    training: AtomicBool,
}

impl BatchNorm1d {
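    /// Creates a `BatchNorm1d` with default options: `eps = 1e-5`, `momentum = 0.1`,
    /// and running-statistics tracking enabled.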
    pub fn new(num_features: usize) -> Self {
        Self::with_options(num_features, 1e-5, 0.1, true)
    }

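    /// Creates a `BatchNorm1d` with explicit `eps`, `momentum`, and
    /// `track_running_stats` settings.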
    pub fn with_options(
        num_features: usize,
        eps: f32,
        momentum: f32,
        track_running_stats: bool,
    ) -> Self {
        Self {
            weight: Parameter::named("weight", ones(&[num_features]), true),
            bias: Parameter::named("bias", zeros(&[num_features]), true),
            running_mean: RwLock::new(zeros(&[num_features])),
            running_var: RwLock::new(ones(&[num_features])),
            num_features,
            eps,
            momentum,
            track_running_stats,
            training: AtomicBool::new(true),
        }
    }

    pub fn num_features(&self) -> usize {
        self.num_features
    }
}

impl Module for BatchNorm1d {
    fn forward(&self, input: &Variable) -> Variable {
        let input_data = input.data();
        let shape = input_data.shape().to_vec();
        let batch_size = shape[0];
        let num_features = shape[1];

        assert_eq!(
            num_features, self.num_features,
            "BatchNorm1d: expected {} features, got {}",
            self.num_features, num_features
        );

        let input_vec = input_data.to_vec();
        let weight_vec = self.weight.data().to_vec();
        let bias_vec = self.bias.data().to_vec();

        let is_training = self.training.load(Ordering::Relaxed);
        let spatial_size: usize = if shape.len() > 2 {
            shape[2..].iter().product()
        } else {
            1
        };

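        // Per-channel statistics: batch statistics when training (or when running
        // stats are not tracked), otherwise the running buffers from training.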
        let mut means = vec![0.0f32; num_features];
        let mut vars = vec![0.0f32; num_features];

        // Batch statistics are also used in eval mode when running stats are not tracked.
        if is_training || !self.track_running_stats {
            for c in 0..num_features {
                let mut sum = 0.0f32;
                for b in 0..batch_size {
                    for s in 0..spatial_size {
                        let idx = b * num_features * spatial_size + c * spatial_size + s;
                        sum += input_vec[idx];
                    }
                }
                means[c] = sum / (batch_size * spatial_size) as f32;

                let mut var_sum = 0.0f32;
                for b in 0..batch_size {
                    for s in 0..spatial_size {
                        let idx = b * num_features * spatial_size + c * spatial_size + s;
                        let diff = input_vec[idx] - means[c];
                        var_sum += diff * diff;
                    }
                }
                vars[c] = var_sum / (batch_size * spatial_size) as f32;
            }

            if self.track_running_stats {
                let mut running_mean = self.running_mean.write();
                let mut running_var = self.running_var.write();
                let running_mean_vec = running_mean.to_vec();
                let running_var_vec = running_var.to_vec();

                let new_mean: Vec<f32> = running_mean_vec
                    .iter()
                    .zip(means.iter())
                    .map(|(&rm, &m)| (1.0 - self.momentum) * rm + self.momentum * m)
                    .collect();
                let new_var: Vec<f32> = running_var_vec
                    .iter()
                    .zip(vars.iter())
                    .map(|(&rv, &v)| (1.0 - self.momentum) * rv + self.momentum * v)
                    .collect();

                *running_mean = Tensor::from_vec(new_mean, &[num_features]).unwrap();
                *running_var = Tensor::from_vec(new_var, &[num_features]).unwrap();
            }
        } else {
            means = self.running_mean.read().to_vec();
            vars = self.running_var.read().to_vec();
        }

        let mut output_vec = vec![0.0f32; input_vec.len()];
        for b in 0..batch_size {
            for c in 0..num_features {
                for s in 0..spatial_size {
                    let idx = b * num_features * spatial_size + c * spatial_size + s;
                    let normalized = (input_vec[idx] - means[c]) / (vars[c] + self.eps).sqrt();
                    output_vec[idx] = normalized * weight_vec[c] + bias_vec[c];
                }
            }
        }

        let output = Tensor::from_vec(output_vec, &shape).unwrap();
        Variable::new(output, input.requires_grad())
    }

    fn parameters(&self) -> Vec<Parameter> {
        vec![self.weight.clone(), self.bias.clone()]
    }

    fn named_parameters(&self) -> HashMap<String, Parameter> {
        let mut params = HashMap::new();
        params.insert("weight".to_string(), self.weight.clone());
        params.insert("bias".to_string(), self.bias.clone());
        params
    }

    fn set_training(&mut self, training: bool) {
        self.training.store(training, Ordering::Relaxed);
    }

    fn is_training(&self) -> bool {
        self.training.load(Ordering::Relaxed)
    }

    fn name(&self) -> &'static str {
        "BatchNorm1d"
    }
}

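/// 2D batch normalization over a 4D `(N, C, H, W)` input.
///
/// Normalizes each channel as `y = (x - mean) / sqrt(var + eps) * weight + bias`,
/// using per-batch statistics during training and tracked running statistics
/// during evaluation.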
pub struct BatchNorm2d {
    pub weight: Parameter,
    pub bias: Parameter,
    running_mean: RwLock<Tensor<f32>>,
    running_var: RwLock<Tensor<f32>>,
    num_features: usize,
    eps: f32,
    momentum: f32,
    training: AtomicBool,
}

impl BatchNorm2d {
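    /// Creates a `BatchNorm2d` with default options: `eps = 1e-5` and `momentum = 0.1`.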
    pub fn new(num_features: usize) -> Self {
        Self::with_options(num_features, 1e-5, 0.1)
    }

    pub fn with_options(num_features: usize, eps: f32, momentum: f32) -> Self {
        Self {
            weight: Parameter::named("weight", ones(&[num_features]), true),
            bias: Parameter::named("bias", zeros(&[num_features]), true),
            running_mean: RwLock::new(zeros(&[num_features])),
            running_var: RwLock::new(ones(&[num_features])),
            num_features,
            eps,
            momentum,
            training: AtomicBool::new(true),
        }
    }

    pub fn num_features(&self) -> usize {
        self.num_features
    }
}

impl Module for BatchNorm2d {
    fn forward(&self, input: &Variable) -> Variable {
        let input_data = input.data();
        let shape = input_data.shape().to_vec();

        assert!(shape.len() == 4, "BatchNorm2d expects 4D input (N, C, H, W)");

        let batch_size = shape[0];
        let channels = shape[1];
        let height = shape[2];
        let width = shape[3];
        let spatial_size = height * width;

        assert_eq!(
            channels, self.num_features,
            "BatchNorm2d: expected {} channels, got {}",
            self.num_features, channels
        );

        let input_vec = input_data.to_vec();
        let weight_vec = self.weight.data().to_vec();
        let bias_vec = self.bias.data().to_vec();

        let is_training = self.training.load(Ordering::Relaxed);

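        // Per-channel statistics: computed from the current batch while training,
        // otherwise taken from the running buffers.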
        let mut means = vec![0.0f32; channels];
        let mut vars = vec![0.0f32; channels];

        if is_training {
            for c in 0..channels {
                let mut sum = 0.0f32;
                for b in 0..batch_size {
                    for h in 0..height {
                        for w in 0..width {
                            let idx =
                                b * channels * spatial_size + c * spatial_size + h * width + w;
                            sum += input_vec[idx];
                        }
                    }
                }
                means[c] = sum / (batch_size * spatial_size) as f32;

                let mut var_sum = 0.0f32;
                for b in 0..batch_size {
                    for h in 0..height {
                        for w in 0..width {
                            let idx =
                                b * channels * spatial_size + c * spatial_size + h * width + w;
                            let diff = input_vec[idx] - means[c];
                            var_sum += diff * diff;
                        }
                    }
                }
                vars[c] = var_sum / (batch_size * spatial_size) as f32;
            }

            let mut running_mean = self.running_mean.write();
            let mut running_var = self.running_var.write();
            let running_mean_vec = running_mean.to_vec();
            let running_var_vec = running_var.to_vec();

            let new_mean: Vec<f32> = running_mean_vec
                .iter()
                .zip(means.iter())
                .map(|(&rm, &m)| (1.0 - self.momentum) * rm + self.momentum * m)
                .collect();
            let new_var: Vec<f32> = running_var_vec
                .iter()
                .zip(vars.iter())
                .map(|(&rv, &v)| (1.0 - self.momentum) * rv + self.momentum * v)
                .collect();

            *running_mean = Tensor::from_vec(new_mean, &[channels]).unwrap();
            *running_var = Tensor::from_vec(new_var, &[channels]).unwrap();
        } else {
            means = self.running_mean.read().to_vec();
            vars = self.running_var.read().to_vec();
        }

        let mut output_vec = vec![0.0f32; input_vec.len()];
        for b in 0..batch_size {
            for c in 0..channels {
                for h in 0..height {
                    for w in 0..width {
                        let idx = b * channels * spatial_size + c * spatial_size + h * width + w;
                        let normalized = (input_vec[idx] - means[c]) / (vars[c] + self.eps).sqrt();
                        output_vec[idx] = normalized * weight_vec[c] + bias_vec[c];
                    }
                }
            }
        }

        let output = Tensor::from_vec(output_vec, &shape).unwrap();
        Variable::new(output, input.requires_grad())
    }

    fn parameters(&self) -> Vec<Parameter> {
        vec![self.weight.clone(), self.bias.clone()]
    }

    fn named_parameters(&self) -> HashMap<String, Parameter> {
        let mut params = HashMap::new();
        params.insert("weight".to_string(), self.weight.clone());
        params.insert("bias".to_string(), self.bias.clone());
        params
    }

    fn set_training(&mut self, training: bool) {
        self.training.store(training, Ordering::Relaxed);
    }

    fn is_training(&self) -> bool {
        self.training.load(Ordering::Relaxed)
    }

    fn name(&self) -> &'static str {
        "BatchNorm2d"
    }
}

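/// Layer normalization over the trailing `normalized_shape` dimensions of the input.
///
/// Normalizes each sample as `y = (x - mean) / sqrt(var + eps) * weight + bias`,
/// where the mean and variance are computed per sample over `normalized_shape`.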
pub struct LayerNorm {
    pub weight: Parameter,
    pub bias: Parameter,
    normalized_shape: Vec<usize>,
    eps: f32,
}

impl LayerNorm {
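    /// Creates a `LayerNorm` over `normalized_shape` with default `eps = 1e-5`.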
    pub fn new(normalized_shape: Vec<usize>) -> Self {
        Self::with_eps(normalized_shape, 1e-5)
    }

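    /// Convenience constructor that normalizes over a single trailing dimension of
    /// length `size`.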
    pub fn single(size: usize) -> Self {
        Self::new(vec![size])
    }

    pub fn with_eps(normalized_shape: Vec<usize>, eps: f32) -> Self {
        let numel: usize = normalized_shape.iter().product();
        Self {
            weight: Parameter::named("weight", ones(&[numel]), true),
            bias: Parameter::named("bias", zeros(&[numel]), true),
            normalized_shape,
            eps,
        }
    }
}

impl Module for LayerNorm {
    fn forward(&self, input: &Variable) -> Variable {
        let input_data = input.data();
        let shape = input_data.shape().to_vec();
        let input_vec = input_data.to_vec();

        let weight_vec = self.weight.data().to_vec();
        let bias_vec = self.bias.data().to_vec();

        let norm_size: usize = self.normalized_shape.iter().product();
        let batch_size = input_vec.len() / norm_size;

        let mut output_vec = vec![0.0f32; input_vec.len()];

        for b in 0..batch_size {
            let start = b * norm_size;
            let end = start + norm_size;
            let slice = &input_vec[start..end];

            let mean: f32 = slice.iter().sum::<f32>() / norm_size as f32;

            let var: f32 = slice.iter().map(|x| (x - mean).powi(2)).sum::<f32>() / norm_size as f32;

            for i in 0..norm_size {
                let normalized = (slice[i] - mean) / (var + self.eps).sqrt();
                output_vec[start + i] = normalized * weight_vec[i] + bias_vec[i];
            }
        }

        let output = Tensor::from_vec(output_vec, &shape).unwrap();
        Variable::new(output, input.requires_grad())
    }

    fn parameters(&self) -> Vec<Parameter> {
        vec![self.weight.clone(), self.bias.clone()]
    }

    fn named_parameters(&self) -> HashMap<String, Parameter> {
        let mut params = HashMap::new();
        params.insert("weight".to_string(), self.weight.clone());
        params.insert("bias".to_string(), self.bias.clone());
        params
    }

    fn name(&self) -> &'static str {
        "LayerNorm"
    }
}

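/// Group normalization over channel groups of an `(N, C, ...)` input.
///
/// Splits the channels into `num_groups` groups and normalizes each group per
/// sample as `y = (x - mean) / sqrt(var + eps) * weight + bias`, with an optional
/// learnable per-channel affine transform.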
pub struct GroupNorm {
    pub weight: Parameter,
    pub bias: Parameter,
    num_groups: usize,
    num_channels: usize,
    eps: f32,
    affine: bool,
}

impl GroupNorm {
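    /// Creates a `GroupNorm` with default options: `eps = 1e-5` and a learnable
    /// affine transform. Panics if `num_channels` is not divisible by `num_groups`.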
    pub fn new(num_groups: usize, num_channels: usize) -> Self {
        Self::with_options(num_groups, num_channels, 1e-5, true)
    }

    pub fn with_options(num_groups: usize, num_channels: usize, eps: f32, affine: bool) -> Self {
        assert!(
            num_channels % num_groups == 0,
            "num_channels ({}) must be divisible by num_groups ({})",
            num_channels, num_groups
        );

        Self {
            weight: Parameter::named("weight", ones(&[num_channels]), affine),
            bias: Parameter::named("bias", zeros(&[num_channels]), affine),
            num_groups,
            num_channels,
            eps,
            affine,
        }
    }
}

impl Module for GroupNorm {
    fn forward(&self, input: &Variable) -> Variable {
        let input_data = input.data();
        let shape = input_data.shape().to_vec();
        let batch_size = shape[0];
        let channels = shape[1];
        let spatial_size: usize = shape[2..].iter().product();

        assert_eq!(
            channels, self.num_channels,
            "GroupNorm: expected {} channels, got {}",
            self.num_channels, channels
        );

        let input_vec = input_data.to_vec();
        let channels_per_group = channels / self.num_groups;
        let weight_vec = self.weight.data().to_vec();
        let bias_vec = self.bias.data().to_vec();

        let mut output_vec = vec![0.0f32; input_vec.len()];

        for b in 0..batch_size {
            for g in 0..self.num_groups {
                let mut sum = 0.0f32;
                let group_size = channels_per_group * spatial_size;

                for c in 0..channels_per_group {
                    let channel_idx = g * channels_per_group + c;
                    for s in 0..spatial_size {
                        let idx = b * channels * spatial_size + channel_idx * spatial_size + s;
                        sum += input_vec[idx];
                    }
                }
                let mean = sum / group_size as f32;

                let mut var_sum = 0.0f32;
                for c in 0..channels_per_group {
                    let channel_idx = g * channels_per_group + c;
                    for s in 0..spatial_size {
                        let idx = b * channels * spatial_size + channel_idx * spatial_size + s;
                        let diff = input_vec[idx] - mean;
                        var_sum += diff * diff;
                    }
                }
                let var = var_sum / group_size as f32;

                let std_inv = 1.0 / (var + self.eps).sqrt();
                for c in 0..channels_per_group {
                    let channel_idx = g * channels_per_group + c;
                    let weight = if self.affine { weight_vec[channel_idx] } else { 1.0 };
                    let bias = if self.affine { bias_vec[channel_idx] } else { 0.0 };

                    for s in 0..spatial_size {
                        let idx = b * channels * spatial_size + channel_idx * spatial_size + s;
                        let normalized = (input_vec[idx] - mean) * std_inv;
                        output_vec[idx] = normalized * weight + bias;
                    }
                }
            }
        }

        let output = Tensor::from_vec(output_vec, &shape).unwrap();
        Variable::new(output, input.requires_grad())
    }

    fn parameters(&self) -> Vec<Parameter> {
        if self.affine {
            vec![self.weight.clone(), self.bias.clone()]
        } else {
            vec![]
        }
    }

    fn named_parameters(&self) -> HashMap<String, Parameter> {
        if self.affine {
            let mut params = HashMap::new();
            params.insert("weight".to_string(), self.weight.clone());
            params.insert("bias".to_string(), self.bias.clone());
            params
        } else {
            HashMap::new()
        }
    }

    fn name(&self) -> &'static str {
        "GroupNorm"
    }
}

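/// Instance normalization over a 4D `(N, C, H, W)` input.
///
/// Normalizes each channel of each sample independently over its spatial
/// dimensions as `y = (x - mean) / sqrt(var + eps) * weight + bias`, with an
/// optional learnable per-channel affine transform.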
pub struct InstanceNorm2d {
    pub weight: Parameter,
    pub bias: Parameter,
    num_features: usize,
    eps: f32,
    affine: bool,
}

impl InstanceNorm2d {
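    /// Creates an `InstanceNorm2d` with default options: `eps = 1e-5` and no
    /// learnable affine transform.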
    pub fn new(num_features: usize) -> Self {
        Self::with_options(num_features, 1e-5, false)
    }

    pub fn with_affine(num_features: usize) -> Self {
        Self::with_options(num_features, 1e-5, true)
    }

    pub fn with_options(num_features: usize, eps: f32, affine: bool) -> Self {
        Self {
            weight: Parameter::named("weight", ones(&[num_features]), affine),
            bias: Parameter::named("bias", zeros(&[num_features]), affine),
            num_features,
            eps,
            affine,
        }
    }
}

impl Module for InstanceNorm2d {
    fn forward(&self, input: &Variable) -> Variable {
        let input_data = input.data();
        let shape = input_data.shape().to_vec();

        assert!(shape.len() == 4, "InstanceNorm2d expects 4D input (N, C, H, W)");

        let batch_size = shape[0];
        let channels = shape[1];
        let height = shape[2];
        let width = shape[3];
        let spatial_size = height * width;

        assert_eq!(
            channels, self.num_features,
            "InstanceNorm2d: expected {} channels, got {}",
            self.num_features, channels
        );

        let input_vec = input_data.to_vec();
        let weight_vec = self.weight.data().to_vec();
        let bias_vec = self.bias.data().to_vec();
        let mut output_vec = vec![0.0f32; input_vec.len()];

        for b in 0..batch_size {
            for c in 0..channels {
                let mut sum = 0.0f32;
                for s in 0..spatial_size {
                    let idx = b * channels * spatial_size + c * spatial_size + s;
                    sum += input_vec[idx];
                }
                let mean = sum / spatial_size as f32;

                let mut var_sum = 0.0f32;
                for s in 0..spatial_size {
                    let idx = b * channels * spatial_size + c * spatial_size + s;
                    let diff = input_vec[idx] - mean;
                    var_sum += diff * diff;
                }
                let var = var_sum / spatial_size as f32;

                let std_inv = 1.0 / (var + self.eps).sqrt();
                let weight = if self.affine { weight_vec[c] } else { 1.0 };
                let bias = if self.affine { bias_vec[c] } else { 0.0 };

                for s in 0..spatial_size {
                    let idx = b * channels * spatial_size + c * spatial_size + s;
                    let normalized = (input_vec[idx] - mean) * std_inv;
                    output_vec[idx] = normalized * weight + bias;
                }
            }
        }

        let output = Tensor::from_vec(output_vec, &shape).unwrap();
        Variable::new(output, input.requires_grad())
    }

    fn parameters(&self) -> Vec<Parameter> {
        if self.affine {
            vec![self.weight.clone(), self.bias.clone()]
        } else {
            vec![]
        }
    }

    fn named_parameters(&self) -> HashMap<String, Parameter> {
        if self.affine {
            let mut params = HashMap::new();
            params.insert("weight".to_string(), self.weight.clone());
            params.insert("bias".to_string(), self.bias.clone());
            params
        } else {
            HashMap::new()
        }
    }

    fn name(&self) -> &'static str {
        "InstanceNorm2d"
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_batchnorm1d() {
        let bn = BatchNorm1d::new(3);
        let input = Variable::new(
            Tensor::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], &[2, 3]).unwrap(),
            false,
        );
        let output = bn.forward(&input);
        assert_eq!(output.shape(), vec![2, 3]);
    }

    #[test]
    fn test_batchnorm2d() {
        let bn = BatchNorm2d::new(2);
        let input = Variable::new(
            Tensor::from_vec(vec![1.0; 32], &[2, 2, 2, 4]).unwrap(),
            false,
        );
        let output = bn.forward(&input);
        assert_eq!(output.shape(), vec![2, 2, 2, 4]);
    }

    #[test]
    fn test_layernorm() {
        let ln = LayerNorm::single(4);
        let input = Variable::new(
            Tensor::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], &[2, 4]).unwrap(),
            false,
        );
        let output = ln.forward(&input);
        assert_eq!(output.shape(), vec![2, 4]);
    }

    #[test]
    fn test_batchnorm_parameters() {
        let bn = BatchNorm1d::new(10);
        assert_eq!(bn.parameters().len(), 2);
        assert_eq!(bn.num_parameters(), 20);
    }

    #[test]
    fn test_groupnorm() {
        let gn = GroupNorm::new(2, 4);
        let input = Variable::new(
            Tensor::from_vec(vec![1.0; 32], &[2, 4, 2, 2]).unwrap(),
            false,
        );
        let output = gn.forward(&input);
        assert_eq!(output.shape(), vec![2, 4, 2, 2]);
    }

    #[test]
    fn test_groupnorm_normalization() {
        let gn = GroupNorm::with_options(2, 4, 1e-5, false);
        let input = Variable::new(
            Tensor::from_vec(
                vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
                &[1, 4, 1, 2]
            ).unwrap(),
            false,
        );
        let output = gn.forward(&input);
        let out_vec = output.data().to_vec();

        let group1_mean: f32 = out_vec[0..4].iter().sum::<f32>() / 4.0;
        let group2_mean: f32 = out_vec[4..8].iter().sum::<f32>() / 4.0;
        assert!(group1_mean.abs() < 1e-5);
        assert!(group2_mean.abs() < 1e-5);
    }

    #[test]
    fn test_instancenorm2d() {
        let inn = InstanceNorm2d::new(2);
        let input = Variable::new(
            Tensor::from_vec(vec![1.0; 32], &[2, 2, 2, 4]).unwrap(),
            false,
        );
        let output = inn.forward(&input);
        assert_eq!(output.shape(), vec![2, 2, 2, 4]);
    }

    #[test]
    fn test_instancenorm2d_with_affine() {
        let inn = InstanceNorm2d::with_affine(4);
        let input = Variable::new(
            Tensor::from_vec(vec![1.0; 64], &[1, 4, 4, 4]).unwrap(),
            false,
        );
        let output = inn.forward(&input);
        assert_eq!(output.shape(), vec![1, 4, 4, 4]);
        assert_eq!(inn.parameters().len(), 2);
    }
}