use scirs2_core::ndarray_ext::{Array1, Array2, ArrayView1, ArrayView2, Axis};
use scirs2_core::random::{Random, Rng};
use sklears_core::error::{Result, SklearsError};
use sklears_core::traits::{Fit, Predict, Transform};
use thiserror::Error;

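/// Errors raised while configuring or training stacked autoencoders.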
#[derive(Error, Debug)]
pub enum StackedAutoencoderError {
    #[error("Invalid layer sizes: {0:?}")]
    InvalidLayerSizes(Vec<usize>),
    #[error("Invalid learning rate: {0}")]
    InvalidLearningRate(f64),
    #[error("Invalid epochs: {0}")]
    InvalidEpochs(usize),
    #[error("Insufficient labeled samples: need at least 1")]
    InsufficientLabeledSamples,
    #[error("Shape mismatch: expected {expected:?}, got {actual:?}")]
    ShapeMismatch {
        expected: Vec<usize>,
        actual: Vec<usize>,
    },
    #[error("Training failed: {0}")]
    TrainingFailed(String),
    #[error("Model not trained")]
    ModelNotTrained,
}

impl From<StackedAutoencoderError> for SklearsError {
    fn from(err: StackedAutoencoderError) -> Self {
        SklearsError::FitError(err.to_string())
    }
}

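/// A single denoising autoencoder layer: a sigmoid encoder/decoder pair
/// trained by full-batch gradient descent to reconstruct its noised input.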
#[derive(Debug, Clone)]
pub struct AutoencoderLayer {
    /// Number of input features.
    pub input_size: usize,
    /// Number of hidden units (the size of the learned encoding).
    pub hidden_size: usize,
    /// Step size for the gradient descent updates.
    pub learning_rate: f64,
    /// Number of training epochs.
    pub epochs: usize,
    /// Standard deviation of the Gaussian noise added to inputs during training.
    pub noise_factor: f64,
    weights_encode: Array2<f64>,
    bias_encode: Array1<f64>,
    weights_decode: Array2<f64>,
    bias_decode: Array1<f64>,
    is_trained: bool,
}

impl AutoencoderLayer {
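    /// Creates a layer with the given dimensions and default hyperparameters
    /// (learning rate 0.001, 100 epochs, noise factor 0.1).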
    pub fn new(input_size: usize, hidden_size: usize) -> Self {
        Self {
            input_size,
            hidden_size,
            learning_rate: 0.001,
            epochs: 100,
            noise_factor: 0.1,
            weights_encode: Array2::zeros((input_size, hidden_size)),
            bias_encode: Array1::zeros(hidden_size),
            weights_decode: Array2::zeros((hidden_size, input_size)),
            bias_decode: Array1::zeros(input_size),
            is_trained: false,
        }
    }

    /// Sets the learning rate; it must be strictly positive.
    pub fn learning_rate(mut self, learning_rate: f64) -> Result<Self> {
        if learning_rate <= 0.0 {
            return Err(StackedAutoencoderError::InvalidLearningRate(learning_rate).into());
        }
        self.learning_rate = learning_rate;
        Ok(self)
    }

    /// Sets the number of training epochs; it must be nonzero.
    pub fn epochs(mut self, epochs: usize) -> Result<Self> {
        if epochs == 0 {
            return Err(StackedAutoencoderError::InvalidEpochs(epochs).into());
        }
        self.epochs = epochs;
        Ok(self)
    }

    /// Sets the input-noise scale used during training.
    pub fn noise_factor(mut self, noise_factor: f64) -> Self {
        self.noise_factor = noise_factor;
        self
    }

    fn initialize_weights(&mut self, random_state: Option<u64>) {
        let mut rng = match random_state {
            Some(seed) => Random::seed(seed),
            None => Random::seed(42),
        };

        // Xavier/Glorot uniform initialization: limit = sqrt(6 / (fan_in + fan_out)).
        let limit_encode = (6.0 / (self.input_size + self.hidden_size) as f64).sqrt();
        let limit_decode = (6.0 / (self.hidden_size + self.input_size) as f64).sqrt();

        let mut weights_encode = Array2::<f64>::zeros((self.input_size, self.hidden_size));
        for i in 0..self.input_size {
            for j in 0..self.hidden_size {
                // Map a uniform sample in [0, 1) onto [-limit, limit).
                let u: f64 = rng.random_range(0.0..1.0);
                weights_encode[(i, j)] = u * (2.0 * limit_encode) - limit_encode;
            }
        }
        self.weights_encode = weights_encode;

        let mut weights_decode = Array2::<f64>::zeros((self.hidden_size, self.input_size));
        for i in 0..self.hidden_size {
            for j in 0..self.input_size {
                let u: f64 = rng.random_range(0.0..1.0);
                weights_decode[(i, j)] = u * (2.0 * limit_decode) - limit_decode;
            }
        }
        self.weights_decode = weights_decode;

        self.bias_encode = Array1::zeros(self.hidden_size);
        self.bias_decode = Array1::zeros(self.input_size);
    }

    fn sigmoid(&self, x: f64) -> f64 {
        1.0 / (1.0 + (-x).exp())
    }

    fn sigmoid_derivative(&self, x: f64) -> f64 {
        let s = self.sigmoid(x);
        s * (1.0 - s)
    }

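    /// Returns a copy of `X` corrupted with Gaussian noise scaled by
    /// `noise_factor` (the input-corruption step of a denoising autoencoder).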
    fn add_noise<R>(&self, X: &ArrayView2<f64>, rng: &mut Random<R>) -> Array2<f64>
    where
        R: Rng,
    {
        let (nrows, ncols) = X.dim();
        let mut noise = Array2::<f64>::zeros((nrows, ncols));
        for i in 0..nrows {
            for j in 0..ncols {
                // Box-Muller transform: two uniform samples yield one standard
                // normal sample, which is then scaled by the noise factor.
                let u1: f64 = rng.random_range(0.0..1.0);
                let u2: f64 = rng.random_range(0.0..1.0);
                let z = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
                noise[(i, j)] = z * self.noise_factor;
            }
        }
        X + &noise
    }

    /// Encodes inputs: `sigmoid(X · W_enc + b_enc)`.
    fn encode(&self, X: &ArrayView2<f64>) -> Array2<f64> {
        let linear = X.dot(&self.weights_encode) + &self.bias_encode;
        linear.mapv(|x| self.sigmoid(x))
    }

    /// Decodes hidden activations: `sigmoid(H · W_dec + b_dec)`.
    fn decode(&self, H: &ArrayView2<f64>) -> Array2<f64> {
        let linear = H.dot(&self.weights_decode) + &self.bias_decode;
        linear.mapv(|x| self.sigmoid(x))
    }

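    /// Trains the layer as a denoising autoencoder: each epoch corrupts the
    /// input with Gaussian noise, reconstructs it, and takes one full-batch
    /// gradient descent step on the mean squared reconstruction error.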
    #[allow(non_snake_case)]
    pub fn fit(&mut self, X: &ArrayView2<f64>, random_state: Option<u64>) -> Result<()> {
        let (n_samples, n_features) = X.dim();

        if n_features != self.input_size {
            return Err(StackedAutoencoderError::ShapeMismatch {
                expected: vec![n_samples, self.input_size],
                actual: vec![n_samples, n_features],
            }
            .into());
        }

        self.initialize_weights(random_state);

        let mut rng = match random_state {
            Some(seed) => Random::seed(seed),
            None => Random::seed(42),
        };

        for epoch in 0..self.epochs {
            // Corrupt the input, then run the forward pass.
            let X_noisy = self.add_noise(X, &mut rng);

            let encoded = self.encode(&X_noisy.view());
            let decoded = self.decode(&encoded.view());

            // Mean squared reconstruction error against the clean input.
            let reconstruction_error = X - &decoded;
            let total_loss = reconstruction_error.mapv(|x| x * x).sum() / n_samples as f64;

            // Gradient of the loss w.r.t. the decoder output: 2 (decoded - X) / n.
            // (The output sigmoid's derivative is omitted here, a common
            // simplification that treats the output layer as linear.)
            let output_delta = &reconstruction_error * (-2.0 / n_samples as f64);

            // Backpropagate through the decoder into the hidden pre-activations.
            let hidden_linear = X_noisy.dot(&self.weights_encode) + &self.bias_encode;
            let hidden_delta = output_delta.dot(&self.weights_decode.t())
                * hidden_linear.mapv(|x| self.sigmoid_derivative(x));

            let dW_decode = encoded.t().dot(&output_delta);
            let db_decode = output_delta.sum_axis(Axis(0));

            let dW_encode = X_noisy.t().dot(&hidden_delta);
            let db_encode = hidden_delta.sum_axis(Axis(0));

            self.weights_decode = &self.weights_decode - self.learning_rate * dW_decode;
            self.bias_decode = &self.bias_decode - self.learning_rate * db_decode;

            self.weights_encode = &self.weights_encode - self.learning_rate * dW_encode;
            self.bias_encode = &self.bias_encode - self.learning_rate * db_encode;

            // Early stopping: check for convergence every 10 epochs.
            if epoch % 10 == 0 && total_loss < 1e-6 {
                break;
            }
        }

        self.is_trained = true;
        Ok(())
    }

    /// Encodes `X` into the hidden representation. Fails if the layer has not
    /// been trained or the feature count does not match `input_size`.
    pub fn transform(&self, X: &ArrayView2<f64>) -> Result<Array2<f64>> {
        if !self.is_trained {
            return Err(StackedAutoencoderError::ModelNotTrained.into());
        }

        let (_, n_features) = X.dim();
        if n_features != self.input_size {
            return Err(StackedAutoencoderError::ShapeMismatch {
                expected: vec![0, self.input_size],
                actual: vec![0, n_features],
            }
            .into());
        }

        Ok(self.encode(X))
    }

    /// Encodes and then decodes `X`, returning the reconstruction.
    pub fn reconstruct(&self, X: &ArrayView2<f64>) -> Result<Array2<f64>> {
        if !self.is_trained {
            return Err(StackedAutoencoderError::ModelNotTrained.into());
        }

        let encoded = self.transform(X)?;
        Ok(self.decode(&encoded.view()))
    }
}

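/// Stacked denoising autoencoders for semi-supervised classification.
///
/// The encoder stack is pretrained greedily, one layer at a time, on all
/// samples (labeled or not); a softmax classifier head is then fine-tuned on
/// the labeled subset. Labels of `-1` mark unlabeled samples.
///
/// Illustrative builder usage (mirroring the tests below; not compiled here):
///
/// ```ignore
/// let sae = StackedAutoencoders::new()
///     .layer_sizes(vec![4, 3, 2])?
///     .learning_rate(0.01)?
///     .pretrain_epochs(10)?
///     .finetune_epochs(5)?
///     .random_state(42);
/// ```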
#[derive(Debug, Clone)]
pub struct StackedAutoencoders {
    /// Layer sizes from the input dimension down to the final encoding.
    pub layer_sizes: Vec<usize>,
    /// Step size for the gradient descent updates.
    pub learning_rate: f64,
    /// Epochs used for layer-wise pretraining.
    pub pretrain_epochs: usize,
    /// Epochs used for supervised fine-tuning of the classifier head.
    pub finetune_epochs: usize,
    /// Scale of the Gaussian input noise used during pretraining.
    pub noise_factor: f64,
    /// Seed for reproducible initialization and noise.
    pub random_state: Option<u64>,
    layers: Vec<AutoencoderLayer>,
    classifier_weights: Array2<f64>,
    classifier_bias: Array1<f64>,
    n_classes: usize,
    is_pretrained: bool,
    is_finetuned: bool,
}

impl Default for StackedAutoencoders {
    fn default() -> Self {
        Self {
            layer_sizes: vec![784, 500, 200, 50],
            learning_rate: 0.001,
            pretrain_epochs: 100,
            finetune_epochs: 50,
            noise_factor: 0.1,
            random_state: None,
            layers: Vec::new(),
            classifier_weights: Array2::zeros((0, 0)),
            classifier_bias: Array1::zeros(0),
            n_classes: 0,
            is_pretrained: false,
            is_finetuned: false,
        }
    }
}

impl StackedAutoencoders {
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the layer sizes; at least an input and one hidden layer are required.
    pub fn layer_sizes(mut self, layer_sizes: Vec<usize>) -> Result<Self> {
        if layer_sizes.len() < 2 {
            return Err(StackedAutoencoderError::InvalidLayerSizes(layer_sizes).into());
        }
        self.layer_sizes = layer_sizes;
        Ok(self)
    }

    /// Sets the learning rate; it must be strictly positive.
    pub fn learning_rate(mut self, learning_rate: f64) -> Result<Self> {
        if learning_rate <= 0.0 {
            return Err(StackedAutoencoderError::InvalidLearningRate(learning_rate).into());
        }
        self.learning_rate = learning_rate;
        Ok(self)
    }

    /// Sets the number of pretraining epochs; it must be nonzero.
    pub fn pretrain_epochs(mut self, pretrain_epochs: usize) -> Result<Self> {
        if pretrain_epochs == 0 {
            return Err(StackedAutoencoderError::InvalidEpochs(pretrain_epochs).into());
        }
        self.pretrain_epochs = pretrain_epochs;
        Ok(self)
    }

    /// Sets the number of fine-tuning epochs; it must be nonzero.
    pub fn finetune_epochs(mut self, finetune_epochs: usize) -> Result<Self> {
        if finetune_epochs == 0 {
            return Err(StackedAutoencoderError::InvalidEpochs(finetune_epochs).into());
        }
        self.finetune_epochs = finetune_epochs;
        Ok(self)
    }

    /// Sets the input-noise scale used during pretraining.
    pub fn noise_factor(mut self, noise_factor: f64) -> Self {
        self.noise_factor = noise_factor;
        self
    }

    /// Sets the random seed for reproducible runs.
    pub fn random_state(mut self, random_state: u64) -> Self {
        self.random_state = Some(random_state);
        self
    }

    fn initialize_layers(&mut self) {
        self.layers.clear();

        for i in 0..self.layer_sizes.len() - 1 {
            // The unwraps are safe: learning rate and epochs were already
            // validated by the builder methods that set them.
            let layer = AutoencoderLayer::new(self.layer_sizes[i], self.layer_sizes[i + 1])
                .learning_rate(self.learning_rate)
                .unwrap()
                .epochs(self.pretrain_epochs)
                .unwrap()
                .noise_factor(self.noise_factor);
            self.layers.push(layer);
        }
    }

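    /// Greedy layer-wise pretraining: each autoencoder layer is trained to
    /// reconstruct the previous layer's encoded output, using all samples.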
    pub fn pretrain(&mut self, X: &ArrayView2<f64>) -> Result<()> {
        let (_, n_features) = X.dim();

        // Derive a default architecture if none was configured; otherwise pin
        // the first layer to the input dimensionality.
        if self.layer_sizes.is_empty() {
            self.layer_sizes = vec![n_features, n_features / 2, n_features / 4];
        } else {
            self.layer_sizes[0] = n_features;
        }

        self.initialize_layers();

        let mut current_data = X.to_owned();

        for (i, layer) in self.layers.iter_mut().enumerate() {
            // Offset the seed per layer so layers start from different weights.
            let seed = self.random_state.map(|s| s + i as u64);
            layer.fit(&current_data.view(), seed)?;

            current_data = layer.transform(&current_data.view())?;
        }

        self.is_pretrained = true;
        Ok(())
    }

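    /// Runs `X` through every trained encoder layer and returns the final encoding.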
    fn forward_pass(&self, X: &ArrayView2<f64>) -> Result<Array2<f64>> {
        if !self.is_pretrained {
            return Err(StackedAutoencoderError::ModelNotTrained.into());
        }

        let mut current_data = X.to_owned();

        for layer in self.layers.iter() {
            current_data = layer.transform(&current_data.view())?;
        }

        Ok(current_data)
    }

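    /// Row-wise softmax, subtracting each row's maximum first for numerical stability.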
    fn softmax(&self, X: &ArrayView2<f64>) -> Array2<f64> {
        let mut result = X.to_owned();

        for mut row in result.rows_mut() {
            let max_val = row.fold(f64::NEG_INFINITY, |a, &b| a.max(b));
            for val in row.iter_mut() {
                *val = (*val - max_val).exp();
            }
            let sum: f64 = row.sum();
            for val in row.iter_mut() {
                *val /= sum;
            }
        }

        result
    }

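    /// Fine-tunes a softmax classifier head on the pretrained encodings, using
    /// only the labeled samples (labels `>= 0`). The encoder weights are kept
    /// frozen; only the classifier weights and bias are updated.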
    pub fn finetune(&mut self, X: &ArrayView2<f64>, y: &ArrayView1<i32>) -> Result<()> {
        if !self.is_pretrained {
            return Err(StackedAutoencoderError::ModelNotTrained.into());
        }

        let (n_samples, _) = X.dim();

        if y.len() != n_samples {
            return Err(StackedAutoencoderError::ShapeMismatch {
                expected: vec![n_samples],
                actual: vec![y.len()],
            }
            .into());
        }

        // Negative labels mark unlabeled samples.
        let labeled_mask: Vec<bool> = y.iter().map(|&label| label >= 0).collect();
        let n_labeled = labeled_mask.iter().filter(|&&x| x).count();

        if n_labeled == 0 {
            return Err(StackedAutoencoderError::InsufficientLabeledSamples.into());
        }

        // Collect the distinct class labels. Labels are assumed to be the
        // contiguous integers 0..n_classes-1, since they are used directly as
        // column indices below.
        let mut classes: Vec<i32> = y.iter().filter(|&&label| label >= 0).cloned().collect();
        classes.sort_unstable();
        classes.dedup();
        self.n_classes = classes.len();

        // Xavier/Glorot initialization for the classifier head.
        let hidden_size = self.layer_sizes.last().unwrap();
        let mut rng = match self.random_state {
            Some(seed) => Random::seed(seed),
            None => Random::seed(42),
        };

        let limit = (6.0 / (hidden_size + self.n_classes) as f64).sqrt();

        let mut classifier_weights = Array2::<f64>::zeros((*hidden_size, self.n_classes));
        for i in 0..*hidden_size {
            for j in 0..self.n_classes {
                let u: f64 = rng.random_range(0.0..1.0);
                classifier_weights[(i, j)] = u * (2.0 * limit) - limit;
            }
        }
        self.classifier_weights = classifier_weights;
        self.classifier_bias = Array1::zeros(self.n_classes);

        for _epoch in 0..self.finetune_epochs {
            // Forward pass through the frozen encoder stack.
            let features = self.forward_pass(X)?;

            let logits = features.dot(&self.classifier_weights) + &self.classifier_bias;
            let probabilities = self.softmax(&logits.view());

            // Accumulate cross-entropy gradients over the labeled samples only.
            let mut classifier_weights_grad = Array2::zeros(self.classifier_weights.raw_dim());
            let mut classifier_bias_grad = Array1::zeros(self.classifier_bias.len());

            for i in 0..n_samples {
                if labeled_mask[i] {
                    let true_class = y[i] as usize;

                    // Softmax + cross-entropy gradient: p_j - 1[j == true_class].
                    for j in 0..self.n_classes {
                        let target = if j == true_class { 1.0 } else { 0.0 };
                        let error = probabilities[[i, j]] - target;

                        for k in 0..*hidden_size {
                            classifier_weights_grad[[k, j]] += features[[i, k]] * error;
                        }
                        classifier_bias_grad[j] += error;
                    }
                }
            }

            // Average over the labeled samples and take one gradient step.
            classifier_weights_grad /= n_labeled as f64;
            classifier_bias_grad /= n_labeled as f64;

            self.classifier_weights =
                &self.classifier_weights - self.learning_rate * classifier_weights_grad;
            self.classifier_bias =
                &self.classifier_bias - self.learning_rate * classifier_bias_grad;
        }

        self.is_finetuned = true;
        Ok(())
    }

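    /// Returns the stacked encoding of `X`; requires pretraining.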
    pub fn transform(&self, X: &ArrayView2<f64>) -> Result<Array2<f64>> {
        self.forward_pass(X)
    }

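    /// Returns per-class probabilities for each sample; requires fine-tuning.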
    pub fn predict_proba(&self, X: &ArrayView2<f64>) -> Result<Array2<f64>> {
        if !self.is_finetuned {
            return Err(StackedAutoencoderError::ModelNotTrained.into());
        }

        let features = self.forward_pass(X)?;
        let logits = features.dot(&self.classifier_weights) + &self.classifier_bias;
        Ok(self.softmax(&logits.view()))
    }

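    /// Predicts the most probable class index for each sample.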
    pub fn predict(&self, X: &ArrayView2<f64>) -> Result<Array1<i32>> {
        let probabilities = self.predict_proba(X)?;
        let predictions = probabilities.map_axis(Axis(1), |row| {
            row.iter()
                .enumerate()
                .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
                .map(|(idx, _)| idx as i32)
                .unwrap()
        });
        Ok(predictions)
    }
}

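/// A trained stacked-autoencoder model returned by the `Fit` implementation.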
#[derive(Debug, Clone)]
pub struct FittedStackedAutoencoders {
    model: StackedAutoencoders,
}

impl Fit<ArrayView2<'_, f64>, ArrayView1<'_, i32>, FittedStackedAutoencoders>
    for StackedAutoencoders
{
    type Fitted = FittedStackedAutoencoders;

    fn fit(
        mut self,
        X: &ArrayView2<'_, f64>,
        y: &ArrayView1<'_, i32>,
    ) -> Result<FittedStackedAutoencoders> {
        // Unsupervised pretraining on all samples.
        self.pretrain(X)?;

        // Supervised fine-tuning on the labeled subset.
        self.finetune(X, y)?;

        Ok(FittedStackedAutoencoders { model: self })
    }
}

impl Predict<ArrayView2<'_, f64>, Array1<i32>> for FittedStackedAutoencoders {
    fn predict(&self, X: &ArrayView2<'_, f64>) -> Result<Array1<i32>> {
        self.model.predict(X)
    }
}

impl Transform<ArrayView2<'_, f64>, Array2<f64>> for FittedStackedAutoencoders {
    fn transform(&self, X: &ArrayView2<'_, f64>) -> Result<Array2<f64>> {
        self.model.transform(X)
    }
}

#[allow(non_snake_case)]
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;
    use scirs2_core::array;

    #[test]
    fn test_autoencoder_layer_creation() {
        let layer = AutoencoderLayer::new(10, 5);
        assert_eq!(layer.input_size, 10);
        assert_eq!(layer.hidden_size, 5);
        assert_eq!(layer.learning_rate, 0.001);
        assert_eq!(layer.epochs, 100);
    }

    #[test]
    #[allow(non_snake_case)]
    fn test_autoencoder_layer_fit_transform() {
        let mut layer = AutoencoderLayer::new(4, 2)
            .learning_rate(0.01)
            .unwrap()
            .epochs(50)
            .unwrap();

        let X = array![
            [1.0, 0.0, 1.0, 0.0],
            [0.0, 1.0, 0.0, 1.0],
            [1.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 1.0]
        ];

        layer.fit(&X.view(), Some(42)).unwrap();
        assert!(layer.is_trained);

        let encoded = layer.transform(&X.view()).unwrap();
        assert_eq!(encoded.dim(), (4, 2));

        let reconstructed = layer.reconstruct(&X.view()).unwrap();
        assert_eq!(reconstructed.dim(), (4, 4));
    }

    #[test]
    fn test_stacked_autoencoders_creation() {
        let sae = StackedAutoencoders::new()
            .layer_sizes(vec![4, 3, 2])
            .unwrap()
            .learning_rate(0.01)
            .unwrap()
            .pretrain_epochs(10)
            .unwrap()
            .finetune_epochs(5)
            .unwrap();

        assert_eq!(sae.layer_sizes, vec![4, 3, 2]);
        assert_eq!(sae.learning_rate, 0.01);
        assert_eq!(sae.pretrain_epochs, 10);
        assert_eq!(sae.finetune_epochs, 5);
    }

    #[test]
    #[allow(non_snake_case)]
    fn test_stacked_autoencoders_pretrain() {
        let mut sae = StackedAutoencoders::new()
            .layer_sizes(vec![4, 3, 2])
            .unwrap()
            .pretrain_epochs(5)
            .unwrap()
            .random_state(42);

        let X = array![
            [1.0, 0.0, 1.0, 0.0],
            [0.0, 1.0, 0.0, 1.0],
            [1.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 1.0]
        ];

        sae.pretrain(&X.view()).unwrap();
        assert!(sae.is_pretrained);
        assert_eq!(sae.layers.len(), 2);

        let features = sae.transform(&X.view()).unwrap();
        assert_eq!(features.dim(), (4, 2));
    }

    #[test]
    #[allow(non_snake_case)]
    fn test_stacked_autoencoders_fit_predict() {
        let sae = StackedAutoencoders::new()
            .layer_sizes(vec![4, 3, 2])
            .unwrap()
            .pretrain_epochs(5)
            .unwrap()
            .finetune_epochs(3)
            .unwrap()
            .random_state(42);

        let X = array![
            [1.0, 0.0, 1.0, 0.0],
            [0.0, 1.0, 0.0, 1.0],
            [1.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 1.0],
            [0.5, 0.5, 0.5, 0.5],
            [0.2, 0.8, 0.3, 0.7]
        ];
        // Labels of -1 mark unlabeled samples.
        let y = array![0, 1, 0, 1, -1, -1];

        let fitted = sae.fit(&X.view(), &y.view()).unwrap();

        let predictions = fitted.predict(&X.view()).unwrap();
        assert_eq!(predictions.len(), 6);

        let probabilities = fitted.model.predict_proba(&X.view()).unwrap();
        assert_eq!(probabilities.dim(), (6, 2));

        // Each row of class probabilities should sum to 1.
        for i in 0..6 {
            let sum: f64 = probabilities.row(i).sum();
            assert_abs_diff_eq!(sum, 1.0, epsilon = 1e-10);
        }
    }

    #[test]
    fn test_stacked_autoencoders_invalid_parameters() {
        assert!(StackedAutoencoders::new().layer_sizes(vec![]).is_err());
        assert!(StackedAutoencoders::new().layer_sizes(vec![10]).is_err());
        assert!(StackedAutoencoders::new().learning_rate(0.0).is_err());
        assert!(StackedAutoencoders::new().learning_rate(-0.1).is_err());
        assert!(StackedAutoencoders::new().pretrain_epochs(0).is_err());
        assert!(StackedAutoencoders::new().finetune_epochs(0).is_err());
    }

    #[test]
    #[allow(non_snake_case)]
    fn test_stacked_autoencoders_insufficient_labeled_samples() {
        let sae = StackedAutoencoders::new()
            .layer_sizes(vec![4, 2])
            .unwrap()
            .pretrain_epochs(2)
            .unwrap()
            .finetune_epochs(2)
            .unwrap();

        let X = array![[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]];
        // Every sample is unlabeled, so fine-tuning must fail.
        let y = array![-1, -1];

        let result = sae.fit(&X.view(), &y.view());
        assert!(result.is_err());
    }

    #[test]
    #[allow(non_snake_case)]
    fn test_stacked_autoencoders_transform() {
        let sae = StackedAutoencoders::new()
            .layer_sizes(vec![4, 2])
            .unwrap()
            .pretrain_epochs(3)
            .unwrap()
            .finetune_epochs(2)
            .unwrap()
            .random_state(42);

        let X = array![
            [1.0, 0.0, 1.0, 0.0],
            [0.0, 1.0, 0.0, 1.0],
            [1.0, 1.0, 0.0, 0.0]
        ];
        let y = array![0, 1, 0];

        let fitted = sae.fit(&X.view(), &y.view()).unwrap();
        let features = fitted.transform(&X.view()).unwrap();

        assert_eq!(features.dim(), (3, 2));

        // Sigmoid encodings are bounded to [0, 1].
        for value in features.iter() {
            assert!(*value >= 0.0 && *value <= 1.0);
        }
    }
}