// pc_rl_core/pc_actor.rs

1// Author: Julian Bolivar
2// Version: 1.0.0
3// Date: 2026-03-25
4
5//! Predictive Coding Actor Network.
6//!
7//! Implements an actor that uses iterative top-down/bottom-up predictive coding
8//! inference loops instead of standard feedforward passes. The prediction error
9//! (surprise score) drives learning rate modulation in the actor-critic agent.
10
11use rand::Rng;
12use serde::{Deserialize, Serialize};
13
14use crate::activation::Activation;
15use crate::error::PcError;
16use crate::layer::{Layer, LayerDef};
17use crate::linalg::cpu::CpuLinAlg;
18use crate::linalg::LinAlg;
19
20/// Configuration for the predictive coding actor network.
21///
22/// # Examples
23///
24/// ```
25/// use pc_rl_core::activation::Activation;
26/// use pc_rl_core::layer::LayerDef;
27/// use pc_rl_core::pc_actor::PcActorConfig;
28///
29/// let config = PcActorConfig {
30///     input_size: 9,
31///     hidden_layers: vec![LayerDef { size: 18, activation: Activation::Tanh }],
32///     output_size: 9,
33///     output_activation: Activation::Tanh,
34///     alpha: 0.1,
35///     tol: 0.01,
36///     min_steps: 1,
37///     max_steps: 20,
38///     lr_weights: 0.01,
39///     synchronous: true,
40///     temperature: 1.0,
41///     local_lambda: 1.0,
42///     residual: false,
43///     rezero_init: 0.001,
44/// };
45/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PcActorConfig {
    /// Number of input features (e.g. 9 for tic-tac-toe board).
    pub input_size: usize,
    /// Hidden layer topology definitions.
    pub hidden_layers: Vec<LayerDef>,
    /// Number of output actions.
    pub output_size: usize,
    /// Activation function for the output layer.
    pub output_activation: Activation,
    /// Inference learning rate for PC loop state updates (`h += alpha * error`).
    /// Set to 0.0 to disable PC inference (network behaves as standard MLP).
    /// Active regardless of `residual` setting.
    pub alpha: f64,
    /// Convergence threshold for RMS prediction error.
    /// PC loop exits early when surprise < tol (after at least `min_steps`).
    /// Active regardless of `residual` setting.
    pub tol: f64,
    /// Minimum PC inference steps before convergence check is allowed.
    /// Active regardless of `residual` setting.
    pub min_steps: usize,
    /// Maximum PC inference steps per action.
    /// Active regardless of `residual` setting.
    pub max_steps: usize,
    /// Base learning rate for weight updates.
    pub lr_weights: f64,
    /// If true, use synchronous snapshot mode; otherwise in-place.
    pub synchronous: bool,
    /// Softmax temperature for action selection. Must be positive
    /// (validated in `PcActor::new`).
    pub temperature: f64,
    /// Blend factor for hidden layer weight updates, range `[0.0, 1.0]`.
    ///
    /// Controls how hidden layers combine two gradient signals:
    /// `delta = lambda * backprop_grad + (1 - lambda) * pc_prediction_error`
    ///
    /// - `1.0` — Pure backprop: reward signal propagated from output (default).
    /// - `0.0` — Pure local PC: prediction errors from inference loop
    ///   used as gradients (Millidge et al. 2022). No vanishing gradient
    ///   but no reward signal reaches hidden layers.
    /// - `0.0 < lambda < 1.0` — Hybrid: reward-aware backprop regularized
    ///   by local PC consistency errors.
    ///
    /// The output layer always uses standard backprop regardless of this value.
    #[serde(default = "default_local_lambda")]
    pub local_lambda: f64,
    /// Enable residual skip connections between consecutive hidden layers.
    /// When false, `rezero_init` is ignored. When true, every hidden layer
    /// after the first gets a skip connection with learnable ReZero scaling;
    /// if two consecutive hidden layers differ in size, a Xavier-initialized
    /// projection matrix bridges the skip path (same-size layers use an
    /// identity skip). The first hidden layer never gets a skip connection,
    /// since input_size typically differs from hidden_size.
    #[serde(default)]
    pub residual: bool,
    /// Initial value for ReZero scaling factors on residual connections.
    /// Only used when `residual = true`. Controls initial contribution of
    /// the nonlinear component: `h[i] = rezero_init * tanh(...) + h[i-1]`.
    ///
    /// - `0.001` — Near-identity start (ReZero: network learns depth gradually)
    /// - `1.0` — Standard ResNet residual (full contribution from start)
    ///
    /// Ignored when `residual = false`.
    #[serde(default = "default_rezero_init")]
    pub rezero_init: f64,
}
109
/// Serde default for `rezero_init`: 0.001, a near-identity start per ReZero.
fn default_rezero_init() -> f64 {
    1e-3
}
114
/// Serde default for `local_lambda`: 1.0, i.e. pure backprop for hidden layers.
fn default_local_lambda() -> f64 {
    1.0
}
119
/// Result of the predictive coding inference loop.
///
/// Contains converged output logits, hidden state representations,
/// and diagnostic information about the inference process.
///
/// Generic over a [`LinAlg`] backend `L`. Defaults to [`CpuLinAlg`].
#[derive(Debug, Clone)]
pub struct InferResult<L: LinAlg = CpuLinAlg> {
    /// Converged output logits.
    pub y_conv: L::Vector,
    /// All hidden states concatenated (fed to critic).
    pub latent_concat: L::Vector,
    /// Per-layer hidden state activations, ordered bottom (first hidden
    /// layer) to top (last hidden layer).
    pub hidden_states: Vec<L::Vector>,
    /// Per-layer prediction errors from the last PC inference step.
    /// Ordered from top hidden layer to bottom (reverse layer order).
    pub prediction_errors: Vec<L::Vector>,
    /// RMS prediction error across layers — the "surprise" score used to
    /// modulate learning rate in the actor-critic agent.
    pub surprise_score: f64,
    /// Number of inference steps performed (0 only when `max_steps == 0`).
    pub steps_used: usize,
    /// Whether the inference loop converged within tolerance.
    pub converged: bool,
    /// Per-layer tanh components for residual layers.
    /// `None` for non-skip layers, `Some(tanh_out)` for skip-eligible layers.
    /// Needed for correct backward pass (derivative on tanh_out, not full h\[i\]).
    pub tanh_components: Vec<Option<L::Vector>>,
}
148
/// Action selection mode for [`PcActor::select_action`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SelectionMode {
    /// Stochastic sampling from the (masked, temperature-scaled) softmax
    /// distribution — used during training for exploration.
    Training,
    /// Deterministic argmax selection — used for evaluation/play.
    Play,
}
157
/// Predictive coding actor network.
///
/// Uses iterative top-down/bottom-up inference loops to produce
/// stable hidden representations and output logits.
///
/// Generic over a [`LinAlg`] backend `L`. Defaults to [`CpuLinAlg`].
///
/// # Examples
///
/// ```
/// use pc_rl_core::activation::Activation;
/// use pc_rl_core::layer::LayerDef;
/// use pc_rl_core::pc_actor::{PcActor, PcActorConfig, SelectionMode};
/// use rand::SeedableRng;
/// use rand::rngs::StdRng;
///
/// let config = PcActorConfig {
///     input_size: 9,
///     hidden_layers: vec![LayerDef { size: 18, activation: Activation::Tanh }],
///     output_size: 9,
///     output_activation: Activation::Tanh,
///     alpha: 0.1, tol: 0.01, min_steps: 1, max_steps: 20,
///     lr_weights: 0.01, synchronous: true, temperature: 1.0,
///     local_lambda: 1.0,
///     residual: false,
///     rezero_init: 0.001,
/// };
/// let mut rng = StdRng::seed_from_u64(42);
/// let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
/// let result = actor.infer(&[0.0; 9]);
/// assert_eq!(result.y_conv.len(), 9);
/// ```
#[derive(Debug)]
pub struct PcActor<L: LinAlg = CpuLinAlg> {
    /// Network layers: hidden_layers.len() + 1 (output layer).
    pub(crate) layers: Vec<Layer<L>>,
    /// Actor configuration.
    pub config: PcActorConfig,
    /// ReZero scaling factors for skip connections. One per skip layer (all i >= 1 when residual=true).
    pub(crate) rezero_alpha: Vec<f64>,
    /// Projection matrices for skip connections between layers of different sizes.
    /// One entry per skip layer: `None` for identity (same size), `Some(Matrix)` for projection.
    /// Indexed by `skip_alpha_index` (hidden layer `i` maps to entry `i - 1`).
    pub(crate) skip_projections: Vec<Option<L::Matrix>>,
}
202
203impl<L: LinAlg> PcActor<L> {
204    /// Creates a new PC actor with Xavier-initialized layers.
205    ///
206    /// # Arguments
207    ///
208    /// * `config` - Actor configuration specifying topology and hyperparameters.
209    /// * `rng` - Random number generator for weight initialization.
210    ///
211    /// # Errors
212    ///
213    /// Returns `PcError::ConfigValidation` if `input_size`, `output_size`,
214    /// or `temperature` are invalid.
215    pub fn new(config: PcActorConfig, rng: &mut impl Rng) -> Result<Self, PcError> {
216        if config.input_size == 0 {
217            return Err(PcError::ConfigValidation("input_size must be > 0".into()));
218        }
219        if config.output_size == 0 {
220            return Err(PcError::ConfigValidation("output_size must be > 0".into()));
221        }
222        if config.temperature <= 0.0 {
223            return Err(PcError::ConfigValidation(format!(
224                "temperature must be positive, got {}",
225                config.temperature
226            )));
227        }
228        if !(0.0..=1.0).contains(&config.local_lambda) {
229            return Err(PcError::ConfigValidation(format!(
230                "local_lambda must be in [0.0, 1.0], got {}",
231                config.local_lambda
232            )));
233        }
234        if config.rezero_init < 0.0 {
235            return Err(PcError::ConfigValidation(format!(
236                "rezero_init must be >= 0, got {}",
237                config.rezero_init
238            )));
239        }
240        let mut layers: Vec<Layer<L>> = Vec::new();
241        let mut prev_size = config.input_size;
242
243        for def in &config.hidden_layers {
244            layers.push(Layer::<L>::new(prev_size, def.size, def.activation, rng));
245            prev_size = def.size;
246        }
247
248        // Output layer
249        layers.push(Layer::<L>::new(
250            prev_size,
251            config.output_size,
252            config.output_activation,
253            rng,
254        ));
255
256        // Compute rezero_alpha and skip_projections: one per skip layer (all i >= 1)
257        let (rezero_alpha, skip_projections) = if config.residual {
258            let mut alphas = Vec::new();
259            let mut projs = Vec::new();
260            for i in 1..config.hidden_layers.len() {
261                alphas.push(config.rezero_init);
262                if config.hidden_layers[i].size != config.hidden_layers[i - 1].size {
263                    projs.push(Some(L::xavier_mat(
264                        config.hidden_layers[i].size,
265                        config.hidden_layers[i - 1].size,
266                        rng,
267                    )));
268                } else {
269                    projs.push(None);
270                }
271            }
272            (alphas, projs)
273        } else {
274            (Vec::new(), Vec::new())
275        };
276
277        Ok(Self {
278            layers,
279            config,
280            rezero_alpha,
281            skip_projections,
282        })
283    }
284
    /// Creates a child actor by crossing over two parent actors using CCA neuron alignment.
    ///
    /// Aligns hidden neurons functionally via CCA before blending weights.
    /// Input and output layers use positional crossover (no permutation problem).
    /// Whenever a layer's dimensions cannot be matched between parents and
    /// child, that layer falls back to fresh Xavier initialization.
    ///
    /// # Arguments
    ///
    /// * `parent_a` - First parent (reference, typically higher fitness).
    /// * `parent_b` - Second parent (aligned to A via CCA).
    /// * `caches_a` - Per-layer activation matrices for parent A `[batch × neurons]`.
    /// * `caches_b` - Per-layer activation matrices for parent B `[batch × neurons]`.
    /// * `alpha` - Blending weight: 1.0 = all A, 0.0 = all B.
    /// * `child_config` - Topology configuration for the child network.
    /// * `rng` - Random number generator for Xavier initialization.
    ///
    /// # Errors
    ///
    /// Returns `PcError::ConfigValidation` if `child_config` is invalid.
    pub fn crossover(
        parent_a: &PcActor<L>,
        parent_b: &PcActor<L>,
        caches_a: &[L::Matrix],
        caches_b: &[L::Matrix],
        alpha: f64,
        child_config: PcActorConfig,
        rng: &mut impl Rng,
    ) -> Result<Self, PcError> {
        let num_child_hidden = child_config.hidden_layers.len();
        if num_child_hidden == 0 {
            return Err(PcError::ConfigValidation(
                "crossover requires at least one hidden layer".into(),
            ));
        }
        let num_parent_a_hidden = parent_a.config.hidden_layers.len();
        let num_parent_b_hidden = parent_b.config.hidden_layers.len();

        let mut layers: Vec<Layer<L>> = Vec::new();
        // Track the previous layer's CCA permutation for column propagation:
        // when layer i's neurons of parent B are permuted to match A, layer
        // i+1's input columns must be permuted consistently.
        let mut prev_perm: Option<Vec<usize>> = None;

        // ── Input layer (layer 0): CCA-aligned crossover ─────────
        let child_h0 = &child_config.hidden_layers[0];

        if parent_a.config.input_size == child_config.input_size
            && parent_b.config.input_size == child_config.input_size
        {
            let cache_a_0 = caches_a.first();
            let cache_b_0 = caches_b.first();
            let (layer, perm) = cca_align_and_blend_layer::<L>(
                &parent_a.layers[0],
                &parent_b.layers[0],
                cache_a_0,
                cache_b_0,
                None, // No previous perm for first layer
                child_h0.size,
                L::mat_cols(&parent_a.layers[0].weights),
                child_h0.activation,
                alpha,
                rng,
            )?;
            layers.push(layer);
            prev_perm = perm;
        } else {
            // Input sizes don't match the child: alignment is meaningless,
            // start this layer from fresh Xavier initialization.
            layers.push(Layer::<L>::new(
                child_config.input_size,
                child_h0.size,
                child_h0.activation,
                rng,
            ));
        }

        // ── Hidden layers 1..n: CCA-aligned crossover ────────────
        for h_idx in 1..num_child_hidden {
            let child_def = &child_config.hidden_layers[h_idx];
            let prev_child_size = child_config.hidden_layers[h_idx - 1].size;

            // A parent only contributes if it has a layer at this depth.
            let a_has = h_idx < num_parent_a_hidden;
            let b_has = h_idx < num_parent_b_hidden;

            if a_has && b_has {
                let cache_a_h = caches_a.get(h_idx);
                let cache_b_h = caches_b.get(h_idx);
                let (layer, perm) = cca_align_and_blend_layer::<L>(
                    &parent_a.layers[h_idx],
                    &parent_b.layers[h_idx],
                    cache_a_h,
                    cache_b_h,
                    prev_perm.as_deref(),
                    child_def.size,
                    prev_child_size,
                    child_def.activation,
                    alpha,
                    rng,
                )?;
                layers.push(layer);
                prev_perm = perm;
            } else {
                layers.push(Layer::<L>::new(
                    prev_child_size,
                    child_def.size,
                    child_def.activation,
                    rng,
                ));
                // Fresh Xavier layer: no meaningful permutation to propagate.
                prev_perm = None;
            }
        }

        // ── Output layer: positional crossover or Xavier ─────────
        // unwrap is safe: num_child_hidden > 0 was checked above.
        let last_child_hidden = child_config.hidden_layers.last().map(|d| d.size).unwrap();
        let a_out = parent_a.layers.last().unwrap();
        let b_out = parent_b.layers.last().unwrap();
        let a_out_in = L::mat_cols(&a_out.weights);
        let b_out_in = L::mat_cols(&b_out.weights);

        if a_out_in == last_child_hidden && b_out_in == last_child_hidden {
            // Positional crossover with column permutation from last hidden layer
            let b_out_permuted = if let Some(ref pp) = prev_perm {
                permute_cols::<L>(&b_out.weights, pp)
            } else {
                b_out.weights.clone()
            };
            let out_rows = child_config.output_size;
            let mut weights = L::zeros_mat(out_rows, last_child_hidden);
            let mut biases = L::zeros_vec(out_rows);
            // Blend only rows both parents can supply; any rows beyond
            // blend_rows keep their zero initialization.
            let blend_rows = out_rows
                .min(L::mat_rows(&a_out.weights))
                .min(L::mat_rows(&b_out_permuted));
            for r in 0..blend_rows {
                for c in 0..last_child_hidden {
                    let va = L::mat_get(&a_out.weights, r, c);
                    let vb = L::mat_get(&b_out_permuted, r, c);
                    L::mat_set(&mut weights, r, c, alpha * va + (1.0 - alpha) * vb);
                }
                let ba = L::vec_get(&a_out.bias, r);
                let bb = L::vec_get(&b_out.bias, r);
                L::vec_set(&mut biases, r, alpha * ba + (1.0 - alpha) * bb);
            }
            layers.push(Layer {
                weights,
                bias: biases,
                activation: child_config.output_activation,
            });
        } else {
            layers.push(Layer::<L>::new(
                last_child_hidden,
                child_config.output_size,
                child_config.output_activation,
                rng,
            ));
        }

        // ── Residual components ──────────────────────────────────
        let (rezero_alpha, skip_projections) = if child_config.residual {
            let mut alphas = Vec::new();
            let mut projs = Vec::new();
            for i in 1..num_child_hidden {
                // ReZero alpha: blend if both parents have it
                let a_has_rz = i - 1 < parent_a.rezero_alpha.len();
                let b_has_rz = i - 1 < parent_b.rezero_alpha.len();
                let rz = if a_has_rz && b_has_rz {
                    alpha * parent_a.rezero_alpha[i - 1]
                        + (1.0 - alpha) * parent_b.rezero_alpha[i - 1]
                } else if a_has_rz {
                    parent_a.rezero_alpha[i - 1]
                } else if b_has_rz {
                    parent_b.rezero_alpha[i - 1]
                } else {
                    child_config.rezero_init
                };
                alphas.push(rz);

                // Skip projections
                let cur_size = child_config.hidden_layers[i].size;
                let prev_size = child_config.hidden_layers[i - 1].size;
                if cur_size != prev_size {
                    let a_proj = parent_a
                        .skip_projections
                        .get(i - 1)
                        .and_then(|p| p.as_ref());
                    let b_proj = parent_b
                        .skip_projections
                        .get(i - 1)
                        .and_then(|p| p.as_ref());
                    if let (Some(ap), Some(bp)) = (a_proj, b_proj) {
                        // Blend only if both parent projections match the
                        // child's dimensions exactly; otherwise re-initialize.
                        if L::mat_rows(ap) == cur_size
                            && L::mat_cols(ap) == prev_size
                            && L::mat_rows(bp) == cur_size
                            && L::mat_cols(bp) == prev_size
                        {
                            // Blend projections
                            let mut proj = L::zeros_mat(cur_size, prev_size);
                            for r in 0..cur_size {
                                for c in 0..prev_size {
                                    let va = L::mat_get(ap, r, c);
                                    let vb = L::mat_get(bp, r, c);
                                    L::mat_set(&mut proj, r, c, alpha * va + (1.0 - alpha) * vb);
                                }
                            }
                            projs.push(Some(proj));
                        } else {
                            projs.push(Some(L::xavier_mat(cur_size, prev_size, rng)));
                        }
                    } else {
                        projs.push(Some(L::xavier_mat(cur_size, prev_size, rng)));
                    }
                } else {
                    // Same width: identity skip, no projection.
                    projs.push(None);
                }
            }
            (alphas, projs)
        } else {
            (Vec::new(), Vec::new())
        };

        Ok(Self {
            layers,
            config: child_config,
            rezero_alpha,
            skip_projections,
        })
    }
506
507    /// Returns the total size of the latent concatenation (sum of hidden layer sizes).
508    pub fn latent_size(&self) -> usize {
509        self.config.hidden_layers.iter().map(|def| def.size).sum()
510    }
511
512    /// Runs the predictive coding inference loop on the given input.
513    ///
514    /// This method is `&self` — it never modifies weights.
515    ///
516    /// # Arguments
517    ///
518    /// * `input` - Input vector of length `input_size`.
519    ///
520    /// # Panics
521    ///
522    /// Panics if `input.len() != config.input_size`.
523    /// Returns whether hidden layer `i` has a skip connection (identity or projection).
524    fn is_skip_layer(&self, i: usize) -> bool {
525        self.config.residual && i >= 1
526    }
527
528    /// Returns the rezero_alpha/skip_projections index for hidden layer `i`.
529    fn skip_alpha_index(&self, i: usize) -> Option<usize> {
530        if !self.is_skip_layer(i) {
531            return None;
532        }
533        Some(i - 1)
534    }
535
    /// Runs the predictive coding inference loop on the given input.
    ///
    /// Performs a standard forward pass to initialize hidden states and
    /// output, then iterates top-down PC updates (`h += alpha * error`)
    /// until the RMS prediction error drops below `tol` (after at least
    /// `min_steps`) or `max_steps` is exhausted.
    ///
    /// This method is `&self` — it never modifies weights.
    ///
    /// # Arguments
    ///
    /// * `input` - Input vector of length `input_size`.
    ///
    /// # Panics
    ///
    /// Panics if `input.len() != config.input_size`.
    pub fn infer(&self, input: &[f64]) -> InferResult<L> {
        assert_eq!(
            input.len(),
            self.config.input_size,
            "input size mismatch: got {}, expected {}",
            input.len(),
            self.config.input_size
        );

        let input_vec = L::vec_from_slice(input);
        let n_hidden = self.config.hidden_layers.len();

        // Forward pass to initialize hidden states and output
        let mut hidden_states: Vec<L::Vector> = Vec::with_capacity(n_hidden);
        let mut tanh_components: Vec<Option<L::Vector>> = Vec::with_capacity(n_hidden);
        let mut prev = input_vec.clone();
        for (i, layer) in self.layers[..n_hidden].iter().enumerate() {
            let tanh_out = layer.forward(&prev);
            if let Some(alpha_idx) = self.skip_alpha_index(i) {
                // Residual layer: h[i] = skip_path(prev) + rezero_alpha * tanh_out.
                let alpha = self.rezero_alpha[alpha_idx];
                let scaled = L::vec_scale(&tanh_out, alpha);
                let skip_path = if let Some(ref proj) = self.skip_projections[alpha_idx] {
                    L::mat_vec_mul(proj, &prev)
                } else {
                    prev.clone()
                };
                prev = L::vec_add(&skip_path, &scaled);
                tanh_components.push(Some(tanh_out));
            } else {
                prev = tanh_out;
                tanh_components.push(None);
            }
            hidden_states.push(prev.clone());
        }
        // Output from last hidden (or input if no hidden)
        let last_input = if n_hidden > 0 {
            &hidden_states[n_hidden - 1]
        } else {
            &input_vec
        };
        let mut y = self.layers[n_hidden].forward(last_input);

        // PC inference loop
        let mut steps_used = 0;
        let mut converged = false;
        let mut surprise_score = 0.0;
        let mut last_errors: Vec<L::Vector> = Vec::new();

        for step in 0..self.config.max_steps {
            steps_used = step + 1;

            if self.config.synchronous {
                // Snapshot mode: freeze all states so every layer's update
                // reads the values from the start of this step.
                let snapshot: Vec<L::Vector> = hidden_states.clone();
                let tanh_snap: Vec<Option<L::Vector>> = tanh_components.clone();

                let mut error_vecs: Vec<L::Vector> = Vec::new();

                for i in (0..n_hidden).rev() {
                    // For top-down prediction, use tanh_component of layer above
                    // (not the full residual sum) when it is a skip layer.
                    let state_above = if i == n_hidden - 1 {
                        &y
                    } else if let Some(ref tc) = tanh_snap[i + 1] {
                        tc
                    } else {
                        &snapshot[i + 1]
                    };

                    // Top-down prediction targets tanh_component for skip layers
                    let target = if let Some(ref tc) = tanh_snap[i] {
                        tc
                    } else {
                        &snapshot[i]
                    };

                    let prediction = self.layers[i + 1]
                        .transpose_forward(state_above, self.config.hidden_layers[i].activation);

                    let error = L::vec_sub(&prediction, target);
                    error_vecs.push(error.clone());

                    // Update tanh_component or hidden_state
                    let updated_target =
                        L::vec_add(target, &L::vec_scale(&error, self.config.alpha));
                    if let Some(alpha_idx) = self.skip_alpha_index(i) {
                        // Skip layer: re-derive the full state from the
                        // (updated) tanh component plus the residual path.
                        tanh_components[i] = Some(updated_target.clone());
                        let alpha = self.rezero_alpha[alpha_idx];
                        let prev_h = if i > 0 {
                            &hidden_states[i - 1]
                        } else {
                            &input_vec
                        };
                        let skip_path = if let Some(ref proj) = self.skip_projections[alpha_idx] {
                            L::mat_vec_mul(proj, prev_h)
                        } else {
                            prev_h.clone()
                        };
                        hidden_states[i] =
                            L::vec_add(&skip_path, &L::vec_scale(&updated_target, alpha));
                    } else {
                        hidden_states[i] = updated_target;
                    }
                }

                // Refresh the output from the (possibly updated) top state.
                let top_hidden = if n_hidden > 0 {
                    &hidden_states[n_hidden - 1]
                } else {
                    &input_vec
                };
                y = self.layers[n_hidden].forward(top_hidden);

                let refs: Vec<&L::Vector> = error_vecs.iter().collect();
                surprise_score = L::rms_error(&refs);
                last_errors = error_vecs;
            } else {
                // In-place mode: updates immediately visible
                let mut error_vecs: Vec<L::Vector> = Vec::new();

                for i in (0..n_hidden).rev() {
                    // For top-down prediction, use tanh_component of layer above
                    // (not the full residual sum) when it is a skip layer.
                    let state_above = if i == n_hidden - 1 {
                        &y
                    } else if let Some(ref tc) = tanh_components[i + 1] {
                        tc
                    } else {
                        &hidden_states[i + 1]
                    };

                    // Clone target: the live arrays are mutated below.
                    let target = if let Some(ref tc) = tanh_components[i] {
                        tc.clone()
                    } else {
                        hidden_states[i].clone()
                    };

                    let prediction = self.layers[i + 1]
                        .transpose_forward(state_above, self.config.hidden_layers[i].activation);

                    let error = L::vec_sub(&prediction, &target);
                    error_vecs.push(error.clone());

                    let updated_target =
                        L::vec_add(&target, &L::vec_scale(&error, self.config.alpha));
                    if let Some(alpha_idx) = self.skip_alpha_index(i) {
                        // Skip layer: re-derive the full state from the
                        // (updated) tanh component plus the residual path.
                        tanh_components[i] = Some(updated_target.clone());
                        let alpha = self.rezero_alpha[alpha_idx];
                        let prev_h = if i > 0 {
                            &hidden_states[i - 1]
                        } else {
                            &input_vec
                        };
                        let skip_path = if let Some(ref proj) = self.skip_projections[alpha_idx] {
                            L::mat_vec_mul(proj, prev_h)
                        } else {
                            prev_h.clone()
                        };
                        hidden_states[i] =
                            L::vec_add(&skip_path, &L::vec_scale(&updated_target, alpha));
                    } else {
                        hidden_states[i] = updated_target;
                    }
                }

                // Refresh the output from the (possibly updated) top state.
                let top_hidden = if n_hidden > 0 {
                    &hidden_states[n_hidden - 1]
                } else {
                    &input_vec
                };
                y = self.layers[n_hidden].forward(top_hidden);

                let refs: Vec<&L::Vector> = error_vecs.iter().collect();
                surprise_score = L::rms_error(&refs);
                last_errors = error_vecs;
            }

            // Convergence check (alpha must be > 0 for meaningful convergence)
            if self.config.alpha > 0.0
                && step + 1 >= self.config.min_steps
                && surprise_score < self.config.tol
            {
                converged = true;
                break;
            }
        }

        // Build latent_concat (uses vec_to_vec for GPU compatibility)
        let mut latent_raw: Vec<f64> = Vec::new();
        for h in &hidden_states {
            latent_raw.extend_from_slice(&L::vec_to_vec(h));
        }
        let latent_concat = L::vec_from_slice(&latent_raw);

        InferResult {
            y_conv: y,
            latent_concat,
            hidden_states,
            prediction_errors: last_errors,
            surprise_score,
            steps_used,
            converged,
            tanh_components,
        }
    }
740
741    /// Selects an action given converged output logits and valid actions.
742    ///
743    /// # Arguments
744    ///
745    /// * `y_conv` - Output logits from inference.
746    /// * `valid_actions` - Indices of valid actions.
747    /// * `mode` - Training (stochastic) or Play (deterministic).
748    /// * `rng` - Random number generator (used only in Training mode).
749    ///
750    /// # Panics
751    ///
752    /// Panics if `valid_actions` is empty.
753    pub fn select_action(
754        &self,
755        y_conv: &L::Vector,
756        valid_actions: &[usize],
757        mode: SelectionMode,
758        rng: &mut impl Rng,
759    ) -> usize {
760        assert!(!valid_actions.is_empty(), "valid_actions must not be empty");
761
762        // Scale logits by temperature
763        let scaled = L::vec_scale(y_conv, 1.0 / self.config.temperature);
764
765        let probs = L::softmax_masked(&scaled, valid_actions);
766
767        match mode {
768            SelectionMode::Play => L::argmax_masked(&probs, valid_actions),
769            SelectionMode::Training => L::sample_from_probs(&probs, valid_actions, rng),
770        }
771    }
772
773    /// Updates network weights using a blend of backprop and local PC error.
774    ///
775    /// The `local_lambda` config controls the blend: 1.0 = pure backprop,
776    /// 0.0 = pure local PC learning (Millidge et al. 2022), intermediate = hybrid.
777    ///
778    /// # Arguments
779    ///
780    /// * `output_delta` - Error signal at the output layer.
781    /// * `infer_result` - Result from the most recent inference.
782    /// * `input` - Original input that was fed to `infer`.
783    /// * `surprise_scale` - Multiplier on learning rate based on surprise.
784    ///
785    /// # Panics
786    ///
787    /// Panics if `input.len() != config.input_size`.
788    pub fn update_weights(
789        &mut self,
790        output_delta: &[f64],
791        infer_result: &InferResult<L>,
792        input: &[f64],
793        surprise_scale: f64,
794    ) {
795        assert_eq!(
796            input.len(),
797            self.config.input_size,
798            "input size mismatch: got {}, expected {}",
799            input.len(),
800            self.config.input_size
801        );
802
803        self.update_weights_hybrid(
804            output_delta,
805            infer_result,
806            input,
807            surprise_scale,
808            self.config.local_lambda,
809        );
810    }
811
    /// Hybrid weight update blending backprop and local PC error signals.
    ///
    /// For hidden layers, the effective delta is:
    /// `delta = lambda * backprop_delta + (1 - lambda) * pc_error`
    ///
    /// * `lambda = 1.0` → pure backprop (standard mode).
    /// * `lambda = 0.0` → pure local PC learning (Millidge et al. 2022).
    /// * `0 < lambda < 1` → hybrid blend.
    ///
    /// The output layer always uses standard backprop from `output_delta`.
    ///
    /// # Arguments
    ///
    /// * `output_delta` - Error signal at the output layer.
    /// * `infer_result` - Snapshot from the most recent inference; supplies the
    ///   hidden states, prediction errors, and (for skip-eligible layers) the
    ///   cached tanh components.
    /// * `input` - Original network input. Length is assumed to equal
    ///   `config.input_size`; the public `update_weights` wrapper asserts this.
    /// * `surprise_scale` - Multiplier applied on top of `lr_weights` for every
    ///   weight, bias, projection, and rezero-alpha update.
    /// * `lambda` - Backprop/PC blend factor as described above.
    ///
    /// NOTE: the statement order below is significant — `bp_delta` is mutated as
    /// the loop walks layers top-down, and the rezero-alpha / projection updates
    /// must happen against the pre-update values read earlier in the iteration.
    fn update_weights_hybrid(
        &mut self,
        output_delta: &[f64],
        infer_result: &InferResult<L>,
        input: &[f64],
        surprise_scale: f64,
        lambda: f64,
    ) {
        let input_vec = L::vec_from_slice(input);
        let output_delta_vec = L::vec_from_slice(output_delta);
        let n_hidden = self.config.hidden_layers.len();
        let n_layers = self.layers.len();

        // Output layer: always standard backward. Its input is the topmost
        // hidden state, or the raw input when there are no hidden layers.
        let output_input = if n_hidden > 0 {
            &infer_result.hidden_states[n_hidden - 1]
        } else {
            &input_vec
        };
        let output_output = &infer_result.y_conv;
        let mut bp_delta = self.layers[n_layers - 1].backward(
            output_input,
            output_output,
            &output_delta_vec,
            self.config.lr_weights,
            surprise_scale,
        );

        // Hidden layers (from top to bottom); bp_delta carries the gradient
        // propagated down from the layer above.
        for i in (0..n_hidden).rev() {
            let layer_input = if i > 0 {
                &infer_result.hidden_states[i - 1]
            } else {
                &input_vec
            };

            // Blend backprop delta with local PC error. The two epsilon
            // comparisons are fast paths that avoid scaling/adding vectors
            // when lambda is exactly 1.0 or 0.0.
            let effective_delta = if (lambda - 1.0).abs() < f64::EPSILON {
                bp_delta.clone()
            } else if lambda.abs() < f64::EPSILON {
                // Layer i reads prediction_errors[n_hidden - 1 - i], i.e. the
                // errors appear to be stored top-down — confirm against infer.
                let error_idx = n_hidden - 1 - i;
                infer_result.prediction_errors[error_idx].clone()
            } else {
                let error_idx = n_hidden - 1 - i;
                let pc_error = &infer_result.prediction_errors[error_idx];
                let bp_scaled = L::vec_scale(&bp_delta, lambda);
                let pc_scaled = L::vec_scale(pc_error, 1.0 - lambda);
                L::vec_add(&bp_scaled, &pc_scaled)
            };

            if let Some(alpha_idx) = self.skip_alpha_index(i) {
                // Skip-eligible layer: use tanh_out for derivative, scale by alpha,
                // add identity path to propagated gradient, update alpha.
                // tanh_components[i] is expected to be Some for skip layers;
                // unwrap here is an invariant, not input validation.
                let tanh_out = infer_result.tanh_components[i].as_ref().unwrap();
                let alpha = self.rezero_alpha[alpha_idx];
                let effective_lr = self.config.lr_weights * surprise_scale;

                // Scale delta by rezero_alpha for the nonlinear path
                let scaled_delta = L::vec_scale(&effective_delta, alpha);

                // Backward through the layer using tanh_out (not hidden_states[i])
                let propagated = self.layers[i].backward(
                    layer_input,
                    tanh_out,
                    &scaled_delta,
                    self.config.lr_weights,
                    surprise_scale,
                );

                // Update rezero_alpha: dL/d(alpha) = delta · tanh_out
                // (uses the unscaled effective_delta, and the alpha value read
                // before this update).
                let grad_alpha: f64 = L::vec_dot(&effective_delta, tanh_out);
                self.rezero_alpha[alpha_idx] -= effective_lr * grad_alpha;

                // Propagated delta = nonlinear path + skip path (identity or projection)
                if let Some(ref mut proj) = self.skip_projections[alpha_idx] {
                    // Projection path: W_proj^T × delta
                    let proj_t = L::mat_transpose(proj);
                    let skip_delta = L::mat_vec_mul(&proj_t, &effective_delta);
                    // Update projection: W_proj -= lr × outer(delta, layer_input)
                    // (skip_delta is computed from the pre-update projection).
                    let dw_proj = L::outer_product(&effective_delta, layer_input);
                    L::mat_scale_add(proj, &dw_proj, -effective_lr);
                    bp_delta = L::vec_add(&propagated, &skip_delta);
                } else {
                    // Identity path: + delta
                    bp_delta = L::vec_add(&propagated, &effective_delta);
                }
            } else {
                // Standard layer: use hidden_states[i] as output
                let layer_output = &infer_result.hidden_states[i];
                bp_delta = self.layers[i].backward(
                    layer_input,
                    layer_output,
                    &effective_delta,
                    self.config.lr_weights,
                    surprise_scale,
                );
            }
        }
    }
921
922    /// Extracts a serializable snapshot of current weights.
923    ///
924    /// Converts generic layers and skip projections to CPU-backed types.
925    pub fn to_weights(&self) -> crate::serializer::PcActorWeights {
926        let cpu_layers: Vec<Layer<CpuLinAlg>> = self
927            .layers
928            .iter()
929            .map(|layer| {
930                let rows = L::mat_rows(&layer.weights);
931                let cols = L::mat_cols(&layer.weights);
932                let mut cpu_weights = crate::matrix::Matrix::zeros(rows, cols);
933                for r in 0..rows {
934                    for c in 0..cols {
935                        cpu_weights.set(r, c, L::mat_get(&layer.weights, r, c));
936                    }
937                }
938                let bias_data = L::vec_to_vec(&layer.bias);
939                Layer {
940                    weights: cpu_weights,
941                    bias: bias_data,
942                    activation: layer.activation,
943                }
944            })
945            .collect();
946        let cpu_projs: Vec<Option<crate::matrix::Matrix>> = self
947            .skip_projections
948            .iter()
949            .map(|opt| {
950                opt.as_ref().map(|m| {
951                    let rows = L::mat_rows(m);
952                    let cols = L::mat_cols(m);
953                    let mut cpu_m = crate::matrix::Matrix::zeros(rows, cols);
954                    for r in 0..rows {
955                        for c in 0..cols {
956                            cpu_m.set(r, c, L::mat_get(m, r, c));
957                        }
958                    }
959                    cpu_m
960                })
961            })
962            .collect();
963        crate::serializer::PcActorWeights {
964            layers: cpu_layers,
965            rezero_alpha: self.rezero_alpha.clone(),
966            skip_projections: cpu_projs,
967        }
968    }
969
970    /// Restores an actor from saved weights without requiring an RNG.
971    ///
972    /// Converts CPU-backed weight snapshots to the target backend `L`.
973    pub fn from_weights(config: PcActorConfig, weights: crate::serializer::PcActorWeights) -> Self {
974        let layers: Vec<Layer<L>> = weights
975            .layers
976            .into_iter()
977            .map(|cpu_layer| {
978                let rows = cpu_layer.weights.rows;
979                let cols = cpu_layer.weights.cols;
980                let mut mat = L::zeros_mat(rows, cols);
981                for r in 0..rows {
982                    for c in 0..cols {
983                        L::mat_set(&mut mat, r, c, cpu_layer.weights.get(r, c));
984                    }
985                }
986                let bias = L::vec_from_slice(&cpu_layer.bias);
987                Layer {
988                    weights: mat,
989                    bias,
990                    activation: cpu_layer.activation,
991                }
992            })
993            .collect();
994        let skip_projections: Vec<Option<L::Matrix>> = weights
995            .skip_projections
996            .into_iter()
997            .map(|opt| {
998                opt.map(|cpu_m| {
999                    let rows = cpu_m.rows;
1000                    let cols = cpu_m.cols;
1001                    let mut mat = L::zeros_mat(rows, cols);
1002                    for r in 0..rows {
1003                        for c in 0..cols {
1004                            L::mat_set(&mut mat, r, c, cpu_m.get(r, c));
1005                        }
1006                    }
1007                    mat
1008                })
1009            })
1010            .collect();
1011        Self {
1012            layers,
1013            config,
1014            rezero_alpha: weights.rezero_alpha,
1015            skip_projections,
1016        }
1017    }
1018}
1019
1020/// Permute columns of a weight matrix according to a permutation.
1021/// `perm[i]` = source column index for destination column i.
1022pub(crate) fn permute_cols<L: LinAlg>(m: &L::Matrix, perm: &[usize]) -> L::Matrix {
1023    let rows = L::mat_rows(m);
1024    let cols = L::mat_cols(m);
1025    let perm_len = perm.len();
1026    let mut result = L::zeros_mat(rows, cols);
1027    for (dst, &src) in perm.iter().enumerate().take(cols.min(perm_len)) {
1028        if src < cols {
1029            for r in 0..rows {
1030                L::mat_set(&mut result, r, dst, L::mat_get(m, r, src));
1031            }
1032        }
1033    }
1034    // Copy remaining columns (beyond permutation length) in original order
1035    for dst in perm_len..cols {
1036        for r in 0..rows {
1037            L::mat_set(&mut result, r, dst, L::mat_get(m, r, dst));
1038        }
1039    }
1040    result
1041}
1042
1043/// Permute rows of a weight matrix according to a permutation.
1044/// `perm[i]` = source row index for destination row i.
1045pub(crate) fn permute_rows<L: LinAlg>(m: &L::Matrix, perm: &[usize], n: usize) -> L::Matrix {
1046    let cols = L::mat_cols(m);
1047    let perm_len = perm.len();
1048    let mut result = L::zeros_mat(n, cols);
1049    for (dst, &src) in perm.iter().enumerate().take(n.min(perm_len)) {
1050        if src < L::mat_rows(m) {
1051            for c in 0..cols {
1052                L::mat_set(&mut result, dst, c, L::mat_get(m, src, c));
1053            }
1054        }
1055    }
1056    // Copy remaining rows (unmatched) in original order
1057    for dst in perm_len..n {
1058        if dst < L::mat_rows(m) {
1059            for c in 0..cols {
1060                L::mat_set(&mut result, dst, c, L::mat_get(m, dst, c));
1061            }
1062        }
1063    }
1064    result
1065}
1066
1067/// Permute elements of a bias vector according to a permutation.
1068pub(crate) fn permute_vec<L: LinAlg>(v: &L::Vector, perm: &[usize], n: usize) -> L::Vector {
1069    let perm_len = perm.len();
1070    let mut result = L::zeros_vec(n);
1071    for (dst, &src) in perm.iter().enumerate().take(n.min(perm_len)) {
1072        if src < L::vec_len(v) {
1073            L::vec_set(&mut result, dst, L::vec_get(v, src));
1074        }
1075    }
1076    for dst in perm_len..n {
1077        if dst < L::vec_len(v) {
1078            L::vec_set(&mut result, dst, L::vec_get(v, dst));
1079        }
1080    }
1081    result
1082}
1083
1084/// Blend weights from two parent layers into a child layer.
1085/// Handles all 4 dimension cases (equal, child smaller, parents differ, child larger).
1086///
1087/// * `parent_a` - (weights, bias, neuron_count) for parent A.
1088/// * `parent_b` - (weights, bias, neuron_count) for parent B (already CCA-aligned).
1089/// * `child_cols` - Number of columns (input size) for child layer.
1090#[allow(clippy::too_many_arguments)]
1091pub(crate) fn blend_layer_weights<L: LinAlg>(
1092    parent_a: (&L::Matrix, &L::Vector, usize),
1093    parent_b: (&L::Matrix, &L::Vector, usize),
1094    n_child: usize,
1095    child_cols: usize,
1096    alpha: f64,
1097    rng: &mut impl Rng,
1098) -> (L::Matrix, L::Vector) {
1099    let (a_weights, a_biases, n_a) = parent_a;
1100    let (b_weights, b_biases, n_b) = parent_b;
1101    let n_min = n_a.min(n_b);
1102    let n_max = n_a.max(n_b);
1103    let a_cols = L::mat_cols(a_weights);
1104    let b_cols = L::mat_cols(b_weights);
1105    let use_cols = child_cols.min(a_cols).min(b_cols);
1106
1107    let mut weights = L::zeros_mat(n_child, child_cols);
1108    let mut biases = L::zeros_vec(n_child);
1109
1110    // Blending zone [0..min(n_min, n_child))
1111    let blend_end = n_min.min(n_child);
1112    for r in 0..blend_end {
1113        for c in 0..use_cols {
1114            let va = L::mat_get(a_weights, r, c);
1115            let vb = L::mat_get(b_weights, r, c);
1116            L::mat_set(&mut weights, r, c, alpha * va + (1.0 - alpha) * vb);
1117        }
1118        let ba = L::vec_get(a_biases, r);
1119        let bb = L::vec_get(b_biases, r);
1120        L::vec_set(&mut biases, r, alpha * ba + (1.0 - alpha) * bb);
1121    }
1122
1123    // Copy zone [n_min..min(n_max, n_child)) from the larger parent
1124    let copy_end = n_max.min(n_child);
1125    if copy_end > blend_end {
1126        let (larger_w, larger_b) = if n_a >= n_b {
1127            (a_weights, a_biases)
1128        } else {
1129            (b_weights, b_biases)
1130        };
1131        let larger_cols = L::mat_cols(larger_w);
1132        for r in blend_end..copy_end {
1133            for c in 0..child_cols.min(larger_cols) {
1134                L::mat_set(&mut weights, r, c, L::mat_get(larger_w, r, c));
1135            }
1136            L::vec_set(&mut biases, r, L::vec_get(larger_b, r));
1137        }
1138    }
1139
1140    // Xavier zone [n_max..n_child) for new neurons
1141    if n_child > n_max {
1142        let xavier = L::xavier_mat(n_child - n_max, child_cols, rng);
1143        for r in n_max..n_child {
1144            for c in 0..child_cols {
1145                L::mat_set(&mut weights, r, c, L::mat_get(&xavier, r - n_max, c));
1146            }
1147            // biases stay zero for Xavier zone
1148        }
1149    }
1150
1151    (weights, biases)
1152}
1153
1154/// CCA-aligns and blends a single hidden layer from two parents.
1155///
1156/// Handles the common pattern: CCA alignment → column permutation from
1157/// previous layer → row permutation → blend. Returns the blended layer
1158/// and the CCA permutation applied (for column propagation to the next layer).
1159///
1160/// * `prev_perm` — Permutation from the previous layer to apply to columns.
1161///   Pass `None` to skip column propagation.
1162#[allow(clippy::too_many_arguments)]
1163pub(crate) fn cca_align_and_blend_layer<L: LinAlg>(
1164    a_layer: &Layer<L>,
1165    b_layer: &Layer<L>,
1166    cache_a: Option<&L::Matrix>,
1167    cache_b: Option<&L::Matrix>,
1168    prev_perm: Option<&[usize]>,
1169    child_rows: usize,
1170    child_cols: usize,
1171    child_activation: Activation,
1172    alpha: f64,
1173    rng: &mut impl Rng,
1174) -> Result<(Layer<L>, Option<Vec<usize>>), crate::error::PcError> {
1175    let n_a = L::mat_rows(&a_layer.weights);
1176    let n_b = L::mat_rows(&b_layer.weights);
1177
1178    // CCA alignment
1179    let perm = if let (Some(ca), Some(cb)) = (cache_a, cache_b) {
1180        Some(crate::matrix::cca_neuron_alignment::<L>(ca, cb)?)
1181    } else {
1182        None
1183    };
1184
1185    // Apply previous layer's permutation to columns of parent B
1186    let b_weights_col = if let Some(pp) = prev_perm {
1187        permute_cols::<L>(&b_layer.weights, pp)
1188    } else {
1189        b_layer.weights.clone()
1190    };
1191
1192    // Apply CCA row permutation to parent B
1193    let b_weights_aligned = if let Some(ref p) = perm {
1194        permute_rows::<L>(&b_weights_col, p, n_b)
1195    } else {
1196        b_weights_col
1197    };
1198    let b_bias_aligned = if let Some(ref p) = perm {
1199        permute_vec::<L>(&b_layer.bias, p, n_b)
1200    } else {
1201        b_layer.bias.clone()
1202    };
1203
1204    let (weights, biases) = blend_layer_weights::<L>(
1205        (&a_layer.weights, &a_layer.bias, n_a),
1206        (&b_weights_aligned, &b_bias_aligned, n_b),
1207        child_rows,
1208        child_cols,
1209        alpha,
1210        rng,
1211    );
1212
1213    Ok((
1214        Layer {
1215            weights,
1216            bias: biases,
1217            activation: child_activation,
1218        },
1219        perm,
1220    ))
1221}
1222
1223#[cfg(test)]
1224mod tests {
1225    use super::*;
1226    use crate::activation::Activation;
1227    use crate::layer::LayerDef;
1228    use crate::matrix::WEIGHT_CLIP;
1229    use rand::rngs::StdRng;
1230    use rand::SeedableRng;
1231
    /// Deterministic RNG so every test sees identical weight initialization.
    fn make_rng() -> StdRng {
        StdRng::seed_from_u64(42)
    }
1235
    /// Baseline config: 9→18→9 tic-tac-toe-sized network with one Tanh
    /// hidden layer, pure-backprop learning, and no residual connections.
    fn default_config() -> PcActorConfig {
        PcActorConfig {
            input_size: 9,
            hidden_layers: vec![LayerDef {
                size: 18,
                activation: Activation::Tanh,
            }],
            output_size: 9,
            output_activation: Activation::Tanh,
            alpha: 0.1,
            tol: 0.01,
            min_steps: 1,
            max_steps: 20,
            lr_weights: 0.01,
            synchronous: true,
            temperature: 1.0,
            local_lambda: 1.0,
            residual: false,
            rezero_init: 0.001,
        }
    }
1257
    /// Variant of `default_config` with two hidden layers (18 then 12 units).
    fn two_hidden_config() -> PcActorConfig {
        PcActorConfig {
            hidden_layers: vec![
                LayerDef {
                    size: 18,
                    activation: Activation::Tanh,
                },
                LayerDef {
                    size: 12,
                    activation: Activation::Tanh,
                },
            ],
            ..default_config()
        }
    }
1273
1274    // ── Inference Tests ──────────────────────────────────────────────
1275
    // Inference on an all-zero board must complete and yield finite logits.
    #[test]
    fn test_infer_converges_on_zero_board() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        // Should complete without panic; all finite
        for &v in &result.y_conv {
            assert!(v.is_finite());
        }
    }
1286
    // min_steps lower-bounds the PC loop even when tolerance is met earlier.
    #[test]
    fn test_infer_steps_used_at_least_min_steps() {
        let mut rng = make_rng();
        let config = PcActorConfig {
            min_steps: 3,
            ..default_config()
        };
        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        assert!(result.steps_used >= 3);
    }
1298
    // With alpha = 0 the state never updates, so convergence is impossible and
    // all max_steps (20, from default_config) are consumed.
    #[test]
    fn test_infer_alpha_zero_does_not_converge() {
        let mut rng = make_rng();
        let config = PcActorConfig {
            alpha: 0.0,
            ..default_config()
        };
        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        assert!(!result.converged);
        assert_eq!(result.steps_used, 20);
    }
1311
    // infer takes &self — weight matrices must be byte-identical afterwards.
    #[test]
    fn test_infer_does_not_modify_weights() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let weights_before: Vec<Vec<f64>> = actor
            .layers
            .iter()
            .map(|l| l.weights.data.clone())
            .collect();
        let _ = actor.infer(&[0.0; 9]);
        for (i, layer) in actor.layers.iter().enumerate() {
            assert_eq!(layer.weights.data, weights_before[i]);
        }
    }
1326
    // latent_concat length equals the single hidden layer size (18).
    #[test]
    fn test_infer_latent_size_single_hidden() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        assert_eq!(result.latent_concat.len(), 18);
    }
1334
    // latent_concat concatenates both hidden layers: 18 + 12 = 30.
    #[test]
    fn test_infer_latent_size_two_hidden() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        assert_eq!(result.latent_concat.len(), 30);
    }
1342
    // The latent_size() accessor must agree with the actual concatenation.
    #[test]
    fn test_infer_latent_size_matches_latent_size_method() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        assert_eq!(result.latent_concat.len(), actor.latent_size());
    }
1350
    // Output logits length must match config.output_size (9).
    #[test]
    fn test_infer_y_conv_length_equals_output_size() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        assert_eq!(result.y_conv.len(), 9);
    }
1358
    // One hidden state per configured hidden layer.
    #[test]
    fn test_infer_hidden_states_count_matches_hidden_layers() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        assert_eq!(result.hidden_states.len(), 2);
    }
1366
    // A mixed-sign, non-trivial board must yield finite logits, latents,
    // and surprise score (no NaN/inf from the PC loop).
    #[test]
    fn test_infer_all_outputs_finite() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
        for &v in &result.y_conv {
            assert!(v.is_finite());
        }
        for &v in &result.latent_concat {
            assert!(v.is_finite());
        }
        assert!(result.surprise_score.is_finite());
    }
1380
    // Surprise is an RMS-style error measure and can never be negative.
    #[test]
    fn test_infer_surprise_score_nonnegative() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let result = actor.infer(&[0.0; 9]);
        assert!(result.surprise_score >= 0.0);
    }
1388
    // Both state-update schedules (synchronous and in-place) must run the
    // inference loop without panicking and take at least one step.
    #[test]
    fn test_infer_synchronous_and_inplace_both_converge() {
        let mut rng = make_rng();
        let sync_actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let mut rng2 = make_rng();
        let inplace_config = PcActorConfig {
            synchronous: false,
            ..default_config()
        };
        let inplace_actor: PcActor = PcActor::new(inplace_config, &mut rng2).unwrap();
        let sync_result = sync_actor.infer(&[0.0; 9]);
        let inplace_result = inplace_actor.infer(&[0.0; 9]);
        // Both should complete without panic; at least one should converge or use all steps
        assert!(sync_result.steps_used > 0);
        assert!(inplace_result.steps_used > 0);
    }
1405
    // Same seed, same input, same topology — only the update schedule differs.
    // With two hidden layers, a large alpha, unreachable tolerance, and few
    // steps, the update ordering must leave a measurable trace in the latents.
    #[test]
    fn test_infer_synchronous_produces_different_result_than_inplace() {
        let mut rng = make_rng();
        let config = PcActorConfig {
            hidden_layers: vec![
                LayerDef {
                    size: 18,
                    activation: Activation::Tanh,
                },
                LayerDef {
                    size: 12,
                    activation: Activation::Tanh,
                },
            ],
            alpha: 0.3,
            tol: 1e-15,
            min_steps: 1,
            max_steps: 3,
            ..default_config()
        };
        let sync_actor: PcActor = PcActor::new(config.clone(), &mut rng).unwrap();
        let mut rng2 = make_rng();
        let inplace_config = PcActorConfig {
            synchronous: false,
            ..config
        };
        let inplace_actor: PcActor = PcActor::new(inplace_config, &mut rng2).unwrap();
        let input = [1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
        let sync_result = sync_actor.infer(&input);
        let inplace_result = inplace_actor.infer(&input);
        // Different update orders should produce different hidden representations
        let differs = sync_result
            .latent_concat
            .iter()
            .zip(inplace_result.latent_concat.iter())
            .any(|(a, b)| (a - b).abs() > 1e-12);
        assert!(
            differs,
            "Synchronous and in-place should produce different results"
        );
    }
1447
    // A 5-element input against input_size = 9 must trip the size assertion.
    #[test]
    #[should_panic(expected = "input size")]
    fn test_infer_panics_wrong_input_length() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let _ = actor.infer(&[0.0; 5]);
    }
1455
1456    // ── Action Selection Tests ───────────────────────────────────────
1457
    // Stochastic (Training) selection must never leave the valid-action mask.
    #[test]
    fn test_select_action_training_always_in_valid() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let logits = vec![0.1, -0.2, 0.5, -0.1, 0.3, 0.0, -0.3, 0.2, 0.4];
        let valid = vec![0, 2, 4, 6, 8];
        for _ in 0..20 {
            let action = actor.select_action(&logits, &valid, SelectionMode::Training, &mut rng);
            assert!(valid.contains(&action));
        }
    }
1469
    // Play mode is argmax-based: two differently-seeded RNGs must agree.
    #[test]
    fn test_select_action_play_mode_deterministic() {
        let mut rng1 = StdRng::seed_from_u64(1);
        let mut rng2 = StdRng::seed_from_u64(99);
        let mut rng_init = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng_init).unwrap();
        let logits = vec![0.1, -0.2, 0.5, -0.1, 0.3, 0.0, -0.3, 0.2, 0.4];
        let valid = vec![0, 2, 4, 6, 8];
        let a1 = actor.select_action(&logits, &valid, SelectionMode::Play, &mut rng1);
        let a2 = actor.select_action(&logits, &valid, SelectionMode::Play, &mut rng2);
        assert_eq!(a1, a2, "Play mode should be deterministic");
    }
1482
    // Temperature 5.0 flattens a strongly peaked logit vector enough that
    // 100 samples should land on more than one action.
    #[test]
    fn test_select_action_temperature_gt_one_more_uniform() {
        let mut rng = make_rng();
        let hot_config = PcActorConfig {
            temperature: 5.0,
            ..default_config()
        };
        let actor: PcActor = PcActor::new(hot_config, &mut rng).unwrap();
        // With high temperature, sampling should visit more actions
        let logits = vec![10.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
        let valid: Vec<usize> = (0..9).collect();
        let mut seen = std::collections::HashSet::new();
        let mut rng2 = StdRng::seed_from_u64(123);
        for _ in 0..100 {
            let a = actor.select_action(&logits, &valid, SelectionMode::Training, &mut rng2);
            seen.insert(a);
        }
        assert!(seen.len() > 1, "High temperature should explore more");
    }
1502
    // select_action documents a panic on an empty valid-action set.
    #[test]
    #[should_panic]
    fn test_select_action_empty_valid_panics() {
        let mut rng = make_rng();
        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let logits = vec![0.1; 9];
        let _ = actor.select_action(&logits, &[], SelectionMode::Training, &mut rng);
    }
1511
1512    // ── Weight Update Tests ──────────────────────────────────────────
1513
    // A nonzero output delta must propagate all the way down to layer 0.
    #[test]
    fn test_update_weights_changes_first_layer() {
        let mut rng = make_rng();
        let mut actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
        let infer_result = actor.infer(&input);
        let weights_before = actor.layers[0].weights.data.clone();
        let delta = vec![0.1; 9];
        actor.update_weights(&delta, &infer_result, &input, 1.0);
        assert_ne!(actor.layers[0].weights.data, weights_before);
    }
1525
    // A huge (1e6) delta must not blow up any weight past WEIGHT_CLIP.
    #[test]
    fn test_update_weights_clips_all_layers() {
        let mut rng = make_rng();
        let mut actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let input = vec![1.0; 9];
        let infer_result = actor.infer(&input);
        let delta = vec![1e6; 9];
        actor.update_weights(&delta, &infer_result, &input, 1.0);
        for layer in &actor.layers {
            for &w in &layer.weights.data {
                assert!(
                    w.abs() <= WEIGHT_CLIP + 1e-12,
                    "Weight {w} exceeds WEIGHT_CLIP"
                );
            }
        }
    }
1543
    // With two hidden layers, both hidden weight matrices must receive updates.
    #[test]
    fn test_update_weights_two_hidden_changes_both_layers() {
        let mut rng = make_rng();
        let mut actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
        let input = vec![0.5; 9];
        let infer_result = actor.infer(&input);
        let w0_before = actor.layers[0].weights.data.clone();
        let w1_before = actor.layers[1].weights.data.clone();
        let delta = vec![0.1; 9];
        actor.update_weights(&delta, &infer_result, &input, 1.0);
        assert_ne!(
            actor.layers[0].weights.data, w0_before,
            "Layer 0 should change"
        );
        assert_ne!(
            actor.layers[1].weights.data, w1_before,
            "Layer 1 should change"
        );
    }
1563
    // update_weights asserts the input length; a 5-element input must panic.
    #[test]
    #[should_panic(expected = "input size")]
    fn test_update_weights_panics_wrong_x_size() {
        let mut rng = make_rng();
        let mut actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
        let input = vec![0.0; 9];
        let infer_result = actor.infer(&input);
        let delta = vec![0.1; 9];
        actor.update_weights(&delta, &infer_result, &[0.0; 5], 1.0);
    }
1574
1575    // ── Zero Hidden Layers Test ─────────────────────────────────
1576
1577    #[test]
1578    fn test_infer_zero_hidden_layers_produces_finite_output() {
1579        let mut rng = make_rng();
1580        let config = PcActorConfig {
1581            hidden_layers: vec![],
1582            ..default_config()
1583        };
1584        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1585        let result = actor.infer(&[0.5; 9]);
1586        assert_eq!(result.y_conv.len(), 9);
1587        assert!(result.y_conv.iter().all(|v| v.is_finite()));
1588        assert!(result.latent_concat.is_empty());
1589        assert!(result.hidden_states.is_empty());
1590    }
1591
1592    // ── Config Validation Tests ─────────────────────────────────
1593
1594    #[test]
1595    fn test_new_zero_input_size_returns_error() {
1596        let mut rng = make_rng();
1597        let config = PcActorConfig {
1598            input_size: 0,
1599            ..default_config()
1600        };
1601        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1602        assert!(result.is_err());
1603        let err = result.unwrap_err();
1604        assert!(matches!(err, crate::error::PcError::ConfigValidation(_)));
1605    }
1606
1607    #[test]
1608    fn test_new_zero_output_size_returns_error() {
1609        let mut rng = make_rng();
1610        let config = PcActorConfig {
1611            output_size: 0,
1612            ..default_config()
1613        };
1614        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1615        assert!(result.is_err());
1616    }
1617
1618    #[test]
1619    fn test_new_zero_temperature_returns_error() {
1620        let mut rng = make_rng();
1621        let config = PcActorConfig {
1622            temperature: 0.0,
1623            ..default_config()
1624        };
1625        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1626        assert!(result.is_err());
1627    }
1628
1629    #[test]
1630    fn test_new_negative_temperature_returns_error() {
1631        let mut rng = make_rng();
1632        let config = PcActorConfig {
1633            temperature: -1.0,
1634            ..default_config()
1635        };
1636        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1637        assert!(result.is_err());
1638    }
1639
1640    // ── Residual / ReZero Config Tests ────────────────────────
1641
1642    #[test]
1643    fn test_default_config_residual_false() {
1644        let config = default_config();
1645        assert!(!config.residual);
1646    }
1647
1648    #[test]
1649    fn test_default_config_rezero_init() {
1650        let config = default_config();
1651        assert!((config.rezero_init - 0.001).abs() < 1e-12);
1652    }
1653
1654    #[test]
1655    fn test_new_negative_rezero_init_returns_error() {
1656        let mut rng = make_rng();
1657        let config = PcActorConfig {
1658            residual: true,
1659            rezero_init: -0.1,
1660            ..default_config()
1661        };
1662        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1663        assert!(result.is_err());
1664    }
1665
1666    #[test]
1667    fn test_residual_mixed_sizes_accepted() {
1668        let mut rng = make_rng();
1669        let config = PcActorConfig {
1670            residual: true,
1671            hidden_layers: vec![
1672                LayerDef {
1673                    size: 27,
1674                    activation: Activation::Tanh,
1675                },
1676                LayerDef {
1677                    size: 18,
1678                    activation: Activation::Tanh,
1679                },
1680            ],
1681            ..default_config()
1682        };
1683        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1684        assert!(result.is_ok());
1685    }
1686
1687    #[test]
1688    fn test_residual_mixed_sizes_all_skip() {
1689        // [27, 27, 18]: ALL layers i>=1 get skip — identity for 27→27, projection for 27→18
1690        let mut rng = make_rng();
1691        let config = PcActorConfig {
1692            residual: true,
1693            hidden_layers: vec![
1694                LayerDef {
1695                    size: 27,
1696                    activation: Activation::Tanh,
1697                },
1698                LayerDef {
1699                    size: 27,
1700                    activation: Activation::Tanh,
1701                },
1702                LayerDef {
1703                    size: 18,
1704                    activation: Activation::Tanh,
1705                },
1706            ],
1707            ..default_config()
1708        };
1709        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1710        // 2 skips: layer 1 (identity) + layer 2 (projection)
1711        assert_eq!(actor.rezero_alpha.len(), 2);
1712    }
1713
1714    #[test]
1715    fn test_residual_heterogeneous_has_projection() {
1716        // [27, 18]: different sizes → projection matrix created
1717        let mut rng = make_rng();
1718        let config = PcActorConfig {
1719            residual: true,
1720            hidden_layers: vec![
1721                LayerDef {
1722                    size: 27,
1723                    activation: Activation::Tanh,
1724                },
1725                LayerDef {
1726                    size: 18,
1727                    activation: Activation::Tanh,
1728                },
1729            ],
1730            ..default_config()
1731        };
1732        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1733        assert_eq!(actor.rezero_alpha.len(), 1);
1734        assert_eq!(actor.skip_projections.len(), 1);
1735        assert!(actor.skip_projections[0].is_some());
1736        let proj = actor.skip_projections[0].as_ref().unwrap();
1737        assert_eq!(proj.rows, 18); // output dim
1738        assert_eq!(proj.cols, 27); // input dim
1739    }
1740
1741    #[test]
1742    fn test_residual_homogeneous_no_projection() {
1743        // [27, 27]: same sizes → no projection needed
1744        let mut rng = make_rng();
1745        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
1746        assert_eq!(actor.skip_projections.len(), 1);
1747        assert!(actor.skip_projections[0].is_none());
1748    }
1749
1750    #[test]
1751    fn test_residual_mixed_sizes_infer_finite() {
1752        let mut rng = make_rng();
1753        let config = PcActorConfig {
1754            residual: true,
1755            hidden_layers: vec![
1756                LayerDef {
1757                    size: 27,
1758                    activation: Activation::Tanh,
1759                },
1760                LayerDef {
1761                    size: 27,
1762                    activation: Activation::Tanh,
1763                },
1764                LayerDef {
1765                    size: 18,
1766                    activation: Activation::Tanh,
1767                },
1768            ],
1769            ..default_config()
1770        };
1771        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1772        let result = actor.infer(&[0.5; 9]);
1773        for &v in &result.y_conv {
1774            assert!(v.is_finite());
1775        }
1776        assert_eq!(result.hidden_states.len(), 3);
1777        assert_eq!(result.latent_concat.len(), 27 + 27 + 18);
1778    }
1779
1780    #[test]
1781    fn test_residual_same_size_hidden_layers_accepted() {
1782        let mut rng = make_rng();
1783        let config = PcActorConfig {
1784            residual: true,
1785            hidden_layers: vec![
1786                LayerDef {
1787                    size: 27,
1788                    activation: Activation::Tanh,
1789                },
1790                LayerDef {
1791                    size: 27,
1792                    activation: Activation::Tanh,
1793                },
1794            ],
1795            ..default_config()
1796        };
1797        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1798        assert!(result.is_ok());
1799    }
1800
1801    fn residual_two_hidden_config() -> PcActorConfig {
1802        PcActorConfig {
1803            residual: true,
1804            hidden_layers: vec![
1805                LayerDef {
1806                    size: 27,
1807                    activation: Activation::Tanh,
1808                },
1809                LayerDef {
1810                    size: 27,
1811                    activation: Activation::Tanh,
1812                },
1813            ],
1814            ..default_config()
1815        }
1816    }
1817
1818    #[test]
1819    fn test_non_residual_actor_empty_rezero_alpha() {
1820        let mut rng = make_rng();
1821        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1822        assert!(actor.rezero_alpha.is_empty());
1823    }
1824
1825    #[test]
1826    fn test_residual_two_hidden_one_rezero_alpha() {
1827        let mut rng = make_rng();
1828        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
1829        assert_eq!(actor.rezero_alpha.len(), 1);
1830    }
1831
1832    #[test]
1833    fn test_residual_three_hidden_two_rezero_alpha() {
1834        let mut rng = make_rng();
1835        let config = PcActorConfig {
1836            residual: true,
1837            hidden_layers: vec![
1838                LayerDef {
1839                    size: 27,
1840                    activation: Activation::Tanh,
1841                },
1842                LayerDef {
1843                    size: 27,
1844                    activation: Activation::Tanh,
1845                },
1846                LayerDef {
1847                    size: 27,
1848                    activation: Activation::Tanh,
1849                },
1850            ],
1851            ..default_config()
1852        };
1853        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1854        assert_eq!(actor.rezero_alpha.len(), 2);
1855    }
1856
1857    #[test]
1858    fn test_rezero_alpha_initialized_to_rezero_init() {
1859        let mut rng = make_rng();
1860        let config = PcActorConfig {
1861            rezero_init: 0.005,
1862            ..residual_two_hidden_config()
1863        };
1864        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1865        assert!((actor.rezero_alpha[0] - 0.005).abs() < 1e-12);
1866    }
1867
1868    #[test]
1869    fn test_residual_single_hidden_zero_rezero_alpha() {
1870        let mut rng = make_rng();
1871        let config = PcActorConfig {
1872            residual: true,
1873            ..default_config()
1874        };
1875        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1876        assert!(actor.rezero_alpha.is_empty());
1877    }
1878
1879    #[test]
1880    fn test_residual_single_hidden_accepted() {
1881        let mut rng = make_rng();
1882        let config = PcActorConfig {
1883            residual: true,
1884            ..default_config()
1885        };
1886        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1887        assert!(result.is_ok());
1888    }
1889
    // ── Local Learning (PC-based weight updates) Tests: see below, after the residual tests ──
1891
1892    // ── Residual Inference Tests ──────────────────────────────
1893
1894    #[test]
1895    fn test_residual_false_identical_to_non_residual() {
1896        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
1897        let mut rng1 = make_rng();
1898        let actor1: PcActor = PcActor::new(two_hidden_config(), &mut rng1).unwrap();
1899        let result1 = actor1.infer(&input);
1900
1901        let mut rng2 = make_rng();
1902        let config2 = PcActorConfig {
1903            residual: false,
1904            ..two_hidden_config()
1905        };
1906        let actor2: PcActor = PcActor::new(config2, &mut rng2).unwrap();
1907        let result2 = actor2.infer(&input);
1908
1909        for (a, b) in result1.y_conv.iter().zip(result2.y_conv.iter()) {
1910            assert!((a - b).abs() < 1e-12);
1911        }
1912    }
1913
1914    #[test]
1915    fn test_residual_rezero_zero_second_hidden_near_identity() {
1916        let mut rng = make_rng();
1917        let config = PcActorConfig {
1918            rezero_init: 0.0,
1919            alpha: 0.0,
1920            ..residual_two_hidden_config()
1921        };
1922        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1923        let result = actor.infer(&[0.5; 9]);
1924        let h0 = &result.hidden_states[0];
1925        let h1 = &result.hidden_states[1];
1926        for (a, b) in h0.iter().zip(h1.iter()) {
1927            assert!(
1928                (a - b).abs() < 1e-12,
1929                "With rezero_init=0, h[1] should equal h[0]"
1930            );
1931        }
1932    }
1933
1934    #[test]
1935    fn test_residual_infer_all_outputs_finite() {
1936        let mut rng = make_rng();
1937        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
1938        let result = actor.infer(&[0.5; 9]);
1939        for &v in &result.y_conv {
1940            assert!(v.is_finite());
1941        }
1942        for &v in &result.latent_concat {
1943            assert!(v.is_finite());
1944        }
1945        assert!(result.surprise_score.is_finite());
1946    }
1947
1948    #[test]
1949    fn test_residual_latent_concat_size() {
1950        let mut rng = make_rng();
1951        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
1952        let result = actor.infer(&[0.5; 9]);
1953        assert_eq!(result.latent_concat.len(), 54); // 27 + 27
1954    }
1955
1956    #[test]
1957    fn test_residual_pc_loop_completes() {
1958        let mut rng = make_rng();
1959        let config = PcActorConfig {
1960            alpha: 0.03,
1961            max_steps: 5,
1962            ..residual_two_hidden_config()
1963        };
1964        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1965        let result = actor.infer(&[0.5; 9]);
1966        assert!(result.steps_used > 0);
1967        assert!(result.steps_used <= 5);
1968    }
1969
1970    #[test]
1971    fn test_residual_hidden_states_count() {
1972        let mut rng = make_rng();
1973        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
1974        let result = actor.infer(&[0.5; 9]);
1975        assert_eq!(result.hidden_states.len(), 2);
1976    }
1977
1978    #[test]
1979    fn test_residual_infer_does_not_modify_weights() {
1980        let mut rng = make_rng();
1981        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
1982        let weights_before: Vec<Vec<f64>> = actor
1983            .layers
1984            .iter()
1985            .map(|l| l.weights.data.clone())
1986            .collect();
1987        let alpha_before = actor.rezero_alpha.clone();
1988        let _ = actor.infer(&[0.5; 9]);
1989        for (i, layer) in actor.layers.iter().enumerate() {
1990            assert_eq!(layer.weights.data, weights_before[i]);
1991        }
1992        assert_eq!(actor.rezero_alpha, alpha_before);
1993    }
1994
1995    #[test]
1996    fn test_residual_three_hidden_infer_finite() {
1997        let mut rng = make_rng();
1998        let config = PcActorConfig {
1999            residual: true,
2000            hidden_layers: vec![
2001                LayerDef {
2002                    size: 27,
2003                    activation: Activation::Tanh,
2004                },
2005                LayerDef {
2006                    size: 27,
2007                    activation: Activation::Tanh,
2008                },
2009                LayerDef {
2010                    size: 27,
2011                    activation: Activation::Tanh,
2012                },
2013            ],
2014            ..default_config()
2015        };
2016        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2017        let result = actor.infer(&[0.5; 9]);
2018        for &v in &result.y_conv {
2019            assert!(v.is_finite());
2020        }
2021    }
2022
2023    #[test]
2024    fn test_residual_tanh_components_populated() {
2025        let mut rng = make_rng();
2026        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2027        let result = actor.infer(&[0.5; 9]);
2028        assert_eq!(result.tanh_components.len(), 2);
2029        assert!(result.tanh_components[0].is_none()); // layer 0: no skip
2030        assert!(result.tanh_components[1].is_some()); // layer 1: has skip
2031        assert_eq!(result.tanh_components[1].as_ref().unwrap().len(), 27);
2032    }
2033
2034    #[test]
2035    fn test_residual_pc_prediction_uses_tanh_component_not_full_state() {
2036        // With rezero_init=1.0, h[1] = tanh_out + h[0] (significantly different
2037        // from tanh_out alone). If PC prediction uses h[1] instead of tanh_out,
2038        // the surprise score and convergence will differ.
2039        // Two runs with same weights: one with alpha=0 (no PC), one with alpha>0.
2040        // The PC loop should converge meaningfully (surprise decreases).
2041        let mut rng = make_rng();
2042        let config = PcActorConfig {
2043            rezero_init: 1.0,
2044            alpha: 0.1,
2045            max_steps: 20,
2046            tol: 0.001,
2047            min_steps: 1,
2048            ..residual_two_hidden_config()
2049        };
2050        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2051        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
2052        // With proper PC predictions, surprise should be finite and non-negative
2053        assert!(result.surprise_score.is_finite());
2054        assert!(result.surprise_score >= 0.0);
2055        // Prediction errors should all be finite
2056        for errors in &result.prediction_errors {
2057            for &e in errors {
2058                assert!(e.is_finite(), "PC prediction error not finite: {e}");
2059            }
2060        }
2061    }
2062
2063    // ── Residual Backward Tests ────────────────────────────────
2064
2065    #[test]
2066    fn test_residual_false_update_identical_to_non_residual() {
2067        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2068        let delta = vec![0.1; 9];
2069
2070        let mut rng1 = make_rng();
2071        let mut actor1: PcActor = PcActor::new(two_hidden_config(), &mut rng1).unwrap();
2072        let infer1 = actor1.infer(&input);
2073        actor1.update_weights(&delta, &infer1, &input, 1.0);
2074
2075        let mut rng2 = make_rng();
2076        let config2 = PcActorConfig {
2077            residual: false,
2078            ..two_hidden_config()
2079        };
2080        let mut actor2: PcActor = PcActor::new(config2, &mut rng2).unwrap();
2081        let infer2 = actor2.infer(&input);
2082        actor2.update_weights(&delta, &infer2, &input, 1.0);
2083
2084        for i in 0..actor1.layers.len() {
2085            assert_eq!(actor1.layers[i].weights.data, actor2.layers[i].weights.data);
2086        }
2087    }
2088
2089    #[test]
2090    fn test_residual_update_changes_all_layer_weights() {
2091        let mut rng = make_rng();
2092        let mut actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2093        let input = vec![0.5; 9];
2094        let infer_result = actor.infer(&input);
2095        let w0 = actor.layers[0].weights.data.clone();
2096        let w1 = actor.layers[1].weights.data.clone();
2097        let w2 = actor.layers[2].weights.data.clone();
2098        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2099        assert_ne!(actor.layers[0].weights.data, w0, "Layer 0 should change");
2100        assert_ne!(actor.layers[1].weights.data, w1, "Layer 1 should change");
2101        assert_ne!(
2102            actor.layers[2].weights.data, w2,
2103            "Output layer should change"
2104        );
2105    }
2106
2107    #[test]
2108    fn test_residual_update_changes_rezero_alpha() {
2109        let mut rng = make_rng();
2110        let mut actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2111        let input = vec![0.5; 9];
2112        let infer_result = actor.infer(&input);
2113        let alpha_before = actor.rezero_alpha.clone();
2114        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2115        assert_ne!(
2116            actor.rezero_alpha, alpha_before,
2117            "rezero_alpha should be updated by backprop"
2118        );
2119    }
2120
2121    #[test]
2122    fn test_residual_update_clips_weights() {
2123        let mut rng = make_rng();
2124        let mut actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2125        let input = vec![1.0; 9];
2126        let infer_result = actor.infer(&input);
2127        actor.update_weights(&[1e6; 9], &infer_result, &input, 1.0);
2128        for layer in &actor.layers {
2129            for &w in &layer.weights.data {
2130                assert!(
2131                    w.abs() <= WEIGHT_CLIP + 1e-12,
2132                    "Weight {w} exceeds WEIGHT_CLIP"
2133                );
2134            }
2135        }
2136    }
2137
2138    #[test]
2139    fn test_residual_gradient_stronger_than_non_residual() {
2140        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2141        let delta = vec![0.1; 9];
2142
2143        // Non-residual 2 hidden layers (27, 27)
2144        let mut rng1 = make_rng();
2145        let config1 = PcActorConfig {
2146            hidden_layers: vec![
2147                LayerDef {
2148                    size: 27,
2149                    activation: Activation::Tanh,
2150                },
2151                LayerDef {
2152                    size: 27,
2153                    activation: Activation::Tanh,
2154                },
2155            ],
2156            ..default_config()
2157        };
2158        let mut actor1: PcActor = PcActor::new(config1, &mut rng1).unwrap();
2159        let w0_before1 = actor1.layers[0].weights.data.clone();
2160        let infer1 = actor1.infer(&input);
2161        actor1.update_weights(&delta, &infer1, &input, 1.0);
2162        let change1: f64 = actor1.layers[0]
2163            .weights
2164            .data
2165            .iter()
2166            .zip(w0_before1.iter())
2167            .map(|(a, b)| (a - b).abs())
2168            .sum();
2169
2170        // Residual 2 hidden layers (27, 27) with rezero_init=1.0
2171        let mut rng2 = make_rng();
2172        let config2 = PcActorConfig {
2173            rezero_init: 1.0,
2174            ..residual_two_hidden_config()
2175        };
2176        let mut actor2: PcActor = PcActor::new(config2, &mut rng2).unwrap();
2177        let w0_before2 = actor2.layers[0].weights.data.clone();
2178        let infer2 = actor2.infer(&input);
2179        actor2.update_weights(&delta, &infer2, &input, 1.0);
2180        let change2: f64 = actor2.layers[0]
2181            .weights
2182            .data
2183            .iter()
2184            .zip(w0_before2.iter())
2185            .map(|(a, b)| (a - b).abs())
2186            .sum();
2187
2188        assert!(
2189            change2 > change1,
2190            "Residual should propagate stronger gradient to layer 0: residual={change2:.6}, non-residual={change1:.6}"
2191        );
2192    }
2193
2194    #[test]
2195    fn test_residual_hybrid_lambda_works() {
2196        let mut rng = make_rng();
2197        let config = PcActorConfig {
2198            local_lambda: 0.99,
2199            ..residual_two_hidden_config()
2200        };
2201        let mut actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2202        let input = vec![0.5; 9];
2203        let infer_result = actor.infer(&input);
2204        let w0_before = actor.layers[0].weights.data.clone();
2205        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2206        assert_ne!(actor.layers[0].weights.data, w0_before);
2207    }
2208
2209    fn local_learning_config() -> PcActorConfig {
2210        PcActorConfig {
2211            local_lambda: 0.0,
2212            ..default_config()
2213        }
2214    }
2215
2216    #[test]
2217    fn test_infer_prediction_errors_count_matches_hidden_layers() {
2218        let mut rng = make_rng();
2219        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
2220        let result = actor.infer(&[0.0; 9]);
2221        assert_eq!(result.prediction_errors.len(), 1);
2222    }
2223
2224    #[test]
2225    fn test_infer_prediction_errors_two_hidden() {
2226        let mut rng = make_rng();
2227        let actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
2228        let result = actor.infer(&[0.0; 9]);
2229        assert_eq!(result.prediction_errors.len(), 2);
2230    }
2231
2232    #[test]
2233    fn test_infer_prediction_errors_zero_hidden_is_empty() {
2234        let mut rng = make_rng();
2235        let config = PcActorConfig {
2236            hidden_layers: vec![],
2237            ..default_config()
2238        };
2239        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2240        let result = actor.infer(&[0.5; 9]);
2241        assert!(result.prediction_errors.is_empty());
2242    }
2243
2244    #[test]
2245    fn test_infer_prediction_errors_all_finite() {
2246        let mut rng = make_rng();
2247        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
2248        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
2249        for errors in &result.prediction_errors {
2250            for &e in errors {
2251                assert!(e.is_finite(), "prediction error not finite: {e}");
2252            }
2253        }
2254    }
2255
2256    #[test]
2257    fn test_infer_prediction_errors_size_matches_hidden_layer_size() {
2258        let mut rng = make_rng();
2259        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
2260        let result = actor.infer(&[0.0; 9]);
2261        // default_config has one hidden layer of size 18
2262        assert_eq!(result.prediction_errors[0].len(), 18);
2263    }
2264
2265    #[test]
2266    fn test_local_learning_config_accepted() {
2267        let mut rng = make_rng();
2268        let config = local_learning_config();
2269        assert!((config.local_lambda).abs() < f64::EPSILON);
2270        let actor: Result<PcActor, _> = PcActor::new(config, &mut rng);
2271        assert!(actor.is_ok());
2272    }
2273
2274    #[test]
2275    fn test_local_learning_update_changes_weights() {
2276        let mut rng = make_rng();
2277        let mut actor: PcActor = PcActor::new(local_learning_config(), &mut rng).unwrap();
2278        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2279        let infer_result = actor.infer(&input);
2280        let weights_before = actor.layers[0].weights.data.clone();
2281        let delta = vec![0.1; 9];
2282        actor.update_weights(&delta, &infer_result, &input, 1.0);
2283        assert_ne!(actor.layers[0].weights.data, weights_before);
2284    }
2285
2286    #[test]
2287    fn test_local_learning_clips_weights() {
2288        let mut rng = make_rng();
2289        let mut actor: PcActor = PcActor::new(local_learning_config(), &mut rng).unwrap();
2290        let input = vec![1.0; 9];
2291        let infer_result = actor.infer(&input);
2292        let delta = vec![1e6; 9];
2293        actor.update_weights(&delta, &infer_result, &input, 1.0);
2294        for layer in &actor.layers {
2295            for &w in &layer.weights.data {
2296                assert!(
2297                    w.abs() <= WEIGHT_CLIP + 1e-12,
2298                    "Weight {w} exceeds WEIGHT_CLIP"
2299                );
2300            }
2301        }
2302    }
2303
2304    #[test]
2305    fn test_local_learning_two_hidden_changes_both() {
2306        let mut rng = make_rng();
2307        let config = PcActorConfig {
2308            local_lambda: 0.0,
2309            ..two_hidden_config()
2310        };
2311        let mut actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2312        let input = vec![0.5; 9];
2313        let infer_result = actor.infer(&input);
2314        let w0_before = actor.layers[0].weights.data.clone();
2315        let w1_before = actor.layers[1].weights.data.clone();
2316        let delta = vec![0.1; 9];
2317        actor.update_weights(&delta, &infer_result, &input, 1.0);
2318        assert_ne!(
2319            actor.layers[0].weights.data, w0_before,
2320            "Layer 0 should change"
2321        );
2322        assert_ne!(
2323            actor.layers[1].weights.data, w1_before,
2324            "Layer 1 should change"
2325        );
2326    }
2327
2328    #[test]
2329    fn test_local_learning_differs_from_backprop() {
2330        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2331        let delta = vec![0.1; 9];
2332
2333        // Backprop actor
2334        let mut rng1 = make_rng();
2335        let mut bp_actor: PcActor = PcActor::new(default_config(), &mut rng1).unwrap();
2336        let bp_infer = bp_actor.infer(&input);
2337        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2338
2339        // Local learning actor (same initial weights)
2340        let mut rng2 = make_rng();
2341        let mut ll_actor: PcActor = PcActor::new(local_learning_config(), &mut rng2).unwrap();
2342        let ll_infer = ll_actor.infer(&input);
2343        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2344
2345        // Hidden layer weights should differ between the two approaches
2346        assert_ne!(
2347            bp_actor.layers[0].weights.data, ll_actor.layers[0].weights.data,
2348            "Local learning should produce different weight updates than backprop"
2349        );
2350    }
2351
2352    // ── Hybrid Learning (local_lambda) Tests ────────────────────
2353
2354    fn hybrid_config(lambda: f64) -> PcActorConfig {
2355        PcActorConfig {
2356            local_lambda: lambda,
2357            ..default_config()
2358        }
2359    }
2360
2361    #[test]
2362    fn test_local_lambda_one_equals_backprop() {
2363        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2364        let delta = vec![0.1; 9];
2365
2366        // Pure backprop (local_learning=false, default)
2367        let mut rng1 = make_rng();
2368        let mut bp_actor: PcActor = PcActor::new(default_config(), &mut rng1).unwrap();
2369        let bp_infer = bp_actor.infer(&input);
2370        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2371
2372        // lambda=1.0 should be identical to backprop
2373        let mut rng2 = make_rng();
2374        let mut lam_actor: PcActor = PcActor::new(hybrid_config(1.0), &mut rng2).unwrap();
2375        let lam_infer = lam_actor.infer(&input);
2376        lam_actor.update_weights(&delta, &lam_infer, &input, 1.0);
2377
2378        assert_eq!(
2379            bp_actor.layers[0].weights.data, lam_actor.layers[0].weights.data,
2380            "lambda=1.0 should produce identical weights to pure backprop"
2381        );
2382    }
2383
2384    #[test]
2385    fn test_local_lambda_zero_equals_local_learning() {
2386        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2387        let delta = vec![0.1; 9];
2388
2389        // Pure local (local_learning=true)
2390        let mut rng1 = make_rng();
2391        let mut ll_actor: PcActor = PcActor::new(local_learning_config(), &mut rng1).unwrap();
2392        let ll_infer = ll_actor.infer(&input);
2393        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2394
2395        // lambda=0.0 should be identical to pure local
2396        let mut rng2 = make_rng();
2397        let mut lam_actor: PcActor = PcActor::new(hybrid_config(0.0), &mut rng2).unwrap();
2398        let lam_infer = lam_actor.infer(&input);
2399        lam_actor.update_weights(&delta, &lam_infer, &input, 1.0);
2400
2401        assert_eq!(
2402            ll_actor.layers[0].weights.data, lam_actor.layers[0].weights.data,
2403            "lambda=0.0 should produce identical weights to pure local learning"
2404        );
2405    }
2406
2407    #[test]
2408    fn test_local_lambda_half_differs_from_both_pure_modes() {
2409        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2410        let delta = vec![0.1; 9];
2411
2412        // Pure backprop
2413        let mut rng1 = make_rng();
2414        let mut bp_actor: PcActor = PcActor::new(default_config(), &mut rng1).unwrap();
2415        let bp_infer = bp_actor.infer(&input);
2416        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2417
2418        // Pure local
2419        let mut rng2 = make_rng();
2420        let mut ll_actor: PcActor = PcActor::new(local_learning_config(), &mut rng2).unwrap();
2421        let ll_infer = ll_actor.infer(&input);
2422        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2423
2424        // Hybrid lambda=0.5
2425        let mut rng3 = make_rng();
2426        let mut hy_actor: PcActor = PcActor::new(hybrid_config(0.5), &mut rng3).unwrap();
2427        let hy_infer = hy_actor.infer(&input);
2428        hy_actor.update_weights(&delta, &hy_infer, &input, 1.0);
2429
2430        assert_ne!(
2431            hy_actor.layers[0].weights.data, bp_actor.layers[0].weights.data,
2432            "lambda=0.5 should differ from pure backprop"
2433        );
2434        assert_ne!(
2435            hy_actor.layers[0].weights.data, ll_actor.layers[0].weights.data,
2436            "lambda=0.5 should differ from pure local"
2437        );
2438    }
2439
2440    #[test]
2441    fn test_local_lambda_changes_weights() {
2442        let mut rng = make_rng();
2443        let mut actor: PcActor = PcActor::new(hybrid_config(0.5), &mut rng).unwrap();
2444        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2445        let infer_result = actor.infer(&input);
2446        let weights_before = actor.layers[0].weights.data.clone();
2447        let delta = vec![0.1; 9];
2448        actor.update_weights(&delta, &infer_result, &input, 1.0);
2449        assert_ne!(actor.layers[0].weights.data, weights_before);
2450    }
2451
2452    #[test]
2453    fn test_local_lambda_clips_weights() {
2454        let mut rng = make_rng();
2455        let mut actor: PcActor = PcActor::new(hybrid_config(0.5), &mut rng).unwrap();
2456        let input = vec![1.0; 9];
2457        let infer_result = actor.infer(&input);
2458        let delta = vec![1e6; 9];
2459        actor.update_weights(&delta, &infer_result, &input, 1.0);
2460        for layer in &actor.layers {
2461            for &w in &layer.weights.data {
2462                assert!(
2463                    w.abs() <= WEIGHT_CLIP + 1e-12,
2464                    "Weight {w} exceeds WEIGHT_CLIP"
2465                );
2466            }
2467        }
2468    }
2469
2470    #[test]
2471    fn test_local_lambda_negative_returns_error() {
2472        let mut rng = make_rng();
2473        let config = hybrid_config(-0.1);
2474        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
2475        assert!(result.is_err());
2476    }
2477
2478    #[test]
2479    fn test_local_lambda_above_one_returns_error() {
2480        let mut rng = make_rng();
2481        let config = hybrid_config(1.1);
2482        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
2483        assert!(result.is_err());
2484    }
2485
2486    // ── Phase 5 Cycle 5.1: Crossover same topology ─────────────
2487
2488    fn crossover_config_27() -> PcActorConfig {
2489        PcActorConfig {
2490            input_size: 9,
2491            hidden_layers: vec![LayerDef {
2492                size: 27,
2493                activation: Activation::Tanh,
2494            }],
2495            output_size: 9,
2496            output_activation: Activation::Linear,
2497            alpha: 0.03,
2498            tol: 0.01,
2499            min_steps: 1,
2500            max_steps: 5,
2501            lr_weights: 0.005,
2502            synchronous: true,
2503            temperature: 1.0,
2504            local_lambda: 0.99,
2505            residual: false,
2506            rezero_init: 0.001,
2507        }
2508    }
2509
2510    fn make_caches_for_actor(actor: &PcActor, batch_size: usize) -> Vec<Vec<Vec<f64>>> {
2511        let num_hidden = actor.config.hidden_layers.len();
2512        let mut layers: Vec<Vec<Vec<f64>>> = (0..num_hidden).map(|_| Vec::new()).collect();
2513        for i in 0..batch_size {
2514            let input: Vec<f64> = (0..actor.config.input_size)
2515                .map(|j| ((i * actor.config.input_size + j) as f64 * 0.01).sin())
2516                .collect();
2517            let result = actor.infer(&input);
2518            for (layer_idx, state) in result.hidden_states.iter().enumerate() {
2519                layers[layer_idx].push(state.clone());
2520            }
2521        }
2522        layers
2523    }
2524
2525    fn build_cache_matrix(
2526        cache_layers: &[Vec<Vec<f64>>],
2527        layer_idx: usize,
2528    ) -> crate::matrix::Matrix {
2529        use crate::linalg::LinAlg;
2530        let samples = &cache_layers[layer_idx];
2531        let batch_size = samples.len();
2532        let n_neurons = samples[0].len();
2533        let mut mat = CpuLinAlg::zeros_mat(batch_size, n_neurons);
2534        for (r, sample) in samples.iter().enumerate() {
2535            for (c, &val) in sample.iter().enumerate() {
2536                CpuLinAlg::mat_set(&mut mat, r, c, val);
2537            }
2538        }
2539        mat
2540    }
2541
2542    #[test]
2543    fn test_crossover_same_topology_produces_valid_actor() {
2544        let mut rng_a = StdRng::seed_from_u64(42);
2545        let mut rng_b = StdRng::seed_from_u64(123);
2546        let config = crossover_config_27();
2547        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2548        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2549
2550        let caches_a = make_caches_for_actor(&actor_a, 50);
2551        let caches_b = make_caches_for_actor(&actor_b, 50);
2552        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2553        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2554
2555        let mut rng_child = StdRng::seed_from_u64(99);
2556        let child: PcActor = PcActor::crossover(
2557            &actor_a,
2558            &actor_b,
2559            &cache_mats_a,
2560            &cache_mats_b,
2561            0.5,
2562            config,
2563            &mut rng_child,
2564        )
2565        .unwrap();
2566
2567        // Child has same topology
2568        assert_eq!(child.layers.len(), actor_a.layers.len());
2569        for (i, layer) in child.layers.iter().enumerate() {
2570            assert_eq!(
2571                CpuLinAlg::mat_rows(&layer.weights),
2572                CpuLinAlg::mat_rows(&actor_a.layers[i].weights)
2573            );
2574            assert_eq!(
2575                CpuLinAlg::mat_cols(&layer.weights),
2576                CpuLinAlg::mat_cols(&actor_a.layers[i].weights)
2577            );
2578        }
2579    }
2580
2581    #[test]
2582    fn test_crossover_same_topology_child_differs_from_parents() {
2583        let mut rng_a = StdRng::seed_from_u64(42);
2584        let mut rng_b = StdRng::seed_from_u64(123);
2585        let config = crossover_config_27();
2586        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2587        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2588
2589        let caches_a = make_caches_for_actor(&actor_a, 50);
2590        let caches_b = make_caches_for_actor(&actor_b, 50);
2591        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2592        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2593
2594        let mut rng_child = StdRng::seed_from_u64(99);
2595        let child: PcActor = PcActor::crossover(
2596            &actor_a,
2597            &actor_b,
2598            &cache_mats_a,
2599            &cache_mats_b,
2600            0.5,
2601            config,
2602            &mut rng_child,
2603        )
2604        .unwrap();
2605
2606        // Child weights differ from both parents (blended)
2607        assert_ne!(child.layers[0].weights.data, actor_a.layers[0].weights.data);
2608        assert_ne!(child.layers[0].weights.data, actor_b.layers[0].weights.data);
2609    }
2610
2611    #[test]
2612    fn test_crossover_alpha_one_approximates_parent_a() {
2613        let mut rng_a = StdRng::seed_from_u64(42);
2614        let mut rng_b = StdRng::seed_from_u64(123);
2615        let config = crossover_config_27();
2616        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2617        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2618
2619        let caches_a = make_caches_for_actor(&actor_a, 50);
2620        let caches_b = make_caches_for_actor(&actor_b, 50);
2621        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2622        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2623
2624        let mut rng_child = StdRng::seed_from_u64(99);
2625        let child: PcActor = PcActor::crossover(
2626            &actor_a,
2627            &actor_b,
2628            &cache_mats_a,
2629            &cache_mats_b,
2630            1.0, // alpha=1.0 → child ≈ parent A
2631            config,
2632            &mut rng_child,
2633        )
2634        .unwrap();
2635
2636        // Input layer (layer 0): positional crossover, should be close to parent A
2637        let a_w = &actor_a.layers[0].weights.data;
2638        let child_w = &child.layers[0].weights.data;
2639        let max_diff: f64 = a_w
2640            .iter()
2641            .zip(child_w.iter())
2642            .map(|(a, c)| (a - c).abs())
2643            .fold(0.0_f64, f64::max);
2644        assert!(
2645            max_diff < 1e-10,
2646            "alpha=1.0: input layer max diff from parent A = {max_diff}"
2647        );
2648    }
2649
2650    #[test]
2651    fn test_crossover_child_weights_finite() {
2652        let mut rng_a = StdRng::seed_from_u64(42);
2653        let mut rng_b = StdRng::seed_from_u64(123);
2654        let config = crossover_config_27();
2655        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2656        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2657
2658        let caches_a = make_caches_for_actor(&actor_a, 50);
2659        let caches_b = make_caches_for_actor(&actor_b, 50);
2660        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2661        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2662
2663        let mut rng_child = StdRng::seed_from_u64(99);
2664        let child: PcActor = PcActor::crossover(
2665            &actor_a,
2666            &actor_b,
2667            &cache_mats_a,
2668            &cache_mats_b,
2669            0.5,
2670            config,
2671            &mut rng_child,
2672        )
2673        .unwrap();
2674
2675        for (i, layer) in child.layers.iter().enumerate() {
2676            for &w in &layer.weights.data {
2677                assert!(w.is_finite(), "NaN/Inf in layer {i} weights");
2678            }
2679            for b in CpuLinAlg::vec_to_vec(&layer.bias) {
2680                assert!(b.is_finite(), "NaN/Inf in layer {i} biases");
2681            }
2682        }
2683    }
2684
2685    // ── Phase 5 Cycle 5.2: Crossover child smaller ──────────────
2686
2687    #[test]
2688    fn test_crossover_child_smaller() {
2689        let mut rng_a = StdRng::seed_from_u64(42);
2690        let mut rng_b = StdRng::seed_from_u64(123);
2691        let config_27 = PcActorConfig {
2692            hidden_layers: vec![
2693                LayerDef {
2694                    size: 27,
2695                    activation: Activation::Tanh,
2696                },
2697                LayerDef {
2698                    size: 27,
2699                    activation: Activation::Tanh,
2700                },
2701            ],
2702            ..crossover_config_27()
2703        };
2704        let actor_a: PcActor = PcActor::new(config_27.clone(), &mut rng_a).unwrap();
2705        let actor_b: PcActor = PcActor::new(config_27, &mut rng_b).unwrap();
2706
2707        let caches_a = make_caches_for_actor(&actor_a, 50);
2708        let caches_b = make_caches_for_actor(&actor_b, 50);
2709        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
2710        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
2711
2712        let child_config = PcActorConfig {
2713            hidden_layers: vec![
2714                LayerDef {
2715                    size: 18,
2716                    activation: Activation::Tanh,
2717                },
2718                LayerDef {
2719                    size: 18,
2720                    activation: Activation::Tanh,
2721                },
2722            ],
2723            ..crossover_config_27()
2724        };
2725
2726        let mut rng_child = StdRng::seed_from_u64(99);
2727        let child: PcActor = PcActor::crossover(
2728            &actor_a,
2729            &actor_b,
2730            &cache_mats_a,
2731            &cache_mats_b,
2732            0.5,
2733            child_config,
2734            &mut rng_child,
2735        )
2736        .unwrap();
2737
2738        // Child hidden layers have 18 neurons
2739        use crate::linalg::LinAlg;
2740        assert_eq!(CpuLinAlg::mat_rows(&child.layers[0].weights), 18);
2741        assert_eq!(CpuLinAlg::mat_rows(&child.layers[1].weights), 18);
2742    }
2743
2744    // ── Phase 5 Cycle 5.3: Crossover parents differ ─────────────
2745
2746    #[test]
2747    fn test_crossover_parents_different_sizes() {
2748        let mut rng_a = StdRng::seed_from_u64(42);
2749        let mut rng_b = StdRng::seed_from_u64(123);
2750        let config_a = crossover_config_27(); // [27]
2751        let config_b = PcActorConfig {
2752            hidden_layers: vec![LayerDef {
2753                size: 18,
2754                activation: Activation::Tanh,
2755            }],
2756            ..crossover_config_27()
2757        }; // [18]
2758
2759        let actor_a: PcActor = PcActor::new(config_a, &mut rng_a).unwrap();
2760        let actor_b: PcActor = PcActor::new(config_b, &mut rng_b).unwrap();
2761
2762        let caches_a = make_caches_for_actor(&actor_a, 50);
2763        let caches_b = make_caches_for_actor(&actor_b, 50);
2764        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2765        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2766
2767        // Child has [27] → blending zone [0..18), copy zone [18..27) from parent A
2768        let child_config = crossover_config_27();
2769        let mut rng_child = StdRng::seed_from_u64(99);
2770        let child: PcActor = PcActor::crossover(
2771            &actor_a,
2772            &actor_b,
2773            &cache_mats_a,
2774            &cache_mats_b,
2775            0.5,
2776            child_config,
2777            &mut rng_child,
2778        )
2779        .unwrap();
2780
2781        use crate::linalg::LinAlg;
2782        // Child has correct dimensions [27]
2783        assert_eq!(CpuLinAlg::mat_rows(&child.layers[0].weights), 27);
2784        // All weights finite
2785        for &w in &child.layers[0].weights.data {
2786            assert!(w.is_finite());
2787        }
2788    }
2789
2790    // ── Phase 5 Cycle 5.4: Crossover child larger ───────────────
2791
2792    #[test]
2793    fn test_crossover_child_larger() {
2794        let mut rng_a = StdRng::seed_from_u64(42);
2795        let mut rng_b = StdRng::seed_from_u64(123);
2796        let config_18 = PcActorConfig {
2797            hidden_layers: vec![LayerDef {
2798                size: 18,
2799                activation: Activation::Tanh,
2800            }],
2801            ..crossover_config_27()
2802        };
2803        let actor_a: PcActor = PcActor::new(config_18.clone(), &mut rng_a).unwrap();
2804        let actor_b: PcActor = PcActor::new(config_18, &mut rng_b).unwrap();
2805
2806        let caches_a = make_caches_for_actor(&actor_a, 50);
2807        let caches_b = make_caches_for_actor(&actor_b, 50);
2808        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2809        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2810
2811        // Child has [27] → blending zone [0..18), Xavier zone [18..27)
2812        let child_config = crossover_config_27();
2813        let mut rng_child = StdRng::seed_from_u64(99);
2814        let child: PcActor = PcActor::crossover(
2815            &actor_a,
2816            &actor_b,
2817            &cache_mats_a,
2818            &cache_mats_b,
2819            0.5,
2820            child_config,
2821            &mut rng_child,
2822        )
2823        .unwrap();
2824
2825        use crate::linalg::LinAlg;
2826        assert_eq!(CpuLinAlg::mat_rows(&child.layers[0].weights), 27);
2827        // All weights finite
2828        for &w in &child.layers[0].weights.data {
2829            assert!(w.is_finite());
2830        }
2831        // Xavier zone weights are not all zero (random init)
2832        let xavier_zone_nonzero = (18..27).any(|r| {
2833            (0..CpuLinAlg::mat_cols(&child.layers[0].weights))
2834                .any(|c| CpuLinAlg::mat_get(&child.layers[0].weights, r, c).abs() > 1e-15)
2835        });
2836        assert!(
2837            xavier_zone_nonzero,
2838            "Xavier zone [18..27) should have non-zero weights"
2839        );
2840    }
2841
2842    // ── Phase 5 Cycle 5.5: Crossover layer count mismatch ───────
2843
2844    #[test]
2845    fn test_crossover_child_more_layers() {
2846        let mut rng_a = StdRng::seed_from_u64(42);
2847        let mut rng_b = StdRng::seed_from_u64(123);
2848        let config_2l = PcActorConfig {
2849            hidden_layers: vec![
2850                LayerDef {
2851                    size: 27,
2852                    activation: Activation::Tanh,
2853                },
2854                LayerDef {
2855                    size: 27,
2856                    activation: Activation::Tanh,
2857                },
2858            ],
2859            ..crossover_config_27()
2860        };
2861        let actor_a: PcActor = PcActor::new(config_2l.clone(), &mut rng_a).unwrap();
2862        let actor_b: PcActor = PcActor::new(config_2l, &mut rng_b).unwrap();
2863
2864        let caches_a = make_caches_for_actor(&actor_a, 50);
2865        let caches_b = make_caches_for_actor(&actor_b, 50);
2866        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
2867        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
2868
2869        // Child has 3 hidden layers → layers 0-1 crossover, layer 2 Xavier
2870        let child_config = PcActorConfig {
2871            hidden_layers: vec![
2872                LayerDef {
2873                    size: 27,
2874                    activation: Activation::Tanh,
2875                },
2876                LayerDef {
2877                    size: 27,
2878                    activation: Activation::Tanh,
2879                },
2880                LayerDef {
2881                    size: 18,
2882                    activation: Activation::Tanh,
2883                },
2884            ],
2885            ..crossover_config_27()
2886        };
2887
2888        let mut rng_child = StdRng::seed_from_u64(99);
2889        let child: PcActor = PcActor::crossover(
2890            &actor_a,
2891            &actor_b,
2892            &cache_mats_a,
2893            &cache_mats_b,
2894            0.5,
2895            child_config,
2896            &mut rng_child,
2897        )
2898        .unwrap();
2899
2900        use crate::linalg::LinAlg;
2901        // Child has 4 layers (3 hidden + 1 output)
2902        assert_eq!(child.layers.len(), 4);
2903        // Layer 2 (new) has 18 rows
2904        assert_eq!(CpuLinAlg::mat_rows(&child.layers[2].weights), 18);
2905        // All weights finite
2906        for (i, layer) in child.layers.iter().enumerate() {
2907            for &w in &layer.weights.data {
2908                assert!(w.is_finite(), "NaN/Inf in layer {i}");
2909            }
2910        }
2911    }
2912
2913    #[test]
2914    fn test_crossover_child_fewer_layers() {
2915        let mut rng_a = StdRng::seed_from_u64(42);
2916        let mut rng_b = StdRng::seed_from_u64(123);
2917        let config_3l = PcActorConfig {
2918            hidden_layers: vec![
2919                LayerDef {
2920                    size: 27,
2921                    activation: Activation::Tanh,
2922                },
2923                LayerDef {
2924                    size: 27,
2925                    activation: Activation::Tanh,
2926                },
2927                LayerDef {
2928                    size: 18,
2929                    activation: Activation::Tanh,
2930                },
2931            ],
2932            ..crossover_config_27()
2933        };
2934        let actor_a: PcActor = PcActor::new(config_3l.clone(), &mut rng_a).unwrap();
2935        let actor_b: PcActor = PcActor::new(config_3l, &mut rng_b).unwrap();
2936
2937        let caches_a = make_caches_for_actor(&actor_a, 50);
2938        let caches_b = make_caches_for_actor(&actor_b, 50);
2939        let cache_mats_a: Vec<_> = (0..3).map(|i| build_cache_matrix(&caches_a, i)).collect();
2940        let cache_mats_b: Vec<_> = (0..3).map(|i| build_cache_matrix(&caches_b, i)).collect();
2941
2942        // Child has 2 hidden layers → layers 0-1 crossover, layer 2 discarded
2943        let child_config = PcActorConfig {
2944            hidden_layers: vec![
2945                LayerDef {
2946                    size: 27,
2947                    activation: Activation::Tanh,
2948                },
2949                LayerDef {
2950                    size: 27,
2951                    activation: Activation::Tanh,
2952                },
2953            ],
2954            ..crossover_config_27()
2955        };
2956
2957        let mut rng_child = StdRng::seed_from_u64(99);
2958        let child: PcActor = PcActor::crossover(
2959            &actor_a,
2960            &actor_b,
2961            &cache_mats_a,
2962            &cache_mats_b,
2963            0.5,
2964            child_config,
2965            &mut rng_child,
2966        )
2967        .unwrap();
2968
2969        use crate::linalg::LinAlg;
2970        // Child has 3 layers (2 hidden + 1 output)
2971        assert_eq!(child.layers.len(), 3);
2972        // Output layer input_size = 27 (last hidden size)
2973        assert_eq!(CpuLinAlg::mat_cols(&child.layers[2].weights), 27);
2974    }
2975
2976    // ── Phase 5 Cycle 5.6: Crossover residual components ────────
2977
2978    #[test]
2979    fn test_crossover_residual_rezero_blended() {
2980        let mut rng_a = StdRng::seed_from_u64(42);
2981        let mut rng_b = StdRng::seed_from_u64(123);
2982        let config = PcActorConfig {
2983            hidden_layers: vec![
2984                LayerDef {
2985                    size: 27,
2986                    activation: Activation::Softsign,
2987                },
2988                LayerDef {
2989                    size: 27,
2990                    activation: Activation::Softsign,
2991                },
2992            ],
2993            residual: true,
2994            rezero_init: 0.1,
2995            ..crossover_config_27()
2996        };
2997        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2998        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2999
3000        let caches_a = make_caches_for_actor(&actor_a, 50);
3001        let caches_b = make_caches_for_actor(&actor_b, 50);
3002        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
3003        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
3004
3005        let mut rng_child = StdRng::seed_from_u64(99);
3006        let child: PcActor = PcActor::crossover(
3007            &actor_a,
3008            &actor_b,
3009            &cache_mats_a,
3010            &cache_mats_b,
3011            0.5,
3012            config,
3013            &mut rng_child,
3014        )
3015        .unwrap();
3016
3017        // Child has rezero_alpha values
3018        assert!(!child.rezero_alpha.is_empty());
3019        // Blended rezero_alpha: with alpha=0.5 and both parents same init,
3020        // child should be close to parent values
3021        for &rz in &child.rezero_alpha {
3022            assert!(rz.is_finite(), "rezero_alpha is not finite");
3023        }
3024    }
3025
3026    #[test]
3027    fn test_crossover_residual_skip_projections_blended() {
3028        let mut rng_a = StdRng::seed_from_u64(42);
3029        let mut rng_b = StdRng::seed_from_u64(123);
3030        let config = PcActorConfig {
3031            hidden_layers: vec![
3032                LayerDef {
3033                    size: 27,
3034                    activation: Activation::Softsign,
3035                },
3036                LayerDef {
3037                    size: 18,
3038                    activation: Activation::Softsign,
3039                },
3040            ],
3041            residual: true,
3042            rezero_init: 0.1,
3043            ..crossover_config_27()
3044        };
3045        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
3046        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
3047
3048        let caches_a = make_caches_for_actor(&actor_a, 50);
3049        let caches_b = make_caches_for_actor(&actor_b, 50);
3050        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
3051        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
3052
3053        let mut rng_child = StdRng::seed_from_u64(99);
3054        let child: PcActor = PcActor::crossover(
3055            &actor_a,
3056            &actor_b,
3057            &cache_mats_a,
3058            &cache_mats_b,
3059            0.5,
3060            config,
3061            &mut rng_child,
3062        )
3063        .unwrap();
3064
3065        // Child should have skip_projections for size mismatch (27→18)
3066        assert!(!child.skip_projections.is_empty());
3067        // At least one projection should be Some (27→18 needs projection)
3068        let has_projection = child.skip_projections.iter().any(|p| p.is_some());
3069        assert!(has_projection, "Expected at least one skip projection");
3070
3071        // Projection weights are finite
3072        for mat in child.skip_projections.iter().flatten() {
3073            for &w in &mat.data {
3074                assert!(w.is_finite(), "NaN/Inf in skip projection");
3075            }
3076        }
3077    }
3078
3079    // ── Fix #1: Column permutation propagation ──────────────────
3080
    #[test]
    fn test_crossover_multilayer_column_permutation_consistency() {
        // Two identical parents → child should be identical regardless of
        // CCA permutation (identity) or column ordering. But if we manually
        // set parent B = parent A with a known neuron permutation at layer 0,
        // the child at alpha=0.5 should produce a network whose layer 1
        // columns are also reordered to match.
        //
        // Strategy: crossover parent A with itself (same weights). The CCA
        // permutation should be identity, and the child should equal both
        // parents. Then crossover with alpha=0.5 using two different parents.
        // Run inference on the child — if column permutation is broken,
        // the child's layer 1 receives inputs in the wrong order, and
        // inference produces different results than a properly-permuted child.
        use crate::linalg::LinAlg;
        let mut rng_a = StdRng::seed_from_u64(42);
        let mut rng_b = StdRng::seed_from_u64(123);
        // Small 4→[8,8]→4 network: small layers make a non-trivial CCA
        // alignment between independently-initialized parents more likely.
        let config = PcActorConfig {
            hidden_layers: vec![
                LayerDef {
                    size: 8,
                    activation: Activation::Tanh,
                },
                LayerDef {
                    size: 8,
                    activation: Activation::Tanh,
                },
            ],
            input_size: 4,
            output_size: 4,
            ..crossover_config_27()
        };
        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();

        // Larger batch (100) than the other tests: CCA needs enough samples
        // for a stable alignment estimate.
        let caches_a = make_caches_for_actor(&actor_a, 100);
        let caches_b = make_caches_for_actor(&actor_b, 100);
        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();

        // Get CCA permutation for layer 0 to check if it's non-trivial
        let perm0 =
            crate::matrix::cca_neuron_alignment::<CpuLinAlg>(&cache_mats_a[0], &cache_mats_b[0])
                .unwrap();
        let is_nontrivial = perm0.iter().enumerate().any(|(i, &p)| i != p);

        // Only test column propagation if CCA produced a non-trivial permutation
        if !is_nontrivial {
            // Parents too similar for meaningful test — skip
            return;
        }

        // Crossover with alpha=0.5
        let mut rng_child = StdRng::seed_from_u64(99);
        let child: PcActor = PcActor::crossover(
            &actor_a,
            &actor_b,
            &cache_mats_a,
            &cache_mats_b,
            0.5,
            config.clone(),
            &mut rng_child,
        )
        .unwrap();

        // Verify: layer 1's input columns should be permuted to match layer 0's
        // row permutation of parent B. Check that the child's layer 1 column
        // ordering is consistent by verifying that inference produces finite,
        // non-degenerate output AND that crossover applied the column permutation.
        //
        // If columns are NOT permuted, parent B's layer 1 columns still reference
        // the original neuron positions, but the blended layer 0 has reordered
        // neurons. The inconsistency means column c of layer 1 connects to the
        // wrong neuron from layer 0.
        //
        // We verify by checking that the column permutation was actually applied:
        // parent B's layer 1 columns should be reordered by perm0.
        let b_layer1 = &actor_b.layers[1];
        let b_cols = CpuLinAlg::mat_cols(&b_layer1.weights);

        // Expected: child layer 1 col[c] = 0.5 * A.layer1.col[c] + 0.5 * B.layer1.col[perm0[c]]
        // If column permutation is NOT applied, it would be:
        // child layer 1 col[c] = 0.5 * A.layer1.col[c] + 0.5 * B.layer1.col[c]  (wrong!)
        let a_layer1 = &actor_a.layers[1];
        let child_layer1 = &child.layers[1];
        let n_rows = CpuLinAlg::mat_rows(&child_layer1.weights);

        // Scan every non-identity permutation position: one column that
        // matches the permuted blend but not the unpermuted blend is enough
        // evidence that the column permutation was propagated.
        let mut has_col_permutation = false;
        for (c, &src_col) in perm0.iter().enumerate().take(b_cols.min(perm0.len())) {
            if src_col == c {
                continue; // Identity position, can't distinguish
            }
            // Check if child col c matches the permuted blend (correct)
            // vs the unpermuted blend (broken)
            for r in 0..n_rows {
                let a_val = CpuLinAlg::mat_get(&a_layer1.weights, r, c);
                let b_val_permuted = CpuLinAlg::mat_get(&b_layer1.weights, r, src_col);
                let b_val_unpermuted = CpuLinAlg::mat_get(&b_layer1.weights, r, c);
                let child_val = CpuLinAlg::mat_get(&child_layer1.weights, r, c);

                let expected_permuted = 0.5 * a_val + 0.5 * b_val_permuted;
                let expected_unpermuted = 0.5 * a_val + 0.5 * b_val_unpermuted;

                // If column permutation is applied, child matches permuted expectation
                if (child_val - expected_permuted).abs() < 1e-10
                    && (child_val - expected_unpermuted).abs() > 1e-10
                {
                    has_col_permutation = true;
                }
            }
        }

        assert!(
            has_col_permutation,
            "Layer 1 columns should be permuted to match layer 0's CCA \
             permutation of parent B. perm0={perm0:?}"
        );
    }
3199
3200    // ── Fix #5: Empty hidden_layers guard ────────────────────────
3201
3202    #[test]
3203    fn test_crossover_empty_hidden_layers_returns_error() {
3204        let mut rng_a = StdRng::seed_from_u64(42);
3205        let mut rng_b = StdRng::seed_from_u64(123);
3206        let config = crossover_config_27();
3207        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
3208        let actor_b: PcActor = PcActor::new(config, &mut rng_b).unwrap();
3209
3210        let caches_a = make_caches_for_actor(&actor_a, 50);
3211        let caches_b = make_caches_for_actor(&actor_b, 50);
3212        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
3213        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
3214
3215        // Child config with empty hidden layers should return error, not panic
3216        let empty_config = PcActorConfig {
3217            hidden_layers: vec![],
3218            ..crossover_config_27()
3219        };
3220
3221        let mut rng_child = StdRng::seed_from_u64(99);
3222        let result = PcActor::crossover(
3223            &actor_a,
3224            &actor_b,
3225            &cache_mats_a,
3226            &cache_mats_b,
3227            0.5,
3228            empty_config,
3229            &mut rng_child,
3230        );
3231        assert!(
3232            result.is_err(),
3233            "Crossover with empty hidden_layers should return error"
3234        );
3235    }
3236}