Skip to main content

pc_rl_core/
pc_actor.rs

1// Author: Julian Bolivar
2// Version: 1.0.0
3// Date: 2026-03-25
4
5//! Predictive Coding Actor Network.
6//!
7//! Implements an actor that uses iterative top-down/bottom-up predictive coding
8//! inference loops instead of standard feedforward passes. The prediction error
9//! (surprise score) drives learning rate modulation in the actor-critic agent.
10
11use rand::Rng;
12use serde::{Deserialize, Serialize};
13
14use crate::activation::Activation;
15use crate::error::PcError;
16use crate::layer::{Layer, LayerDef};
17use crate::linalg::cpu::CpuLinAlg;
18use crate::linalg::LinAlg;
19
20/// Configuration for the predictive coding actor network.
21///
22/// # Examples
23///
24/// ```
25/// use pc_rl_core::activation::Activation;
26/// use pc_rl_core::layer::LayerDef;
27/// use pc_rl_core::pc_actor::PcActorConfig;
28///
29/// let config = PcActorConfig {
30///     input_size: 9,
31///     hidden_layers: vec![LayerDef { size: 18, activation: Activation::Tanh }],
32///     output_size: 9,
33///     output_activation: Activation::Tanh,
34///     alpha: 0.1,
35///     tol: 0.01,
36///     min_steps: 1,
37///     max_steps: 20,
38///     lr_weights: 0.01,
39///     synchronous: true,
40///     temperature: 1.0,
41///     local_lambda: 1.0,
42///     residual: false,
43///     rezero_init: 0.001,
44/// };
45/// ```
46#[derive(Debug, Clone, Serialize, Deserialize)]
47pub struct PcActorConfig {
48    /// Number of input features (e.g. 9 for tic-tac-toe board).
49    pub input_size: usize,
50    /// Hidden layer topology definitions.
51    pub hidden_layers: Vec<LayerDef>,
52    /// Number of output actions.
53    pub output_size: usize,
54    /// Activation function for the output layer.
55    pub output_activation: Activation,
56    /// Inference learning rate for PC loop state updates (`h += alpha * error`).
57    /// Set to 0.0 to disable PC inference (network behaves as standard MLP).
58    /// Active regardless of `residual` setting. Default: 0.1.
59    #[serde(default = "default_alpha")]
60    pub alpha: f64,
61    /// Convergence threshold for RMS prediction error.
62    /// PC loop exits early when surprise < tol (after at least `min_steps`).
63    /// Active regardless of `residual` setting. Default: 0.01.
64    #[serde(default = "default_tol")]
65    pub tol: f64,
66    /// Minimum PC inference steps before convergence check is allowed.
67    /// Active regardless of `residual` setting. Default: 1.
68    #[serde(default = "default_min_steps")]
69    pub min_steps: usize,
70    /// Maximum PC inference steps per action.
71    /// Active regardless of `residual` setting. Default: 20.
72    #[serde(default = "default_max_steps")]
73    pub max_steps: usize,
74    /// Base learning rate for weight updates. Default: 0.01.
75    #[serde(default = "default_lr_weights")]
76    pub lr_weights: f64,
77    /// If true, use synchronous snapshot mode; otherwise in-place. Default: true.
78    #[serde(default = "default_synchronous")]
79    pub synchronous: bool,
80    /// Softmax temperature for action selection. Default: 1.0.
81    #[serde(default = "default_temperature")]
82    pub temperature: f64,
83    /// Blend factor for hidden layer weight updates, range `[0.0, 1.0]`.
84    ///
85    /// Controls how hidden layers combine two gradient signals:
86    /// `delta = lambda * backprop_grad + (1 - lambda) * pc_prediction_error`
87    ///
88    /// - `1.0` — Pure backprop: reward signal propagated from output (default).
89    /// - `0.0` — Pure local PC: prediction errors from inference loop
90    ///   used as gradients (Millidge et al. 2022). No vanishing gradient
91    ///   but no reward signal reaches hidden layers.
92    /// - `0.0 < lambda < 1.0` — Hybrid: reward-aware backprop regularized
93    ///   by local PC consistency errors.
94    ///
95    /// The output layer always uses standard backprop regardless of this value.
96    #[serde(default = "default_local_lambda")]
97    pub local_lambda: f64,
98    /// Enable residual skip connections between same-dimension hidden layers.
99    /// When false, `rezero_init` is ignored. When true, all hidden layers
100    /// must have the same size, and skip connections with learnable ReZero
101    /// scaling are added between consecutive hidden layers (not the first,
102    /// since input_size typically differs from hidden_size).
103    #[serde(default)]
104    pub residual: bool,
105    /// Initial value for ReZero scaling factors on residual connections.
106    /// Only used when `residual = true`. Controls initial contribution of
107    /// the nonlinear component: `h[i] = rezero_init * tanh(...) + h[i-1]`.
108    ///
109    /// - `0.001` — Near-identity start (ReZero: network learns depth gradually)
110    /// - `1.0` — Standard ResNet residual (full contribution from start)
111    ///
112    /// Ignored when `residual = false`.
113    #[serde(default = "default_rezero_init")]
114    pub rezero_init: f64,
115}
116
117/// Default PC inference learning rate.
118fn default_alpha() -> f64 {
119    0.1
120}
121
122/// Default convergence tolerance for PC loop.
123fn default_tol() -> f64 {
124    0.01
125}
126
127/// Default minimum PC inference steps.
128fn default_min_steps() -> usize {
129    1
130}
131
132/// Default maximum PC inference steps.
133fn default_max_steps() -> usize {
134    20
135}
136
137/// Default base learning rate for weight updates.
138fn default_lr_weights() -> f64 {
139    0.01
140}
141
142/// Default synchronous mode (snapshot).
143fn default_synchronous() -> bool {
144    true
145}
146
147/// Default softmax temperature.
148fn default_temperature() -> f64 {
149    1.0
150}
151
152/// Default local_lambda: 1.0 (pure backprop).
153fn default_local_lambda() -> f64 {
154    1.0
155}
156
157/// Default rezero_init: 0.001 (near-identity at start).
158fn default_rezero_init() -> f64 {
159    0.001
160}
161
162/// Result of the predictive coding inference loop.
163///
164/// Contains converged output logits, hidden state representations,
165/// and diagnostic information about the inference process.
166///
167/// Generic over a [`LinAlg`] backend `L`. Defaults to [`CpuLinAlg`].
168#[derive(Debug, Clone)]
169pub struct InferResult<L: LinAlg = CpuLinAlg> {
170    /// Converged output logits.
171    pub y_conv: L::Vector,
172    /// All hidden states concatenated (fed to critic).
173    pub latent_concat: L::Vector,
174    /// Per-layer hidden state activations.
175    pub hidden_states: Vec<L::Vector>,
176    /// Per-layer prediction errors from the last PC inference step.
177    /// Ordered from top hidden layer to bottom (reverse layer order).
178    pub prediction_errors: Vec<L::Vector>,
179    /// RMS prediction error across layers.
180    pub surprise_score: f64,
181    /// Number of inference steps performed.
182    pub steps_used: usize,
183    /// Whether the inference loop converged within tolerance.
184    pub converged: bool,
185    /// Per-layer tanh components for residual layers.
186    /// `None` for non-skip layers, `Some(tanh_out)` for skip-eligible layers.
187    /// Needed for correct backward pass (derivative on tanh_out, not full h\[i\]).
188    pub tanh_components: Vec<Option<L::Vector>>,
189}
190
191/// Action selection mode.
192#[derive(Debug, Clone, Copy, PartialEq, Eq)]
193pub enum SelectionMode {
194    /// Stochastic sampling from softmax distribution.
195    Training,
196    /// Deterministic argmax selection.
197    Play,
198}
199
200/// Predictive coding actor network.
201///
202/// Uses iterative top-down/bottom-up inference loops to produce
203/// stable hidden representations and output logits.
204///
205/// Generic over a [`LinAlg`] backend `L`. Defaults to [`CpuLinAlg`].
206///
207/// # Examples
208///
209/// ```
210/// use pc_rl_core::activation::Activation;
211/// use pc_rl_core::layer::LayerDef;
212/// use pc_rl_core::pc_actor::{PcActor, PcActorConfig, SelectionMode};
213/// use rand::SeedableRng;
214/// use rand::rngs::StdRng;
215///
216/// let config = PcActorConfig {
217///     input_size: 9,
218///     hidden_layers: vec![LayerDef { size: 18, activation: Activation::Tanh }],
219///     output_size: 9,
220///     output_activation: Activation::Tanh,
221///     alpha: 0.1, tol: 0.01, min_steps: 1, max_steps: 20,
222///     lr_weights: 0.01, synchronous: true, temperature: 1.0,
223///     local_lambda: 1.0,
224///     residual: false,
225///     rezero_init: 0.001,
226/// };
227/// let mut rng = StdRng::seed_from_u64(42);
228/// let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
229/// let result = actor.infer(&[0.0; 9]);
230/// assert_eq!(result.y_conv.len(), 9);
231/// ```
232#[derive(Debug)]
233pub struct PcActor<L: LinAlg = CpuLinAlg> {
234    /// Network layers: hidden_layers.len() + 1 (output layer).
235    pub(crate) layers: Vec<Layer<L>>,
236    /// Actor configuration.
237    pub config: PcActorConfig,
238    /// ReZero scaling factors for skip connections. One per skip layer (all i >= 1 when residual=true).
239    pub(crate) rezero_alpha: Vec<f64>,
240    /// Projection matrices for skip connections between layers of different sizes.
241    /// One entry per skip layer: `None` for identity (same size), `Some(Matrix)` for projection.
242    pub(crate) skip_projections: Vec<Option<L::Matrix>>,
243}
244
245impl<L: LinAlg> PcActor<L> {
246    /// Creates a new PC actor with Xavier-initialized layers.
247    ///
248    /// # Arguments
249    ///
250    /// * `config` - Actor configuration specifying topology and hyperparameters.
251    /// * `rng` - Random number generator for weight initialization.
252    ///
253    /// # Errors
254    ///
255    /// Returns `PcError::ConfigValidation` if `input_size`, `output_size`,
256    /// or `temperature` are invalid.
257    pub fn new(config: PcActorConfig, rng: &mut impl Rng) -> Result<Self, PcError> {
258        if config.input_size == 0 {
259            return Err(PcError::ConfigValidation("input_size must be > 0".into()));
260        }
261        if config.output_size == 0 {
262            return Err(PcError::ConfigValidation("output_size must be > 0".into()));
263        }
264        if config.temperature <= 0.0 {
265            return Err(PcError::ConfigValidation(format!(
266                "temperature must be positive, got {}",
267                config.temperature
268            )));
269        }
270        if !(0.0..=1.0).contains(&config.local_lambda) {
271            return Err(PcError::ConfigValidation(format!(
272                "local_lambda must be in [0.0, 1.0], got {}",
273                config.local_lambda
274            )));
275        }
276        if config.rezero_init < 0.0 {
277            return Err(PcError::ConfigValidation(format!(
278                "rezero_init must be >= 0, got {}",
279                config.rezero_init
280            )));
281        }
282        let mut layers: Vec<Layer<L>> = Vec::new();
283        let mut prev_size = config.input_size;
284
285        for def in &config.hidden_layers {
286            layers.push(Layer::<L>::new(prev_size, def.size, def.activation, rng));
287            prev_size = def.size;
288        }
289
290        // Output layer
291        layers.push(Layer::<L>::new(
292            prev_size,
293            config.output_size,
294            config.output_activation,
295            rng,
296        ));
297
298        // Compute rezero_alpha and skip_projections: one per skip layer (all i >= 1)
299        let (rezero_alpha, skip_projections) = if config.residual {
300            let mut alphas = Vec::new();
301            let mut projs = Vec::new();
302            for i in 1..config.hidden_layers.len() {
303                alphas.push(config.rezero_init);
304                if config.hidden_layers[i].size != config.hidden_layers[i - 1].size {
305                    projs.push(Some(L::xavier_mat(
306                        config.hidden_layers[i].size,
307                        config.hidden_layers[i - 1].size,
308                        rng,
309                    )));
310                } else {
311                    projs.push(None);
312                }
313            }
314            (alphas, projs)
315        } else {
316            (Vec::new(), Vec::new())
317        };
318
319        Ok(Self {
320            layers,
321            config,
322            rezero_alpha,
323            skip_projections,
324        })
325    }
326
327    /// Creates a child actor by crossing over two parent actors using CCA neuron alignment.
328    ///
329    /// Aligns hidden neurons functionally via CCA before blending weights.
330    /// Input and output layers use positional crossover (no permutation problem).
331    ///
332    /// # Arguments
333    ///
334    /// * `parent_a` - First parent (reference, typically higher fitness).
335    /// * `parent_b` - Second parent (aligned to A via CCA).
336    /// * `caches_a` - Per-layer activation matrices for parent A `[batch × neurons]`.
337    /// * `caches_b` - Per-layer activation matrices for parent B `[batch × neurons]`.
338    /// * `alpha` - Blending weight: 1.0 = all A, 0.0 = all B.
339    /// * `child_config` - Topology configuration for the child network.
340    /// * `rng` - Random number generator for Xavier initialization.
341    ///
342    /// # Errors
343    ///
344    /// Returns `PcError::ConfigValidation` if `child_config` is invalid.
345    pub fn crossover(
346        parent_a: &PcActor<L>,
347        parent_b: &PcActor<L>,
348        caches_a: &[L::Matrix],
349        caches_b: &[L::Matrix],
350        alpha: f64,
351        child_config: PcActorConfig,
352        rng: &mut impl Rng,
353    ) -> Result<Self, PcError> {
354        let num_child_hidden = child_config.hidden_layers.len();
355        if num_child_hidden == 0 {
356            return Err(PcError::ConfigValidation(
357                "crossover requires at least one hidden layer".into(),
358            ));
359        }
360        let num_parent_a_hidden = parent_a.config.hidden_layers.len();
361        let num_parent_b_hidden = parent_b.config.hidden_layers.len();
362
363        let mut layers: Vec<Layer<L>> = Vec::new();
364        // Track the previous layer's CCA permutation for column propagation
365        let mut prev_perm: Option<Vec<usize>> = None;
366
367        // ── Input layer (layer 0): CCA-aligned crossover ─────────
368        let child_h0 = &child_config.hidden_layers[0];
369
370        if parent_a.config.input_size == child_config.input_size
371            && parent_b.config.input_size == child_config.input_size
372        {
373            let cache_a_0 = caches_a.first();
374            let cache_b_0 = caches_b.first();
375            let (layer, perm) = cca_align_and_blend_layer::<L>(
376                &parent_a.layers[0],
377                &parent_b.layers[0],
378                cache_a_0,
379                cache_b_0,
380                None, // No previous perm for first layer
381                child_h0.size,
382                L::mat_cols(&parent_a.layers[0].weights),
383                child_h0.activation,
384                alpha,
385                rng,
386            )?;
387            layers.push(layer);
388            prev_perm = perm;
389        } else {
390            layers.push(Layer::<L>::new(
391                child_config.input_size,
392                child_h0.size,
393                child_h0.activation,
394                rng,
395            ));
396        }
397
398        // ── Hidden layers 1..n: CCA-aligned crossover ────────────
399        for h_idx in 1..num_child_hidden {
400            let child_def = &child_config.hidden_layers[h_idx];
401            let prev_child_size = child_config.hidden_layers[h_idx - 1].size;
402
403            let a_has = h_idx < num_parent_a_hidden;
404            let b_has = h_idx < num_parent_b_hidden;
405
406            if a_has && b_has {
407                let cache_a_h = caches_a.get(h_idx);
408                let cache_b_h = caches_b.get(h_idx);
409                let (layer, perm) = cca_align_and_blend_layer::<L>(
410                    &parent_a.layers[h_idx],
411                    &parent_b.layers[h_idx],
412                    cache_a_h,
413                    cache_b_h,
414                    prev_perm.as_deref(),
415                    child_def.size,
416                    prev_child_size,
417                    child_def.activation,
418                    alpha,
419                    rng,
420                )?;
421                layers.push(layer);
422                prev_perm = perm;
423            } else {
424                layers.push(Layer::<L>::new(
425                    prev_child_size,
426                    child_def.size,
427                    child_def.activation,
428                    rng,
429                ));
430                prev_perm = None;
431            }
432        }
433
434        // ── Output layer: positional crossover or Xavier ─────────
435        let last_child_hidden = child_config.hidden_layers.last().map(|d| d.size).unwrap();
436        let a_out = parent_a.layers.last().unwrap();
437        let b_out = parent_b.layers.last().unwrap();
438        let a_out_in = L::mat_cols(&a_out.weights);
439        let b_out_in = L::mat_cols(&b_out.weights);
440
441        if a_out_in == last_child_hidden && b_out_in == last_child_hidden {
442            // Positional crossover with column permutation from last hidden layer
443            let b_out_permuted = if let Some(ref pp) = prev_perm {
444                permute_cols::<L>(&b_out.weights, pp)
445            } else {
446                b_out.weights.clone()
447            };
448            let out_rows = child_config.output_size;
449            let mut weights = L::zeros_mat(out_rows, last_child_hidden);
450            let mut biases = L::zeros_vec(out_rows);
451            let blend_rows = out_rows
452                .min(L::mat_rows(&a_out.weights))
453                .min(L::mat_rows(&b_out_permuted));
454            for r in 0..blend_rows {
455                for c in 0..last_child_hidden {
456                    let va = L::mat_get(&a_out.weights, r, c);
457                    let vb = L::mat_get(&b_out_permuted, r, c);
458                    L::mat_set(&mut weights, r, c, alpha * va + (1.0 - alpha) * vb);
459                }
460                let ba = L::vec_get(&a_out.bias, r);
461                let bb = L::vec_get(&b_out.bias, r);
462                L::vec_set(&mut biases, r, alpha * ba + (1.0 - alpha) * bb);
463            }
464            layers.push(Layer {
465                weights,
466                bias: biases,
467                activation: child_config.output_activation,
468            });
469        } else {
470            layers.push(Layer::<L>::new(
471                last_child_hidden,
472                child_config.output_size,
473                child_config.output_activation,
474                rng,
475            ));
476        }
477
478        // ── Residual components ──────────────────────────────────
479        let (rezero_alpha, skip_projections) = if child_config.residual {
480            let mut alphas = Vec::new();
481            let mut projs = Vec::new();
482            for i in 1..num_child_hidden {
483                // ReZero alpha: blend if both parents have it
484                let a_has_rz = i - 1 < parent_a.rezero_alpha.len();
485                let b_has_rz = i - 1 < parent_b.rezero_alpha.len();
486                let rz = if a_has_rz && b_has_rz {
487                    alpha * parent_a.rezero_alpha[i - 1]
488                        + (1.0 - alpha) * parent_b.rezero_alpha[i - 1]
489                } else if a_has_rz {
490                    parent_a.rezero_alpha[i - 1]
491                } else if b_has_rz {
492                    parent_b.rezero_alpha[i - 1]
493                } else {
494                    child_config.rezero_init
495                };
496                alphas.push(rz);
497
498                // Skip projections
499                let cur_size = child_config.hidden_layers[i].size;
500                let prev_size = child_config.hidden_layers[i - 1].size;
501                if cur_size != prev_size {
502                    let a_proj = parent_a
503                        .skip_projections
504                        .get(i - 1)
505                        .and_then(|p| p.as_ref());
506                    let b_proj = parent_b
507                        .skip_projections
508                        .get(i - 1)
509                        .and_then(|p| p.as_ref());
510                    if let (Some(ap), Some(bp)) = (a_proj, b_proj) {
511                        if L::mat_rows(ap) == cur_size
512                            && L::mat_cols(ap) == prev_size
513                            && L::mat_rows(bp) == cur_size
514                            && L::mat_cols(bp) == prev_size
515                        {
516                            // Blend projections
517                            let mut proj = L::zeros_mat(cur_size, prev_size);
518                            for r in 0..cur_size {
519                                for c in 0..prev_size {
520                                    let va = L::mat_get(ap, r, c);
521                                    let vb = L::mat_get(bp, r, c);
522                                    L::mat_set(&mut proj, r, c, alpha * va + (1.0 - alpha) * vb);
523                                }
524                            }
525                            projs.push(Some(proj));
526                        } else {
527                            projs.push(Some(L::xavier_mat(cur_size, prev_size, rng)));
528                        }
529                    } else {
530                        projs.push(Some(L::xavier_mat(cur_size, prev_size, rng)));
531                    }
532                } else {
533                    projs.push(None);
534                }
535            }
536            (alphas, projs)
537        } else {
538            (Vec::new(), Vec::new())
539        };
540
541        Ok(Self {
542            layers,
543            config: child_config,
544            rezero_alpha,
545            skip_projections,
546        })
547    }
548
549    /// Returns the total size of the latent concatenation (sum of hidden layer sizes).
550    pub fn latent_size(&self) -> usize {
551        self.config.hidden_layers.iter().map(|def| def.size).sum()
552    }
553
554    /// Runs the predictive coding inference loop on the given input.
555    ///
556    /// This method is `&self` — it never modifies weights.
557    ///
558    /// # Arguments
559    ///
560    /// * `input` - Input vector of length `input_size`.
561    ///
562    /// # Panics
563    ///
564    /// Panics if `input.len() != config.input_size`.
565    /// Returns whether hidden layer `i` has a skip connection (identity or projection).
566    fn is_skip_layer(&self, i: usize) -> bool {
567        self.config.residual && i >= 1
568    }
569
570    /// Returns the rezero_alpha/skip_projections index for hidden layer `i`.
571    fn skip_alpha_index(&self, i: usize) -> Option<usize> {
572        if !self.is_skip_layer(i) {
573            return None;
574        }
575        Some(i - 1)
576    }
577
578    pub fn infer(&self, input: &[f64]) -> InferResult<L> {
579        assert_eq!(
580            input.len(),
581            self.config.input_size,
582            "input size mismatch: got {}, expected {}",
583            input.len(),
584            self.config.input_size
585        );
586
587        let input_vec = L::vec_from_slice(input);
588        let n_hidden = self.config.hidden_layers.len();
589
590        // Forward pass to initialize hidden states and output
591        let mut hidden_states: Vec<L::Vector> = Vec::with_capacity(n_hidden);
592        let mut tanh_components: Vec<Option<L::Vector>> = Vec::with_capacity(n_hidden);
593        let mut prev = input_vec.clone();
594        for (i, layer) in self.layers[..n_hidden].iter().enumerate() {
595            let tanh_out = layer.forward(&prev);
596            if let Some(alpha_idx) = self.skip_alpha_index(i) {
597                let alpha = self.rezero_alpha[alpha_idx];
598                let scaled = L::vec_scale(&tanh_out, alpha);
599                let skip_path = if let Some(ref proj) = self.skip_projections[alpha_idx] {
600                    L::mat_vec_mul(proj, &prev)
601                } else {
602                    prev.clone()
603                };
604                prev = L::vec_add(&skip_path, &scaled);
605                tanh_components.push(Some(tanh_out));
606            } else {
607                prev = tanh_out;
608                tanh_components.push(None);
609            }
610            hidden_states.push(prev.clone());
611        }
612        // Output from last hidden (or input if no hidden)
613        let last_input = if n_hidden > 0 {
614            &hidden_states[n_hidden - 1]
615        } else {
616            &input_vec
617        };
618        let mut y = self.layers[n_hidden].forward(last_input);
619
620        // PC inference loop
621        let mut steps_used = 0;
622        let mut converged = false;
623        let mut surprise_score = 0.0;
624        let mut last_errors: Vec<L::Vector> = Vec::new();
625
626        for step in 0..self.config.max_steps {
627            steps_used = step + 1;
628
629            // Synchronous mode freezes states before updating (snapshot);
630            // in-place mode reads live states that include prior updates.
631            // Both modes need an owned copy of target[i] since we write
632            // hidden_states[i] within the loop body.
633            let snap_h: Vec<L::Vector>;
634            let snap_tc: Vec<Option<L::Vector>>;
635            let use_snapshot = self.config.synchronous;
636            if use_snapshot {
637                snap_h = hidden_states.clone();
638                snap_tc = tanh_components.clone();
639            } else {
640                snap_h = Vec::new();
641                snap_tc = Vec::new();
642            }
643
644            let mut error_vecs: Vec<L::Vector> = Vec::new();
645
646            for i in (0..n_hidden).rev() {
647                // state_above: sync reads frozen snapshot, in-place reads live
648                let state_above = if i == n_hidden - 1 {
649                    &y
650                } else if use_snapshot {
651                    snap_tc[i + 1].as_ref().unwrap_or(&snap_h[i + 1])
652                } else {
653                    tanh_components[i + 1]
654                        .as_ref()
655                        .unwrap_or(&hidden_states[i + 1])
656                };
657
658                // target: always read pre-update value (clone to own it)
659                let target = if use_snapshot {
660                    snap_tc[i].as_ref().unwrap_or(&snap_h[i]).clone()
661                } else {
662                    tanh_components[i]
663                        .as_ref()
664                        .unwrap_or(&hidden_states[i])
665                        .clone()
666                };
667
668                let prediction = self.layers[i + 1]
669                    .transpose_forward(state_above, self.config.hidden_layers[i].activation);
670
671                let error = L::vec_sub(&prediction, &target);
672                error_vecs.push(error.clone());
673
674                let updated_target = L::vec_add(&target, &L::vec_scale(&error, self.config.alpha));
675                if let Some(alpha_idx) = self.skip_alpha_index(i) {
676                    tanh_components[i] = Some(updated_target.clone());
677                    let alpha = self.rezero_alpha[alpha_idx];
678                    let prev_h = if i > 0 {
679                        &hidden_states[i - 1]
680                    } else {
681                        &input_vec
682                    };
683                    let skip_path = if let Some(ref proj) = self.skip_projections[alpha_idx] {
684                        L::mat_vec_mul(proj, prev_h)
685                    } else {
686                        prev_h.clone()
687                    };
688                    hidden_states[i] =
689                        L::vec_add(&skip_path, &L::vec_scale(&updated_target, alpha));
690                } else {
691                    hidden_states[i] = updated_target;
692                }
693            }
694
695            let top_hidden = if n_hidden > 0 {
696                &hidden_states[n_hidden - 1]
697            } else {
698                &input_vec
699            };
700            y = self.layers[n_hidden].forward(top_hidden);
701
702            let refs: Vec<&L::Vector> = error_vecs.iter().collect();
703            surprise_score = L::rms_error(&refs);
704            last_errors = error_vecs;
705
706            // Convergence check (alpha must be > 0 for meaningful convergence)
707            if self.config.alpha > 0.0
708                && step + 1 >= self.config.min_steps
709                && surprise_score < self.config.tol
710            {
711                converged = true;
712                break;
713            }
714        }
715
716        // Build latent_concat (uses vec_to_vec for GPU compatibility)
717        let mut latent_raw: Vec<f64> = Vec::new();
718        for h in &hidden_states {
719            latent_raw.extend_from_slice(&L::vec_to_vec(h));
720        }
721        let latent_concat = L::vec_from_slice(&latent_raw);
722
723        InferResult {
724            y_conv: y,
725            latent_concat,
726            hidden_states,
727            prediction_errors: last_errors,
728            surprise_score,
729            steps_used,
730            converged,
731            tanh_components,
732        }
733    }
734
735    /// Selects an action given converged output logits and valid actions.
736    ///
737    /// # Arguments
738    ///
739    /// * `y_conv` - Output logits from inference.
740    /// * `valid_actions` - Indices of valid actions.
741    /// * `mode` - Training (stochastic) or Play (deterministic).
742    /// * `rng` - Random number generator (used only in Training mode).
743    ///
744    /// # Panics
745    ///
746    /// Panics if `valid_actions` is empty.
747    pub fn select_action(
748        &self,
749        y_conv: &L::Vector,
750        valid_actions: &[usize],
751        mode: SelectionMode,
752        rng: &mut impl Rng,
753    ) -> usize {
754        assert!(!valid_actions.is_empty(), "valid_actions must not be empty");
755
756        // Scale logits by temperature
757        let scaled = L::vec_scale(y_conv, 1.0 / self.config.temperature);
758
759        let probs = L::softmax_masked(&scaled, valid_actions);
760
761        match mode {
762            SelectionMode::Play => L::argmax_masked(&probs, valid_actions),
763            SelectionMode::Training => L::sample_from_probs(&probs, valid_actions, rng),
764        }
765    }
766
767    /// Updates network weights using a blend of backprop and local PC error.
768    ///
769    /// The `local_lambda` config controls the blend: 1.0 = pure backprop,
770    /// 0.0 = pure local PC learning (Millidge et al. 2022), intermediate = hybrid.
771    ///
772    /// # Arguments
773    ///
774    /// * `output_delta` - Error signal at the output layer.
775    /// * `infer_result` - Result from the most recent inference.
776    /// * `input` - Original input that was fed to `infer`.
777    /// * `surprise_scale` - Multiplier on learning rate based on surprise.
778    ///
779    /// # Panics
780    ///
781    /// Panics if `input.len() != config.input_size`.
782    pub fn update_weights(
783        &mut self,
784        output_delta: &[f64],
785        infer_result: &InferResult<L>,
786        input: &[f64],
787        surprise_scale: f64,
788    ) {
789        assert_eq!(
790            input.len(),
791            self.config.input_size,
792            "input size mismatch: got {}, expected {}",
793            input.len(),
794            self.config.input_size
795        );
796
797        self.update_weights_hybrid(
798            output_delta,
799            infer_result,
800            input,
801            surprise_scale,
802            self.config.local_lambda,
803        );
804    }
805
806    /// Hybrid weight update blending backprop and local PC error signals.
807    ///
808    /// For hidden layers, the effective delta is:
809    /// `delta = lambda * backprop_delta + (1 - lambda) * pc_error`
810    ///
811    /// * `lambda = 1.0` → pure backprop (standard mode).
812    /// * `lambda = 0.0` → pure local PC learning (Millidge et al. 2022).
813    /// * `0 < lambda < 1` → hybrid blend.
814    ///
815    /// The output layer always uses standard backprop from `output_delta`.
816    fn update_weights_hybrid(
817        &mut self,
818        output_delta: &[f64],
819        infer_result: &InferResult<L>,
820        input: &[f64],
821        surprise_scale: f64,
822        lambda: f64,
823    ) {
824        let input_vec = L::vec_from_slice(input);
825        let output_delta_vec = L::vec_from_slice(output_delta);
826        let n_hidden = self.config.hidden_layers.len();
827        let n_layers = self.layers.len();
828
829        // Output layer: always standard backward
830        let output_input = if n_hidden > 0 {
831            &infer_result.hidden_states[n_hidden - 1]
832        } else {
833            &input_vec
834        };
835        let output_output = &infer_result.y_conv;
836        let mut bp_delta = self.layers[n_layers - 1].backward(
837            output_input,
838            output_output,
839            &output_delta_vec,
840            self.config.lr_weights,
841            surprise_scale,
842        );
843
844        // Hidden layers (from top to bottom)
845        for i in (0..n_hidden).rev() {
846            let layer_input = if i > 0 {
847                &infer_result.hidden_states[i - 1]
848            } else {
849                &input_vec
850            };
851
852            // Blend backprop delta with local PC error
853            let effective_delta = if (lambda - 1.0).abs() < f64::EPSILON {
854                bp_delta.clone()
855            } else if lambda.abs() < f64::EPSILON {
856                let error_idx = n_hidden - 1 - i;
857                infer_result.prediction_errors[error_idx].clone()
858            } else {
859                let error_idx = n_hidden - 1 - i;
860                let pc_error = &infer_result.prediction_errors[error_idx];
861                let bp_scaled = L::vec_scale(&bp_delta, lambda);
862                let pc_scaled = L::vec_scale(pc_error, 1.0 - lambda);
863                L::vec_add(&bp_scaled, &pc_scaled)
864            };
865
866            if let Some(alpha_idx) = self.skip_alpha_index(i) {
867                // Skip-eligible layer: use tanh_out for derivative, scale by alpha,
868                // add identity path to propagated gradient, update alpha.
869                let tanh_out = infer_result.tanh_components[i].as_ref().unwrap();
870                let alpha = self.rezero_alpha[alpha_idx];
871                let effective_lr = self.config.lr_weights * surprise_scale;
872
873                // Scale delta by rezero_alpha for the nonlinear path
874                let scaled_delta = L::vec_scale(&effective_delta, alpha);
875
876                // Backward through the layer using tanh_out (not hidden_states[i])
877                let propagated = self.layers[i].backward(
878                    layer_input,
879                    tanh_out,
880                    &scaled_delta,
881                    self.config.lr_weights,
882                    surprise_scale,
883                );
884
885                // Update rezero_alpha: dL/d(alpha) = delta · tanh_out
886                let grad_alpha: f64 = L::vec_dot(&effective_delta, tanh_out);
887                self.rezero_alpha[alpha_idx] -= effective_lr * grad_alpha;
888
889                // Propagated delta = nonlinear path + skip path (identity or projection)
890                if let Some(ref mut proj) = self.skip_projections[alpha_idx] {
891                    // Projection path: W_proj^T × delta
892                    let proj_t = L::mat_transpose(proj);
893                    let skip_delta = L::mat_vec_mul(&proj_t, &effective_delta);
894                    // Update projection: W_proj -= lr × outer(delta, layer_input)
895                    let dw_proj = L::outer_product(&effective_delta, layer_input);
896                    L::mat_scale_add(proj, &dw_proj, -effective_lr);
897                    bp_delta = L::vec_add(&propagated, &skip_delta);
898                } else {
899                    // Identity path: + delta
900                    bp_delta = L::vec_add(&propagated, &effective_delta);
901                }
902            } else {
903                // Standard layer: use hidden_states[i] as output
904                let layer_output = &infer_result.hidden_states[i];
905                bp_delta = self.layers[i].backward(
906                    layer_input,
907                    layer_output,
908                    &effective_delta,
909                    self.config.lr_weights,
910                    surprise_scale,
911                );
912            }
913        }
914    }
915
916    /// Extracts a serializable snapshot of current weights.
917    ///
918    /// Converts generic layers and skip projections to CPU-backed types.
919    pub fn to_weights(&self) -> crate::serializer::PcActorWeights {
920        let cpu_layers: Vec<Layer<CpuLinAlg>> = self
921            .layers
922            .iter()
923            .map(|layer| {
924                let rows = L::mat_rows(&layer.weights);
925                let cols = L::mat_cols(&layer.weights);
926                let mut cpu_weights = crate::matrix::Matrix::zeros(rows, cols);
927                for r in 0..rows {
928                    for c in 0..cols {
929                        cpu_weights.set(r, c, L::mat_get(&layer.weights, r, c));
930                    }
931                }
932                let bias_data = L::vec_to_vec(&layer.bias);
933                Layer {
934                    weights: cpu_weights,
935                    bias: bias_data,
936                    activation: layer.activation,
937                }
938            })
939            .collect();
940        let cpu_projs: Vec<Option<crate::matrix::Matrix>> = self
941            .skip_projections
942            .iter()
943            .map(|opt| {
944                opt.as_ref().map(|m| {
945                    let rows = L::mat_rows(m);
946                    let cols = L::mat_cols(m);
947                    let mut cpu_m = crate::matrix::Matrix::zeros(rows, cols);
948                    for r in 0..rows {
949                        for c in 0..cols {
950                            cpu_m.set(r, c, L::mat_get(m, r, c));
951                        }
952                    }
953                    cpu_m
954                })
955            })
956            .collect();
957        crate::serializer::PcActorWeights {
958            layers: cpu_layers,
959            rezero_alpha: self.rezero_alpha.clone(),
960            skip_projections: cpu_projs,
961        }
962    }
963
964    /// Restores an actor from saved weights without requiring an RNG.
965    ///
966    /// Converts CPU-backed weight snapshots to the target backend `L`.
967    /// Validates that all weight matrix dimensions and bias lengths match
968    /// the expected topology from `config`.
969    ///
970    /// # Errors
971    ///
972    /// Returns `PcError::DimensionMismatch` if any weight matrix or bias
973    /// vector has dimensions inconsistent with the config topology.
974    pub fn from_weights(
975        config: PcActorConfig,
976        weights: crate::serializer::PcActorWeights,
977    ) -> Result<Self, PcError> {
978        let n_hidden = config.hidden_layers.len();
979        let expected_layers = n_hidden + 1;
980
981        if weights.layers.len() != expected_layers {
982            return Err(PcError::DimensionMismatch {
983                expected: expected_layers,
984                got: weights.layers.len(),
985                context: "actor layer count",
986            });
987        }
988
989        // Validate each layer's dimensions
990        let mut prev_size = config.input_size;
991        for (i, cpu_layer) in weights.layers.iter().enumerate() {
992            let (expected_rows, expected_cols) = if i < n_hidden {
993                (config.hidden_layers[i].size, prev_size)
994            } else {
995                (config.output_size, prev_size)
996            };
997
998            if cpu_layer.weights.rows != expected_rows {
999                return Err(PcError::DimensionMismatch {
1000                    expected: expected_rows,
1001                    got: cpu_layer.weights.rows,
1002                    context: "actor layer weight rows",
1003                });
1004            }
1005            if cpu_layer.weights.cols != expected_cols {
1006                return Err(PcError::DimensionMismatch {
1007                    expected: expected_cols,
1008                    got: cpu_layer.weights.cols,
1009                    context: "actor layer weight cols",
1010                });
1011            }
1012            if cpu_layer.bias.len() != expected_rows {
1013                return Err(PcError::DimensionMismatch {
1014                    expected: expected_rows,
1015                    got: cpu_layer.bias.len(),
1016                    context: "actor layer bias length",
1017                });
1018            }
1019
1020            if i < n_hidden {
1021                prev_size = config.hidden_layers[i].size;
1022            }
1023        }
1024
1025        // Validate residual components
1026        if config.residual {
1027            let expected_residual = n_hidden.saturating_sub(1);
1028            if weights.rezero_alpha.len() != expected_residual {
1029                return Err(PcError::DimensionMismatch {
1030                    expected: expected_residual,
1031                    got: weights.rezero_alpha.len(),
1032                    context: "actor rezero_alpha count",
1033                });
1034            }
1035            if weights.skip_projections.len() != expected_residual {
1036                return Err(PcError::DimensionMismatch {
1037                    expected: expected_residual,
1038                    got: weights.skip_projections.len(),
1039                    context: "actor skip_projections count",
1040                });
1041            }
1042            // Validate skip projection dimensions (rows/cols)
1043            for (i, proj_opt) in weights.skip_projections.iter().enumerate() {
1044                if let Some(ref proj) = proj_opt {
1045                    let expected_rows = config.hidden_layers[i + 1].size;
1046                    let expected_cols = config.hidden_layers[i].size;
1047                    if proj.rows != expected_rows || proj.cols != expected_cols {
1048                        return Err(PcError::DimensionMismatch {
1049                            expected: expected_rows * expected_cols,
1050                            got: proj.rows * proj.cols,
1051                            context: "actor skip_projection dimensions",
1052                        });
1053                    }
1054                }
1055            }
1056        }
1057
1058        // Convert layers
1059        let layers: Vec<Layer<L>> = weights
1060            .layers
1061            .into_iter()
1062            .map(|cpu_layer| {
1063                let rows = cpu_layer.weights.rows;
1064                let cols = cpu_layer.weights.cols;
1065                let mut mat = L::zeros_mat(rows, cols);
1066                for r in 0..rows {
1067                    for c in 0..cols {
1068                        L::mat_set(&mut mat, r, c, cpu_layer.weights.get(r, c));
1069                    }
1070                }
1071                let bias = L::vec_from_slice(&cpu_layer.bias);
1072                Layer {
1073                    weights: mat,
1074                    bias,
1075                    activation: cpu_layer.activation,
1076                }
1077            })
1078            .collect();
1079        let skip_projections: Vec<Option<L::Matrix>> = weights
1080            .skip_projections
1081            .into_iter()
1082            .map(|opt| {
1083                opt.map(|cpu_m| {
1084                    let rows = cpu_m.rows;
1085                    let cols = cpu_m.cols;
1086                    let mut mat = L::zeros_mat(rows, cols);
1087                    for r in 0..rows {
1088                        for c in 0..cols {
1089                            L::mat_set(&mut mat, r, c, cpu_m.get(r, c));
1090                        }
1091                    }
1092                    mat
1093                })
1094            })
1095            .collect();
1096        Ok(Self {
1097            layers,
1098            config,
1099            rezero_alpha: weights.rezero_alpha,
1100            skip_projections,
1101        })
1102    }
1103}
1104
1105/// Permute columns of a weight matrix according to a permutation.
1106/// `perm[i]` = source column index for destination column i.
1107pub(crate) fn permute_cols<L: LinAlg>(m: &L::Matrix, perm: &[usize]) -> L::Matrix {
1108    let rows = L::mat_rows(m);
1109    let cols = L::mat_cols(m);
1110    let perm_len = perm.len();
1111    let mut result = L::zeros_mat(rows, cols);
1112    for (dst, &src) in perm.iter().enumerate().take(cols.min(perm_len)) {
1113        if src < cols {
1114            for r in 0..rows {
1115                L::mat_set(&mut result, r, dst, L::mat_get(m, r, src));
1116            }
1117        }
1118    }
1119    // Copy remaining columns (beyond permutation length) in original order
1120    for dst in perm_len..cols {
1121        for r in 0..rows {
1122            L::mat_set(&mut result, r, dst, L::mat_get(m, r, dst));
1123        }
1124    }
1125    result
1126}
1127
1128/// Permute rows of a weight matrix according to a permutation.
1129/// `perm[i]` = source row index for destination row i.
1130pub(crate) fn permute_rows<L: LinAlg>(m: &L::Matrix, perm: &[usize], n: usize) -> L::Matrix {
1131    let cols = L::mat_cols(m);
1132    let perm_len = perm.len();
1133    let mut result = L::zeros_mat(n, cols);
1134    for (dst, &src) in perm.iter().enumerate().take(n.min(perm_len)) {
1135        if src < L::mat_rows(m) {
1136            for c in 0..cols {
1137                L::mat_set(&mut result, dst, c, L::mat_get(m, src, c));
1138            }
1139        }
1140    }
1141    // Copy remaining rows (unmatched) in original order
1142    for dst in perm_len..n {
1143        if dst < L::mat_rows(m) {
1144            for c in 0..cols {
1145                L::mat_set(&mut result, dst, c, L::mat_get(m, dst, c));
1146            }
1147        }
1148    }
1149    result
1150}
1151
1152/// Permute elements of a bias vector according to a permutation.
1153pub(crate) fn permute_vec<L: LinAlg>(v: &L::Vector, perm: &[usize], n: usize) -> L::Vector {
1154    let perm_len = perm.len();
1155    let mut result = L::zeros_vec(n);
1156    for (dst, &src) in perm.iter().enumerate().take(n.min(perm_len)) {
1157        if src < L::vec_len(v) {
1158            L::vec_set(&mut result, dst, L::vec_get(v, src));
1159        }
1160    }
1161    for dst in perm_len..n {
1162        if dst < L::vec_len(v) {
1163            L::vec_set(&mut result, dst, L::vec_get(v, dst));
1164        }
1165    }
1166    result
1167}
1168
1169/// Blend weights from two parent layers into a child layer.
1170/// Handles all 4 dimension cases (equal, child smaller, parents differ, child larger).
1171///
1172/// * `parent_a` - (weights, bias, neuron_count) for parent A.
1173/// * `parent_b` - (weights, bias, neuron_count) for parent B (already CCA-aligned).
1174/// * `child_cols` - Number of columns (input size) for child layer.
1175#[allow(clippy::too_many_arguments)]
1176pub(crate) fn blend_layer_weights<L: LinAlg>(
1177    parent_a: (&L::Matrix, &L::Vector, usize),
1178    parent_b: (&L::Matrix, &L::Vector, usize),
1179    n_child: usize,
1180    child_cols: usize,
1181    alpha: f64,
1182    rng: &mut impl Rng,
1183) -> (L::Matrix, L::Vector) {
1184    let (a_weights, a_biases, n_a) = parent_a;
1185    let (b_weights, b_biases, n_b) = parent_b;
1186    let n_min = n_a.min(n_b);
1187    let n_max = n_a.max(n_b);
1188    let a_cols = L::mat_cols(a_weights);
1189    let b_cols = L::mat_cols(b_weights);
1190    let use_cols = child_cols.min(a_cols).min(b_cols);
1191
1192    let mut weights = L::zeros_mat(n_child, child_cols);
1193    let mut biases = L::zeros_vec(n_child);
1194
1195    // Blending zone [0..min(n_min, n_child))
1196    let blend_end = n_min.min(n_child);
1197    for r in 0..blend_end {
1198        for c in 0..use_cols {
1199            let va = L::mat_get(a_weights, r, c);
1200            let vb = L::mat_get(b_weights, r, c);
1201            L::mat_set(&mut weights, r, c, alpha * va + (1.0 - alpha) * vb);
1202        }
1203        let ba = L::vec_get(a_biases, r);
1204        let bb = L::vec_get(b_biases, r);
1205        L::vec_set(&mut biases, r, alpha * ba + (1.0 - alpha) * bb);
1206    }
1207
1208    // Copy zone [n_min..min(n_max, n_child)) from the larger parent
1209    let copy_end = n_max.min(n_child);
1210    if copy_end > blend_end {
1211        let (larger_w, larger_b) = if n_a >= n_b {
1212            (a_weights, a_biases)
1213        } else {
1214            (b_weights, b_biases)
1215        };
1216        let larger_cols = L::mat_cols(larger_w);
1217        for r in blend_end..copy_end {
1218            for c in 0..child_cols.min(larger_cols) {
1219                L::mat_set(&mut weights, r, c, L::mat_get(larger_w, r, c));
1220            }
1221            L::vec_set(&mut biases, r, L::vec_get(larger_b, r));
1222        }
1223    }
1224
1225    // Xavier zone [n_max..n_child) for new neurons
1226    if n_child > n_max {
1227        let xavier = L::xavier_mat(n_child - n_max, child_cols, rng);
1228        for r in n_max..n_child {
1229            for c in 0..child_cols {
1230                L::mat_set(&mut weights, r, c, L::mat_get(&xavier, r - n_max, c));
1231            }
1232            // biases stay zero for Xavier zone
1233        }
1234    }
1235
1236    (weights, biases)
1237}
1238
1239/// CCA-aligns and blends a single hidden layer from two parents.
1240///
1241/// Handles the common pattern: CCA alignment → column permutation from
1242/// previous layer → row permutation → blend. Returns the blended layer
1243/// and the CCA permutation applied (for column propagation to the next layer).
1244///
1245/// * `prev_perm` — Permutation from the previous layer to apply to columns.
1246///   Pass `None` to skip column propagation.
1247#[allow(clippy::too_many_arguments)]
1248pub(crate) fn cca_align_and_blend_layer<L: LinAlg>(
1249    a_layer: &Layer<L>,
1250    b_layer: &Layer<L>,
1251    cache_a: Option<&L::Matrix>,
1252    cache_b: Option<&L::Matrix>,
1253    prev_perm: Option<&[usize]>,
1254    child_rows: usize,
1255    child_cols: usize,
1256    child_activation: Activation,
1257    alpha: f64,
1258    rng: &mut impl Rng,
1259) -> Result<(Layer<L>, Option<Vec<usize>>), crate::error::PcError> {
1260    let n_a = L::mat_rows(&a_layer.weights);
1261    let n_b = L::mat_rows(&b_layer.weights);
1262
1263    // CCA alignment
1264    let perm = if let (Some(ca), Some(cb)) = (cache_a, cache_b) {
1265        Some(crate::matrix::cca_neuron_alignment::<L>(ca, cb)?)
1266    } else {
1267        None
1268    };
1269
1270    // Apply previous layer's permutation to columns of parent B
1271    let b_weights_col = if let Some(pp) = prev_perm {
1272        permute_cols::<L>(&b_layer.weights, pp)
1273    } else {
1274        b_layer.weights.clone()
1275    };
1276
1277    // Apply CCA row permutation to parent B
1278    let b_weights_aligned = if let Some(ref p) = perm {
1279        permute_rows::<L>(&b_weights_col, p, n_b)
1280    } else {
1281        b_weights_col
1282    };
1283    let b_bias_aligned = if let Some(ref p) = perm {
1284        permute_vec::<L>(&b_layer.bias, p, n_b)
1285    } else {
1286        b_layer.bias.clone()
1287    };
1288
1289    let (weights, biases) = blend_layer_weights::<L>(
1290        (&a_layer.weights, &a_layer.bias, n_a),
1291        (&b_weights_aligned, &b_bias_aligned, n_b),
1292        child_rows,
1293        child_cols,
1294        alpha,
1295        rng,
1296    );
1297
1298    Ok((
1299        Layer {
1300            weights,
1301            bias: biases,
1302            activation: child_activation,
1303        },
1304        perm,
1305    ))
1306}
1307
1308#[cfg(test)]
1309mod tests {
1310    use super::*;
1311    use crate::activation::Activation;
1312    use crate::layer::LayerDef;
1313    use crate::matrix::WEIGHT_CLIP;
1314    use rand::rngs::StdRng;
1315    use rand::SeedableRng;
1316
1317    fn make_rng() -> StdRng {
1318        StdRng::seed_from_u64(42)
1319    }
1320
1321    fn default_config() -> PcActorConfig {
1322        PcActorConfig {
1323            input_size: 9,
1324            hidden_layers: vec![LayerDef {
1325                size: 18,
1326                activation: Activation::Tanh,
1327            }],
1328            output_size: 9,
1329            output_activation: Activation::Tanh,
1330            alpha: 0.1,
1331            tol: 0.01,
1332            min_steps: 1,
1333            max_steps: 20,
1334            lr_weights: 0.01,
1335            synchronous: true,
1336            temperature: 1.0,
1337            local_lambda: 1.0,
1338            residual: false,
1339            rezero_init: 0.001,
1340        }
1341    }
1342
1343    fn two_hidden_config() -> PcActorConfig {
1344        PcActorConfig {
1345            hidden_layers: vec![
1346                LayerDef {
1347                    size: 18,
1348                    activation: Activation::Tanh,
1349                },
1350                LayerDef {
1351                    size: 12,
1352                    activation: Activation::Tanh,
1353                },
1354            ],
1355            ..default_config()
1356        }
1357    }
1358
1359    // ── Inference Tests ──────────────────────────────────────────────
1360
1361    #[test]
1362    fn test_infer_converges_on_zero_board() {
1363        let mut rng = make_rng();
1364        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1365        let result = actor.infer(&[0.0; 9]);
1366        // Should complete without panic; all finite
1367        for &v in &result.y_conv {
1368            assert!(v.is_finite());
1369        }
1370    }
1371
1372    #[test]
1373    fn test_infer_steps_used_at_least_min_steps() {
1374        let mut rng = make_rng();
1375        let config = PcActorConfig {
1376            min_steps: 3,
1377            ..default_config()
1378        };
1379        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1380        let result = actor.infer(&[0.0; 9]);
1381        assert!(result.steps_used >= 3);
1382    }
1383
1384    #[test]
1385    fn test_infer_alpha_zero_does_not_converge() {
1386        let mut rng = make_rng();
1387        let config = PcActorConfig {
1388            alpha: 0.0,
1389            ..default_config()
1390        };
1391        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1392        let result = actor.infer(&[0.0; 9]);
1393        assert!(!result.converged);
1394        assert_eq!(result.steps_used, 20);
1395    }
1396
1397    #[test]
1398    fn test_infer_does_not_modify_weights() {
1399        let mut rng = make_rng();
1400        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1401        let weights_before: Vec<Vec<f64>> = actor
1402            .layers
1403            .iter()
1404            .map(|l| l.weights.data.clone())
1405            .collect();
1406        let _ = actor.infer(&[0.0; 9]);
1407        for (i, layer) in actor.layers.iter().enumerate() {
1408            assert_eq!(layer.weights.data, weights_before[i]);
1409        }
1410    }
1411
1412    #[test]
1413    fn test_infer_latent_size_single_hidden() {
1414        let mut rng = make_rng();
1415        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1416        let result = actor.infer(&[0.0; 9]);
1417        assert_eq!(result.latent_concat.len(), 18);
1418    }
1419
1420    #[test]
1421    fn test_infer_latent_size_two_hidden() {
1422        let mut rng = make_rng();
1423        let actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
1424        let result = actor.infer(&[0.0; 9]);
1425        assert_eq!(result.latent_concat.len(), 30);
1426    }
1427
1428    #[test]
1429    fn test_infer_latent_size_matches_latent_size_method() {
1430        let mut rng = make_rng();
1431        let actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
1432        let result = actor.infer(&[0.0; 9]);
1433        assert_eq!(result.latent_concat.len(), actor.latent_size());
1434    }
1435
1436    #[test]
1437    fn test_infer_y_conv_length_equals_output_size() {
1438        let mut rng = make_rng();
1439        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1440        let result = actor.infer(&[0.0; 9]);
1441        assert_eq!(result.y_conv.len(), 9);
1442    }
1443
1444    #[test]
1445    fn test_infer_hidden_states_count_matches_hidden_layers() {
1446        let mut rng = make_rng();
1447        let actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
1448        let result = actor.infer(&[0.0; 9]);
1449        assert_eq!(result.hidden_states.len(), 2);
1450    }
1451
1452    #[test]
1453    fn test_infer_all_outputs_finite() {
1454        let mut rng = make_rng();
1455        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1456        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
1457        for &v in &result.y_conv {
1458            assert!(v.is_finite());
1459        }
1460        for &v in &result.latent_concat {
1461            assert!(v.is_finite());
1462        }
1463        assert!(result.surprise_score.is_finite());
1464    }
1465
1466    #[test]
1467    fn test_infer_surprise_score_nonnegative() {
1468        let mut rng = make_rng();
1469        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1470        let result = actor.infer(&[0.0; 9]);
1471        assert!(result.surprise_score >= 0.0);
1472    }
1473
1474    #[test]
1475    fn test_infer_synchronous_and_inplace_both_converge() {
1476        let mut rng = make_rng();
1477        let sync_actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1478        let mut rng2 = make_rng();
1479        let inplace_config = PcActorConfig {
1480            synchronous: false,
1481            ..default_config()
1482        };
1483        let inplace_actor: PcActor = PcActor::new(inplace_config, &mut rng2).unwrap();
1484        let sync_result = sync_actor.infer(&[0.0; 9]);
1485        let inplace_result = inplace_actor.infer(&[0.0; 9]);
1486        // Both should complete without panic; at least one should converge or use all steps
1487        assert!(sync_result.steps_used > 0);
1488        assert!(inplace_result.steps_used > 0);
1489    }
1490
1491    #[test]
1492    fn test_infer_synchronous_produces_different_result_than_inplace() {
1493        let mut rng = make_rng();
1494        let config = PcActorConfig {
1495            hidden_layers: vec![
1496                LayerDef {
1497                    size: 18,
1498                    activation: Activation::Tanh,
1499                },
1500                LayerDef {
1501                    size: 12,
1502                    activation: Activation::Tanh,
1503                },
1504            ],
1505            alpha: 0.3,
1506            tol: 1e-15,
1507            min_steps: 1,
1508            max_steps: 3,
1509            ..default_config()
1510        };
1511        let sync_actor: PcActor = PcActor::new(config.clone(), &mut rng).unwrap();
1512        let mut rng2 = make_rng();
1513        let inplace_config = PcActorConfig {
1514            synchronous: false,
1515            ..config
1516        };
1517        let inplace_actor: PcActor = PcActor::new(inplace_config, &mut rng2).unwrap();
1518        let input = [1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
1519        let sync_result = sync_actor.infer(&input);
1520        let inplace_result = inplace_actor.infer(&input);
1521        // Different update orders should produce different hidden representations
1522        let differs = sync_result
1523            .latent_concat
1524            .iter()
1525            .zip(inplace_result.latent_concat.iter())
1526            .any(|(a, b)| (a - b).abs() > 1e-12);
1527        assert!(
1528            differs,
1529            "Synchronous and in-place should produce different results"
1530        );
1531    }
1532
1533    #[test]
1534    #[should_panic(expected = "input size")]
1535    fn test_infer_panics_wrong_input_length() {
1536        let mut rng = make_rng();
1537        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1538        let _ = actor.infer(&[0.0; 5]);
1539    }
1540
1541    // ── Action Selection Tests ───────────────────────────────────────
1542
1543    #[test]
1544    fn test_select_action_training_always_in_valid() {
1545        let mut rng = make_rng();
1546        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1547        let logits = vec![0.1, -0.2, 0.5, -0.1, 0.3, 0.0, -0.3, 0.2, 0.4];
1548        let valid = vec![0, 2, 4, 6, 8];
1549        for _ in 0..20 {
1550            let action = actor.select_action(&logits, &valid, SelectionMode::Training, &mut rng);
1551            assert!(valid.contains(&action));
1552        }
1553    }
1554
1555    #[test]
1556    fn test_select_action_play_mode_deterministic() {
1557        let mut rng1 = StdRng::seed_from_u64(1);
1558        let mut rng2 = StdRng::seed_from_u64(99);
1559        let mut rng_init = make_rng();
1560        let actor: PcActor = PcActor::new(default_config(), &mut rng_init).unwrap();
1561        let logits = vec![0.1, -0.2, 0.5, -0.1, 0.3, 0.0, -0.3, 0.2, 0.4];
1562        let valid = vec![0, 2, 4, 6, 8];
1563        let a1 = actor.select_action(&logits, &valid, SelectionMode::Play, &mut rng1);
1564        let a2 = actor.select_action(&logits, &valid, SelectionMode::Play, &mut rng2);
1565        assert_eq!(a1, a2, "Play mode should be deterministic");
1566    }
1567
1568    #[test]
1569    fn test_select_action_temperature_gt_one_more_uniform() {
1570        let mut rng = make_rng();
1571        let hot_config = PcActorConfig {
1572            temperature: 5.0,
1573            ..default_config()
1574        };
1575        let actor: PcActor = PcActor::new(hot_config, &mut rng).unwrap();
1576        // With high temperature, sampling should visit more actions
1577        let logits = vec![10.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0];
1578        let valid: Vec<usize> = (0..9).collect();
1579        let mut seen = std::collections::HashSet::new();
1580        let mut rng2 = StdRng::seed_from_u64(123);
1581        for _ in 0..100 {
1582            let a = actor.select_action(&logits, &valid, SelectionMode::Training, &mut rng2);
1583            seen.insert(a);
1584        }
1585        assert!(seen.len() > 1, "High temperature should explore more");
1586    }
1587
1588    #[test]
1589    #[should_panic]
1590    fn test_select_action_empty_valid_panics() {
1591        let mut rng = make_rng();
1592        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1593        let logits = vec![0.1; 9];
1594        let _ = actor.select_action(&logits, &[], SelectionMode::Training, &mut rng);
1595    }
1596
1597    // ── Weight Update Tests ──────────────────────────────────────────
1598
1599    #[test]
1600    fn test_update_weights_changes_first_layer() {
1601        let mut rng = make_rng();
1602        let mut actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1603        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
1604        let infer_result = actor.infer(&input);
1605        let weights_before = actor.layers[0].weights.data.clone();
1606        let delta = vec![0.1; 9];
1607        actor.update_weights(&delta, &infer_result, &input, 1.0);
1608        assert_ne!(actor.layers[0].weights.data, weights_before);
1609    }
1610
1611    #[test]
1612    fn test_update_weights_clips_all_layers() {
1613        let mut rng = make_rng();
1614        let mut actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1615        let input = vec![1.0; 9];
1616        let infer_result = actor.infer(&input);
1617        let delta = vec![1e6; 9];
1618        actor.update_weights(&delta, &infer_result, &input, 1.0);
1619        for layer in &actor.layers {
1620            for &w in &layer.weights.data {
1621                assert!(
1622                    w.abs() <= WEIGHT_CLIP + 1e-12,
1623                    "Weight {w} exceeds WEIGHT_CLIP"
1624                );
1625            }
1626        }
1627    }
1628
1629    #[test]
1630    fn test_update_weights_two_hidden_changes_both_layers() {
1631        let mut rng = make_rng();
1632        let mut actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
1633        let input = vec![0.5; 9];
1634        let infer_result = actor.infer(&input);
1635        let w0_before = actor.layers[0].weights.data.clone();
1636        let w1_before = actor.layers[1].weights.data.clone();
1637        let delta = vec![0.1; 9];
1638        actor.update_weights(&delta, &infer_result, &input, 1.0);
1639        assert_ne!(
1640            actor.layers[0].weights.data, w0_before,
1641            "Layer 0 should change"
1642        );
1643        assert_ne!(
1644            actor.layers[1].weights.data, w1_before,
1645            "Layer 1 should change"
1646        );
1647    }
1648
1649    #[test]
1650    #[should_panic(expected = "input size")]
1651    fn test_update_weights_panics_wrong_x_size() {
1652        let mut rng = make_rng();
1653        let mut actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1654        let input = vec![0.0; 9];
1655        let infer_result = actor.infer(&input);
1656        let delta = vec![0.1; 9];
1657        actor.update_weights(&delta, &infer_result, &[0.0; 5], 1.0);
1658    }
1659
1660    // ── Zero Hidden Layers Test ─────────────────────────────────
1661
1662    #[test]
1663    fn test_infer_zero_hidden_layers_produces_finite_output() {
1664        let mut rng = make_rng();
1665        let config = PcActorConfig {
1666            hidden_layers: vec![],
1667            ..default_config()
1668        };
1669        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1670        let result = actor.infer(&[0.5; 9]);
1671        assert_eq!(result.y_conv.len(), 9);
1672        assert!(result.y_conv.iter().all(|v| v.is_finite()));
1673        assert!(result.latent_concat.is_empty());
1674        assert!(result.hidden_states.is_empty());
1675    }
1676
1677    // ── Config Validation Tests ─────────────────────────────────
1678
1679    #[test]
1680    fn test_new_zero_input_size_returns_error() {
1681        let mut rng = make_rng();
1682        let config = PcActorConfig {
1683            input_size: 0,
1684            ..default_config()
1685        };
1686        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1687        assert!(result.is_err());
1688        let err = result.unwrap_err();
1689        assert!(matches!(err, crate::error::PcError::ConfigValidation(_)));
1690    }
1691
1692    #[test]
1693    fn test_new_zero_output_size_returns_error() {
1694        let mut rng = make_rng();
1695        let config = PcActorConfig {
1696            output_size: 0,
1697            ..default_config()
1698        };
1699        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1700        assert!(result.is_err());
1701    }
1702
1703    #[test]
1704    fn test_new_zero_temperature_returns_error() {
1705        let mut rng = make_rng();
1706        let config = PcActorConfig {
1707            temperature: 0.0,
1708            ..default_config()
1709        };
1710        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1711        assert!(result.is_err());
1712    }
1713
1714    #[test]
1715    fn test_new_negative_temperature_returns_error() {
1716        let mut rng = make_rng();
1717        let config = PcActorConfig {
1718            temperature: -1.0,
1719            ..default_config()
1720        };
1721        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1722        assert!(result.is_err());
1723    }
1724
1725    // ── Residual / ReZero Config Tests ────────────────────────
1726
1727    #[test]
1728    fn test_default_config_residual_false() {
1729        let config = default_config();
1730        assert!(!config.residual);
1731    }
1732
1733    #[test]
1734    fn test_default_config_rezero_init() {
1735        let config = default_config();
1736        assert!((config.rezero_init - 0.001).abs() < 1e-12);
1737    }
1738
1739    #[test]
1740    fn test_new_negative_rezero_init_returns_error() {
1741        let mut rng = make_rng();
1742        let config = PcActorConfig {
1743            residual: true,
1744            rezero_init: -0.1,
1745            ..default_config()
1746        };
1747        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1748        assert!(result.is_err());
1749    }
1750
1751    #[test]
1752    fn test_residual_mixed_sizes_accepted() {
1753        let mut rng = make_rng();
1754        let config = PcActorConfig {
1755            residual: true,
1756            hidden_layers: vec![
1757                LayerDef {
1758                    size: 27,
1759                    activation: Activation::Tanh,
1760                },
1761                LayerDef {
1762                    size: 18,
1763                    activation: Activation::Tanh,
1764                },
1765            ],
1766            ..default_config()
1767        };
1768        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1769        assert!(result.is_ok());
1770    }
1771
1772    #[test]
1773    fn test_residual_mixed_sizes_all_skip() {
1774        // [27, 27, 18]: ALL layers i>=1 get skip — identity for 27→27, projection for 27→18
1775        let mut rng = make_rng();
1776        let config = PcActorConfig {
1777            residual: true,
1778            hidden_layers: vec![
1779                LayerDef {
1780                    size: 27,
1781                    activation: Activation::Tanh,
1782                },
1783                LayerDef {
1784                    size: 27,
1785                    activation: Activation::Tanh,
1786                },
1787                LayerDef {
1788                    size: 18,
1789                    activation: Activation::Tanh,
1790                },
1791            ],
1792            ..default_config()
1793        };
1794        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1795        // 2 skips: layer 1 (identity) + layer 2 (projection)
1796        assert_eq!(actor.rezero_alpha.len(), 2);
1797    }
1798
1799    #[test]
1800    fn test_residual_heterogeneous_has_projection() {
1801        // [27, 18]: different sizes → projection matrix created
1802        let mut rng = make_rng();
1803        let config = PcActorConfig {
1804            residual: true,
1805            hidden_layers: vec![
1806                LayerDef {
1807                    size: 27,
1808                    activation: Activation::Tanh,
1809                },
1810                LayerDef {
1811                    size: 18,
1812                    activation: Activation::Tanh,
1813                },
1814            ],
1815            ..default_config()
1816        };
1817        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1818        assert_eq!(actor.rezero_alpha.len(), 1);
1819        assert_eq!(actor.skip_projections.len(), 1);
1820        assert!(actor.skip_projections[0].is_some());
1821        let proj = actor.skip_projections[0].as_ref().unwrap();
1822        assert_eq!(proj.rows, 18); // output dim
1823        assert_eq!(proj.cols, 27); // input dim
1824    }
1825
1826    #[test]
1827    fn test_residual_homogeneous_no_projection() {
1828        // [27, 27]: same sizes → no projection needed
1829        let mut rng = make_rng();
1830        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
1831        assert_eq!(actor.skip_projections.len(), 1);
1832        assert!(actor.skip_projections[0].is_none());
1833    }
1834
1835    #[test]
1836    fn test_residual_mixed_sizes_infer_finite() {
1837        let mut rng = make_rng();
1838        let config = PcActorConfig {
1839            residual: true,
1840            hidden_layers: vec![
1841                LayerDef {
1842                    size: 27,
1843                    activation: Activation::Tanh,
1844                },
1845                LayerDef {
1846                    size: 27,
1847                    activation: Activation::Tanh,
1848                },
1849                LayerDef {
1850                    size: 18,
1851                    activation: Activation::Tanh,
1852                },
1853            ],
1854            ..default_config()
1855        };
1856        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1857        let result = actor.infer(&[0.5; 9]);
1858        for &v in &result.y_conv {
1859            assert!(v.is_finite());
1860        }
1861        assert_eq!(result.hidden_states.len(), 3);
1862        assert_eq!(result.latent_concat.len(), 27 + 27 + 18);
1863    }
1864
1865    #[test]
1866    fn test_residual_same_size_hidden_layers_accepted() {
1867        let mut rng = make_rng();
1868        let config = PcActorConfig {
1869            residual: true,
1870            hidden_layers: vec![
1871                LayerDef {
1872                    size: 27,
1873                    activation: Activation::Tanh,
1874                },
1875                LayerDef {
1876                    size: 27,
1877                    activation: Activation::Tanh,
1878                },
1879            ],
1880            ..default_config()
1881        };
1882        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1883        assert!(result.is_ok());
1884    }
1885
1886    fn residual_two_hidden_config() -> PcActorConfig {
1887        PcActorConfig {
1888            residual: true,
1889            hidden_layers: vec![
1890                LayerDef {
1891                    size: 27,
1892                    activation: Activation::Tanh,
1893                },
1894                LayerDef {
1895                    size: 27,
1896                    activation: Activation::Tanh,
1897                },
1898            ],
1899            ..default_config()
1900        }
1901    }
1902
1903    #[test]
1904    fn test_non_residual_actor_empty_rezero_alpha() {
1905        let mut rng = make_rng();
1906        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
1907        assert!(actor.rezero_alpha.is_empty());
1908    }
1909
1910    #[test]
1911    fn test_residual_two_hidden_one_rezero_alpha() {
1912        let mut rng = make_rng();
1913        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
1914        assert_eq!(actor.rezero_alpha.len(), 1);
1915    }
1916
1917    #[test]
1918    fn test_residual_three_hidden_two_rezero_alpha() {
1919        let mut rng = make_rng();
1920        let config = PcActorConfig {
1921            residual: true,
1922            hidden_layers: vec![
1923                LayerDef {
1924                    size: 27,
1925                    activation: Activation::Tanh,
1926                },
1927                LayerDef {
1928                    size: 27,
1929                    activation: Activation::Tanh,
1930                },
1931                LayerDef {
1932                    size: 27,
1933                    activation: Activation::Tanh,
1934                },
1935            ],
1936            ..default_config()
1937        };
1938        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1939        assert_eq!(actor.rezero_alpha.len(), 2);
1940    }
1941
1942    #[test]
1943    fn test_rezero_alpha_initialized_to_rezero_init() {
1944        let mut rng = make_rng();
1945        let config = PcActorConfig {
1946            rezero_init: 0.005,
1947            ..residual_two_hidden_config()
1948        };
1949        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1950        assert!((actor.rezero_alpha[0] - 0.005).abs() < 1e-12);
1951    }
1952
1953    #[test]
1954    fn test_residual_single_hidden_zero_rezero_alpha() {
1955        let mut rng = make_rng();
1956        let config = PcActorConfig {
1957            residual: true,
1958            ..default_config()
1959        };
1960        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
1961        assert!(actor.rezero_alpha.is_empty());
1962    }
1963
1964    #[test]
1965    fn test_residual_single_hidden_accepted() {
1966        let mut rng = make_rng();
1967        let config = PcActorConfig {
1968            residual: true,
1969            ..default_config()
1970        };
1971        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
1972        assert!(result.is_ok());
1973    }
1974
1975    // ── Local Learning (PC-based weight updates) Tests ──────────
1976
1977    // ── Residual Inference Tests ──────────────────────────────
1978
1979    #[test]
1980    fn test_residual_false_identical_to_non_residual() {
1981        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
1982        let mut rng1 = make_rng();
1983        let actor1: PcActor = PcActor::new(two_hidden_config(), &mut rng1).unwrap();
1984        let result1 = actor1.infer(&input);
1985
1986        let mut rng2 = make_rng();
1987        let config2 = PcActorConfig {
1988            residual: false,
1989            ..two_hidden_config()
1990        };
1991        let actor2: PcActor = PcActor::new(config2, &mut rng2).unwrap();
1992        let result2 = actor2.infer(&input);
1993
1994        for (a, b) in result1.y_conv.iter().zip(result2.y_conv.iter()) {
1995            assert!((a - b).abs() < 1e-12);
1996        }
1997    }
1998
1999    #[test]
2000    fn test_residual_rezero_zero_second_hidden_near_identity() {
2001        let mut rng = make_rng();
2002        let config = PcActorConfig {
2003            rezero_init: 0.0,
2004            alpha: 0.0,
2005            ..residual_two_hidden_config()
2006        };
2007        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2008        let result = actor.infer(&[0.5; 9]);
2009        let h0 = &result.hidden_states[0];
2010        let h1 = &result.hidden_states[1];
2011        for (a, b) in h0.iter().zip(h1.iter()) {
2012            assert!(
2013                (a - b).abs() < 1e-12,
2014                "With rezero_init=0, h[1] should equal h[0]"
2015            );
2016        }
2017    }
2018
2019    #[test]
2020    fn test_residual_infer_all_outputs_finite() {
2021        let mut rng = make_rng();
2022        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2023        let result = actor.infer(&[0.5; 9]);
2024        for &v in &result.y_conv {
2025            assert!(v.is_finite());
2026        }
2027        for &v in &result.latent_concat {
2028            assert!(v.is_finite());
2029        }
2030        assert!(result.surprise_score.is_finite());
2031    }
2032
2033    #[test]
2034    fn test_residual_latent_concat_size() {
2035        let mut rng = make_rng();
2036        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2037        let result = actor.infer(&[0.5; 9]);
2038        assert_eq!(result.latent_concat.len(), 54); // 27 + 27
2039    }
2040
2041    #[test]
2042    fn test_residual_pc_loop_completes() {
2043        let mut rng = make_rng();
2044        let config = PcActorConfig {
2045            alpha: 0.03,
2046            max_steps: 5,
2047            ..residual_two_hidden_config()
2048        };
2049        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2050        let result = actor.infer(&[0.5; 9]);
2051        assert!(result.steps_used > 0);
2052        assert!(result.steps_used <= 5);
2053    }
2054
2055    #[test]
2056    fn test_residual_hidden_states_count() {
2057        let mut rng = make_rng();
2058        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2059        let result = actor.infer(&[0.5; 9]);
2060        assert_eq!(result.hidden_states.len(), 2);
2061    }
2062
2063    #[test]
2064    fn test_residual_infer_does_not_modify_weights() {
2065        let mut rng = make_rng();
2066        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2067        let weights_before: Vec<Vec<f64>> = actor
2068            .layers
2069            .iter()
2070            .map(|l| l.weights.data.clone())
2071            .collect();
2072        let alpha_before = actor.rezero_alpha.clone();
2073        let _ = actor.infer(&[0.5; 9]);
2074        for (i, layer) in actor.layers.iter().enumerate() {
2075            assert_eq!(layer.weights.data, weights_before[i]);
2076        }
2077        assert_eq!(actor.rezero_alpha, alpha_before);
2078    }
2079
2080    #[test]
2081    fn test_residual_three_hidden_infer_finite() {
2082        let mut rng = make_rng();
2083        let config = PcActorConfig {
2084            residual: true,
2085            hidden_layers: vec![
2086                LayerDef {
2087                    size: 27,
2088                    activation: Activation::Tanh,
2089                },
2090                LayerDef {
2091                    size: 27,
2092                    activation: Activation::Tanh,
2093                },
2094                LayerDef {
2095                    size: 27,
2096                    activation: Activation::Tanh,
2097                },
2098            ],
2099            ..default_config()
2100        };
2101        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2102        let result = actor.infer(&[0.5; 9]);
2103        for &v in &result.y_conv {
2104            assert!(v.is_finite());
2105        }
2106    }
2107
2108    #[test]
2109    fn test_residual_tanh_components_populated() {
2110        let mut rng = make_rng();
2111        let actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2112        let result = actor.infer(&[0.5; 9]);
2113        assert_eq!(result.tanh_components.len(), 2);
2114        assert!(result.tanh_components[0].is_none()); // layer 0: no skip
2115        assert!(result.tanh_components[1].is_some()); // layer 1: has skip
2116        assert_eq!(result.tanh_components[1].as_ref().unwrap().len(), 27);
2117    }
2118
2119    #[test]
2120    fn test_residual_pc_prediction_uses_tanh_component_not_full_state() {
2121        // With rezero_init=1.0, h[1] = tanh_out + h[0] (significantly different
2122        // from tanh_out alone). If PC prediction uses h[1] instead of tanh_out,
2123        // the surprise score and convergence will differ.
2124        // Two runs with same weights: one with alpha=0 (no PC), one with alpha>0.
2125        // The PC loop should converge meaningfully (surprise decreases).
2126        let mut rng = make_rng();
2127        let config = PcActorConfig {
2128            rezero_init: 1.0,
2129            alpha: 0.1,
2130            max_steps: 20,
2131            tol: 0.001,
2132            min_steps: 1,
2133            ..residual_two_hidden_config()
2134        };
2135        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2136        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
2137        // With proper PC predictions, surprise should be finite and non-negative
2138        assert!(result.surprise_score.is_finite());
2139        assert!(result.surprise_score >= 0.0);
2140        // Prediction errors should all be finite
2141        for errors in &result.prediction_errors {
2142            for &e in errors {
2143                assert!(e.is_finite(), "PC prediction error not finite: {e}");
2144            }
2145        }
2146    }
2147
2148    // ── Residual Backward Tests ────────────────────────────────
2149
2150    #[test]
2151    fn test_residual_false_update_identical_to_non_residual() {
2152        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2153        let delta = vec![0.1; 9];
2154
2155        let mut rng1 = make_rng();
2156        let mut actor1: PcActor = PcActor::new(two_hidden_config(), &mut rng1).unwrap();
2157        let infer1 = actor1.infer(&input);
2158        actor1.update_weights(&delta, &infer1, &input, 1.0);
2159
2160        let mut rng2 = make_rng();
2161        let config2 = PcActorConfig {
2162            residual: false,
2163            ..two_hidden_config()
2164        };
2165        let mut actor2: PcActor = PcActor::new(config2, &mut rng2).unwrap();
2166        let infer2 = actor2.infer(&input);
2167        actor2.update_weights(&delta, &infer2, &input, 1.0);
2168
2169        for i in 0..actor1.layers.len() {
2170            assert_eq!(actor1.layers[i].weights.data, actor2.layers[i].weights.data);
2171        }
2172    }
2173
2174    #[test]
2175    fn test_residual_update_changes_all_layer_weights() {
2176        let mut rng = make_rng();
2177        let mut actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2178        let input = vec![0.5; 9];
2179        let infer_result = actor.infer(&input);
2180        let w0 = actor.layers[0].weights.data.clone();
2181        let w1 = actor.layers[1].weights.data.clone();
2182        let w2 = actor.layers[2].weights.data.clone();
2183        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2184        assert_ne!(actor.layers[0].weights.data, w0, "Layer 0 should change");
2185        assert_ne!(actor.layers[1].weights.data, w1, "Layer 1 should change");
2186        assert_ne!(
2187            actor.layers[2].weights.data, w2,
2188            "Output layer should change"
2189        );
2190    }
2191
2192    #[test]
2193    fn test_residual_update_changes_rezero_alpha() {
2194        let mut rng = make_rng();
2195        let mut actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2196        let input = vec![0.5; 9];
2197        let infer_result = actor.infer(&input);
2198        let alpha_before = actor.rezero_alpha.clone();
2199        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2200        assert_ne!(
2201            actor.rezero_alpha, alpha_before,
2202            "rezero_alpha should be updated by backprop"
2203        );
2204    }
2205
2206    #[test]
2207    fn test_residual_update_clips_weights() {
2208        let mut rng = make_rng();
2209        let mut actor: PcActor = PcActor::new(residual_two_hidden_config(), &mut rng).unwrap();
2210        let input = vec![1.0; 9];
2211        let infer_result = actor.infer(&input);
2212        actor.update_weights(&[1e6; 9], &infer_result, &input, 1.0);
2213        for layer in &actor.layers {
2214            for &w in &layer.weights.data {
2215                assert!(
2216                    w.abs() <= WEIGHT_CLIP + 1e-12,
2217                    "Weight {w} exceeds WEIGHT_CLIP"
2218                );
2219            }
2220        }
2221    }
2222
2223    #[test]
2224    fn test_residual_gradient_stronger_than_non_residual() {
2225        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2226        let delta = vec![0.1; 9];
2227
2228        // Non-residual 2 hidden layers (27, 27)
2229        let mut rng1 = make_rng();
2230        let config1 = PcActorConfig {
2231            hidden_layers: vec![
2232                LayerDef {
2233                    size: 27,
2234                    activation: Activation::Tanh,
2235                },
2236                LayerDef {
2237                    size: 27,
2238                    activation: Activation::Tanh,
2239                },
2240            ],
2241            ..default_config()
2242        };
2243        let mut actor1: PcActor = PcActor::new(config1, &mut rng1).unwrap();
2244        let w0_before1 = actor1.layers[0].weights.data.clone();
2245        let infer1 = actor1.infer(&input);
2246        actor1.update_weights(&delta, &infer1, &input, 1.0);
2247        let change1: f64 = actor1.layers[0]
2248            .weights
2249            .data
2250            .iter()
2251            .zip(w0_before1.iter())
2252            .map(|(a, b)| (a - b).abs())
2253            .sum();
2254
2255        // Residual 2 hidden layers (27, 27) with rezero_init=1.0
2256        let mut rng2 = make_rng();
2257        let config2 = PcActorConfig {
2258            rezero_init: 1.0,
2259            ..residual_two_hidden_config()
2260        };
2261        let mut actor2: PcActor = PcActor::new(config2, &mut rng2).unwrap();
2262        let w0_before2 = actor2.layers[0].weights.data.clone();
2263        let infer2 = actor2.infer(&input);
2264        actor2.update_weights(&delta, &infer2, &input, 1.0);
2265        let change2: f64 = actor2.layers[0]
2266            .weights
2267            .data
2268            .iter()
2269            .zip(w0_before2.iter())
2270            .map(|(a, b)| (a - b).abs())
2271            .sum();
2272
2273        assert!(
2274            change2 > change1,
2275            "Residual should propagate stronger gradient to layer 0: residual={change2:.6}, non-residual={change1:.6}"
2276        );
2277    }
2278
2279    #[test]
2280    fn test_residual_hybrid_lambda_works() {
2281        let mut rng = make_rng();
2282        let config = PcActorConfig {
2283            local_lambda: 0.99,
2284            ..residual_two_hidden_config()
2285        };
2286        let mut actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2287        let input = vec![0.5; 9];
2288        let infer_result = actor.infer(&input);
2289        let w0_before = actor.layers[0].weights.data.clone();
2290        actor.update_weights(&[0.1; 9], &infer_result, &input, 1.0);
2291        assert_ne!(actor.layers[0].weights.data, w0_before);
2292    }
2293
2294    fn local_learning_config() -> PcActorConfig {
2295        PcActorConfig {
2296            local_lambda: 0.0,
2297            ..default_config()
2298        }
2299    }
2300
2301    #[test]
2302    fn test_infer_prediction_errors_count_matches_hidden_layers() {
2303        let mut rng = make_rng();
2304        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
2305        let result = actor.infer(&[0.0; 9]);
2306        assert_eq!(result.prediction_errors.len(), 1);
2307    }
2308
2309    #[test]
2310    fn test_infer_prediction_errors_two_hidden() {
2311        let mut rng = make_rng();
2312        let actor: PcActor = PcActor::new(two_hidden_config(), &mut rng).unwrap();
2313        let result = actor.infer(&[0.0; 9]);
2314        assert_eq!(result.prediction_errors.len(), 2);
2315    }
2316
2317    #[test]
2318    fn test_infer_prediction_errors_zero_hidden_is_empty() {
2319        let mut rng = make_rng();
2320        let config = PcActorConfig {
2321            hidden_layers: vec![],
2322            ..default_config()
2323        };
2324        let actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2325        let result = actor.infer(&[0.5; 9]);
2326        assert!(result.prediction_errors.is_empty());
2327    }
2328
2329    #[test]
2330    fn test_infer_prediction_errors_all_finite() {
2331        let mut rng = make_rng();
2332        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
2333        let result = actor.infer(&[1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5]);
2334        for errors in &result.prediction_errors {
2335            for &e in errors {
2336                assert!(e.is_finite(), "prediction error not finite: {e}");
2337            }
2338        }
2339    }
2340
2341    #[test]
2342    fn test_infer_prediction_errors_size_matches_hidden_layer_size() {
2343        let mut rng = make_rng();
2344        let actor: PcActor = PcActor::new(default_config(), &mut rng).unwrap();
2345        let result = actor.infer(&[0.0; 9]);
2346        // default_config has one hidden layer of size 18
2347        assert_eq!(result.prediction_errors[0].len(), 18);
2348    }
2349
2350    #[test]
2351    fn test_local_learning_config_accepted() {
2352        let mut rng = make_rng();
2353        let config = local_learning_config();
2354        assert!((config.local_lambda).abs() < f64::EPSILON);
2355        let actor: Result<PcActor, _> = PcActor::new(config, &mut rng);
2356        assert!(actor.is_ok());
2357    }
2358
2359    #[test]
2360    fn test_local_learning_update_changes_weights() {
2361        let mut rng = make_rng();
2362        let mut actor: PcActor = PcActor::new(local_learning_config(), &mut rng).unwrap();
2363        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2364        let infer_result = actor.infer(&input);
2365        let weights_before = actor.layers[0].weights.data.clone();
2366        let delta = vec![0.1; 9];
2367        actor.update_weights(&delta, &infer_result, &input, 1.0);
2368        assert_ne!(actor.layers[0].weights.data, weights_before);
2369    }
2370
2371    #[test]
2372    fn test_local_learning_clips_weights() {
2373        let mut rng = make_rng();
2374        let mut actor: PcActor = PcActor::new(local_learning_config(), &mut rng).unwrap();
2375        let input = vec![1.0; 9];
2376        let infer_result = actor.infer(&input);
2377        let delta = vec![1e6; 9];
2378        actor.update_weights(&delta, &infer_result, &input, 1.0);
2379        for layer in &actor.layers {
2380            for &w in &layer.weights.data {
2381                assert!(
2382                    w.abs() <= WEIGHT_CLIP + 1e-12,
2383                    "Weight {w} exceeds WEIGHT_CLIP"
2384                );
2385            }
2386        }
2387    }
2388
2389    #[test]
2390    fn test_local_learning_two_hidden_changes_both() {
2391        let mut rng = make_rng();
2392        let config = PcActorConfig {
2393            local_lambda: 0.0,
2394            ..two_hidden_config()
2395        };
2396        let mut actor: PcActor = PcActor::new(config, &mut rng).unwrap();
2397        let input = vec![0.5; 9];
2398        let infer_result = actor.infer(&input);
2399        let w0_before = actor.layers[0].weights.data.clone();
2400        let w1_before = actor.layers[1].weights.data.clone();
2401        let delta = vec![0.1; 9];
2402        actor.update_weights(&delta, &infer_result, &input, 1.0);
2403        assert_ne!(
2404            actor.layers[0].weights.data, w0_before,
2405            "Layer 0 should change"
2406        );
2407        assert_ne!(
2408            actor.layers[1].weights.data, w1_before,
2409            "Layer 1 should change"
2410        );
2411    }
2412
2413    #[test]
2414    fn test_local_learning_differs_from_backprop() {
2415        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2416        let delta = vec![0.1; 9];
2417
2418        // Backprop actor
2419        let mut rng1 = make_rng();
2420        let mut bp_actor: PcActor = PcActor::new(default_config(), &mut rng1).unwrap();
2421        let bp_infer = bp_actor.infer(&input);
2422        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2423
2424        // Local learning actor (same initial weights)
2425        let mut rng2 = make_rng();
2426        let mut ll_actor: PcActor = PcActor::new(local_learning_config(), &mut rng2).unwrap();
2427        let ll_infer = ll_actor.infer(&input);
2428        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2429
2430        // Hidden layer weights should differ between the two approaches
2431        assert_ne!(
2432            bp_actor.layers[0].weights.data, ll_actor.layers[0].weights.data,
2433            "Local learning should produce different weight updates than backprop"
2434        );
2435    }
2436
2437    // ── Hybrid Learning (local_lambda) Tests ────────────────────
2438
2439    fn hybrid_config(lambda: f64) -> PcActorConfig {
2440        PcActorConfig {
2441            local_lambda: lambda,
2442            ..default_config()
2443        }
2444    }
2445
2446    #[test]
2447    fn test_local_lambda_one_equals_backprop() {
2448        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2449        let delta = vec![0.1; 9];
2450
2451        // Pure backprop (local_learning=false, default)
2452        let mut rng1 = make_rng();
2453        let mut bp_actor: PcActor = PcActor::new(default_config(), &mut rng1).unwrap();
2454        let bp_infer = bp_actor.infer(&input);
2455        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2456
2457        // lambda=1.0 should be identical to backprop
2458        let mut rng2 = make_rng();
2459        let mut lam_actor: PcActor = PcActor::new(hybrid_config(1.0), &mut rng2).unwrap();
2460        let lam_infer = lam_actor.infer(&input);
2461        lam_actor.update_weights(&delta, &lam_infer, &input, 1.0);
2462
2463        assert_eq!(
2464            bp_actor.layers[0].weights.data, lam_actor.layers[0].weights.data,
2465            "lambda=1.0 should produce identical weights to pure backprop"
2466        );
2467    }
2468
2469    #[test]
2470    fn test_local_lambda_zero_equals_local_learning() {
2471        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2472        let delta = vec![0.1; 9];
2473
2474        // Pure local (local_learning=true)
2475        let mut rng1 = make_rng();
2476        let mut ll_actor: PcActor = PcActor::new(local_learning_config(), &mut rng1).unwrap();
2477        let ll_infer = ll_actor.infer(&input);
2478        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2479
2480        // lambda=0.0 should be identical to pure local
2481        let mut rng2 = make_rng();
2482        let mut lam_actor: PcActor = PcActor::new(hybrid_config(0.0), &mut rng2).unwrap();
2483        let lam_infer = lam_actor.infer(&input);
2484        lam_actor.update_weights(&delta, &lam_infer, &input, 1.0);
2485
2486        assert_eq!(
2487            ll_actor.layers[0].weights.data, lam_actor.layers[0].weights.data,
2488            "lambda=0.0 should produce identical weights to pure local learning"
2489        );
2490    }
2491
2492    #[test]
2493    fn test_local_lambda_half_differs_from_both_pure_modes() {
2494        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2495        let delta = vec![0.1; 9];
2496
2497        // Pure backprop
2498        let mut rng1 = make_rng();
2499        let mut bp_actor: PcActor = PcActor::new(default_config(), &mut rng1).unwrap();
2500        let bp_infer = bp_actor.infer(&input);
2501        bp_actor.update_weights(&delta, &bp_infer, &input, 1.0);
2502
2503        // Pure local
2504        let mut rng2 = make_rng();
2505        let mut ll_actor: PcActor = PcActor::new(local_learning_config(), &mut rng2).unwrap();
2506        let ll_infer = ll_actor.infer(&input);
2507        ll_actor.update_weights(&delta, &ll_infer, &input, 1.0);
2508
2509        // Hybrid lambda=0.5
2510        let mut rng3 = make_rng();
2511        let mut hy_actor: PcActor = PcActor::new(hybrid_config(0.5), &mut rng3).unwrap();
2512        let hy_infer = hy_actor.infer(&input);
2513        hy_actor.update_weights(&delta, &hy_infer, &input, 1.0);
2514
2515        assert_ne!(
2516            hy_actor.layers[0].weights.data, bp_actor.layers[0].weights.data,
2517            "lambda=0.5 should differ from pure backprop"
2518        );
2519        assert_ne!(
2520            hy_actor.layers[0].weights.data, ll_actor.layers[0].weights.data,
2521            "lambda=0.5 should differ from pure local"
2522        );
2523    }
2524
2525    #[test]
2526    fn test_local_lambda_changes_weights() {
2527        let mut rng = make_rng();
2528        let mut actor: PcActor = PcActor::new(hybrid_config(0.5), &mut rng).unwrap();
2529        let input = vec![1.0, -1.0, 0.5, -0.5, 0.0, 1.0, -1.0, 0.5, -0.5];
2530        let infer_result = actor.infer(&input);
2531        let weights_before = actor.layers[0].weights.data.clone();
2532        let delta = vec![0.1; 9];
2533        actor.update_weights(&delta, &infer_result, &input, 1.0);
2534        assert_ne!(actor.layers[0].weights.data, weights_before);
2535    }
2536
2537    #[test]
2538    fn test_local_lambda_clips_weights() {
2539        let mut rng = make_rng();
2540        let mut actor: PcActor = PcActor::new(hybrid_config(0.5), &mut rng).unwrap();
2541        let input = vec![1.0; 9];
2542        let infer_result = actor.infer(&input);
2543        let delta = vec![1e6; 9];
2544        actor.update_weights(&delta, &infer_result, &input, 1.0);
2545        for layer in &actor.layers {
2546            for &w in &layer.weights.data {
2547                assert!(
2548                    w.abs() <= WEIGHT_CLIP + 1e-12,
2549                    "Weight {w} exceeds WEIGHT_CLIP"
2550                );
2551            }
2552        }
2553    }
2554
2555    #[test]
2556    fn test_local_lambda_negative_returns_error() {
2557        let mut rng = make_rng();
2558        let config = hybrid_config(-0.1);
2559        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
2560        assert!(result.is_err());
2561    }
2562
2563    #[test]
2564    fn test_local_lambda_above_one_returns_error() {
2565        let mut rng = make_rng();
2566        let config = hybrid_config(1.1);
2567        let result: Result<PcActor, _> = PcActor::new(config, &mut rng);
2568        assert!(result.is_err());
2569    }
2570
2571    // ── Phase 5 Cycle 5.1: Crossover same topology ─────────────
2572
2573    fn crossover_config_27() -> PcActorConfig {
2574        PcActorConfig {
2575            input_size: 9,
2576            hidden_layers: vec![LayerDef {
2577                size: 27,
2578                activation: Activation::Tanh,
2579            }],
2580            output_size: 9,
2581            output_activation: Activation::Linear,
2582            alpha: 0.03,
2583            tol: 0.01,
2584            min_steps: 1,
2585            max_steps: 5,
2586            lr_weights: 0.005,
2587            synchronous: true,
2588            temperature: 1.0,
2589            local_lambda: 0.99,
2590            residual: false,
2591            rezero_init: 0.001,
2592        }
2593    }
2594
2595    fn make_caches_for_actor(actor: &PcActor, batch_size: usize) -> Vec<Vec<Vec<f64>>> {
2596        let num_hidden = actor.config.hidden_layers.len();
2597        let mut layers: Vec<Vec<Vec<f64>>> = (0..num_hidden).map(|_| Vec::new()).collect();
2598        for i in 0..batch_size {
2599            let input: Vec<f64> = (0..actor.config.input_size)
2600                .map(|j| ((i * actor.config.input_size + j) as f64 * 0.01).sin())
2601                .collect();
2602            let result = actor.infer(&input);
2603            for (layer_idx, state) in result.hidden_states.iter().enumerate() {
2604                layers[layer_idx].push(state.clone());
2605            }
2606        }
2607        layers
2608    }
2609
2610    fn build_cache_matrix(
2611        cache_layers: &[Vec<Vec<f64>>],
2612        layer_idx: usize,
2613    ) -> crate::matrix::Matrix {
2614        use crate::linalg::LinAlg;
2615        let samples = &cache_layers[layer_idx];
2616        let batch_size = samples.len();
2617        let n_neurons = samples[0].len();
2618        let mut mat = CpuLinAlg::zeros_mat(batch_size, n_neurons);
2619        for (r, sample) in samples.iter().enumerate() {
2620            for (c, &val) in sample.iter().enumerate() {
2621                CpuLinAlg::mat_set(&mut mat, r, c, val);
2622            }
2623        }
2624        mat
2625    }
2626
2627    #[test]
2628    fn test_crossover_same_topology_produces_valid_actor() {
2629        let mut rng_a = StdRng::seed_from_u64(42);
2630        let mut rng_b = StdRng::seed_from_u64(123);
2631        let config = crossover_config_27();
2632        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2633        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2634
2635        let caches_a = make_caches_for_actor(&actor_a, 50);
2636        let caches_b = make_caches_for_actor(&actor_b, 50);
2637        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2638        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2639
2640        let mut rng_child = StdRng::seed_from_u64(99);
2641        let child: PcActor = PcActor::crossover(
2642            &actor_a,
2643            &actor_b,
2644            &cache_mats_a,
2645            &cache_mats_b,
2646            0.5,
2647            config,
2648            &mut rng_child,
2649        )
2650        .unwrap();
2651
2652        // Child has same topology
2653        assert_eq!(child.layers.len(), actor_a.layers.len());
2654        for (i, layer) in child.layers.iter().enumerate() {
2655            assert_eq!(
2656                CpuLinAlg::mat_rows(&layer.weights),
2657                CpuLinAlg::mat_rows(&actor_a.layers[i].weights)
2658            );
2659            assert_eq!(
2660                CpuLinAlg::mat_cols(&layer.weights),
2661                CpuLinAlg::mat_cols(&actor_a.layers[i].weights)
2662            );
2663        }
2664    }
2665
2666    #[test]
2667    fn test_crossover_same_topology_child_differs_from_parents() {
2668        let mut rng_a = StdRng::seed_from_u64(42);
2669        let mut rng_b = StdRng::seed_from_u64(123);
2670        let config = crossover_config_27();
2671        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2672        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2673
2674        let caches_a = make_caches_for_actor(&actor_a, 50);
2675        let caches_b = make_caches_for_actor(&actor_b, 50);
2676        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2677        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2678
2679        let mut rng_child = StdRng::seed_from_u64(99);
2680        let child: PcActor = PcActor::crossover(
2681            &actor_a,
2682            &actor_b,
2683            &cache_mats_a,
2684            &cache_mats_b,
2685            0.5,
2686            config,
2687            &mut rng_child,
2688        )
2689        .unwrap();
2690
2691        // Child weights differ from both parents (blended)
2692        assert_ne!(child.layers[0].weights.data, actor_a.layers[0].weights.data);
2693        assert_ne!(child.layers[0].weights.data, actor_b.layers[0].weights.data);
2694    }
2695
2696    #[test]
2697    fn test_crossover_alpha_one_approximates_parent_a() {
2698        let mut rng_a = StdRng::seed_from_u64(42);
2699        let mut rng_b = StdRng::seed_from_u64(123);
2700        let config = crossover_config_27();
2701        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2702        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2703
2704        let caches_a = make_caches_for_actor(&actor_a, 50);
2705        let caches_b = make_caches_for_actor(&actor_b, 50);
2706        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2707        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2708
2709        let mut rng_child = StdRng::seed_from_u64(99);
2710        let child: PcActor = PcActor::crossover(
2711            &actor_a,
2712            &actor_b,
2713            &cache_mats_a,
2714            &cache_mats_b,
2715            1.0, // alpha=1.0 → child ≈ parent A
2716            config,
2717            &mut rng_child,
2718        )
2719        .unwrap();
2720
2721        // Input layer (layer 0): positional crossover, should be close to parent A
2722        let a_w = &actor_a.layers[0].weights.data;
2723        let child_w = &child.layers[0].weights.data;
2724        let max_diff: f64 = a_w
2725            .iter()
2726            .zip(child_w.iter())
2727            .map(|(a, c)| (a - c).abs())
2728            .fold(0.0_f64, f64::max);
2729        assert!(
2730            max_diff < 1e-10,
2731            "alpha=1.0: input layer max diff from parent A = {max_diff}"
2732        );
2733    }
2734
2735    #[test]
2736    fn test_crossover_child_weights_finite() {
2737        let mut rng_a = StdRng::seed_from_u64(42);
2738        let mut rng_b = StdRng::seed_from_u64(123);
2739        let config = crossover_config_27();
2740        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
2741        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
2742
2743        let caches_a = make_caches_for_actor(&actor_a, 50);
2744        let caches_b = make_caches_for_actor(&actor_b, 50);
2745        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2746        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2747
2748        let mut rng_child = StdRng::seed_from_u64(99);
2749        let child: PcActor = PcActor::crossover(
2750            &actor_a,
2751            &actor_b,
2752            &cache_mats_a,
2753            &cache_mats_b,
2754            0.5,
2755            config,
2756            &mut rng_child,
2757        )
2758        .unwrap();
2759
2760        for (i, layer) in child.layers.iter().enumerate() {
2761            for &w in &layer.weights.data {
2762                assert!(w.is_finite(), "NaN/Inf in layer {i} weights");
2763            }
2764            for b in CpuLinAlg::vec_to_vec(&layer.bias) {
2765                assert!(b.is_finite(), "NaN/Inf in layer {i} biases");
2766            }
2767        }
2768    }
2769
2770    // ── Phase 5 Cycle 5.2: Crossover child smaller ──────────────
2771
2772    #[test]
2773    fn test_crossover_child_smaller() {
2774        let mut rng_a = StdRng::seed_from_u64(42);
2775        let mut rng_b = StdRng::seed_from_u64(123);
2776        let config_27 = PcActorConfig {
2777            hidden_layers: vec![
2778                LayerDef {
2779                    size: 27,
2780                    activation: Activation::Tanh,
2781                },
2782                LayerDef {
2783                    size: 27,
2784                    activation: Activation::Tanh,
2785                },
2786            ],
2787            ..crossover_config_27()
2788        };
2789        let actor_a: PcActor = PcActor::new(config_27.clone(), &mut rng_a).unwrap();
2790        let actor_b: PcActor = PcActor::new(config_27, &mut rng_b).unwrap();
2791
2792        let caches_a = make_caches_for_actor(&actor_a, 50);
2793        let caches_b = make_caches_for_actor(&actor_b, 50);
2794        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
2795        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
2796
2797        let child_config = PcActorConfig {
2798            hidden_layers: vec![
2799                LayerDef {
2800                    size: 18,
2801                    activation: Activation::Tanh,
2802                },
2803                LayerDef {
2804                    size: 18,
2805                    activation: Activation::Tanh,
2806                },
2807            ],
2808            ..crossover_config_27()
2809        };
2810
2811        let mut rng_child = StdRng::seed_from_u64(99);
2812        let child: PcActor = PcActor::crossover(
2813            &actor_a,
2814            &actor_b,
2815            &cache_mats_a,
2816            &cache_mats_b,
2817            0.5,
2818            child_config,
2819            &mut rng_child,
2820        )
2821        .unwrap();
2822
2823        // Child hidden layers have 18 neurons
2824        use crate::linalg::LinAlg;
2825        assert_eq!(CpuLinAlg::mat_rows(&child.layers[0].weights), 18);
2826        assert_eq!(CpuLinAlg::mat_rows(&child.layers[1].weights), 18);
2827    }
2828
2829    // ── Phase 5 Cycle 5.3: Crossover parents differ ─────────────
2830
2831    #[test]
2832    fn test_crossover_parents_different_sizes() {
2833        let mut rng_a = StdRng::seed_from_u64(42);
2834        let mut rng_b = StdRng::seed_from_u64(123);
2835        let config_a = crossover_config_27(); // [27]
2836        let config_b = PcActorConfig {
2837            hidden_layers: vec![LayerDef {
2838                size: 18,
2839                activation: Activation::Tanh,
2840            }],
2841            ..crossover_config_27()
2842        }; // [18]
2843
2844        let actor_a: PcActor = PcActor::new(config_a, &mut rng_a).unwrap();
2845        let actor_b: PcActor = PcActor::new(config_b, &mut rng_b).unwrap();
2846
2847        let caches_a = make_caches_for_actor(&actor_a, 50);
2848        let caches_b = make_caches_for_actor(&actor_b, 50);
2849        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2850        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2851
2852        // Child has [27] → blending zone [0..18), copy zone [18..27) from parent A
2853        let child_config = crossover_config_27();
2854        let mut rng_child = StdRng::seed_from_u64(99);
2855        let child: PcActor = PcActor::crossover(
2856            &actor_a,
2857            &actor_b,
2858            &cache_mats_a,
2859            &cache_mats_b,
2860            0.5,
2861            child_config,
2862            &mut rng_child,
2863        )
2864        .unwrap();
2865
2866        use crate::linalg::LinAlg;
2867        // Child has correct dimensions [27]
2868        assert_eq!(CpuLinAlg::mat_rows(&child.layers[0].weights), 27);
2869        // All weights finite
2870        for &w in &child.layers[0].weights.data {
2871            assert!(w.is_finite());
2872        }
2873    }
2874
2875    // ── Phase 5 Cycle 5.4: Crossover child larger ───────────────
2876
2877    #[test]
2878    fn test_crossover_child_larger() {
2879        let mut rng_a = StdRng::seed_from_u64(42);
2880        let mut rng_b = StdRng::seed_from_u64(123);
2881        let config_18 = PcActorConfig {
2882            hidden_layers: vec![LayerDef {
2883                size: 18,
2884                activation: Activation::Tanh,
2885            }],
2886            ..crossover_config_27()
2887        };
2888        let actor_a: PcActor = PcActor::new(config_18.clone(), &mut rng_a).unwrap();
2889        let actor_b: PcActor = PcActor::new(config_18, &mut rng_b).unwrap();
2890
2891        let caches_a = make_caches_for_actor(&actor_a, 50);
2892        let caches_b = make_caches_for_actor(&actor_b, 50);
2893        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
2894        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
2895
2896        // Child has [27] → blending zone [0..18), Xavier zone [18..27)
2897        let child_config = crossover_config_27();
2898        let mut rng_child = StdRng::seed_from_u64(99);
2899        let child: PcActor = PcActor::crossover(
2900            &actor_a,
2901            &actor_b,
2902            &cache_mats_a,
2903            &cache_mats_b,
2904            0.5,
2905            child_config,
2906            &mut rng_child,
2907        )
2908        .unwrap();
2909
2910        use crate::linalg::LinAlg;
2911        assert_eq!(CpuLinAlg::mat_rows(&child.layers[0].weights), 27);
2912        // All weights finite
2913        for &w in &child.layers[0].weights.data {
2914            assert!(w.is_finite());
2915        }
2916        // Xavier zone weights are not all zero (random init)
2917        let xavier_zone_nonzero = (18..27).any(|r| {
2918            (0..CpuLinAlg::mat_cols(&child.layers[0].weights))
2919                .any(|c| CpuLinAlg::mat_get(&child.layers[0].weights, r, c).abs() > 1e-15)
2920        });
2921        assert!(
2922            xavier_zone_nonzero,
2923            "Xavier zone [18..27) should have non-zero weights"
2924        );
2925    }
2926
2927    // ── Phase 5 Cycle 5.5: Crossover layer count mismatch ───────
2928
2929    #[test]
2930    fn test_crossover_child_more_layers() {
2931        let mut rng_a = StdRng::seed_from_u64(42);
2932        let mut rng_b = StdRng::seed_from_u64(123);
2933        let config_2l = PcActorConfig {
2934            hidden_layers: vec![
2935                LayerDef {
2936                    size: 27,
2937                    activation: Activation::Tanh,
2938                },
2939                LayerDef {
2940                    size: 27,
2941                    activation: Activation::Tanh,
2942                },
2943            ],
2944            ..crossover_config_27()
2945        };
2946        let actor_a: PcActor = PcActor::new(config_2l.clone(), &mut rng_a).unwrap();
2947        let actor_b: PcActor = PcActor::new(config_2l, &mut rng_b).unwrap();
2948
2949        let caches_a = make_caches_for_actor(&actor_a, 50);
2950        let caches_b = make_caches_for_actor(&actor_b, 50);
2951        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
2952        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
2953
2954        // Child has 3 hidden layers → layers 0-1 crossover, layer 2 Xavier
2955        let child_config = PcActorConfig {
2956            hidden_layers: vec![
2957                LayerDef {
2958                    size: 27,
2959                    activation: Activation::Tanh,
2960                },
2961                LayerDef {
2962                    size: 27,
2963                    activation: Activation::Tanh,
2964                },
2965                LayerDef {
2966                    size: 18,
2967                    activation: Activation::Tanh,
2968                },
2969            ],
2970            ..crossover_config_27()
2971        };
2972
2973        let mut rng_child = StdRng::seed_from_u64(99);
2974        let child: PcActor = PcActor::crossover(
2975            &actor_a,
2976            &actor_b,
2977            &cache_mats_a,
2978            &cache_mats_b,
2979            0.5,
2980            child_config,
2981            &mut rng_child,
2982        )
2983        .unwrap();
2984
2985        use crate::linalg::LinAlg;
2986        // Child has 4 layers (3 hidden + 1 output)
2987        assert_eq!(child.layers.len(), 4);
2988        // Layer 2 (new) has 18 rows
2989        assert_eq!(CpuLinAlg::mat_rows(&child.layers[2].weights), 18);
2990        // All weights finite
2991        for (i, layer) in child.layers.iter().enumerate() {
2992            for &w in &layer.weights.data {
2993                assert!(w.is_finite(), "NaN/Inf in layer {i}");
2994            }
2995        }
2996    }
2997
2998    #[test]
2999    fn test_crossover_child_fewer_layers() {
3000        let mut rng_a = StdRng::seed_from_u64(42);
3001        let mut rng_b = StdRng::seed_from_u64(123);
3002        let config_3l = PcActorConfig {
3003            hidden_layers: vec![
3004                LayerDef {
3005                    size: 27,
3006                    activation: Activation::Tanh,
3007                },
3008                LayerDef {
3009                    size: 27,
3010                    activation: Activation::Tanh,
3011                },
3012                LayerDef {
3013                    size: 18,
3014                    activation: Activation::Tanh,
3015                },
3016            ],
3017            ..crossover_config_27()
3018        };
3019        let actor_a: PcActor = PcActor::new(config_3l.clone(), &mut rng_a).unwrap();
3020        let actor_b: PcActor = PcActor::new(config_3l, &mut rng_b).unwrap();
3021
3022        let caches_a = make_caches_for_actor(&actor_a, 50);
3023        let caches_b = make_caches_for_actor(&actor_b, 50);
3024        let cache_mats_a: Vec<_> = (0..3).map(|i| build_cache_matrix(&caches_a, i)).collect();
3025        let cache_mats_b: Vec<_> = (0..3).map(|i| build_cache_matrix(&caches_b, i)).collect();
3026
3027        // Child has 2 hidden layers → layers 0-1 crossover, layer 2 discarded
3028        let child_config = PcActorConfig {
3029            hidden_layers: vec![
3030                LayerDef {
3031                    size: 27,
3032                    activation: Activation::Tanh,
3033                },
3034                LayerDef {
3035                    size: 27,
3036                    activation: Activation::Tanh,
3037                },
3038            ],
3039            ..crossover_config_27()
3040        };
3041
3042        let mut rng_child = StdRng::seed_from_u64(99);
3043        let child: PcActor = PcActor::crossover(
3044            &actor_a,
3045            &actor_b,
3046            &cache_mats_a,
3047            &cache_mats_b,
3048            0.5,
3049            child_config,
3050            &mut rng_child,
3051        )
3052        .unwrap();
3053
3054        use crate::linalg::LinAlg;
3055        // Child has 3 layers (2 hidden + 1 output)
3056        assert_eq!(child.layers.len(), 3);
3057        // Output layer input_size = 27 (last hidden size)
3058        assert_eq!(CpuLinAlg::mat_cols(&child.layers[2].weights), 27);
3059    }
3060
3061    // ── Phase 5 Cycle 5.6: Crossover residual components ────────
3062
3063    #[test]
3064    fn test_crossover_residual_rezero_blended() {
3065        let mut rng_a = StdRng::seed_from_u64(42);
3066        let mut rng_b = StdRng::seed_from_u64(123);
3067        let config = PcActorConfig {
3068            hidden_layers: vec![
3069                LayerDef {
3070                    size: 27,
3071                    activation: Activation::Softsign,
3072                },
3073                LayerDef {
3074                    size: 27,
3075                    activation: Activation::Softsign,
3076                },
3077            ],
3078            residual: true,
3079            rezero_init: 0.1,
3080            ..crossover_config_27()
3081        };
3082        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
3083        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
3084
3085        let caches_a = make_caches_for_actor(&actor_a, 50);
3086        let caches_b = make_caches_for_actor(&actor_b, 50);
3087        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
3088        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
3089
3090        let mut rng_child = StdRng::seed_from_u64(99);
3091        let child: PcActor = PcActor::crossover(
3092            &actor_a,
3093            &actor_b,
3094            &cache_mats_a,
3095            &cache_mats_b,
3096            0.5,
3097            config,
3098            &mut rng_child,
3099        )
3100        .unwrap();
3101
3102        // Child has rezero_alpha values
3103        assert!(!child.rezero_alpha.is_empty());
3104        // Blended rezero_alpha: with alpha=0.5 and both parents same init,
3105        // child should be close to parent values
3106        for &rz in &child.rezero_alpha {
3107            assert!(rz.is_finite(), "rezero_alpha is not finite");
3108        }
3109    }
3110
3111    #[test]
3112    fn test_crossover_residual_skip_projections_blended() {
3113        let mut rng_a = StdRng::seed_from_u64(42);
3114        let mut rng_b = StdRng::seed_from_u64(123);
3115        let config = PcActorConfig {
3116            hidden_layers: vec![
3117                LayerDef {
3118                    size: 27,
3119                    activation: Activation::Softsign,
3120                },
3121                LayerDef {
3122                    size: 18,
3123                    activation: Activation::Softsign,
3124                },
3125            ],
3126            residual: true,
3127            rezero_init: 0.1,
3128            ..crossover_config_27()
3129        };
3130        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
3131        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
3132
3133        let caches_a = make_caches_for_actor(&actor_a, 50);
3134        let caches_b = make_caches_for_actor(&actor_b, 50);
3135        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
3136        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
3137
3138        let mut rng_child = StdRng::seed_from_u64(99);
3139        let child: PcActor = PcActor::crossover(
3140            &actor_a,
3141            &actor_b,
3142            &cache_mats_a,
3143            &cache_mats_b,
3144            0.5,
3145            config,
3146            &mut rng_child,
3147        )
3148        .unwrap();
3149
3150        // Child should have skip_projections for size mismatch (27→18)
3151        assert!(!child.skip_projections.is_empty());
3152        // At least one projection should be Some (27→18 needs projection)
3153        let has_projection = child.skip_projections.iter().any(|p| p.is_some());
3154        assert!(has_projection, "Expected at least one skip projection");
3155
3156        // Projection weights are finite
3157        for mat in child.skip_projections.iter().flatten() {
3158            for &w in &mat.data {
3159                assert!(w.is_finite(), "NaN/Inf in skip projection");
3160            }
3161        }
3162    }
3163
3164    // ── Fix #1: Column permutation propagation ──────────────────
3165
3166    #[test]
3167    fn test_crossover_multilayer_column_permutation_consistency() {
3168        // Two identical parents → child should be identical regardless of
3169        // CCA permutation (identity) or column ordering. But if we manually
3170        // set parent B = parent A with a known neuron permutation at layer 0,
3171        // the child at alpha=0.5 should produce a network whose layer 1
3172        // columns are also reordered to match.
3173        //
3174        // Strategy: crossover parent A with itself (same weights). The CCA
3175        // permutation should be identity, and the child should equal both
3176        // parents. Then crossover with alpha=0.5 using two different parents.
3177        // Run inference on the child — if column permutation is broken,
3178        // the child's layer 1 receives inputs in the wrong order, and
3179        // inference produces different results than a properly-permuted child.
3180        use crate::linalg::LinAlg;
3181        let mut rng_a = StdRng::seed_from_u64(42);
3182        let mut rng_b = StdRng::seed_from_u64(123);
3183        let config = PcActorConfig {
3184            hidden_layers: vec![
3185                LayerDef {
3186                    size: 8,
3187                    activation: Activation::Tanh,
3188                },
3189                LayerDef {
3190                    size: 8,
3191                    activation: Activation::Tanh,
3192                },
3193            ],
3194            input_size: 4,
3195            output_size: 4,
3196            ..crossover_config_27()
3197        };
3198        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
3199        let actor_b: PcActor = PcActor::new(config.clone(), &mut rng_b).unwrap();
3200
3201        let caches_a = make_caches_for_actor(&actor_a, 100);
3202        let caches_b = make_caches_for_actor(&actor_b, 100);
3203        let cache_mats_a: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_a, i)).collect();
3204        let cache_mats_b: Vec<_> = (0..2).map(|i| build_cache_matrix(&caches_b, i)).collect();
3205
3206        // Get CCA permutation for layer 0 to check if it's non-trivial
3207        let perm0 =
3208            crate::matrix::cca_neuron_alignment::<CpuLinAlg>(&cache_mats_a[0], &cache_mats_b[0])
3209                .unwrap();
3210        let is_nontrivial = perm0.iter().enumerate().any(|(i, &p)| i != p);
3211
3212        // Only test column propagation if CCA produced a non-trivial permutation
3213        if !is_nontrivial {
3214            // Parents too similar for meaningful test — skip
3215            return;
3216        }
3217
3218        // Crossover with alpha=0.5
3219        let mut rng_child = StdRng::seed_from_u64(99);
3220        let child: PcActor = PcActor::crossover(
3221            &actor_a,
3222            &actor_b,
3223            &cache_mats_a,
3224            &cache_mats_b,
3225            0.5,
3226            config.clone(),
3227            &mut rng_child,
3228        )
3229        .unwrap();
3230
3231        // Verify: layer 1's input columns should be permuted to match layer 0's
3232        // row permutation of parent B. Check that the child's layer 1 column
3233        // ordering is consistent by verifying that inference produces finite,
3234        // non-degenerate output AND that crossover applied the column permutation.
3235        //
3236        // If columns are NOT permuted, parent B's layer 1 columns still reference
3237        // the original neuron positions, but the blended layer 0 has reordered
3238        // neurons. The inconsistency means column c of layer 1 connects to the
3239        // wrong neuron from layer 0.
3240        //
3241        // We verify by checking that the column permutation was actually applied:
3242        // parent B's layer 1 columns should be reordered by perm0.
3243        let b_layer1 = &actor_b.layers[1];
3244        let b_cols = CpuLinAlg::mat_cols(&b_layer1.weights);
3245
3246        // Expected: child layer 1 col[c] = 0.5 * A.layer1.col[c] + 0.5 * B.layer1.col[perm0[c]]
3247        // If column permutation is NOT applied, it would be:
3248        // child layer 1 col[c] = 0.5 * A.layer1.col[c] + 0.5 * B.layer1.col[c]  (wrong!)
3249        let a_layer1 = &actor_a.layers[1];
3250        let child_layer1 = &child.layers[1];
3251        let n_rows = CpuLinAlg::mat_rows(&child_layer1.weights);
3252
3253        let mut has_col_permutation = false;
3254        for (c, &src_col) in perm0.iter().enumerate().take(b_cols.min(perm0.len())) {
3255            if src_col == c {
3256                continue; // Identity position, can't distinguish
3257            }
3258            // Check if child col c matches the permuted blend (correct)
3259            // vs the unpermuted blend (broken)
3260            for r in 0..n_rows {
3261                let a_val = CpuLinAlg::mat_get(&a_layer1.weights, r, c);
3262                let b_val_permuted = CpuLinAlg::mat_get(&b_layer1.weights, r, src_col);
3263                let b_val_unpermuted = CpuLinAlg::mat_get(&b_layer1.weights, r, c);
3264                let child_val = CpuLinAlg::mat_get(&child_layer1.weights, r, c);
3265
3266                let expected_permuted = 0.5 * a_val + 0.5 * b_val_permuted;
3267                let expected_unpermuted = 0.5 * a_val + 0.5 * b_val_unpermuted;
3268
3269                // If column permutation is applied, child matches permuted expectation
3270                if (child_val - expected_permuted).abs() < 1e-10
3271                    && (child_val - expected_unpermuted).abs() > 1e-10
3272                {
3273                    has_col_permutation = true;
3274                }
3275            }
3276        }
3277
3278        assert!(
3279            has_col_permutation,
3280            "Layer 1 columns should be permuted to match layer 0's CCA \
3281             permutation of parent B. perm0={perm0:?}"
3282        );
3283    }
3284
3285    // ── Fix #5: Empty hidden_layers guard ────────────────────────
3286
3287    #[test]
3288    fn test_crossover_empty_hidden_layers_returns_error() {
3289        let mut rng_a = StdRng::seed_from_u64(42);
3290        let mut rng_b = StdRng::seed_from_u64(123);
3291        let config = crossover_config_27();
3292        let actor_a: PcActor = PcActor::new(config.clone(), &mut rng_a).unwrap();
3293        let actor_b: PcActor = PcActor::new(config, &mut rng_b).unwrap();
3294
3295        let caches_a = make_caches_for_actor(&actor_a, 50);
3296        let caches_b = make_caches_for_actor(&actor_b, 50);
3297        let cache_mats_a: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_a, i)).collect();
3298        let cache_mats_b: Vec<_> = (0..1).map(|i| build_cache_matrix(&caches_b, i)).collect();
3299
3300        // Child config with empty hidden layers should return error, not panic
3301        let empty_config = PcActorConfig {
3302            hidden_layers: vec![],
3303            ..crossover_config_27()
3304        };
3305
3306        let mut rng_child = StdRng::seed_from_u64(99);
3307        let result = PcActor::crossover(
3308            &actor_a,
3309            &actor_b,
3310            &cache_mats_a,
3311            &cache_mats_b,
3312            0.5,
3313            empty_config,
3314            &mut rng_child,
3315        );
3316        assert!(
3317            result.is_err(),
3318            "Crossover with empty hidden_layers should return error"
3319        );
3320    }
3321
3322    // ── from_weights dimension validation tests ──────────────────────
3323
3324    /// Helper: build valid PcActorWeights from a config by constructing
3325    /// an actor and extracting its weights.
3326    fn valid_weights_for(config: &PcActorConfig) -> crate::serializer::PcActorWeights {
3327        let mut rng = make_rng();
3328        let actor = PcActor::<CpuLinAlg>::new(config.clone(), &mut rng).unwrap();
3329        actor.to_weights()
3330    }
3331
3332    #[test]
3333    fn test_from_weights_valid_returns_ok() {
3334        let config = default_config();
3335        let weights = valid_weights_for(&config);
3336        let result = PcActor::<CpuLinAlg>::from_weights(config, weights);
3337        assert!(result.is_ok());
3338    }
3339
3340    #[test]
3341    fn test_from_weights_wrong_weight_rows_returns_err() {
3342        let config = default_config(); // input=9, hidden=[18], output=9
3343        let mut weights = valid_weights_for(&config);
3344        // Layer 0 should be 18x9; corrupt rows to 10x9
3345        weights.layers[0].weights = crate::matrix::Matrix::zeros(10, 9);
3346        weights.layers[0].bias = vec![0.0; 10];
3347        let result = PcActor::<CpuLinAlg>::from_weights(config, weights);
3348        assert!(result.is_err());
3349        let err = result.unwrap_err();
3350        assert!(
3351            matches!(err, PcError::DimensionMismatch { .. }),
3352            "Expected DimensionMismatch, got: {err}"
3353        );
3354    }
3355
3356    #[test]
3357    fn test_from_weights_wrong_weight_cols_returns_err() {
3358        let config = default_config(); // input=9, hidden=[18], output=9
3359        let mut weights = valid_weights_for(&config);
3360        // Layer 0 should be 18x9; corrupt cols to 18x5
3361        weights.layers[0].weights = crate::matrix::Matrix::zeros(18, 5);
3362        let result = PcActor::<CpuLinAlg>::from_weights(config, weights);
3363        assert!(result.is_err());
3364        let err = result.unwrap_err();
3365        assert!(
3366            matches!(err, PcError::DimensionMismatch { .. }),
3367            "Expected DimensionMismatch, got: {err}"
3368        );
3369    }
3370
3371    #[test]
3372    fn test_from_weights_wrong_bias_length_returns_err() {
3373        let config = default_config(); // hidden=[18], so layer 0 bias should be len 18
3374        let mut weights = valid_weights_for(&config);
3375        weights.layers[0].bias = vec![0.0; 5]; // wrong length
3376        let result = PcActor::<CpuLinAlg>::from_weights(config, weights);
3377        assert!(result.is_err());
3378        let err = result.unwrap_err();
3379        assert!(
3380            matches!(err, PcError::DimensionMismatch { .. }),
3381            "Expected DimensionMismatch, got: {err}"
3382        );
3383    }
3384
3385    #[test]
3386    fn test_from_weights_wrong_output_layer_dims_returns_err() {
3387        let config = default_config(); // output layer should be 9x18
3388        let mut weights = valid_weights_for(&config);
3389        let last = weights.layers.len() - 1;
3390        weights.layers[last].weights = crate::matrix::Matrix::zeros(9, 10); // wrong cols
3391        let result = PcActor::<CpuLinAlg>::from_weights(config, weights);
3392        assert!(result.is_err());
3393    }
3394
3395    #[test]
3396    fn test_from_weights_wrong_rezero_alpha_count_returns_err() {
3397        let mut config = default_config();
3398        config.hidden_layers = vec![
3399            LayerDef {
3400                size: 18,
3401                activation: Activation::Tanh,
3402            },
3403            LayerDef {
3404                size: 18,
3405                activation: Activation::Tanh,
3406            },
3407        ];
3408        config.residual = true;
3409        let mut weights = valid_weights_for(&config);
3410        // residual with 2 hidden layers expects 1 rezero_alpha; give 0
3411        weights.rezero_alpha = vec![];
3412        let result = PcActor::<CpuLinAlg>::from_weights(config, weights);
3413        assert!(result.is_err());
3414        let err = result.unwrap_err();
3415        assert!(
3416            matches!(err, PcError::DimensionMismatch { .. }),
3417            "Expected DimensionMismatch, got: {err}"
3418        );
3419    }
3420
3421    #[test]
3422    fn test_from_weights_wrong_skip_projection_dims_returns_err() {
3423        // N1: skip projection dimensions (rows/cols) should be validated
3424        let mut config = default_config();
3425        config.hidden_layers = vec![
3426            LayerDef {
3427                size: 27,
3428                activation: Activation::Softsign,
3429            },
3430            LayerDef {
3431                size: 18,
3432                activation: Activation::Softsign,
3433            },
3434        ];
3435        config.residual = true;
3436        let mut weights = valid_weights_for(&config);
3437        // Skip projection should be 18x27; corrupt to 10x5
3438        weights.skip_projections[0] = Some(crate::matrix::Matrix::zeros(10, 5));
3439        let result = PcActor::<CpuLinAlg>::from_weights(config, weights);
3440        assert!(result.is_err());
3441        let err = result.unwrap_err();
3442        assert!(
3443            matches!(err, PcError::DimensionMismatch { .. }),
3444            "Expected DimensionMismatch, got: {err}"
3445        );
3446    }
3447
3448    #[test]
3449    fn test_from_weights_wrong_skip_projections_count_returns_err() {
3450        let mut config = default_config();
3451        config.hidden_layers = vec![
3452            LayerDef {
3453                size: 18,
3454                activation: Activation::Tanh,
3455            },
3456            LayerDef {
3457                size: 18,
3458                activation: Activation::Tanh,
3459            },
3460        ];
3461        config.residual = true;
3462        let mut weights = valid_weights_for(&config);
3463        // Should have 1 skip_projection; give 3
3464        weights.skip_projections = vec![None, None, None];
3465        let result = PcActor::<CpuLinAlg>::from_weights(config, weights);
3466        assert!(result.is_err());
3467        let err = result.unwrap_err();
3468        assert!(
3469            matches!(err, PcError::DimensionMismatch { .. }),
3470            "Expected DimensionMismatch, got: {err}"
3471        );
3472    }
3473}