// exg_source/lib.rs
1//! # exg-source — EEG Source Localization in Pure Rust
2//!
3//! [![Crate](https://img.shields.io/crates/v/exg-source.svg)](https://crates.io/crates/exg-source)
4//! [![Docs](https://docs.rs/exg-source/badge.svg)](https://docs.rs/exg-source)
5//!
6//! Complete EEG source localization pipeline ported from
7//! [MNE-Python](https://mne.tools), implemented in pure Rust with no C/BLAS
8//! dependencies. Uses [`faer`](https://crates.io/crates/faer) for SVD and
9//! eigendecomposition.
10//!
11//! This crate can be used **standalone** or as an optional dependency of
12//! [`exg`](https://crates.io/crates/exg) (enabled by the default `source`
13//! feature).
14//!
15//! ## End-to-end example
16//!
17//! ```no_run
18//! use exg_source::*;
19//! use exg_source::covariance::Regularization;
20//! use ndarray::Array2;
21//!
22//! // 1. Define source space — 162 dipoles on a cortical sphere
23//! let (src_pos, src_nn) = ico_source_space(2, 0.06, [0.0, 0.0, 0.04]);
24//!
25//! // 2. Define electrode positions (e.g., from a 10-20 montage)
26//! let n_elec = 32;
27//! let electrodes = Array2::<f64>::zeros((n_elec, 3)); // your positions here
28//!
29//! // 3. Compute forward model (3-shell spherical head)
30//! let sphere = SphereModel::default(); // brain/skull/scalp
31//! let fwd = make_sphere_forward(&electrodes, &src_pos, &src_nn, &sphere);
32//!
33//! // 4. Estimate noise covariance from a baseline recording
34//! let baseline = Array2::<f64>::zeros((n_elec, 5000));
35//! let noise_cov = compute_covariance(&baseline, Regularization::ShrunkIdentity(None));
36//!
37//! // 5. Build the inverse operator
38//! let inv = make_inverse_operator(&fwd, &noise_cov, Some(0.8)).unwrap();
39//!
40//! // 6. Apply to EEG data
41//! let data = Array2::<f64>::zeros((n_elec, 1000));
42//! let lambda2 = 1.0 / 9.0; // SNR² = 9
43//! let stc = apply_inverse(&data, &inv, lambda2, InverseMethod::SLORETA).unwrap();
44//!
45//! // 7. Assess solution quality
46//! let (snr, snr_est) = estimate_snr(&data, &inv);
47//! let r = make_resolution_matrix(&inv, &fwd, lambda2, InverseMethod::SLORETA).unwrap();
48//! let errors = peak_localisation_error(&r);
49//! ```
50//!
51//! ## Modules
52//!
53//! | Module | Description |
54//! |--------|-------------|
55//! | [`source_space`] | Icosahedron and grid source space generation |
56//! | [`forward`] | Spherical head model (Berg & Scherg) forward computation |
57//! | [`covariance`] | Noise covariance estimation (empirical, Ledoit–Wolf, diagonal) |
58//! | [`inverse`] | Inverse operator construction and application |
59//! | [`eloreta`] | eLORETA iterative weight solver |
60//! | [`resolution`] | Resolution matrix, PSF, CTF, and spatial metrics |
61//! | [`snr`] | SNR estimation from whitened data |
62//! | [`linalg`] | SVD, eigendecomposition, whitener (faer backend) |
63//!
64//! ## Inverse methods
65//!
66//! | Method | Noise-normalised? | Iterative? | Best for |
67//! |--------|:-:|:-:|----------|
68//! | **MNE** | ✗ | ✗ | Raw current density estimates |
69//! | **dSPM** | ✓ | ✗ | Statistical maps, group studies |
70//! | **sLORETA** | ✓ | ✗ | Localisation with low bias |
71//! | **eLORETA** | ✓ | ✓ | Exact zero-bias localisation |
72//!
73//! ## Mathematical background
74//!
75//! The whitened gain matrix is decomposed via SVD:
76//!
77//! ```text
78//! G_w = W @ G @ R^{1/2} = U @ S @ V^T
79//! ```
80//!
81//! where **W** is the whitener (`C_noise^{-1/2}`), **R** is the source
82//! covariance (depth / orientation priors), and **U**, **S**, **V** are the
83//! SVD factors.
84//!
85//! The inverse kernel is:
86//!
87//! ```text
88//! K = R^{1/2} @ V @ diag(s / (s² + λ²)) @ U^T @ W
89//! ```
90//!
91//! Noise normalisation divides each source by its noise sensitivity:
92//!
93//! - **dSPM**: `norm_k = ‖V_k @ diag(reginv)‖`
94//! - **sLORETA**: `norm_k = ‖V_k @ diag(reginv × √(1 + s²/λ²))‖`
95//! - **eLORETA**: iterative reweighting to achieve `R = I` (see [`eloreta`])
96
97pub mod covariance;
98pub mod eloreta;
99pub mod forward;
100pub mod inverse;
101pub mod linalg;
102pub mod resolution;
103pub mod snr;
104pub mod source_space;
105
106use ndarray::{Array1, Array2};
107
108// ── Public types ───────────────────────────────────────────────────────────
109
/// Choice of inverse method.
///
/// Selects the estimator applied by [`apply_inverse`]. See the
/// [module-level docs](crate) for a comparison table (noise normalisation,
/// iterative or not, typical use case).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InverseMethod {
    /// Minimum-norm estimate (no noise normalisation); raw current density.
    MNE,
    /// Dynamic statistical parametric mapping (noise-normalised).
    DSPM,
    /// Standardised low-resolution electromagnetic tomography (noise-normalised).
    SLORETA,
    /// Exact low-resolution electromagnetic tomography (iterative; see [`eloreta`]).
    ELORETA,
}
124
/// Source orientation constraint.
///
/// Determines how many gain-matrix columns each source location occupies;
/// see [`ForwardOperator::n_orient`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SourceOrientation {
    /// Fixed orientation: one value per source (`n_orient = 1`).
    Fixed,
    /// Free orientation: three Cartesian components per source (`n_orient = 3`).
    Free,
}
133
/// Forward operator (gain matrix + metadata).
///
/// The gain matrix maps source activations to sensor measurements:
/// `x(t) = G @ j(t) + noise`.
///
/// For free orientation the columns come in per-source triplets
/// (x, y, z components for source 0, then source 1, …) — this is the
/// layout produced by [`ForwardOperator::new_free`].
///
/// # Construction
///
/// - From a raw gain matrix: [`ForwardOperator::new_fixed`] or [`ForwardOperator::new_free`].
/// - From electrode positions + source space: [`forward::make_sphere_forward`].
#[derive(Debug, Clone)]
pub struct ForwardOperator {
    /// Gain matrix, shape `[n_channels, n_sources × n_orient]`.
    pub gain: Array2<f64>,
    /// Source normals/orientations, shape `[n_sources × n_orient, 3]`.
    /// One row per gain column; each row is a unit direction vector.
    pub source_nn: Array2<f64>,
    /// Number of source locations (not gain columns).
    pub n_sources: usize,
    /// Orientation mode.
    pub orientation: SourceOrientation,
    /// Depth-weighting exponent (0.8 for MEG, 2–5 for EEG). `None` = no depth weighting.
    pub depth_exp: Option<f64>,
}
156
157impl ForwardOperator {
158    /// Create a fixed-orientation forward operator from a gain matrix
159    /// `[n_channels, n_sources]`.
160    ///
161    /// Source normals are initialised to zero; set [`ForwardOperator::source_nn`]
162    /// manually if you need proper normals for `PickOri::Normal`.
163    pub fn new_fixed(gain: Array2<f64>) -> Self {
164        let n_src = gain.ncols();
165        let source_nn = Array2::zeros((n_src, 3));
166        Self {
167            gain,
168            source_nn,
169            n_sources: n_src,
170            orientation: SourceOrientation::Fixed,
171            depth_exp: None,
172        }
173    }
174
175    /// Create a free-orientation forward operator from a gain matrix
176    /// `[n_channels, n_sources × 3]`.
177    ///
178    /// Panics if `gain.ncols()` is not divisible by 3.
179    pub fn new_free(gain: Array2<f64>) -> Self {
180        let n_cols = gain.ncols();
181        assert!(
182            n_cols % 3 == 0,
183            "Free-orientation gain must have 3N columns, got {n_cols}"
184        );
185        let n_src = n_cols / 3;
186        let mut source_nn = Array2::zeros((n_cols, 3));
187        for i in 0..n_src {
188            source_nn[[i * 3, 0]] = 1.0;
189            source_nn[[i * 3 + 1, 1]] = 1.0;
190            source_nn[[i * 3 + 2, 2]] = 1.0;
191        }
192        Self {
193            gain,
194            source_nn,
195            n_sources: n_src,
196            orientation: SourceOrientation::Free,
197            depth_exp: None,
198        }
199    }
200
201    /// Number of orientations per source (1 for fixed, 3 for free).
202    pub fn n_orient(&self) -> usize {
203        match self.orientation {
204            SourceOrientation::Fixed => 1,
205            SourceOrientation::Free => 3,
206        }
207    }
208}
209
/// Noise covariance matrix.
///
/// Can be a full `[n_channels, n_channels]` matrix or a diagonal vector.
///
/// # Construction
///
/// - Directly: [`NoiseCov::full`] or [`NoiseCov::diagonal`].
/// - From data: [`covariance::compute_covariance`] or [`covariance::compute_covariance_epochs`].
#[derive(Debug, Clone)]
pub struct NoiseCov {
    /// Full covariance matrix `[n, n]`, or a diagonal stored as `[n, 1]`.
    /// Private: the constructors keep this consistent with `diag`.
    data: Array2<f64>,
    /// If true, `data` is `[n, 1]` (diagonal elements only).
    pub diag: bool,
}
225
226impl NoiseCov {
227    /// Create from a full covariance matrix `[n, n]`.
228    pub fn full(data: Array2<f64>) -> Self {
229        assert_eq!(data.nrows(), data.ncols(), "Covariance must be square");
230        Self { data, diag: false }
231    }
232
233    /// Create from diagonal variances (channel noise powers).
234    pub fn diagonal(variances: Vec<f64>) -> Self {
235        let n = variances.len();
236        let data = Array2::from_shape_vec((n, 1), variances).unwrap();
237        Self { data, diag: true }
238    }
239
240    /// Number of channels.
241    pub fn n_channels(&self) -> usize {
242        self.data.nrows()
243    }
244
245    /// Return the full covariance matrix (expanding diagonal if needed).
246    pub fn to_full(&self) -> Array2<f64> {
247        if self.diag {
248            let n = self.data.nrows();
249            let mut out = Array2::zeros((n, n));
250            for i in 0..n {
251                out[[i, i]] = self.data[[i, 0]];
252            }
253            out
254        } else {
255            self.data.clone()
256        }
257    }
258
259    /// Return the diagonal elements (channel variances).
260    pub fn diag_elements(&self) -> Array1<f64> {
261        if self.diag {
262            self.data.column(0).to_owned()
263        } else {
264            self.data.diag().to_owned()
265        }
266    }
267}
268
/// Prepared inverse operator, ready for application to data.
///
/// Created by [`make_inverse_operator`] and consumed by [`apply_inverse`].
/// Contains the SVD decomposition of the whitened gain matrix plus
/// all metadata needed to reconstruct source currents.
///
/// `n_nzero` is the effective rank of the noise covariance; all
/// SVD-factor dimensions below are truncated to it.
#[derive(Debug, Clone)]
pub struct InverseOperator {
    /// Left singular vectors transposed: `U^T`, shape `[n_nzero, n_channels]`.
    pub eigen_fields: Array2<f64>,
    /// Singular values, length `n_nzero`.
    pub sing: Array1<f64>,
    /// Right singular vectors: `V`, shape `[n_sources × n_orient, n_nzero]`.
    pub eigen_leads: Array2<f64>,
    /// Source covariance diagonal (depth + orient priors), length `n_sources × n_orient`.
    pub source_cov: Array1<f64>,
    /// Whether `eigen_leads` already includes `√source_cov` (set by eLORETA).
    pub eigen_leads_weighted: bool,
    /// Number of source locations.
    pub n_sources: usize,
    /// Orientation mode.
    pub orientation: SourceOrientation,
    /// Source normals, shape `[n_sources × n_orient, 3]`.
    pub source_nn: Array2<f64>,
    /// Whitener matrix, shape `[n_nzero, n_channels]`.
    pub whitener: Array2<f64>,
    /// Number of non-zero eigenvalues in the noise covariance.
    pub n_nzero: usize,
    /// Noise covariance (retained for eLORETA computation).
    pub noise_cov: NoiseCov,
}
299
/// Source-space estimate produced by [`apply_inverse`].
///
/// Contains source time courses and metadata about the source space.
#[derive(Debug, Clone)]
pub struct SourceEstimate {
    /// Source time courses.
    ///
    /// Shape depends on [`PickOri`]:
    /// - `PickOri::None` / `PickOri::Normal` → `[n_sources, n_times]`
    /// - `PickOri::Vector` → `[n_sources × 3, n_times]`
    pub data: Array2<f64>,
    /// Number of source locations (rows of `data`, or rows / 3 for `Vector`).
    pub n_sources: usize,
    /// Orientation mode of the inverse operator that produced this estimate.
    pub orientation: SourceOrientation,
}
316
/// How to handle source orientations in free-orientation inverse solutions.
///
/// Only relevant when the inverse operator has [`SourceOrientation::Free`].
/// For fixed-orientation operators, all variants behave identically.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PickOri {
    /// Combine XYZ components as `√(x² + y² + z²)` per source (default).
    None,
    /// Extract only the component normal to the cortical surface.
    /// Requires a free-orientation inverse operator with valid `source_nn`.
    Normal,
    /// Return all three orientation components without combining.
    /// Output has shape `[n_sources × 3, n_times]`.
    Vector,
}
332
/// Options for the eLORETA iterative solver.
///
/// Use [`EloretaOptions::default`] for the standard settings.
/// See [`inverse::apply_inverse_full`] and [`eloreta`] for details.
#[derive(Debug, Clone)]
pub struct EloretaOptions {
    /// Convergence threshold (default: `1e-6`).
    pub eps: f64,
    /// Maximum number of iterations (default: `20`).
    pub max_iter: usize,
    /// Force equal weights across XYZ orientations at each source.
    ///
    /// - `None` — automatic: `true` for fixed, `false` for free orientation.
    /// - `Some(true)` — uniform weights (like dSPM/sLORETA), recommended for loose orientation.
    /// - `Some(false)` — independent 3×3 weights per source (reference eLORETA).
    pub force_equal: Option<bool>,
}
349
350impl Default for EloretaOptions {
351    fn default() -> Self {
352        Self {
353            eps: 1e-6,
354            max_iter: 20,
355            force_equal: None,
356        }
357    }
358}
359
360// ── Public API re-exports ──────────────────────────────────────────────────
361
362// Inverse operator
363pub use inverse::{
364    apply_inverse, apply_inverse_epochs, apply_inverse_epochs_full, apply_inverse_full,
365    apply_inverse_with_options, make_inverse_operator, prepare_inverse,
366};
367
368// Covariance estimation
369pub use covariance::{compute_covariance, compute_covariance_epochs, Regularization};
370
371// Resolution analysis
372pub use resolution::{
373    get_cross_talk, get_point_spread, make_resolution_matrix, peak_localisation_error,
374    relative_amplitude, spatial_spread,
375};
376
377// SNR estimation
378pub use snr::estimate_snr;
379
380// Forward model
381pub use forward::{make_sphere_forward, make_sphere_forward_free, SphereModel};
382
383// Source space
384pub use source_space::{grid_source_space, ico_n_vertices, ico_source_space};