use num_complex::Complex64;
use std::f64::consts::PI;
/// Model of distributed random linear mode coupling in a few-mode fiber.
pub struct RandomModeCoupling {
    /// Number of guided spatial modes.
    pub n_modes: usize,
    /// Coupling strength h (per metre, per the mixing-length formula below).
    pub coupling_strength: f64,
    /// Correlation length of the random perturbations, in metres.
    pub correlation_length_m: f64,
}

impl RandomModeCoupling {
    /// Creates a coupling model for `n_modes` modes with coupling strength
    /// `h` and correlation length `lc` (metres).
    pub fn new(n_modes: usize, h: f64, lc: f64) -> Self {
        Self {
            n_modes,
            coupling_strength: h,
            correlation_length_m: lc,
        }
    }

    /// Power-coupling matrix over one propagation step `dz` (metres).
    ///
    /// Off-diagonal entries are capped at 1/(n+1) so the diagonal stays
    /// positive and every row sums to one (a valid power redistribution).
    pub fn coupling_matrix(&self, dz: f64) -> Vec<Vec<f64>> {
        let n = self.n_modes;
        let nf = n as f64;
        // Per-pair power transfer over dz, clamped to keep the matrix stochastic.
        let off = (self.coupling_strength * dz).min(1.0 / (nf + 1.0));
        // Power retained in the same mode; clamped defensively at zero.
        let diag = (1.0 - (nf - 1.0) * off).max(0.0);
        (0..n)
            .map(|i| (0..n).map(|j| if i == j { diag } else { off }).collect())
            .collect()
    }

    /// Length (km) over which the modes become fully mixed; infinite when
    /// there is no coupling or fewer than two modes.
    pub fn mixing_length_km(&self) -> f64 {
        let extra_modes = self.n_modes.saturating_sub(1) as f64;
        if self.coupling_strength <= 0.0 || extra_modes == 0.0 {
            return f64::INFINITY;
        }
        let l_mix_m = (2.0 * self.coupling_strength * extra_modes).recip();
        l_mix_m / 1.0e3
    }

    /// True when the fiber spans well over the mixing length (>10x),
    /// i.e. the strong-coupling regime applies.
    pub fn is_strongly_coupled(&self, fiber_length_km: f64) -> bool {
        fiber_length_km > 10.0 * self.mixing_length_km()
    }

    /// Effective DGD (ps): unchanged below one mixing length; beyond it,
    /// reduced by sqrt(L / L_mix) (square-root accumulation).
    pub fn effective_dgd_ps(&self, dgd_uncoupled_ps: f64, fiber_length_km: f64) -> f64 {
        let l_mix = self.mixing_length_km();
        if l_mix <= 0.0 || l_mix.is_infinite() || fiber_length_km <= l_mix {
            dgd_uncoupled_ps
        } else {
            dgd_uncoupled_ps / (fiber_length_km / l_mix).sqrt()
        }
    }

    /// Effective MDL (dB): unchanged below one mixing length; beyond it,
    /// reduced by sqrt(n * L / L_mix).
    pub fn effective_mdl_db(&self, mdl_uncoupled_db: f64, fiber_length_km: f64) -> f64 {
        let l_mix = self.mixing_length_km();
        if l_mix <= 0.0 || l_mix.is_infinite() || fiber_length_km <= l_mix {
            mdl_uncoupled_db
        } else {
            let n = self.n_modes as f64;
            mdl_uncoupled_db / (n * fiber_length_km / l_mix).sqrt()
        }
    }

    /// Fully-mixed steady state: total power split equally over all modes.
    pub fn steady_state_powers(&self, total_power: f64) -> Vec<f64> {
        let share = total_power / self.n_modes as f64;
        (0..self.n_modes).map(|_| share).collect()
    }
}
/// Time-domain MIMO equalizer for few-mode-fiber reception: an
/// `n_modes x n_modes` bank of FIR filters adapted by CMA.
pub struct FmfMimoEqualizer {
    pub n_modes: usize,
    pub n_taps: usize,
    pub tap_spacing_samples: usize,
    /// `equalizer_matrix[out][inp]` holds the FIR taps mapping input mode
    /// `inp` to output mode `out`.
    pub equalizer_matrix: Vec<Vec<Vec<Complex64>>>,
    /// Adaptation step size for the CMA update.
    pub mu: f64,
}

impl FmfMimoEqualizer {
    /// Builds an identity-initialized equalizer: each diagonal filter has a
    /// single unit tap at the center, all cross filters are zero.
    pub fn new(n_modes: usize, n_taps: usize, mu: f64) -> Self {
        let mid = n_taps / 2;
        let zero = Complex64::new(0.0, 0.0);
        let mut equalizer_matrix = vec![vec![vec![zero; n_taps]; n_modes]; n_modes];
        for (mode, bank) in equalizer_matrix.iter_mut().enumerate() {
            bank[mode][mid] = Complex64::new(1.0, 0.0);
        }
        Self {
            n_modes,
            n_taps,
            tap_spacing_samples: 2, // half-symbol-spaced by default
            equalizer_matrix,
            mu,
        }
    }

    /// Runs the filter bank over one block of received samples (one Vec per
    /// input mode) and returns one equalized value per output mode.
    ///
    /// Tap k reads sample index `k * tap_spacing_samples`; indices past the
    /// first mode's length are skipped.
    pub fn apply(&self, received: &[Vec<Complex64>]) -> Vec<Complex64> {
        let n = self.n_modes.min(received.len());
        // Sample count is taken from the first mode's buffer.
        let n_samp = received.first().map_or(0, |v| v.len());
        (0..n)
            .map(|out| {
                let mut acc = Complex64::new(0.0, 0.0);
                for (inp, mode_samples) in received.iter().take(n).enumerate() {
                    for (k, &tap) in self.equalizer_matrix[out][inp].iter().enumerate() {
                        let idx = k * self.tap_spacing_samples;
                        if idx < n_samp {
                            acc += tap * mode_samples[idx];
                        }
                    }
                }
                acc
            })
            .collect()
    }

    /// One constant-modulus-algorithm (CMA) adaptation step toward unit
    /// output modulus, using the latest received/equalized sample per mode.
    ///
    /// NOTE(review): every tap of a filter receives the same gradient term
    /// (no per-tap delayed input sample) — presumably a simplified CMA;
    /// confirm against the intended update rule.
    pub fn update_cma(&mut self, received: &[Complex64], equalized: &[Complex64]) {
        const R_SQUARED: f64 = 1.0; // target modulus squared for the CM criterion
        let n = self.n_modes.min(equalized.len()).min(received.len());
        for out in 0..n {
            let y = equalized[out];
            let error = (R_SQUARED - y.norm_sqr()) * y;
            for inp in 0..n {
                let step = self.mu * error;
                for tap in self.equalizer_matrix[out][inp].iter_mut() {
                    *tap += step * received[inp].conj();
                }
            }
        }
    }

    /// Complex multiplications required per equalized symbol vector.
    pub fn complexity_multiplications_per_symbol(&self) -> usize {
        self.n_modes * self.n_modes * self.n_taps
    }

    /// Tap count needed to span `dgd_total_ps` of differential group delay
    /// at the given symbol rate, with a 20% margin and a floor of 3 taps.
    pub fn required_taps(dgd_total_ps: f64, symbol_rate_gbaud: f64) -> usize {
        if symbol_rate_gbaud <= 0.0 {
            return 1;
        }
        // Symbol period in picoseconds.
        let t_sym_ps = 1.0 / (symbol_rate_gbaud * 1.0e9) * 1.0e12;
        let n_taps = (dgd_total_ps / t_sym_ps * 1.2).ceil() as usize;
        n_taps.max(3)
    }

    /// Rough symbol count for CMA convergence (inverse of the normalized
    /// step size); `usize::MAX` when adaptation is disabled (mu <= 0).
    pub fn convergence_symbols(&self) -> usize {
        if self.mu > 0.0 {
            (1.0 / (self.mu * self.n_modes as f64)).ceil() as usize
        } else {
            usize::MAX
        }
    }
}
/// Photonic lantern spatial multiplexer: an adiabatic taper combining
/// `n_modes` single-mode inputs into one few-mode output.
pub struct PhotonicLantern {
    pub n_modes: usize,
    pub transition_length_mm: f64,
    pub insertion_loss_db: f64,
    pub mode_dependent_loss_db: f64,
    pub crosstalk_db: f64,
}

impl PhotonicLantern {
    /// Builds a lantern from empirical scaling laws: taper length grows
    /// with sqrt(modes); losses and crosstalk degrade with mode count.
    pub fn new(n_modes: usize) -> Self {
        let nf = n_modes as f64;
        Self {
            n_modes,
            transition_length_mm: 25.0 * nf.sqrt(),
            insertion_loss_db: 0.3 + 0.1 * nf.log2(),
            mode_dependent_loss_db: 0.5 + 0.05 * nf,
            crosstalk_db: -25.0 - 5.0 * nf.log2(),
        }
    }

    /// Margin (dB) between crosstalk suppression and mode-dependent loss.
    pub fn mode_selectivity_db(&self) -> f64 {
        self.crosstalk_db.abs() - self.mode_dependent_loss_db
    }

    /// Linear power transmission implied by the insertion loss.
    pub fn efficiency(&self) -> f64 {
        10.0_f64.powf(-self.insertion_loss_db / 10.0)
    }

    /// Optical bandwidth (nm), scaling as the square root of the taper
    /// length relative to a 25 mm reference design.
    pub fn bandwidth_nm(&self) -> f64 {
        let reference_bw_nm = 150.0_f64;
        reference_bw_nm * (self.transition_length_mm / 25.0).sqrt()
    }

    /// Complex field transfer matrix: a uniform diagonal amplitude set by
    /// the insertion loss, and off-diagonal crosstalk terms carrying a
    /// deterministic phase ramp over the matrix positions.
    pub fn transfer_matrix(&self) -> Vec<Vec<Complex64>> {
        let n = self.n_modes;
        let diag_amp = self.efficiency().sqrt();
        // Field-amplitude crosstalk ratio (dB / 20 for amplitude).
        let xt_amp = diag_amp * 10.0_f64.powf(self.crosstalk_db / 20.0);
        (0..n)
            .map(|i| {
                (0..n)
                    .map(|j| {
                        if i == j {
                            Complex64::new(diag_amp, 0.0)
                        } else {
                            let phase = 2.0 * PI * (i * n + j) as f64 / (n * n) as f64;
                            Complex64::new(xt_amp * phase.cos(), xt_amp * phase.sin())
                        }
                    })
                    .collect()
            })
            .collect()
    }
}
/// Long-period grating (LPG) mode converter coupling two co-propagating
/// modes via a periodic index perturbation.
pub struct LpgModeConverter {
    /// Grating period in millimetres.
    pub period_mm: f64,
    /// Grating length in millimetres.
    pub length_mm: f64,
    /// Coupling coefficient kappa (1/m).
    pub coupling_coefficient: f64,
    /// Indices of the two coupled modes.
    pub mode_pair: (usize, usize),
}

impl LpgModeConverter {
    /// Builds a converter whose coupling coefficient is sized so that
    /// kappa*L = 0.9 * (pi/2), i.e. slightly under a complete power
    /// transfer at resonance.
    pub fn new(period_mm: f64, length_mm: f64, mode_pair: (usize, usize)) -> Self {
        let coupling_coefficient = PI / (2.0 * length_mm * 1.0e-3) * 0.9;
        Self {
            period_mm,
            length_mm,
            coupling_coefficient,
            mode_pair,
        }
    }

    /// Conversion efficiency for a phase mismatch `delta_beta` (rad/m),
    /// from two-mode coupled-mode theory:
    /// (kappa/Omega)^2 * sin^2(Omega*L), Omega = sqrt(kappa^2 + (dB/2)^2).
    pub fn efficiency(&self, delta_beta: f64) -> f64 {
        let kappa = self.coupling_coefficient;
        let l_m = self.length_mm * 1.0e-3;
        let detuning = delta_beta / 2.0;
        // Generalized coupling frequency.
        let omega = (kappa * kappa + detuning * detuning).sqrt();
        if omega < 1.0e-30 {
            // Degenerate case: no coupling and no detuning.
            return 0.0;
        }
        (kappa / omega).powi(2) * (omega * l_m).sin().powi(2)
    }

    /// Efficiency at exact resonance (delta_beta = 0): sin^2(kappa*L).
    pub fn peak_efficiency(&self) -> f64 {
        let kappa = self.coupling_coefficient;
        let l_m = self.length_mm * 1.0e-3;
        (kappa * l_m).sin().powi(2)
    }

    /// Conversion bandwidth in nm for the given dispersion slope
    /// (units assumed consistent with rad/m per m of wavelength —
    /// TODO confirm against callers); 0.0 when either the slope or the
    /// grating length is (near) zero.
    pub fn bandwidth_nm(&self, dispersion_slope: f64) -> f64 {
        let l_m = self.length_mm * 1.0e-3;
        if dispersion_slope.abs() < 1.0e-30 || l_m < 1.0e-30 {
            return 0.0;
        }
        // Assumed C-band center wavelength (metres).
        let lambda_center = 1.55e-6_f64;
        let bw_m = 0.886 * lambda_center * lambda_center / (l_m * dispersion_slope.abs());
        bw_m * 1.0e9
    }

    /// Resonance wavelength (nm) from the phase-matching relation
    /// lambda = period * |beta1 - beta2| / (2*pi).
    ///
    /// Fixed: the previous code multiplied by 1.0e9 and then by 1.0e-9 —
    /// a no-op that returned metres, contradicting this file's convention
    /// of converting to nm with a single `* 1.0e9` (see `bandwidth_nm`).
    pub fn resonance_wavelength(&self, beta1: f64, beta2: f64) -> f64 {
        let delta_beta = (beta1 - beta2).abs();
        let lambda_m = self.period_mm * 1.0e-3 * delta_beta / (2.0 * PI);
        lambda_m * 1.0e9
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for the mode-coupling, MIMO-equalizer, photonic-lantern
    //! and LPG mode-converter models defined in this file.
    use super::*;
    use approx::assert_abs_diff_eq;

    // Each row of the power-coupling matrix redistributes one mode's power,
    // so its entries must sum to one.
    #[test]
    fn test_coupling_matrix_rows_sum_to_one() {
        let coupler = RandomModeCoupling::new(4, 1.0e-4, 0.5);
        let m = coupler.coupling_matrix(10.0);
        for row in &m {
            let row_sum: f64 = row.iter().sum();
            assert_abs_diff_eq!(row_sum, 1.0, epsilon = 1.0e-9);
        }
    }

    // Stronger coupling should mix the modes over a shorter distance.
    #[test]
    fn test_mixing_length_decreases_with_coupling() {
        let weak = RandomModeCoupling::new(4, 1.0e-5, 0.5);
        let strong = RandomModeCoupling::new(4, 1.0e-3, 0.5);
        assert!(strong.mixing_length_km() < weak.mixing_length_km());
    }

    // Well past the mixing length (100x here), the effective DGD must drop
    // below the uncoupled value (sqrt-of-length accumulation).
    #[test]
    fn test_effective_dgd_reduced_in_strong_coupling() {
        let coupler = RandomModeCoupling::new(4, 1.0e-3, 1.0);
        let l_mix = coupler.mixing_length_km();
        let fiber_len = 100.0 * l_mix;
        let dgd_eff = coupler.effective_dgd_ps(1000.0, fiber_len);
        assert!(
            dgd_eff < 1000.0,
            "DGD should be reduced by coupling, got {}",
            dgd_eff
        );
    }

    // A freshly constructed equalizer is an identity filter (single unit
    // center tap on each diagonal), so constant inputs pass straight through.
    #[test]
    fn test_mimo_apply_identity() {
        let eq = FmfMimoEqualizer::new(2, 5, 1.0e-4);
        let received: Vec<Vec<Complex64>> = (0..2)
            .map(|i| vec![Complex64::new(i as f64 + 1.0, 0.0); 10])
            .collect();
        let output = eq.apply(&received);
        assert_eq!(output.len(), 2);
        assert_abs_diff_eq!(output[0].re, 1.0, epsilon = 1.0e-9);
        assert_abs_diff_eq!(output[1].re, 2.0, epsilon = 1.0e-9);
    }

    // `required_taps` enforces a floor of 3 taps regardless of inputs.
    #[test]
    fn test_required_taps_positive() {
        let n_taps = FmfMimoEqualizer::required_taps(1000.0, 32.0);
        assert!(n_taps >= 3, "Should need at least 3 taps");
    }

    // A positive step size must give a finite, nonzero convergence estimate.
    #[test]
    fn test_convergence_symbols_finite() {
        let eq = FmfMimoEqualizer::new(4, 32, 1.0e-3);
        let conv = eq.convergence_symbols();
        assert!(conv > 0 && conv < usize::MAX);
    }

    // Insertion loss expressed as linear transmission must be a physical
    // power fraction.
    #[test]
    fn test_photonic_lantern_efficiency() {
        let pl = PhotonicLantern::new(6);
        let eta = pl.efficiency();
        assert!(
            eta > 0.0 && eta <= 1.0,
            "Efficiency must be in (0,1]: {}",
            eta
        );
    }

    // The transfer matrix must be square with dimension n_modes.
    #[test]
    fn test_photonic_lantern_transfer_matrix_size() {
        let pl = PhotonicLantern::new(4);
        let t = pl.transfer_matrix();
        assert_eq!(t.len(), 4);
        assert_eq!(t[0].len(), 4);
    }

    // sin^2(kappa*L) is bounded, so peak efficiency must lie in [0, 1].
    #[test]
    fn test_lpg_peak_efficiency_range() {
        let lpg = LpgModeConverter::new(0.5, 30.0, (0, 1));
        let eta = lpg.peak_efficiency();
        assert!(
            (0.0..=1.0).contains(&eta),
            "Peak efficiency must be in [0,1]: {}",
            eta
        );
    }

    // At zero detuning the general efficiency formula must reduce to the
    // closed-form peak value.
    #[test]
    fn test_lpg_efficiency_at_resonance_equals_peak() {
        let lpg = LpgModeConverter::new(0.5, 30.0, (0, 1));
        let eta_res = lpg.efficiency(0.0);
        let eta_peak = lpg.peak_efficiency();
        assert_abs_diff_eq!(eta_res, eta_peak, epsilon = 1.0e-9);
    }

    // Detuning the phase match must lower the conversion efficiency.
    #[test]
    fn test_lpg_efficiency_decreases_off_resonance() {
        let lpg = LpgModeConverter::new(0.5, 30.0, (0, 1));
        let eta_on = lpg.efficiency(0.0);
        let eta_off = lpg.efficiency(1000.0);
        assert!(eta_off < eta_on, "Off-resonance efficiency must be lower");
    }
}