// fdars_core/elastic_changepoint.rs
1//! Elastic changepoint detection for functional data streams.
2//!
3//! Implements the methods from Tucker & Yarger (2023) for detecting changepoints
4//! in the amplitude, phase, or joint structure of functional data.
5//!
6//! Key capabilities:
7//! - [`elastic_amp_changepoint`] — Amplitude changepoint via CUSUM on aligned curves
8//! - [`elastic_ph_changepoint`] — Phase changepoint via CUSUM on shooting vectors
9//! - [`elastic_fpca_changepoint`] — FPCA-based changepoint via Hotelling CUSUM
10
11use crate::alignment::{karcher_mean, KarcherMeanResult};
12use crate::elastic_fpca::{
13    horiz_fpca, joint_fpca, shooting_vectors_from_psis, sphere_karcher_mean, vert_fpca,
14    warps_to_normalized_psi,
15};
16use crate::helpers::simpsons_weights;
17use crate::matrix::FdMatrix;
18use nalgebra::{DMatrix, SVD};
19use rand::rngs::StdRng;
20use rand::SeedableRng;
21use rand_distr::{Distribution, Normal};
22
23// ─── Types ──────────────────────────────────────────────────────────────────
24
/// Result of changepoint detection.
///
/// Returned by `elastic_amp_changepoint`, `elastic_ph_changepoint`, and
/// `elastic_fpca_changepoint`.
#[derive(Debug, Clone)]
pub struct ChangepointResult {
    /// Estimated changepoint location: the 1-indexed position of the CUSUM
    /// maximum, in 1..n-1.
    pub changepoint: usize,
    /// Test statistic value (the maximum of the CUSUM sequence).
    pub test_statistic: f64,
    /// Monte Carlo p-value, computed with the add-one estimator
    /// (count + 1) / (n_mc + 1), so it is always strictly positive.
    pub p_value: f64,
    /// CUSUM values for candidate changepoints k = 1..n-1 (length n-1).
    pub cusum_values: Vec<f64>,
}
37
/// Type of changepoint to detect.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ChangepointType {
    /// Amplitude changepoint (CUSUM on the aligned curves).
    Amplitude,
    /// Phase changepoint (CUSUM on shooting vectors of the warping functions).
    Phase,
    /// FPCA-based changepoint (Hotelling CUSUM on PC scores), with the FPCA
    /// variant given by the payload.
    Fpca(FpcaChangepointMethod),
}
48
/// FPCA method for changepoint detection.
///
/// Selects which FPCA variant supplies the PC scores used by
/// `elastic_fpca_changepoint`.
// `Eq` and `Hash` are freely derivable for this field-less enum (clippy:
// `derive_partial_eq_without_eq`); adding them is backward-compatible and
// lets callers use the method as a map/set key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FpcaChangepointMethod {
    /// Vertical (amplitude) FPCA scores.
    Vertical,
    /// Horizontal (phase) FPCA scores.
    Horizontal,
    /// Joint amplitude–phase FPCA scores.
    Joint,
}
56
/// Kernel for long-run covariance estimation.
///
/// The kernel weights lagged cross-covariance matrices when forming the
/// long-run covariance estimate (see `kernel_weight` for the formulas).
// `Eq` and `Hash` are freely derivable for this field-less enum (clippy:
// `derive_partial_eq_without_eq`); adding them is backward-compatible.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CovKernel {
    /// Triangular weights: 1 − |x| on [−1, 1].
    Bartlett,
    /// Parzen piecewise-cubic weights.
    Parzen,
    /// Flat (weight 1) on [−0.5, 0.5], linear taper out to 1.
    FlatTop,
    /// Uniform weight 1 on [−1, 1] (truncated kernel).
    Simple,
}
65
66// ─── Amplitude Changepoint ──────────────────────────────────────────────────
67
68/// Detect a changepoint in the amplitude of functional data.
69///
70/// 1. Karcher mean alignment
71/// 2. CUSUM statistic on aligned curves
72/// 3. k* = argmax(S_n), T_n = max(S_n)
73/// 4. Monte Carlo p-value via Brownian bridge simulation
74///
75/// # Arguments
76/// * `data` — Functional data (n × m), ordered by time/index
77/// * `argvals` — Evaluation points (length m)
78/// * `lambda` — Alignment penalty
79/// * `max_iter` — Karcher mean iterations
80/// * `n_mc` — Number of Monte Carlo simulations for p-value
81/// * `cov_kernel` — Kernel for long-run covariance
82/// * `cov_bandwidth` — Bandwidth for covariance kernel (if None, auto-select)
83/// * `seed` — Random seed for reproducibility
84pub fn elastic_amp_changepoint(
85    data: &FdMatrix,
86    argvals: &[f64],
87    lambda: f64,
88    max_iter: usize,
89    n_mc: usize,
90    cov_kernel: CovKernel,
91    cov_bandwidth: Option<usize>,
92    seed: u64,
93) -> Option<ChangepointResult> {
94    let (n, m) = data.shape();
95    if n < 4 || m < 2 || argvals.len() != m {
96        return None;
97    }
98
99    // Alignment
100    let km = karcher_mean(data, argvals, max_iter, 1e-4, lambda);
101
102    // CUSUM on aligned curves
103    let weights = simpsons_weights(argvals);
104    let cusum_values = functional_cusum(&km.aligned_data, &weights);
105
106    let (changepoint, test_statistic) = find_max_cusum(&cusum_values);
107
108    // Long-run covariance and eigendecomposition
109    let bandwidth = cov_bandwidth.unwrap_or_else(|| auto_bandwidth(n));
110    let eigen_values = long_run_cov_eigenvalues(&km.aligned_data, bandwidth, cov_kernel, 20);
111
112    // Monte Carlo p-value
113    let p_value = mc_pvalue_brownian_bridge(test_statistic, &eigen_values, n, n_mc, seed);
114
115    Some(ChangepointResult {
116        changepoint,
117        test_statistic,
118        p_value,
119        cusum_values,
120    })
121}
122
123// ─── Phase Changepoint ──────────────────────────────────────────────────────
124
125/// Detect a changepoint in the phase (warping) of functional data.
126///
127/// Same as amplitude changepoint but on shooting vectors from the warping functions.
128pub fn elastic_ph_changepoint(
129    data: &FdMatrix,
130    argvals: &[f64],
131    lambda: f64,
132    max_iter: usize,
133    n_mc: usize,
134    cov_kernel: CovKernel,
135    cov_bandwidth: Option<usize>,
136    seed: u64,
137) -> Option<ChangepointResult> {
138    let (n, m) = data.shape();
139    if n < 4 || m < 2 || argvals.len() != m {
140        return None;
141    }
142
143    let km = karcher_mean(data, argvals, max_iter, 1e-4, lambda);
144
145    // Compute shooting vectors from warps
146    let shooting = compute_shooting_vectors(&km, argvals)?;
147
148    let weights = simpsons_weights(argvals);
149    let cusum_values = functional_cusum(&shooting, &weights);
150    let (changepoint, test_statistic) = find_max_cusum(&cusum_values);
151
152    let bandwidth = cov_bandwidth.unwrap_or_else(|| auto_bandwidth(n));
153    let eigen_values = long_run_cov_eigenvalues(&shooting, bandwidth, cov_kernel, 20);
154
155    let p_value = mc_pvalue_brownian_bridge(test_statistic, &eigen_values, n, n_mc, seed);
156
157    Some(ChangepointResult {
158        changepoint,
159        test_statistic,
160        p_value,
161        cusum_values,
162    })
163}
164
165// ─── FPCA Changepoint ───────────────────────────────────────────────────────
166
167/// Detect a changepoint using FPCA scores with Hotelling CUSUM.
168///
169/// Uses PC scores from vert/horiz/joint FPCA for a Hotelling-type test.
170///
171/// # Arguments
172/// * `data` — Functional data (n × m)
173/// * `argvals` — Evaluation points (length m)
174/// * `pca_method` — Which FPCA variant to use
175/// * `ncomp` — Number of principal components
176/// * `lambda` — Alignment penalty
177/// * `max_iter` — Karcher mean iterations
178/// * `n_mc` — Monte Carlo simulations
179/// * `seed` — Random seed
180pub fn elastic_fpca_changepoint(
181    data: &FdMatrix,
182    argvals: &[f64],
183    pca_method: FpcaChangepointMethod,
184    ncomp: usize,
185    lambda: f64,
186    max_iter: usize,
187    n_mc: usize,
188    seed: u64,
189) -> Option<ChangepointResult> {
190    let (n, m) = data.shape();
191    if n < 4 || m < 2 || argvals.len() != m || ncomp < 1 {
192        return None;
193    }
194
195    let km = karcher_mean(data, argvals, max_iter, 1e-4, lambda);
196
197    // Get PC scores
198    let scores = match pca_method {
199        FpcaChangepointMethod::Vertical => {
200            let fpca = vert_fpca(&km, argvals, ncomp)?;
201            fpca.scores
202        }
203        FpcaChangepointMethod::Horizontal => {
204            let fpca = horiz_fpca(&km, argvals, ncomp)?;
205            fpca.scores
206        }
207        FpcaChangepointMethod::Joint => {
208            let fpca = joint_fpca(&km, argvals, ncomp, None)?;
209            fpca.scores
210        }
211    };
212
213    let actual_ncomp = scores.ncols();
214
215    // Hotelling CUSUM on scores
216    let cusum_values = hotelling_cusum(&scores);
217    let (changepoint, test_statistic) = find_max_cusum(&cusum_values);
218
219    // Monte Carlo p-value via squared Brownian bridges
220    let p_value = mc_pvalue_hotelling(test_statistic, actual_ncomp, n, n_mc, seed);
221
222    Some(ChangepointResult {
223        changepoint,
224        test_statistic,
225        p_value,
226        cusum_values,
227    })
228}
229
230// ─── Internal Helpers ───────────────────────────────────────────────────────
231
/// Compute shooting vectors from warping functions.
///
/// Converts the Karcher-mean warps to normalized psi functions, computes
/// their Karcher mean on the sphere (30 iterations), and returns the
/// shooting vectors of each psi relative to that mean. Returns `None` when
/// there are fewer than 2 warps or fewer than 2 grid points.
// NOTE(review): `argvals` is used only for the psi normalization; the sphere
// mean and shooting vectors use a uniform grid on [0, 1] — presumably because
// warps live on normalized time. Confirm this mixed-grid usage is intended.
fn compute_shooting_vectors(km: &KarcherMeanResult, argvals: &[f64]) -> Option<FdMatrix> {
    let (n, m) = km.gammas.shape();
    if n < 2 || m < 2 {
        return None;
    }
    // Uniform grid on [0, 1] for the sphere computations.
    let time: Vec<f64> = (0..m).map(|i| i as f64 / (m - 1) as f64).collect();
    let psis = warps_to_normalized_psi(&km.gammas, argvals);
    let mu_psi = sphere_karcher_mean(&psis, &time, 30);
    Some(shooting_vectors_from_psis(&psis, &mu_psi, &time))
}
243
244/// Functional CUSUM: S_n(k) = (1/(m*n²)) Σ_j (cumsum_j - (k/n)*total_j)²
245///
246/// Matches R's fdasrvf formula: `sum(diff_vec^2) / (m * n^2)`.
247fn functional_cusum(data: &FdMatrix, _weights: &[f64]) -> Vec<f64> {
248    let (n, m) = data.shape();
249    let mut cusum_values = Vec::with_capacity(n - 1);
250
251    // Running sum
252    let mut cumsum = vec![0.0; m];
253    let mut total = vec![0.0; m];
254    for i in 0..n {
255        for j in 0..m {
256            total[j] += data[(i, j)];
257        }
258    }
259
260    for k in 1..n {
261        for j in 0..m {
262            cumsum[j] += data[(k - 1, j)];
263        }
264
265        // S_n(k) = (1/(m*n²)) Σ_j (cumsum_j - (k/n)*total_j)²
266        let ratio = k as f64 / n as f64;
267        let mut s = 0.0;
268        for j in 0..m {
269            let diff = cumsum[j] - ratio * total[j];
270            s += diff * diff;
271        }
272        s /= (m * n * n) as f64;
273        cusum_values.push(s);
274    }
275
276    cusum_values
277}
278
279/// Compute sample covariance matrix with regularization, returning its inverse.
280fn regularized_cov_inverse(scores: &FdMatrix, mean: &[f64], n: usize, p: usize) -> DMatrix<f64> {
281    let mut cov = DMatrix::zeros(p, p);
282    for i in 0..n {
283        for j1 in 0..p {
284            for j2 in 0..p {
285                cov[(j1, j2)] += (scores[(i, j1)] - mean[j1]) * (scores[(i, j2)] - mean[j2]);
286            }
287        }
288    }
289    cov /= (n - 1) as f64;
290    for j in 0..p {
291        cov[(j, j)] += 1e-6;
292    }
293    if let Some(chol) = cov.cholesky() {
294        chol.inverse()
295    } else {
296        DMatrix::identity(p, p)
297    }
298}
299
300/// Hotelling CUSUM on multivariate scores: S_n(k) = (1/n) Δ_k' Σ^{-1} Δ_k
301fn hotelling_cusum(scores: &FdMatrix) -> Vec<f64> {
302    let (n, p) = scores.shape();
303
304    let mut mean = vec![0.0; p];
305    for k in 0..p {
306        for i in 0..n {
307            mean[k] += scores[(i, k)];
308        }
309        mean[k] /= n as f64;
310    }
311
312    let cov_inv = regularized_cov_inverse(scores, &mean, n, p);
313
314    let mut cumsum = vec![0.0; p];
315    let mut total = vec![0.0; p];
316    for i in 0..n {
317        for k in 0..p {
318            total[k] += scores[(i, k)];
319        }
320    }
321
322    let mut cusum_values = Vec::with_capacity(n - 1);
323    for k in 1..n {
324        for j in 0..p {
325            cumsum[j] += scores[(k - 1, j)];
326        }
327        let ratio = k as f64 / n as f64;
328        let mut delta = nalgebra::DVector::zeros(p);
329        for j in 0..p {
330            delta[j] = (cumsum[j] - ratio * total[j]) / n as f64;
331        }
332        let hotelling = (&delta.transpose() * &cov_inv * &delta)[(0, 0)];
333        cusum_values.push(hotelling);
334    }
335
336    cusum_values
337}
338
/// Find index and value of maximum CUSUM.
///
/// Returns the 1-indexed position of the first maximum and its value; an
/// empty slice yields `(0, f64::NEG_INFINITY)`.
fn find_max_cusum(cusum_values: &[f64]) -> (usize, f64) {
    cusum_values
        .iter()
        .enumerate()
        .fold((0, f64::NEG_INFINITY), |(best_idx, best_val), (i, &v)| {
            // Strict `>` keeps the earliest index on ties.
            if v > best_val {
                (i + 1, v)
            } else {
                (best_idx, best_val)
            }
        })
}
351
352/// Compute lag-h cross-covariance matrix Γ(h) = (1/n) Σ_i (x_i - μ)(x_{i+h} - μ)'.
353fn lag_cross_covariance(
354    data: &FdMatrix,
355    mean: &[f64],
356    n: usize,
357    m: usize,
358    lag: usize,
359) -> DMatrix<f64> {
360    let count = n.saturating_sub(lag);
361    let mut gamma_lag = DMatrix::zeros(m, m);
362    for i in 0..count {
363        let i2 = i + lag;
364        for j1 in 0..m {
365            for j2 in 0..m {
366                gamma_lag[(j1, j2)] += (data[(i, j1)] - mean[j1]) * (data[(i2, j2)] - mean[j2]);
367            }
368        }
369    }
370    gamma_lag /= n as f64;
371    gamma_lag
372}
373
374/// Eigenvalues of long-run covariance via kernel-weighted lag cross-covariances.
375fn long_run_cov_eigenvalues(
376    data: &FdMatrix,
377    bandwidth: usize,
378    kernel: CovKernel,
379    n_eigen: usize,
380) -> Vec<f64> {
381    let (n, m) = data.shape();
382
383    let mut mean = vec![0.0; m];
384    for j in 0..m {
385        for i in 0..n {
386            mean[j] += data[(i, j)];
387        }
388        mean[j] /= n as f64;
389    }
390
391    let mut d_mat = DMatrix::zeros(m, m);
392    for lag in 0..=bandwidth {
393        let k_weight = kernel_weight(lag as f64 / bandwidth as f64, kernel);
394        if k_weight.abs() < 1e-15 || lag >= n {
395            continue;
396        }
397        let gamma_lag = lag_cross_covariance(data, &mean, n, m, lag);
398        if lag == 0 {
399            d_mat += k_weight * &gamma_lag;
400        } else {
401            d_mat += k_weight * (&gamma_lag + gamma_lag.transpose());
402        }
403    }
404
405    let svd = SVD::new(d_mat, false, false);
406    svd.singular_values
407        .iter()
408        .take(n_eigen)
409        .map(|&s| s.max(0.0))
410        .collect()
411}
412
413/// Kernel weight function.
414fn kernel_weight(x: f64, kernel: CovKernel) -> f64 {
415    let x_abs = x.abs();
416    match kernel {
417        CovKernel::Bartlett => {
418            if x_abs <= 1.0 {
419                1.0 - x_abs
420            } else {
421                0.0
422            }
423        }
424        CovKernel::Parzen => {
425            if x_abs <= 0.5 {
426                1.0 - 6.0 * x_abs * x_abs + 6.0 * x_abs * x_abs * x_abs
427            } else if x_abs <= 1.0 {
428                2.0 * (1.0 - x_abs).powi(3)
429            } else {
430                0.0
431            }
432        }
433        CovKernel::FlatTop => {
434            if x_abs <= 0.5 {
435                1.0
436            } else if x_abs <= 1.0 {
437                2.0 * (1.0 - x_abs)
438            } else {
439                0.0
440            }
441        }
442        CovKernel::Simple => {
443            if x_abs <= 1.0 {
444                1.0
445            } else {
446                0.0
447            }
448        }
449    }
450}
451
452/// Auto-select bandwidth: floor(n^{1/3}).
453fn auto_bandwidth(n: usize) -> usize {
454    (n as f64).powf(1.0 / 3.0).floor() as usize
455}
456
457/// Monte Carlo p-value using Brownian bridge simulation.
458///
459/// Simulates max_t(Σ λ_k BB_k(t)²) and counts fraction ≥ T_n.
460fn mc_pvalue_brownian_bridge(
461    test_stat: f64,
462    eigenvalues: &[f64],
463    n: usize,
464    n_mc: usize,
465    seed: u64,
466) -> f64 {
467    let mut rng = StdRng::seed_from_u64(seed);
468    let normal = Normal::new(0.0, 1.0).unwrap();
469    let n_eigen = eigenvalues.len();
470
471    let mut count_exceed = 0usize;
472    let grid_size = n - 1;
473
474    for _ in 0..n_mc {
475        // Generate n_eigen independent Brownian bridges on [0,1]
476        let mut max_stat = 0.0;
477        for t_idx in 1..=grid_size {
478            let t = t_idx as f64 / n as f64;
479            let mut val = 0.0;
480            for k in 0..n_eigen {
481                // Brownian bridge at t: W(t) - t*W(1), but we simulate directly
482                // BB(t) ~ N(0, t(1-t)), so BB(t) = sqrt(t(1-t)) * Z
483                let z: f64 = normal.sample(&mut rng);
484                let bb = (t * (1.0 - t)).sqrt() * z;
485                val += eigenvalues[k] * bb * bb;
486            }
487            if val > max_stat {
488                max_stat = val;
489            }
490        }
491
492        if max_stat >= test_stat {
493            count_exceed += 1;
494        }
495    }
496
497    (count_exceed as f64 + 1.0) / (n_mc as f64 + 1.0)
498}
499
500/// Monte Carlo p-value for Hotelling CUSUM via squared Brownian bridges.
501fn mc_pvalue_hotelling(test_stat: f64, p: usize, n: usize, n_mc: usize, seed: u64) -> f64 {
502    let mut rng = StdRng::seed_from_u64(seed);
503    let normal = Normal::new(0.0, 1.0).unwrap();
504
505    let mut count_exceed = 0usize;
506    let grid_size = n - 1;
507
508    for _ in 0..n_mc {
509        let mut max_stat = 0.0;
510        for t_idx in 1..=grid_size {
511            let t = t_idx as f64 / n as f64;
512            let var = t * (1.0 - t);
513            let mut val = 0.0;
514            for _ in 0..p {
515                let z: f64 = normal.sample(&mut rng);
516                let bb = var.sqrt() * z;
517                val += bb * bb;
518            }
519            if val > max_stat {
520                max_stat = val;
521            }
522        }
523
524        if max_stat >= test_stat {
525            count_exceed += 1;
526        }
527    }
528
529    (count_exceed as f64 + 1.0) / (n_mc as f64 + 1.0)
530}
531
532/// Simulate a Brownian bridge on a grid.
533#[allow(dead_code)]
534fn brownian_bridge(n: usize, rng: &mut StdRng) -> Vec<f64> {
535    let normal = Normal::new(0.0, 1.0).unwrap();
536    let mut bb = vec![0.0; n];
537    if n < 2 {
538        return bb;
539    }
540
541    // Generate Brownian motion
542    let dt = 1.0 / (n - 1) as f64;
543    let sqrt_dt = dt.sqrt();
544    let mut w = vec![0.0; n];
545    for i in 1..n {
546        w[i] = w[i - 1] + sqrt_dt * normal.sample(rng);
547    }
548
549    // Bridge: BB(t) = W(t) - t * W(1)
550    let w_final = w[n - 1];
551    for i in 0..n {
552        let t = i as f64 / (n - 1) as f64;
553        bb[i] = w[i] - t * w_final;
554    }
555
556    bb
557}
558
#[cfg(test)]
mod tests {
    use super::*;
    use std::f64::consts::PI;

    /// Generate n sine curves on m points whose amplitude jumps from 1.0 to
    /// 2.0 at row index `cp`.
    fn generate_changepoint_data(n: usize, m: usize, cp: usize) -> (FdMatrix, Vec<f64>) {
        let t: Vec<f64> = (0..m).map(|j| j as f64 / (m - 1) as f64).collect();
        let mut data = FdMatrix::zeros(n, m);

        for i in 0..n {
            // Amplitude doubles from the changepoint onward.
            let amp = if i < cp { 1.0 } else { 2.0 };
            for j in 0..m {
                data[(i, j)] = amp * (2.0 * PI * t[j]).sin();
            }
        }
        (data, t)
    }

    #[test]
    fn test_amp_changepoint_detects_shift() {
        let n = 30;
        let m = 51;
        let cp = 15;
        let (data, t) = generate_changepoint_data(n, m, cp);

        let result = elastic_amp_changepoint(&data, &t, 0.0, 5, 100, CovKernel::Bartlett, None, 42);
        assert!(result.is_some(), "amp changepoint should succeed");

        let res = result.unwrap();
        assert!(
            res.cusum_values.len() == n - 1,
            "Should have n-1 CUSUM values"
        );
        // Allow slack of 5: alignment and finite-sample noise shift the argmax.
        assert!(
            (res.changepoint as i64 - cp as i64).abs() <= 5,
            "Detected changepoint {} should be near true cp {}",
            res.changepoint,
            cp
        );
        assert!(res.p_value >= 0.0 && res.p_value <= 1.0);
    }

    #[test]
    fn test_ph_changepoint_basic() {
        let n = 30;
        let m = 51;
        let t: Vec<f64> = (0..m).map(|j| j as f64 / (m - 1) as f64).collect();
        let mut data = FdMatrix::zeros(n, m);

        // Phase change: the second half of the curves is shifted in time.
        for i in 0..n {
            let shift = if i < 15 { 0.0 } else { 0.15 };
            for j in 0..m {
                data[(i, j)] = (2.0 * PI * (t[j] + shift)).sin();
            }
        }

        let result = elastic_ph_changepoint(&data, &t, 0.0, 5, 100, CovKernel::Bartlett, None, 42);
        assert!(result.is_some());
    }

    #[test]
    fn test_fpca_changepoint_basic() {
        let n = 30;
        let m = 51;
        let cp = 15;
        let (data, t) = generate_changepoint_data(n, m, cp);

        let result = elastic_fpca_changepoint(
            &data,
            &t,
            FpcaChangepointMethod::Vertical,
            3,
            0.0,
            5,
            100,
            42,
        );
        assert!(result.is_some(), "fpca changepoint should succeed");
    }

    #[test]
    fn test_kernel_weights() {
        // Bartlett
        assert!((kernel_weight(0.0, CovKernel::Bartlett) - 1.0).abs() < 1e-10);
        assert!((kernel_weight(0.5, CovKernel::Bartlett) - 0.5).abs() < 1e-10);
        assert!(kernel_weight(1.5, CovKernel::Bartlett).abs() < 1e-10);

        // Parzen
        assert!((kernel_weight(0.0, CovKernel::Parzen) - 1.0).abs() < 1e-10);
        assert!(kernel_weight(1.5, CovKernel::Parzen).abs() < 1e-10);

        // FlatTop
        assert!((kernel_weight(0.0, CovKernel::FlatTop) - 1.0).abs() < 1e-10);
        assert!((kernel_weight(0.3, CovKernel::FlatTop) - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_brownian_bridge_endpoints() {
        let mut rng = StdRng::seed_from_u64(123);
        let bb = brownian_bridge(101, &mut rng);
        // A Brownian bridge is pinned to zero at both endpoints.
        assert!((bb[0]).abs() < 1e-10, "BB should start at 0");
        assert!((bb[100]).abs() < 1e-10, "BB should end at 0");
    }

    #[test]
    fn test_changepoint_no_change() {
        // All curves identical → weak test statistic, high p-value
        let n = 20;
        let m = 51;
        let t: Vec<f64> = (0..m).map(|j| j as f64 / (m - 1) as f64).collect();
        let mut data = FdMatrix::zeros(n, m);
        for i in 0..n {
            for j in 0..m {
                data[(i, j)] = (2.0 * PI * t[j]).sin();
            }
        }

        let result = elastic_amp_changepoint(&data, &t, 0.0, 5, 200, CovKernel::Bartlett, None, 42);
        if let Some(res) = result {
            // With no change, p-value should tend to be higher
            // (not a strict guarantee due to randomness, but a sanity check)
            assert!(res.p_value > 0.0);
        }
    }

    #[test]
    fn test_invalid_input() {
        let data = FdMatrix::zeros(2, 5);
        let t: Vec<f64> = (0..5).map(|i| i as f64 / 4.0).collect();
        // n=2 < 4 → should return None
        assert!(
            elastic_amp_changepoint(&data, &t, 0.0, 5, 100, CovKernel::Simple, None, 42).is_none()
        );
    }
}