//! atelier_quant 0.0.12
//!
//! Quantitative Finance Tools & Models for the atelier-rs engine.
//! Documentation:
//! # Point process discrete simulation
//!
//! A Hawkes Process is a self-exciting point process where the intensity of
//! events increases following the occurrence of previous events.
//!
//! ## Linear - Univariate.
//!
//! The simplest case is a self-exciting, one dimensional effect, i.e. the
//! arrival of an event increases the likelihood of observing events in the
//! near future. It is also useful to consider the case when there is
//! more than one type of event, and there is mutual excitement
//! between the different events. For $d$ such events, we define a
//! $d$-dimensional Hawkes process:
//!
//! $$
//!   \lambda_{i}(t) = \mu_{i} + \sum_{j=1}^{d} \sum_{t_{j,r} \leq t} \phi_{ij}(t - t_{j,r})
//! $$
//!
//! Where:
//!
//! $\mu_{i} \in R_{+}$: Base line (exogenous) intensities. \
//! $\phi_{ij}$: The (exciting) Kernel functions. \
//! $t_{j,r}$ : the time of the $j^{th}$ event of type $r$
//! \
//! \
//! As for the $\phi_{ij}$ kernel definition, exponential kernels
//! are among the most commonly used in Hawkes
//! processes due to their simplicity and mathematical properties:
//!
//! $$
//!  \phi_{ij}(t) = \alpha_{ij} e^{-\beta_{ij}t}
//! $$
//!
//! Where:
//!
//! $\alpha$: Excitation factor (how much each event excites the
//! future events). \
//! $\beta$: Decay rate (how quickly the excitement diminishes).\

use crate::hawkes::errors;
use crate::hawkes::kernel::{ExcitationKernel, ExponentialKernel};
use crate::hawkes::quadrature;
use rand::Rng;

/// Univariate Hawkes process parameterised by an excitation kernel `K`.
///
/// The conditional intensity is:
///
/// $$\lambda(t) = \mu + \text{kernel.intensity\_contribution}(\text{state}(t))$$
///
/// For the default [`ExponentialKernel`] this reduces to the classical:
///
/// $$\lambda(t) = \mu + \alpha \sum_{t_i < t} e^{-\beta(t - t_i)}$$
///
/// The process is stationary when the branching ratio $\alpha / \beta < 1$.
pub struct HawkesProcess<K: ExcitationKernel = ExponentialKernel> {
    /// Baseline (exogenous) intensity $\mu$. Must be ≥ 0 — enforced by the
    /// constructors, but note the field itself is public and unchecked.
    pub mu: f64,
    /// The excitation kernel (carries its own parameters, e.g. $\alpha$ and
    /// $\beta$ for the default [`ExponentialKernel`]).
    pub kernel: K,
}

// ═══════════════════════════════════════════════════════════════════
//  Generic impl — works for any ExcitationKernel
// ═══════════════════════════════════════════════════════════════════

impl<K: ExcitationKernel> HawkesProcess<K> {
    /// Build a process from a baseline intensity and an arbitrary
    /// excitation kernel.
    ///
    /// # Errors
    ///
    /// Returns [`HawkesError::HawkesInputTypeFailure`](errors::HawkesError::HawkesInputTypeFailure)
    /// when the baseline intensity is negative. (A NaN baseline is not
    /// rejected here, matching the `mu < 0.0` comparison semantics.)
    pub fn with_kernel(mu: f64, kernel: K) -> Result<Self, errors::HawkesError> {
        if mu < 0.0 {
            Err(errors::HawkesError::HawkesInputTypeFailure)
        } else {
            Ok(Self { mu, kernel })
        }
    }

    /// Conditional intensity evaluated from a precomputed recursive
    /// kernel state:
    ///
    /// $$\lambda(t) = \mu + \text{kernel.intensity\_contribution}(\text{state})$$
    #[inline]
    pub fn conditional_intensity_at(&self, state: &K::State) -> f64 {
        let excitation = self.kernel.intensity_contribution(state);
        excitation + self.mu
    }

    /// Simulate N events conditioned on observed event history.
    ///
    /// Uses Ogata's thinning algorithm with O(1) recursive state
    /// maintenance per candidate step: the intensity at the current time
    /// serves as the dominating rate λ*, a candidate gap is drawn from
    /// Exp(λ*), and the candidate is accepted with probability
    /// λ(candidate) / λ*.
    ///
    /// NOTE(review): using λ(now) as the upper bound is only valid when
    /// the kernel contribution is non-increasing between events (true for
    /// the exponential kernel) — confirm for other `K` implementations.
    ///
    /// # Arguments
    ///
    /// * `current_ts` - Start time (must be ≥ the last history event).
    /// * `history` - Sorted event times observed before `current_ts`.
    /// * `n` - Number of new events to generate.
    ///
    /// # Returns
    ///
    /// A `Vec<f64>` of strictly increasing event times, all greater than
    /// `current_ts`. Normally of length `n`; may be shorter if the
    /// internal attempt cap (`n * 10_000` candidate draws) is exhausted.
    pub fn generate_values_conditioned(
        &self,
        current_ts: f64,
        history: &[f64],
        n: usize,
    ) -> Vec<f64> {
        let mut rng = rand::rng();
        let mut event_times = Vec::with_capacity(n);
        let mut current_time = current_ts;

        // Initialise the recursive state from the full history — O(n).
        let mut state = self.kernel.init_state(current_ts, history);

        // Cap on candidate draws so heavy rejection cannot loop forever.
        let max_attempts = n * 10_000;
        let mut attempts = 0;

        while event_times.len() < n && attempts < max_attempts {
            attempts += 1;

            // Upper bound: intensity right now (intensity only decays
            // until the next event, so this is a valid bound).
            let lambda_star = self.conditional_intensity_at(&state);

            // Degenerate intensity (≤ 0, NaN, or ∞): fall back to a pure
            // Poisson step at rate max(μ, 1e-10) and emit the event
            // unconditionally so the simulation still makes progress.
            if lambda_star <= 0.0 || !lambda_star.is_finite() {
                let u: f64 = rng.random();
                let wait = -u.ln() / self.mu.max(1e-10);
                current_time += wait;
                state = self.kernel.decay_state(&state, wait);
                state = self.kernel.excite_state(&state);
                event_times.push(current_time);
                continue;
            }

            // Step 1: candidate waiting time ~ Exp(λ*)
            let u1: f64 = rng.random();
            let wait = -u1.ln() / lambda_star;
            let candidate = current_time + wait;

            // Decay state to the candidate time — O(1).
            let state_candidate = self.kernel.decay_state(&state, wait);

            // Step 2: true intensity at the candidate
            let lambda_cand = self.conditional_intensity_at(&state_candidate);

            // Step 3: accept / reject (classic thinning).
            let u2: f64 = rng.random();
            if u2 <= lambda_cand / lambda_star {
                // Accept: excite state (the new event contributes +1).
                state = self.kernel.excite_state(&state_candidate);
                event_times.push(candidate);
            } else {
                // Reject: keep decayed state, no excitation.
                state = state_candidate;
            }

            // Time always advances to the candidate, accepted or not.
            current_time = candidate;
        }

        event_times
    }

    /// Simulate N event times with no prior events.
    ///
    /// Convenience shim over
    /// [`generate_values_conditioned`](Self::generate_values_conditioned)
    /// that supplies an empty history.
    pub fn generate_values(&self, current_ts: f64, n: usize) -> Vec<f64> {
        let no_history: &[f64] = &[];
        self.generate_values_conditioned(current_ts, no_history, n)
    }

    /// Deterministic conditional expected gap to the next event, given
    /// the current kernel state.
    ///
    /// $$E[\Delta \mid \text{state}] = \int_0^\infty S(s)\,ds$$
    ///
    /// where $S(s) = \exp(-\mu\,s - K_{\text{integrated}}(s))$.
    ///
    /// See [`quadrature::conditional_mean_gap`] for the numerical method.
    pub fn conditional_mean_gap(&self, state: &K::State) -> f64 {
        // Owned copies so the closure handed to the quadrature routine is
        // self-contained and does not borrow `self` or `state`.
        let owned_state = state.clone();
        let owned_kernel = self.kernel.clone();
        let integrated =
            move |s| owned_kernel.integrated_intensity_contribution(&owned_state, s);
        quadrature::conditional_mean_gap(self.mu, integrated, 1e-10)
    }

    /// Forecast N inter-arrival gaps deterministically by iterating
    /// conditional means.
    ///
    /// Each step computes $E[\Delta_k \mid \text{state}]$ via quadrature,
    /// places a virtual event at $t + E[\Delta_k]$, decays the state
    /// across the gap, then excites it — i.e. a MAP-like point forecast
    /// where each stochastic gap is replaced by its conditional
    /// expectation and the state is propagated deterministically.
    ///
    /// # Arguments
    ///
    /// * `current_ts` - Start time.
    /// * `history` - Sorted event times observed before `current_ts`.
    /// * `n` - Number of gaps (events) to forecast.
    ///
    /// # Returns
    ///
    /// A `Vec<f64>` of `n` strictly increasing forecast event times.
    pub fn forecast_conditional_means(
        &self,
        current_ts: f64,
        history: &[f64],
        n: usize,
    ) -> Vec<f64> {
        let mut kernel_state = self.kernel.init_state(current_ts, history);
        let mut clock = current_ts;
        let mut out = Vec::with_capacity(n);

        for _ in 0..n {
            let raw_gap = self.conditional_mean_gap(&kernel_state);

            // Divergent or non-positive quadrature result falls back to
            // the baseline mean gap 1/μ (or 1.0 when μ = 0).
            let step = if raw_gap.is_finite() && raw_gap > 0.0 {
                raw_gap
            } else if self.mu > 0.0 {
                1.0 / self.mu
            } else {
                1.0
            };

            clock += step;
            // Decay across the chosen gap, then excite for the virtual event.
            kernel_state = self.kernel.decay_state(&kernel_state, step);
            kernel_state = self.kernel.excite_state(&kernel_state);
            out.push(clock);
        }

        out
    }
}

// ═══════════════════════════════════════════════════════════════════
//  Exponential-kernel-specific convenience methods
// ═══════════════════════════════════════════════════════════════════

impl HawkesProcess<ExponentialKernel> {
    /// Validate that μ ≥ 0, α ≥ 0, and β > 0.
    ///
    /// NaN inputs fail every comparison and are therefore rejected too.
    ///
    /// # Errors
    ///
    /// Returns [`HawkesError::HawkesInputTypeFailure`](errors::HawkesError::HawkesInputTypeFailure)
    /// if any constraint is violated.
    pub fn hawkes_valid_inputs(
        mu: f64,
        alpha: f64,
        beta: f64,
    ) -> Result<(), errors::HawkesError> {
        // `match` on a bool is unidiomatic (clippy::match_bool); a plain
        // conditional states the same check directly.
        if mu >= 0.0 && alpha >= 0.0 && beta > 0.0 {
            Ok(())
        } else {
            Err(errors::HawkesError::HawkesInputTypeFailure)
        }
    }

    /// Construct a `HawkesProcess` with an exponential kernel after
    /// validating parameters.
    ///
    /// Backward-compatible constructor mirroring the original
    /// `HawkesProcess::new(mu, alpha, beta)` signature.
    ///
    /// # Errors
    ///
    /// Returns [`HawkesError::HawkesInputTypeFailure`](errors::HawkesError::HawkesInputTypeFailure)
    /// if μ < 0, α < 0, or β ≤ 0.
    pub fn new(mu: f64, alpha: f64, beta: f64) -> Result<Self, errors::HawkesError> {
        Self::hawkes_valid_inputs(mu, alpha, beta).map(|()| Self {
            mu,
            kernel: ExponentialKernel { alpha, beta },
        })
    }

    /// Conditional intensity $\lambda(t)$ computed directly from past
    /// event times.
    ///
    /// Only events strictly before `t` contribute. This is the
    /// brute-force O(n) evaluation, useful for tests and for the MLE
    /// module, which maintains its own recursion.
    pub fn lambda(&self, t: f64, event_times: &[f64]) -> f64 {
        // Left-to-right sum preserves the accumulation order of the
        // original loop form.
        let excitation: f64 = event_times
            .iter()
            .filter(|&&t_i| t_i < t)
            .map(|&t_i| self.kernel.alpha * (-self.kernel.beta * (t - t_i)).exp())
            .sum();
        self.mu + excitation
    }

    /// Compute the recursive auxiliary $A(t)$ from a full history slice.
    ///
    /// $$A(t) = \sum_{t_i < t} e^{-\beta(t - t_i)}$$
    ///
    /// This is a thin wrapper around
    /// [`ExponentialKernel::init_state`](ExponentialKernel) for
    /// backward compatibility with existing call sites.
    #[inline]
    pub fn init_auxiliary(&self, t: f64, history: &[f64]) -> f64 {
        self.kernel.init_state(t, history)
    }
}