// systick_timer/timer.rs
// SPDX-License-Identifier: Apache-2.0
use core::sync::atomic::{AtomicU32, Ordering};

#[cfg(test)]
use core::sync::atomic::AtomicBool;
/// Nanoseconds per second, used to convert `DelayNs` requests into SysTick
/// cycles. Kept as `u128` so the intermediate `ns * systick_freq` product in
/// `delay_cycles_from_ns` cannot overflow.
#[cfg(feature = "embedded-hal")]
const NANOS_PER_SECOND: u128 = 1_000_000_000;
/// A 64-bit timer based on SysTick.
///
/// Stores wraparounds in 2 32-bit atomics. Scales the systick counts
/// to arbitrary frequency.
pub struct Timer {
    inner_wraps: AtomicU32, // Counts SysTick interrupts (lower 32 bits of wrap count)
    outer_wraps: AtomicU32, // Counts overflows of inner_wraps (upper 32 bits)
    reload_value: u32,      // SysTick reload value (max 2^24 - 1)
    #[cfg(feature = "embedded-hal")]
    systick_freq: u64, // Input SysTick frequency in Hz (used only for DelayNs)
    tick_hz: u64,           // Output frequency in Hz
    multiplier: u64,        // Precomputed for scaling cycles to ticks
    shift: u32,             // Precomputed for scaling efficiency
    #[cfg(test)]
    current_systick: AtomicU32, // Emulated SysTick VAL register (tests only)
    #[cfg(test)]
    after_v1_hook: Option<fn(&Timer)>, // Injected nested call site for race tests
    #[cfg(test)]
    pendst_is_pending: AtomicBool, // Emulated SCB->ICSR PENDSTSET bit (tests only)
}
impl Timer {
    /// Takes a torn-read-free snapshot of `(wrap_count, cycles_into_period)`.
    ///
    /// Returns the combined 64-bit wrap counter (possibly +1 to compensate
    /// for a pending, unserviced wrap) and the number of cycles elapsed in
    /// the current period (`reload - VAL`; SysTick counts down from `reload`
    /// to 0).
    fn stable_cycle_snapshot(&self) -> (u64, u64) {
        let reload = self.reload_value as u64;

        loop {
            // The order of these reads is critical to preventing race conditions.
            // 1. Read the high-level state (wrap counters).
            let in1 = self.inner_wraps.load(Ordering::SeqCst) as u64;
            let out1 = self.outer_wraps.load(Ordering::SeqCst) as u64;
            let wraps_pre = (out1 << 32) | in1;

            // 2. Read the low-level hardware value.
            let val_before = self.get_syst() as u64;

            // 3. Re-read the high-level state to detect if an ISR ran during our reads.
            let in2 = self.inner_wraps.load(Ordering::SeqCst) as u64;
            let out2 = self.outer_wraps.load(Ordering::SeqCst) as u64;
            let wraps_post = (out2 << 32) | in2;

            // If the wrap counters changed, an ISR ran. Our snapshot is inconsistent,
            // so we must loop again to get a clean read.
            if wraps_pre != wraps_post {
                continue;
            }

            // If we're here, the wrap counters are stable, but we need to handle the race
            // where PendST could have flipped after wraps_pre but before wraps_post.
            // Re-sample both PendST and VAL now that we know the counters are consistent.
            let is_pending = self.is_systick_pending_internal();
            let val_after = self.get_syst() as u64;

            // Double-check that no ISR ran during our PendST/VAL re-sampling.
            let in3 = self.inner_wraps.load(Ordering::SeqCst) as u64;
            let out3 = self.outer_wraps.load(Ordering::SeqCst) as u64;
            let wraps_final = (out3 << 32) | in3;

            if wraps_final != wraps_pre {
                // Wrap counters changed during our final reads - loop again
                continue;
            }

            // Now we have a truly stable snapshot. Determine if a wrap occurred:
            // 1. PendST is set (hardware wrapped but the ISR hasn't run yet), or
            // 2. VAL increased (SysTick counts down, so an increase means it reloaded).
            let wrap_occurred = is_pending || val_after > val_before;

            // KEY DESIGN DECISION: This +1 compensation handles exactly ONE missed wrap.
            // If the ISR is starved for 2+ wrap periods, we get monotonic violations.
            let (wraps_u64, final_val) = if wrap_occurred {
                // A wrap occurred after we read wraps_pre. Use the post-wrap VAL reading.
                (wraps_pre + 1, val_after)
            } else {
                // No wrap occurred. Use the most recent VAL reading for consistency.
                (wraps_pre, val_after)
            };

            // Return the cycle snapshot as wrap count and current VAL-derived low part.
            return (wraps_u64, reload - final_val);
        }
    }

    /// SysTick handler.
    ///
    /// Call this from the SysTick interrupt handler.
    pub fn systick_handler(&self) {
        // Increment inner_wraps and check for overflow.
        // NOTE(review): load+store (rather than fetch_add) appears to assume a
        // single writer — the SysTick ISR itself — TODO confirm; read-modify-write
        // atomics are unavailable on some Cortex-M0 targets.
        let inner = self.inner_wraps.load(Ordering::Relaxed);
        // Check for overflow (inner was u32::MAX)
        // Store the incremented value
        self.inner_wraps
            .store(inner.wrapping_add(1), Ordering::SeqCst);
        if inner == u32::MAX {
            // Increment outer_wraps (wrapping, so a full 64-bit rollover is tolerated)
            let outer = self.outer_wraps.load(Ordering::Relaxed).wrapping_add(1);
            self.outer_wraps.store(outer, Ordering::SeqCst);
        }
    }

    /// Robust `now()` (VAL-jump tie-breaker, no COUNTFLAG dependency).
    ///
    /// ## Design: One-Wrap Compensation via PendST Detection
    /// This implementation is designed to handle exactly **one missed SysTick wrap**.
    ///
    /// **How it works:**
    /// 1. Uses PendST bit to detect when the SysTick ISR is pending (hardware wrapped but ISR hasn't run)
    /// 2. If PendST is set, adds +1 to the wrap count to compensate for the missed wrap
    /// 3. This allows monotonic time even when the ISR is delayed by up to one full wrap period
    ///
    /// **Design limit:**
    /// If the SysTick ISR is starved for MORE than one complete wrap period, this compensation
    /// becomes insufficient and monotonic violations occur. The ISR starvation detection logic
    /// in `diagnose_timing_violation()` identifies these as catastrophic "N+1 missed wraps".
    pub fn now(&self) -> u64 {
        let reload = self.reload_value as u64;
        let (wraps_u64, low_part) = self.stable_cycle_snapshot();

        // Calculate final time. Each full wrap is reload+1 cycles long.
        let reload_plus_1 = reload + 1;

        // If cycles computation would overflow u64, use u128 path from the start
        if wraps_u64 > (u64::MAX - low_part) / reload_plus_1 {
            let cycles128 = (wraps_u64 as u128) * (reload_plus_1 as u128) + (low_part as u128);
            let ticks128 = cycles128 * (self.multiplier as u128);
            return (ticks128 >> self.shift) as u64;
        }

        let total_cycles = wraps_u64 * reload_plus_1 + low_part;

        // Scale to ticks: ticks = cycles * multiplier >> shift, falling back to
        // u128 if the fixed-point multiply overflows u64.
        let (result, mul_overflow) = total_cycles.overflowing_mul(self.multiplier);
        if mul_overflow {
            let wide = (total_cycles as u128) * (self.multiplier as u128);
            return (wide >> self.shift) as u64;
        }
        result >> self.shift
    }

    /// Returns the current SysTick counter value.
    ///
    /// Under `cfg(test)` this reads the emulated register; on hardware it
    /// reads SYST->CVR via the `cortex-m` crate.
    pub fn get_syst(&self) -> u32 {
        #[cfg(test)]
        return self.current_systick.load(Ordering::SeqCst);

        #[cfg(all(not(test), feature = "cortex-m"))]
        return cortex_m::peripheral::SYST::get_current();

        #[cfg(all(not(test), not(feature = "cortex-m")))]
        panic!("This module requires the cortex-m crate to be available");
    }

    /// Returns the SysTick reload value configured for this timer.
    pub const fn reload_value(&self) -> u32 {
        self.reload_value
    }

    /// Returns the output tick frequency in Hz.
    pub const fn tick_hz(&self) -> u64 {
        self.tick_hz
    }

    /// Converts a nanosecond delay request into SysTick cycles, rounding up
    /// so a nonzero request never becomes a zero-cycle wait. Saturates at
    /// `u64::MAX` cycles.
    #[cfg(feature = "embedded-hal")]
    fn delay_cycles_from_ns(&self, ns: u32) -> u64 {
        if ns == 0 || self.systick_freq == 0 {
            return 0;
        }

        // Ceiling division in u128 to avoid overflow of ns * freq.
        let ticks = (((ns as u128) * (self.systick_freq as u128)) + (NANOS_PER_SECOND - 1))
            / NANOS_PER_SECOND;
        ticks.min(u64::MAX as u128) as u64
    }

    /// Returns the current absolute cycle count, wrapping on overflow.
    /// Wrapping arithmetic pairs with `wrapping_sub` in `delay_cycles_blocking`
    /// so elapsed-cycle math stays correct across a rollover.
    #[cfg(feature = "embedded-hal")]
    fn current_cycles_wrapping(&self) -> u64 {
        let reload_plus_1 = self.reload_value as u64 + 1;
        let (wraps_u64, low_part) = self.stable_cycle_snapshot();
        wraps_u64.wrapping_mul(reload_plus_1).wrapping_add(low_part)
    }

    #[cfg(feature = "embedded-hal")]
    /// Blocking delay helper.
    ///
    /// Requires the underlying SysTick timer to be started and actively
    /// advancing. If the hardware counter is stalled, this loop will not
    /// complete.
    fn delay_cycles_blocking(&self, cycles: u64) {
        if cycles == 0 {
            return;
        }

        let start = self.current_cycles_wrapping();
        while self.current_cycles_wrapping().wrapping_sub(start) < cycles {
            core::hint::spin_loop();
        }
    }

    /// Internal method to check if the SysTick interrupt is pending.
    ///
    /// Under `cfg(test)` this reads the emulated PENDSTSET flag; on hardware
    /// it reads SCB->ICSR via the `cortex-m` crate.
    fn is_systick_pending_internal(&self) -> bool {
        #[cfg(test)]
        return self.pendst_is_pending.load(Ordering::SeqCst);

        #[cfg(all(not(test), feature = "cortex-m"))]
        return cortex_m::peripheral::SCB::is_pendst_pending();

        #[cfg(all(not(test), not(feature = "cortex-m")))]
        return false; // Or panic, depending on desired behavior without cortex-m
    }

    /// Checks if the SysTick interrupt is pending (diagnostic method).
    #[cfg(feature = "diagnostics")]
    pub fn is_systick_pending(&self) -> bool {
        self.is_systick_pending_internal()
    }

    // Figure out a shift that leads to less precision loss.
    // Starts at 32 and widens the shift until `(tick_hz << shift) / systick_freq`
    // is nonzero, so extremely small tick_hz : systick_freq ratios still yield a
    // usable fixed-point multiplier.
    const fn compute_shift(tick_hz: u64, systick_freq: u64) -> u32 {
        let mut shift = 32;
        let mut multiplier = (tick_hz << shift) / systick_freq;
        while multiplier == 0 && shift < 64 {
            shift += 1;
            multiplier = (tick_hz << shift) / systick_freq;
        }
        shift
    }

    /// Creates a new timer that converts SysTick cycles to ticks at a specified frequency.
    ///
    /// # Arguments
    ///
    /// * `tick_hz` - The desired output frequency in Hz (e.g., 1000 for millisecond ticks)
    /// * `reload_value` - The SysTick reload value. Must be between 1 and 2^24-1.
    ///   This determines how many cycles occur between interrupts.
    /// * `systick_freq` - The frequency of the SysTick counter in Hz (typically CPU frequency)
    ///
    /// # Panics
    ///
    /// * If `reload_value` is 0 or greater than 2^24-1 (16,777,215)
    /// * If `systick_freq` is 0
    ///
    /// # Examples
    ///
    /// ```
    /// # use systick_timer::Timer;
    /// // Create a millisecond-resolution timer on a 48MHz CPU with reload value of 47,999
    /// let timer = Timer::new(1000, 47_999, 48_000_000);
    /// ```
    pub const fn new(tick_hz: u64, reload_value: u32, systick_freq: u64) -> Self {
        if reload_value > (1 << 24) - 1 {
            panic!("Reload value too large");
        }
        if reload_value == 0 {
            panic!("Reload value cannot be 0");
        }
        if systick_freq == 0 {
            panic!("SysTick frequency cannot be 0");
        }

        // Use a shift to maintain precision and keep multiplier within u64
        let shift = Self::compute_shift(tick_hz, systick_freq);
        let multiplier = (tick_hz << shift) / systick_freq;

        Timer {
            inner_wraps: AtomicU32::new(0),
            outer_wraps: AtomicU32::new(0),
            reload_value,
            #[cfg(feature = "embedded-hal")]
            systick_freq,
            tick_hz,
            multiplier,
            shift,
            #[cfg(test)]
            current_systick: AtomicU32::new(0),
            #[cfg(test)]
            after_v1_hook: None,
            #[cfg(test)]
            pendst_is_pending: AtomicBool::new(false),
        }
    }

    /// Call this if you haven't already started the timer.
    ///
    /// Configures SysTick to run from the core clock with this timer's
    /// reload value, clears the current count, and enables both the
    /// interrupt and the counter.
    #[cfg(feature = "cortex-m")]
    pub fn start(&self, syst: &mut cortex_m::peripheral::SYST) {
        syst.set_clock_source(cortex_m::peripheral::syst::SystClkSource::Core);
        syst.set_reload(self.reload_value);
        syst.clear_current();
        syst.enable_interrupt();
        syst.enable_counter();
    }

    /// Check if a time difference indicates ISR starvation beyond design limits.
    ///
    /// This timer implementation compensates for exactly one missed SysTick wrap using
    /// the PendST bit detection mechanism. If the SysTick ISR is starved longer than
    /// one complete wrap period, monotonic violations will occur.
    ///
    /// Returns `Some(total_missed_wraps)` if the backwards jump matches the pattern of
    /// ISR starvation (N+1 total missed wraps). Returns `None` for other timing issues.
    #[cfg(feature = "diagnostics")]
    pub fn diagnose_timing_violation(
        &self,
        current_time: u64,
        previous_time: u64,
        systick_freq: u64,
    ) -> Option<u32> {
        if current_time >= previous_time {
            return None; // Not a backwards jump
        }

        let backwards_jump = previous_time - current_time;
        let wrap_period_ns = ((self.reload_value as u64 + 1) * 1_000_000_000) / systick_freq;

        // Convert backwards_jump from ticks to nanoseconds for comparison
        // backwards_jump is in ticks scaled by tick_hz
        let backwards_jump_ns = (backwards_jump * 1_000_000_000) / self.tick_hz;

        // Check if backwards jump is close to N complete wrap periods
        // If so, this indicates N+1 total missed wraps (since PendST already compensated for 1)
        for observed_periods in 1..=3 {
            let expected_jump = observed_periods * wrap_period_ns;
            let tolerance = wrap_period_ns / 100; // 1% tolerance

            if backwards_jump_ns >= expected_jump.saturating_sub(tolerance)
                && backwards_jump_ns <= expected_jump + tolerance
            {
                return Some(observed_periods as u32 + 1); // +1 because PendST compensated for first wrap
            }
        }

        None
    }
}
343impl Timer {
344    // -------- test-only helpers ----------
345    #[cfg(test)]
346    pub fn set_syst(&self, value: u32) {
347        debug_assert!(
348            value <= self.reload_value,
349            "set_syst: value {} exceeds reload {}",
350            value,
351            self.reload_value
352        );
353        self.current_systick.store(value, Ordering::SeqCst);
354    }
355
356    #[cfg(test)]
357    pub fn set_after_v1_hook(&mut self, hook: Option<fn(&Timer)>) {
358        self.after_v1_hook = hook;
359    }
360
361    #[cfg(test)]
362    pub fn set_pendst_pending(&self, val: bool) {
363        self.pendst_is_pending.store(val, Ordering::SeqCst);
364    }
365}
366
#[cfg(feature = "embedded-hal")]
/// Busy-wait delay implementation backed by SysTick progress.
///
/// Callers must start the timer with [`Timer::start`] before using this trait.
/// If SysTick is not running, delay calls will not complete.
impl embedded_hal::delay::DelayNs for Timer {
    fn delay_ns(&mut self, ns: u32) {
        // Convert the request to cycles, then spin until they elapse.
        self.delay_cycles_blocking(self.delay_cycles_from_ns(ns));
    }
}
#[cfg(feature = "embedded-hal")]
/// Busy-wait delay implementation backed by SysTick progress.
///
/// Callers must start the timer with [`Timer::start`] before using this trait.
/// If SysTick is not running, delay calls will not complete.
impl embedded_hal::delay::DelayNs for &Timer {
    fn delay_ns(&mut self, ns: u32) {
        // Convert the request to cycles, then spin until they elapse.
        self.delay_cycles_blocking(self.delay_cycles_from_ns(ns));
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[should_panic]
    fn test_zero_systick_freq() {
        Timer::new(1000, 5, 0);
    }

    #[test]
    fn test_timer_new() {
        let timer = Timer::new(1000, 5, 12_000);
        timer.inner_wraps.store(4, Ordering::Relaxed); // 4 interrupts = 24 cycles
        timer.set_syst(3); // Start of next period
        assert_eq!(timer.now(), 2); // Should be ~2 ticks
    }

    #[test]
    fn test_compute_shift() {
        assert_eq!(Timer::compute_shift(1000, 12_000), 32);
        // This ratio overflows 32bit, so we shift
        assert_eq!(Timer::compute_shift(3, 16_000_000_000), 33);
    }

    #[cfg(feature = "embedded-hal")]
    #[test]
    fn test_delay_cycles_from_ns_rounds_up() {
        // 8 MHz SysTick: one cycle = 125 ns; sub-cycle requests round up.
        let timer = Timer::new(1_000_000, 5, 8_000_000);
        assert_eq!(timer.delay_cycles_from_ns(0), 0);
        assert_eq!(timer.delay_cycles_from_ns(1), 1);
        assert_eq!(timer.delay_cycles_from_ns(125), 1);
        assert_eq!(timer.delay_cycles_from_ns(126), 2);
    }

    #[cfg(feature = "embedded-hal")]
    #[test]
    fn test_delay_cycles_from_ns_saturates() {
        // Construct directly to get an absurd systick_freq that forces the
        // u64::MAX saturation path.
        let timer = Timer {
            inner_wraps: AtomicU32::new(0),
            outer_wraps: AtomicU32::new(0),
            reload_value: 5,
            systick_freq: u64::MAX,
            tick_hz: 1,
            multiplier: 0,
            shift: 0,
            current_systick: AtomicU32::new(0),
            after_v1_hook: None,
            pendst_is_pending: AtomicBool::new(false),
        };

        assert_eq!(timer.delay_cycles_from_ns(u32::MAX), u64::MAX);
    }

    #[cfg(feature = "embedded-hal")]
    #[test]
    fn test_delay_ns_trait_for_timer_accepts_zero_delay() {
        let mut timer = Timer::new(1_000_000, 5, 1_000_000);
        embedded_hal::delay::DelayNs::delay_ns(&mut timer, 0);
    }

    #[cfg(feature = "embedded-hal")]
    #[test]
    fn test_delay_ns_trait_for_timer_reference_accepts_zero_delay() {
        let timer = Timer::new(1_000_000, 5, 1_000_000);
        let mut delay = &timer;
        embedded_hal::delay::DelayNs::delay_ns(&mut delay, 0);
    }

    #[cfg(feature = "embedded-hal")]
    #[test]
    fn test_delay_ns_waits_full_duration_from_partial_tick_phase() {
        use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
        use std::sync::Arc;
        use std::thread;
        use std::time::Duration;

        // Step the emulated hardware one SysTick cycle: decrement VAL, or
        // fire the ISR and reload when it hits zero.
        fn advance_one_cycle(timer: &Timer, reload: u32) {
            let current = timer.get_syst();
            if current == 0 {
                timer.systick_handler();
                timer.set_syst(reload);
            } else {
                timer.set_syst(current - 1);
            }
        }

        const RELOAD: u32 = 9;
        // 10 Hz SysTick, 1 Hz ticks: a 1 s delay needs 10 full cycles.
        let timer = Arc::new(Timer::new(1, RELOAD, 10));
        timer.set_syst(1); // Start mid-period to exercise the partial-phase path

        let done = Arc::new(AtomicBool::new(false));
        let timer_for_thread = Arc::clone(&timer);
        let done_for_thread = Arc::clone(&done);

        let handle = thread::spawn(move || {
            let mut delay = &*timer_for_thread;
            embedded_hal::delay::DelayNs::delay_ns(&mut delay, 1_000_000_000);
            done_for_thread.store(true, AtomicOrdering::SeqCst);
        });

        thread::sleep(Duration::from_millis(10));
        assert!(!done.load(AtomicOrdering::SeqCst));

        // 9 cycles is one short of the required 10 - delay must still block.
        for _ in 0..9 {
            advance_one_cycle(&timer, RELOAD);
            thread::sleep(Duration::from_millis(1));
        }

        assert!(!done.load(AtomicOrdering::SeqCst));

        // The 10th cycle completes the delay.
        advance_one_cycle(&timer, RELOAD);
        thread::sleep(Duration::from_millis(10));

        assert!(done.load(AtomicOrdering::SeqCst));
        handle.join().unwrap();
    }

    #[test]
    fn test_timer_initial_state() {
        let timer = Timer::new(1000, 5, 12_000);
        assert_eq!(timer.now(), 0);
    }

    // Wrapper that keeps the emulated VAL register consistent with the
    // interrupt bookkeeping, so tests can't forget to reload after an ISR.
    struct TestTimer<const RELOAD: u32> {
        timer: Timer,
    }
    impl<const RELOAD: u32> TestTimer<RELOAD> {
        fn new(tick_hz: u64, systick_freq: u64) -> Self {
            Self {
                timer: Timer::new(tick_hz, RELOAD, systick_freq),
            }
        }
        // Simulate one SysTick interrupt: run the handler, then reload VAL.
        fn interrupt(&mut self) {
            self.timer.systick_handler();
            self.timer.set_syst(RELOAD);
        }
        // Set VAL and return the resulting `now()` reading.
        fn set_tick(&mut self, tick: u32) -> u64 {
            assert!(tick <= RELOAD);
            self.timer.set_syst(tick);
            self.timer.now()
        }
    }

    #[test]
    fn test_timer_matching_rates() {
        let mut timer = TestTimer::<5>::new(1000, 1000);
        assert_eq!(timer.set_tick(5), 0);
        assert_eq!(timer.set_tick(4), 1);
        assert_eq!(timer.set_tick(0), 5);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 6);
    }

    #[test]
    fn test_timer_tick_rate_2x() {
        let mut timer = TestTimer::<5>::new(2000, 1000);
        assert_eq!(timer.set_tick(5), 0);
        assert_eq!(timer.set_tick(4), 2);
        assert_eq!(timer.set_tick(0), 10);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 12);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 24);
    }

    #[test]
    fn test_systick_rate_2x() {
        let mut timer = TestTimer::<5>::new(1000, 2000);
        assert_eq!(timer.set_tick(5), 0);
        assert_eq!(timer.set_tick(4), 0);
        assert_eq!(timer.set_tick(3), 1);
        assert_eq!(timer.set_tick(2), 1);
        assert_eq!(timer.set_tick(0), 2);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 3);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 6);
    }

    #[test]
    fn test_outer_wraps_wrapping() {
        let mut timer = TestTimer::<5>::new(1000, 1000);
        // Set up for outer_wraps overflow
        timer.timer.inner_wraps.store(u32::MAX, Ordering::Relaxed);
        timer.timer.outer_wraps.store(u32::MAX, Ordering::Relaxed);
        timer.timer.set_syst(5);

        // One more interrupt should wrap outer_wraps
        timer.interrupt();
        // Should still count correctly despite wrapping
        // With matching rates, we expect total_cycles * (1000/1000) ticks
        // (the `as u64` cast truncates 2^64 back to 0, matching the rollover)
        assert_eq!(timer.set_tick(5), ((1u128 << 64) * 1000 / 1000) as u64);
    }

    #[test]
    fn test_extreme_rates() {
        // Test with very high tick rate vs systick rate (1000:1)
        let mut timer = TestTimer::<5>::new(1_000_000, 1000);
        assert_eq!(timer.set_tick(5), 0);
        timer.interrupt(); // One interrupt = 6 cycles, each cycle = 1000 ticks
        assert_eq!(timer.set_tick(5), 6000); // 6 cycles * 1000 ticks/cycle

        // Test with very low tick rate vs systick rate (1:1000)
        let mut timer = TestTimer::<5>::new(1000, 1_000_000);
        // With 1000:1 ratio and reload of 5 (6 cycles per interrupt)
        // We need (1_000_000/1000 * 6) = 6000 cycles for 6 ticks
        // So we need 1000 interrupts for 6 ticks
        for _ in 0..1000 {
            timer.interrupt();
        }
        assert_eq!(timer.set_tick(5), 5); // Should get 5 complete ticks
    }

    #[test]
    fn test_boundary_conditions() {
        // Test with minimum reload value
        let mut timer = TestTimer::<1>::new(1000, 1000);
        assert_eq!(timer.set_tick(1), 0);
        assert_eq!(timer.set_tick(0), 1);
        timer.interrupt();
        assert_eq!(timer.set_tick(1), 2);

        // Test with maximum reload value
        let mut timer = TestTimer::<0xFFFFFF>::new(1000, 1000);
        assert_eq!(timer.set_tick(0xFFFFFF), 0);
        assert_eq!(timer.set_tick(0xFFFF00), 255);
        assert_eq!(timer.set_tick(0), 0xFFFFFF);
    }

    #[test]
    fn test_partial_tick_accuracy() {
        // With matching rates, test partial periods
        let mut timer = TestTimer::<100>::new(1000, 1000);
        assert_eq!(timer.set_tick(100), 0); // Start of period
        assert_eq!(timer.set_tick(75), 25); // 25% through period = 25 ticks
        assert_eq!(timer.set_tick(50), 50); // 50% through period = 50 ticks
        assert_eq!(timer.set_tick(25), 75); // 75% through period = 75 ticks
        assert_eq!(timer.set_tick(0), 100); // End of period = 100 ticks
    }

    #[test]
    fn test_interrupt_race() {
        let mut timer = TestTimer::<5>::new(1000, 1000);
        timer.interrupt();
        timer.timer.set_syst(3);
        let t1 = timer.timer.now();
        timer.interrupt();
        let t2 = timer.timer.now();
        assert!(t2 > t1); // Monotonicity
    }

    #[test]
    fn test_rapid_interrupts() {
        let mut timer = TestTimer::<5>::new(1000, 1000);
        // With matching rates, each interrupt = 6 cycles = 6 ticks
        for _ in 0..10 {
            timer.interrupt();
        }
        // 10 interrupts * 6 cycles/interrupt * (1000/1000) = 60 ticks
        assert_eq!(timer.set_tick(5), 60);

        // At position 2, we're 3 cycles in = 3 more ticks
        assert_eq!(timer.set_tick(2), 63);
    }

    #[test]
    fn test_u64_overflow_scenario() {
        // Timer configuration from the real application:
        // TICK_RESOLUTION: 10_000_000 (tick_hz)
        // reload_value: 0xFFFFFF (16,777,215)
        // systick_freq: 100_000_000
        let timer = Timer::new(10_000_000, 0xFFFFFF, 100_000_000);

        let total_interrupts = 2560u64;
        let outer = (total_interrupts >> 32) as u32;
        let inner = total_interrupts as u32;

        timer.outer_wraps.store(outer, Ordering::Relaxed);
        timer.inner_wraps.store(inner, Ordering::Relaxed);

        // This call should take the u128 fallback path.
        let expected_ticks = 4_296_645_011;
        assert_eq!(timer.now(), expected_ticks);
    }

    #[test]
    fn test_monotonicity_around_wrap() {
        const RELOAD: u32 = 100;
        let timer = Timer::new(1_000, RELOAD, 1_000);

        // 1. Time right before the wrap
        timer.set_syst(1);
        let t1 = timer.now();

        // 2. Simulate the hardware wrap:
        //    - The ISR has NOT run yet, but the pending bit is set.
        timer.set_syst(RELOAD);
        timer.set_pendst_pending(true);

        // 3. Time right after the wrap
        let t2 = timer.now();

        // The key assertion: time must not go backward.
        // The wrap-detection logic observes the PendST bit and virtually adds
        // a wrap, preventing the non-monotonic jump.
        assert!(
            t2 >= t1,
            "Timer is not monotonic: t1 was {}, t2 was {}",
            t1,
            t2
        );

        // For sanity, let's check the values.
        // t1 should be close to the end of a period.
        // t2 should be at the beginning of the *next* period.
        assert_eq!(t1, 99);
        assert_eq!(t2, 101);
    }

    #[test]
    fn test_monotonicity_between_interrupts() {
        const RELOAD: u32 = 100;
        let timer = Timer::new(1_000, RELOAD, 1_000);

        // Set the counter to the reload value, no wraps yet.
        timer.set_syst(RELOAD);
        let t1 = timer.now();

        // Simulate time passing by decrementing the hardware counter.
        timer.set_syst(RELOAD / 2);
        let t2 = timer.now();

        // Decrement again.
        timer.set_syst(0);
        let t3 = timer.now();

        // Assert that time is always moving forward.
        assert!(t2 > t1, "t2 ({}) should be > t1 ({})", t2, t1);
        assert!(t3 > t2, "t3 ({}) should be > t2 ({})", t3, t2);

        // Also check the specific values for correctness.
        assert_eq!(t1, 0);
        assert_eq!(t2, 50);
        assert_eq!(t3, 100);
    }

    // Module-level reload used by the starved-ISR test below.
    const RELOAD: u32 = 100; // small for easy arithmetic; period = 101 cycles

    #[test]
    fn test_monotonicity_with_starved_isr() {
        // This test simulates the "hardest path" scenario:
        // 1. A wrap occurs, setting the PENDST bit.
        // 2. The SysTick ISR is "starved" by a higher-priority interrupt and does not run.
        // 3. Multiple calls to now() are made from the higher-priority context.
        // 4. All calls must see the pending wrap and report monotonic time.

        let timer = Timer::new(1_000, RELOAD, 1_000); // 1 tick per cycle

        // State 1: Right before a wrap.
        timer.set_syst(1);
        let t1 = timer.now();
        assert_eq!(t1, 100 - 1);

        // State 2: Hardware wraps, ISR is pended but does not run.
        // We manually simulate this state.
        timer.set_pendst_pending(true);
        timer.set_syst(RELOAD - 10); // Timer has wrapped and counted down a bit.

        // First call to now() after the wrap. It must see the pending bit.
        let t2 = timer.now();
        let expected_t2 = (0 + 1) * (RELOAD as u64 + 1) + (RELOAD as u64 - (RELOAD as u64 - 10));
        assert_eq!(t2, expected_t2);
        assert!(
            t2 > t1,
            "Time must advance after wrap. t1={}, t2={}",
            t1,
            t2
        );

        // State 3: More time passes, ISR is still starved.
        timer.set_syst(RELOAD - 20);

        // Second call to now(). It must still see the pending bit.
        let t3 = timer.now();
        let expected_t3 = (0 + 1) * (RELOAD as u64 + 1) + (RELOAD as u64 - (RELOAD as u64 - 20));
        assert_eq!(t3, expected_t3);
        assert!(
            t3 > t2,
            "Time must advance even if ISR is starved. t2={}, t3={}",
            t2,
            t3
        );

        // State 4: The ISR finally runs, clearing the pending bit and incrementing wraps.
        timer.set_pendst_pending(false);
        timer.systick_handler(); // This increments inner_wraps to 1.

        // Third call to now(). It should now use the updated wrap counter.
        let t4 = timer.now();
        let expected_t4 = 1 * (RELOAD as u64 + 1) + (RELOAD as u64 - (RELOAD as u64 - 20));
        assert_eq!(t4, expected_t4);
        assert_eq!(
            t4, t3,
            "Time should be consistent after ISR runs. t3={}, t4={}",
            t3, t4
        );
    }

    // The old tests for value-jump and COUNTFLAG are no longer relevant
    // as the core logic has been replaced. The new test above provides
    // superior coverage for the most critical race condition.
}
// Out-of-line stress tests live in `stress_test.rs` (test builds only).
#[cfg(test)]
mod stress_test;