// systick_timer/timer.rs

// SPDX-License-Identifier: Apache-2.0
2
3use core::sync::atomic::{AtomicU32, Ordering};
4
5#[cfg(test)]
6use core::sync::atomic::AtomicBool;
7
/// A 64-bit timer based on SysTick.
///
/// Stores wraparounds in 2 32-bit atomics. Scales the systick counts
/// to arbitrary frequency.
///
/// The 24-bit hardware counter provides the low-order cycles; the two
/// atomics together form a 64-bit count of completed SysTick periods.
pub struct Timer {
    inner_wraps: AtomicU32, // Counts SysTick interrupts (lower 32 bits of the wrap count)
    outer_wraps: AtomicU32, // Counts overflows of inner_wraps (upper 32 bits)
    reload_value: u32,      // SysTick reload value (max 2^24 - 1, a hardware limit)
    tick_hz: u64,           // Output frequency in Hz
    multiplier: u64,        // Precomputed (tick_hz << shift) / systick_freq, for scaling cycles to ticks
    shift: u32,             // Shift paired with `multiplier`; result = (cycles * multiplier) >> shift
    #[cfg(test)]
    current_systick: AtomicU32, // Emulated SYST current-value register for unit tests
    #[cfg(test)]
    after_v1_hook: Option<fn(&Timer)>, // injected nested call site (test-only)
    #[cfg(test)]
    pendst_is_pending: AtomicBool, // emulated SCB->ICSR PENDSTSET bit
}
26
27impl Timer {
28    /// SysTick handler.
29    ///
30    /// Call this from the SysTick interrupt handler.
31    pub fn systick_handler(&self) {
32        // Increment inner_wraps and check for overflow
33        let inner = self.inner_wraps.load(Ordering::Relaxed);
34        // Check for overflow (inner was u32::MAX)
35        // Store the incremented value
36        self.inner_wraps
37            .store(inner.wrapping_add(1), Ordering::SeqCst);
38        if inner == u32::MAX {
39            // Increment outer_wraps
40            let outer = self.outer_wraps.load(Ordering::Relaxed).wrapping_add(1);
41            self.outer_wraps.store(outer, Ordering::SeqCst);
42        }
43    }
44
    /// Robust `now()` (VAL-jump tie-breaker, no COUNTFLAG dependency).
    ///
    /// ## Design: One-Wrap Compensation via PendST Detection
    /// This implementation is designed to handle exactly **one missed SysTick wrap**.
    ///
    /// **How it works:**
    /// 1. Uses PendST bit to detect when the SysTick ISR is pending (hardware wrapped but ISR hasn't run)
    /// 2. If PendST is set, adds +1 to the wrap count to compensate for the missed wrap
    /// 3. This allows monotonic time even when the ISR is delayed by up to one full wrap period
    ///
    /// **Design limit:**
    /// If the SysTick ISR is starved for MORE than one complete wrap period, this compensation
    /// becomes insufficient and monotonic violations occur. The ISR starvation detection logic
    /// in `diagnose_timing_violation()` identifies these as catastrophic "N+1 missed wraps".
    pub fn now(&self) -> u64 {
        let reload = self.reload_value as u64;

        loop {
            // The order of these reads is critical to preventing race conditions.
            // 1. Read the high-level state (wrap counters).
            let in1 = self.inner_wraps.load(Ordering::SeqCst) as u64;
            let out1 = self.outer_wraps.load(Ordering::SeqCst) as u64;
            let wraps_pre = (out1 << 32) | in1;

            // 2. Read the low-level hardware value (SysTick counts DOWN from reload to 0).
            let val_before = self.get_syst() as u64;

            // 3. Re-read the high-level state to detect if an ISR ran during our reads.
            let in2 = self.inner_wraps.load(Ordering::SeqCst) as u64;
            let out2 = self.outer_wraps.load(Ordering::SeqCst) as u64;
            let wraps_post = (out2 << 32) | in2;

            // If the wrap counters changed, an ISR ran. Our snapshot is inconsistent,
            // so we must loop again to get a clean read.
            if wraps_pre != wraps_post {
                continue;
            }

            // If we're here, the wrap counters are stable, but we need to handle the race
            // where PendST could have flipped after wraps_pre but before wraps_post.
            // Re-sample both PendST and VAL now that we know the counters are consistent.
            let is_pending = self.is_systick_pending_internal();
            let val_after = self.get_syst() as u64;

            // Double-check that no ISR ran during our PendST/VAL re-sampling.
            let in3 = self.inner_wraps.load(Ordering::SeqCst) as u64;
            let out3 = self.outer_wraps.load(Ordering::SeqCst) as u64;
            let wraps_final = (out3 << 32) | in3;

            if wraps_final != wraps_pre {
                // Wrap counters changed during our final reads - loop again
                continue;
            }

            // Now we have a truly stable snapshot. Determine if a wrap occurred:
            // 1. PendST is set (hardware detected a wrap, ISR not yet run), or
            // 2. VAL increased (the down-counter can only jump up by reloading).
            let wrap_occurred = is_pending || val_after > val_before;

            // KEY DESIGN DECISION: This +1 compensation handles exactly ONE missed wrap.
            // If the ISR is starved for 2+ wrap periods, we get monotonic violations.
            let (wraps_u64, final_val) = if wrap_occurred {
                // A wrap occurred after we read wraps_pre. Use the post-wrap VAL reading.
                (wraps_pre + 1, val_after)
            } else {
                // No wrap occurred. Use the most recent VAL reading for consistency.
                (wraps_pre, val_after)
            };

            // Calculate final time: each completed wrap is (reload + 1) cycles,
            // plus the cycles consumed so far in the current period.
            let reload_plus_1 = reload + 1;
            let low_part = reload - final_val;

            // If cycles computation would overflow u64, use u128 path from the start
            if wraps_u64 > (u64::MAX - low_part) / reload_plus_1 {
                let cycles128 = (wraps_u64 as u128) * (reload_plus_1 as u128) + (low_part as u128);
                let ticks128 = cycles128 * (self.multiplier as u128);
                return (ticks128 >> self.shift) as u64;
            }

            let total_cycles = wraps_u64 * reload_plus_1 + low_part;

            // Scale to ticks: ticks = (cycles * multiplier) >> shift, widening
            // to u128 only when the u64 multiply would overflow.
            let (result, mul_overflow) = total_cycles.overflowing_mul(self.multiplier);
            if mul_overflow {
                let wide = (total_cycles as u128) * (self.multiplier as u128);
                return (wide >> self.shift) as u64;
            }
            return result >> self.shift;
        }
    }
137
    /// Returns the current SysTick counter value.
    ///
    /// In unit tests this reads the emulated counter (`current_systick`);
    /// with the `cortex-m` feature it reads the hardware SYST current-value
    /// register. With neither available there is nothing to read, so it panics.
    pub fn get_syst(&self) -> u32 {
        #[cfg(test)]
        return self.current_systick.load(Ordering::SeqCst);

        #[cfg(all(not(test), feature = "cortex-m"))]
        return cortex_m::peripheral::SYST::get_current();

        #[cfg(all(not(test), not(feature = "cortex-m")))]
        panic!("This module requires the cortex-m crate to be available");
    }
149
    /// Returns the SysTick reload value configured for this timer.
    ///
    /// One full counter period is `reload_value + 1` cycles.
    pub const fn reload_value(&self) -> u32 {
        self.reload_value
    }
154
    /// Returns the output tick frequency in Hz (the resolution of `now()`).
    pub const fn tick_hz(&self) -> u64 {
        self.tick_hz
    }
159
    /// Internal method to check if the SysTick interrupt is pending.
    ///
    /// In unit tests this reads the emulated PENDSTSET flag; with the
    /// `cortex-m` feature it reads SCB->ICSR. Without either it reports
    /// `false`, disabling the one-wrap compensation in `now()`.
    fn is_systick_pending_internal(&self) -> bool {
        #[cfg(test)]
        return self.pendst_is_pending.load(Ordering::SeqCst);

        #[cfg(all(not(test), feature = "cortex-m"))]
        return cortex_m::peripheral::SCB::is_pendst_pending();

        #[cfg(all(not(test), not(feature = "cortex-m")))]
        return false; // Or panic, depending on desired behavior without cortex-m
    }
171
    /// Checks if the SysTick interrupt is pending (diagnostic method).
    ///
    /// Public, feature-gated wrapper around the internal PendST check.
    #[cfg(feature = "diagnostics")]
    pub fn is_systick_pending(&self) -> bool {
        self.is_systick_pending_internal()
    }
177
178    // Figure out a shift that leads to less precision loss
179    const fn compute_shift(tick_hz: u64, systick_freq: u64) -> u32 {
180        let mut shift = 32;
181        let mut multiplier = (tick_hz << shift) / systick_freq;
182        while multiplier == 0 && shift < 64 {
183            shift += 1;
184            multiplier = (tick_hz << shift) / systick_freq;
185        }
186        shift
187    }
188
189    /// Creates a new timer that converts SysTick cycles to ticks at a specified frequency.
190    ///
191    /// # Arguments
192    ///
193    /// * `tick_hz` - The desired output frequency in Hz (e.g., 1000 for millisecond ticks)
194    /// * `reload_value` - The SysTick reload value. Must be between 1 and 2^24-1.
195    ///   This determines how many cycles occur between interrupts.
196    /// * `systick_freq` - The frequency of the SysTick counter in Hz (typically CPU frequency)
197    ///
198    /// # Panics
199    ///
200    /// * If `reload_value` is 0 or greater than 2^24-1 (16,777,215)
201    /// * If `systick_freq` is 0
202    ///
203    /// # Examples
204    ///
205    /// ```
206    /// # use systick_timer::Timer;
207    /// // Create a millisecond-resolution timer on a 48MHz CPU with reload value of 47,999
208    /// let timer = Timer::new(1000, 47_999, 48_000_000);
209    /// ```
210    pub const fn new(tick_hz: u64, reload_value: u32, systick_freq: u64) -> Self {
211        if reload_value > (1 << 24) - 1 {
212            panic!("Reload value too large");
213        }
214        if reload_value == 0 {
215            panic!("Reload value cannot be 0");
216        }
217
218        // Use a shift to maintain precision and keep multiplier within u64
219        let shift = Self::compute_shift(tick_hz, systick_freq);
220        let multiplier = (tick_hz << shift) / systick_freq;
221
222        Timer {
223            inner_wraps: AtomicU32::new(0),
224            outer_wraps: AtomicU32::new(0),
225            reload_value,
226            tick_hz,
227            multiplier,
228            shift,
229            #[cfg(test)]
230            current_systick: AtomicU32::new(0),
231            #[cfg(test)]
232            after_v1_hook: None,
233            #[cfg(test)]
234            pendst_is_pending: AtomicBool::new(false),
235        }
236    }
237
    /// Call this if you haven't already started the timer.
    ///
    /// Configures SysTick to run from the core clock with this timer's reload
    /// value, clears the current count, then enables the interrupt and finally
    /// the counter. Note the statement order: configuration happens before
    /// anything is enabled.
    #[cfg(feature = "cortex-m")]
    pub fn start(&self, syst: &mut cortex_m::peripheral::SYST) {
        syst.set_clock_source(cortex_m::peripheral::syst::SystClkSource::Core);
        syst.set_reload(self.reload_value);
        syst.clear_current();
        syst.enable_interrupt();
        syst.enable_counter();
    }
247
248    /// Check if a time difference indicates ISR starvation beyond design limits.
249    ///
250    /// This timer implementation compensates for exactly one missed SysTick wrap using
251    /// the PendST bit detection mechanism. If the SysTick ISR is starved longer than
252    /// one complete wrap period, monotonic violations will occur.
253    ///
254    /// Returns `Some(total_missed_wraps)` if the backwards jump matches the pattern of
255    /// ISR starvation (N+1 total missed wraps). Returns `None` for other timing issues.
256    #[cfg(feature = "diagnostics")]
257    pub fn diagnose_timing_violation(
258        &self,
259        current_time: u64,
260        previous_time: u64,
261        systick_freq: u64,
262    ) -> Option<u32> {
263        if current_time >= previous_time {
264            return None; // Not a backwards jump
265        }
266
267        let backwards_jump = previous_time - current_time;
268        let wrap_period_ns = ((self.reload_value as u64 + 1) * 1_000_000_000) / systick_freq;
269
270        // Convert backwards_jump from ticks to nanoseconds for comparison
271        // backwards_jump is in ticks scaled by tick_hz
272        let backwards_jump_ns = (backwards_jump * 1_000_000_000) / self.tick_hz;
273
274        // Check if backwards jump is close to N complete wrap periods
275        // If so, this indicates N+1 total missed wraps (since PendST already compensated for 1)
276        for observed_periods in 1..=3 {
277            let expected_jump = observed_periods * wrap_period_ns;
278            let tolerance = wrap_period_ns / 100; // 1% tolerance
279
280            if backwards_jump_ns >= expected_jump.saturating_sub(tolerance)
281                && backwards_jump_ns <= expected_jump + tolerance
282            {
283                return Some(observed_periods as u32 + 1); // +1 because PendST compensated for first wrap
284            }
285        }
286
287        None
288    }
289}
290
impl Timer {
    // -------- test-only helpers ----------

    /// Sets the emulated SysTick current-value register.
    ///
    /// Values must lie in `0..=reload_value`, mirroring what the real
    /// down-counter can hold.
    #[cfg(test)]
    pub fn set_syst(&self, value: u32) {
        debug_assert!(
            value <= self.reload_value,
            "set_syst: value {} exceeds reload {}",
            value,
            self.reload_value
        );
        self.current_systick.store(value, Ordering::SeqCst);
    }

    /// Installs (or clears) the nested-call test hook.
    ///
    /// NOTE(review): `after_v1_hook` is not invoked by `now()` in this file's
    /// current implementation — presumably used by the stress tests; verify.
    #[cfg(test)]
    pub fn set_after_v1_hook(&mut self, hook: Option<fn(&Timer)>) {
        self.after_v1_hook = hook;
    }

    /// Sets the emulated SCB->ICSR PENDSTSET flag read by `now()`.
    #[cfg(test)]
    pub fn set_pendst_pending(&self, val: bool) {
        self.pendst_is_pending.store(val, Ordering::SeqCst);
    }
}
314
315#[cfg(test)]
316mod tests {
317    use super::*;
318
    #[test]
    #[should_panic]
    fn test_zero_systick_freq() {
        // Constructing with a zero SysTick frequency must panic.
        Timer::new(1000, 5, 0);
    }

    #[test]
    fn test_timer_new() {
        // 1000 Hz ticks from a 12 kHz SysTick: 12 cycles per tick.
        let timer = Timer::new(1000, 5, 12_000);
        timer.inner_wraps.store(4, Ordering::Relaxed); // 4 interrupts = 24 cycles
        timer.set_syst(3); // Start of next period
        assert_eq!(timer.now(), 2); // Should be ~2 ticks
    }

    #[test]
    fn test_compute_shift() {
        assert_eq!(Timer::compute_shift(1000, 12_000), 32);
        // This ratio overflows 32bit, so we shift
        assert_eq!(Timer::compute_shift(3, 16_000_000_000), 33);
    }

    #[test]
    fn test_timer_initial_state() {
        // No interrupts recorded and counter at 0: elapsed ticks round to 0.
        let timer = Timer::new(1000, 5, 12_000);
        assert_eq!(timer.now(), 0);
    }
345
    /// Harness pairing a `Timer` with a const-generic reload value, so tests
    /// can drive the emulated counter and interrupts concisely.
    struct TestTimer<const RELOAD: u32> {
        timer: Timer,
    }
    impl<const RELOAD: u32> TestTimer<RELOAD> {
        fn new(tick_hz: u64, systick_freq: u64) -> Self {
            Self {
                timer: Timer::new(tick_hz, RELOAD, systick_freq),
            }
        }
        // Simulates one SysTick wrap: run the ISR, then put the emulated
        // counter back at the reload value.
        fn interrupt(&mut self) {
            self.timer.systick_handler();
            self.timer.set_syst(RELOAD);
        }
        // Sets the emulated counter position and returns the `now()` reading.
        fn set_tick(&mut self, tick: u32) -> u64 {
            assert!(tick <= RELOAD);
            self.timer.set_syst(tick);
            self.timer.now()
        }
    }
365
    #[test]
    fn test_timer_matching_rates() {
        // tick_hz == systick_freq: one output tick per SysTick cycle.
        let mut timer = TestTimer::<5>::new(1000, 1000);
        assert_eq!(timer.set_tick(5), 0);
        assert_eq!(timer.set_tick(4), 1);
        assert_eq!(timer.set_tick(0), 5);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 6);
    }

    #[test]
    fn test_timer_tick_rate_2x() {
        // Output rate is double the cycle rate: two ticks per cycle.
        let mut timer = TestTimer::<5>::new(2000, 1000);
        assert_eq!(timer.set_tick(5), 0);
        assert_eq!(timer.set_tick(4), 2);
        assert_eq!(timer.set_tick(0), 10);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 12);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 24);
    }

    #[test]
    fn test_systick_rate_2x() {
        // Cycle rate is double the output rate: ticks advance every 2 cycles.
        let mut timer = TestTimer::<5>::new(1000, 2000);
        assert_eq!(timer.set_tick(5), 0);
        assert_eq!(timer.set_tick(4), 0);
        assert_eq!(timer.set_tick(3), 1);
        assert_eq!(timer.set_tick(2), 1);
        assert_eq!(timer.set_tick(0), 2);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 3);
        timer.interrupt();
        assert_eq!(timer.set_tick(5), 6);
    }
401
    #[test]
    fn test_outer_wraps_wrapping() {
        let mut timer = TestTimer::<5>::new(1000, 1000);
        // Set up for outer_wraps overflow: both 32-bit halves at their max.
        timer.timer.inner_wraps.store(u32::MAX, Ordering::Relaxed);
        timer.timer.outer_wraps.store(u32::MAX, Ordering::Relaxed);
        timer.timer.set_syst(5);

        // One more interrupt should wrap outer_wraps
        timer.interrupt();
        // Should still count correctly despite wrapping.
        // With matching rates, we expect total_cycles * (1000/1000) ticks.
        // Note: (1 << 64) truncates to 0 in u64 — the full 64-bit wrap
        // counter itself has rolled over, so the expected reading is 0.
        assert_eq!(timer.set_tick(5), ((1u128 << 64) * 1000 / 1000) as u64);
    }

    #[test]
    fn test_extreme_rates() {
        // Test with very high tick rate vs systick rate (1000:1)
        let mut timer = TestTimer::<5>::new(1_000_000, 1000);
        assert_eq!(timer.set_tick(5), 0);
        timer.interrupt(); // One interrupt = 6 cycles, each cycle = 1000 ticks
        assert_eq!(timer.set_tick(5), 6000); // 6 cycles * 1000 ticks/cycle

        // Test with very low tick rate vs systick rate (1:1000)
        let mut timer = TestTimer::<5>::new(1000, 1_000_000);
        // With 1000:1 ratio and reload of 5 (6 cycles per interrupt)
        // We need (1_000_000/1000 * 6) = 6000 cycles for 6 ticks
        // So we need 1000 interrupts for 6 ticks
        for _ in 0..1000 {
            timer.interrupt();
        }
        assert_eq!(timer.set_tick(5), 5); // Should get 5 complete ticks
    }
435
    #[test]
    fn test_boundary_conditions() {
        // Test with minimum reload value (period of 2 cycles)
        let mut timer = TestTimer::<1>::new(1000, 1000);
        assert_eq!(timer.set_tick(1), 0);
        assert_eq!(timer.set_tick(0), 1);
        timer.interrupt();
        assert_eq!(timer.set_tick(1), 2);

        // Test with maximum reload value (full 24-bit counter)
        let mut timer = TestTimer::<0xFFFFFF>::new(1000, 1000);
        assert_eq!(timer.set_tick(0xFFFFFF), 0);
        assert_eq!(timer.set_tick(0xFFFF00), 255);
        assert_eq!(timer.set_tick(0), 0xFFFFFF);
    }

    #[test]
    fn test_partial_tick_accuracy() {
        // With matching rates, test partial periods
        let mut timer = TestTimer::<100>::new(1000, 1000);
        assert_eq!(timer.set_tick(100), 0); // Start of period
        assert_eq!(timer.set_tick(75), 25); // 25% through period = 25 ticks
        assert_eq!(timer.set_tick(50), 50); // 50% through period = 50 ticks
        assert_eq!(timer.set_tick(25), 75); // 75% through period = 75 ticks
        assert_eq!(timer.set_tick(0), 100); // End of period = 100 ticks
    }
462
    #[test]
    fn test_interrupt_race() {
        // A reading taken mid-period must be smaller than one taken after a
        // subsequent interrupt.
        let mut timer = TestTimer::<5>::new(1000, 1000);
        timer.interrupt();
        timer.timer.set_syst(3);
        let t1 = timer.timer.now();
        timer.interrupt();
        let t2 = timer.timer.now();
        assert!(t2 > t1); // Monotonicity
    }

    #[test]
    fn test_rapid_interrupts() {
        let mut timer = TestTimer::<5>::new(1000, 1000);
        // With matching rates, each interrupt = 6 cycles = 6 ticks
        for _ in 0..10 {
            timer.interrupt();
        }
        // 10 interrupts * 6 cycles/interrupt * (1000/1000) = 60 ticks
        assert_eq!(timer.set_tick(5), 60);

        // At position 2, we're 3 cycles in = 3 more ticks
        assert_eq!(timer.set_tick(2), 63);
    }
487
    #[test]
    fn test_u64_overflow_scenario() {
        // Timer configuration from the real application:
        // TICK_RESOLUTION: 10_000_000 (tick_hz)
        // reload_value: 0xFFFFFF (16,777,215)
        // systick_freq: 100_000_000
        let timer = Timer::new(10_000_000, 0xFFFFFF, 100_000_000);

        // Seed the split 64-bit wrap counter from a flat interrupt count.
        let total_interrupts = 2560u64;
        let outer = (total_interrupts >> 32) as u32;
        let inner = total_interrupts as u32;

        timer.outer_wraps.store(outer, Ordering::Relaxed);
        timer.inner_wraps.store(inner, Ordering::Relaxed);

        // This call should take the u128 fallback path.
        let expected_ticks = 4_296_645_011;
        assert_eq!(timer.now(), expected_ticks);
    }
507
    #[test]
    fn test_monotonicity_around_wrap() {
        const RELOAD: u32 = 100;
        let timer = Timer::new(1_000, RELOAD, 1_000);

        // 1. Time right before the wrap
        timer.set_syst(1);
        let t1 = timer.now();

        // 2. Simulate the hardware wrap:
        //    - The ISR has NOT run yet, but the pending bit is set.
        timer.set_syst(RELOAD);
        timer.set_pendst_pending(true);

        // 3. Time right after the wrap
        let t2 = timer.now();

        // The key assertion: time must not go backward.
        // now() observes the pending PendST bit and virtually adds a wrap,
        // preventing the non-monotonic jump.
        assert!(
            t2 >= t1,
            "Timer is not monotonic: t1 was {}, t2 was {}",
            t1,
            t2
        );

        // For sanity, let's check the values.
        // t1 should be close to the end of a period.
        // t2 should be at the beginning of the *next* period.
        assert_eq!(t1, 99);
        assert_eq!(t2, 101);
    }
541
542    #[test]
543    fn test_monotonicity_between_interrupts() {
544        const RELOAD: u32 = 100;
545        let timer = Timer::new(1_000, RELOAD, 1_000);
546
547        // Set the counter to the reload value, no wraps yet.
548        timer.set_syst(RELOAD);
549        let t1 = timer.now();
550
551        // Simulate time passing by decrementing the hardware counter.
552        timer.set_syst(RELOAD / 2);
553        let t2 = timer.now();
554
555        // Decrement again.
556        timer.set_syst(0);
557        let t3 = timer.now();
558
559        // Assert that time is always moving forward.
560        assert!(t2 > t1, "t2 ({}) should be > t1 ({})", t2, t1);
561        assert!(t3 > t2, "t3 ({}) should be > t2 ({})", t3, t2);
562
563        // Also check the specific values for correctness.
564        assert_eq!(t1, 0);
565        assert_eq!(t2, 50);
566        assert_eq!(t3, 100);
567    }
568
    const RELOAD: u32 = 100; // small for easy arithmetic; period = 101 cycles

    #[test]
    fn test_monotonicity_with_starved_isr() {
        // This test simulates the "hardest path" scenario:
        // 1. A wrap occurs, setting the PENDST bit.
        // 2. The SysTick ISR is "starved" by a higher-priority interrupt and does not run.
        // 3. Multiple calls to now() are made from the higher-priority context.
        // 4. All calls must see the pending wrap and report monotonic time.

        let timer = Timer::new(1_000, RELOAD, 1_000); // 1 tick per cycle

        // State 1: Right before a wrap.
        timer.set_syst(1);
        let t1 = timer.now();
        assert_eq!(t1, 100 - 1);

        // State 2: Hardware wraps, ISR is pended but does not run.
        // We manually simulate this state.
        timer.set_pendst_pending(true);
        timer.set_syst(RELOAD - 10); // Timer has wrapped and counted down a bit.

        // First call to now() after the wrap. It must see the pending bit.
        let t2 = timer.now();
        // One virtual wrap (101 cycles) plus the 10 cycles of the new period.
        let expected_t2 = (0 + 1) * (RELOAD as u64 + 1) + (RELOAD as u64 - (RELOAD as u64 - 10));
        assert_eq!(t2, expected_t2);
        assert!(
            t2 > t1,
            "Time must advance after wrap. t1={}, t2={}",
            t1,
            t2
        );

        // State 3: More time passes, ISR is still starved.
        timer.set_syst(RELOAD - 20);

        // Second call to now(). It must still see the pending bit.
        let t3 = timer.now();
        let expected_t3 = (0 + 1) * (RELOAD as u64 + 1) + (RELOAD as u64 - (RELOAD as u64 - 20));
        assert_eq!(t3, expected_t3);
        assert!(
            t3 > t2,
            "Time must advance even if ISR is starved. t2={}, t3={}",
            t2,
            t3
        );

        // State 4: The ISR finally runs, clearing the pending bit and incrementing wraps.
        timer.set_pendst_pending(false);
        timer.systick_handler(); // This increments inner_wraps to 1.

        // Third call to now(). It should now use the updated wrap counter.
        // The real wrap replaces the virtual (PendST) one, so time is unchanged.
        let t4 = timer.now();
        let expected_t4 = 1 * (RELOAD as u64 + 1) + (RELOAD as u64 - (RELOAD as u64 - 20));
        assert_eq!(t4, expected_t4);
        assert_eq!(
            t4, t3,
            "Time should be consistent after ISR runs. t3={}, t4={}",
            t3, t4
        );
    }

    // The old tests for value-jump and COUNTFLAG are no longer relevant
    // as the core logic has been replaced. The starved-ISR test above provides
    // superior coverage for the most critical race condition.
634}
635
636#[cfg(test)]
637mod stress_test;