sliding_ring/
lib.rs

1//! Fixed-size sliding ring buffer with anchor-preserving index math.
2//!
3//! `SlidingRing` keeps a circular array of 256 slots whose indices are addressed
4//! relative to an anchor coordinate (for example, the current best price in an
5//! orderbook). When the anchor moves forward or backward, the ring clears the
6//! newly entered slots and advances a pointer so that lookups by absolute
7//! coordinate stay O(1).
8//!
9//! ## Model overview
10//! - `anchor` represents the absolute coordinate associated with the pointer slot.
11//! - Moving the anchor by `k` rewires slots branchlessly and clears the `k` slots
12//!   that freshly enter the window (or the entire ring if `|k| >= 256`).
13//! - Reads/writes go through [`SlidingRing::slot`] / [`SlidingRing::slot_mut`], which translate absolute
14//!   coordinates into ring indices via modulo arithmetic.
15//! - [`SlidingRing::shift_to_keep_new_base`] exists for workflows that promote the next best
16//!   coordinate without clearing the new base slot (e.g., orderbooks).
17//!
18//! ## When to use it
19//! Use this crate any time you need a cache-friendly sliding window tied to a
20//! monotonically moving reference point and you can tolerate a fixed resolution
21//! of 256 discrete offsets. Typical use-cases include:
22//! - Aggregated orderbooks keyed by price ticks.
23//! - Sliding time buckets for telemetry or rate-limiters.
24//! - Game state timelines (e.g., ringed entity positions) where only a bounded
25//!   horizon surrounding the current tick matters.
26//!
27//! ## When *not* to use it
28//! - You need more than 256 distinct offsets at a time.
29//! - The anchor jumps arbitrarily with sparse occupancy (use a hashmap instead).
30//! - Slots require heap allocations on clear (prefer a structure that owns the
31//!   heap nodes elsewhere and references them by ID).
32//!
33//! ## Slot flexibility
34//! The buffer is generic over the stored slot type via [`Slot`]. Implement the
35//! trait on your own type to hook into the ring’s lifecycle, or use one of the
36//! bundled implementations (numeric primitives, `Option<T>`, or `Cell<T>`).
37//! For ultra-low latency users, the main primitive is
38//! [`SlidingRing::slices_from_anchor`], which returns the two contiguous spans
39//! that cover the ring starting at the anchor. Operate on those slices directly
40//! with SIMD or unrolled loops when you need raw throughput. The iterator
41//! variants (`iter*`) simply layer absolute coordinate reporting on top of the
42//! same pointer arithmetic and remain zero-cost.
43//! Pair those slices with a stable SIMD helper (e.g., the [`wide`](https://crates.io/crates/wide)
44//! crate demonstrated in `examples/simd_scan.rs`) to keep portable performance on stable Rust.
45//!
46//! ## Example
47//!
48//! ```rust
49//! use sliding_ring::{SlidingRing, Slot};
50//!
51//! #[derive(Debug, Default)]
52//! struct Level { qty: u64 }
53//!
54//! impl Slot for Level {
55//!     fn init() -> Self {
56//!         Self { qty: 0 }
57//!     }
58//!
59//!     fn clear(&mut self) {
60//!         self.qty = 0;
61//!     }
62//! }
63//!
64//! let mut ring = SlidingRing::<Level>::new(1_000);
65//! ring.slot_mut(1_005).qty = 10;
66//! ring.shift_to(1_005);
67//! assert_eq!(ring.slot_at_pointer().qty, 10);
68//! ```
69//!
70//! ### Telemetry counter example
71//!
72//! ```rust
73//! # use sliding_ring::{SlidingRing, Slot, Direction};
74//! #[derive(Clone, Copy, Default)]
75//! struct Bucket { hits: u64 }
76//! # impl Slot for Bucket { fn init() -> Self { Bucket { hits: 0 } } fn clear(&mut self) { self.hits = 0; } }
77//! let mut ring = SlidingRing::<Bucket>::new(0);
78//! ring.slot_mut(5).hits += 1;
79//! ring.slot_mut(6).hits += 3;
80//! let sum: u64 = ring
81//!     .iter_from_anchor(Direction::Forward)
82//!     .map(|(_, bucket)| bucket.hits)
83//!     .sum();
84//! assert_eq!(sum, 4);
85//! ```
86
87use core::{array::from_fn, cell::Cell, fmt, marker::PhantomData};
88
/// Number of slots in the ring.
pub const RING_SIZE: usize = 256;
/// Raw index into the ring's backing array. Being a `u8`, all wrapping
/// arithmetic on it is automatically modulo [`RING_SIZE`] (256).
pub type RingIndex = u8;
92
/// Trait that describes how to create and reset an element stored in the ring.
///
/// The ring treats `init()` and a freshly `clear()`ed slot as the same
/// "empty" state (the bundled `Cell` impl clears by writing `T::init()`),
/// so implementations should keep the two in agreement.
pub trait Slot: Sized {
    /// Produce a fresh slot in the cleared state.
    fn init() -> Self;

    /// Reset the slot to its cleared state.
    fn clear(&mut self);

    /// Conditionally clear the slot. Implementations may override this to use
    /// branchless logic for primitive types.
    ///
    /// The `branchless` shift paths call this once per slot, so a mask-based
    /// override (as the numeric impls provide) removes a data-dependent
    /// branch from that hot loop.
    #[inline(always)]
    fn clear_if(&mut self, should_clear: bool) {
        if should_clear {
            self.clear();
        }
    }
}
110
// Implements `Slot` for the integer primitives: zero is the cleared state,
// and `clear_if` is overridden with a branch-free AND mask so per-slot
// clearing in the branchless shift loop compiles without a branch.
macro_rules! impl_numeric_slot {
    ($($ty:ty),+ $(,)?) => {
        $(
            impl Slot for $ty {
                #[inline(always)]
                fn init() -> Self {
                    0 as $ty
                }

                #[inline(always)]
                fn clear(&mut self) {
                    *self = 0 as $ty;
                }

                #[inline(always)]
                fn clear_if(&mut self, should_clear: bool) {
                    // Mask is all-zeros (clear) or all-ones (keep); `&=`
                    // applies the decision without branching.
                    let keep_mask = if should_clear { 0 as $ty } else { !0 as $ty };
                    *self &= keep_mask;
                }
            }
        )+
    };
}

impl_numeric_slot!(
    u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize
);
138
139impl<T> Slot for Option<T> {
140    #[inline(always)]
141    fn init() -> Self {
142        None
143    }
144
145    #[inline(always)]
146    fn clear(&mut self) {
147        *self = None;
148    }
149}
150
151impl<T: Slot + Copy> Slot for Cell<T> {
152    #[inline(always)]
153    fn init() -> Self {
154        Cell::new(T::init())
155    }
156
157    #[inline(always)]
158    fn clear(&mut self) {
159        self.set(T::init());
160    }
161
162    #[inline(always)]
163    fn clear_if(&mut self, should_clear: bool) {
164        if should_clear {
165            self.set(T::init());
166        }
167    }
168}
169
/// Ring that slides over signed coordinates while keeping slot access O(1).
pub struct SlidingRing<T: Slot> {
    // Backing storage; logical positions are addressed relative to `pointer`.
    slots: [T; RING_SIZE],
    // Ring index of the slot that currently corresponds to `anchor`.
    pointer: RingIndex,
    // Absolute coordinate mapped to the `pointer` slot.
    anchor: i128,
}
176
177impl<T: Slot> SlidingRing<T> {
178    /// Create a new ring anchored at `anchor`.
179    pub fn new(anchor: i128) -> Self {
180        Self {
181            slots: from_fn(|_| T::init()),
182            pointer: 0,
183            anchor,
184        }
185    }
186
187    /// Current anchor value the ring is aligned to.
188    #[inline(always)]
189    pub fn anchor(&self) -> i128 {
190        self.anchor
191    }
192
193    /// Index of the slot that corresponds to the anchor value.
194    #[inline(always)]
195    pub fn pointer(&self) -> RingIndex {
196        self.pointer
197    }
198
199    /// Return an immutable reference to the slot at the anchor.
200    #[inline(always)]
201    pub fn slot_at_pointer(&self) -> &T {
202        &self.slots[self.pointer as usize]
203    }
204
205    /// Return a mutable reference to the slot at the anchor.
206    #[inline(always)]
207    pub fn slot_at_pointer_mut(&mut self) -> &mut T {
208        &mut self.slots[self.pointer as usize]
209    }
210
211    /// Map an absolute coordinate to the underlying ring index.
212    #[inline(always)]
213    pub fn index_for(&self, coordinate: i128) -> RingIndex {
214        let diff = diff_mod_256(coordinate.wrapping_sub(self.anchor));
215        self.pointer.wrapping_add(diff)
216    }
217
218    /// Immutable access to the slot for `coordinate`.
219    #[inline(always)]
220    pub fn slot(&self, coordinate: i128) -> &T {
221        let idx = self.index_for(coordinate);
222        &self.slots[idx as usize]
223    }
224
225    /// Mutable access to the slot for `coordinate`.
226    #[inline(always)]
227    pub fn slot_mut(&mut self, coordinate: i128) -> &mut T {
228        let idx = self.index_for(coordinate);
229        &mut self.slots[idx as usize]
230    }
231
232    /// Immutable access by raw ring index (0-255).
233    #[inline(always)]
234    pub fn slot_by_index(&self, index: RingIndex) -> &T {
235        &self.slots[index as usize]
236    }
237
238    /// Mutable access by raw ring index (0-255).
239    #[inline(always)]
240    pub fn slot_by_index_mut(&mut self, index: RingIndex) -> &mut T {
241        &mut self.slots[index as usize]
242    }
243
244    /// Shift the anchor to `new_anchor`, clearing newly entered slots.
245    #[inline(always)]
246    pub fn shift_to(&mut self, new_anchor: i128) {
247        let raw = new_anchor.wrapping_sub(self.anchor);
248        self.pointer = advance_and_clear(&mut self.slots, self.pointer, raw);
249        self.anchor = new_anchor;
250    }
251
252    /// Shift the anchor but keep the newly entered base when moving backward.
253    #[inline(always)]
254    pub fn shift_to_keep_new_base(&mut self, new_anchor: i128) {
255        let raw = new_anchor.wrapping_sub(self.anchor);
256        self.pointer = advance_and_clear_keep_new_base(&mut self.slots, self.pointer, raw);
257        self.anchor = new_anchor;
258    }
259
260    /// Clear every slot and reset the pointer to zero, preserving the anchor.
261    #[inline(always)]
262    pub fn clear_all(&mut self) {
263        for slot in self.slots.iter_mut() {
264            slot.clear();
265        }
266        self.pointer = 0;
267    }
268
269    /// Expose immutable slice reference for advanced use-cases.
270    #[inline(always)]
271    pub fn as_slice(&self) -> &[T; RING_SIZE] {
272        &self.slots
273    }
274
275    /// Expose mutable slice reference for advanced use-cases.
276    #[inline(always)]
277    pub fn as_mut_slice(&mut self) -> &mut [T; RING_SIZE] {
278        &mut self.slots
279    }
280
281    /// Returns the contiguous slices that cover the ring starting at the anchor.
282    ///
283    /// The first slice begins at the pointer slot; the second slice wraps around
284    /// to index 0 (and may be empty). This is ideal for SIMD scans.
285    ///
286    /// ```
287    /// # use sliding_ring::SlidingRing;
288    /// let mut ring = SlidingRing::<u64>::new(10);
289    /// *ring.slot_mut(10) = 42;
290    /// let (tail, head) = ring.slices_from_anchor();
291    /// assert_eq!(tail.first(), Some(&42));
292    /// assert!(head.is_empty());
293    /// ```
294    #[inline(always)]
295    pub fn slices_from_anchor(&self) -> (&[T], &[T]) {
296        let split = self.pointer as usize;
297        let (head, tail) = self.slots.split_at(split);
298        (tail, head)
299    }
300
301    /// Mutable variant of [`SlidingRing::slices_from_anchor`].
302    ///
303    /// ```
304    /// # use sliding_ring::SlidingRing;
305    /// let mut ring = SlidingRing::<u64>::new(0);
306    /// {
307    ///     let (tail, head) = ring.slices_from_anchor_mut();
308    ///     tail[0] = 7;
309    ///     assert!(head.is_empty());
310    /// }
311    /// assert_eq!(*ring.slot(0), 7);
312    /// ```
313    #[inline(always)]
314    pub fn slices_from_anchor_mut(&mut self) -> (&mut [T], &mut [T]) {
315        let split = self.pointer as usize;
316        let (head, tail) = self.slots.split_at_mut(split);
317        (tail, head)
318    }
319
320    /// Iterate over slots in storage order using raw pointers.
321    #[inline(always)]
322    pub fn iter(&self) -> Slots<'_, T> {
323        Slots::new(&self.slots)
324    }
325
326    /// Mutable iterator over slots in storage order using raw pointers.
327    #[inline(always)]
328    pub fn iter_mut(&mut self) -> SlotsMut<'_, T> {
329        SlotsMut::new(&mut self.slots)
330    }
331
332    /// Iterate starting at the anchor in the desired direction (wrapping once).
333    ///
334    /// ```
335    /// # use sliding_ring::{Direction, SlidingRing};
336    /// let mut ring = SlidingRing::<u64>::new(100);
337    /// *ring.slot_mut(100) = 1;
338    /// *ring.slot_mut(101) = 2;
339    /// let mut iter = ring.iter_from_anchor(Direction::Forward);
340    /// assert_eq!(iter.next().unwrap().1, &1);
341    /// assert_eq!(iter.next().unwrap().0, 101);
342    /// ```
343    #[inline(always)]
344    pub fn iter_from_anchor(&self, direction: Direction) -> AnchorIter<'_, T> {
345        AnchorIter::new(self, direction)
346    }
347
348    /// Mutable iteration starting at the anchor in the desired direction.
349    ///
350    /// ```
351    /// # use sliding_ring::{Direction, SlidingRing};
352    /// let mut ring = SlidingRing::<u64>::new(0);
353    /// for (_, slot) in ring.iter_from_anchor_mut(Direction::Forward).take(2) {
354    ///     *slot = 5;
355    /// }
356    /// assert_eq!(*ring.slot(0), 5);
357    /// assert_eq!(*ring.slot(1), 5);
358    /// ```
359    #[inline(always)]
360    pub fn iter_from_anchor_mut(&mut self, direction: Direction) -> AnchorIterMut<'_, T> {
361        AnchorIterMut::new(self, direction)
362    }
363}
364
365impl<T: Slot + fmt::Debug> fmt::Debug for SlidingRing<T> {
366    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
367        f.debug_struct("SlidingRing")
368            .field("pointer", &self.pointer)
369            .field("anchor", &self.anchor)
370            .finish()
371    }
372}
373
/// Iteration order relative to the anchor.
///
/// Derives `PartialEq`/`Eq` so callers can compare directions directly
/// (e.g. in tests or configuration plumbing); this is purely additive.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Direction {
    /// Increasing absolute coordinates (useful for asks / forward scans).
    Forward,
    /// Decreasing absolute coordinates (useful for bids / backward scans).
    Backward,
}
382
383/// Raw-pointer iterator over an immutable ring slice.
384pub struct Slots<'a, T> {
385    ptr: *const T,
386    end: *const T,
387    _marker: PhantomData<&'a T>,
388}
389
390impl<'a, T> Slots<'a, T> {
391    #[inline(always)]
392    fn new(slice: &'a [T; RING_SIZE]) -> Self {
393        let ptr = slice.as_ptr();
394        unsafe {
395            Self {
396                ptr,
397                end: ptr.add(RING_SIZE),
398                _marker: PhantomData,
399            }
400        }
401    }
402}
403
404impl<'a, T> Iterator for Slots<'a, T> {
405    type Item = &'a T;
406
407    #[inline(always)]
408    fn next(&mut self) -> Option<Self::Item> {
409        if self.ptr == self.end {
410            return None;
411        }
412        unsafe {
413            let item = &*self.ptr;
414            self.ptr = self.ptr.add(1);
415            Some(item)
416        }
417    }
418
419    #[inline(always)]
420    fn size_hint(&self) -> (usize, Option<usize>) {
421        let len = unsafe { self.end.offset_from(self.ptr) as usize };
422        (len, Some(len))
423    }
424}
425
426impl<'a, T> ExactSizeIterator for Slots<'a, T> {
427    #[inline(always)]
428    fn len(&self) -> usize {
429        unsafe { self.end.offset_from(self.ptr) as usize }
430    }
431}
432
433impl<'a, T> core::iter::FusedIterator for Slots<'a, T> {}
434
435/// Raw-pointer iterator over a mutable ring slice.
436pub struct SlotsMut<'a, T> {
437    ptr: *mut T,
438    end: *mut T,
439    _marker: PhantomData<&'a mut T>,
440}
441
442impl<'a, T> SlotsMut<'a, T> {
443    #[inline(always)]
444    fn new(slice: &'a mut [T; RING_SIZE]) -> Self {
445        let ptr = slice.as_mut_ptr();
446        unsafe {
447            Self {
448                ptr,
449                end: ptr.add(RING_SIZE),
450                _marker: PhantomData,
451            }
452        }
453    }
454}
455
456impl<'a, T> Iterator for SlotsMut<'a, T> {
457    type Item = &'a mut T;
458
459    #[inline(always)]
460    fn next(&mut self) -> Option<Self::Item> {
461        if self.ptr == self.end {
462            return None;
463        }
464        unsafe {
465            let item = &mut *self.ptr;
466            self.ptr = self.ptr.add(1);
467            Some(item)
468        }
469    }
470
471    #[inline(always)]
472    fn size_hint(&self) -> (usize, Option<usize>) {
473        let len = unsafe { self.end.offset_from(self.ptr) as usize };
474        (len, Some(len))
475    }
476}
477
478impl<'a, T> ExactSizeIterator for SlotsMut<'a, T> {
479    #[inline(always)]
480    fn len(&self) -> usize {
481        unsafe { self.end.offset_from(self.ptr) as usize }
482    }
483}
484
485impl<'a, T> core::iter::FusedIterator for SlotsMut<'a, T> {}
486
/// Anchor-ordered iterator (immutable).
///
/// Yields `(absolute_coordinate, &slot)` pairs, starting at the ring's
/// anchor and walking exactly one revolution (256 items) in the chosen
/// direction, wrapping around the backing array as needed.
pub struct AnchorIter<'a, T: Slot> {
    // Slot the next call to `next` will yield; always within [base, end).
    ptr: *const T,
    // First slot of the backing array (wrap target).
    base: *const T,
    // One past the last slot of the backing array.
    end: *const T,
    // Items left to yield; starts at RING_SIZE, so exactly one revolution.
    remaining: usize,
    // Absolute coordinate of the slot at `ptr`.
    coord: i128,
    direction: Direction,
    _marker: PhantomData<&'a T>,
}

impl<'a, T: Slot> AnchorIter<'a, T> {
    #[inline(always)]
    fn new(ring: &'a SlidingRing<T>, direction: Direction) -> Self {
        let base = ring.slots.as_ptr();
        // SAFETY: `ring.pointer` is a u8 (<= 255 < RING_SIZE), and
        // `base.add(RING_SIZE)` is the one-past-the-end pointer of the array,
        // so both additions stay within the same allocation.
        let ptr = unsafe { base.add(ring.pointer as usize) };
        let end = unsafe { base.add(RING_SIZE) };
        Self {
            ptr,
            base,
            end,
            remaining: RING_SIZE,
            // The pointer slot's absolute coordinate is the anchor itself.
            coord: ring.anchor,
            direction,
            _marker: PhantomData,
        }
    }
}

impl<'a, T: Slot> Iterator for AnchorIter<'a, T> {
    type Item = (i128, &'a T);

    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }
        self.remaining -= 1;
        // SAFETY: the wrap logic below keeps `ptr` inside [base, end) at all
        // times, so dereferencing it is valid; `remaining` caps the walk at
        // one revolution.
        unsafe {
            let item = &*self.ptr;
            self.ptr = match self.direction {
                Direction::Forward => {
                    // Step forward, wrapping from the last slot back to base.
                    let next = self.ptr.add(1);
                    if next == self.end { self.base } else { next }
                }
                Direction::Backward => {
                    // Step backward, wrapping from base to the last slot.
                    if self.ptr == self.base {
                        self.end.sub(1)
                    } else {
                        self.ptr.sub(1)
                    }
                }
            };
            // Report the coordinate of the slot just yielded, then advance
            // the coordinate in the direction of travel (wrapping at the
            // i128 domain edges, mirroring the ring's anchor arithmetic).
            let coord = self.coord;
            match self.direction {
                Direction::Forward => self.coord = self.coord.wrapping_add(1),
                Direction::Backward => self.coord = self.coord.wrapping_sub(1),
            }
            Some((coord, item))
        }
    }

    #[inline(always)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining, Some(self.remaining))
    }
}

impl<'a, T: Slot> ExactSizeIterator for AnchorIter<'a, T> {
    #[inline(always)]
    fn len(&self) -> usize {
        self.remaining
    }
}

impl<'a, T: Slot> core::iter::FusedIterator for AnchorIter<'a, T> {}
563
/// Anchor-ordered iterator (mutable).
///
/// Yields `(absolute_coordinate, &mut slot)` pairs, starting at the ring's
/// anchor and walking exactly one revolution (256 items) in the chosen
/// direction, wrapping around the backing array as needed.
pub struct AnchorIterMut<'a, T: Slot> {
    // Slot the next call to `next` will yield; always within [base, end).
    ptr: *mut T,
    // First slot of the backing array (wrap target).
    base: *mut T,
    // One past the last slot of the backing array.
    end: *mut T,
    // Items left to yield; starts at RING_SIZE, so exactly one revolution.
    remaining: usize,
    // Absolute coordinate of the slot at `ptr`.
    coord: i128,
    direction: Direction,
    _marker: PhantomData<&'a mut T>,
}

impl<'a, T: Slot> AnchorIterMut<'a, T> {
    #[inline(always)]
    fn new(ring: &'a mut SlidingRing<T>, direction: Direction) -> Self {
        let base = ring.slots.as_mut_ptr();
        // SAFETY: `ring.pointer` is a u8 (<= 255 < RING_SIZE), and
        // `base.add(RING_SIZE)` is the one-past-the-end pointer of the array,
        // so both additions stay within the same allocation.
        let ptr = unsafe { base.add(ring.pointer as usize) };
        let end = unsafe { base.add(RING_SIZE) };
        Self {
            ptr,
            base,
            end,
            remaining: RING_SIZE,
            // The pointer slot's absolute coordinate is the anchor itself.
            coord: ring.anchor,
            direction,
            _marker: PhantomData,
        }
    }
}

impl<'a, T: Slot> Iterator for AnchorIterMut<'a, T> {
    type Item = (i128, &'a mut T);

    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }
        self.remaining -= 1;
        // SAFETY: `ptr` stays inside [base, end); `remaining` caps the walk
        // at one revolution, and a single revolution visits each of the 256
        // slots exactly once, so the yielded `&mut` references never alias.
        unsafe {
            let item = &mut *self.ptr;
            self.ptr = match self.direction {
                Direction::Forward => {
                    // Step forward, wrapping from the last slot back to base.
                    let next = self.ptr.add(1);
                    if next == self.end { self.base } else { next }
                }
                Direction::Backward => {
                    // Step backward, wrapping from base to the last slot.
                    if self.ptr == self.base {
                        self.end.sub(1)
                    } else {
                        self.ptr.sub(1)
                    }
                }
            };
            // Report the coordinate of the slot just yielded, then advance
            // the coordinate in the direction of travel.
            let coord = self.coord;
            match self.direction {
                Direction::Forward => self.coord = self.coord.wrapping_add(1),
                Direction::Backward => self.coord = self.coord.wrapping_sub(1),
            }
            Some((coord, item))
        }
    }

    #[inline(always)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining, Some(self.remaining))
    }
}

impl<'a, T: Slot> ExactSizeIterator for AnchorIterMut<'a, T> {
    #[inline(always)]
    fn len(&self) -> usize {
        self.remaining
    }
}

impl<'a, T: Slot> core::iter::FusedIterator for AnchorIterMut<'a, T> {}
640
641/// Reduce a signed difference mod 256 (ring size).
642#[inline]
643pub fn diff_mod_256(diff: i128) -> RingIndex {
644    (diff & 255) as RingIndex
645}
646
/// Branchless variant: advance `pointer` by `raw` steps, clearing every slot
/// that freshly enters the window. Visits all 256 slots and decides per slot
/// with mask arithmetic, so the loop body has no data-dependent branches.
#[inline]
#[cfg(feature = "branchless")]
pub fn advance_and_clear<T: Slot>(
    slice: &mut [T; RING_SIZE],
    pointer: RingIndex,
    raw: i128,
) -> RingIndex {
    // A predictable short-circuit avoids touching all 256 slots when no movement happened.
    if raw == 0 {
        return pointer;
    }
    let abs = raw.unsigned_abs();
    // Any |shift| >= 256 covers the entire window: clear every slot.
    let big = ((abs >> 8) != 0) as u64;
    let pos = (raw > 0) as u64;
    let neg = (raw < 0) as u64;
    // Shift magnitude reduced mod 256.
    let k_small = (abs & 0xFF) as u16;
    let old_ptr = pointer as u16;

    for j in 0u16..=255 {
        // Ring distance from the old pointer to slot j, forward / backward.
        let dist_fwd = j.wrapping_sub(old_ptr) & 0xFF;
        let dist_back = old_ptr.wrapping_sub(j) & 0xFF;
        // Forward move of k clears distances 0..k (old anchor through new-1),
        // matching the scalar version's `0..k` loop.
        let clear_fwd = ((dist_fwd < k_small) as u64) & pos;
        // Backward move of k clears distances 1..=k (slots entering behind),
        // matching the scalar version's `1..=k` loop.
        let clear_back = (((dist_back != 0) as u64) & ((dist_back <= k_small) as u64)) & neg;
        let clear = big | (clear_fwd | clear_back);
        slice[j as usize].clear_if(clear != 0);
    }
    pointer.wrapping_add(diff_mod_256(raw))
}
675
676#[inline]
677#[cfg(not(feature = "branchless"))]
678pub fn advance_and_clear<T: Slot>(
679    slice: &mut [T; RING_SIZE],
680    pointer: RingIndex,
681    raw: i128,
682) -> RingIndex {
683    if raw >= 256 || raw <= -256 {
684        for slot in slice.iter_mut() {
685            slot.clear();
686        }
687        return pointer.wrapping_add(diff_mod_256(raw));
688    }
689    if raw > 0 {
690        let k = raw as u16;
691        for i in 0..k {
692            let idx = pointer.wrapping_add(i as u8);
693            slice[idx as usize].clear();
694        }
695        pointer.wrapping_add(k as u8)
696    } else if raw < 0 {
697        let k = (-raw) as u16;
698        for i in 1..=k {
699            let idx = pointer.wrapping_sub(i as u8);
700            slice[idx as usize].clear();
701        }
702        pointer.wrapping_sub(k as u8)
703    } else {
704        pointer
705    }
706}
707
/// Branchless variant of the keep-new-base shift: same mask loop as
/// `advance_and_clear`, except that a backward move uses a strict
/// `dist_back < k` test so the slot at distance k — the new base — is kept.
#[inline]
#[cfg(feature = "branchless")]
pub fn advance_and_clear_keep_new_base<T: Slot>(
    slice: &mut [T; RING_SIZE],
    pointer: RingIndex,
    raw: i128,
) -> RingIndex {
    // The zero-shift path is branchless after this point, so early-return cheaply.
    if raw == 0 {
        return pointer;
    }
    let abs = raw.unsigned_abs();
    // Any |shift| >= 256 covers the entire window: clear every slot
    // (including the new base).
    let big = ((abs >> 8) != 0) as u64;
    let pos = (raw > 0) as u64;
    let neg = (raw < 0) as u64;
    // Shift magnitude reduced mod 256.
    let k_small = (abs & 0xFF) as u16;
    let old_ptr = pointer as u16;
    for j in 0u16..=255 {
        // Ring distance from the old pointer to slot j, forward / backward.
        let dist_fwd = j.wrapping_sub(old_ptr) & 0xFF;
        let dist_back = old_ptr.wrapping_sub(j) & 0xFF;
        // Forward moves behave exactly like `advance_and_clear`.
        let clear_fwd = ((dist_fwd < k_small) as u64) & pos;
        // Strict `<` (vs `<=` in `advance_and_clear`) spares the new base.
        let clear_back = (((dist_back != 0) as u64) & ((dist_back < k_small) as u64)) & neg;
        let clear = big | (clear_fwd | clear_back);
        slice[j as usize].clear_if(clear != 0);
    }
    pointer.wrapping_add(diff_mod_256(raw))
}
735
736#[inline]
737#[cfg(not(feature = "branchless"))]
738pub fn advance_and_clear_keep_new_base<T: Slot>(
739    slice: &mut [T; RING_SIZE],
740    pointer: RingIndex,
741    raw: i128,
742) -> RingIndex {
743    if raw >= 256 || raw <= -256 {
744        for slot in slice.iter_mut() {
745            slot.clear();
746        }
747        return pointer.wrapping_add(diff_mod_256(raw));
748    }
749    if raw > 0 {
750        let k = raw as u16;
751        for i in 0..k {
752            let idx = pointer.wrapping_add(i as u8);
753            slice[idx as usize].clear();
754        }
755        pointer.wrapping_add(k as u8)
756    } else if raw < 0 {
757        let k = (-raw) as u16;
758        for i in 1..k {
759            let idx = pointer.wrapping_sub(i as u8);
760            slice[idx as usize].clear();
761        }
762        pointer.wrapping_sub(k as u8)
763    } else {
764        pointer
765    }
766}
767
#[cfg(test)]
mod tests {
    use super::*;

    // `diff as u8`-style reduction must agree with mathematical mod 256
    // (`rem_euclid`) across the full i128 domain, including both extremes.
    #[test]
    fn diff_mod_matches_reference() {
        let xs = [
            i128::MIN,
            i128::MIN / 2,
            -65_536,
            -10_000,
            -1024,
            -256,
            -255,
            -1,
            0,
            1,
            127,
            254,
            255,
            256,
            1024,
            10_000,
            65_536,
            i128::MAX / 2,
            i128::MAX,
        ];
        for &d in &xs {
            let m = diff_mod_256(d);
            let expected = (d.rem_euclid(256)) as u8;
            assert_eq!(m, expected);
        }
    }

    #[test]
    fn sliding_ring_remaps_coordinates() {
        let mut ring = SlidingRing::<u64>::new(1_000);
        *ring.slot_mut(1_005) = 10;
        assert_eq!(*ring.slot(1_005), 10);
        // After shifting, the same absolute coordinate must resolve to the
        // same stored value via the (moved) pointer slot.
        ring.shift_to(1_005);
        assert_eq!(*ring.slot_at_pointer(), 10);
        assert_eq!(ring.anchor(), 1_005);
    }

    #[test]
    fn forward_shift_clears_entered_range() {
        let base = 50_000i128;
        let mut ring = SlidingRing::<u64>::new(base);
        for off in 0u16..=255 {
            *ring.slot_mut(base + off as i128) = 1;
        }
        // Moving forward by 10 must clear exactly the 10 slots that leave the
        // old window (coordinates base..base+9 re-enter as stale aliases).
        ring.shift_to(base + 10);
        for j in 0..10 {
            assert_eq!(*ring.slot(base + j as i128), 0);
        }
        for j in 10..=255 {
            assert_eq!(*ring.slot(base + j as i128), 1);
        }
    }

    #[test]
    fn backward_shift_clears_entered_range() {
        let base = 60_000i128;
        let mut ring = SlidingRing::<u64>::new(base);
        for off in 0u16..=255 {
            *ring.slot_mut(base + off as i128) = 1;
        }
        ring.shift_to(base - 5);
        for i in 1..=5 {
            assert_eq!(*ring.slot(base - i as i128), 0);
        }
        for off in 0u16..=255 {
            let price = base + off as i128;
            // base - 1 ..= base - 5 alias base + 255 ..= base + 251 in the
            // 256-slot ring, so those top offsets are the cleared ones.
            let cleared = off >= 251;
            if !cleared {
                assert_eq!(*ring.slot(price), 1);
            }
        }
    }

    #[test]
    fn large_shift_resets_all() {
        let base = 70_000i128;
        let mut ring = SlidingRing::<u64>::new(base);
        for off in 0u16..=255 {
            *ring.slot_mut(base + off as i128) = 1;
        }
        // |shift| >= 256 means no old slot survives in the new window.
        ring.shift_to(base + 512);
        for off in 0u16..=255 {
            assert_eq!(*ring.slot(base + off as i128), 0);
        }
    }

    #[test]
    fn option_slots_clear_back_to_none() {
        let mut ring = SlidingRing::<Option<u32>>::new(0);
        *ring.slot_mut(5) = Some(42);
        assert_eq!(ring.slot(5), &Some(42));
        ring.shift_to(260); // large leap clears entire ring
        assert!(ring.slot(260).is_none());
    }

    #[test]
    fn cell_slots_follow_inner_slot_semantics() {
        let mut ring = SlidingRing::<Cell<u32>>::new(10);
        ring.slot_mut(10).set(7);
        assert_eq!(ring.slot(10).get(), 7);
        ring.shift_to(20);
        assert_eq!(ring.slot(20).get(), 0);
    }

    #[test]
    fn iterators_cover_all_slots_in_storage_order() {
        let mut ring = SlidingRing::<u64>::new(0);
        for (i, slot) in ring.iter_mut().enumerate() {
            *slot = i as u64;
        }
        for (i, slot) in ring.iter().enumerate() {
            assert_eq!(*slot, i as u64);
        }
    }

    #[test]
    fn slices_from_anchor_split_correctly() {
        let mut ring = SlidingRing::<u64>::new(0);
        for (i, slot) in ring.iter_mut().enumerate() {
            *slot = i as u64;
        }
        ring.shift_to(100);
        // First returned span runs from the pointer slot to end of storage;
        // the second is the wrapped prefix before the pointer.
        let (head, tail) = ring.slices_from_anchor();
        assert_eq!(head.len() + tail.len(), RING_SIZE);
        assert_eq!(head.len(), RING_SIZE - ring.pointer as usize);
        assert_eq!(tail.len(), ring.pointer as usize);
    }

    #[test]
    fn anchor_iter_traverses_forward() {
        let mut ring = SlidingRing::<u64>::new(10);
        ring.shift_to(50);
        for (i, slot) in ring.iter_mut().enumerate() {
            *slot = i as u64;
        }
        let mut coords = Vec::new();
        for (price, slot) in ring.iter_from_anchor(Direction::Forward).take(5) {
            coords.push((price, *slot));
        }
        assert_eq!(coords[0].0, ring.anchor());
        assert_eq!(coords[1].0, ring.anchor() + 1);
    }

    #[test]
    fn anchor_iter_traverses_backward() {
        let mut ring = SlidingRing::<u64>::new(10);
        ring.shift_to(50);
        for (i, slot) in ring.iter_mut().enumerate() {
            *slot = i as u64;
        }
        let mut coords = Vec::new();
        for (price, _) in ring.iter_from_anchor(Direction::Backward).take(3) {
            coords.push(price);
        }
        assert_eq!(coords[0], ring.anchor());
        assert_eq!(coords[1], ring.anchor() - 1);
    }

    #[test]
    fn index_for_handles_wrapping_differences() {
        // A near-maximal span exercises wrapping subtraction in index_for.
        let anchor = i128::MIN + 5;
        let ring = SlidingRing::<u64>::new(anchor);
        let coordinate = i128::MAX - 7;
        let idx = ring.index_for(coordinate);
        assert_eq!(idx, diff_mod_256(coordinate.wrapping_sub(anchor)));
    }

    #[test]
    fn shift_handles_overflowing_differences() {
        // The target wraps past i128::MAX; shift math must stay consistent.
        let start = i128::MAX - 100;
        let mut ring = SlidingRing::<u64>::new(start);
        let target = start.wrapping_add(300);
        ring.shift_to(target);
        assert_eq!(ring.anchor(), target);
        assert_eq!(ring.pointer(), diff_mod_256(target.wrapping_sub(start)));
    }

    #[test]
    fn anchor_iter_wraps_coordinates() {
        // Reported coordinates wrap at the i128 domain edges in both
        // directions, mirroring the ring's wrapping anchor arithmetic.
        let anchor = i128::MAX - 1;
        let mut ring = SlidingRing::<u8>::new(anchor);
        let forward: Vec<_> = ring
            .iter_from_anchor(Direction::Forward)
            .take(3)
            .map(|(coord, _)| coord)
            .collect();
        assert_eq!(
            forward,
            vec![anchor, anchor.wrapping_add(1), anchor.wrapping_add(2)]
        );

        let backward_anchor = i128::MIN + 1;
        ring.shift_to(backward_anchor);
        let backward: Vec<_> = ring
            .iter_from_anchor(Direction::Backward)
            .take(3)
            .map(|(coord, _)| coord)
            .collect();
        assert_eq!(
            backward,
            vec![
                backward_anchor,
                backward_anchor.wrapping_sub(1),
                backward_anchor.wrapping_sub(2)
            ]
        );
    }

    #[test]
    fn shift_to_keep_new_base_preserves_new_anchor_backward() {
        assert_keep_new_base_backward();
    }

    // Re-run the same scenario with the branchless implementation enabled.
    #[cfg(feature = "branchless")]
    #[test]
    fn shift_to_keep_new_base_preserves_new_anchor_backward_branchless() {
        assert_keep_new_base_backward();
    }

    #[test]
    fn shift_to_keep_new_base_clears_forward_range() {
        let base = 30_000i128;
        let mut ring = SlidingRing::<u64>::new(base);
        *ring.slot_mut(base) = 1;
        *ring.slot_mut(base + 1) = 2;
        *ring.slot_mut(base + 2) = 3;
        *ring.slot_mut(base - 1) = 9;

        // Forward keep-new-base behaves like a plain shift: the new base slot
        // was already in the window and is untouched.
        ring.shift_to_keep_new_base(base + 2);
        assert_eq!(*ring.slot(base), 0);
        assert_eq!(*ring.slot(base + 1), 0);
        assert_eq!(*ring.slot(base + 2), 3);
        assert_eq!(*ring.slot(base - 1), 9);
    }

    // Backward keep-new-base: slots strictly between old and new anchors are
    // cleared, while the new base slot itself keeps its value.
    fn assert_keep_new_base_backward() {
        let base = 90_000i128;
        let mut ring = SlidingRing::<u64>::new(base);
        *ring.slot_mut(base - 3) = 33;
        *ring.slot_mut(base - 2) = 22;
        *ring.slot_mut(base - 1) = 11;
        *ring.slot_mut(base + 5) = 77;

        ring.shift_to_keep_new_base(base - 3);
        assert_eq!(ring.anchor(), base - 3);
        assert_eq!(*ring.slot(base - 3), 33);
        assert_eq!(*ring.slot(base - 2), 0);
        assert_eq!(*ring.slot(base - 1), 0);
        assert_eq!(*ring.slot(base + 5), 77);
    }
}