// hopper_runtime/segment_borrow.rs
1//! Segment-level borrow registry for fine-grained access control.
2//!
3//! The account-level [`BorrowRegistry`](crate::borrow_registry) prevents
4//! aliasing across entire accounts. This module adds **segment-level**
5//! conflict detection: two borrows of the *same* account are allowed when
6//! their byte ranges don't overlap, or when both are read-only.
7//!
8//! ## Conflict Rules
9//!
10//! | Existing | New   | Overlapping? | Allowed |
11//! |----------|-------|--------------|---------|
12//! | Read     | Read  | yes          | ✅       |
13//! | Read     | Write | yes          | ❌       |
14//! | Write    | Read  | yes          | ❌       |
15//! | Write    | Write | yes          | ❌       |
16//! | *any*    | *any* | no           | ✅       |
17//!
18//! ## Zero-Cost Design
19//!
20//! - Fixed-capacity array (no heap)
21//! - Inline conflict checks
22//! - Deterministic iteration (bounded loop)
23
24use crate::address::Address;
25use crate::error::ProgramError;
26
27/// Maximum simultaneous segment borrows per instruction.
28///
29/// 16 covers any realistic instruction, most use 2-6 segments.
30/// Keeping it fixed avoids heap allocation while staying well within
31/// Solana's CU budget.  The compact entry representation keeps the
32/// total stack footprint under 200 bytes.
33pub const MAX_SEGMENT_BORROWS: usize = 16;
34
35/// Read or write access intent for a segment borrow.
36#[derive(Clone, Copy, PartialEq, Eq, Debug)]
37#[repr(u8)]
38pub enum AccessKind {
39    /// Shared (immutable) access.
40    Read = 0,
41    /// Exclusive (mutable) access.
42    Write = 1,
43}
44
45/// First-8-byte prefix of an account address, used as a fast-path
46/// comparator in the conflict scan.
47///
48/// The **audit-correct** model is fingerprint-then-verify: a hot-path
49/// `u64` compare rejects unrelated accounts immediately; the slow-path
50/// 32-byte compare fires only when the prefixes match. Because a
51/// full-address compare always follows, fingerprint collisions produce
52/// **no** false conflicts, they only cost one extra 32-byte compare
53/// for the extremely rare collision pair.
54#[inline(always)]
55fn address_fingerprint(address: &Address) -> u64 {
56    let bytes = address.as_array();
57    u64::from_le_bytes([
58        bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
59    ])
60}
61
62/// Full-identity equality check on the slow path.
63#[inline(always)]
64fn address_eq(a: &Address, b: &Address) -> bool {
65    a.as_array() == b.as_array()
66}
67
68/// A single active segment borrow.
69///
70/// Carries both a fast `u64` fingerprint and the full 32-byte account
71/// address. The fingerprint is the hot-path comparator; the full
72/// address resolves collisions so conflict detection is never
73/// probabilistic.
74#[derive(Clone, Copy, Debug)]
75pub struct SegmentBorrow {
76    /// Fast-path prefix of the account address.
77    pub key_fp: u64,
78    /// Full account address, authoritative identity, checked whenever
79    /// the fast-path fingerprint matches. Pre-audit we relied on the
80    /// fingerprint alone and claimed it was "collision-free for any
81    /// realistic instruction"; that was probabilistic, not a guarantee.
82    pub key: Address,
83    /// Byte offset within the account data.
84    pub offset: u32,
85    /// Byte size of the borrowed segment.
86    pub size: u32,
87    /// Access kind (read or write).
88    pub kind: AccessKind,
89}
90
91/// Check whether two byte ranges overlap.
92#[inline(always)]
93const fn ranges_overlap(a_off: u32, a_size: u32, b_off: u32, b_size: u32) -> bool {
94    let a_end = a_off as u64 + a_size as u64;
95    let b_end = b_off as u64 + b_size as u64;
96    // Non-overlapping iff one ends before the other starts.
97    !(a_end <= b_off as u64 || b_end <= a_off as u64)
98}
99
100/// Instruction-scoped segment borrow registry.
101///
102/// Tracks active segment borrows and enforces conflict rules. Designed
103/// for inline use in an execution context, no heap, no dynamic dispatch.
104///
105/// Uses compact 8-byte address fingerprints and a flat array of
106/// fixed-size entries.  Total stack footprint: ~280 bytes (vs ~1.3 KB
107/// with full 32-byte addresses and Option wrappers).
108///
109/// # Example
110///
111/// ```ignore
112/// let mut borrows = SegmentBorrowRegistry::new();
113/// borrows.register_read(&vault_key, 0, 8)?;   // read balance
114/// borrows.register_write(&vault_key, 8, 32)?;  // write metadata, OK, non-overlapping
115/// borrows.register_write(&vault_key, 0, 8)?;   // REJECTED, overlaps read
116/// ```
117pub struct SegmentBorrowRegistry {
118    entries: [SegmentBorrow; MAX_SEGMENT_BORROWS],
119    len: u8,
120}
121
122impl SegmentBorrowRegistry {
123    /// Create an empty registry.
124    #[inline(always)]
125    pub const fn new() -> Self {
126        const EMPTY: SegmentBorrow = SegmentBorrow {
127            key_fp: 0,
128            key: Address::new([0u8; 32]),
129            offset: 0,
130            size: 0,
131            kind: AccessKind::Read,
132        };
133        Self {
134            entries: [EMPTY; MAX_SEGMENT_BORROWS],
135            len: 0,
136        }
137    }
138
139    /// Number of active borrows.
140    #[inline(always)]
141    pub const fn len(&self) -> usize {
142        self.len as usize
143    }
144
145    /// Whether the registry is empty.
146    #[inline(always)]
147    pub const fn is_empty(&self) -> bool {
148        self.len == 0
149    }
150
151    /// Register a new read borrow and return the `SegmentBorrow`
152    /// record the caller can hand to `SegmentLease::new` for RAII
153    /// release. This is the plumbing that makes
154    /// [`crate::segment_lease::SegRef`] possible.
155    #[inline(always)]
156    pub fn register_leased_read(
157        &mut self,
158        key: &Address,
159        offset: u32,
160        size: u32,
161    ) -> Result<SegmentBorrow, ProgramError> {
162        let borrow = SegmentBorrow {
163            key_fp: address_fingerprint(key),
164            key: *key,
165            offset,
166            size,
167            kind: AccessKind::Read,
168        };
169        self.register(borrow)?;
170        Ok(borrow)
171    }
172
173    /// Mutable counterpart of [`register_leased_read`].
174    #[inline(always)]
175    pub fn register_leased_write(
176        &mut self,
177        key: &Address,
178        offset: u32,
179        size: u32,
180    ) -> Result<SegmentBorrow, ProgramError> {
181        let borrow = SegmentBorrow {
182            key_fp: address_fingerprint(key),
183            key: *key,
184            offset,
185            size,
186            kind: AccessKind::Write,
187        };
188        self.register(borrow)?;
189        Ok(borrow)
190    }
191
192    /// Register a new segment borrow, checking for conflicts.
193    ///
194    /// Returns `Err(AccountBorrowFailed)` if the new borrow overlaps an
195    /// existing borrow with incompatible access (read+write or write+write)
196    /// on the **same** account (full-address identity, not fingerprint).
197    #[inline(always)]
198    pub fn register(&mut self, new: SegmentBorrow) -> Result<(), ProgramError> {
199        let len = self.len as usize;
200        if len >= MAX_SEGMENT_BORROWS {
201            return Err(ProgramError::AccountBorrowFailed);
202        }
203
204        // Check conflicts against all active borrows. Fast path on the
205        // 8-byte fingerprint; slow path confirms with the full 32-byte
206        // address so fingerprint collisions cannot manufacture false
207        // conflicts between unrelated accounts.
208        let mut i = 0;
209        while i < len {
210            let existing = &self.entries[i];
211            if existing.key_fp == new.key_fp
212                && address_eq(&existing.key, &new.key)
213                && ranges_overlap(existing.offset, existing.size, new.offset, new.size)
214            {
215                match (existing.kind, new.kind) {
216                    (AccessKind::Read, AccessKind::Read) => {}
217                    _ => return Err(ProgramError::AccountBorrowFailed),
218                }
219            }
220            i += 1;
221        }
222
223        self.entries[len] = new;
224        self.len = (len + 1) as u8;
225        Ok(())
226    }
227
228    /// Convenience: register a read borrow for the given account region.
229    #[inline(always)]
230    pub fn register_read(
231        &mut self,
232        key: &Address,
233        offset: u32,
234        size: u32,
235    ) -> Result<(), ProgramError> {
236        self.register(SegmentBorrow {
237            key_fp: address_fingerprint(key),
238            key: *key,
239            offset,
240            size,
241            kind: AccessKind::Read,
242        })
243    }
244
245    /// Convenience: register a write borrow for the given account region.
246    #[inline(always)]
247    pub fn register_write(
248        &mut self,
249        key: &Address,
250        offset: u32,
251        size: u32,
252    ) -> Result<(), ProgramError> {
253        self.register(SegmentBorrow {
254            key_fp: address_fingerprint(key),
255            key: *key,
256            offset,
257            size,
258            kind: AccessKind::Write,
259        })
260    }
261
262    /// Release a previously registered borrow.
263    ///
264    /// Finds the first matching entry and removes it, compacting the array.
265    /// Identity is full-address (not fingerprint) to stay collision-safe.
266    #[inline(always)]
267    pub fn release(&mut self, borrow: &SegmentBorrow) -> bool {
268        let len = self.len as usize;
269        let mut i = 0;
270        while i < len {
271            let existing = &self.entries[i];
272            if existing.key_fp == borrow.key_fp
273                && address_eq(&existing.key, &borrow.key)
274                && existing.offset == borrow.offset
275                && existing.size == borrow.size
276                && existing.kind == borrow.kind
277            {
278                // Swap-remove: move last entry into this slot.
279                let new_len = len - 1;
280                self.len = new_len as u8;
281                if i < new_len {
282                    self.entries[i] = self.entries[new_len];
283                }
284                return true;
285            }
286            i += 1;
287        }
288        false
289    }
290
291    /// Reset the registry, clearing all active borrows.
292    #[inline(always)]
293    pub fn clear(&mut self) {
294        self.len = 0;
295    }
296
297    /// Check if a proposed borrow would conflict, without registering it.
298    ///
299    /// Uses full-address identity, fingerprint collisions do not
300    /// produce false positives.
301    #[inline(always)]
302    pub fn would_conflict(&self, proposed: &SegmentBorrow) -> bool {
303        let len = self.len as usize;
304        let mut i = 0;
305        while i < len {
306            let existing = &self.entries[i];
307            if existing.key_fp == proposed.key_fp
308                && address_eq(&existing.key, &proposed.key)
309                && ranges_overlap(
310                    existing.offset,
311                    existing.size,
312                    proposed.offset,
313                    proposed.size,
314                )
315            {
316                match (existing.kind, proposed.kind) {
317                    (AccessKind::Read, AccessKind::Read) => {}
318                    _ => return true,
319                }
320            }
321            i += 1;
322        }
323        false
324    }
325
326    /// Register a borrow and return an RAII guard that auto-releases it on drop.
327    ///
328    /// This is the preferred way to acquire segment borrows, the guard
329    /// ensures the borrow is released even if the caller returns early
330    /// via `?` or encounters an error.
331    ///
332    /// # Example
333    ///
334    /// ```ignore
335    /// {
336    ///     let _guard = borrows.register_guard_write(&key, 0, 8)?;
337    ///     // ... write to segment ...
338    /// } // guard dropped → borrow released
339    /// ```
340    #[inline(always)]
341    pub fn register_guard(
342        &mut self,
343        borrow: SegmentBorrow,
344    ) -> Result<SegmentBorrowGuard<'_>, ProgramError> {
345        self.register(borrow)?;
346        Ok(SegmentBorrowGuard {
347            registry: self,
348            borrow,
349        })
350    }
351
352    /// Register a read borrow with RAII auto-release.
353    #[inline(always)]
354    pub fn register_guard_read(
355        &mut self,
356        key: &Address,
357        offset: u32,
358        size: u32,
359    ) -> Result<SegmentBorrowGuard<'_>, ProgramError> {
360        let borrow = SegmentBorrow {
361            key_fp: address_fingerprint(key),
362            key: *key,
363            offset,
364            size,
365            kind: AccessKind::Read,
366        };
367        self.register_guard(borrow)
368    }
369
370    /// Register a write borrow with RAII auto-release.
371    #[inline(always)]
372    pub fn register_guard_write(
373        &mut self,
374        key: &Address,
375        offset: u32,
376        size: u32,
377    ) -> Result<SegmentBorrowGuard<'_>, ProgramError> {
378        let borrow = SegmentBorrow {
379            key_fp: address_fingerprint(key),
380            key: *key,
381            offset,
382            size,
383            kind: AccessKind::Write,
384        };
385        self.register_guard(borrow)
386    }
387
388    /// Visit each active borrow in registration order.
389    ///
390    /// Intended for diagnostics and for the `hopper explain`
391    /// introspection path, never for hot-path decisions.
392    #[inline]
393    pub fn for_each<F: FnMut(&SegmentBorrow)>(&self, mut f: F) {
394        let len = self.len as usize;
395        let mut i = 0;
396        while i < len {
397            f(&self.entries[i]);
398            i += 1;
399        }
400    }
401
402    /// Look up an active borrow by exact `(key, offset, size, kind)`.
403    #[inline]
404    pub fn find_exact(
405        &self,
406        key: &Address,
407        offset: u32,
408        size: u32,
409        kind: AccessKind,
410    ) -> Option<&SegmentBorrow> {
411        let fp = address_fingerprint(key);
412        let len = self.len as usize;
413        let mut i = 0;
414        while i < len {
415            let e = &self.entries[i];
416            if e.key_fp == fp
417                && address_eq(&e.key, key)
418                && e.offset == offset
419                && e.size == size
420                && e.kind as u8 == kind as u8
421            {
422                return Some(e);
423            }
424            i += 1;
425        }
426        None
427    }
428}
429
430/// RAII guard that releases a segment borrow when dropped.
431///
432/// Created by [`SegmentBorrowRegistry::register_guard()`] and its
433/// convenience wrappers. The borrow is automatically released from the
434/// registry on drop, preventing borrow leaks.
435pub struct SegmentBorrowGuard<'a> {
436    registry: &'a mut SegmentBorrowRegistry,
437    borrow: SegmentBorrow,
438}
439
440impl<'a> SegmentBorrowGuard<'a> {
441    /// Access kind of the guarded borrow.
442    #[inline(always)]
443    pub fn kind(&self) -> AccessKind {
444        self.borrow.kind
445    }
446
447    /// Byte offset of the guarded segment.
448    #[inline(always)]
449    pub fn offset(&self) -> u32 {
450        self.borrow.offset
451    }
452
453    /// Byte size of the guarded segment.
454    #[inline(always)]
455    pub fn size(&self) -> u32 {
456        self.borrow.size
457    }
458}
459
460impl<'a> Drop for SegmentBorrowGuard<'a> {
461    fn drop(&mut self) {
462        self.registry.release(&self.borrow);
463    }
464}
465
466#[cfg(kani)]
467mod kani_proofs {
468    use super::*;
469
470    #[kani::proof]
471    fn range_overlap_is_symmetric_for_arbitrary_u32s() {
472        let a_off: u32 = kani::any();
473        let a_size: u32 = kani::any();
474        let b_off: u32 = kani::any();
475        let b_size: u32 = kani::any();
476
477        assert_eq!(
478            ranges_overlap(a_off, a_size, b_off, b_size),
479            ranges_overlap(b_off, b_size, a_off, a_size)
480        );
481    }
482
483    #[kani::proof]
484    fn overlapping_write_blocks_same_account_accesses() {
485        let offset: u32 = kani::any();
486        let size: u32 = kani::any();
487        let delta: u32 = kani::any();
488        kani::assume(offset <= 1024);
489        kani::assume(size > 0 && size <= 64);
490        kani::assume(delta < size);
491
492        let key = Address::new([7u8; 32]);
493        let probe_offset = offset + delta;
494        let mut reg = SegmentBorrowRegistry::new();
495
496        assert!(reg.register_write(&key, offset, size).is_ok());
497        assert!(reg.register_read(&key, probe_offset, 1).is_err());
498        assert!(reg.register_write(&key, probe_offset, 1).is_err());
499        assert_eq!(reg.len(), 1);
500    }
501
502    #[kani::proof]
503    fn overlapping_reads_are_shared_for_same_account() {
504        let offset: u32 = kani::any();
505        let size: u32 = kani::any();
506        let delta: u32 = kani::any();
507        kani::assume(offset <= 1024);
508        kani::assume(size > 0 && size <= 64);
509        kani::assume(delta < size);
510
511        let key = Address::new([8u8; 32]);
512        let probe_offset = offset + delta;
513        let mut reg = SegmentBorrowRegistry::new();
514
515        assert!(reg.register_read(&key, offset, size).is_ok());
516        assert!(reg.register_read(&key, probe_offset, 1).is_ok());
517        assert_eq!(reg.len(), 2);
518    }
519
520    #[kani::proof]
521    fn fingerprint_collision_different_addresses_do_not_conflict() {
522        let key_a = Address::new([9u8; 32]);
523        let mut key_b_bytes = [9u8; 32];
524        key_b_bytes[8] = 10;
525        let key_b = Address::new(key_b_bytes);
526        let mut reg = SegmentBorrowRegistry::new();
527
528        assert_eq!(address_fingerprint(&key_a), address_fingerprint(&key_b));
529        assert_ne!(key_a.as_array(), key_b.as_array());
530        assert!(reg.register_write(&key_a, 0, 8).is_ok());
531        assert!(reg.register_write(&key_b, 0, 8).is_ok());
532        assert_eq!(reg.len(), 2);
533    }
534
535    #[kani::proof]
536    fn release_removes_exact_borrow_and_preserves_others() {
537        let key = Address::new([11u8; 32]);
538        let mut reg = SegmentBorrowRegistry::new();
539
540        let first = reg.register_leased_read(&key, 0, 8).unwrap();
541        let second = reg.register_leased_write(&key, 8, 8).unwrap();
542        assert!(reg.release(&first));
543
544        assert_eq!(reg.len(), 1);
545        assert!(reg.find_exact(&key, 0, 8, AccessKind::Read).is_none());
546        assert!(reg.find_exact(&key, 8, 8, AccessKind::Write).is_some());
547        assert!(reg.release(&second));
548        assert!(reg.is_empty());
549    }
550}
551
552// ── Tests ────────────────────────────────────────────────────────────
553
554#[cfg(test)]
555mod tests {
556    use super::*;
557    use crate::Address;
558
559    fn test_addr(seed: u8) -> Address {
560        Address::new([seed; 32])
561    }
562
563    #[test]
564    fn read_read_same_range_allowed() {
565        let mut reg = SegmentBorrowRegistry::new();
566        let key = test_addr(1);
567        assert!(reg.register_read(&key, 0, 8).is_ok());
568        assert!(reg.register_read(&key, 0, 8).is_ok());
569        assert_eq!(reg.len(), 2);
570    }
571
572    #[test]
573    fn read_write_same_range_rejected() {
574        let mut reg = SegmentBorrowRegistry::new();
575        let key = test_addr(1);
576        assert!(reg.register_read(&key, 0, 8).is_ok());
577        assert!(reg.register_write(&key, 0, 8).is_err());
578    }
579
580    #[test]
581    fn write_write_same_range_rejected() {
582        let mut reg = SegmentBorrowRegistry::new();
583        let key = test_addr(1);
584        assert!(reg.register_write(&key, 0, 8).is_ok());
585        assert!(reg.register_write(&key, 0, 8).is_err());
586    }
587
588    #[test]
589    fn write_read_same_range_rejected() {
590        let mut reg = SegmentBorrowRegistry::new();
591        let key = test_addr(1);
592        assert!(reg.register_write(&key, 0, 8).is_ok());
593        assert!(reg.register_read(&key, 0, 8).is_err());
594    }
595
596    #[test]
597    fn non_overlapping_write_write_allowed() {
598        let mut reg = SegmentBorrowRegistry::new();
599        let key = test_addr(1);
600        // balance: [0..8), metadata: [8..40)
601        assert!(reg.register_write(&key, 0, 8).is_ok());
602        assert!(reg.register_write(&key, 8, 32).is_ok());
603    }
604
605    #[test]
606    fn partially_overlapping_rejected() {
607        let mut reg = SegmentBorrowRegistry::new();
608        let key = test_addr(1);
609        // [0..16) and [8..24) overlap at [8..16)
610        assert!(reg.register_write(&key, 0, 16).is_ok());
611        assert!(reg.register_write(&key, 8, 16).is_err());
612    }
613
614    #[test]
615    fn different_accounts_always_allowed() {
616        let mut reg = SegmentBorrowRegistry::new();
617        assert!(reg.register_write(&test_addr(1), 0, 8).is_ok());
618        assert!(reg.register_write(&test_addr(2), 0, 8).is_ok());
619    }
620
621    #[test]
622    fn release_then_reacquire() {
623        let mut reg = SegmentBorrowRegistry::new();
624        let key = test_addr(1);
625        let borrow = SegmentBorrow {
626            key_fp: address_fingerprint(&key),
627            key,
628            offset: 0,
629            size: 8,
630            kind: AccessKind::Write,
631        };
632        assert!(reg.register(borrow).is_ok());
633        assert!(reg.register_write(&key, 0, 8).is_err()); // conflict
634        assert!(reg.release(&borrow));
635        assert!(reg.register_write(&key, 0, 8).is_ok()); // now OK
636    }
637
638    #[test]
639    fn capacity_limit() {
640        let mut reg = SegmentBorrowRegistry::new();
641        for i in 0..MAX_SEGMENT_BORROWS {
642            assert!(reg.register_read(&test_addr(1), i as u32 * 8, 8).is_ok());
643        }
644        // One more should fail.
645        assert!(reg.register_read(&test_addr(1), 256, 8).is_err());
646    }
647
648    #[test]
649    fn would_conflict_does_not_mutate() {
650        let mut reg = SegmentBorrowRegistry::new();
651        let key = test_addr(1);
652        assert!(reg.register_write(&key, 0, 8).is_ok());
653        let proposed = SegmentBorrow {
654            key_fp: address_fingerprint(&key),
655            key,
656            offset: 0,
657            size: 8,
658            kind: AccessKind::Write,
659        };
660        assert!(reg.would_conflict(&proposed));
661        assert_eq!(reg.len(), 1); // unchanged
662    }
663
664    #[test]
665    fn adjacent_ranges_no_conflict() {
666        let mut reg = SegmentBorrowRegistry::new();
667        let key = test_addr(1);
668        // [0..8) and [8..16) are adjacent, not overlapping.
669        assert!(reg.register_write(&key, 0, 8).is_ok());
670        assert!(reg.register_write(&key, 8, 8).is_ok());
671    }
672
673    // ── SegmentBorrowGuard RAII tests ────────────────────────────────
674    //
675    // The guard holds `&mut SegmentBorrowRegistry`, which provides
676    // compile-time exclusion: the borrow checker prevents any registry
677    // access while a guard is alive, giving *stronger* protection than
678    // runtime conflict checks alone.  Tests verify the auto-release
679    // behavior by inspecting the registry after the guard drops.
680
681    #[test]
682    fn guard_auto_releases_write_on_drop() {
683        let mut reg = SegmentBorrowRegistry::new();
684        let key = test_addr(1);
685        {
686            let _guard = reg.register_guard_write(&key, 0, 8).unwrap();
687            // guard alive, registry exclusively borrowed at compile time
688        }
689        // After drop: slot freed, len back to 0.
690        assert_eq!(reg.len(), 0);
691        // Re-acquire the same range, proves release happened.
692        assert!(reg.register_write(&key, 0, 8).is_ok());
693    }
694
695    #[test]
696    fn guard_auto_releases_read_on_drop() {
697        let mut reg = SegmentBorrowRegistry::new();
698        let key = test_addr(1);
699        {
700            let _guard = reg.register_guard_read(&key, 0, 8).unwrap();
701        }
702        assert_eq!(reg.len(), 0);
703        // Write now succeeds, the read borrow was released.
704        assert!(reg.register_write(&key, 0, 8).is_ok());
705    }
706
707    #[test]
708    fn sequential_guards_reuse_slot() {
709        let mut reg = SegmentBorrowRegistry::new();
710        let key = test_addr(1);
711        for _ in 0..4 {
712            let _guard = reg.register_guard_write(&key, 0, 8).unwrap();
713            // each iteration: acquire, drop at end of loop body
714        }
715        assert_eq!(reg.len(), 0);
716    }
717
718    #[test]
719    fn guard_accessors() {
720        let mut reg = SegmentBorrowRegistry::new();
721        let key = test_addr(1);
722        let guard = reg.register_guard_write(&key, 16, 32).unwrap();
723        assert_eq!(guard.kind(), AccessKind::Write);
724        assert_eq!(guard.offset(), 16);
725        assert_eq!(guard.size(), 32);
726    }
727
728    #[test]
729    fn guard_then_manual_register_ok() {
730        let mut reg = SegmentBorrowRegistry::new();
731        let key = test_addr(1);
732        {
733            let _guard = reg.register_guard_write(&key, 0, 8).unwrap();
734        }
735        // Guard released, manual register on overlapping range works.
736        assert!(reg.register_read(&key, 0, 8).is_ok());
737        assert_eq!(reg.len(), 1);
738    }
739}