// hopper_runtime/segment_borrow.rs
//! Segment-level borrow registry for fine-grained access control.
//!
//! The account-level [`BorrowRegistry`](crate::borrow_registry) prevents
//! aliasing across entire accounts. This module adds **segment-level**
//! conflict detection: two borrows of the *same* account are allowed when
//! their byte ranges don't overlap, or when both are read-only.
//!
//! ## Conflict Rules
//!
//! | Existing | New   | Overlapping? | Allowed |
//! |----------|-------|--------------|---------|
//! | Read     | Read  | yes          | ✅       |
//! | Read     | Write | yes          | ❌       |
//! | Write    | Read  | yes          | ❌       |
//! | Write    | Write | yes          | ❌       |
//! | *any*    | *any* | no           | ✅       |
//!
//! ## Zero-Cost Design
//!
//! - Fixed-capacity array (no heap)
//! - Inline conflict checks
//! - Deterministic iteration (bounded loop)
use crate::address::Address;
use crate::error::ProgramError;
/// Maximum simultaneous segment borrows per instruction.
///
/// 16 covers any realistic instruction, most use 2-6 segments.
/// Keeping it fixed avoids heap allocation while staying well within
/// Solana's CU budget.  The compact entry representation keeps the
/// total stack footprint under 200 bytes.
pub const MAX_SEGMENT_BORROWS: usize = 16;
/// Read or write access intent for a segment borrow.
///
/// `#[repr(u8)]` keeps the discriminant a single byte so
/// [`SegmentBorrow`] entries stay compact.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[repr(u8)]
pub enum AccessKind {
    /// Shared (immutable) access.
    Read = 0,
    /// Exclusive (mutable) access.
    Write = 1,
}
45/// First-8-byte prefix of an account address, used as a fast-path
46/// comparator in the conflict scan.
47///
48/// The **audit-correct** model is fingerprint-then-verify: a hot-path
49/// `u64` compare rejects unrelated accounts immediately; the slow-path
50/// 32-byte compare fires only when the prefixes match. Because a
51/// full-address compare always follows, fingerprint collisions produce
52/// **no** false conflicts, they only cost one extra 32-byte compare
53/// for the extremely rare collision pair.
54#[inline(always)]
55fn address_fingerprint(address: &Address) -> u64 {
56    let bytes = address.as_array();
57    u64::from_le_bytes([
58        bytes[0], bytes[1], bytes[2], bytes[3],
59        bytes[4], bytes[5], bytes[6], bytes[7],
60    ])
61}
62
63/// Full-identity equality check on the slow path.
64#[inline(always)]
65fn address_eq(a: &Address, b: &Address) -> bool {
66    a.as_array() == b.as_array()
67}
68
/// A single active segment borrow.
///
/// Carries both a fast `u64` fingerprint and the full 32-byte account
/// address. The fingerprint is the hot-path comparator; the full
/// address resolves collisions so conflict detection is never
/// probabilistic.
#[derive(Clone, Copy, Debug)]
pub struct SegmentBorrow {
    /// Fast-path prefix of the account address.
    pub key_fp: u64,
    /// Full account address, authoritative identity, checked whenever
    /// the fast-path fingerprint matches. Pre-audit we relied on the
    /// fingerprint alone and claimed it was "collision-free for any
    /// realistic instruction"; that was probabilistic, not a guarantee.
    pub key: Address,
    /// Byte offset within the account data.
    pub offset: u32,
    /// Byte size of the borrowed segment.
    pub size: u32,
    /// Access kind (read or write).
    pub kind: AccessKind,
}
/// Check whether two half-open byte ranges `[off, off + size)` overlap.
///
/// Arithmetic is widened to `u64` before adding, so `off + size` can
/// never overflow. The previous `u32` addition panicked in debug builds
/// and silently wrapped in release builds for ranges near `u32::MAX`,
/// which could make genuinely overlapping write ranges look disjoint —
/// exactly the aliasing this module exists to prevent.
///
/// NOTE(review): a zero-sized range at an offset strictly inside another
/// range still reports an overlap under this comparison (unchanged from
/// the original); confirm whether zero-size borrows should be
/// conflict-free if callers ever register them.
#[inline(always)]
const fn ranges_overlap(a_off: u32, a_size: u32, b_off: u32, b_size: u32) -> bool {
    let a_end = a_off as u64 + a_size as u64;
    let b_end = b_off as u64 + b_size as u64;
    // Non-overlapping iff one ends at or before the other starts.
    !(a_end <= b_off as u64 || b_end <= a_off as u64)
}
/// Instruction-scoped segment borrow registry.
///
/// Tracks active segment borrows and enforces conflict rules. Designed
/// for inline use in an execution context, no heap, no dynamic dispatch.
///
/// Uses compact 8-byte address fingerprints and a flat array of
/// fixed-size entries.  Total stack footprint: ~280 bytes (vs ~1.3 KB
/// with full 32-byte addresses and Option wrappers).
///
/// # Example
///
/// ```ignore
/// let mut borrows = SegmentBorrowRegistry::new();
/// borrows.register_read(&vault_key, 0, 8)?;   // read balance
/// borrows.register_write(&vault_key, 8, 32)?;  // write metadata, OK, non-overlapping
/// borrows.register_write(&vault_key, 0, 8)?;   // REJECTED, overlaps read
/// ```
pub struct SegmentBorrowRegistry {
    // Only entries[..len] are live; slots beyond `len` hold stale data
    // that is never read.
    entries: [SegmentBorrow; MAX_SEGMENT_BORROWS],
    // Active-entry count; u8 is ample for MAX_SEGMENT_BORROWS = 16.
    len: u8,
}
123impl SegmentBorrowRegistry {
124    /// Create an empty registry.
125    #[inline(always)]
126    pub const fn new() -> Self {
127        const EMPTY: SegmentBorrow = SegmentBorrow {
128            key_fp: 0,
129            key: Address::new([0u8; 32]),
130            offset: 0,
131            size: 0,
132            kind: AccessKind::Read,
133        };
134        Self {
135            entries: [EMPTY; MAX_SEGMENT_BORROWS],
136            len: 0,
137        }
138    }
139
140    /// Number of active borrows.
141    #[inline(always)]
142    pub const fn len(&self) -> usize {
143        self.len as usize
144    }
145
146    /// Whether the registry is empty.
147    #[inline(always)]
148    pub const fn is_empty(&self) -> bool {
149        self.len == 0
150    }
151
152    /// Register a new read borrow and return the `SegmentBorrow`
153    /// record the caller can hand to `SegmentLease::new` for RAII
154    /// release. This is the plumbing that makes
155    /// [`crate::segment_lease::SegRef`] possible.
156    #[inline(always)]
157    pub fn register_leased_read(
158        &mut self,
159        key: &Address,
160        offset: u32,
161        size: u32,
162    ) -> Result<SegmentBorrow, ProgramError> {
163        let borrow = SegmentBorrow {
164            key_fp: address_fingerprint(key),
165            key: *key,
166            offset,
167            size,
168            kind: AccessKind::Read,
169        };
170        self.register(borrow)?;
171        Ok(borrow)
172    }
173
174    /// Mutable counterpart of [`register_leased_read`].
175    #[inline(always)]
176    pub fn register_leased_write(
177        &mut self,
178        key: &Address,
179        offset: u32,
180        size: u32,
181    ) -> Result<SegmentBorrow, ProgramError> {
182        let borrow = SegmentBorrow {
183            key_fp: address_fingerprint(key),
184            key: *key,
185            offset,
186            size,
187            kind: AccessKind::Write,
188        };
189        self.register(borrow)?;
190        Ok(borrow)
191    }
192
193    /// Register a new segment borrow, checking for conflicts.
194    ///
195    /// Returns `Err(AccountBorrowFailed)` if the new borrow overlaps an
196    /// existing borrow with incompatible access (read+write or write+write)
197    /// on the **same** account (full-address identity, not fingerprint).
198    #[inline(always)]
199    pub fn register(&mut self, new: SegmentBorrow) -> Result<(), ProgramError> {
200        let len = self.len as usize;
201        if len >= MAX_SEGMENT_BORROWS {
202            return Err(ProgramError::AccountBorrowFailed);
203        }
204
205        // Check conflicts against all active borrows. Fast path on the
206        // 8-byte fingerprint; slow path confirms with the full 32-byte
207        // address so fingerprint collisions cannot manufacture false
208        // conflicts between unrelated accounts.
209        let mut i = 0;
210        while i < len {
211            let existing = &self.entries[i];
212            if existing.key_fp == new.key_fp
213                && address_eq(&existing.key, &new.key)
214                && ranges_overlap(existing.offset, existing.size, new.offset, new.size)
215            {
216                match (existing.kind, new.kind) {
217                    (AccessKind::Read, AccessKind::Read) => {}
218                    _ => return Err(ProgramError::AccountBorrowFailed),
219                }
220            }
221            i += 1;
222        }
223
224        self.entries[len] = new;
225        self.len = (len + 1) as u8;
226        Ok(())
227    }
228
229    /// Convenience: register a read borrow for the given account region.
230    #[inline(always)]
231    pub fn register_read(
232        &mut self,
233        key: &Address,
234        offset: u32,
235        size: u32,
236    ) -> Result<(), ProgramError> {
237        self.register(SegmentBorrow {
238            key_fp: address_fingerprint(key),
239            key: *key,
240            offset,
241            size,
242            kind: AccessKind::Read,
243        })
244    }
245
246    /// Convenience: register a write borrow for the given account region.
247    #[inline(always)]
248    pub fn register_write(
249        &mut self,
250        key: &Address,
251        offset: u32,
252        size: u32,
253    ) -> Result<(), ProgramError> {
254        self.register(SegmentBorrow {
255            key_fp: address_fingerprint(key),
256            key: *key,
257            offset,
258            size,
259            kind: AccessKind::Write,
260        })
261    }
262
263    /// Release a previously registered borrow.
264    ///
265    /// Finds the first matching entry and removes it, compacting the array.
266    /// Identity is full-address (not fingerprint) to stay collision-safe.
267    #[inline(always)]
268    pub fn release(&mut self, borrow: &SegmentBorrow) -> bool {
269        let len = self.len as usize;
270        let mut i = 0;
271        while i < len {
272            let existing = &self.entries[i];
273            if existing.key_fp == borrow.key_fp
274                && address_eq(&existing.key, &borrow.key)
275                && existing.offset == borrow.offset
276                && existing.size == borrow.size
277                && existing.kind == borrow.kind
278            {
279                // Swap-remove: move last entry into this slot.
280                let new_len = len - 1;
281                self.len = new_len as u8;
282                if i < new_len {
283                    self.entries[i] = self.entries[new_len];
284                }
285                return true;
286            }
287            i += 1;
288        }
289        false
290    }
291
292    /// Reset the registry, clearing all active borrows.
293    #[inline(always)]
294    pub fn clear(&mut self) {
295        self.len = 0;
296    }
297
298    /// Check if a proposed borrow would conflict, without registering it.
299    ///
300    /// Uses full-address identity, fingerprint collisions do not
301    /// produce false positives.
302    #[inline(always)]
303    pub fn would_conflict(&self, proposed: &SegmentBorrow) -> bool {
304        let len = self.len as usize;
305        let mut i = 0;
306        while i < len {
307            let existing = &self.entries[i];
308            if existing.key_fp == proposed.key_fp
309                && address_eq(&existing.key, &proposed.key)
310                && ranges_overlap(existing.offset, existing.size, proposed.offset, proposed.size)
311            {
312                match (existing.kind, proposed.kind) {
313                    (AccessKind::Read, AccessKind::Read) => {}
314                    _ => return true,
315                }
316            }
317            i += 1;
318        }
319        false
320    }
321
322    /// Register a borrow and return an RAII guard that auto-releases it on drop.
323    ///
324    /// This is the preferred way to acquire segment borrows, the guard
325    /// ensures the borrow is released even if the caller returns early
326    /// via `?` or encounters an error.
327    ///
328    /// # Example
329    ///
330    /// ```ignore
331    /// {
332    ///     let _guard = borrows.register_guard_write(&key, 0, 8)?;
333    ///     // ... write to segment ...
334    /// } // guard dropped → borrow released
335    /// ```
336    #[inline(always)]
337    pub fn register_guard(
338        &mut self,
339        borrow: SegmentBorrow,
340    ) -> Result<SegmentBorrowGuard<'_>, ProgramError> {
341        self.register(borrow)?;
342        Ok(SegmentBorrowGuard {
343            registry: self,
344            borrow,
345        })
346    }
347
348    /// Register a read borrow with RAII auto-release.
349    #[inline(always)]
350    pub fn register_guard_read(
351        &mut self,
352        key: &Address,
353        offset: u32,
354        size: u32,
355    ) -> Result<SegmentBorrowGuard<'_>, ProgramError> {
356        let borrow = SegmentBorrow {
357            key_fp: address_fingerprint(key),
358            key: *key,
359            offset,
360            size,
361            kind: AccessKind::Read,
362        };
363        self.register_guard(borrow)
364    }
365
366    /// Register a write borrow with RAII auto-release.
367    #[inline(always)]
368    pub fn register_guard_write(
369        &mut self,
370        key: &Address,
371        offset: u32,
372        size: u32,
373    ) -> Result<SegmentBorrowGuard<'_>, ProgramError> {
374        let borrow = SegmentBorrow {
375            key_fp: address_fingerprint(key),
376            key: *key,
377            offset,
378            size,
379            kind: AccessKind::Write,
380        };
381        self.register_guard(borrow)
382    }
383
384    /// Visit each active borrow in registration order.
385    ///
386    /// Intended for diagnostics and for the `hopper explain`
387    /// introspection path, never for hot-path decisions.
388    #[inline]
389    pub fn for_each<F: FnMut(&SegmentBorrow)>(&self, mut f: F) {
390        let len = self.len as usize;
391        let mut i = 0;
392        while i < len {
393            f(&self.entries[i]);
394            i += 1;
395        }
396    }
397
398    /// Look up an active borrow by exact `(key, offset, size, kind)`.
399    #[inline]
400    pub fn find_exact(
401        &self,
402        key: &Address,
403        offset: u32,
404        size: u32,
405        kind: AccessKind,
406    ) -> Option<&SegmentBorrow> {
407        let fp = address_fingerprint(key);
408        let len = self.len as usize;
409        let mut i = 0;
410        while i < len {
411            let e = &self.entries[i];
412            if e.key_fp == fp
413                && address_eq(&e.key, key)
414                && e.offset == offset
415                && e.size == size
416                && e.kind as u8 == kind as u8
417            {
418                return Some(e);
419            }
420            i += 1;
421        }
422        None
423    }
424}
425
/// RAII guard that releases a segment borrow when dropped.
///
/// Created by [`SegmentBorrowRegistry::register_guard()`] and its
/// convenience wrappers. The borrow is automatically released from the
/// registry on drop, preventing borrow leaks.
pub struct SegmentBorrowGuard<'a> {
    // Exclusive borrow of the registry: the borrow checker statically
    // prevents any other registry access while this guard is alive.
    registry: &'a mut SegmentBorrowRegistry,
    // Copy of the registered record, used to find and remove the entry
    // on drop.
    borrow: SegmentBorrow,
}
436impl<'a> SegmentBorrowGuard<'a> {
437    /// Access kind of the guarded borrow.
438    #[inline(always)]
439    pub fn kind(&self) -> AccessKind {
440        self.borrow.kind
441    }
442
443    /// Byte offset of the guarded segment.
444    #[inline(always)]
445    pub fn offset(&self) -> u32 {
446        self.borrow.offset
447    }
448
449    /// Byte size of the guarded segment.
450    #[inline(always)]
451    pub fn size(&self) -> u32 {
452        self.borrow.size
453    }
454}
455
456impl<'a> Drop for SegmentBorrowGuard<'a> {
457    fn drop(&mut self) {
458        self.registry.release(&self.borrow);
459    }
460}
461
// ── Tests ────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Address;

    // Deterministic 32-byte address with every byte set to `seed`.
    fn test_addr(seed: u8) -> Address {
        Address::new([seed; 32])
    }

    #[test]
    fn read_read_same_range_allowed() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        assert!(reg.register_read(&key, 0, 8).is_ok());
        assert!(reg.register_read(&key, 0, 8).is_ok());
        assert_eq!(reg.len(), 2);
    }

    #[test]
    fn read_write_same_range_rejected() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        assert!(reg.register_read(&key, 0, 8).is_ok());
        assert!(reg.register_write(&key, 0, 8).is_err());
    }

    #[test]
    fn write_write_same_range_rejected() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        assert!(reg.register_write(&key, 0, 8).is_ok());
        assert!(reg.register_write(&key, 0, 8).is_err());
    }

    #[test]
    fn write_read_same_range_rejected() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        assert!(reg.register_write(&key, 0, 8).is_ok());
        assert!(reg.register_read(&key, 0, 8).is_err());
    }

    #[test]
    fn non_overlapping_write_write_allowed() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        // balance: [0..8), metadata: [8..40)
        assert!(reg.register_write(&key, 0, 8).is_ok());
        assert!(reg.register_write(&key, 8, 32).is_ok());
    }

    #[test]
    fn partially_overlapping_rejected() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        // [0..16) and [8..24) overlap at [8..16)
        assert!(reg.register_write(&key, 0, 16).is_ok());
        assert!(reg.register_write(&key, 8, 16).is_err());
    }

    #[test]
    fn different_accounts_always_allowed() {
        let mut reg = SegmentBorrowRegistry::new();
        assert!(reg.register_write(&test_addr(1), 0, 8).is_ok());
        assert!(reg.register_write(&test_addr(2), 0, 8).is_ok());
    }

    #[test]
    fn release_then_reacquire() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        let borrow = SegmentBorrow {
            key_fp: address_fingerprint(&key),
            key,
            offset: 0,
            size: 8,
            kind: AccessKind::Write,
        };
        assert!(reg.register(borrow).is_ok());
        assert!(reg.register_write(&key, 0, 8).is_err()); // conflict
        assert!(reg.release(&borrow));
        assert!(reg.register_write(&key, 0, 8).is_ok()); // now OK
    }

    #[test]
    fn capacity_limit() {
        let mut reg = SegmentBorrowRegistry::new();
        // Fill all slots with non-overlapping reads on one account.
        for i in 0..MAX_SEGMENT_BORROWS {
            assert!(reg.register_read(&test_addr(1), i as u32 * 8, 8).is_ok());
        }
        // One more should fail.
        assert!(reg.register_read(&test_addr(1), 256, 8).is_err());
    }

    #[test]
    fn would_conflict_does_not_mutate() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        assert!(reg.register_write(&key, 0, 8).is_ok());
        let proposed = SegmentBorrow {
            key_fp: address_fingerprint(&key),
            key,
            offset: 0,
            size: 8,
            kind: AccessKind::Write,
        };
        assert!(reg.would_conflict(&proposed));
        assert_eq!(reg.len(), 1); // unchanged
    }

    #[test]
    fn adjacent_ranges_no_conflict() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        // [0..8) and [8..16) are adjacent, not overlapping.
        assert!(reg.register_write(&key, 0, 8).is_ok());
        assert!(reg.register_write(&key, 8, 8).is_ok());
    }

    // ── SegmentBorrowGuard RAII tests ────────────────────────────────
    //
    // The guard holds `&mut SegmentBorrowRegistry`, which provides
    // compile-time exclusion: the borrow checker prevents any registry
    // access while a guard is alive, giving *stronger* protection than
    // runtime conflict checks alone.  Tests verify the auto-release
    // behavior by inspecting the registry after the guard drops.

    #[test]
    fn guard_auto_releases_write_on_drop() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        {
            let _guard = reg.register_guard_write(&key, 0, 8).unwrap();
            // guard alive, registry exclusively borrowed at compile time
        }
        // After drop: slot freed, len back to 0.
        assert_eq!(reg.len(), 0);
        // Re-acquire the same range, proves release happened.
        assert!(reg.register_write(&key, 0, 8).is_ok());
    }

    #[test]
    fn guard_auto_releases_read_on_drop() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        {
            let _guard = reg.register_guard_read(&key, 0, 8).unwrap();
        }
        assert_eq!(reg.len(), 0);
        // Write now succeeds, the read borrow was released.
        assert!(reg.register_write(&key, 0, 8).is_ok());
    }

    #[test]
    fn sequential_guards_reuse_slot() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        for _ in 0..4 {
            let _guard = reg.register_guard_write(&key, 0, 8).unwrap();
            // each iteration: acquire, drop at end of loop body
        }
        assert_eq!(reg.len(), 0);
    }

    #[test]
    fn guard_accessors() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        let guard = reg.register_guard_write(&key, 16, 32).unwrap();
        assert_eq!(guard.kind(), AccessKind::Write);
        assert_eq!(guard.offset(), 16);
        assert_eq!(guard.size(), 32);
    }

    #[test]
    fn guard_then_manual_register_ok() {
        let mut reg = SegmentBorrowRegistry::new();
        let key = test_addr(1);
        {
            let _guard = reg.register_guard_write(&key, 0, 8).unwrap();
        }
        // Guard released, manual register on overlapping range works.
        assert!(reg.register_read(&key, 0, 8).is_ok());
        assert_eq!(reg.len(), 1);
    }
}