bonsai-index 1.0.1

A zero-dependency, embeddable, self-tuning spatial index for Rust
//! Atomic-swap index router.
//!
//! [`IndexRouter`] holds an [`AtomicPtr`] to the currently active backend and
//! an optional shadow backend used during migration. Swapping in a new
//! backend is a single SeqCst pointer-width operation, so the swap itself
//! never blocks readers. Individual inserts and queries are serialised by a
//! `RwLock` inside each `BackendBox`, so the router as a whole is not
//! lock-free; only the backend switch is.
//!
//! All unsafe pointer manipulation is confined to this module; every unsafe
//! block carries a `// SAFETY:` comment.
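//!
//! # Example
//!
//! A minimal usage sketch (import paths are illustrative; `KDTree` is the
//! backend this module's tests use):
//!
//! ```ignore
//! use bonsai_index::{BBox, IndexRouter, KDTree, Point};
//!
//! let router = IndexRouter::new(Box::new(KDTree::<u32, f64, 2>::new()));
//! let id = router.insert(Point::new([1.0, 2.0]), 42u32);
//!
//! let hits = router.range_query(&BBox::new(
//!     Point::new([0.0, 0.0]),
//!     Point::new([3.0, 3.0]),
//! ));
//! assert_eq!(hits.len(), 1);
//! assert_eq!(router.remove(id), Some(42));
//! ```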

use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

use crate::backends::SpatialBackend;
use crate::types::{BBox, CoordType, EntryId, Point};

/// Heap-allocated wrapper around a `Box<dyn SpatialBackend<T, C, D>>` with an
/// internal `RwLock`, allowing concurrent reads and exclusive, serialised
/// writes.
pub(crate) struct BackendBox<T, C, const D: usize>
where
    C: CoordType,
{
    pub inner: RwLock<Box<dyn SpatialBackend<T, C, D>>>,
}

impl<T, C, const D: usize> BackendBox<T, C, D>
where
    C: CoordType,
    T: Send + Sync + 'static,
{
    fn new(backend: Box<dyn SpatialBackend<T, C, D>>) -> *mut Self {
        Box::into_raw(Box::new(Self {
            inner: RwLock::new(backend),
        }))
    }

    pub(crate) fn read(&self) -> RwLockReadGuard<'_, Box<dyn SpatialBackend<T, C, D>>> {
        self.inner.read().expect("backend RwLock poisoned")
    }

    pub(crate) fn write(&self) -> RwLockWriteGuard<'_, Box<dyn SpatialBackend<T, C, D>>> {
        self.inner.write().expect("backend RwLock poisoned")
    }
}

/// Routes insert, remove, and query operations to the active backend.
pub struct IndexRouter<T, C, const D: usize>
where
    C: CoordType,
    T: Send + Sync + 'static,
{
    active: AtomicPtr<BackendBox<T, C, D>>,
    shadow: AtomicPtr<BackendBox<T, C, D>>,
    migrating: AtomicBool,
}

impl<T, C, const D: usize> IndexRouter<T, C, D>
where
    C: CoordType,
    T: Clone + Send + Sync + 'static,
{
    /// Create a new `IndexRouter` backed by `backend`.
    pub fn new(backend: Box<dyn SpatialBackend<T, C, D>>) -> Self {
        Self {
            active: AtomicPtr::new(BackendBox::new(backend)),
            shadow: AtomicPtr::new(std::ptr::null_mut()),
            migrating: AtomicBool::new(false),
        }
    }

    /// Insert a point into the active backend (and shadow during migration).
    pub fn insert(&self, point: Point<C, D>, payload: T) -> EntryId {
        // SAFETY: `active` is always a valid, non-null pointer to a
        // `BackendBox` allocated by `BackendBox::new`. We load it with SeqCst
        // ordering and take a shared reference for the duration of this call.
        // The pointer is only replaced by `commit_migration` via a SeqCst
        // swap; the old allocation is dropped only after the swap, so any
        // thread that loaded the old pointer before the swap will finish its
        // operation (guarded by the RwLock inside BackendBox) before the drop.
        let active = unsafe { &*self.active.load(Ordering::SeqCst) };
        let id = active.write().insert(point, payload.clone());

        if self.migrating.load(Ordering::SeqCst) {
            let shadow_ptr = self.shadow.load(Ordering::SeqCst);
            if !shadow_ptr.is_null() {
                // SAFETY: a non-null `shadow` pointer always originates from
                // `BackendBox::new` and is only detached by `commit_migration`,
                // which swaps it into `active` rather than freeing it, so it
                // stays valid while we hold this reference. Note that
                // `begin_migration` sets `migrating` *before* storing `shadow`,
                // so the flag alone does not guarantee a non-null pointer;
                // the null check above does.
                let shadow = unsafe { &*shadow_ptr };
                shadow.write().insert(point, payload);
            }
        }

        id
    }

    /// Remove an entry from the active backend (and shadow during migration).
    pub fn remove(&self, id: EntryId) -> Option<T> {
        // SAFETY: same invariant as in `insert`.
        let active = unsafe { &*self.active.load(Ordering::SeqCst) };
        let result = active.write().remove(id);

        if self.migrating.load(Ordering::SeqCst) {
            let shadow_ptr = self.shadow.load(Ordering::SeqCst);
            if !shadow_ptr.is_null() {
                // SAFETY: same invariant as `shadow` in `insert`.
                let shadow = unsafe { &*shadow_ptr };
                shadow.write().remove(id);
            }
        }

        result
    }

    /// Run a range query against the active backend.
    pub fn range_query(&self, bbox: &BBox<C, D>) -> Vec<(EntryId, T)>
    where
        T: Clone,
    {
        // SAFETY: same invariant as in `insert`.
        let active = unsafe { &*self.active.load(Ordering::SeqCst) };
        active
            .read()
            .range_query(bbox)
            .into_iter()
            .map(|(id, t)| (id, t.clone()))
            .collect()
    }

    /// Return the number of entries in the active backend.
    pub fn len(&self) -> usize {
        // SAFETY: same invariant as in `insert`.
        let active = unsafe { &*self.active.load(Ordering::SeqCst) };
        active.read().len()
    }

    /// Return `true` if the active backend is empty.
    pub fn is_empty(&self) -> bool {
        // SAFETY: same invariant as in `insert`.
        let active = unsafe { &*self.active.load(Ordering::SeqCst) };
        active.read().is_empty()
    }

    /// Return `true` if a migration is currently in progress.
    pub fn is_migrating(&self) -> bool {
        self.migrating.load(Ordering::SeqCst)
    }

    /// Return the raw pointer to the active [`BackendBox`].
    ///
    /// # Safety
    ///
    /// The returned pointer is valid as long as the `IndexRouter` is alive and
    /// no concurrent `commit_migration` call has completed. Callers must not
    /// store the pointer beyond the lifetime of the `IndexRouter`.
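    ///
    /// This is intended for the migrator's bulk copy from the old backend
    /// into the shadow. A sketch, assuming a hypothetical `for_each_entry`
    /// method on `SpatialBackend` (not part of the trait shown here):
    ///
    /// ```ignore
    /// // SAFETY: the router outlives this copy and no commit runs meanwhile.
    /// let src = unsafe { &*router.active_ptr() };
    /// // `for_each_entry` is hypothetical; substitute the trait's real
    /// // iteration API.
    /// src.read().for_each_entry(|point, payload| {
    ///     shadow.insert(point, payload.clone());
    /// });
    /// ```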
    pub(crate) fn active_ptr(&self) -> *mut BackendBox<T, C, D> {
        self.active.load(Ordering::SeqCst)
    }

    /// Install a shadow backend and set the migrating flag.
    ///
    /// Returns `false` if a migration is already in progress.
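    ///
    /// A minimal protocol sketch (mirroring this module's tests; the bulk
    /// copy between backends is the caller's responsibility and is elided):
    ///
    /// ```ignore
    /// let router = IndexRouter::new(Box::new(KDTree::<usize, f64, 2>::new()));
    /// router.insert(Point::new([1.0, 2.0]), 7usize);
    ///
    /// // Writes are mirrored to both backends from this point on.
    /// assert!(router.begin_migration(Box::new(KDTree::new())));
    /// // ... bulk-copy the pre-existing entries into the shadow ...
    /// router.commit_migration(); // the shadow becomes the active backend
    /// ```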
    pub fn begin_migration(&self, shadow: Box<dyn SpatialBackend<T, C, D>>) -> bool {
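        // The CAS on `migrating` claims exclusive migration rights. Note the
        // ordering: `migrating` becomes true *before* `shadow` is stored, so a
        // writer may briefly observe `migrating == true` with a null shadow;
        // `insert` and `remove` tolerate this window by checking for null.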
        if self
            .migrating
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_err()
        {
            return false;
        }
        let ptr = BackendBox::new(shadow);
        self.shadow.store(ptr, Ordering::SeqCst);
        true
    }

    /// Atomically swap the shadow backend into the active slot and drop the
    /// old active backend.
    ///
    /// Panics if no migration is in progress.
    pub fn commit_migration(&self) {
        assert!(
            self.migrating.load(Ordering::SeqCst),
            "commit_migration called without an active migration"
        );

        let shadow_ptr = self.shadow.swap(std::ptr::null_mut(), Ordering::SeqCst);
        assert!(
            !shadow_ptr.is_null(),
            "shadow pointer is null during commit"
        );

        let old_ptr = self.active.swap(shadow_ptr, Ordering::SeqCst);

        self.migrating.store(false, Ordering::SeqCst);

        if !old_ptr.is_null() {
            // SAFETY: `old_ptr` was produced by `BackendBox::new` (i.e.
            // `Box::into_raw`) and is unreachable through `active` after the
            // SeqCst swap above, so no new operation can load it. Operations
            // that loaded it earlier still hold (or are about to take) a
            // lock guard on it; acquiring and releasing its write lock below
            // drains those in-flight guards before the allocation is freed.
            // This relies on the router's methods dereferencing and locking
            // immediately after the load, with no intervening suspension.
            unsafe {
                drop((*old_ptr).write());
                drop(Box::from_raw(old_ptr));
            }
        }
    }
}

impl<T, C, const D: usize> Drop for IndexRouter<T, C, D>
where
    C: CoordType,
    T: Send + Sync + 'static,
{
    fn drop(&mut self) {
        let active_ptr = *self.active.get_mut();
        if !active_ptr.is_null() {
            // SAFETY: `active_ptr` was produced by `BackendBox::new` and we
            // are the sole owner at drop time (no other references exist).
            drop(unsafe { Box::from_raw(active_ptr) });
        }

        let shadow_ptr = *self.shadow.get_mut();
        if !shadow_ptr.is_null() {
            // SAFETY: same invariant as `active_ptr` above.
            drop(unsafe { Box::from_raw(shadow_ptr) });
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::backends::KDTree;
    use crate::types::Point;

    fn make_router() -> IndexRouter<usize, f64, 2> {
        IndexRouter::new(Box::new(KDTree::<usize, f64, 2>::new()))
    }

    #[test]
    fn insert_and_len() {
        let router = make_router();
        router.insert(Point::new([1.0, 2.0]), 42);
        router.insert(Point::new([3.0, 4.0]), 99);
        assert_eq!(router.len(), 2);
    }

    #[test]
    fn remove_returns_payload() {
        let router = make_router();
        let id = router.insert(Point::new([1.0, 2.0]), 7usize);
        let payload = router.remove(id);
        assert_eq!(payload, Some(7));
        assert_eq!(router.len(), 0);
    }

    #[test]
    fn range_query_returns_inserted_points() {
        let router = make_router();
        router.insert(Point::new([1.0, 1.0]), 1usize);
        router.insert(Point::new([5.0, 5.0]), 2usize);
        let bbox = BBox::new(Point::new([0.0, 0.0]), Point::new([3.0, 3.0]));
        let results = router.range_query(&bbox);
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].1, 1);
    }

    #[test]
    fn migration_swap_preserves_entries() {
        let router = make_router();
        for i in 0..10usize {
            router.insert(Point::new([i as f64, i as f64]), i);
        }
        assert_eq!(router.len(), 10);

        let new_backend = Box::new(KDTree::<usize, f64, 2>::new());
        assert!(router.begin_migration(new_backend));
        assert!(router.is_migrating());

        for i in 10..15usize {
            router.insert(Point::new([i as f64, i as f64]), i);
        }

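        // Only the five dual-written inserts survive the swap: bulk-copying
        // the original ten entries into the shadow is the migrator's job
        // (via `active_ptr`) and is deliberately not exercised here.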
        router.commit_migration();
        assert!(!router.is_migrating());
        assert_eq!(router.len(), 5);
    }

    #[test]
    fn begin_migration_rejects_concurrent() {
        let router = make_router();
        let b1 = Box::new(KDTree::<usize, f64, 2>::new());
        let b2 = Box::new(KDTree::<usize, f64, 2>::new());
        assert!(router.begin_migration(b1));
        assert!(!router.begin_migration(b2));
        router.commit_migration();
    }

    #[test]
    fn concurrent_inserts_and_queries() {
        use std::sync::Arc;
        use std::thread;

        let router = Arc::new(make_router());
        let mut handles = Vec::new();

        for t in 0..4u64 {
            let r = Arc::clone(&router);
            handles.push(thread::spawn(move || {
                for i in 0..100usize {
                    r.insert(Point::new([t as f64 * 100.0 + i as f64, 0.0]), i);
                }
            }));
        }

        for _ in 0..2u64 {
            let r = Arc::clone(&router);
            handles.push(thread::spawn(move || {
                let bbox = BBox::new(Point::new([0.0, 0.0]), Point::new([1000.0, 1000.0]));
                for _ in 0..50 {
                    let _ = r.range_query(&bbox);
                }
            }));
        }

        for h in handles {
            h.join().expect("thread panicked");
        }

        assert_eq!(router.len(), 400);
    }
}

#[cfg(all(loom, test))]
mod loom_tests {
    use loom::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
    use loom::sync::{Arc, Mutex};
    use loom::thread;

    // A minimal router re-implemented with loom primitives for model
    // checking. The production IndexRouter uses std types; this variant
    // instruments the same atomic-pointer-plus-lock pattern so loom can
    // explore all interleavings.
    struct LoomRouter {
        active: AtomicPtr<LoomBackendBox>,
        migrating: AtomicBool,
    }

    struct LoomBackendBox {
        data: Mutex<Vec<([f64; 2], usize)>>,
    }

    impl LoomBackendBox {
        fn new() -> *mut Self {
            Box::into_raw(Box::new(Self {
                data: Mutex::new(Vec::new()),
            }))
        }

        fn insert(&self, x: f64, y: f64, val: usize) {
            self.data.lock().unwrap().push(([x, y], val));
        }

        fn query(&self) -> usize {
            self.data.lock().unwrap().len()
        }
    }

    impl LoomRouter {
        fn new() -> Self {
            Self {
                active: AtomicPtr::new(LoomBackendBox::new()),
                migrating: AtomicBool::new(false),
            }
        }

        fn insert(&self, x: f64, y: f64, val: usize) {
            // SAFETY: `active` is always a valid non-null pointer produced by
            // `LoomBackendBox::new`. Under loom, all atomic loads are
            // instrumented to explore all valid interleavings.
            let ptr = self.active.load(Ordering::SeqCst);
            unsafe { &*ptr }.insert(x, y, val);
        }

        fn query(&self) -> usize {
            // SAFETY: same invariant as `insert`.
            let ptr = self.active.load(Ordering::SeqCst);
            unsafe { &*ptr }.query()
        }
    }

    impl Drop for LoomRouter {
        fn drop(&mut self) {
            let ptr = self.active.load(Ordering::Relaxed);
            if !ptr.is_null() {
                // SAFETY: sole owner at drop time.
                drop(unsafe { Box::from_raw(ptr) });
            }
        }
    }

    #[test]
    fn prop_concurrent_read_write_safety() {
        loom::model(|| {
            let router = Arc::new(LoomRouter::new());

            let r1 = Arc::clone(&router);
            let writer = thread::spawn(move || {
                r1.insert(1.0, 1.0, 1);
                r1.insert(2.0, 2.0, 2);
            });

            let r2 = Arc::clone(&router);
            let reader = thread::spawn(move || {
                let _ = r2.query();
            });

            writer.join().unwrap();
            reader.join().unwrap();
        });
    }
}