// routingkit_cch/lib.rs

#![doc = include_str!("../README.md")]

// Expose test support utilities
pub mod shp_utils;

#[cxx::bridge]
pub mod ffi {
    extern "C++" {
        include!("routingkit_cch_wrapper.h");

        type CCH; // CustomizableContractionHierarchy
        type CCHMetric; // CustomizableContractionHierarchyMetric
        type CCHQuery; // CustomizableContractionHierarchyQuery
        type CCHPartial; // CustomizableContractionHierarchyPartialCustomization

        /// Build a Customizable Contraction Hierarchy.
        /// Arguments:
        /// * order: node permutation (size = number of nodes)
        /// * tail/head: directed arc lists (same length)
        /// * filter_always_inf_arcs: if true, arcs with infinite weight placeholders are stripped
        unsafe fn cch_new(
            order: &[u32],
            tail: &[u32],
            head: &[u32],
            log_message: fn(&str),
            filter_always_inf_arcs: bool,
        ) -> UniquePtr<CCH>;

        /// Create a metric (weights binding) for an existing CCH.
        /// Keeps pointer to weights in CCHMetric; weights length must equal arc count.
        unsafe fn cch_metric_new(cch: &CCH, weights: &[u32]) -> UniquePtr<CCHMetric>;

        /// Run customization to compute upward/downward shortcut weights.
        /// Must be called after creating a metric and before queries.
        /// Cost: depends on separator quality; typically near-linear in the arc count with a small constant factor; may allocate temporary buffers.
        unsafe fn cch_metric_customize(metric: Pin<&mut CCHMetric>);

        /// Parallel customization; thread_count==0 picks an internal default (#procs if OpenMP, else 1).
        unsafe fn cch_metric_parallel_customize(metric: Pin<&mut CCHMetric>, thread_count: u32);

        /// Allocate a new reusable partial customization helper bound to a CCH.
        unsafe fn cch_partial_new(cch: &CCH) -> UniquePtr<CCHPartial>;

        /// Reset the partial updater to empty state (no updated arcs).
        unsafe fn cch_partial_reset(partial: Pin<&mut CCHPartial>);

        /// Mark an arc as updated (weight changed) for the next partial customize call.
        /// The actual weight change must already have been applied in the metric's weight vector.
        unsafe fn cch_partial_update_arc(partial: Pin<&mut CCHPartial>, arc: u32);

        /// Run partial customization to update shortcut weights affected by the marked arcs.
        unsafe fn cch_partial_customize(partial: Pin<&mut CCHPartial>, metric: Pin<&mut CCHMetric>);

        /// Allocate a new reusable query object bound to a metric.
        unsafe fn cch_query_new(metric: &CCHMetric) -> UniquePtr<CCHQuery>;

        /// Reset a query so it can be reused with the (same or updated) metric.
        /// Clears internal state, sources/targets, distance labels.
        unsafe fn cch_query_reset(query: Pin<&mut CCHQuery>, metric: &CCHMetric);

        /// Add a source node with initial distance (0 for standard shortest path).
        /// Multiple sources allowed (multi-source query).
        unsafe fn cch_query_add_source(query: Pin<&mut CCHQuery>, s: u32, dist: u32);

        /// Add a target node with initial distance (0 typical).
        /// Multiple targets allowed (multi-target query).
        unsafe fn cch_query_add_target(query: Pin<&mut CCHQuery>, t: u32, dist: u32);

        /// Execute the shortest path search (multi-source / multi-target if several added).
        /// Must be called after adding at least one source & target.
        unsafe fn cch_query_run(query: Pin<&mut CCHQuery>);

        /// Get shortest distance after run(). Undefined if run() not called.
        unsafe fn cch_query_distance(query: &CCHQuery) -> u32;

        /// Extract the node path for the current query result.
        /// Reconstructs path; may traverse parent pointers.
        unsafe fn cch_query_node_path(query: &CCHQuery) -> Vec<u32>;

        /// Extract the arc (edge) path corresponding to the shortest path.
        /// Each entry is an original arc id (after shortcut unpacking).
        unsafe fn cch_query_arc_path(query: &CCHQuery) -> Vec<u32>;

        /// Compute a high-quality nested dissection order using inertial flow separators.
        /// Inputs:
        /// * node_count: number of nodes
        /// * tail/head: directed arcs (treated as undirected for ordering)
        /// * latitude/longitude: per-node coords (len = node_count)
        /// Returns: order permutation (position -> node id).
        unsafe fn cch_compute_order_inertial(
            node_count: u32,
            tail: &[u32],
            head: &[u32],
            latitude: &[f32],
            longitude: &[f32],
        ) -> Vec<u32>;

        /// Fast fallback order: nodes sorted by (degree, id) ascending.
        /// Lower quality than nested dissection but zero extra data needed.
        unsafe fn cch_compute_order_degree(node_count: u32, tail: &[u32], head: &[u32]) -> Vec<u32>;
    }
}

// Thread-safety markers
// Safety rationale:
// - CCH (CustomizableContractionHierarchy) after construction is immutable.
// - CCHMetric after successful customize() is read-only for queries; underlying RoutingKit code
//   does not mutate metric during query operations (queries keep their own state).
// - CCHQuery holds mutable per-query state and must not be shared across threads concurrently;
//   we allow moving it between threads (Send) but not Sync.
// If RoutingKit later introduces internal mutation or lazy caching inside CCHMetric, these impls
// would need re-audit.
unsafe impl Send for ffi::CCH {}
unsafe impl Sync for ffi::CCH {}
unsafe impl Send for ffi::CCHMetric {}
unsafe impl Sync for ffi::CCHMetric {}
unsafe impl Send for ffi::CCHQuery {}
// (No Sync for CCHQuery)

// Rust wrapper over FFI
use cxx::UniquePtr;
use ffi::*;

fn is_permutation(arr: &[u32]) -> bool {
    let n = arr.len();
    let mut seen = vec![false; n];
    for &val in arr {
        if (val as usize) >= n || seen[val as usize] {
            return false;
        }
        seen[val as usize] = true;
    }
    true
}

/// Lightweight fill-in reducing order heuristic: sort nodes by (degree, id) ascending.
/// Fast but lower quality than nested dissection. Use if no coordinates or other data are available.
/// Panics if tail/head lengths differ; in debug builds, also panics if they contain invalid node ids.
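///
/// A minimal sketch on a tiny toy graph (the arcs are chosen arbitrarily for illustration):
///
/// ```no_run
/// use routingkit_cch::compute_order_degree;
///
/// // Three nodes, two directed arcs: 0 -> 1 and 1 -> 2.
/// let tail = [0u32, 1];
/// let head = [1u32, 2];
/// let order = compute_order_degree(3, &tail, &head);
/// assert_eq!(order.len(), 3); // a permutation of the node ids 0..3
/// ```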
pub fn compute_order_degree(node_count: u32, tail: &[u32], head: &[u32]) -> Vec<u32> {
    debug_assert!(
        tail.iter()
            .chain(head)
            .max()
            .map_or(true, |&v| v < node_count),
        "tail/head contain node ids outside valid range"
    );
    assert!(
        tail.len() == head.len(),
        "tail and head arrays must have the same length"
    );
    unsafe { cch_compute_order_degree(node_count, tail, head) }
}

/// High-quality nested dissection order using inertial flow separators.
/// Requires per-node coordinates (latitude/longitude) as input.
/// Panics if tail/head lengths differ or if latitude/longitude lengths do not match node_count;
/// in debug builds, also panics if tail/head contain invalid node ids.
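///
/// A minimal sketch with made-up coordinates (any planar embedding serves for illustration):
///
/// ```no_run
/// use routingkit_cch::compute_order_inertial;
///
/// // Three nodes on a line, two directed arcs: 0 -> 1 and 1 -> 2.
/// let tail = [0u32, 1];
/// let head = [1u32, 2];
/// let lat = [47.0f32, 47.1, 47.2];
/// let lon = [8.0f32, 8.1, 8.2];
/// let order = compute_order_inertial(3, &tail, &head, &lat, &lon);
/// assert_eq!(order.len(), 3);
/// ```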
pub fn compute_order_inertial(
    node_count: u32,
    tail: &[u32],
    head: &[u32],
    latitude: &[f32],
    longitude: &[f32],
) -> Vec<u32> {
    debug_assert!(
        tail.iter()
            .chain(head)
            .max()
            .map_or(true, |&v| v < node_count),
        "tail/head contain node ids outside valid range"
    );
    assert!(
        tail.len() == head.len(),
        "tail and head arrays must have the same length"
    );
    assert!(
        latitude.len() == (node_count as usize) && longitude.len() == (node_count as usize),
        "latitude/longitude length must equal node count"
    );
    unsafe { cch_compute_order_inertial(node_count, tail, head, latitude, longitude) }
}

/// Immutable Customizable Contraction Hierarchy index.
pub struct CCH {
    inner: UniquePtr<ffi::CCH>,
    edge_count: usize,
    node_count: usize,
}

impl CCH {
    /// Construct a new immutable Customizable Contraction Hierarchy index.
    ///
    /// Parameters:
    /// * `order` – permutation of node ids (length = node count) produced by a fill‑in reducing
    ///   nested dissection heuristic (e.g. [`compute_order_inertial`]) or a lightweight fallback.
    /// * `tail`, `head` – parallel arrays encoding each directed arc `i` as `(tail[i], head[i])`.
    ///   The ordering routine treats them as undirected; here they stay directed for queries.
    /// * `log_message` – callback for logging progress messages during construction.
    ///   May be `|_| {}` if you do not want any messages.
    /// * `filter_always_inf_arcs` – if `true`, arcs whose weight will always be interpreted as
    ///   an application defined 'infinity' placeholder may be removed during construction to
    ///   reduce index size. (Typically keep `false` unless you prepared such a list.)
    ///
    /// Cost: preprocessing is more expensive than a single customization but usually far cheaper
    /// than building a full classical CH of the same quality. Construction copies the input
    /// slices; you may drop them afterwards.
    ///
    /// Thread-safety: resulting object is `Send + Sync` and read-only.
    ///
    /// Panics if `tail`/`head` lengths differ; in debug builds, also panics if `order` is not
    /// a valid permutation of node ids or if `tail`/`head` contain invalid node ids.
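    ///
    /// A minimal sketch on a toy graph (order source and weights are illustrative; the callback
    /// just forwards progress messages to stderr):
    ///
    /// ```no_run
    /// use routingkit_cch::{compute_order_degree, CCH};
    ///
    /// let tail = [0u32, 1];
    /// let head = [1u32, 2];
    /// let order = compute_order_degree(3, &tail, &head);
    /// let _cch = CCH::new(&order, &tail, &head, |msg| eprintln!("{msg}"), false);
    /// ```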
    pub fn new(
        order: &[u32],
        tail: &[u32],
        head: &[u32],
        log_message: fn(&str),
        filter_always_inf_arcs: bool,
    ) -> Self {
        debug_assert!(
            is_permutation(order),
            "order array is not a valid permutation"
        );
        debug_assert!(
            tail.iter()
                .chain(head)
                .max()
                .map_or(true, |&v| (v as usize) < order.len()),
            "tail/head contain node ids outside valid range"
        );
        assert!(
            tail.len() == head.len(),
            "tail and head arrays must have the same length"
        );
        let cch = unsafe { cch_new(order, tail, head, log_message, filter_always_inf_arcs) };
        CCH {
            inner: cch,
            edge_count: tail.len(),
            node_count: order.len(),
        }
    }
}

/// A customized metric (weight binding) for a given [`CCH`].
/// Keeps ownership of the weight vector so it can be mutated for partial updates.
/// Thread-safety: `Send + Sync`; read-only for queries.
pub struct CCHMetric<'a> {
    inner: UniquePtr<ffi::CCHMetric>,
    weights: Box<[u32]>, // The C++ side stores only a raw pointer; it is valid for the lifetime of `self`.
    cch: &'a CCH,
}

impl<'a> CCHMetric<'a> {
    /// Create and customize a metric (weight binding) for a given [`CCH`].
    /// Owns the weight vector so that future partial updates can safely mutate it.
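    ///
    /// A minimal sketch (weight values are arbitrary; the length must equal the arc count):
    ///
    /// ```no_run
    /// use routingkit_cch::{compute_order_degree, CCH, CCHMetric};
    ///
    /// let (tail, head) = ([0u32, 1], [1u32, 2]);
    /// let order = compute_order_degree(3, &tail, &head);
    /// let cch = CCH::new(&order, &tail, &head, |_| {}, false);
    /// // One weight per arc; customization runs as part of `new`.
    /// let _metric = CCHMetric::new(&cch, vec![10, 20]);
    /// ```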
    pub fn new(cch: &'a CCH, weights: Vec<u32>) -> Self {
        assert!(
            weights.len() == cch.edge_count,
            "weights length must equal arc count",
        );
        let boxed: Box<[u32]> = weights.into_boxed_slice();
        // Temporarily borrow as slice for FFI creation
        let metric = unsafe {
            let mut metric = cch_metric_new(&cch.inner, &boxed);
            cch_metric_customize(metric.as_mut().unwrap());
            metric
        };
        CCHMetric {
            inner: metric,
            weights: boxed,
            cch,
        }
    }

    /// Parallel customization variant.
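    ///
    /// `thread_count == 0` lets the backend pick its default. A minimal sketch (requires the
    /// `openmp` feature; graph and weights are illustrative):
    ///
    /// ```no_run
    /// use routingkit_cch::{compute_order_degree, CCH, CCHMetric};
    ///
    /// let (tail, head) = ([0u32, 1], [1u32, 2]);
    /// let order = compute_order_degree(3, &tail, &head);
    /// let cch = CCH::new(&order, &tail, &head, |_| {}, false);
    /// // 0 = let the customization choose its thread count.
    /// let _metric = CCHMetric::parallel_new(&cch, vec![10, 20], 0);
    /// ```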
    #[cfg(feature = "openmp")]
    pub fn parallel_new(cch: &'a CCH, weights: Vec<u32>, thread_count: u32) -> Self {
        assert!(
            weights.len() == cch.edge_count,
            "weights length must equal arc count",
        );
        let boxed: Box<[u32]> = weights.into_boxed_slice();
        let metric = unsafe {
            let mut metric = cch_metric_new(&cch.inner, &boxed);
            cch_metric_parallel_customize(metric.as_mut().unwrap(), thread_count);
            metric
        };
        CCHMetric {
            inner: metric,
            weights: boxed,
            cch,
        }
    }
    /// Borrow the current weights slice (reflects any partial updates applied so far).
    pub fn weights(&self) -> &[u32] {
        &self.weights
    }
}

/// Reusable partial customization helper. Construct once if you perform many small incremental
/// weight updates; this avoids reallocating O(m) internal buffers each call.
pub struct CCHMetricPartialUpdater<'a> {
    partial: UniquePtr<ffi::CCHPartial>,
    cch: &'a CCH,
}

impl<'a> CCHMetricPartialUpdater<'a> {
    /// Create a reusable partial updater bound to a given CCH. You can then apply it to any
    /// metric built from the same CCH (even if you rebuild metrics with different weight sets).
    pub fn new(cch: &'a CCH) -> Self {
        let partial = unsafe { cch_partial_new(cch.inner.as_ref().unwrap()) };
        CCHMetricPartialUpdater { partial, cch }
    }

    /// Apply a batch of (arc, new_weight) updates to the given metric and run partial customize.
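    ///
    /// A minimal sketch using a `HashMap<u32, u32>` as the update batch (any map whose
    /// reference iterates `(&u32, &u32)` pairs works; arc ids and weights are illustrative):
    ///
    /// ```no_run
    /// use std::collections::HashMap;
    /// use routingkit_cch::{compute_order_degree, CCH, CCHMetric, CCHMetricPartialUpdater};
    ///
    /// let (tail, head) = ([0u32, 1], [1u32, 2]);
    /// let order = compute_order_degree(3, &tail, &head);
    /// let cch = CCH::new(&order, &tail, &head, |_| {}, false);
    /// let mut metric = CCHMetric::new(&cch, vec![10, 20]);
    ///
    /// let mut updater = CCHMetricPartialUpdater::new(&cch);
    /// let updates: HashMap<u32, u32> = [(1u32, 5u32)].into_iter().collect();
    /// updater.apply(&mut metric, &updates); // arc 1 now has weight 5
    /// ```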
    pub fn apply<T>(&mut self, metric: &mut CCHMetric<'a>, updates: &T)
    where
        T: for<'b> std::ops::Index<&'b u32, Output = u32>,
        for<'b> &'b T: IntoIterator<Item = (&'b u32, &'b u32)>,
    {
        assert!(
            std::ptr::eq(metric.cch, self.cch),
            "CCHMetricPartialUpdater must be used with metrics from the same CCH"
        );
        for (k, v) in updates {
            metric.weights[*k as usize] = *v; // safe: length invariant unchanged (Box<[u32]>)
        }
        unsafe {
            cch_partial_reset(self.partial.as_mut().unwrap());
            for (k, _) in updates {
                cch_partial_update_arc(self.partial.as_mut().unwrap(), *k);
            }
            cch_partial_customize(
                self.partial.as_mut().unwrap(),
                metric.inner.as_mut().unwrap(),
            );
        }
    }
}

/// A reusable shortest-path query object bound to a given [`CCHMetric`].
/// Thread-safety: `Send` but not `Sync`; holds mutable per-query state.
pub struct CCHQuery<'a> {
    inner: UniquePtr<ffi::CCHQuery>,
    metric: &'a CCHMetric<'a>,
    state: [bool; 2],
}

impl<'a> CCHQuery<'a> {
    /// Allocate a new reusable shortest-path query bound to a given customized [`CCHMetric`].
    ///
    /// The query object stores its own frontier / label buffers and can be reset and reused for
    /// many (s, t) pairs or multi-source / multi-target batches. You may have multiple query
    /// objects referencing the same metric concurrently (read-only access to metric data).
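    ///
    /// A minimal sketch of per-thread queries over one shared metric, using `std::thread::scope`
    /// (graph, weights, and node ids are placeholders):
    ///
    /// ```no_run
    /// use routingkit_cch::{compute_order_degree, CCH, CCHMetric, CCHQuery};
    ///
    /// let (tail, head) = ([0u32, 1], [1u32, 2]);
    /// let order = compute_order_degree(3, &tail, &head);
    /// let cch = CCH::new(&order, &tail, &head, |_| {}, false);
    /// let metric = CCHMetric::new(&cch, vec![10, 20]);
    ///
    /// std::thread::scope(|s| {
    ///     let metric = &metric;
    ///     for (src, dst) in [(0u32, 2u32), (1, 2)] {
    ///         s.spawn(move || {
    ///             // Each thread owns its query; the shared metric is only read.
    ///             let mut query = CCHQuery::new(metric);
    ///             query.add_source(src, 0);
    ///             query.add_target(dst, 0);
    ///             let _dist = query.run().distance();
    ///         });
    ///     }
    /// });
    /// ```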
    pub fn new(metric: &'a CCHMetric<'a>) -> Self {
        let inner = unsafe { cch_query_new(&metric.inner) };
        CCHQuery {
            inner,
            metric,
            state: [false; 2],
        }
    }

    /// Add a source node with an initial distance (normally 0). Multiple calls allow a multi-
    /// source query. Distances let you model already-traversed partial paths.
    pub fn add_source(&mut self, s: u32, dist: u32) {
        assert!(
            (s as usize) < self.metric.cch.node_count,
            "source node id out of range",
        );
        unsafe {
            cch_query_add_source(self.inner.as_mut().unwrap(), s, dist);
        }
        self.state[0] = true;
    }

    /// Add a target node with an initial distance (normally 0). Multiple calls allow multi-target
    /// queries; the algorithm stops when the frontiers settle the optimal distance to any target.
    pub fn add_target(&mut self, t: u32, dist: u32) {
        assert!(
            (t as usize) < self.metric.cch.node_count,
            "target node id out of range",
        );
        unsafe {
            cch_query_add_target(self.inner.as_mut().unwrap(), t, dist);
        }
        self.state[1] = true;
    }

    /// Execute the forward/backward upward/downward search to settle the shortest path between the
    /// added sources and targets. Must be called after at least one source and one target.
    /// Returns a [`CCHQueryResult`] holding a mutable reference to the query.
    /// The query is automatically reset when the result is dropped.
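    ///
    /// A minimal sketch of one query followed by reuse (graph, weights, and node ids are
    /// illustrative; the asserted distance follows from the toy weights 10 + 20):
    ///
    /// ```no_run
    /// use routingkit_cch::{compute_order_degree, CCH, CCHMetric, CCHQuery};
    ///
    /// let (tail, head) = ([0u32, 1], [1u32, 2]);
    /// let order = compute_order_degree(3, &tail, &head);
    /// let cch = CCH::new(&order, &tail, &head, |_| {}, false);
    /// let metric = CCHMetric::new(&cch, vec![10, 20]);
    ///
    /// let mut query = CCHQuery::new(&metric);
    /// query.add_source(0, 0);
    /// query.add_target(2, 0);
    /// {
    ///     let result = query.run();
    ///     assert_eq!(result.distance(), Some(30));
    ///     let _nodes = result.node_path(); // e.g. [0, 1, 2]
    /// } // dropping the result resets the query for reuse
    /// query.add_source(1, 0);
    /// query.add_target(2, 0);
    /// let _d = query.run().distance();
    /// ```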
    pub fn run<'b>(&'b mut self) -> CCHQueryResult<'b, 'a> {
        assert!(
            self.state.iter().all(|&x| x),
            "must add at least one source and one target before running the query"
        );
        unsafe {
            cch_query_run(self.inner.as_mut().unwrap());
        }
        CCHQueryResult { query: self }
    }
}

/// The result of a single execution of a [`CCHQuery`].
/// Holds a mutable reference to the query to ensure it is not reset or reused
/// while the result is still alive; drop the result before reusing the query.
/// When the result is dropped, the query is automatically reset.
pub struct CCHQueryResult<'b, 'a> {
    query: &'b mut CCHQuery<'a>,
}

impl<'b, 'a> CCHQueryResult<'b, 'a> {
    /// Return the shortest path distance result of [`CCHQuery::run`].
    /// Returns `None` if no target is reachable.
    pub fn distance(&self) -> Option<u32> {
        let res = unsafe { cch_query_distance(self.query.inner.as_ref().unwrap()) };
        if res == (i32::MAX as u32) {
            // an internal distance of `i32::MAX` means the target is unreachable
            None
        } else {
            Some(res)
        }
    }

    /// Reconstruct and return the node id sequence of the current best path.
    /// Returns an empty vec if no target is reachable.
    pub fn node_path(&self) -> Vec<u32> {
        unsafe { cch_query_node_path(self.query.inner.as_ref().unwrap()) }
    }

    /// Reconstruct and return the original arc ids along the shortest path.
    /// Returns an empty vec if no target is reachable.
    pub fn arc_path(&self) -> Vec<u32> {
        unsafe { cch_query_arc_path(self.query.inner.as_ref().unwrap()) }
    }
}

impl<'b, 'a> Drop for CCHQueryResult<'b, 'a> {
    /// Reset the query and clear its source/target state.
    fn drop(&mut self) {
        unsafe {
            cch_query_reset(
                self.query.inner.as_mut().unwrap(),
                self.query.metric.inner.as_ref().unwrap(),
            );
        }
        self.query.state.iter_mut().for_each(|x| *x = false);
    }
}

#[cfg(feature = "pyo3")]
mod python_binding;