// cch_rs/cch/mod.rs
1//! Core engine and orchestration for Customizable Contraction Hierarchies.
2//!
3//! This module provides the primary entry point, [`CchEngine`], which manages the lifecycle
4//! of the routing data. It uses atomic swaps for topology updates and per-profile locks for
5//! metric updates, allowing weight changes without cloning the whole metric snapshot.
6//!
7//! # Key Traits
8//! * [`CchGraph`]: Must be implemented by your base graph representation to provide basic
9//!   topology and iteration capabilities.
10//! * [`ProfileId`]: A marker trait for differentiating routing profiles (e.g., Car, Bike).
11
12use std::cell::RefCell;
13use std::hash::Hash;
14use std::sync::{Arc, OnceLock};
15
16use arc_swap::ArcSwap;
17use parking_lot::RwLock;
18use rustc_hash::{FxBuildHasher, FxHashMap};
19
20use crate::cch::contract::Topology;
21use crate::cch::customize::{PartialUpdateContext, Shortcuts, WeightMap, Weights};
22use crate::cch::query::Query;
23
24pub mod contract;
25pub mod customize;
26pub mod order;
27pub mod path;
28pub mod query;
29
/// Marker trait identifying a routing profile (e.g. Car, Bike, Pedestrian).
///
/// Profile identifiers act as hash-map keys inside the engine, letting several
/// independent weight sets coexist and be queried concurrently on top of a
/// single shared topology. A blanket implementation covers every type that
/// meets the bounds, so any small `Copy` key (an enum, an integer, ...) works
/// out of the box.
pub trait ProfileId: Copy + Eq + Hash + Send + Sync + 'static {}
impl<T> ProfileId for T where T: Copy + Eq + Hash + Send + Sync + 'static {}
36
// Per-thread reusable query workspace. It starts with capacity 0 and is grown
// lazily on first use (see `CchEngine::query_path`, which reallocates whenever
// the buffer is smaller than the current node count), so threads that never
// issue queries pay nothing and hot threads avoid per-query allocation.
thread_local! {
    static QUERY_BUFFER: RefCell<Query> = RefCell::new(Query::new(0));
}
40
/// Represents the result of a successful shortest-path query.
///
/// Derives `PartialEq` (in addition to `Debug`/`Clone`) so results can be
/// compared directly in tests and deduplication logic; `Eq` is not possible
/// because of the `f32` field.
#[derive(Debug, Clone, PartialEq)]
pub struct PathResult {
    /// The total accumulated weight (distance/time) of the shortest path.
    pub distance: f32,
    /// The sequence of original graph node IDs forming the shortest path.
    pub path: Vec<u32>,
}
49
/// The base graph representation required by the CCH engine.
///
/// This trait abstracts the underlying memory layout of your road network,
/// providing the core structural queries needed to build the elimination tree.
pub trait CchGraph {
    /// Returns the total number of nodes in the graph.
    fn num_nodes(&self) -> usize;
    /// Returns the total number of directed edges in the graph.
    fn num_edges(&self) -> usize;

    /// Returns a slice representing the start indices of outgoing edges for each node.
    /// Standard CSR (Compressed Sparse Row) format; `edge_indices` reads entry
    /// `u + 1`, so the slice must contain one sentinel entry past the last node.
    fn first_out(&self) -> &[u32];
    /// Returns a slice of target nodes corresponding to the edges.
    fn head(&self) -> &[u32];

    /// The X-coordinate (longitude) of the given node.
    fn x(&self, node_id: u32) -> f32;
    /// The Y-coordinate (latitude) of the given node.
    fn y(&self, node_id: u32) -> f32;

    /// Returns the range of edge indices originating from node `u`.
    ///
    /// # Panics
    /// Panics if `u + 1` is out of bounds for `first_out()`.
    #[inline(always)]
    fn edge_indices(&self, u: usize) -> std::ops::Range<usize> {
        // Fetch the CSR offset slice once instead of going through the
        // accessor twice.
        let first_out = self.first_out();
        first_out[u] as usize..first_out[u + 1] as usize
    }

    /// Returns an iterator over the target nodes of all outgoing edges from node `u`.
    #[inline(always)]
    fn neighbors(&self, u: u32) -> impl Iterator<Item = u32> + '_ {
        // `u32` is `Copy`, so `copied()` is the idiomatic equivalent of the
        // previous `cloned()`.
        self.head()[self.edge_indices(u as usize)].iter().copied()
    }
}
86
/// The internal shared data state of the engine.
///
/// This is held behind an `ArcSwap` in [`CchEngine`] so topology rebuilds can be published atomically.
/// Once published, only the per-profile `RwLock` contents and the lazily-set
/// `update_ctx` are ever mutated.
pub struct EngineData<P: ProfileId> {
    /// The shared, metric-independent structural topology.
    pub cch: Arc<Cch>,
    /// The customized metric data specific to each routing profile.
    /// Each profile carries its own lock, so a weight update only blocks
    /// queries against that same profile.
    pub profiles: FxHashMap<P, Arc<RwLock<ProfileData>>>,
    /// Lazily initialized auxiliary indices used to accelerate repeated partial metric updates.
    /// Populated on the first partial update, or eagerly carried forward when a
    /// topology swap finds the previous state had one.
    pub update_ctx: OnceLock<Arc<PartialUpdateContext>>,
}
98
/// The thread-safe, high-performance CCH routing engine.
///
/// This is the primary entry point for issuing queries and updating graph state.
/// By utilizing `ArcSwap`, topology updates are atomically swapped in. Metric updates mutate
/// the affected profile in place behind a read-write lock, so queries to that profile may wait briefly.
///
/// Cloning the engine is cheap: clones share the same `Arc`'d state, so every
/// handle observes the same swaps and updates.
#[derive(Clone)]
pub struct CchEngine<P: ProfileId> {
    /// Lock-free pointer to the current active routing state.
    pub data: Arc<ArcSwap<EngineData<P>>>,
}
109
impl<P: ProfileId> CchEngine<P> {
    /// Bootstraps a new routing engine from a base graph and a set of initial profiles.
    ///
    /// This performs the initial Nested Dissection ordering, topology contraction,
    /// and heavily parallelized metric customization for all provided profiles.
    pub fn new<'a, G: CchGraph + Sync>(graph: &G, profile_weights: impl IntoIterator<Item = (&'a P, &'a Vec<f32>)>) -> Self {
        let cch = Arc::new(Cch::new(graph));
        let mut profiles = FxHashMap::default();
        for (pid, weights) in profile_weights {
            // `w` holds the customized arc weights, `s` the shortcut unpacking data.
            let (w, s) = cch.customize(&cch.weight_map, &cch.scheduler, weights);
            profiles.insert(
                *pid,
                Arc::new(RwLock::new(ProfileData {
                    // Keep a copy of the raw input metric so later partial
                    // updates can patch it in place.
                    input_weights: weights.clone(),
                    weights: w,
                    shortcuts: s,
                })),
            );
        }
        Self {
            data: Arc::new(ArcSwap::from_pointee(EngineData {
                cch,
                profiles,
                // Built lazily on first partial update.
                update_ctx: OnceLock::new(),
            })),
        }
    }

    /// Performs a zero-downtime update of the edge weights for a specific profile (e.g., live traffic).
    ///
    /// This triggers the parallel Customization phase for the new weights and updates the
    /// profile in place. Queries to the same profile will wait on the profile lock.
    /// Unknown profile IDs are silently ignored.
    pub fn update_weights(&self, profile_id: P, new_weights: &[f32]) {
        // NOTE(review): the `load()` guard is held for the whole (potentially
        // long) customization; arc-swap recommends `load_full()` for
        // long-running sections — confirm whether this matters here.
        let current = self.data.load();
        let Some(profile) = current.profiles.get(&profile_id) else {
            return;
        };
        // Write lock is held for the duration of recustomization, blocking
        // queries on this profile only.
        let mut profile = profile.write();
        current.cch.recustomize_profile(&mut profile, new_weights);
    }

    /// Performs an incremental metric update for an existing profile.
    ///
    /// `updates` contains `(original_edge_index, new_weight)` pairs in the base graph's CSR edge order.
    /// The engine only recomputes the affected hierarchy arcs and their dependents.
    /// Unknown profile IDs and empty update sets are no-ops.
    pub fn update_weights_partial(&self, profile_id: P, updates: &[(usize, f32)]) {
        if updates.is_empty() {
            return;
        }

        let current = self.data.load();
        let Some(profile) = current.profiles.get(&profile_id) else {
            return;
        };
        // First caller pays for building the context; subsequent callers (and
        // concurrent racers) share the same `Arc` via `OnceLock::get_or_init`.
        let update_ctx = current.update_ctx.get_or_init(|| Arc::new(current.cch.build_partial_update_context())).clone();

        let mut profile = profile.write();
        // Split the borrow so the three fields can be passed as independent
        // `&mut` references to `customize_partial`.
        let ProfileData { input_weights, weights, shortcuts } = &mut *profile;
        current.cch.customize_partial(&current.cch.weight_map, update_ctx.as_ref(), input_weights, weights, shortcuts, updates);
    }

    /// Performs a partial structural update to the elimination tree (e.g., minor road closures).
    ///
    /// Instead of rebuilding the entire graph from scratch, this identifies the affected
    /// branches of the elimination tree and only recontracts the modified segments.
    /// It then re-customizes the metrics and hot-swaps the active state.
    pub fn update_topology<G: CchGraph + Sync>(&self, graph: &G, modified_old_nodes: &[u32], new_nodes: &[u32], profile_weights: &FxHashMap<P, Vec<f32>>) {
        let current = self.data.load();
        // Work on a private clone; the live state stays untouched until the
        // final atomic `store` below.
        let mut new_cch_inner = (*current.cch).clone();

        // `start_rank` is the lowest rank affected, the recontraction entry point.
        let start_rank = new_cch_inner.update_order(graph, modified_old_nodes, new_nodes);
        let new_topology = new_cch_inner.recontract(graph, start_rank, &current.cch.topology);

        // Rebuild the metric mapping and scheduler against the new topology
        // before installing it.
        new_cch_inner.weight_map = WeightMap::build(graph, &new_cch_inner.ranks, &new_topology);
        new_cch_inner.scheduler = new_topology.build_scheduler();
        new_cch_inner.topology = new_topology;

        let cch = Arc::new(new_cch_inner);
        let mut profiles = FxHashMap::with_capacity_and_hasher(profile_weights.len(), FxBuildHasher);

        // Fully re-customize every provided profile against the new topology.
        for (pid, weights) in profile_weights {
            let (w, s) = cch.customize(&cch.weight_map, &cch.scheduler, weights);
            profiles.insert(
                *pid,
                Arc::new(RwLock::new(ProfileData {
                    input_weights: weights.clone(),
                    weights: w,
                    shortcuts: s,
                })),
            );
        }

        // If partial-update machinery was already in use, rebuild it eagerly so
        // the next partial update doesn't pay the init cost.
        let update_ctx = OnceLock::new();
        if current.update_ctx.get().is_some() {
            let _ = update_ctx.set(Arc::new(cch.build_partial_update_context()));
        }

        // Atomic publication: in-flight readers keep the old state alive until
        // they drop their guards.
        self.data.store(Arc::new(EngineData { cch, profiles, update_ctx }));
    }

    /// Triggers a complete rebuild of the underlying topology and metric data.
    ///
    /// This behaves exactly like `new()`, but atomically swaps the newly built engine state
    /// into the current instance.
    pub fn rebuild_topology<G: CchGraph + Sync>(&self, graph: &G, profile_weights: &FxHashMap<P, Vec<f32>>) {
        let current = self.data.load();
        // Build a throwaway engine and steal its state.
        let new_engine = Self::new(graph, profile_weights);
        let next = new_engine.data.load_full();
        // Preserve the "partial updates in use" property across the rebuild.
        if current.update_ctx.get().is_some() {
            let _ = next.update_ctx.set(Arc::new(next.cch.build_partial_update_context()));
        }
        self.data.store(next);
    }

    /// Executes a shortest-path query between a source and target node for the given profile.
    ///
    /// This method leverages thread-local storage to ensure zero dynamic memory allocation
    /// during the query. It evaluates a bidirectional Dijkstra search over the shortcut hierarchy.
    ///
    /// Returns `None` if the profile doesn't exist or if no path can be found.
    pub fn query_path(&self, profile: P, from: u32, to: u32) -> Option<PathResult> {
        let current = self.data.load();
        let profile_data = current.profiles.get(&profile)?;
        QUERY_BUFFER.with(|q| {
            let mut server = q.borrow_mut();
            let num_nodes = current.cch.ranks.len();
            // Grow (never shrink) the thread-local buffer to fit the current
            // graph, so steady-state queries are allocation-free.
            if server.fw.len() < num_nodes {
                *server = Query::new(num_nodes);
            }
            // Read lock held across distance computation and path unpacking so
            // both see a consistent metric snapshot.
            let profile_data = profile_data.read();
            let result = current.cch.query(&mut server, &profile_data.weights, from, to)?;
            let path = current.cch.query_path(&server, &profile_data.shortcuts, &result);
            Some(PathResult { distance: result.distance, path })
        })
    }
}
246
/// Profile-specific routing metrics resulting from the Customization phase.
///
/// Owned either by a [`CchEngine`] (behind a per-profile lock) or directly by
/// callers using the single-owner `Cch::build_profile` workflow.
#[derive(Clone)]
pub struct ProfileData {
    /// The input weight vector in the base graph's CSR edge order.
    /// Retained so partial updates can patch individual entries in place.
    pub input_weights: Vec<f32>,
    /// The computed weights for the hierarchy arcs.
    pub weights: Weights,
    /// Witness nodes required to unpack shortcut arcs back into original graph node sequences.
    pub shortcuts: Shortcuts,
}
257
/// The profile-independent, structural core of the Contraction Hierarchy.
///
/// This maintains the node ordering, the elimination tree, and the mapping
/// from the original graph's edges to the hierarchy's arcs.
#[derive(Clone)]
pub struct Cch {
    /// The structural contraction hierarchy containing forward and backward shortcut arcs.
    pub topology: Topology,
    /// Maps original edge indices to their corresponding arc in the `Topology`.
    pub weight_map: WeightMap,
    /// Grouped nodes mapped by tree depth, allowing safe parallelization during customization.
    pub scheduler: Vec<Vec<u32>>,

    /// Maps original node IDs to their rank in the contraction order.
    /// Always the inverse permutation of `order`.
    pub ranks: Vec<u32>,
    /// Maps ranks back to their original node IDs.
    pub order: Vec<u32>,
}
276
277impl Cch {
278    /// Constructs a new structural hierarchy from a given graph.
279    ///
280    /// Computes the node contraction order via METIS, contracts the graph to build
281    /// the topology, and maps out the parallel scheduler.
282    pub fn new<G: CchGraph + Sync>(graph: &G) -> Self {
283        let ranks = Self::get_metis_order(graph);
284
285        let mut order = vec![0; graph.num_nodes()];
286        for (node_id, &rank) in ranks.iter().enumerate() {
287            order[rank as usize] = node_id as u32;
288        }
289
290        let topology = Self::contract(&ranks, graph);
291        let weight_map = WeightMap::build(graph, &ranks, &topology);
292        let scheduler = topology.build_scheduler();
293
294        Self {
295            topology,
296            weight_map,
297            scheduler,
298            ranks,
299            order,
300        }
301    }
302
303    /// Builds a reusable mutable profile for a fixed CCH topology.
304    ///
305    /// Unlike [`CchEngine`], this is intended for single-owner workflows such as
306    /// benchmarking or offline customization, where in-place updates are acceptable.
307    pub fn build_profile(&self, original_weights: &[f32]) -> ProfileData {
308        let (weights, shortcuts) = self.customize(&self.weight_map, &self.scheduler, original_weights);
309        ProfileData {
310            input_weights: original_weights.to_vec(),
311            weights,
312            shortcuts,
313        }
314    }
315
316    /// Re-customizes an owned profile in place, reusing its allocations.
317    pub fn recustomize_profile(&self, profile: &mut ProfileData, new_weights: &[f32]) {
318        profile.input_weights.clear();
319        profile.input_weights.extend_from_slice(new_weights);
320        let (weights, shortcuts) = self.customize(&self.weight_map, &self.scheduler, &profile.input_weights);
321        profile.weights = weights;
322        profile.shortcuts = shortcuts;
323    }
324
325    /// Applies a partial metric update to an owned profile in place.
326    pub fn customize_profile_partial(&self, profile: &mut ProfileData, updates: &[(usize, f32)]) {
327        let update_ctx = self.build_partial_update_context();
328        self.customize_profile_partial_with_context(profile, &update_ctx, updates);
329    }
330
331    /// Applies a partial metric update to an owned profile in place using a reusable context.
332    pub fn customize_profile_partial_with_context(&self, profile: &mut ProfileData, update_ctx: &PartialUpdateContext, updates: &[(usize, f32)]) {
333        self.customize_partial(&self.weight_map, update_ctx, &mut profile.input_weights, &mut profile.weights, &mut profile.shortcuts, updates);
334    }
335
336    /// Incrementally updates the node order mapping when new nodes are added or modified.
337    ///
338    /// Returns the lowest rank affected by the changes, which serves as the starting
339    /// point for a partial topology `recontract`.
340    pub fn update_order<G: CchGraph + Sync>(&mut self, graph: &G, modified_old_nodes: &[u32], new_nodes: &[u32]) -> u32 {
341        let old_num = self.ranks.len();
342        let new_num = graph.num_nodes();
343
344        self.ranks.resize(new_num, 0);
345        self.order.resize(new_num, 0);
346
347        for (i, &u) in new_nodes.iter().enumerate() {
348            let rank = (old_num + i) as u32;
349            self.ranks[u as usize] = rank;
350            self.order[rank as usize] = u;
351        }
352
353        let mut min_dirty_rank = old_num as u32;
354        for &u in modified_old_nodes {
355            let r = self.ranks[u as usize];
356            if r < min_dirty_rank {
357                min_dirty_rank = r;
358            }
359        }
360
361        min_dirty_rank
362    }
363
364    /// Returns the mapping from rank to original node ID.
365    pub fn get_order(&self) -> &[u32] { &self.order }
366
367    /// Returns the mapping from original node ID to rank.
368    pub fn get_ranks(&self) -> &[u32] { &self.ranks }
369}