sqlitegraph 2.2.2

Embedded graph database with full ACID transactions, HNSW vector search, dual backend support, and comprehensive graph algorithms library
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
//! Centrality algorithms for graph analysis.
//!
//! This module provides algorithms for measuring node importance in a graph
//! based on position and connectivity. Centrality measures help identify
//! influential nodes, bridges between communities, and key control points.
//!
//! # Available Algorithms
//!
//! - [`pagerank`] - PageRank centrality for identifying influential nodes
//! - [`betweenness_centrality`] - Betweenness centrality for finding bridge nodes
//!
//! # When to Use Centrality Algorithms
//!
//! - **PageRank**: Identify influential nodes in citation networks, rank web pages,
//!   find key entities in knowledge graphs, recommendation systems based on graph
//!   structure
//! - **Betweenness Centrality**: Find bridge nodes connecting communities,
//!   identify bottlenecks in communication networks, detect control points in flow
//!   networks, analyze information flow in social networks

use std::collections::VecDeque;

use ahash::AHashMap;

use crate::progress::ProgressCallback;
use crate::{errors::SqliteGraphError, graph::SqliteGraph};

/// Computes PageRank scores for all nodes in the graph.
///
/// PageRank measures node importance based on link structure. Nodes with many
/// incoming links from important nodes receive higher scores. Originally developed
/// by Google for ranking web pages.
///
/// # Arguments
/// * `graph` - The graph to analyze
/// * `damping` - Damping factor (typically 0.85), representing probability of continuing random walk
/// * `iterations` - Number of power iteration iterations (20-50 recommended, fixed not convergence-based)
///
/// # Returns
/// Vector of (node_id, score) tuples sorted by score descending. Scores sum to approximately 1.0.
///
/// # Complexity
/// Time: O(k * |E|) where k = iterations
/// Space: O(|V| + |E|) for score storage and the cached adjacency lists
///
/// # Algorithm Details
/// Uses power iteration method (fixed iteration count for determinism):
/// 1. Initialize all nodes with equal score (1.0 / node_count)
/// 2. For each iteration:
///    - new_score = (1-d)/n + d * sum(incoming_scores / outgoing_count)
///    - Handle dangling nodes (no outgoing edges) by redistributing their score equally
/// 3. Sort results by score descending
///
/// # References
/// - Page, L., Brin, S., Motwani, R., & Winograd, T. (1999). "The PageRank Citation Ranking: Bringing Order to the Web."
///
/// # Example
/// ```rust
/// use sqlitegraph::{SqliteGraph, algo::pagerank};
/// let graph = SqliteGraph::open_in_memory()?;
/// // ... add nodes and edges ...
/// let scores = pagerank(&graph, 0.85, 20)?;
/// ```
pub fn pagerank(
    graph: &SqliteGraph,
    damping: f64,
    iterations: usize,
) -> Result<Vec<(i64, f64)>, SqliteGraphError> {
    let all_ids = graph.all_entity_ids()?;
    let n = all_ids.len();

    if n == 0 {
        return Ok(Vec::new());
    }

    // Fetch each node's outgoing neighbor list exactly once and reuse it for
    // every iteration. The previous implementation re-queried the backend for
    // each node on each iteration (plus once more for the counts), costing
    // O(k * |V|) storage round-trips instead of O(|V|).
    let mut adjacency = AHashMap::with_capacity(n);
    for &id in &all_ids {
        adjacency.insert(id, graph.fetch_outgoing(id)?);
    }

    // Every node starts with an equal share of the total probability mass.
    let mut scores: AHashMap<i64, f64> = all_ids.iter().map(|&id| (id, 1.0 / n as f64)).collect();

    // Power iteration: a fixed number of rounds keeps the output deterministic.
    for _ in 0..iterations {
        // Seed each node with the teleport probability (1-d)/n.
        let base_score = (1.0 - damping) / n as f64;
        let mut new_scores: AHashMap<i64, f64> =
            all_ids.iter().map(|&id| (id, base_score)).collect();

        // Score mass held by dangling nodes (no outgoing edges); redistributed
        // equally to all nodes after the main pass so mass is conserved.
        let mut dangling_score = 0.0;

        // Distribute each node's current score across its outgoing neighbors.
        for &id in &all_ids {
            let score = scores[&id];
            let neighbors = &adjacency[&id];

            if neighbors.is_empty() {
                // Dangling node - add score to redistribution pool.
                dangling_score += score;
            } else {
                let share = score / neighbors.len() as f64;
                for neighbor in neighbors {
                    *new_scores.get_mut(neighbor).unwrap() += damping * share;
                }
            }
        }

        // Redistribute dangling score equally to all nodes.
        let dangling_share = damping * dangling_score / n as f64;
        for score in new_scores.values_mut() {
            *score += dangling_share;
        }

        scores = new_scores;
    }

    // Sort by score descending; break ties by ascending node id so the output
    // ordering is fully deterministic.
    let mut result: Vec<(i64, f64)> = scores.into_iter().collect();
    result.sort_by(|a, b| {
        b.1.partial_cmp(&a.1)
            .unwrap_or(std::cmp::Ordering::Equal)
            .then_with(|| a.0.cmp(&b.0))
    });

    Ok(result)
}

/// Computes PageRank scores with progress callback reporting.
///
/// This is the progress-reporting variant of [`pagerank`]. See that function
/// for full algorithm documentation.
///
/// # Arguments
/// * `graph` - The graph to analyze
/// * `damping` - Damping factor (typically 0.85)
/// * `iterations` - Number of power iteration iterations
/// * `progress` - Callback for progress updates
///
/// # Progress Reporting
/// - Reports progress at each iteration: "PageRank iteration X/Y"
/// - Calls `on_complete()` when finished
/// - Errors are propagated to the caller via `Result`; this function does not
///   invoke `on_error()` itself
///
/// # Example
///
/// ```rust
/// use sqlitegraph::{SqliteGraph, algo::pagerank_with_progress};
/// use sqlitegraph::progress::NoProgress;
///
/// let graph = SqliteGraph::open_in_memory()?;
/// // ... add nodes and edges ...
/// let progress = NoProgress;
/// let scores = pagerank_with_progress(&graph, 0.85, 20, &progress)?;
/// ```
pub fn pagerank_with_progress<F>(
    graph: &SqliteGraph,
    damping: f64,
    iterations: usize,
    progress: &F,
) -> Result<Vec<(i64, f64)>, SqliteGraphError>
where
    F: ProgressCallback,
{
    let all_ids = graph.all_entity_ids()?;
    let n = all_ids.len();

    if n == 0 {
        progress.on_complete();
        return Ok(Vec::new());
    }

    // Fetch each node's outgoing neighbor list exactly once and reuse it for
    // every iteration; the previous implementation re-queried the backend per
    // node per iteration (O(k * |V|) storage round-trips instead of O(|V|)).
    let mut adjacency = AHashMap::with_capacity(n);
    for &id in &all_ids {
        adjacency.insert(id, graph.fetch_outgoing(id)?);
    }

    // Every node starts with an equal share of the total probability mass.
    let mut scores: AHashMap<i64, f64> = all_ids.iter().map(|&id| (id, 1.0 / n as f64)).collect();

    // Power iteration with progress reporting.
    for iteration in 0..iterations {
        progress.on_progress(
            iteration + 1,
            Some(iterations),
            &format!("PageRank iteration {}", iteration + 1),
        );

        // Seed each node with the teleport probability (1-d)/n.
        let base_score = (1.0 - damping) / n as f64;
        let mut new_scores: AHashMap<i64, f64> =
            all_ids.iter().map(|&id| (id, base_score)).collect();

        // Score mass held by dangling nodes (no outgoing edges); redistributed
        // equally to all nodes after the main pass so mass is conserved.
        let mut dangling_score = 0.0;

        // Distribute each node's current score across its outgoing neighbors.
        for &id in &all_ids {
            let score = scores[&id];
            let neighbors = &adjacency[&id];

            if neighbors.is_empty() {
                // Dangling node - add score to redistribution pool.
                dangling_score += score;
            } else {
                let share = score / neighbors.len() as f64;
                for neighbor in neighbors {
                    *new_scores.get_mut(neighbor).unwrap() += damping * share;
                }
            }
        }

        // Redistribute dangling score equally to all nodes.
        let dangling_share = damping * dangling_score / n as f64;
        for score in new_scores.values_mut() {
            *score += dangling_share;
        }

        scores = new_scores;
    }

    progress.on_complete();

    // Sort by score descending; break ties by ascending node id so the output
    // ordering is fully deterministic.
    let mut result: Vec<(i64, f64)> = scores.into_iter().collect();
    result.sort_by(|a, b| {
        b.1.partial_cmp(&a.1)
            .unwrap_or(std::cmp::Ordering::Equal)
            .then_with(|| a.0.cmp(&b.0))
    });

    Ok(result)
}

/// Computes betweenness centrality for all nodes in the graph.
///
/// Betweenness centrality measures how often a node appears on shortest paths
/// between other nodes. Bridge nodes (connecting different parts of the graph)
/// score higher. Useful for finding bottlenecks or control points in networks.
///
/// # Arguments
/// * `graph` - The graph to analyze
///
/// # Returns
/// Vector of (node_id, centrality) tuples sorted by centrality descending.
/// Values are raw (unnormalized) directed betweenness sums; for a graph stored
/// as symmetric directed edges, divide each value by 2 to obtain the
/// undirected centrality.
///
/// # Complexity
/// Time: O(|V| * |E|) for unweighted graphs (Brandes' algorithm)
/// Space: O(|V| + |E|) for the cached adjacency, BFS traversal, and accumulation
///
/// # Algorithm Details
/// Implements Brandes' algorithm for unweighted graphs:
/// 1. For each node s, run BFS to compute shortest paths
/// 2. Track predecessors and path counts during BFS
/// 3. Accumulate dependency values (how much s depends on each node)
/// 4. Sum dependencies across all source nodes
///
/// Handles disconnected components gracefully (pairs with no path are ignored).
///
/// # Caveats
/// - Expensive for large graphs (O(VE) time complexity)
/// - Does not support edge weights (unweighted only)
/// - For graphs > 10K nodes, consider sampling approximation
///
/// # References
/// - Brandes, U. (2001). "A Faster Algorithm for Betweenness Centrality."
///
/// # Example
/// ```rust
/// use sqlitegraph::{SqliteGraph, algo::betweenness_centrality};
/// let graph = SqliteGraph::open_in_memory()?;
/// // ... add nodes and edges ...
/// let centrality = betweenness_centrality(&graph)?;
/// ```
pub fn betweenness_centrality(graph: &SqliteGraph) -> Result<Vec<(i64, f64)>, SqliteGraphError> {
    let all_ids = graph.all_entity_ids()?;
    let n = all_ids.len();

    if n == 0 {
        return Ok(Vec::new());
    }

    // Fetch each node's outgoing neighbor list exactly once up front. The
    // previous implementation re-queried the backend inside every BFS,
    // costing O(|V|^2) storage round-trips instead of O(|V|).
    // Note: like the original, this assumes every edge target appears in
    // `all_entity_ids()` (the delta/centrality maps already relied on that).
    let mut adjacency = AHashMap::with_capacity(n);
    for &id in &all_ids {
        adjacency.insert(id, graph.fetch_outgoing(id)?);
    }

    // Accumulated centrality scores across all sources.
    let mut centrality: AHashMap<i64, f64> = all_ids.iter().map(|&id| (id, 0.0)).collect();

    // Brandes' algorithm: run one BFS + accumulation pass per source node.
    for &s in &all_ids {
        // Phase 1: BFS from s, recording shortest-path distances, the number
        // of shortest paths (sigma), and shortest-path predecessors.
        let mut dist: AHashMap<i64, i64> = AHashMap::new();
        let mut sigma: AHashMap<i64, f64> = AHashMap::new(); // number of shortest paths
        let mut predecessors: AHashMap<i64, Vec<i64>> = AHashMap::new();

        dist.insert(s, 0);
        sigma.insert(s, 1.0);

        let mut queue = VecDeque::new();
        queue.push_back(s);

        while let Some(v) = queue.pop_front() {
            for &w in &adjacency[&v] {
                // First time discovering w.
                if !dist.contains_key(&w) {
                    dist.insert(w, dist[&v] + 1);
                    queue.push_back(w);
                }

                // v lies on a shortest path to w.
                if dist.get(&w) == Some(&(dist[&v] + 1)) {
                    *sigma.entry(w).or_insert(0.0) += sigma[&v];
                    predecessors.entry(w).or_default().push(v);
                }
            }
        }

        // Phase 2: back-propagate dependencies in order of decreasing
        // distance from s (Brandes' accumulation step).
        let mut delta: AHashMap<i64, f64> = all_ids.iter().map(|&id| (id, 0.0)).collect();

        let mut nodes: Vec<i64> = dist.keys().copied().collect();
        nodes.sort_by_key(|&id| std::cmp::Reverse(dist[&id]));

        for w in nodes {
            // The source's own dependency is never counted.
            if w == s {
                continue;
            }

            if let Some(preds) = predecessors.get(&w) {
                for &v in preds {
                    *delta.get_mut(&v).unwrap() += (sigma[&v] / sigma[&w]) * (1.0 + delta[&w]);
                }
            }

            // w != s is guaranteed here because the source was skipped above.
            *centrality.get_mut(&w).unwrap() += delta[&w];
        }
    }

    // Sort by centrality descending; break ties by ascending node id so the
    // output ordering is fully deterministic.
    let mut result: Vec<(i64, f64)> = centrality.into_iter().collect();
    result.sort_by(|a, b| {
        b.1.partial_cmp(&a.1)
            .unwrap_or(std::cmp::Ordering::Equal)
            .then_with(|| a.0.cmp(&b.0))
    });

    Ok(result)
}

/// Computes betweenness centrality with progress callback reporting.
///
/// This is the progress-reporting variant of [`betweenness_centrality`]. See that function
/// for full algorithm documentation.
///
/// # Arguments
/// * `graph` - The graph to analyze
/// * `progress` - Callback for progress updates
///
/// # Progress Reporting
/// - Reports progress for each source node: "Betweenness: processing source X/Y"
/// - Total is the number of nodes in the graph
/// - Calls `on_complete()` when finished
/// - Errors are propagated to the caller via `Result`; this function does not
///   invoke `on_error()` itself
///
/// # Example
///
/// ```rust
/// use sqlitegraph::{SqliteGraph, algo::betweenness_centrality_with_progress};
/// use sqlitegraph::progress::NoProgress;
///
/// let graph = SqliteGraph::open_in_memory()?;
/// // ... add nodes and edges ...
/// let progress = NoProgress;
/// let centrality = betweenness_centrality_with_progress(&graph, &progress)?;
/// ```
pub fn betweenness_centrality_with_progress<F>(
    graph: &SqliteGraph,
    progress: &F,
) -> Result<Vec<(i64, f64)>, SqliteGraphError>
where
    F: ProgressCallback,
{
    let all_ids = graph.all_entity_ids()?;
    let n = all_ids.len();

    if n == 0 {
        progress.on_complete();
        return Ok(Vec::new());
    }

    // Fetch each node's outgoing neighbor list exactly once up front; the
    // previous implementation re-queried the backend inside every BFS
    // (O(|V|^2) storage round-trips instead of O(|V|)).
    // Note: like the original, this assumes every edge target appears in
    // `all_entity_ids()` (the delta/centrality maps already relied on that).
    let mut adjacency = AHashMap::with_capacity(n);
    for &id in &all_ids {
        adjacency.insert(id, graph.fetch_outgoing(id)?);
    }

    // Accumulated centrality scores across all sources.
    let mut centrality: AHashMap<i64, f64> = all_ids.iter().map(|&id| (id, 0.0)).collect();

    // Brandes' algorithm: run one BFS + accumulation pass per source node.
    for (idx, &s) in all_ids.iter().enumerate() {
        progress.on_progress(
            idx + 1,
            Some(n),
            &format!("Betweenness: processing source {}/{}", idx + 1, n),
        );

        // Phase 1: BFS from s, recording shortest-path distances, the number
        // of shortest paths (sigma), and shortest-path predecessors.
        let mut dist: AHashMap<i64, i64> = AHashMap::new();
        let mut sigma: AHashMap<i64, f64> = AHashMap::new(); // number of shortest paths
        let mut predecessors: AHashMap<i64, Vec<i64>> = AHashMap::new();

        dist.insert(s, 0);
        sigma.insert(s, 1.0);

        let mut queue = VecDeque::new();
        queue.push_back(s);

        while let Some(v) = queue.pop_front() {
            for &w in &adjacency[&v] {
                // First time discovering w.
                if !dist.contains_key(&w) {
                    dist.insert(w, dist[&v] + 1);
                    queue.push_back(w);
                }

                // v lies on a shortest path to w.
                if dist.get(&w) == Some(&(dist[&v] + 1)) {
                    *sigma.entry(w).or_insert(0.0) += sigma[&v];
                    predecessors.entry(w).or_default().push(v);
                }
            }
        }

        // Phase 2: back-propagate dependencies in order of decreasing
        // distance from s (Brandes' accumulation step).
        let mut delta: AHashMap<i64, f64> = all_ids.iter().map(|&id| (id, 0.0)).collect();

        let mut nodes: Vec<i64> = dist.keys().copied().collect();
        nodes.sort_by_key(|&id| std::cmp::Reverse(dist[&id]));

        for w in nodes {
            // The source's own dependency is never counted.
            if w == s {
                continue;
            }

            if let Some(preds) = predecessors.get(&w) {
                for &v in preds {
                    *delta.get_mut(&v).unwrap() += (sigma[&v] / sigma[&w]) * (1.0 + delta[&w]);
                }
            }

            // w != s is guaranteed here because the source was skipped above.
            *centrality.get_mut(&w).unwrap() += delta[&w];
        }
    }

    progress.on_complete();

    // Sort by centrality descending; break ties by ascending node id so the
    // output ordering is fully deterministic.
    let mut result: Vec<(i64, f64)> = centrality.into_iter().collect();
    result.sort_by(|a, b| {
        b.1.partial_cmp(&a.1)
            .unwrap_or(std::cmp::Ordering::Equal)
            .then_with(|| a.0.cmp(&b.0))
    });

    Ok(result)
}