// ruvector_solver_node/lib.rs
//! Node.js NAPI bindings for the RuVector sublinear-time solver.
//!
//! Provides high-performance sparse linear system solving, PageRank
//! computation, and complexity estimation for Node.js applications.
//!
//! All heavy computation runs on worker threads via `tokio::task::spawn_blocking`
//! to avoid blocking the Node.js event loop.

#![deny(clippy::all)]

use napi::bindgen_prelude::*;
use napi_derive::napi;
use ruvector_solver::types::Algorithm;
use std::time::Instant;

// ---------------------------------------------------------------------------
// Configuration types (NAPI objects)
// ---------------------------------------------------------------------------

/// Configuration for solving a sparse linear system Ax = b.
///
/// The matrix A is supplied in Compressed Sparse Row (CSR) form. Field
/// names surface to JavaScript in camelCase (e.g. `colIndices`, `rowPtrs`),
/// as shown in the usage examples on [`NapiSolver`].
#[napi(object)]
pub struct SolveConfig {
    /// Non-zero values in CSR format.
    pub values: Vec<f64>,
    /// Column indices for each non-zero entry (parallel to `values`).
    pub col_indices: Vec<u32>,
    /// Row pointers (length = rows + 1).
    pub row_ptrs: Vec<u32>,
    /// Number of rows in the matrix.
    pub rows: u32,
    /// Number of columns in the matrix.
    pub cols: u32,
    /// Right-hand side vector b (length must equal `rows`).
    pub rhs: Vec<f64>,
    /// Convergence tolerance (default: 1e-6).
    pub tolerance: Option<f64>,
    /// Maximum number of iterations (default: 1000).
    pub max_iterations: Option<u32>,
    /// Algorithm to use: "neumann", "jacobi", "gauss-seidel", "conjugate-gradient".
    /// Defaults to "jacobi".
    pub algorithm: Option<String>,
}

/// Result of solving a sparse linear system.
#[napi(object)]
pub struct SolveResult {
    /// Solution vector x.
    pub solution: Vec<f64>,
    /// Number of iterations performed.
    pub iterations: u32,
    /// Final residual norm ||Ax - b|| (L2).
    pub residual: f64,
    /// Whether the solver converged within tolerance.
    pub converged: bool,
    /// Algorithm that was used (display name of the core enum).
    pub algorithm: String,
    /// Wall-clock time in microseconds (saturates at u32::MAX).
    pub time_us: u32,
}

/// Configuration for PageRank computation.
///
/// The graph adjacency matrix is given in CSR form: row i lists the
/// weighted out-edges of node i.
#[napi(object)]
pub struct PageRankConfig {
    /// Non-zero values in CSR format (edge weights).
    pub values: Vec<f64>,
    /// Column indices for each non-zero entry (edge targets).
    pub col_indices: Vec<u32>,
    /// Row pointers (length = rows + 1).
    pub row_ptrs: Vec<u32>,
    /// Number of nodes in the graph (matrix is num_nodes x num_nodes).
    pub num_nodes: u32,
    /// Damping factor in [0, 1] (default: 0.85).
    pub damping: Option<f64>,
    /// Convergence tolerance (default: 1e-6).
    pub tolerance: Option<f64>,
    /// Maximum number of iterations (default: 100).
    pub max_iterations: Option<u32>,
    /// Personalization vector, length `num_nodes` (uniform if omitted).
    pub personalization: Option<Vec<f64>>,
}

/// Result of PageRank computation.
#[napi(object)]
pub struct PageRankResult {
    /// PageRank scores for each node (sums to 1.0).
    pub scores: Vec<f64>,
    /// Number of iterations performed.
    pub iterations: u32,
    /// Final convergence residual (L1 distance between iterates).
    pub residual: f64,
    /// Whether the computation converged.
    pub converged: bool,
    /// Wall-clock time in microseconds (saturates at u32::MAX).
    pub time_us: u32,
}

/// Configuration for complexity estimation.
#[napi(object)]
pub struct ComplexityConfig {
    /// Number of rows in the matrix.
    pub rows: u32,
    /// Number of non-zero entries.
    pub nnz: u32,
    /// Algorithm to estimate for; a generic estimate is used when omitted.
    pub algorithm: Option<String>,
}

/// Result of complexity estimation.
#[napi(object)]
pub struct ComplexityResult {
    /// Estimated time complexity class (e.g. "O(n log n)").
    pub complexity_class: String,
    /// Estimated number of floating-point operations.
    pub estimated_flops: f64,
    /// Recommended algorithm for this problem size and sparsity.
    pub recommended_algorithm: String,
    /// Estimated wall-clock time in microseconds.
    pub estimated_time_us: f64,
    /// Sparsity ratio (nnz / n^2); 0 when n == 0.
    pub sparsity: f64,
}

/// Convergence history entry: one residual sample per solver iteration.
#[napi(object)]
pub struct ConvergenceEntry {
    /// Iteration index (0-based).
    pub iteration: u32,
    /// Residual norm at this iteration.
    pub residual: f64,
}

/// Result of solving with convergence history.
///
/// Same fields as [`SolveResult`] plus the per-iteration residual trace.
#[napi(object)]
pub struct SolveWithHistoryResult {
    /// Solution vector x.
    pub solution: Vec<f64>,
    /// Number of iterations performed.
    pub iterations: u32,
    /// Final residual norm.
    pub residual: f64,
    /// Whether the solver converged.
    pub converged: bool,
    /// Algorithm used.
    pub algorithm: String,
    /// Wall-clock time in microseconds.
    pub time_us: u32,
    /// Per-iteration convergence history.
    pub convergence_history: Vec<ConvergenceEntry>,
}

// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------

155/// Parse an algorithm name string into the core Algorithm enum.
156fn parse_algorithm(name: &str) -> Result<Algorithm> {
157    match name.to_lowercase().as_str() {
158        "neumann" | "neumann-series" => Ok(Algorithm::Neumann),
159        "jacobi" => Ok(Algorithm::Jacobi),
160        "gauss-seidel" | "gaussseidel" | "gs" => Ok(Algorithm::GaussSeidel),
161        "forward-push" | "forwardpush" => Ok(Algorithm::ForwardPush),
162        "backward-push" | "backwardpush" => Ok(Algorithm::BackwardPush),
163        "conjugate-gradient" | "cg" => Ok(Algorithm::CG),
164        other => Err(Error::new(
165            Status::InvalidArg,
166            format!(
167                "Unknown algorithm '{}'. Expected one of: neumann, jacobi, \
168                 gauss-seidel, conjugate-gradient, forward-push, backward-push",
169                other
170            ),
171        )),
172    }
173}
174
175/// Validate CSR input dimensions for consistency.
176fn validate_csr_input(
177    values: &[f64],
178    col_indices: &[u32],
179    row_ptrs: &[u32],
180    rows: usize,
181    cols: usize,
182) -> Result<()> {
183    if row_ptrs.len() != rows + 1 {
184        return Err(Error::new(
185            Status::InvalidArg,
186            format!(
187                "row_ptrs length {} does not equal rows + 1 = {}",
188                row_ptrs.len(),
189                rows + 1
190            ),
191        ));
192    }
193
194    if values.len() != col_indices.len() {
195        return Err(Error::new(
196            Status::InvalidArg,
197            format!(
198                "values length {} does not match col_indices length {}",
199                values.len(),
200                col_indices.len()
201            ),
202        ));
203    }
204
205    let expected_nnz = row_ptrs[rows] as usize;
206    if values.len() != expected_nnz {
207        return Err(Error::new(
208            Status::InvalidArg,
209            format!(
210                "values length {} does not match row_ptrs[rows] = {}",
211                values.len(),
212                expected_nnz
213            ),
214        ));
215    }
216
217    // Validate monotonicity of row_ptrs.
218    for i in 1..row_ptrs.len() {
219        if row_ptrs[i] < row_ptrs[i - 1] {
220            return Err(Error::new(
221                Status::InvalidArg,
222                format!(
223                    "row_ptrs is not monotonically non-decreasing at position {}",
224                    i
225                ),
226            ));
227        }
228    }
229
230    // Validate column indices and value finiteness.
231    for (idx, (&col, &val)) in col_indices.iter().zip(values.iter()).enumerate() {
232        if col as usize >= cols {
233            return Err(Error::new(
234                Status::InvalidArg,
235                format!(
236                    "column index {} at position {} is out of bounds (cols={})",
237                    col, idx, cols
238                ),
239            ));
240        }
241        if !val.is_finite() {
242            return Err(Error::new(
243                Status::InvalidArg,
244                format!("non-finite value {} at position {}", val, idx),
245            ));
246        }
247    }
248
249    Ok(())
250}
251
/// Sparse matrix-vector multiply y = A*x using CSR arrays.
///
/// Only the first `rows` entries of `y` are written.
fn spmv_f64(
    row_ptrs: &[usize],
    col_indices: &[usize],
    values: &[f64],
    x: &[f64],
    y: &mut [f64],
    rows: usize,
) {
    for (row, out) in y.iter_mut().enumerate().take(rows) {
        // Dot product of row `row` with x over its non-zero pattern.
        *out = (row_ptrs[row]..row_ptrs[row + 1])
            .map(|k| values[k] * x[col_indices[k]])
            .sum();
    }
}

/// Compute the L2 norm of the residual r = b - A*x for a CSR matrix.
fn residual_norm(
    row_ptrs: &[usize],
    col_indices: &[usize],
    values: &[f64],
    x: &[f64],
    b: &[f64],
    rows: usize,
) -> f64 {
    let mut acc = 0.0f64;
    for row in 0..rows {
        // (A*x)[row] over the row's non-zero pattern.
        let ax: f64 = (row_ptrs[row]..row_ptrs[row + 1])
            .map(|k| values[k] * x[col_indices[k]])
            .sum();
        let diff = b[row] - ax;
        acc += diff * diff;
    }
    acc.sqrt()
}

/// Extract the diagonal entries of a CSR matrix.
///
/// Returns `None` if any diagonal entry is missing or effectively zero
/// (|a_ii| < 1e-15), since callers divide by these entries.
fn extract_diagonal(
    row_ptrs: &[usize],
    col_indices: &[usize],
    values: &[f64],
    rows: usize,
) -> Option<Vec<f64>> {
    let mut diag = Vec::with_capacity(rows);
    for row in 0..rows {
        // First entry in this row whose column equals the row index.
        let entry = (row_ptrs[row]..row_ptrs[row + 1])
            .find(|&k| col_indices[k] == row)
            .map(|k| values[k])?;
        if entry.abs() < 1e-15 {
            return None;
        }
        diag.push(entry);
    }
    Some(diag)
}

323/// Jacobi iterative solver for Ax = b.
324///
325/// Requires the diagonal of A to be non-zero. Iterates:
326///   x_{k+1}[i] = (b[i] - sum_{j!=i} a_{ij} * x_k[j]) / a_{ii}
327fn solve_jacobi(
328    row_ptrs: &[usize],
329    col_indices: &[usize],
330    values: &[f64],
331    rhs: &[f64],
332    rows: usize,
333    tolerance: f64,
334    max_iterations: usize,
335) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
336    let mut x = vec![0.0f64; rows];
337    let mut x_new = vec![0.0f64; rows];
338    let mut history = Vec::new();
339
340    let diag = match extract_diagonal(row_ptrs, col_indices, values, rows) {
341        Some(d) => d,
342        None => {
343            let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
344            history.push((0, res));
345            return (x, 0, res, false, history);
346        }
347    };
348
349    let mut converged = false;
350    let mut final_residual = f64::MAX;
351    let mut iters = 0;
352
353    for iter in 0..max_iterations {
354        for i in 0..rows {
355            let start = row_ptrs[i];
356            let end = row_ptrs[i + 1];
357            let mut sigma = 0.0f64;
358            for idx in start..end {
359                let j = col_indices[idx];
360                if j != i {
361                    sigma += values[idx] * x[j];
362                }
363            }
364            x_new[i] = (rhs[i] - sigma) / diag[i];
365        }
366
367        std::mem::swap(&mut x, &mut x_new);
368
369        let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
370        history.push((iter, res));
371        final_residual = res;
372        iters = iter + 1;
373
374        if res < tolerance {
375            converged = true;
376            break;
377        }
378    }
379
380    (x, iters, final_residual, converged, history)
381}
382
383/// Gauss-Seidel iterative solver for Ax = b.
384///
385/// Updates x in-place within each iteration using the most recent values.
386/// Generally converges faster than Jacobi for the same problem.
387fn solve_gauss_seidel(
388    row_ptrs: &[usize],
389    col_indices: &[usize],
390    values: &[f64],
391    rhs: &[f64],
392    rows: usize,
393    tolerance: f64,
394    max_iterations: usize,
395) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
396    let mut x = vec![0.0f64; rows];
397    let mut history = Vec::new();
398
399    let diag = match extract_diagonal(row_ptrs, col_indices, values, rows) {
400        Some(d) => d,
401        None => {
402            let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
403            history.push((0, res));
404            return (x, 0, res, false, history);
405        }
406    };
407
408    let mut converged = false;
409    let mut final_residual = f64::MAX;
410    let mut iters = 0;
411
412    for iter in 0..max_iterations {
413        for i in 0..rows {
414            let start = row_ptrs[i];
415            let end = row_ptrs[i + 1];
416            let mut sigma = 0.0f64;
417            for idx in start..end {
418                let j = col_indices[idx];
419                if j != i {
420                    sigma += values[idx] * x[j];
421                }
422            }
423            x[i] = (rhs[i] - sigma) / diag[i];
424        }
425
426        let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
427        history.push((iter, res));
428        final_residual = res;
429        iters = iter + 1;
430
431        if res < tolerance {
432            converged = true;
433            break;
434        }
435    }
436
437    (x, iters, final_residual, converged, history)
438}
439
440/// Neumann series solver: x = sum_{k=0}^{K} (I - A)^k * b.
441///
442/// Converges when the spectral radius of (I - A) is less than 1.
443fn solve_neumann(
444    row_ptrs: &[usize],
445    col_indices: &[usize],
446    values: &[f64],
447    rhs: &[f64],
448    rows: usize,
449    tolerance: f64,
450    max_iterations: usize,
451) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
452    let mut x = vec![0.0f64; rows];
453    let mut term = rhs.to_vec();
454    let mut temp = vec![0.0f64; rows];
455    let mut history = Vec::new();
456
457    let mut converged = false;
458    let mut final_residual = f64::MAX;
459    let mut iters = 0;
460
461    for iter in 0..max_iterations {
462        // Accumulate current term into x.
463        for i in 0..rows {
464            x[i] += term[i];
465        }
466
467        // Compute next term: term_{k+1} = (I - A) * term_k
468        spmv_f64(row_ptrs, col_indices, values, &term, &mut temp, rows);
469        for i in 0..rows {
470            temp[i] = term[i] - temp[i];
471        }
472        std::mem::swap(&mut term, &mut temp);
473
474        let term_norm: f64 = term.iter().map(|&t| t * t).sum::<f64>().sqrt();
475        let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
476        history.push((iter, res));
477        final_residual = res;
478        iters = iter + 1;
479
480        if res < tolerance || term_norm < tolerance * 1e-2 {
481            converged = true;
482            break;
483        }
484
485        // Divergence detection.
486        if !term_norm.is_finite() {
487            break;
488        }
489    }
490
491    (x, iters, final_residual, converged, history)
492}
493
494/// Conjugate gradient solver for symmetric positive-definite Ax = b.
495fn solve_cg(
496    row_ptrs: &[usize],
497    col_indices: &[usize],
498    values: &[f64],
499    rhs: &[f64],
500    rows: usize,
501    tolerance: f64,
502    max_iterations: usize,
503) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
504    let mut x = vec![0.0f64; rows];
505    let mut history = Vec::new();
506
507    // r = b - A*x (initially r = b since x = 0).
508    let mut r = rhs.to_vec();
509    let mut p = r.clone();
510    let mut ap = vec![0.0f64; rows];
511
512    let mut rs_old: f64 = r.iter().map(|&v| v * v).sum();
513    let tol_sq = tolerance * tolerance;
514
515    let mut converged = false;
516    let mut final_residual = rs_old.sqrt();
517    let mut iters = 0;
518
519    for iter in 0..max_iterations {
520        spmv_f64(row_ptrs, col_indices, values, &p, &mut ap, rows);
521
522        let p_ap: f64 = p.iter().zip(ap.iter()).map(|(&a, &b)| a * b).sum();
523        if p_ap.abs() < 1e-30 {
524            break;
525        }
526        let alpha = rs_old / p_ap;
527
528        for i in 0..rows {
529            x[i] += alpha * p[i];
530        }
531
532        for i in 0..rows {
533            r[i] -= alpha * ap[i];
534        }
535
536        let rs_new: f64 = r.iter().map(|&v| v * v).sum();
537        final_residual = rs_new.sqrt();
538        history.push((iter, final_residual));
539        iters = iter + 1;
540
541        if rs_new < tol_sq {
542            converged = true;
543            break;
544        }
545
546        let beta = rs_new / rs_old;
547        for i in 0..rows {
548            p[i] = r[i] + beta * p[i];
549        }
550
551        rs_old = rs_new;
552    }
553
554    (x, iters, final_residual, converged, history)
555}
556
557/// Dispatch to the appropriate solver based on algorithm selection.
558fn dispatch_solver(
559    algo: Algorithm,
560    row_ptrs: &[usize],
561    col_indices: &[usize],
562    values: &[f64],
563    rhs: &[f64],
564    rows: usize,
565    tolerance: f64,
566    max_iterations: usize,
567) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
568    match algo {
569        Algorithm::Jacobi => {
570            solve_jacobi(row_ptrs, col_indices, values, rhs, rows, tolerance, max_iterations)
571        }
572        Algorithm::GaussSeidel => {
573            solve_gauss_seidel(row_ptrs, col_indices, values, rhs, rows, tolerance, max_iterations)
574        }
575        Algorithm::Neumann => {
576            solve_neumann(row_ptrs, col_indices, values, rhs, rows, tolerance, max_iterations)
577        }
578        Algorithm::CG => {
579            solve_cg(row_ptrs, col_indices, values, rhs, rows, tolerance, max_iterations)
580        }
581        // Forward/backward push are graph algorithms, not general linear solvers.
582        // Fall back to Jacobi.
583        _ => solve_jacobi(row_ptrs, col_indices, values, rhs, rows, tolerance, max_iterations),
584    }
585}
586
// ---------------------------------------------------------------------------
// NapiSolver
// ---------------------------------------------------------------------------

/// High-performance sparse linear solver with automatic algorithm selection.
///
/// Provides async methods for solving Ax = b, computing PageRank, and
/// estimating computational complexity. All heavy computation runs on
/// worker threads.
///
/// # Example
/// ```javascript
/// const { NapiSolver } = require('@ruvector/solver');
///
/// const solver = new NapiSolver();
/// const result = await solver.solve({
///   values: [4, -1, -1, 4, -1, -1, 4],
///   colIndices: [0, 1, 0, 1, 2, 1, 2],
///   rowPtrs: [0, 2, 5, 7],
///   rows: 3, cols: 3,
///   rhs: [1, 0, 1],
/// });
/// console.log('Solution:', result.solution);
/// console.log('Converged:', result.converged);
/// ```
#[napi]
pub struct NapiSolver {
    /// Tolerance used when `SolveConfig.tolerance` is omitted.
    default_tolerance: f64,
    /// Iteration cap used when `SolveConfig.max_iterations` is omitted.
    default_max_iterations: usize,
}

618#[napi]
619impl NapiSolver {
620    /// Create a new solver instance with default settings.
621    #[napi(constructor)]
622    pub fn new() -> Self {
623        Self {
624            default_tolerance: 1e-6,
625            default_max_iterations: 1000,
626        }
627    }
628
629    /// Solve a sparse linear system Ax = b asynchronously.
630    ///
631    /// Runs the computation on a worker thread to avoid blocking the
632    /// Node.js event loop.
633    ///
634    /// # Arguments
635    /// * `config` - Solver configuration including the CSR matrix, RHS vector,
636    ///   tolerance, max iterations, and algorithm selection.
637    ///
638    /// # Returns
639    /// A `SolveResult` containing the solution vector, convergence info,
640    /// and timing data.
641    ///
642    /// # Example
643    /// ```javascript
644    /// const result = await solver.solve({
645    ///   values: [2, -1, -1, 2, -1, -1, 2],
646    ///   colIndices: [0, 1, 0, 1, 2, 1, 2],
647    ///   rowPtrs: [0, 2, 5, 7],
648    ///   rows: 3, cols: 3,
649    ///   rhs: [1, 0, 1],
650    ///   tolerance: 1e-8,
651    ///   algorithm: 'jacobi',
652    /// });
653    /// ```
654    #[napi]
655    pub async fn solve(&self, config: SolveConfig) -> Result<SolveResult> {
656        let tolerance = config.tolerance.unwrap_or(self.default_tolerance);
657        let max_iterations = config
658            .max_iterations
659            .map(|m| m as usize)
660            .unwrap_or(self.default_max_iterations);
661        let algo = parse_algorithm(config.algorithm.as_deref().unwrap_or("jacobi"))?;
662        let algo_name = algo.to_string();
663
664        let rows = config.rows as usize;
665        let cols = config.cols as usize;
666        validate_csr_input(&config.values, &config.col_indices, &config.row_ptrs, rows, cols)?;
667
668        if config.rhs.len() != rows {
669            return Err(Error::new(
670                Status::InvalidArg,
671                format!("rhs length {} does not match rows = {}", config.rhs.len(), rows),
672            ));
673        }
674
675        let values = config.values;
676        let col_indices: Vec<usize> = config.col_indices.iter().map(|&c| c as usize).collect();
677        let row_ptrs: Vec<usize> = config.row_ptrs.iter().map(|&p| p as usize).collect();
678        let rhs = config.rhs;
679
680        let result = tokio::task::spawn_blocking(move || {
681            let start = Instant::now();
682
683            let (solution, iterations, residual, converged, _history) =
684                dispatch_solver(algo, &row_ptrs, &col_indices, &values, &rhs, rows, tolerance, max_iterations);
685
686            let elapsed_us = start.elapsed().as_micros().min(u32::MAX as u128) as u32;
687
688            SolveResult {
689                solution,
690                iterations: iterations as u32,
691                residual,
692                converged,
693                algorithm: algo_name,
694                time_us: elapsed_us,
695            }
696        })
697        .await
698        .map_err(|e| Error::from_reason(format!("Solver task failed: {}", e)))?;
699
700        Ok(result)
701    }
702
703    /// Solve a sparse linear system from JSON input.
704    ///
705    /// Accepts a JSON string with the same fields as `SolveConfig` and
706    /// returns a JSON string with the `SolveResult` fields.
707    ///
708    /// # Example
709    /// ```javascript
710    /// const input = JSON.stringify({
711    ///   values: [2, -1, -1, 2],
712    ///   col_indices: [0, 1, 0, 1],
713    ///   row_ptrs: [0, 2, 4],
714    ///   rows: 2, cols: 2,
715    ///   rhs: [1, 1],
716    /// });
717    /// const output = await solver.solveJson(input);
718    /// const result = JSON.parse(output);
719    /// ```
720    #[napi]
721    pub async fn solve_json(&self, json: String) -> Result<String> {
722        let input: SolveJsonInput = serde_json::from_str(&json).map_err(|e| {
723            Error::new(Status::InvalidArg, format!("Invalid JSON input: {}", e))
724        })?;
725
726        let config = SolveConfig {
727            values: input.values,
728            col_indices: input.col_indices,
729            row_ptrs: input.row_ptrs,
730            rows: input.rows,
731            cols: input.cols,
732            rhs: input.rhs,
733            tolerance: input.tolerance,
734            max_iterations: input.max_iterations,
735            algorithm: input.algorithm,
736        };
737
738        let result = self.solve(config).await?;
739
740        let output = SolveJsonOutput {
741            solution: result.solution,
742            iterations: result.iterations,
743            residual: result.residual,
744            converged: result.converged,
745            algorithm: result.algorithm,
746            time_us: result.time_us,
747        };
748
749        serde_json::to_string(&output).map_err(|e| {
750            Error::new(Status::GenericFailure, format!("Serialization error: {}", e))
751        })
752    }
753
    /// Compute PageRank scores for a directed graph asynchronously.
    ///
    /// Implements the power iteration method:
    ///   r_{k+1} = d * A^T * D^{-1} * r_k + (1 - d) * p
    /// where d is the damping factor, D is the out-degree diagonal, and p
    /// is the personalization vector.
    ///
    /// The graph is given in CSR form: row i lists the weighted out-edges
    /// of node i. Nodes with no out-edges (or zero total edge weight) have
    /// their rank mass spread uniformly over all nodes each iteration.
    ///
    /// # Example
    /// ```javascript
    /// // Simple 3-node graph: 0->1, 1->2, 2->0
    /// const result = await solver.pagerank({
    ///   values: [1, 1, 1],
    ///   colIndices: [1, 2, 0],
    ///   rowPtrs: [0, 1, 2, 3],
    ///   numNodes: 3,
    ///   damping: 0.85,
    /// });
    /// console.log('PageRank:', result.scores);
    /// ```
    #[napi]
    pub async fn pagerank(&self, config: PageRankConfig) -> Result<PageRankResult> {
        // Resolve optional settings before validating anything.
        let damping = config.damping.unwrap_or(0.85);
        let tolerance = config.tolerance.unwrap_or(1e-6);
        let max_iterations = config.max_iterations.map(|m| m as usize).unwrap_or(100);
        let num_nodes = config.num_nodes as usize;

        if damping < 0.0 || damping > 1.0 {
            return Err(Error::new(
                Status::InvalidArg,
                format!("Damping factor must be in [0, 1], got {}", damping),
            ));
        }

        // The adjacency matrix must be square: num_nodes x num_nodes.
        validate_csr_input(
            &config.values,
            &config.col_indices,
            &config.row_ptrs,
            num_nodes,
            num_nodes,
        )?;

        // Widen u32 CSR indices to usize once, up front.
        let values: Vec<f64> = config.values;
        let col_indices: Vec<usize> = config.col_indices.iter().map(|&c| c as usize).collect();
        let row_ptrs: Vec<usize> = config.row_ptrs.iter().map(|&p| p as usize).collect();
        let personalization = config.personalization;

        if let Some(ref pv) = personalization {
            if pv.len() != num_nodes {
                return Err(Error::new(
                    Status::InvalidArg,
                    format!(
                        "personalization length {} does not match num_nodes = {}",
                        pv.len(),
                        num_nodes
                    ),
                ));
            }
        }

        // The iteration itself runs off the Node.js event loop.
        let result = tokio::task::spawn_blocking(move || {
            let start = Instant::now();

            // Teleport distribution: uniform unless the caller supplied one.
            let p = personalization
                .unwrap_or_else(|| vec![1.0 / num_nodes as f64; num_nodes]);

            // Compute out-degrees for row-stochastic normalization.
            let mut out_degree = vec![0.0f64; num_nodes];
            for i in 0..num_nodes {
                for idx in row_ptrs[i]..row_ptrs[i + 1] {
                    out_degree[i] += values[idx];
                }
            }

            // Start from the uniform distribution.
            let mut rank = vec![1.0 / num_nodes as f64; num_nodes];
            let mut new_rank = vec![0.0f64; num_nodes];

            let mut converged = false;
            let mut final_residual = f64::MAX;
            let mut iters = 0;

            for iter in 0..max_iterations {
                // Teleport term: (1 - d) * p.
                for i in 0..num_nodes {
                    new_rank[i] = (1.0 - damping) * p[i];
                }

                // Push each node's rank along its out-edges, normalized by
                // out-degree; collect the mass of dangling nodes separately.
                let mut dangling_sum = 0.0f64;
                for i in 0..num_nodes {
                    let s = row_ptrs[i];
                    let e = row_ptrs[i + 1];
                    if s == e || out_degree[i].abs() < 1e-15 {
                        dangling_sum += rank[i];
                    } else {
                        let contribution = rank[i] / out_degree[i];
                        for idx in s..e {
                            new_rank[col_indices[idx]] += damping * values[idx] * contribution;
                        }
                    }
                }

                // NOTE(review): dangling mass is redistributed uniformly even
                // when a personalization vector was supplied; personalized
                // PageRank variants usually redistribute it along `p` —
                // confirm this is the intended behavior.
                if dangling_sum > 0.0 {
                    let dangling_contrib = damping * dangling_sum / num_nodes as f64;
                    for i in 0..num_nodes {
                        new_rank[i] += dangling_contrib;
                    }
                }

                // Convergence measure: L1 distance between successive iterates.
                let mut diff = 0.0f64;
                for i in 0..num_nodes {
                    diff += (new_rank[i] - rank[i]).abs();
                }

                std::mem::swap(&mut rank, &mut new_rank);
                final_residual = diff;
                iters = iter + 1;

                if diff < tolerance {
                    converged = true;
                    break;
                }
            }

            // Saturate rather than wrap if the run exceeds u32 microseconds.
            let elapsed_us = start.elapsed().as_micros().min(u32::MAX as u128) as u32;

            PageRankResult {
                scores: rank,
                iterations: iters as u32,
                residual: final_residual,
                converged,
                time_us: elapsed_us,
            }
        })
        .await
        .map_err(|e| Error::from_reason(format!("PageRank task failed: {}", e)))?;

        Ok(result)
    }

891    /// Estimate computational complexity for a given problem size.
892    ///
893    /// This is a synchronous method since the estimation is O(1).
894    ///
895    /// # Example
896    /// ```javascript
897    /// const estimate = solver.estimateComplexity({
898    ///   rows: 10000,
899    ///   nnz: 50000,
900    ///   algorithm: 'jacobi',
901    /// });
902    /// console.log('Complexity:', estimate.complexityClass);
903    /// console.log('Recommended:', estimate.recommendedAlgorithm);
904    /// ```
905    #[napi]
906    pub fn estimate_complexity(&self, config: ComplexityConfig) -> Result<ComplexityResult> {
907        let n = config.rows as f64;
908        let nnz = config.nnz as f64;
909        let sparsity = if n * n > 0.0 { nnz / (n * n) } else { 0.0 };
910
911        let algo_name = config.algorithm.as_deref().unwrap_or("auto");
912
913        let recommended = if n < 100.0 {
914            "gauss-seidel"
915        } else if sparsity < 0.01 && n > 10000.0 {
916            "conjugate-gradient"
917        } else if sparsity < 0.05 {
918            "neumann"
919        } else {
920            "jacobi"
921        };
922
923        let (complexity_class, estimated_flops) = match algo_name {
924            "neumann" | "neumann-series" => {
925                let k = 50.0;
926                ("O(k * nnz)".to_string(), k * nnz)
927            }
928            "jacobi" => {
929                let k = n.sqrt().max(10.0);
930                ("O(sqrt(n) * nnz)".to_string(), k * nnz)
931            }
932            "gauss-seidel" | "gs" => {
933                let k = (n.sqrt() / 2.0).max(5.0);
934                ("O(sqrt(n) * nnz)".to_string(), k * nnz)
935            }
936            "conjugate-gradient" | "cg" => {
937                let cond_est = n.sqrt();
938                ("O(sqrt(kappa) * nnz)".to_string(), cond_est * nnz)
939            }
940            _ => {
941                let k = n.sqrt().max(10.0);
942                ("O(sqrt(n) * nnz)".to_string(), k * nnz)
943            }
944        };
945
946        let estimated_time_us = estimated_flops / 1000.0;
947
948        Ok(ComplexityResult {
949            complexity_class,
950            estimated_flops,
951            recommended_algorithm: recommended.to_string(),
952            estimated_time_us,
953            sparsity,
954        })
955    }
956
957    /// Solve with full convergence history returned.
958    ///
959    /// Identical to `solve` but also returns per-iteration residual data
960    /// for convergence analysis and visualization.
961    ///
962    /// # Example
963    /// ```javascript
964    /// const result = await solver.solveWithHistory({
965    ///   values: [4, -1, -1, 4],
966    ///   colIndices: [0, 1, 0, 1],
967    ///   rowPtrs: [0, 2, 4],
968    ///   rows: 2, cols: 2,
969    ///   rhs: [1, 1],
970    /// });
971    /// result.convergenceHistory.forEach(entry => {
972    ///   console.log(`Iter ${entry.iteration}: residual = ${entry.residual}`);
973    /// });
974    /// ```
975    #[napi]
976    pub async fn solve_with_history(&self, config: SolveConfig) -> Result<SolveWithHistoryResult> {
977        let tolerance = config.tolerance.unwrap_or(self.default_tolerance);
978        let max_iterations = config
979            .max_iterations
980            .map(|m| m as usize)
981            .unwrap_or(self.default_max_iterations);
982        let algo = parse_algorithm(config.algorithm.as_deref().unwrap_or("jacobi"))?;
983        let algo_name = algo.to_string();
984
985        let rows = config.rows as usize;
986        let cols = config.cols as usize;
987        validate_csr_input(&config.values, &config.col_indices, &config.row_ptrs, rows, cols)?;
988
989        if config.rhs.len() != rows {
990            return Err(Error::new(
991                Status::InvalidArg,
992                format!("rhs length {} does not match rows = {}", config.rhs.len(), rows),
993            ));
994        }
995
996        let values = config.values;
997        let col_indices: Vec<usize> = config.col_indices.iter().map(|&c| c as usize).collect();
998        let row_ptrs: Vec<usize> = config.row_ptrs.iter().map(|&p| p as usize).collect();
999        let rhs = config.rhs;
1000
1001        let result = tokio::task::spawn_blocking(move || {
1002            let start = Instant::now();
1003
1004            let (solution, iterations, residual, converged, history) =
1005                dispatch_solver(algo, &row_ptrs, &col_indices, &values, &rhs, rows, tolerance, max_iterations);
1006
1007            let elapsed_us = start.elapsed().as_micros().min(u32::MAX as u128) as u32;
1008
1009            let convergence_history: Vec<ConvergenceEntry> = history
1010                .into_iter()
1011                .map(|(iter, res)| ConvergenceEntry {
1012                    iteration: iter as u32,
1013                    residual: res,
1014                })
1015                .collect();
1016
1017            SolveWithHistoryResult {
1018                solution,
1019                iterations: iterations as u32,
1020                residual,
1021                converged,
1022                algorithm: algo_name,
1023                time_us: elapsed_us,
1024                convergence_history,
1025            }
1026        })
1027        .await
1028        .map_err(|e| Error::from_reason(format!("Solver task failed: {}", e)))?;
1029
1030        Ok(result)
1031    }
1032}
1033
1034// ---------------------------------------------------------------------------
1035// Serde types for JSON solve interface
1036// ---------------------------------------------------------------------------
1037
/// Input payload for the JSON-string solve interface.
///
/// Mirrors [`SolveConfig`], but is decoded with serde from a JSON document
/// instead of being marshalled as a NAPI object.
///
/// NOTE(review): serde's default naming means the expected JSON keys are
/// snake_case ("col_indices", "row_ptrs"), while the NAPI object API exposes
/// camelCase ("colIndices") to JavaScript — confirm this asymmetry is
/// intentional before unifying with `#[serde(rename_all = "camelCase")]`.
#[derive(serde::Deserialize)]
struct SolveJsonInput {
    /// Non-zero values in CSR format.
    values: Vec<f64>,
    /// Column index for each non-zero entry.
    col_indices: Vec<u32>,
    /// Row pointers (length = rows + 1).
    row_ptrs: Vec<u32>,
    /// Number of matrix rows.
    rows: u32,
    /// Number of matrix columns.
    cols: u32,
    /// Right-hand side vector b.
    rhs: Vec<f64>,
    /// Optional convergence tolerance.
    tolerance: Option<f64>,
    /// Optional iteration cap.
    max_iterations: Option<u32>,
    /// Optional algorithm name.
    algorithm: Option<String>,
}
1050
/// Output payload for the JSON-string solve interface.
///
/// Serialized with serde; carries the same data as the NAPI `SolveResult`
/// plus the elapsed time in microseconds.
#[derive(serde::Serialize)]
struct SolveJsonOutput {
    /// Solution vector x.
    solution: Vec<f64>,
    /// Number of iterations performed.
    iterations: u32,
    /// Final residual norm.
    residual: f64,
    /// Whether the solver converged within tolerance.
    converged: bool,
    /// Algorithm that was used.
    algorithm: String,
    /// Wall-clock solve time in microseconds.
    time_us: u32,
}
1060
1061// ---------------------------------------------------------------------------
1062// Standalone functions
1063// ---------------------------------------------------------------------------
1064
1065/// Get the library version.
1066#[napi]
1067pub fn version() -> String {
1068    env!("CARGO_PKG_VERSION").to_string()
1069}
1070
1071/// Get library information.
1072#[napi]
1073pub fn info() -> LibraryInfo {
1074    LibraryInfo {
1075        name: "ruvector-solver-node".to_string(),
1076        version: env!("CARGO_PKG_VERSION").to_string(),
1077        description: "Sublinear-time sparse linear solver for Node.js".to_string(),
1078        algorithms: vec![
1079            "neumann".to_string(),
1080            "jacobi".to_string(),
1081            "gauss-seidel".to_string(),
1082            "conjugate-gradient".to_string(),
1083        ],
1084        features: vec![
1085            "async-solve".to_string(),
1086            "json-interface".to_string(),
1087            "pagerank".to_string(),
1088            "complexity-estimation".to_string(),
1089            "convergence-history".to_string(),
1090        ],
1091    }
1092}
1093
1094/// Library information.
/// Library information returned by [`info`].
#[napi(object)]
pub struct LibraryInfo {
    /// Package name.
    pub name: String,
    /// Crate version string.
    pub version: String,
    /// Human-readable description.
    pub description: String,
    /// Names of supported solver algorithms.
    pub algorithms: Vec<String>,
    /// Feature flags exposed by this build.
    pub features: Vec<String>,
}
1103
1104/// List available solver algorithms.
1105#[napi]
1106pub fn available_algorithms() -> Vec<String> {
1107    vec![
1108        "neumann".to_string(),
1109        "jacobi".to_string(),
1110        "gauss-seidel".to_string(),
1111        "conjugate-gradient".to_string(),
1112    ]
1113}