ruvector_solver_node/lib.rs
//! Node.js NAPI bindings for the RuVector sublinear-time solver.
//!
//! Provides high-performance sparse linear system solving, PageRank
//! computation, and complexity estimation for Node.js applications.
//!
//! All heavy computation runs on worker threads via `tokio::task::spawn_blocking`
//! to avoid blocking the Node.js event loop.

9#![deny(clippy::all)]
10
11use napi::bindgen_prelude::*;
12use napi_derive::napi;
13use ruvector_solver::types::Algorithm;
14use std::time::Instant;
15
// ---------------------------------------------------------------------------
// Configuration types (NAPI objects)
// ---------------------------------------------------------------------------

/// Configuration for solving a sparse linear system Ax = b.
///
/// The matrix A is supplied in Compressed Sparse Row (CSR) form via
/// `values`, `col_indices`, and `row_ptrs`.
#[napi(object)]
pub struct SolveConfig {
    /// Non-zero values in CSR format.
    pub values: Vec<f64>,
    /// Column indices for each non-zero entry (parallel to `values`).
    pub col_indices: Vec<u32>,
    /// Row pointers (length = rows + 1); entries i and i+1 bound row i's non-zeros.
    pub row_ptrs: Vec<u32>,
    /// Number of rows in the matrix.
    pub rows: u32,
    /// Number of columns in the matrix.
    pub cols: u32,
    /// Right-hand side vector b (length must equal `rows`).
    pub rhs: Vec<f64>,
    /// Convergence tolerance (default: 1e-6).
    pub tolerance: Option<f64>,
    /// Maximum number of iterations (default: 1000).
    pub max_iterations: Option<u32>,
    /// Algorithm to use: "neumann", "jacobi", "gauss-seidel", "conjugate-gradient".
    /// Defaults to "jacobi".
    pub algorithm: Option<String>,
}
43
/// Result of solving a sparse linear system.
#[napi(object)]
pub struct SolveResult {
    /// Solution vector x.
    pub solution: Vec<f64>,
    /// Number of iterations performed.
    pub iterations: u32,
    /// Final residual norm ||Ax - b||.
    pub residual: f64,
    /// Whether the solver converged within tolerance.
    pub converged: bool,
    /// Algorithm that was used (canonical name from the core enum).
    pub algorithm: String,
    /// Wall-clock time in microseconds (saturates at u32::MAX).
    pub time_us: u32,
}
60
/// Configuration for PageRank computation.
///
/// The graph is given as a square CSR adjacency matrix: row i holds the
/// outgoing edges of node i, with `values` as edge weights.
#[napi(object)]
pub struct PageRankConfig {
    /// Non-zero values in CSR format (edge weights).
    pub values: Vec<f64>,
    /// Column indices for each non-zero entry (edge targets).
    pub col_indices: Vec<u32>,
    /// Row pointers (length = rows + 1).
    pub row_ptrs: Vec<u32>,
    /// Number of nodes in the graph.
    pub num_nodes: u32,
    /// Damping factor, must lie in [0, 1] (default: 0.85).
    pub damping: Option<f64>,
    /// Convergence tolerance (default: 1e-6).
    pub tolerance: Option<f64>,
    /// Maximum number of iterations (default: 100).
    pub max_iterations: Option<u32>,
    /// Personalization vector of length `num_nodes` (uniform if omitted).
    pub personalization: Option<Vec<f64>>,
}
81
/// Result of PageRank computation.
#[napi(object)]
pub struct PageRankResult {
    /// PageRank scores for each node (sums to 1.0, assuming the
    /// personalization vector, if supplied, sums to 1.0).
    pub scores: Vec<f64>,
    /// Number of iterations performed.
    pub iterations: u32,
    /// Final convergence residual (L1 distance between successive iterates).
    pub residual: f64,
    /// Whether the computation converged.
    pub converged: bool,
    /// Wall-clock time in microseconds (saturates at u32::MAX).
    pub time_us: u32,
}
96
/// Configuration for complexity estimation.
#[napi(object)]
pub struct ComplexityConfig {
    /// Number of rows in the matrix.
    pub rows: u32,
    /// Number of non-zero entries.
    pub nnz: u32,
    /// Algorithm to estimate for (defaults to "auto" when omitted).
    pub algorithm: Option<String>,
}
107
/// Result of complexity estimation.
#[napi(object)]
pub struct ComplexityResult {
    /// Estimated time complexity class (e.g. "O(sqrt(n) * nnz)").
    pub complexity_class: String,
    /// Estimated number of floating-point operations.
    pub estimated_flops: f64,
    /// Recommended algorithm for this problem size.
    pub recommended_algorithm: String,
    /// Estimated wall-clock time in microseconds.
    pub estimated_time_us: f64,
    /// Sparsity ratio (nnz / n^2); 0.0 for an empty matrix.
    pub sparsity: f64,
}
122
/// Convergence history entry: one (iteration, residual) sample.
#[napi(object)]
pub struct ConvergenceEntry {
    /// Iteration index (0-based).
    pub iteration: u32,
    /// Residual norm at this iteration.
    pub residual: f64,
}
131
/// Result of solving with convergence history.
///
/// Same fields as `SolveResult`, plus the per-iteration residual trace.
#[napi(object)]
pub struct SolveWithHistoryResult {
    /// Solution vector x.
    pub solution: Vec<f64>,
    /// Number of iterations performed.
    pub iterations: u32,
    /// Final residual norm.
    pub residual: f64,
    /// Whether the solver converged.
    pub converged: bool,
    /// Algorithm used.
    pub algorithm: String,
    /// Wall-clock time in microseconds.
    pub time_us: u32,
    /// Per-iteration convergence history.
    pub convergence_history: Vec<ConvergenceEntry>,
}
150
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------

155/// Parse an algorithm name string into the core Algorithm enum.
156fn parse_algorithm(name: &str) -> Result<Algorithm> {
157    match name.to_lowercase().as_str() {
158        "neumann" | "neumann-series" => Ok(Algorithm::Neumann),
159        "jacobi" => Ok(Algorithm::Jacobi),
160        "gauss-seidel" | "gaussseidel" | "gs" => Ok(Algorithm::GaussSeidel),
161        "forward-push" | "forwardpush" => Ok(Algorithm::ForwardPush),
162        "backward-push" | "backwardpush" => Ok(Algorithm::BackwardPush),
163        "conjugate-gradient" | "cg" => Ok(Algorithm::CG),
164        other => Err(Error::new(
165            Status::InvalidArg,
166            format!(
167                "Unknown algorithm '{}'. Expected one of: neumann, jacobi, \
168                 gauss-seidel, conjugate-gradient, forward-push, backward-push",
169                other
170            ),
171        )),
172    }
173}
174
175/// Validate CSR input dimensions for consistency.
176fn validate_csr_input(
177    values: &[f64],
178    col_indices: &[u32],
179    row_ptrs: &[u32],
180    rows: usize,
181    cols: usize,
182) -> Result<()> {
183    if row_ptrs.len() != rows + 1 {
184        return Err(Error::new(
185            Status::InvalidArg,
186            format!(
187                "row_ptrs length {} does not equal rows + 1 = {}",
188                row_ptrs.len(),
189                rows + 1
190            ),
191        ));
192    }
193
194    if values.len() != col_indices.len() {
195        return Err(Error::new(
196            Status::InvalidArg,
197            format!(
198                "values length {} does not match col_indices length {}",
199                values.len(),
200                col_indices.len()
201            ),
202        ));
203    }
204
205    let expected_nnz = row_ptrs[rows] as usize;
206    if values.len() != expected_nnz {
207        return Err(Error::new(
208            Status::InvalidArg,
209            format!(
210                "values length {} does not match row_ptrs[rows] = {}",
211                values.len(),
212                expected_nnz
213            ),
214        ));
215    }
216
217    // Validate monotonicity of row_ptrs.
218    for i in 1..row_ptrs.len() {
219        if row_ptrs[i] < row_ptrs[i - 1] {
220            return Err(Error::new(
221                Status::InvalidArg,
222                format!(
223                    "row_ptrs is not monotonically non-decreasing at position {}",
224                    i
225                ),
226            ));
227        }
228    }
229
230    // Validate column indices and value finiteness.
231    for (idx, (&col, &val)) in col_indices.iter().zip(values.iter()).enumerate() {
232        if col as usize >= cols {
233            return Err(Error::new(
234                Status::InvalidArg,
235                format!(
236                    "column index {} at position {} is out of bounds (cols={})",
237                    col, idx, cols
238                ),
239            ));
240        }
241        if !val.is_finite() {
242            return Err(Error::new(
243                Status::InvalidArg,
244                format!("non-finite value {} at position {}", val, idx),
245            ));
246        }
247    }
248
249    Ok(())
250}
251
/// Sparse matrix-vector multiply y = A*x using CSR arrays.
///
/// Overwrites the first `rows` entries of `y` with the dot product of each
/// matrix row against `x`. `row_ptrs` must have `rows + 1` entries bounding
/// each row's slice of `values` / `col_indices`.
fn spmv_f64(
    row_ptrs: &[usize],
    col_indices: &[usize],
    values: &[f64],
    x: &[f64],
    y: &mut [f64],
    rows: usize,
) {
    for (row, out) in y.iter_mut().enumerate().take(rows) {
        let (lo, hi) = (row_ptrs[row], row_ptrs[row + 1]);
        // Fold over the row's non-zeros in storage order (same summation
        // order as a plain index loop).
        *out = (lo..hi).fold(0.0f64, |acc, k| acc + values[k] * x[col_indices[k]]);
    }
}
271
/// Compute the Euclidean (L2) norm of the residual r = b - A*x.
///
/// A is given in CSR form; only the first `rows` entries of `b` and the
/// row pointer array are consulted.
fn residual_norm(
    row_ptrs: &[usize],
    col_indices: &[usize],
    values: &[f64],
    x: &[f64],
    b: &[f64],
    rows: usize,
) -> f64 {
    (0..rows)
        .map(|row| {
            // Row dot product accumulated in storage order.
            let ax = (row_ptrs[row]..row_ptrs[row + 1])
                .fold(0.0f64, |acc, k| acc + values[k] * x[col_indices[k]]);
            let r = b[row] - ax;
            r * r
        })
        .sum::<f64>()
        .sqrt()
}
294
/// Extract the diagonal entries of a CSR matrix.
///
/// Returns `None` if any row lacks a stored diagonal entry or if any
/// diagonal entry is (numerically) zero, i.e. |a_ii| < 1e-15. The first
/// stored entry with column index == row index is taken as the diagonal.
fn extract_diagonal(
    row_ptrs: &[usize],
    col_indices: &[usize],
    values: &[f64],
    rows: usize,
) -> Option<Vec<f64>> {
    let mut diag = Vec::with_capacity(rows);
    for i in 0..rows {
        // First stored entry on this row whose column equals the row index;
        // `?` propagates None when no diagonal entry exists.
        let entry = (row_ptrs[i]..row_ptrs[i + 1])
            .find(|&k| col_indices[k] == i)
            .map(|k| values[k])?;
        if entry.abs() < 1e-15 {
            return None;
        }
        diag.push(entry);
    }
    Some(diag)
}
322
323/// Jacobi iterative solver for Ax = b.
324///
325/// Requires the diagonal of A to be non-zero. Iterates:
326///   x_{k+1}[i] = (b[i] - sum_{j!=i} a_{ij} * x_k[j]) / a_{ii}
327fn solve_jacobi(
328    row_ptrs: &[usize],
329    col_indices: &[usize],
330    values: &[f64],
331    rhs: &[f64],
332    rows: usize,
333    tolerance: f64,
334    max_iterations: usize,
335) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
336    let mut x = vec![0.0f64; rows];
337    let mut x_new = vec![0.0f64; rows];
338    let mut history = Vec::new();
339
340    let diag = match extract_diagonal(row_ptrs, col_indices, values, rows) {
341        Some(d) => d,
342        None => {
343            let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
344            history.push((0, res));
345            return (x, 0, res, false, history);
346        }
347    };
348
349    let mut converged = false;
350    let mut final_residual = f64::MAX;
351    let mut iters = 0;
352
353    for iter in 0..max_iterations {
354        for i in 0..rows {
355            let start = row_ptrs[i];
356            let end = row_ptrs[i + 1];
357            let mut sigma = 0.0f64;
358            for idx in start..end {
359                let j = col_indices[idx];
360                if j != i {
361                    sigma += values[idx] * x[j];
362                }
363            }
364            x_new[i] = (rhs[i] - sigma) / diag[i];
365        }
366
367        std::mem::swap(&mut x, &mut x_new);
368
369        let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
370        history.push((iter, res));
371        final_residual = res;
372        iters = iter + 1;
373
374        if res < tolerance {
375            converged = true;
376            break;
377        }
378    }
379
380    (x, iters, final_residual, converged, history)
381}
382
383/// Gauss-Seidel iterative solver for Ax = b.
384///
385/// Updates x in-place within each iteration using the most recent values.
386/// Generally converges faster than Jacobi for the same problem.
387fn solve_gauss_seidel(
388    row_ptrs: &[usize],
389    col_indices: &[usize],
390    values: &[f64],
391    rhs: &[f64],
392    rows: usize,
393    tolerance: f64,
394    max_iterations: usize,
395) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
396    let mut x = vec![0.0f64; rows];
397    let mut history = Vec::new();
398
399    let diag = match extract_diagonal(row_ptrs, col_indices, values, rows) {
400        Some(d) => d,
401        None => {
402            let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
403            history.push((0, res));
404            return (x, 0, res, false, history);
405        }
406    };
407
408    let mut converged = false;
409    let mut final_residual = f64::MAX;
410    let mut iters = 0;
411
412    for iter in 0..max_iterations {
413        for i in 0..rows {
414            let start = row_ptrs[i];
415            let end = row_ptrs[i + 1];
416            let mut sigma = 0.0f64;
417            for idx in start..end {
418                let j = col_indices[idx];
419                if j != i {
420                    sigma += values[idx] * x[j];
421                }
422            }
423            x[i] = (rhs[i] - sigma) / diag[i];
424        }
425
426        let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
427        history.push((iter, res));
428        final_residual = res;
429        iters = iter + 1;
430
431        if res < tolerance {
432            converged = true;
433            break;
434        }
435    }
436
437    (x, iters, final_residual, converged, history)
438}
439
/// Neumann series solver: x = sum_{k=0}^{K} (I - A)^k * b.
///
/// Converges when the spectral radius of (I - A) is less than 1.
///
/// Returns (solution, iterations, final residual, converged flag,
/// per-iteration (index, residual) history).
fn solve_neumann(
    row_ptrs: &[usize],
    col_indices: &[usize],
    values: &[f64],
    rhs: &[f64],
    rows: usize,
    tolerance: f64,
    max_iterations: usize,
) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
    // x accumulates the partial sum; `term` starts at term_0 = b.
    let mut x = vec![0.0f64; rows];
    let mut term = rhs.to_vec();
    let mut temp = vec![0.0f64; rows];
    let mut history = Vec::new();

    let mut converged = false;
    let mut final_residual = f64::MAX;
    let mut iters = 0;

    for iter in 0..max_iterations {
        // Accumulate current term into x.
        for i in 0..rows {
            x[i] += term[i];
        }

        // Compute next term: term_{k+1} = (I - A) * term_k
        spmv_f64(row_ptrs, col_indices, values, &term, &mut temp, rows);
        for i in 0..rows {
            temp[i] = term[i] - temp[i];
        }
        std::mem::swap(&mut term, &mut temp);

        let term_norm: f64 = term.iter().map(|&t| t * t).sum::<f64>().sqrt();
        let res = residual_norm(row_ptrs, col_indices, values, &x, rhs, rows);
        history.push((iter, res));
        final_residual = res;
        iters = iter + 1;

        // Stop when the residual meets tolerance, or when the next series
        // term is already negligible (two orders of magnitude below it).
        if res < tolerance || term_norm < tolerance * 1e-2 {
            converged = true;
            break;
        }

        // Divergence detection: a non-finite term norm means the series is
        // blowing up; bail out with converged = false.
        if !term_norm.is_finite() {
            break;
        }
    }

    (x, iters, final_residual, converged, history)
}
493
/// Conjugate gradient solver for symmetric positive-definite Ax = b.
///
/// Standard unpreconditioned CG starting from x = 0. The residual is
/// maintained recursively (r -= alpha * Ap) rather than recomputed, and
/// convergence is tested on the squared norm against tolerance^2.
///
/// Returns (solution, iterations, final residual, converged flag,
/// per-iteration (index, residual) history).
fn solve_cg(
    row_ptrs: &[usize],
    col_indices: &[usize],
    values: &[f64],
    rhs: &[f64],
    rows: usize,
    tolerance: f64,
    max_iterations: usize,
) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
    let mut x = vec![0.0f64; rows];
    let mut history = Vec::new();

    // r = b - A*x (initially r = b since x = 0).
    let mut r = rhs.to_vec();
    let mut p = r.clone();
    let mut ap = vec![0.0f64; rows];

    let mut rs_old: f64 = r.iter().map(|&v| v * v).sum();
    let tol_sq = tolerance * tolerance;

    let mut converged = false;
    let mut final_residual = rs_old.sqrt();
    let mut iters = 0;

    for iter in 0..max_iterations {
        spmv_f64(row_ptrs, col_indices, values, &p, &mut ap, rows);

        // Step length alpha = (r.r) / (p.Ap). A numerically-zero p.Ap
        // signals breakdown (e.g. a non-SPD matrix); stop unconverged.
        let p_ap: f64 = p.iter().zip(ap.iter()).map(|(&a, &b)| a * b).sum();
        if p_ap.abs() < 1e-30 {
            break;
        }
        let alpha = rs_old / p_ap;

        // x += alpha * p
        for i in 0..rows {
            x[i] += alpha * p[i];
        }

        // r -= alpha * Ap (recursive residual update).
        for i in 0..rows {
            r[i] -= alpha * ap[i];
        }

        let rs_new: f64 = r.iter().map(|&v| v * v).sum();
        final_residual = rs_new.sqrt();
        history.push((iter, final_residual));
        iters = iter + 1;

        if rs_new < tol_sq {
            converged = true;
            break;
        }

        // New search direction: p = r + beta * p.
        let beta = rs_new / rs_old;
        for i in 0..rows {
            p[i] = r[i] + beta * p[i];
        }

        rs_old = rs_new;
    }

    (x, iters, final_residual, converged, history)
}
556
557/// Dispatch to the appropriate solver based on algorithm selection.
558fn dispatch_solver(
559    algo: Algorithm,
560    row_ptrs: &[usize],
561    col_indices: &[usize],
562    values: &[f64],
563    rhs: &[f64],
564    rows: usize,
565    tolerance: f64,
566    max_iterations: usize,
567) -> (Vec<f64>, usize, f64, bool, Vec<(usize, f64)>) {
568    match algo {
569        Algorithm::Jacobi => solve_jacobi(
570            row_ptrs,
571            col_indices,
572            values,
573            rhs,
574            rows,
575            tolerance,
576            max_iterations,
577        ),
578        Algorithm::GaussSeidel => solve_gauss_seidel(
579            row_ptrs,
580            col_indices,
581            values,
582            rhs,
583            rows,
584            tolerance,
585            max_iterations,
586        ),
587        Algorithm::Neumann => solve_neumann(
588            row_ptrs,
589            col_indices,
590            values,
591            rhs,
592            rows,
593            tolerance,
594            max_iterations,
595        ),
596        Algorithm::CG => solve_cg(
597            row_ptrs,
598            col_indices,
599            values,
600            rhs,
601            rows,
602            tolerance,
603            max_iterations,
604        ),
605        // Forward/backward push are graph algorithms, not general linear solvers.
606        // Fall back to Jacobi.
607        _ => solve_jacobi(
608            row_ptrs,
609            col_indices,
610            values,
611            rhs,
612            rows,
613            tolerance,
614            max_iterations,
615        ),
616    }
617}
618
// ---------------------------------------------------------------------------
// NapiSolver
// ---------------------------------------------------------------------------

/// High-performance sparse linear solver with automatic algorithm selection.
///
/// Provides async methods for solving Ax = b, computing PageRank, and
/// estimating computational complexity. All heavy computation runs on
/// worker threads.
///
/// # Example
/// ```javascript
/// const { NapiSolver } = require('@ruvector/solver');
///
/// const solver = new NapiSolver();
/// const result = await solver.solve({
///   values: [4, -1, -1, 4, -1, -1, 4],
///   colIndices: [0, 1, 0, 1, 2, 1, 2],
///   rowPtrs: [0, 2, 5, 7],
///   rows: 3, cols: 3,
///   rhs: [1, 0, 1],
/// });
/// console.log('Solution:', result.solution);
/// console.log('Converged:', result.converged);
/// ```
#[napi]
pub struct NapiSolver {
    // Fallback used when SolveConfig.tolerance is omitted.
    default_tolerance: f64,
    // Fallback used when SolveConfig.max_iterations is omitted.
    default_max_iterations: usize,
}
649
650#[napi]
651impl NapiSolver {
    /// Create a new solver instance with default settings
    /// (tolerance = 1e-6, max iterations = 1000).
    #[napi(constructor)]
    pub fn new() -> Self {
        Self {
            default_tolerance: 1e-6,
            default_max_iterations: 1000,
        }
    }
660
    /// Solve a sparse linear system Ax = b asynchronously.
    ///
    /// Runs the computation on a worker thread (`tokio::task::spawn_blocking`)
    /// to avoid blocking the Node.js event loop.
    ///
    /// # Arguments
    /// * `config` - Solver configuration including the CSR matrix, RHS vector,
    ///   tolerance, max iterations, and algorithm selection.
    ///
    /// # Returns
    /// A `SolveResult` containing the solution vector, convergence info,
    /// and timing data.
    ///
    /// # Errors
    /// `InvalidArg` for malformed CSR input, a mismatched `rhs` length, or an
    /// unknown algorithm name; a generic error if the worker task fails.
    ///
    /// # Example
    /// ```javascript
    /// const result = await solver.solve({
    ///   values: [2, -1, -1, 2, -1, -1, 2],
    ///   colIndices: [0, 1, 0, 1, 2, 1, 2],
    ///   rowPtrs: [0, 2, 5, 7],
    ///   rows: 3, cols: 3,
    ///   rhs: [1, 0, 1],
    ///   tolerance: 1e-8,
    ///   algorithm: 'jacobi',
    /// });
    /// ```
    #[napi]
    pub async fn solve(&self, config: SolveConfig) -> Result<SolveResult> {
        // Resolve optional knobs against the instance defaults.
        let tolerance = config.tolerance.unwrap_or(self.default_tolerance);
        let max_iterations = config
            .max_iterations
            .map(|m| m as usize)
            .unwrap_or(self.default_max_iterations);
        let algo = parse_algorithm(config.algorithm.as_deref().unwrap_or("jacobi"))?;
        let algo_name = algo.to_string();

        let rows = config.rows as usize;
        let cols = config.cols as usize;
        // Validate before the worker-thread hop so bad input fails fast.
        validate_csr_input(
            &config.values,
            &config.col_indices,
            &config.row_ptrs,
            rows,
            cols,
        )?;

        if config.rhs.len() != rows {
            return Err(Error::new(
                Status::InvalidArg,
                format!(
                    "rhs length {} does not match rows = {}",
                    config.rhs.len(),
                    rows
                ),
            ));
        }

        // Widen u32 index arrays to usize once, then move everything into
        // the blocking task (no data shared with the JS thread).
        let values = config.values;
        let col_indices: Vec<usize> = config.col_indices.iter().map(|&c| c as usize).collect();
        let row_ptrs: Vec<usize> = config.row_ptrs.iter().map(|&p| p as usize).collect();
        let rhs = config.rhs;

        let result = tokio::task::spawn_blocking(move || {
            let start = Instant::now();

            // History is computed but discarded here; use
            // `solve_with_history` for per-iteration residuals.
            let (solution, iterations, residual, converged, _history) = dispatch_solver(
                algo,
                &row_ptrs,
                &col_indices,
                &values,
                &rhs,
                rows,
                tolerance,
                max_iterations,
            );

            // Saturate at u32::MAX rather than wrapping on very long runs.
            let elapsed_us = start.elapsed().as_micros().min(u32::MAX as u128) as u32;

            SolveResult {
                solution,
                iterations: iterations as u32,
                residual,
                converged,
                algorithm: algo_name,
                time_us: elapsed_us,
            }
        })
        .await
        .map_err(|e| Error::from_reason(format!("Solver task failed: {}", e)))?;

        Ok(result)
    }
752
    /// Solve a sparse linear system from JSON input.
    ///
    /// Accepts a JSON string with the same fields as `SolveConfig`
    /// (snake_case keys) and returns a JSON string with the `SolveResult`
    /// fields. Internally delegates to `solve`, so validation, defaults,
    /// and algorithm selection behave identically.
    ///
    /// NOTE(review): `SolveJsonInput` / `SolveJsonOutput` are serde types
    /// declared elsewhere in this file — presumed to mirror `SolveConfig`
    /// and `SolveResult` field-for-field; verify when editing either.
    ///
    /// # Example
    /// ```javascript
    /// const input = JSON.stringify({
    ///   values: [2, -1, -1, 2],
    ///   col_indices: [0, 1, 0, 1],
    ///   row_ptrs: [0, 2, 4],
    ///   rows: 2, cols: 2,
    ///   rhs: [1, 1],
    /// });
    /// const output = await solver.solveJson(input);
    /// const result = JSON.parse(output);
    /// ```
    #[napi]
    pub async fn solve_json(&self, json: String) -> Result<String> {
        let input: SolveJsonInput = serde_json::from_str(&json)
            .map_err(|e| Error::new(Status::InvalidArg, format!("Invalid JSON input: {}", e)))?;

        // Bridge the serde input type onto the NAPI config type.
        let config = SolveConfig {
            values: input.values,
            col_indices: input.col_indices,
            row_ptrs: input.row_ptrs,
            rows: input.rows,
            cols: input.cols,
            rhs: input.rhs,
            tolerance: input.tolerance,
            max_iterations: input.max_iterations,
            algorithm: input.algorithm,
        };

        let result = self.solve(config).await?;

        let output = SolveJsonOutput {
            solution: result.solution,
            iterations: result.iterations,
            residual: result.residual,
            converged: result.converged,
            algorithm: result.algorithm,
            time_us: result.time_us,
        };

        serde_json::to_string(&output).map_err(|e| {
            Error::new(
                Status::GenericFailure,
                format!("Serialization error: {}", e),
            )
        })
    }
805
    /// Compute PageRank scores for a directed graph asynchronously.
    ///
    /// Implements the power iteration method:
    ///   r_{k+1} = d * A^T * D^{-1} * r_k + (1 - d) * p
    /// where d is the damping factor, D is the out-degree diagonal, and p
    /// is the personalization vector. Row i of the CSR matrix holds node
    /// i's outgoing edges; edge weights contribute proportionally to the
    /// row's total weight.
    ///
    /// # Errors
    /// `InvalidArg` if damping is outside [0, 1], the CSR arrays are
    /// inconsistent, or the personalization vector has the wrong length.
    ///
    /// # Example
    /// ```javascript
    /// // Simple 3-node graph: 0->1, 1->2, 2->0
    /// const result = await solver.pagerank({
    ///   values: [1, 1, 1],
    ///   colIndices: [1, 2, 0],
    ///   rowPtrs: [0, 1, 2, 3],
    ///   numNodes: 3,
    ///   damping: 0.85,
    /// });
    /// console.log('PageRank:', result.scores);
    /// ```
    #[napi]
    pub async fn pagerank(&self, config: PageRankConfig) -> Result<PageRankResult> {
        let damping = config.damping.unwrap_or(0.85);
        let tolerance = config.tolerance.unwrap_or(1e-6);
        let max_iterations = config.max_iterations.map(|m| m as usize).unwrap_or(100);
        let num_nodes = config.num_nodes as usize;

        if damping < 0.0 || damping > 1.0 {
            return Err(Error::new(
                Status::InvalidArg,
                format!("Damping factor must be in [0, 1], got {}", damping),
            ));
        }

        // The adjacency matrix must be square: num_nodes x num_nodes.
        validate_csr_input(
            &config.values,
            &config.col_indices,
            &config.row_ptrs,
            num_nodes,
            num_nodes,
        )?;

        // Widen index arrays to usize and take ownership for the move below.
        let values: Vec<f64> = config.values;
        let col_indices: Vec<usize> = config.col_indices.iter().map(|&c| c as usize).collect();
        let row_ptrs: Vec<usize> = config.row_ptrs.iter().map(|&p| p as usize).collect();
        let personalization = config.personalization;

        if let Some(ref pv) = personalization {
            if pv.len() != num_nodes {
                return Err(Error::new(
                    Status::InvalidArg,
                    format!(
                        "personalization length {} does not match num_nodes = {}",
                        pv.len(),
                        num_nodes
                    ),
                ));
            }
        }

        let result = tokio::task::spawn_blocking(move || {
            let start = Instant::now();

            // Uniform teleport distribution unless the caller provided one.
            let p = personalization.unwrap_or_else(|| vec![1.0 / num_nodes as f64; num_nodes]);

            // Out-degree = sum of outgoing edge weights per node; used for
            // the row-stochastic normalization D^{-1}.
            let mut out_degree = vec![0.0f64; num_nodes];
            for i in 0..num_nodes {
                for idx in row_ptrs[i]..row_ptrs[i + 1] {
                    out_degree[i] += values[idx];
                }
            }

            let mut rank = vec![1.0 / num_nodes as f64; num_nodes];
            let mut new_rank = vec![0.0f64; num_nodes];

            let mut converged = false;
            let mut final_residual = f64::MAX;
            let mut iters = 0;

            for iter in 0..max_iterations {
                // Teleport term: (1 - d) * p.
                for i in 0..num_nodes {
                    new_rank[i] = (1.0 - damping) * p[i];
                }

                // Propagate rank along outgoing edges, each edge weighted by
                // its share of the source node's total out-weight.
                let mut dangling_sum = 0.0f64;
                for i in 0..num_nodes {
                    let s = row_ptrs[i];
                    let e = row_ptrs[i + 1];
                    if s == e || out_degree[i].abs() < 1e-15 {
                        dangling_sum += rank[i];
                    } else {
                        let contribution = rank[i] / out_degree[i];
                        for idx in s..e {
                            new_rank[col_indices[idx]] += damping * values[idx] * contribution;
                        }
                    }
                }

                // Redistribute mass from dangling / zero-out-weight nodes
                // uniformly so the total rank is conserved.
                // NOTE(review): with a custom personalization vector the
                // conventional formulation redistributes dangling mass
                // according to `p`, not uniformly — confirm intended.
                if dangling_sum > 0.0 {
                    let dangling_contrib = damping * dangling_sum / num_nodes as f64;
                    for i in 0..num_nodes {
                        new_rank[i] += dangling_contrib;
                    }
                }

                // Convergence measure: L1 distance between successive iterates.
                let mut diff = 0.0f64;
                for i in 0..num_nodes {
                    diff += (new_rank[i] - rank[i]).abs();
                }

                std::mem::swap(&mut rank, &mut new_rank);
                final_residual = diff;
                iters = iter + 1;

                if diff < tolerance {
                    converged = true;
                    break;
                }
            }

            // Saturate at u32::MAX rather than wrapping.
            let elapsed_us = start.elapsed().as_micros().min(u32::MAX as u128) as u32;

            PageRankResult {
                scores: rank,
                iterations: iters as u32,
                residual: final_residual,
                converged,
                time_us: elapsed_us,
            }
        })
        .await
        .map_err(|e| Error::from_reason(format!("PageRank task failed: {}", e)))?;

        Ok(result)
    }
941
942    /// Estimate computational complexity for a given problem size.
943    ///
944    /// This is a synchronous method since the estimation is O(1).
945    ///
946    /// # Example
947    /// ```javascript
948    /// const estimate = solver.estimateComplexity({
949    ///   rows: 10000,
950    ///   nnz: 50000,
951    ///   algorithm: 'jacobi',
952    /// });
953    /// console.log('Complexity:', estimate.complexityClass);
954    /// console.log('Recommended:', estimate.recommendedAlgorithm);
955    /// ```
956    #[napi]
957    pub fn estimate_complexity(&self, config: ComplexityConfig) -> Result<ComplexityResult> {
958        let n = config.rows as f64;
959        let nnz = config.nnz as f64;
960        let sparsity = if n * n > 0.0 { nnz / (n * n) } else { 0.0 };
961
962        let algo_name = config.algorithm.as_deref().unwrap_or("auto");
963
964        let recommended = if n < 100.0 {
965            "gauss-seidel"
966        } else if sparsity < 0.01 && n > 10000.0 {
967            "conjugate-gradient"
968        } else if sparsity < 0.05 {
969            "neumann"
970        } else {
971            "jacobi"
972        };
973
974        let (complexity_class, estimated_flops) = match algo_name {
975            "neumann" | "neumann-series" => {
976                let k = 50.0;
977                ("O(k * nnz)".to_string(), k * nnz)
978            }
979            "jacobi" => {
980                let k = n.sqrt().max(10.0);
981                ("O(sqrt(n) * nnz)".to_string(), k * nnz)
982            }
983            "gauss-seidel" | "gs" => {
984                let k = (n.sqrt() / 2.0).max(5.0);
985                ("O(sqrt(n) * nnz)".to_string(), k * nnz)
986            }
987            "conjugate-gradient" | "cg" => {
988                let cond_est = n.sqrt();
989                ("O(sqrt(kappa) * nnz)".to_string(), cond_est * nnz)
990            }
991            _ => {
992                let k = n.sqrt().max(10.0);
993                ("O(sqrt(n) * nnz)".to_string(), k * nnz)
994            }
995        };
996
997        let estimated_time_us = estimated_flops / 1000.0;
998
999        Ok(ComplexityResult {
1000            complexity_class,
1001            estimated_flops,
1002            recommended_algorithm: recommended.to_string(),
1003            estimated_time_us,
1004            sparsity,
1005        })
1006    }
1007
1008    /// Solve with full convergence history returned.
1009    ///
1010    /// Identical to `solve` but also returns per-iteration residual data
1011    /// for convergence analysis and visualization.
1012    ///
1013    /// # Example
1014    /// ```javascript
1015    /// const result = await solver.solveWithHistory({
1016    ///   values: [4, -1, -1, 4],
1017    ///   colIndices: [0, 1, 0, 1],
1018    ///   rowPtrs: [0, 2, 4],
1019    ///   rows: 2, cols: 2,
1020    ///   rhs: [1, 1],
1021    /// });
1022    /// result.convergenceHistory.forEach(entry => {
1023    ///   console.log(`Iter ${entry.iteration}: residual = ${entry.residual}`);
1024    /// });
1025    /// ```
1026    #[napi]
1027    pub async fn solve_with_history(&self, config: SolveConfig) -> Result<SolveWithHistoryResult> {
1028        let tolerance = config.tolerance.unwrap_or(self.default_tolerance);
1029        let max_iterations = config
1030            .max_iterations
1031            .map(|m| m as usize)
1032            .unwrap_or(self.default_max_iterations);
1033        let algo = parse_algorithm(config.algorithm.as_deref().unwrap_or("jacobi"))?;
1034        let algo_name = algo.to_string();
1035
1036        let rows = config.rows as usize;
1037        let cols = config.cols as usize;
1038        validate_csr_input(
1039            &config.values,
1040            &config.col_indices,
1041            &config.row_ptrs,
1042            rows,
1043            cols,
1044        )?;
1045
1046        if config.rhs.len() != rows {
1047            return Err(Error::new(
1048                Status::InvalidArg,
1049                format!(
1050                    "rhs length {} does not match rows = {}",
1051                    config.rhs.len(),
1052                    rows
1053                ),
1054            ));
1055        }
1056
1057        let values = config.values;
1058        let col_indices: Vec<usize> = config.col_indices.iter().map(|&c| c as usize).collect();
1059        let row_ptrs: Vec<usize> = config.row_ptrs.iter().map(|&p| p as usize).collect();
1060        let rhs = config.rhs;
1061
1062        let result = tokio::task::spawn_blocking(move || {
1063            let start = Instant::now();
1064
1065            let (solution, iterations, residual, converged, history) = dispatch_solver(
1066                algo,
1067                &row_ptrs,
1068                &col_indices,
1069                &values,
1070                &rhs,
1071                rows,
1072                tolerance,
1073                max_iterations,
1074            );
1075
1076            let elapsed_us = start.elapsed().as_micros().min(u32::MAX as u128) as u32;
1077
1078            let convergence_history: Vec<ConvergenceEntry> = history
1079                .into_iter()
1080                .map(|(iter, res)| ConvergenceEntry {
1081                    iteration: iter as u32,
1082                    residual: res,
1083                })
1084                .collect();
1085
1086            SolveWithHistoryResult {
1087                solution,
1088                iterations: iterations as u32,
1089                residual,
1090                converged,
1091                algorithm: algo_name,
1092                time_us: elapsed_us,
1093                convergence_history,
1094            }
1095        })
1096        .await
1097        .map_err(|e| Error::from_reason(format!("Solver task failed: {}", e)))?;
1098
1099        Ok(result)
1100    }
1101}
1102
1103// ---------------------------------------------------------------------------
1104// Serde types for JSON solve interface
1105// ---------------------------------------------------------------------------
1106
/// JSON-deserializable mirror of the `SolveConfig` NAPI object for the
/// string-based solve interface. JSON keys use the snake_case field
/// names below (serde default naming).
#[derive(serde::Deserialize)]
struct SolveJsonInput {
    // CSR matrix data: non-zero values, one column index per value,
    // and row pointers of length rows + 1.
    values: Vec<f64>,
    col_indices: Vec<u32>,
    row_ptrs: Vec<u32>,
    rows: u32,
    cols: u32,
    // Right-hand side vector b of the system Ax = b.
    rhs: Vec<f64>,
    // Optional overrides; solver defaults apply when absent.
    tolerance: Option<f64>,
    max_iterations: Option<u32>,
    algorithm: Option<String>,
}
1119
/// JSON-serializable mirror of the `SolveResult` NAPI object for the
/// string-based solve interface.
#[derive(serde::Serialize)]
struct SolveJsonOutput {
    // Solution vector x.
    solution: Vec<f64>,
    // Number of iterations performed.
    iterations: u32,
    // Final residual norm ||Ax - b||.
    residual: f64,
    // Whether the solver converged within tolerance.
    converged: bool,
    // Name of the algorithm that was used.
    algorithm: String,
    // Wall-clock solve time in microseconds.
    time_us: u32,
}
1129
1130// ---------------------------------------------------------------------------
1131// Standalone functions
1132// ---------------------------------------------------------------------------
1133
1134/// Get the library version.
1135#[napi]
1136pub fn version() -> String {
1137    env!("CARGO_PKG_VERSION").to_string()
1138}
1139
1140/// Get library information.
1141#[napi]
1142pub fn info() -> LibraryInfo {
1143    LibraryInfo {
1144        name: "ruvector-solver-node".to_string(),
1145        version: env!("CARGO_PKG_VERSION").to_string(),
1146        description: "Sublinear-time sparse linear solver for Node.js".to_string(),
1147        algorithms: vec![
1148            "neumann".to_string(),
1149            "jacobi".to_string(),
1150            "gauss-seidel".to_string(),
1151            "conjugate-gradient".to_string(),
1152        ],
1153        features: vec![
1154            "async-solve".to_string(),
1155            "json-interface".to_string(),
1156            "pagerank".to_string(),
1157            "complexity-estimation".to_string(),
1158            "convergence-history".to_string(),
1159        ],
1160    }
1161}
1162
/// Library information.
#[napi(object)]
pub struct LibraryInfo {
    /// Package name.
    pub name: String,
    /// Crate version, captured at compile time.
    pub version: String,
    /// Short human-readable description.
    pub description: String,
    /// Names of the supported solver algorithms.
    pub algorithms: Vec<String>,
    /// Feature labels exposed by these bindings.
    pub features: Vec<String>,
}
1172
1173/// List available solver algorithms.
1174#[napi]
1175pub fn available_algorithms() -> Vec<String> {
1176    vec![
1177        "neumann".to_string(),
1178        "jacobi".to_string(),
1179        "gauss-seidel".to_string(),
1180        "conjugate-gradient".to_string(),
1181    ]
1182}