Skip to main content

oxilean_std/operations_research/
types.rs

1//! Auto-generated module
2//!
3//! 🤖 Generated with [SplitRS](https://github.com/cool-japan/splitrs)
4
5use super::functions::*;
6use std::cmp::Reverse;
7use std::collections::{BinaryHeap, VecDeque};
8
/// Bellman-Ford single-source shortest path algorithm.
///
/// Handles negative edge weights and detects negative cycles.
#[derive(Debug, Clone)]
pub struct BellmanFord {
    /// Number of vertices.
    pub n: usize,
    /// Edge list: (from, to, weight).
    pub edges: Vec<(usize, usize, i64)>,
}
impl BellmanFord {
    /// Create a new graph with `n` vertices and no edges.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            edges: Vec::new(),
        }
    }
    /// Add a directed edge from `from` to `to` with the given weight.
    pub fn add_edge(&mut self, from: usize, to: usize, weight: i64) {
        self.edges.push((from, to, weight));
    }
    /// Run Bellman-Ford from `source`.
    ///
    /// Returns `Ok(dist)` where `dist[v]` is the shortest distance to `v`
    /// (`i64::MAX / 2` means unreachable — note: not `i64::MAX`), or
    /// `Err(())` if a negative cycle is reachable from `source`.
    ///
    /// # Panics
    /// Panics if `source >= self.n`.
    #[allow(clippy::result_unit_err)]
    pub fn shortest_paths(&self, source: usize) -> Result<Vec<i64>, ()> {
        // Half of i64::MAX so `dist[u] + w` cannot overflow for any edge.
        let inf = i64::MAX / 2;
        let mut dist = vec![inf; self.n];
        dist[source] = 0;
        // n - 1 relaxation rounds suffice: a shortest path uses at most
        // n - 1 edges. saturating_sub avoids underflow when n == 0.
        for _ in 0..self.n.saturating_sub(1) {
            for &(u, v, w) in &self.edges {
                // Never relax out of an unreachable vertex.
                if dist[u] != inf && dist[u] + w < dist[v] {
                    dist[v] = dist[u] + w;
                }
            }
        }
        // One extra round: any further improvement implies a negative cycle.
        for &(u, v, w) in &self.edges {
            if dist[u] != inf && dist[u] + w < dist[v] {
                return Err(());
            }
        }
        Ok(dist)
    }
}
/// Discrete-state, discrete-action MDP solver.
///
/// Implements value iteration and policy iteration for discounted MDPs.
#[allow(dead_code)]
pub struct MdpSolver {
    /// Number of states |S|.
    pub num_states: usize,
    /// Number of actions |A|.
    pub num_actions: usize,
    /// Discount factor γ ∈ [0, 1).
    pub discount: f64,
    /// Reward matrix R[s][a].
    pub rewards: Vec<Vec<f64>>,
    /// Transition probabilities P[s][a][s'].
    pub transitions: Vec<Vec<Vec<f64>>>,
}
impl MdpSolver {
    /// Build an MDP solver from its components.
    ///
    /// `rewards[s][a]` is the immediate reward R(s,a);
    /// `transitions[s][a][sp]` is the probability P(sp | s, a).
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        num_states: usize,
        num_actions: usize,
        discount: f64,
        rewards: Vec<Vec<f64>>,
        transitions: Vec<Vec<Vec<f64>>>,
    ) -> Self {
        Self {
            num_states,
            num_actions,
            discount,
            rewards,
            transitions,
        }
    }
    /// Action value Q(s, a) = R(s,a) + γ Σ_{s'} P(s'|s,a) V(s') under `v`.
    fn q_value(&self, v: &[f64], s: usize, a: usize) -> f64 {
        let mut expected_future = 0.0_f64;
        for sp in 0..self.num_states {
            expected_future += self.transitions[s][a][sp] * v[sp];
        }
        self.rewards[s][a] + self.discount * expected_future
    }
    /// Largest absolute componentwise difference between two value vectors.
    fn sup_norm_diff(a: &[f64], b: &[f64]) -> f64 {
        let mut delta = 0.0_f64;
        for (x, y) in a.iter().zip(b) {
            delta = delta.max((x - y).abs());
        }
        delta
    }
    /// Apply the Bellman optimality operator once:
    /// T V(s) = max_a [R(s,a) + γ Σ P(s'|s,a) V(s')].
    pub fn bellman_update(&self, v: &[f64]) -> Vec<f64> {
        let mut updated = Vec::with_capacity(self.num_states);
        for s in 0..self.num_states {
            let mut best = f64::NEG_INFINITY;
            for a in 0..self.num_actions {
                best = best.max(self.q_value(v, s, a));
            }
            updated.push(best);
        }
        updated
    }
    /// Run value iteration until the sup-norm change drops below `tol`.
    ///
    /// Returns `(v, iterations)`; if `max_iter` is reached first, the current
    /// estimate is returned with `max_iter` as the iteration count.
    pub fn value_iteration(&self, tol: f64, max_iter: usize) -> (Vec<f64>, usize) {
        let mut v = vec![0.0_f64; self.num_states];
        for iter in 0..max_iter {
            let next = self.bellman_update(&v);
            let delta = Self::sup_norm_diff(&next, &v);
            v = next;
            if delta < tol {
                return (v, iter + 1);
            }
        }
        (v, max_iter)
    }
    /// Greedy policy with respect to `v`: policy[s] = argmax_a Q(s, a).
    pub fn extract_policy(&self, v: &[f64]) -> Vec<usize> {
        let mut policy = Vec::with_capacity(self.num_states);
        for s in 0..self.num_states {
            let greedy = (0..self.num_actions)
                .map(|a| (a, self.q_value(v, s, a)))
                .max_by(|x, y| x.1.partial_cmp(&y.1).unwrap_or(std::cmp::Ordering::Equal))
                .map(|(a, _)| a)
                .unwrap_or(0);
            policy.push(greedy);
        }
        policy
    }
    /// Evaluate a fixed policy by successive approximation:
    /// V^π(s) = R(s,π(s)) + γ Σ P(s'|s,π(s)) V^π(s').
    pub fn policy_evaluation(&self, policy: &[usize], tol: f64, max_iter: usize) -> Vec<f64> {
        let mut v = vec![0.0_f64; self.num_states];
        for _ in 0..max_iter {
            let mut next = Vec::with_capacity(self.num_states);
            for s in 0..self.num_states {
                next.push(self.q_value(&v, s, policy[s]));
            }
            let delta = Self::sup_norm_diff(&next, &v);
            v = next;
            if delta < tol {
                break;
            }
        }
        v
    }
    /// Policy iteration: alternate policy evaluation and greedy improvement.
    ///
    /// Returns `(policy, value, iterations)`; stops early once the policy is
    /// stable, otherwise after `max_iter` improvement rounds.
    pub fn policy_iteration(&self, tol: f64, max_iter: usize) -> (Vec<usize>, Vec<f64>, usize) {
        let mut policy = vec![0_usize; self.num_states];
        for iter in 0..max_iter {
            // Evaluate with a tighter tolerance so improvement sees
            // sufficiently accurate values.
            let v = self.policy_evaluation(&policy, tol / 10.0, 1000);
            let improved = self.extract_policy(&v);
            if improved == policy {
                return (policy, v, iter + 1);
            }
            policy = improved;
        }
        let v = self.policy_evaluation(&policy, tol, 1000);
        (policy, v, max_iter)
    }
}
/// A directed graph for max-flow computation.
#[derive(Debug, Clone)]
pub struct NetworkFlowGraph {
    /// Number of nodes.
    pub n: usize,
    /// Capacity matrix (n × n).
    pub capacities: Vec<Vec<i64>>,
}
impl NetworkFlowGraph {
    /// Create a new flow graph with `n` nodes and all-zero capacities.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            capacities: vec![vec![0_i64; n]; n],
        }
    }
    /// Add a directed edge u → v with capacity `cap` (repeated edges accumulate).
    pub fn add_edge(&mut self, u: usize, v: usize, cap: i64) {
        self.capacities[u][v] += cap;
    }
    /// Compute the maximum flow from `source` to `sink` using Edmonds-Karp
    /// (shortest augmenting paths found by BFS).
    ///
    /// Returns 0 when `source == sink`.
    pub fn max_flow_bfs(&self, source: usize, sink: usize) -> i64 {
        // Guard: with source == sink the BFS below "reaches" the sink
        // immediately, the path walk traverses zero edges, and
        // `path_flow = i64::MAX` is accumulated forever (overflow /
        // non-termination). The trivial instance carries no flow.
        if source == sink {
            return 0;
        }
        let n = self.n;
        let mut residual = self.capacities.clone();
        let mut total_flow = 0_i64;
        loop {
            // BFS for a shortest augmenting path in the residual graph.
            let mut parent = vec![usize::MAX; n];
            parent[source] = source;
            let mut queue = VecDeque::new();
            queue.push_back(source);
            while let Some(u) = queue.pop_front() {
                for v in 0..n {
                    if parent[v] == usize::MAX && residual[u][v] > 0 {
                        parent[v] = u;
                        if v == sink {
                            break;
                        }
                        queue.push_back(v);
                    }
                }
                if parent[sink] != usize::MAX {
                    break;
                }
            }
            // No augmenting path left: current flow is maximum.
            if parent[sink] == usize::MAX {
                break;
            }
            // Bottleneck capacity along the found path.
            let mut path_flow = i64::MAX;
            let mut v = sink;
            while v != source {
                let u = parent[v];
                path_flow = path_flow.min(residual[u][v]);
                v = u;
            }
            // Push the flow: decrease forward residuals, increase reverse.
            v = sink;
            while v != source {
                let u = parent[v];
                residual[u][v] -= path_flow;
                residual[v][u] += path_flow;
                v = u;
            }
            total_flow += path_flow;
        }
        total_flow
    }
    /// The minimum s-t cut value equals the maximum flow (max-flow min-cut theorem).
    pub fn min_cut_value(&self, source: usize, sink: usize) -> i64 {
        self.max_flow_bfs(source, sink)
    }
}
/// A collection of classic dynamic programming algorithms.
pub struct DynamicProgramming;
impl DynamicProgramming {
    /// 0/1 knapsack: maximum value achievable with given `capacity`.
    ///
    /// Space-optimized single-row table: capacities are scanned downwards so
    /// each item is counted at most once.
    pub fn knapsack(capacity: usize, weights: &[usize], values: &[usize]) -> usize {
        let mut best = vec![0_usize; capacity + 1];
        for i in 0..weights.len() {
            let (w, v) = (weights[i], values[i]);
            // Descending so best[c - w] still reflects the previous item row.
            for c in (w..=capacity).rev() {
                best[c] = best[c].max(best[c - w] + v);
            }
        }
        best[capacity]
    }
    /// Length of the longest common subsequence of byte slices `s1` and `s2`.
    ///
    /// Rolling two-row table: O(min-memory) instead of the full m×n grid.
    pub fn longest_common_subseq(s1: &[u8], s2: &[u8]) -> usize {
        let cols = s2.len();
        let mut prev = vec![0_usize; cols + 1];
        let mut cur = vec![0_usize; cols + 1];
        for &a in s1 {
            for (j, &b) in s2.iter().enumerate() {
                cur[j + 1] = if a == b {
                    prev[j] + 1
                } else {
                    prev[j + 1].max(cur[j])
                };
            }
            std::mem::swap(&mut prev, &mut cur);
        }
        prev[cols]
    }
    /// Minimum number of scalar multiplications to compute a chain of matrices.
    ///
    /// `dims` has length n+1 where the i-th matrix is dims[i] × dims[i+1].
    pub fn matrix_chain_order(dims: &[usize]) -> usize {
        let n = dims.len().saturating_sub(1);
        if n == 0 {
            return 0;
        }
        let mut dp = vec![vec![0_usize; n]; n];
        // `len` = number of matrices in the sub-chain [i, j].
        for len in 2..=n {
            for i in 0..=(n - len) {
                let j = i + len - 1;
                // Best split point k: (A_i..A_k)(A_{k+1}..A_j).
                dp[i][j] = (i..j)
                    .map(|k| {
                        dp[i][k]
                            .saturating_add(dp[k + 1][j])
                            .saturating_add(dims[i] * dims[k + 1] * dims[j + 1])
                    })
                    .min()
                    .unwrap_or(usize::MAX);
            }
        }
        dp[0][n - 1]
    }
    /// Minimum number of coins from `coins` that sum to `amount`.
    ///
    /// Returns `None` if no combination reaches `amount`.
    pub fn coin_change(coins: &[usize], amount: usize) -> Option<usize> {
        // Half of usize::MAX so `+ 1` below cannot wrap.
        let unreachable = usize::MAX / 2;
        let mut min_coins = vec![unreachable; amount + 1];
        min_coins[0] = 0;
        for target in 1..=amount {
            for &coin in coins {
                if coin <= target && min_coins[target - coin] != unreachable {
                    min_coins[target] = min_coins[target].min(min_coins[target - coin] + 1);
                }
            }
        }
        (min_coins[amount] != unreachable).then(|| min_coins[amount])
    }
}
/// An M/M/c queueing model.
#[derive(Debug, Clone)]
pub struct QueueingSystem {
    /// Arrival rate λ (customers per unit time).
    pub arrival_rate: f64,
    /// Service rate μ per server (customers per unit time).
    pub service_rate: f64,
    /// Number of servers c.
    pub num_servers: usize,
}
impl QueueingSystem {
    /// Build an M/M/c system with arrival rate `lambda`, per-server service
    /// rate `mu` and `c` servers.
    pub fn new(lambda: f64, mu: f64, c: usize) -> Self {
        Self {
            arrival_rate: lambda,
            service_rate: mu,
            num_servers: c,
        }
    }
    /// Traffic intensity ρ = λ / (c μ).
    pub fn utilization(&self) -> f64 {
        let total_service_rate = self.num_servers as f64 * self.service_rate;
        self.arrival_rate / total_service_rate
    }
    /// Stability criterion: the system is stable when ρ < 1.
    pub fn is_stable(&self) -> bool {
        self.utilization() < 1.0
    }
    /// Mean number of customers in an M/M/1 system: L = ρ / (1 − ρ).
    ///
    /// `None` unless this is a stable single-server queue.
    pub fn mean_queue_length_m_m_1(&self) -> Option<f64> {
        if self.num_servers == 1 && self.is_stable() {
            let rho = self.utilization();
            Some(rho / (1.0 - rho))
        } else {
            None
        }
    }
    /// Mean sojourn time via Little's law: W = L / λ (M/M/1 only).
    ///
    /// `None` unless this is a stable single-server queue.
    pub fn mean_waiting_time(&self) -> Option<f64> {
        self.mean_queue_length_m_m_1()
            .map(|l| l / self.arrival_rate)
    }
}
/// A collection of jobs with processing times and deadlines.
#[derive(Debug, Clone)]
pub struct JobScheduler {
    /// Jobs: (name, processing_time, deadline).
    pub jobs: Vec<(String, u64, u64)>,
}
impl Default for JobScheduler {
    /// Same as [`JobScheduler::new`]: a scheduler with no jobs.
    fn default() -> Self {
        Self::new()
    }
}
impl JobScheduler {
    /// Create an empty job scheduler.
    pub fn new() -> Self {
        Self { jobs: Vec::new() }
    }
    /// Add a job with the given name, processing time and deadline.
    pub fn add_job(&mut self, name: &str, proc_time: u64, deadline: u64) {
        self.jobs.push((name.to_owned(), proc_time, deadline));
    }
    /// Earliest Deadline First (EDF) schedule — returns job names in EDF order.
    ///
    /// The sort is stable: equal deadlines keep insertion order.
    pub fn earliest_deadline_first(&self) -> Vec<&str> {
        let mut indices: Vec<usize> = (0..self.jobs.len()).collect();
        indices.sort_by_key(|&i| self.jobs[i].2);
        indices.iter().map(|&i| self.jobs[i].0.as_str()).collect()
    }
    /// Shortest Job First (SJF) schedule — returns job names in SJF order
    /// (stable for ties).
    pub fn shortest_job_first(&self) -> Vec<&str> {
        let mut indices: Vec<usize> = (0..self.jobs.len()).collect();
        indices.sort_by_key(|&i| self.jobs[i].1);
        indices.iter().map(|&i| self.jobs[i].0.as_str()).collect()
    }
    /// Total completion time Σ C_j for the given schedule (by job name).
    ///
    /// Names not present in `jobs` are silently skipped.
    pub fn total_completion_time(&self, schedule: &[&str]) -> u64 {
        let mut time = 0_u64;
        let mut total = 0_u64;
        for name in schedule {
            if let Some(job) = self.jobs.iter().find(|(n, _, _)| n == name) {
                time += job.1;
                total += time;
            }
        }
        total
    }
    /// Makespan (total elapsed processing time) for a given schedule.
    ///
    /// Names not present in `jobs` are silently skipped.
    pub fn makespan(&self, schedule: &[&str]) -> u64 {
        schedule
            .iter()
            .filter_map(|name| self.jobs.iter().find(|(n, _, _)| n == name))
            .map(|(_, p, _)| p)
            .sum()
    }
}
/// Prim's algorithm for Minimum Spanning Tree on a dense graph.
#[derive(Debug, Clone)]
pub struct PrimMst {
    /// Number of vertices.
    pub n: usize,
    /// Adjacency matrix; `cost[i][j] = i64::MAX` means no edge.
    pub cost: Vec<Vec<i64>>,
}
impl PrimMst {
    /// Create a new MST solver with `n` vertices (no edges initially).
    pub fn new(n: usize) -> Self {
        Self {
            n,
            cost: vec![vec![i64::MAX; n]; n],
        }
    }
    /// Add an undirected edge between u and v with weight w.
    pub fn add_edge(&mut self, u: usize, v: usize, w: i64) {
        self.cost[u][v] = w;
        self.cost[v][u] = w;
    }
    /// Run Prim's algorithm starting from vertex 0.
    ///
    /// Returns `(total_weight, edges)` where each edge is `(u, v, weight)`.
    /// Vertices not connected to vertex 0 contribute no edges, so the result
    /// spans only vertex 0's component. An empty graph returns `(0, [])`.
    pub fn run(&self) -> (i64, Vec<(usize, usize, i64)>) {
        let n = self.n;
        // Fix: without this guard, `key[0] = 0` below panics when n == 0.
        if n == 0 {
            return (0, Vec::new());
        }
        let mut in_mst = vec![false; n];
        let mut key = vec![i64::MAX; n];
        let mut parent = vec![usize::MAX; n];
        key[0] = 0;
        let mut mst_edges = Vec::new();
        let mut total = 0_i64;
        for _ in 0..n {
            // Pick the unvisited vertex with the cheapest edge into the tree.
            let u = (0..n)
                .filter(|&v| !in_mst[v])
                .min_by_key(|&v| key[v])
                .expect("Prim's algorithm: there is always an unvisited vertex in 0..n iterations");
            in_mst[u] = true;
            // parent[u] == usize::MAX marks the root (or an unreachable vertex):
            // no edge is recorded for it.
            if parent[u] != usize::MAX {
                let w = key[u];
                mst_edges.push((parent[u], u, w));
                total += w;
            }
            // Relax the edges leaving u.
            for v in 0..n {
                if !in_mst[v] && self.cost[u][v] < key[v] {
                    key[v] = self.cost[u][v];
                    parent[v] = u;
                }
            }
        }
        (total, mst_edges)
    }
}
/// Reliability calculations for series and parallel systems.
#[derive(Debug, Clone)]
pub struct ReliabilitySystem {
    /// Component reliabilities (probabilities of success in [0, 1]).
    pub components: Vec<f64>,
}
impl ReliabilitySystem {
    /// Create a reliability system from a list of component reliabilities.
    pub fn new(components: Vec<f64>) -> Self {
        Self { components }
    }
    /// Series system reliability: R = ∏ R_i (every component must work).
    pub fn series_reliability(&self) -> f64 {
        self.components.iter().product()
    }
    /// Parallel system reliability: R = 1 - ∏ (1 - R_i) (any one suffices).
    pub fn parallel_reliability(&self) -> f64 {
        1.0 - self.components.iter().map(|&r| 1.0 - r).product::<f64>()
    }
    /// Binomial coefficient C(n, k) computed directly in f64.
    ///
    /// Fix: the previous `binomial_coeff(n, j) as f64` went through an
    /// integer helper, which (for a u64 implementation) overflows around
    /// n ≈ 62 and corrupts the probability. Computed multiplicatively in
    /// f64, every intermediate is itself a binomial coefficient, exact as
    /// long as it stays below 2^53.
    fn binomial_f64(n: usize, k: usize) -> f64 {
        // C(n, k) == C(n, n - k); use the smaller k for fewer steps.
        let k = k.min(n - k);
        let mut coeff = 1.0_f64;
        for i in 0..k {
            // After this step `coeff` equals C(n, i + 1).
            coeff = coeff * (n - i) as f64 / (i + 1) as f64;
        }
        coeff
    }
    /// k-out-of-n system reliability (exact binomial sum).
    ///
    /// Returns the probability that at least `k` out of `n` components succeed,
    /// assuming all components have the same reliability `p = components[0]`.
    /// Returns 0.0 for an empty system.
    pub fn k_out_of_n_reliability(&self, k: usize) -> f64 {
        let n = self.components.len();
        if n == 0 {
            return 0.0;
        }
        let p = self.components[0];
        let q = 1.0 - p;
        (k..=n)
            .map(|j| Self::binomial_f64(n, j) * p.powi(j as i32) * q.powi((n - j) as i32))
            .sum()
    }
}
525/// Simulate a multi-armed bandit environment with known true means.
526#[allow(dead_code)]
527pub struct BanditEnvironment {
528    /// True mean rewards for each arm.
529    pub true_means: Vec<f64>,
530    /// Pseudo-random state.
531    rng_state: u64,
532}
533impl BanditEnvironment {
534    /// Create a bandit environment with given true means.
535    pub fn new(true_means: Vec<f64>) -> Self {
536        Self {
537            true_means,
538            rng_state: 42,
539        }
540    }
541    /// Sample a reward from arm i (Gaussian noise with σ=1).
542    pub fn sample(&mut self, arm: usize) -> f64 {
543        let u1 = self.lcg_next();
544        let u2 = self.lcg_next();
545        let z = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
546        self.true_means[arm] + z
547    }
548    /// LCG pseudo-random number in (0, 1).
549    fn lcg_next(&mut self) -> f64 {
550        self.rng_state = self
551            .rng_state
552            .wrapping_mul(6364136223846793005)
553            .wrapping_add(1442695040888963407);
554        let val = (self.rng_state >> 33) as f64 / (u32::MAX as f64 + 1.0);
555        val.max(1e-10)
556    }
557    /// Optimal arm (highest true mean).
558    pub fn optimal_arm(&self) -> usize {
559        self.true_means
560            .iter()
561            .enumerate()
562            .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
563            .map(|(i, _)| i)
564            .unwrap_or(0)
565    }
566    /// Run UCB1 for T rounds; return cumulative regret.
567    pub fn run_ucb1(&mut self, rounds: usize) -> f64 {
568        let k = self.true_means.len();
569        let optimal_mean = self.true_means[self.optimal_arm()];
570        let mut bandit = MultiArmedBanditUcb::new(k);
571        let mut cumulative_regret = 0.0_f64;
572        for _ in 0..rounds {
573            let arm = bandit.select_arm();
574            let reward = self.sample(arm);
575            bandit.update(arm, reward);
576            cumulative_regret += optimal_mean - self.true_means[arm];
577        }
578        cumulative_regret
579    }
580}
/// Multi-armed bandit solver using the UCB1 algorithm.
///
/// UCB1 index: Q̄_i + √(2 ln T / n_i)
/// where Q̄_i = average reward of arm i, T = total plays, n_i = plays of arm i.
#[allow(dead_code)]
pub struct MultiArmedBanditUcb {
    /// Number of arms.
    pub num_arms: usize,
    /// Cumulative rewards per arm.
    pub total_rewards: Vec<f64>,
    /// Number of pulls per arm.
    pub pull_counts: Vec<u64>,
    /// Total number of rounds played.
    pub total_rounds: u64,
}
impl MultiArmedBanditUcb {
    /// Create a fresh UCB1 bandit with `num_arms` arms and no history.
    pub fn new(num_arms: usize) -> Self {
        Self {
            num_arms,
            total_rewards: vec![0.0; num_arms],
            pull_counts: vec![0; num_arms],
            total_rounds: 0,
        }
    }
    /// UCB1 index for `arm`; untried arms get +∞ so they are explored first.
    pub fn ucb_index(&self, arm: usize) -> f64 {
        let pulls = self.pull_counts[arm];
        if pulls == 0 {
            return f64::INFINITY;
        }
        let n = pulls as f64;
        let mean = self.total_rewards[arm] / n;
        let exploration = (2.0 * (self.total_rounds as f64).ln() / n).sqrt();
        mean + exploration
    }
    /// Select the arm with the highest UCB index (ties broken by smallest index).
    pub fn select_arm(&self) -> usize {
        (0..self.num_arms)
            .max_by(|&a, &b| {
                let by_index = self
                    .ucb_index(a)
                    .partial_cmp(&self.ucb_index(b))
                    .unwrap_or(std::cmp::Ordering::Equal);
                // On equal indices, make the lower arm number compare greater.
                by_index.then(b.cmp(&a))
            })
            .unwrap_or(0)
    }
    /// Record one pull of `arm` with the observed `reward`.
    pub fn update(&mut self, arm: usize, reward: f64) {
        self.total_rewards[arm] += reward;
        self.pull_counts[arm] += 1;
        self.total_rounds += 1;
    }
    /// Empirical mean reward of `arm` (0.0 if it was never pulled).
    pub fn average_reward(&self, arm: usize) -> f64 {
        match self.pull_counts[arm] {
            0 => 0.0,
            pulls => self.total_rewards[arm] / pulls as f64,
        }
    }
    /// Arm with the highest empirical mean among pulled arms (0 if none pulled).
    pub fn best_arm(&self) -> usize {
        (0..self.num_arms)
            .filter(|&arm| self.pull_counts[arm] > 0)
            .max_by(|&a, &b| {
                self.average_reward(a)
                    .partial_cmp(&self.average_reward(b))
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
            .unwrap_or(0)
    }
}
/// Two-stage stochastic linear program.
///
/// Stage 1: min cᵀx + E_ξ[Q(x, ξ)]  s.t. Ax = b, x ≥ 0
/// Stage 2: Q(x, ξ) = min qᵀy  s.t. Wy = h - Tx, y ≥ 0
#[allow(dead_code)]
pub struct TwoStageStochasticLP {
    /// Stage 1 objective coefficient c.
    pub stage1_obj: Vec<f64>,
    /// Stage 2 objective coefficient q (same for all scenarios here).
    pub stage2_obj: Vec<f64>,
    /// Scenario probabilities.
    pub probabilities: Vec<f64>,
    /// Recourse right-hand sides h_s for each scenario s.
    pub scenario_rhs: Vec<Vec<f64>>,
}
impl TwoStageStochasticLP {
    /// Assemble a two-stage stochastic LP from its data.
    pub fn new(
        stage1_obj: Vec<f64>,
        stage2_obj: Vec<f64>,
        probabilities: Vec<f64>,
        scenario_rhs: Vec<Vec<f64>>,
    ) -> Self {
        Self {
            stage1_obj,
            stage2_obj,
            probabilities,
            scenario_rhs,
        }
    }
    /// Number of scenarios.
    pub fn num_scenarios(&self) -> usize {
        self.probabilities.len()
    }
    /// Expected recourse cost given stage-2 decisions y_s per scenario:
    /// E[qᵀy] = Σ_s p_s · qᵀy_s.
    pub fn expected_recourse_cost(&self, stage2_decisions: &[Vec<f64>]) -> f64 {
        let mut expected = 0.0_f64;
        for (&prob, y) in self.probabilities.iter().zip(stage2_decisions.iter()) {
            let mut scenario_cost = 0.0_f64;
            for (q, v) in self.stage2_obj.iter().zip(y.iter()) {
                scenario_cost += q * v;
            }
            expected += prob * scenario_cost;
        }
        expected
    }
    /// Total cost given stage-1 decisions x and stage-2 decisions y_s:
    /// cᵀx plus the expected recourse cost.
    pub fn total_cost(&self, stage1_x: &[f64], stage2_decisions: &[Vec<f64>]) -> f64 {
        let mut stage1_cost = 0.0_f64;
        for (c, x) in self.stage1_obj.iter().zip(stage1_x.iter()) {
            stage1_cost += c * x;
        }
        stage1_cost + self.expected_recourse_cost(stage2_decisions)
    }
    /// Check that the stage-1 solution is non-negative (up to 1e-9 tolerance).
    pub fn is_stage1_feasible(&self, x: &[f64]) -> bool {
        !x.iter().any(|&v| v < -1e-9)
    }
}
/// 0/1 knapsack solver with item selection tracking.
#[derive(Debug, Clone)]
pub struct KnapsackDP {
    /// Item weights.
    pub weights: Vec<usize>,
    /// Item values.
    pub values: Vec<usize>,
    /// Knapsack capacity.
    pub capacity: usize,
}
impl KnapsackDP {
    /// Build a knapsack instance; `weights` and `values` are parallel arrays
    /// (item i has `weights[i]` and `values[i]`).
    pub fn new(capacity: usize, weights: Vec<usize>, values: Vec<usize>) -> Self {
        Self {
            weights,
            values,
            capacity,
        }
    }
    /// Solve the 0/1 knapsack exactly in O(n · capacity).
    ///
    /// Returns `(optimal_value, selected_item_indices)`; indices are in
    /// increasing order.
    pub fn solve(&self) -> (usize, Vec<usize>) {
        let item_count = self.weights.len();
        let cap = self.capacity;
        // table[i][c]: best value using the first i items with capacity c.
        let mut table = vec![vec![0_usize; cap + 1]; item_count + 1];
        for item in 1..=item_count {
            let weight = self.weights[item - 1];
            let value = self.values[item - 1];
            for c in 0..=cap {
                // Default: skip this item …
                let mut best = table[item - 1][c];
                // … or take it when it fits and improves the value.
                if weight <= c {
                    best = best.max(table[item - 1][c - weight] + value);
                }
                table[item][c] = best;
            }
        }
        // Walk the table backwards: a value change at row `item` means
        // item - 1 was taken.
        let mut chosen = Vec::new();
        let mut c = cap;
        for item in (1..=item_count).rev() {
            if table[item][c] != table[item - 1][c] {
                chosen.push(item - 1);
                c -= self.weights[item - 1];
            }
        }
        chosen.reverse();
        (table[item_count][cap], chosen)
    }
}
/// Classical Economic Order Quantity (EOQ) inventory model.
#[derive(Debug, Clone)]
pub struct InventoryModel {
    /// Annual demand rate D.
    pub demand: f64,
    /// Fixed order cost per order S.
    pub order_cost: f64,
    /// Holding cost per unit per year H.
    pub holding_cost: f64,
    /// Lead time L (in the same time unit as demand).
    pub lead_time: f64,
}
impl InventoryModel {
    /// Build a model from demand `d`, order cost `s`, holding cost `h` and
    /// lead time `l`.
    pub fn new(d: f64, s: f64, h: f64, l: f64) -> Self {
        Self {
            demand: d,
            order_cost: s,
            holding_cost: h,
            lead_time: l,
        }
    }
    /// Economic Order Quantity: Q* = √(2DS / H), the order size minimizing
    /// `total_cost`.
    pub fn eoq(&self) -> f64 {
        let numerator = 2.0 * self.demand * self.order_cost;
        (numerator / self.holding_cost).sqrt()
    }
    /// Reorder point: R = D·L + safety_stock.
    pub fn reorder_point(&self, safety_stock: f64) -> f64 {
        self.demand * self.lead_time + safety_stock
    }
    /// Total annual cost for order quantity `q`: ordering cost (D/q)·S plus
    /// holding cost (q/2)·H.
    pub fn total_cost(&self, q: f64) -> f64 {
        let ordering = (self.demand / q) * self.order_cost;
        let holding = (q / 2.0) * self.holding_cost;
        ordering + holding
    }
}
/// Floyd-Warshall all-pairs shortest paths.
#[derive(Debug, Clone)]
pub struct FloydWarshall {
    /// Number of vertices.
    pub n: usize,
    /// Distance matrix; `dist[i][j] = i64::MAX/2` means no direct edge.
    pub dist: Vec<Vec<i64>>,
}
impl FloydWarshall {
    /// Create a new instance with `n` vertices (self-loops = 0, rest = infinity).
    pub fn new(n: usize) -> Self {
        let inf = i64::MAX / 2;
        let mut dist = vec![vec![inf; n]; n];
        for (i, row) in dist.iter_mut().enumerate() {
            row[i] = 0;
        }
        Self { n, dist }
    }
    /// Add a directed edge u → v with weight `w`, keeping the minimum over
    /// parallel edges.
    pub fn add_edge(&mut self, u: usize, v: usize, w: i64) {
        if w < self.dist[u][v] {
            self.dist[u][v] = w;
        }
    }
    /// Run Floyd-Warshall and return the all-pairs distance matrix.
    ///
    /// Returns `Err(())` if a negative cycle is detected (some `dist[i][i]`
    /// goes below zero).
    #[allow(clippy::result_unit_err)]
    pub fn run(&mut self) -> Result<Vec<Vec<i64>>, ()> {
        let inf = i64::MAX / 2;
        let n = self.n;
        for k in 0..n {
            for i in 0..n {
                // An infinite dist[i][k] stays infinite for this whole pass,
                // so the entire j loop can be skipped.
                if self.dist[i][k] == inf {
                    continue;
                }
                for j in 0..n {
                    if self.dist[k][j] == inf {
                        continue;
                    }
                    let through_k = self.dist[i][k] + self.dist[k][j];
                    if through_k < self.dist[i][j] {
                        self.dist[i][j] = through_k;
                    }
                }
            }
        }
        // A negative diagonal entry certifies a negative cycle.
        if (0..n).any(|i| self.dist[i][i] < 0) {
            return Err(());
        }
        Ok(self.dist.clone())
    }
}
/// Hungarian algorithm for the linear assignment problem.
///
/// Given an n×n cost matrix, find an assignment of agents to tasks with
/// minimum total cost. Runs in O(n³).
#[derive(Debug, Clone)]
pub struct HungarianSolver {
    /// Cost matrix (n × n); `cost[i][j]` is the cost of assigning task `j`
    /// to agent `i`. Assumed square — NOTE(review): no shape validation is
    /// performed here; confirm callers supply a square matrix.
    pub cost: Vec<Vec<i64>>,
}
862impl HungarianSolver {
    /// Create a new solver with the given square cost matrix.
    ///
    /// The matrix is stored as-is; `cost[i][j]` is the cost of assigning
    /// task `j` to agent `i`.
    pub fn new(cost: Vec<Vec<i64>>) -> Self {
        Self { cost }
    }
867    /// Solve the assignment problem.
868    ///
869    /// Returns `(min_cost, assignment)` where `assignment[i]` is the task
870    /// assigned to agent `i`.
871    pub fn solve(&self) -> (i64, Vec<usize>) {
872        let n = self.cost.len();
873        if n == 0 {
874            return (0, vec![]);
875        }
876        let inf = i64::MAX / 2;
877        let mut u = vec![0_i64; n + 1];
878        let mut v = vec![0_i64; n + 1];
879        let mut p = vec![0_usize; n + 1];
880        let mut way = vec![0_usize; n + 1];
881        for i in 1..=n {
882            p[0] = i;
883            let mut j0 = 0_usize;
884            let mut minv = vec![inf; n + 1];
885            let mut used = vec![false; n + 1];
886            loop {
887                used[j0] = true;
888                let i0 = p[j0];
889                let mut delta = inf;
890                let mut j1 = 0_usize;
891                for j in 1..=n {
892                    if !used[j] {
893                        let cur = self.cost[i0 - 1][j - 1] - u[i0] - v[j];
894                        if cur < minv[j] {
895                            minv[j] = cur;
896                            way[j] = j0;
897                        }
898                        if minv[j] < delta {
899                            delta = minv[j];
900                            j1 = j;
901                        }
902                    }
903                }
904                for j in 0..=n {
905                    if used[j] {
906                        u[p[j]] += delta;
907                        v[j] -= delta;
908                    } else {
909                        minv[j] -= delta;
910                    }
911                }
912                j0 = j1;
913                if p[j0] == 0 {
914                    break;
915                }
916            }
917            loop {
918                p[j0] = p[way[j0]];
919                j0 = way[j0];
920                if j0 == 0 {
921                    break;
922                }
923            }
924        }
925        let mut assignment = vec![0_usize; n];
926        for j in 1..=n {
927            if p[j] != 0 {
928                assignment[p[j] - 1] = j - 1;
929            }
930        }
931        let min_cost: i64 = (0..n).map(|i| self.cost[i][assignment[i]]).sum();
932        (min_cost, assignment)
933    }
934}
/// The newsvendor (critical fractile) inventory model.
///
/// Demand is assumed uniform on [demand_lo, demand_hi].
#[derive(Debug, Clone)]
pub struct NewsvendorModel {
    /// Lower bound of uniform demand.
    pub demand_lo: f64,
    /// Upper bound of uniform demand.
    pub demand_hi: f64,
    /// Unit purchase (production) cost c_u.
    pub unit_cost: f64,
    /// Unit selling price p.
    pub selling_price: f64,
    /// Unit salvage value s (for unsold units).
    pub salvage_value: f64,
}
impl NewsvendorModel {
    /// Create a new newsvendor model with demand ~ Uniform(`lo`, `hi`).
    pub fn new(lo: f64, hi: f64, cost: f64, price: f64, salvage: f64) -> Self {
        Self {
            demand_lo: lo,
            demand_hi: hi,
            unit_cost: cost,
            selling_price: price,
            salvage_value: salvage,
        }
    }
    /// Critical fractile: c_r / (c_r + c_e) where c_r = p − c (underage cost)
    /// and c_e = c − s (overage cost).
    ///
    /// NOTE(review): c_r + c_e = p − s, so this divides by zero when the
    /// selling price equals the salvage value — confirm callers avoid that.
    pub fn critical_fractile(&self) -> f64 {
        let c_e = self.unit_cost - self.salvage_value;
        let c_r = self.selling_price - self.unit_cost;
        c_r / (c_r + c_e)
    }
    /// Optimal order quantity Q*: the inverse CDF of the uniform demand
    /// evaluated at the critical fractile.
    pub fn optimal_quantity(&self) -> f64 {
        let cf = self.critical_fractile();
        self.demand_lo + cf * (self.demand_hi - self.demand_lo)
    }
    /// Expected profit at order quantity `q`:
    /// p·E[min(D, q)] + s·E[(q − D)⁺] − c·q, with D ~ Uniform(lo, hi).
    pub fn expected_profit(&self, q: f64) -> f64 {
        let lo = self.demand_lo;
        let hi = self.demand_hi;
        let range = hi - lo;
        // E[min(D, q)], split by where q lies relative to [lo, hi].
        let e_min = if q <= lo {
            // Demand is always at least q, so min(D, q) = q.
            q
        } else if q >= hi {
            // q always covers demand, so min(D, q) = D with mean (lo + hi)/2.
            (lo + hi) / 2.0
        } else {
            // E[min(D, q)] = ∫_lo^q x/range dx + q·P(D > q).
            // Bug fix: the old expression `(q*(q-lo) - (q*q - lo*lo)/2)/range`
            // simplifies to (q − lo)²/(2·range), but the partial expectation
            // of a uniform variable is (q² − lo²)/(2·range); the two agree
            // only when lo == 0 (the old code could return E[min] < lo).
            let below = (q * q - lo * lo) / (2.0 * range);
            let above = q * (hi - q) / range;
            below + above
        };
        // Expected unsold units E[(q − D)⁺] = q − E[min(D, q)].
        let e_over = q - e_min;
        self.selling_price * e_min + self.salvage_value * e_over - self.unit_cost * q
    }
}
/// Ford-Fulkerson max-flow using DFS (depth-first) augmentation.
#[derive(Debug, Clone)]
pub struct FordFulkerson {
    /// Number of nodes.
    pub n: usize,
    /// Residual capacity matrix.
    residual: Vec<Vec<i64>>,
}
impl FordFulkerson {
    /// Create a new graph with `n` nodes.
    pub fn new(n: usize) -> Self {
        let residual = vec![vec![0_i64; n]; n];
        Self { n, residual }
    }
    /// Add directed edge u → v with capacity `cap`.
    ///
    /// Parallel edges accumulate into a single residual entry.
    pub fn add_edge(&mut self, u: usize, v: usize, cap: i64) {
        self.residual[u][v] += cap;
    }
    /// DFS to find an augmenting path; returns bottleneck flow or 0.
    fn dfs(&mut self, u: usize, sink: usize, flow: i64, visited: &mut Vec<bool>) -> i64 {
        if u == sink {
            return flow;
        }
        visited[u] = true;
        for next in 0..self.n {
            let cap = self.residual[u][next];
            if visited[next] || cap <= 0 {
                continue;
            }
            let pushed = self.dfs(next, sink, flow.min(cap), visited);
            if pushed > 0 {
                // Augment along the edge: shrink forward, grow reverse.
                self.residual[u][next] -= pushed;
                self.residual[next][u] += pushed;
                return pushed;
            }
        }
        0
    }
    /// Compute max-flow from `source` to `sink`.
    pub fn max_flow(&mut self, source: usize, sink: usize) -> i64 {
        let mut total = 0_i64;
        // Keep augmenting until no source→sink path remains in the residual graph.
        loop {
            let mut seen = vec![false; self.n];
            match self.dfs(source, sink, i64::MAX, &mut seen) {
                0 => return total,
                f => total += f,
            }
        }
    }
}
/// Lagrangian relaxation solver using the subgradient method.
///
/// For a problem: min f(x) s.t. g(x) ≤ 0, x ∈ X
/// The Lagrangian is: L(x, λ) = f(x) + λᵀg(x)
/// The dual is: max_{λ ≥ 0} min_{x ∈ X} L(x, λ)
#[allow(dead_code)]
pub struct LagrangianRelaxationSolver {
    /// Current Lagrange multipliers (one per constraint).
    pub multipliers: Vec<f64>,
    /// Step size rule parameter.
    pub step_size: f64,
    /// Best lower bound found.
    pub best_lower_bound: f64,
    /// Iteration count.
    pub iterations: usize,
}
impl LagrangianRelaxationSolver {
    /// Create a new solver: multipliers start at zero and no lower bound has
    /// been recorded yet.
    pub fn new(num_constraints: usize, initial_step: f64) -> Self {
        Self {
            multipliers: vec![0.0; num_constraints],
            step_size: initial_step,
            best_lower_bound: f64::NEG_INFINITY,
            iterations: 0,
        }
    }
    /// Subgradient update: λ_{k+1} = max(0, λ_k + t_k · g(x_k)).
    ///
    /// `subgradient[i]` = g_i(x_k) (constraint violation of the current
    /// solution); `step` is the current step t_k. If `subgradient` is shorter
    /// than `multipliers`, the trailing multipliers are left untouched.
    pub fn subgradient_update(&mut self, subgradient: &[f64], step: f64) {
        let count = self.multipliers.len().min(subgradient.len());
        for i in 0..count {
            let stepped = self.multipliers[i] + step * subgradient[i];
            // Project back onto the dual-feasible region λ ≥ 0.
            self.multipliers[i] = if stepped > 0.0 { stepped } else { 0.0 };
        }
        self.iterations += 1;
    }
    /// Scaled Polyak step: t_k = step_size · (UB − L_k) / ‖g‖².
    ///
    /// `upper_bound` is a known primal-feasible value, `lagrangian_value` is
    /// the current dual value L_k, and `subgradient` holds the constraint
    /// violations. Returns 0 when the subgradient is numerically zero.
    pub fn polyak_step(&self, upper_bound: f64, lagrangian_value: f64, subgradient: &[f64]) -> f64 {
        let norm_sq = subgradient.iter().fold(0.0_f64, |acc, &g| acc + g * g);
        if norm_sq < 1e-10 {
            0.0
        } else {
            (upper_bound - lagrangian_value) / norm_sq * self.step_size
        }
    }
    /// Record `lb` as the best lower bound when it improves on the current one.
    pub fn update_lower_bound(&mut self, lb: f64) {
        self.best_lower_bound = self.best_lower_bound.max(lb);
    }
    /// Current duality gap estimate given an upper bound.
    pub fn duality_gap(&self, upper_bound: f64) -> f64 {
        upper_bound - self.best_lower_bound
    }
}
/// A tableau-based simplex solver for LP: minimize cᵀx subject to Ax ≤ b, x ≥ 0.
///
/// Uses the primal simplex method on the full tableau, starting from the
/// all-slack basis (no two-phase or big-M initialization is performed, so
/// the initial basis is feasible only when every `rhs[i] ≥ 0`).
/// For simplicity this implementation handles only the bounded feasible case.
#[derive(Debug, Clone)]
pub struct SimplexSolver {
    /// Number of decision variables.
    pub num_vars: usize,
    /// Number of constraints (rows in A).
    pub num_constraints: usize,
    /// Objective coefficients c (length = num_vars).
    pub obj: Vec<f64>,
    /// Constraint matrix A (num_constraints × num_vars).
    pub a_matrix: Vec<Vec<f64>>,
    /// Right-hand side b (length = num_constraints).
    pub rhs: Vec<f64>,
}
impl SimplexSolver {
    /// Create a new simplex solver instance.
    ///
    /// `obj` supplies one coefficient per decision variable; `a_matrix` is
    /// expected to have `rhs.len()` rows of `obj.len()` columns each.
    pub fn new(obj: Vec<f64>, a_matrix: Vec<Vec<f64>>, rhs: Vec<f64>) -> Self {
        let num_vars = obj.len();
        let num_constraints = rhs.len();
        Self {
            num_vars,
            num_constraints,
            obj,
            a_matrix,
            rhs,
        }
    }
    /// Solve the LP using the simplex method (minimization).
    ///
    /// Uses the standard full-tableau form.  The objective row stores
    /// the current reduced costs; a negative reduced cost means the
    /// variable can improve the objective.
    ///
    /// Returns `Some(optimal_value)` if an optimal solution is found,
    /// or `None` if the problem is unbounded or infeasible.
    ///
    /// NOTE(review): assumes every `rhs[i] ≥ 0` so the starting all-slack
    /// basis is feasible, and if the iteration cap is exhausted (e.g. by
    /// cycling) the returned value may not be optimal — confirm callers
    /// tolerate both.
    pub fn solve(&self) -> Option<f64> {
        let m = self.num_constraints;
        let n = self.num_vars;
        // Tableau column layout: [x_0..x_{n-1} | slack_0..slack_{m-1} | rhs].
        let total_cols = n + m + 1;
        let rhs_col = n + m;
        let mut tab = vec![vec![0.0_f64; total_cols]; m + 1];
        for i in 0..m {
            for j in 0..n {
                tab[i][j] = self.a_matrix[i][j];
            }
            // Slack variable for row i turns A x ≤ b into an equality.
            tab[i][n + i] = 1.0;
            tab[i][rhs_col] = self.rhs[i];
        }
        // Bottom row holds the reduced costs; initially just c because the
        // all-slack starting basis has zero objective coefficients.
        for j in 0..n {
            tab[m][j] = self.obj[j];
        }
        // Safety cap against cycling on degenerate problems.
        let max_iter = 1000;
        for _ in 0..max_iter {
            // Dantzig rule: enter the column with the most negative reduced cost.
            let pivot_col = (0..n + m).filter(|&j| tab[m][j] < -1e-9).min_by(|&a, &b| {
                tab[m][a]
                    .partial_cmp(&tab[m][b])
                    .unwrap_or(std::cmp::Ordering::Equal)
            });
            let pivot_col = match pivot_col {
                Some(c) => c,
                None => break, // no negative reduced cost → current basis optimal
            };
            // Ratio test: leaving row minimizes rhs / pivot-column entry over
            // rows with a (numerically) positive pivot-column entry.
            let pivot_row = (0..m)
                .filter(|&i| tab[i][pivot_col] > 1e-9)
                .min_by(|&a, &b| {
                    let ra = tab[a][rhs_col] / tab[a][pivot_col];
                    let rb = tab[b][rhs_col] / tab[b][pivot_col];
                    ra.partial_cmp(&rb).unwrap_or(std::cmp::Ordering::Equal)
                });
            // No eligible row → the objective is unbounded below.
            let pivot_row = pivot_row?;
            // Gauss-Jordan pivot: normalize the pivot row, then eliminate the
            // pivot column from every other row (including the objective row).
            let pval = tab[pivot_row][pivot_col];
            for j in 0..total_cols {
                tab[pivot_row][j] /= pval;
            }
            for i in 0..=m {
                if i != pivot_row {
                    let factor = tab[i][pivot_col];
                    if factor.abs() > 1e-15 {
                        for j in 0..total_cols {
                            let delta = factor * tab[pivot_row][j];
                            tab[i][j] -= delta;
                        }
                    }
                }
            }
        }
        // The objective cell accumulates −z under the row operations above,
        // so negate it to recover the minimized objective value.
        Some(-tab[m][rhs_col])
    }
}
/// Dijkstra's algorithm for single-source shortest path (non-negative weights).
#[derive(Debug, Clone)]
pub struct Dijkstra {
    /// Number of vertices.
    pub n: usize,
    /// Adjacency list: adj[u] = Vec<(v, weight)>.
    pub adj: Vec<Vec<(usize, u64)>>,
}
impl Dijkstra {
    /// Create a new graph with `n` vertices.
    pub fn new(n: usize) -> Self {
        let adj = vec![Vec::new(); n];
        Self { n, adj }
    }
    /// Add a directed edge u → v with non-negative weight.
    pub fn add_edge(&mut self, u: usize, v: usize, w: u64) {
        self.adj[u].push((v, w));
    }
    /// Compute shortest distances from `source` to all vertices.
    ///
    /// Returns `dist` where `dist[v] = u64::MAX` means unreachable.
    pub fn shortest_paths(&self, source: usize) -> Vec<u64> {
        let mut dist = vec![u64::MAX; self.n];
        dist[source] = 0;
        // Min-heap of (distance, vertex), ordered via `Reverse`.
        let mut frontier = BinaryHeap::new();
        frontier.push(Reverse((0_u64, source)));
        while let Some(Reverse((d, u))) = frontier.pop() {
            // A stale entry: `u` was already settled through a shorter path.
            if d > dist[u] {
                continue;
            }
            // Relax every outgoing edge of the settled vertex.
            for &(v, w) in self.adj[u].iter() {
                let candidate = d + w;
                if candidate < dist[v] {
                    dist[v] = candidate;
                    frontier.push(Reverse((candidate, v)));
                }
            }
        }
        dist
    }
}