1use super::functions::*;
6use std::cmp::Reverse;
7use std::collections::{BinaryHeap, VecDeque};
8
/// Single-source shortest paths on a directed graph that may contain
/// negative edge weights (Bellman-Ford).
#[derive(Debug, Clone)]
pub struct BellmanFord {
    /// Number of vertices.
    pub n: usize,
    /// Directed edges stored as (from, to, weight).
    pub edges: Vec<(usize, usize, i64)>,
}
impl BellmanFord {
    /// Creates an edgeless graph with `n` vertices.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            edges: Vec::new(),
        }
    }
    /// Adds a directed edge `from -> to` with the given weight.
    pub fn add_edge(&mut self, from: usize, to: usize, weight: i64) {
        self.edges.push((from, to, weight));
    }
    /// Computes shortest distances from `source`, or `Err(())` when a
    /// negative cycle is reachable. Unreachable vertices keep `i64::MAX / 2`.
    #[allow(clippy::result_unit_err)]
    pub fn shortest_paths(&self, source: usize) -> Result<Vec<i64>, ()> {
        // Half of i64::MAX so one relaxation cannot overflow.
        let unreachable = i64::MAX / 2;
        let mut dist = vec![unreachable; self.n];
        dist[source] = 0;
        // n - 1 full relaxation rounds cover every simple path.
        for _ in 1..self.n {
            for &(from, to, w) in &self.edges {
                if dist[from] != unreachable && dist[from] + w < dist[to] {
                    dist[to] = dist[from] + w;
                }
            }
        }
        // Any edge that still improves implies a reachable negative cycle.
        let has_negative_cycle = self
            .edges
            .iter()
            .any(|&(from, to, w)| dist[from] != unreachable && dist[from] + w < dist[to]);
        if has_negative_cycle {
            Err(())
        } else {
            Ok(dist)
        }
    }
}
/// Finite Markov decision process solver: value iteration, policy
/// evaluation and policy iteration over dense reward/transition tables.
#[allow(dead_code)]
pub struct MdpSolver {
    /// Number of states |S|.
    pub num_states: usize,
    /// Number of actions |A|.
    pub num_actions: usize,
    /// Discount factor gamma.
    pub discount: f64,
    /// rewards[s][a]: immediate reward for taking action `a` in state `s`.
    pub rewards: Vec<Vec<f64>>,
    /// transitions[s][a][s']: probability of landing in `s'`.
    pub transitions: Vec<Vec<Vec<f64>>>,
}
impl MdpSolver {
    /// Bundles the MDP data; no validation is performed.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        num_states: usize,
        num_actions: usize,
        discount: f64,
        rewards: Vec<Vec<f64>>,
        transitions: Vec<Vec<Vec<f64>>>,
    ) -> Self {
        Self {
            num_states,
            num_actions,
            discount,
            rewards,
            transitions,
        }
    }
    /// One Bellman optimality backup: for every state, the best action's
    /// expected one-step return given the current value estimate `v`.
    pub fn bellman_update(&self, v: &[f64]) -> Vec<f64> {
        let mut updated = Vec::with_capacity(self.num_states);
        for s in 0..self.num_states {
            let mut best = f64::NEG_INFINITY;
            for a in 0..self.num_actions {
                let mut future = 0.0_f64;
                for sp in 0..self.num_states {
                    future += self.transitions[s][a][sp] * v[sp];
                }
                best = best.max(self.rewards[s][a] + self.discount * future);
            }
            updated.push(best);
        }
        updated
    }
    /// Iterates Bellman backups until the sup-norm change drops below `tol`
    /// or `max_iter` rounds elapse. Returns (values, iterations used).
    pub fn value_iteration(&self, tol: f64, max_iter: usize) -> (Vec<f64>, usize) {
        let mut values = vec![0.0_f64; self.num_states];
        for round in 1..=max_iter {
            let next = self.bellman_update(&values);
            let mut delta = 0.0_f64;
            for (new, old) in next.iter().zip(values.iter()) {
                delta = delta.max((new - old).abs());
            }
            values = next;
            if delta < tol {
                return (values, round);
            }
        }
        (values, max_iter)
    }
    /// Greedy policy with respect to value estimate `v`.
    /// Ties keep the highest action index (matching a last-max scan).
    pub fn extract_policy(&self, v: &[f64]) -> Vec<usize> {
        let mut policy = Vec::with_capacity(self.num_states);
        for s in 0..self.num_states {
            let mut best_action = 0_usize;
            let mut best_q = f64::NEG_INFINITY;
            for a in 0..self.num_actions {
                let mut future = 0.0_f64;
                for sp in 0..self.num_states {
                    future += self.transitions[s][a][sp] * v[sp];
                }
                let q = self.rewards[s][a] + self.discount * future;
                // `>=` so equal Q-values select the later action.
                if q >= best_q {
                    best_q = q;
                    best_action = a;
                }
            }
            policy.push(best_action);
        }
        policy
    }
    /// Iterative policy evaluation: fixed-point of the policy's Bellman
    /// operator, stopped at tolerance `tol` or after `max_iter` sweeps.
    pub fn policy_evaluation(&self, policy: &[usize], tol: f64, max_iter: usize) -> Vec<f64> {
        let mut values = vec![0.0_f64; self.num_states];
        for _ in 0..max_iter {
            let mut next = Vec::with_capacity(self.num_states);
            for s in 0..self.num_states {
                let a = policy[s];
                let mut future = 0.0_f64;
                for sp in 0..self.num_states {
                    future += self.transitions[s][a][sp] * values[sp];
                }
                next.push(self.rewards[s][a] + self.discount * future);
            }
            let mut delta = 0.0_f64;
            for (new, old) in next.iter().zip(values.iter()) {
                delta = delta.max((new - old).abs());
            }
            values = next;
            if delta < tol {
                break;
            }
        }
        values
    }
    /// Howard-style policy iteration: evaluate, then greedy-improve, until
    /// the policy is stable. Returns (policy, values, iterations used).
    pub fn policy_iteration(&self, tol: f64, max_iter: usize) -> (Vec<usize>, Vec<f64>, usize) {
        let mut policy = vec![0_usize; self.num_states];
        for round in 1..=max_iter {
            // Evaluate tighter than the outer tolerance so the greedy step
            // is computed on well-converged values.
            let values = self.policy_evaluation(&policy, tol / 10.0, 1000);
            let improved = self.extract_policy(&values);
            if improved == policy {
                return (policy, values, round);
            }
            policy = improved;
        }
        let values = self.policy_evaluation(&policy, tol, 1000);
        (policy, values, max_iter)
    }
}
/// Max-flow on a dense capacity matrix using BFS augmentation (Edmonds-Karp).
#[derive(Debug, Clone)]
pub struct NetworkFlowGraph {
    /// Number of vertices.
    pub n: usize,
    /// capacities[u][v]: directed capacity from u to v.
    pub capacities: Vec<Vec<i64>>,
}
impl NetworkFlowGraph {
    /// Creates a graph with `n` vertices and zero capacities.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            capacities: vec![vec![0_i64; n]; n],
        }
    }
    /// Adds `cap` units of directed capacity (accumulates parallel edges).
    pub fn add_edge(&mut self, u: usize, v: usize, cap: i64) {
        self.capacities[u][v] += cap;
    }
    /// BFS over positive residual capacities; returns the parent array of a
    /// source->sink path, or `None` when the sink is unreachable.
    fn bfs_parents(residual: &[Vec<i64>], n: usize, source: usize, sink: usize) -> Option<Vec<usize>> {
        let mut parent = vec![usize::MAX; n];
        parent[source] = source;
        let mut frontier = VecDeque::new();
        frontier.push_back(source);
        'search: while let Some(u) = frontier.pop_front() {
            for v in 0..n {
                if parent[v] == usize::MAX && residual[u][v] > 0 {
                    parent[v] = u;
                    if v == sink {
                        break 'search;
                    }
                    frontier.push_back(v);
                }
            }
        }
        if parent[sink] == usize::MAX {
            None
        } else {
            Some(parent)
        }
    }
    /// Computes the maximum source->sink flow; the stored capacities are
    /// not modified (work happens on a residual copy).
    pub fn max_flow_bfs(&self, source: usize, sink: usize) -> i64 {
        let n = self.n;
        let mut residual = self.capacities.clone();
        let mut total_flow = 0_i64;
        while let Some(parent) = Self::bfs_parents(&residual, n, source, sink) {
            // Bottleneck capacity along the discovered path.
            let mut bottleneck = i64::MAX;
            let mut v = sink;
            while v != source {
                bottleneck = bottleneck.min(residual[parent[v]][v]);
                v = parent[v];
            }
            // Push flow forward, add reverse residual capacity.
            v = sink;
            while v != source {
                let u = parent[v];
                residual[u][v] -= bottleneck;
                residual[v][u] += bottleneck;
                v = u;
            }
            total_flow += bottleneck;
        }
        total_flow
    }
    /// By max-flow/min-cut duality the minimum cut equals the maximum flow.
    pub fn min_cut_value(&self, source: usize, sink: usize) -> i64 {
        self.max_flow_bfs(source, sink)
    }
}
/// Namespace for classic dynamic-programming routines.
pub struct DynamicProgramming;
impl DynamicProgramming {
    /// 0/1 knapsack: best total value within `capacity`, using a 1-D
    /// rolling table (budgets scanned downwards so each item is used once).
    pub fn knapsack(capacity: usize, weights: &[usize], values: &[usize]) -> usize {
        // best[w] = maximum value achievable with weight budget w.
        let mut best = vec![0_usize; capacity + 1];
        for (&wt, &val) in weights.iter().zip(values.iter()) {
            for w in (wt..=capacity).rev() {
                best[w] = best[w].max(best[w - wt] + val);
            }
        }
        best[capacity]
    }
    /// Length of the longest common subsequence of two byte strings.
    pub fn longest_common_subseq(s1: &[u8], s2: &[u8]) -> usize {
        let mut table = vec![vec![0_usize; s2.len() + 1]; s1.len() + 1];
        for (i, &a) in s1.iter().enumerate() {
            for (j, &b) in s2.iter().enumerate() {
                table[i + 1][j + 1] = if a == b {
                    table[i][j] + 1
                } else {
                    table[i][j + 1].max(table[i + 1][j])
                };
            }
        }
        table[s1.len()][s2.len()]
    }
    /// Minimum scalar multiplications to multiply a chain of matrices whose
    /// dimensions are dims[0] x dims[1], dims[1] x dims[2], ...
    pub fn matrix_chain_order(dims: &[usize]) -> usize {
        let count = dims.len().saturating_sub(1);
        if count == 0 {
            return 0;
        }
        let mut cost = vec![vec![0_usize; count]; count];
        for span in 2..=count {
            for lo in 0..=(count - span) {
                let hi = lo + span - 1;
                // Try every split point k between lo and hi.
                let best = (lo..hi)
                    .map(|k| {
                        cost[lo][k]
                            .saturating_add(cost[k + 1][hi])
                            .saturating_add(dims[lo] * dims[k + 1] * dims[hi + 1])
                    })
                    .min()
                    .unwrap_or(usize::MAX);
                cost[lo][hi] = best;
            }
        }
        cost[0][count - 1]
    }
    /// Fewest coins summing to `amount`, or `None` if unreachable.
    pub fn coin_change(coins: &[usize], amount: usize) -> Option<usize> {
        // Sentinel kept below usize::MAX so `+ 1` cannot wrap.
        let unreached = usize::MAX / 2;
        let mut min_coins = vec![unreached; amount + 1];
        min_coins[0] = 0;
        for target in 1..=amount {
            for &coin in coins {
                if coin <= target {
                    let prev = min_coins[target - coin];
                    if prev != unreached && prev + 1 < min_coins[target] {
                        min_coins[target] = prev + 1;
                    }
                }
            }
        }
        if min_coins[amount] == unreached {
            None
        } else {
            Some(min_coins[amount])
        }
    }
}
/// Parameters of an M/M/c queue plus a few closed-form metrics.
#[derive(Debug, Clone)]
pub struct QueueingSystem {
    /// Mean arrival rate lambda.
    pub arrival_rate: f64,
    /// Mean service rate mu of one server.
    pub service_rate: f64,
    /// Number of parallel servers c.
    pub num_servers: usize,
}
impl QueueingSystem {
    /// Builds a queue description from (lambda, mu, c).
    pub fn new(lambda: f64, mu: f64, c: usize) -> Self {
        Self {
            arrival_rate: lambda,
            service_rate: mu,
            num_servers: c,
        }
    }
    /// Traffic intensity rho = lambda / (c * mu).
    pub fn utilization(&self) -> f64 {
        self.arrival_rate / (self.num_servers as f64 * self.service_rate)
    }
    /// The queue is stable exactly when rho < 1.
    pub fn is_stable(&self) -> bool {
        self.utilization() < 1.0
    }
    /// Returns rho / (1 - rho) for a stable single-server system,
    /// `None` otherwise.
    /// NOTE(review): rho/(1-rho) is the M/M/1 mean number *in system* (L);
    /// the waiting-line length Lq would be rho^2/(1-rho) — confirm which
    /// metric the name is meant to convey.
    pub fn mean_queue_length_m_m_1(&self) -> Option<f64> {
        if self.num_servers != 1 || !self.is_stable() {
            return None;
        }
        let rho = self.utilization();
        Some(rho / (1.0 - rho))
    }
    /// Little's law applied to the value above: W = L / lambda.
    pub fn mean_waiting_time(&self) -> Option<f64> {
        self.mean_queue_length_m_m_1()
            .map(|l| l / self.arrival_rate)
    }
}
/// A simple job list with classic single-machine scheduling heuristics.
///
/// Each job is stored as `(name, processing_time, deadline)`.
#[derive(Debug, Clone)]
pub struct JobScheduler {
    /// Jobs as (name, processing time, deadline) triples.
    pub jobs: Vec<(String, u64, u64)>,
}
// `new()` takes no arguments, so provide the matching `Default`
// (clippy::new_without_default).
impl Default for JobScheduler {
    fn default() -> Self {
        Self::new()
    }
}
impl JobScheduler {
    /// Creates an empty scheduler.
    pub fn new() -> Self {
        Self { jobs: Vec::new() }
    }
    /// Registers a job with its processing time and deadline.
    pub fn add_job(&mut self, name: &str, proc_time: u64, deadline: u64) {
        self.jobs.push((name.to_owned(), proc_time, deadline));
    }
    /// Job names ordered by ascending deadline (Earliest Deadline First).
    /// The sort is stable, so ties keep insertion order.
    pub fn earliest_deadline_first(&self) -> Vec<&str> {
        let mut indices: Vec<usize> = (0..self.jobs.len()).collect();
        indices.sort_by_key(|&i| self.jobs[i].2);
        indices.iter().map(|&i| self.jobs[i].0.as_str()).collect()
    }
    /// Job names ordered by ascending processing time (Shortest Job First).
    pub fn shortest_job_first(&self) -> Vec<&str> {
        let mut indices: Vec<usize> = (0..self.jobs.len()).collect();
        indices.sort_by_key(|&i| self.jobs[i].1);
        indices.iter().map(|&i| self.jobs[i].0.as_str()).collect()
    }
    /// Sum of completion times when jobs run back-to-back in `schedule`
    /// order. Names not present in `self.jobs` are silently skipped.
    pub fn total_completion_time(&self, schedule: &[&str]) -> u64 {
        let mut time = 0_u64;
        let mut total = 0_u64;
        for name in schedule {
            if let Some(job) = self.jobs.iter().find(|(n, _, _)| n == name) {
                time += job.1;
                total += time;
            }
        }
        total
    }
    /// Total processing time of the scheduled jobs (single machine,
    /// no idle time); unknown names are skipped.
    pub fn makespan(&self, schedule: &[&str]) -> u64 {
        schedule
            .iter()
            .filter_map(|name| self.jobs.iter().find(|(n, _, _)| n == name))
            .map(|(_, p, _)| p)
            .sum()
    }
}
/// Minimum spanning tree via Prim's algorithm on a dense adjacency matrix.
#[derive(Debug, Clone)]
pub struct PrimMst {
    /// Number of vertices.
    pub n: usize,
    /// cost[u][v]: edge weight, or i64::MAX when there is no edge.
    pub cost: Vec<Vec<i64>>,
}
impl PrimMst {
    /// Creates an edgeless graph on `n` vertices.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            cost: vec![vec![i64::MAX; n]; n],
        }
    }
    /// Inserts an undirected edge of weight `w` (overwrites any previous
    /// weight between the same pair).
    pub fn add_edge(&mut self, u: usize, v: usize, w: i64) {
        self.cost[u][v] = w;
        self.cost[v][u] = w;
    }
    /// Runs Prim starting from vertex 0. Returns the total tree weight and
    /// the chosen edges as (parent, child, weight).
    pub fn run(&self) -> (i64, Vec<(usize, usize, i64)>) {
        let n = self.n;
        let mut visited = vec![false; n];
        let mut best_cost = vec![i64::MAX; n];
        let mut best_from = vec![usize::MAX; n];
        best_cost[0] = 0;
        let mut edges = Vec::new();
        let mut total_weight = 0_i64;
        for _ in 0..n {
            // Cheapest fringe vertex not yet inside the tree.
            let next = (0..n)
                .filter(|&v| !visited[v])
                .min_by_key(|&v| best_cost[v])
                .expect("Prim's algorithm: there is always an unvisited vertex in 0..n iterations");
            visited[next] = true;
            // Vertex 0 has no parent; every later pick contributes an edge.
            if best_from[next] != usize::MAX {
                edges.push((best_from[next], next, best_cost[next]));
                total_weight += best_cost[next];
            }
            // Relax fringe costs through the newly added vertex.
            for v in 0..n {
                if !visited[v] && self.cost[next][v] < best_cost[v] {
                    best_cost[v] = self.cost[next][v];
                    best_from[v] = next;
                }
            }
        }
        (total_weight, edges)
    }
}
486#[derive(Debug, Clone)]
488pub struct ReliabilitySystem {
489 pub components: Vec<f64>,
491}
492impl ReliabilitySystem {
493 pub fn new(components: Vec<f64>) -> Self {
495 Self { components }
496 }
497 pub fn series_reliability(&self) -> f64 {
499 self.components.iter().product()
500 }
501 pub fn parallel_reliability(&self) -> f64 {
503 1.0 - self.components.iter().map(|&r| 1.0 - r).product::<f64>()
504 }
505 pub fn k_out_of_n_reliability(&self, k: usize) -> f64 {
510 let n = self.components.len();
511 let p = if n == 0 {
512 return 0.0;
513 } else {
514 self.components[0]
515 };
516 let q = 1.0 - p;
517 (k..=n)
518 .map(|j| {
519 let binom = binomial_coeff(n, j) as f64;
520 binom * p.powi(j as i32) * q.powi((n - j) as i32)
521 })
522 .sum()
523 }
524}
525#[allow(dead_code)]
527pub struct BanditEnvironment {
528 pub true_means: Vec<f64>,
530 rng_state: u64,
532}
533impl BanditEnvironment {
534 pub fn new(true_means: Vec<f64>) -> Self {
536 Self {
537 true_means,
538 rng_state: 42,
539 }
540 }
541 pub fn sample(&mut self, arm: usize) -> f64 {
543 let u1 = self.lcg_next();
544 let u2 = self.lcg_next();
545 let z = (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos();
546 self.true_means[arm] + z
547 }
548 fn lcg_next(&mut self) -> f64 {
550 self.rng_state = self
551 .rng_state
552 .wrapping_mul(6364136223846793005)
553 .wrapping_add(1442695040888963407);
554 let val = (self.rng_state >> 33) as f64 / (u32::MAX as f64 + 1.0);
555 val.max(1e-10)
556 }
557 pub fn optimal_arm(&self) -> usize {
559 self.true_means
560 .iter()
561 .enumerate()
562 .max_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Equal))
563 .map(|(i, _)| i)
564 .unwrap_or(0)
565 }
566 pub fn run_ucb1(&mut self, rounds: usize) -> f64 {
568 let k = self.true_means.len();
569 let optimal_mean = self.true_means[self.optimal_arm()];
570 let mut bandit = MultiArmedBanditUcb::new(k);
571 let mut cumulative_regret = 0.0_f64;
572 for _ in 0..rounds {
573 let arm = bandit.select_arm();
574 let reward = self.sample(arm);
575 bandit.update(arm, reward);
576 cumulative_regret += optimal_mean - self.true_means[arm];
577 }
578 cumulative_regret
579 }
580}
/// UCB1 multi-armed bandit learner: tracks per-arm reward sums and pull
/// counts and selects arms by mean plus an exploration bonus.
#[allow(dead_code)]
pub struct MultiArmedBanditUcb {
    /// Number of arms.
    pub num_arms: usize,
    /// Cumulative reward received per arm.
    pub total_rewards: Vec<f64>,
    /// How often each arm was pulled.
    pub pull_counts: Vec<u64>,
    /// Total number of updates across all arms.
    pub total_rounds: u64,
}
impl MultiArmedBanditUcb {
    /// Fresh learner with all statistics zeroed.
    pub fn new(num_arms: usize) -> Self {
        Self {
            num_arms,
            total_rewards: vec![0.0; num_arms],
            pull_counts: vec![0; num_arms],
            total_rounds: 0,
        }
    }
    /// UCB1 score: empirical mean + sqrt(2 ln t / n). Unpulled arms get
    /// +infinity so each arm is tried at least once.
    pub fn ucb_index(&self, arm: usize) -> f64 {
        if self.pull_counts[arm] == 0 {
            return f64::INFINITY;
        }
        let mean = self.total_rewards[arm] / self.pull_counts[arm] as f64;
        let t = self.total_rounds as f64;
        let n = self.pull_counts[arm] as f64;
        mean + (2.0 * t.ln() / n).sqrt()
    }
    /// Arm with the highest UCB score. Ties keep the lowest-indexed arm,
    /// matching the original index-descending tie-break.
    pub fn select_arm(&self) -> usize {
        let mut chosen = 0_usize;
        for arm in 1..self.num_arms {
            // Strictly greater only, so equal scores keep the earlier arm.
            if self.ucb_index(arm) > self.ucb_index(chosen) {
                chosen = arm;
            }
        }
        chosen
    }
    /// Records the observed `reward` for a pull of `arm`.
    pub fn update(&mut self, arm: usize, reward: f64) {
        self.total_rewards[arm] += reward;
        self.pull_counts[arm] += 1;
        self.total_rounds += 1;
    }
    /// Empirical mean reward of `arm` (0.0 before its first pull).
    pub fn average_reward(&self, arm: usize) -> f64 {
        if self.pull_counts[arm] == 0 {
            return 0.0;
        }
        self.total_rewards[arm] / self.pull_counts[arm] as f64
    }
    /// Arm with the best empirical mean among arms pulled at least once
    /// (0 if no arm has been pulled yet).
    pub fn best_arm(&self) -> usize {
        (0..self.num_arms)
            .filter(|&i| self.pull_counts[i] > 0)
            .max_by(|&a, &b| {
                self.average_reward(a)
                    .partial_cmp(&self.average_reward(b))
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
            .unwrap_or(0)
    }
}
/// Data for a two-stage stochastic LP: minimize c'x plus the expected
/// recourse cost E[q'y(omega)] over a finite scenario set.
#[allow(dead_code)]
pub struct TwoStageStochasticLP {
    /// First-stage objective coefficients c.
    pub stage1_obj: Vec<f64>,
    /// Second-stage (recourse) objective coefficients q.
    pub stage2_obj: Vec<f64>,
    /// Scenario probabilities (one per scenario).
    pub probabilities: Vec<f64>,
    /// Right-hand sides per scenario.
    pub scenario_rhs: Vec<Vec<f64>>,
}
impl TwoStageStochasticLP {
    /// Bundles the problem data; no validation is performed.
    pub fn new(
        stage1_obj: Vec<f64>,
        stage2_obj: Vec<f64>,
        probabilities: Vec<f64>,
        scenario_rhs: Vec<Vec<f64>>,
    ) -> Self {
        Self {
            stage1_obj,
            stage2_obj,
            probabilities,
            scenario_rhs,
        }
    }
    /// Number of scenarios, taken from the probability vector.
    pub fn num_scenarios(&self) -> usize {
        self.probabilities.len()
    }
    /// Probability-weighted recourse cost sum over scenarios, given one
    /// second-stage decision vector per scenario.
    pub fn expected_recourse_cost(&self, stage2_decisions: &[Vec<f64>]) -> f64 {
        let mut expected = 0.0_f64;
        for (&prob, decision) in self.probabilities.iter().zip(stage2_decisions.iter()) {
            let scenario_cost: f64 = self
                .stage2_obj
                .iter()
                .zip(decision.iter())
                .map(|(q, y)| q * y)
                .sum();
            expected += prob * scenario_cost;
        }
        expected
    }
    /// First-stage cost c'x plus the expected recourse cost.
    pub fn total_cost(&self, stage1_x: &[f64], stage2_decisions: &[Vec<f64>]) -> f64 {
        let first_stage: f64 = self
            .stage1_obj
            .iter()
            .zip(stage1_x.iter())
            .map(|(c, x)| c * x)
            .sum();
        first_stage + self.expected_recourse_cost(stage2_decisions)
    }
    /// Non-negativity check on x with a small numerical tolerance.
    pub fn is_stage1_feasible(&self, x: &[f64]) -> bool {
        x.iter().all(|&v| v >= -1e-9)
    }
}
/// 0/1 knapsack solver that also reconstructs the chosen item set, using a
/// full (items x capacity) DP table.
#[derive(Debug, Clone)]
pub struct KnapsackDP {
    /// Item weights.
    pub weights: Vec<usize>,
    /// Item values, parallel to `weights`.
    pub values: Vec<usize>,
    /// Total weight budget.
    pub capacity: usize,
}
impl KnapsackDP {
    /// Bundles the instance data.
    pub fn new(capacity: usize, weights: Vec<usize>, values: Vec<usize>) -> Self {
        Self {
            weights,
            values,
            capacity,
        }
    }
    /// Returns (best total value, indices of chosen items in ascending order).
    pub fn solve(&self) -> (usize, Vec<usize>) {
        let item_count = self.weights.len();
        let budget = self.capacity;
        // table[i][w]: best value using the first i items within budget w.
        let mut table = vec![vec![0_usize; budget + 1]; item_count + 1];
        for i in 1..=item_count {
            let (wt, val) = (self.weights[i - 1], self.values[i - 1]);
            for w in 0..=budget {
                let mut best = table[i - 1][w];
                if wt <= w {
                    best = best.max(table[i - 1][w - wt] + val);
                }
                table[i][w] = best;
            }
        }
        // Walk the table backwards: a value change means item i-1 was taken.
        let mut chosen = Vec::new();
        let mut w = budget;
        for i in (1..=item_count).rev() {
            if table[i][w] != table[i - 1][w] {
                chosen.push(i - 1);
                w -= self.weights[i - 1];
            }
        }
        chosen.reverse();
        (table[item_count][budget], chosen)
    }
}
/// Deterministic EOQ (economic order quantity) inventory model.
#[derive(Debug, Clone)]
pub struct InventoryModel {
    /// Demand rate per period.
    pub demand: f64,
    /// Fixed cost per order placed.
    pub order_cost: f64,
    /// Holding cost per unit per period.
    pub holding_cost: f64,
    /// Replenishment lead time in periods.
    pub lead_time: f64,
}
impl InventoryModel {
    /// Builds the model from (demand, setup cost, holding cost, lead time).
    pub fn new(d: f64, s: f64, h: f64, l: f64) -> Self {
        Self {
            demand: d,
            order_cost: s,
            holding_cost: h,
            lead_time: l,
        }
    }
    /// Economic order quantity: sqrt(2 * D * S / H).
    pub fn eoq(&self) -> f64 {
        (2.0 * self.demand * self.order_cost / self.holding_cost).sqrt()
    }
    /// Reorder when stock falls to lead-time demand plus safety stock.
    pub fn reorder_point(&self, safety_stock: f64) -> f64 {
        self.demand * self.lead_time + safety_stock
    }
    /// Per-period cost at order quantity `q`: ordering plus average holding.
    pub fn total_cost(&self, q: f64) -> f64 {
        let ordering = (self.demand / q) * self.order_cost;
        let holding = (q / 2.0) * self.holding_cost;
        ordering + holding
    }
}
/// All-pairs shortest paths (Floyd-Warshall) with negative-cycle detection.
#[derive(Debug, Clone)]
pub struct FloydWarshall {
    /// Number of vertices.
    pub n: usize,
    /// dist[i][j]: best known distance; i64::MAX / 2 means "no path yet".
    pub dist: Vec<Vec<i64>>,
}
impl FloydWarshall {
    // Sentinel distance; half of i64::MAX so one addition cannot overflow.
    const INF: i64 = i64::MAX / 2;
    /// Creates a graph with zero self-distances and no edges.
    pub fn new(n: usize) -> Self {
        let mut dist = vec![vec![Self::INF; n]; n];
        for (i, row) in dist.iter_mut().enumerate() {
            row[i] = 0;
        }
        Self { n, dist }
    }
    /// Inserts a directed edge, keeping the lightest of parallel edges.
    pub fn add_edge(&mut self, u: usize, v: usize, w: i64) {
        self.dist[u][v] = self.dist[u][v].min(w);
    }
    /// Runs the relaxation. Returns the distance matrix, or `Err(())` when
    /// some diagonal entry went negative (a negative cycle exists).
    #[allow(clippy::result_unit_err)]
    pub fn run(&mut self) -> Result<Vec<Vec<i64>>, ()> {
        let n = self.n;
        for k in 0..n {
            for i in 0..n {
                for j in 0..n {
                    // Skip pairs without a finite path through k.
                    if self.dist[i][k] != Self::INF && self.dist[k][j] != Self::INF {
                        let via_k = self.dist[i][k] + self.dist[k][j];
                        if via_k < self.dist[i][j] {
                            self.dist[i][j] = via_k;
                        }
                    }
                }
            }
        }
        if (0..n).any(|i| self.dist[i][i] < 0) {
            return Err(());
        }
        Ok(self.dist.clone())
    }
}
/// Optimal assignment on a square cost matrix — Hungarian (Kuhn-Munkres)
/// algorithm in its O(n^3) formulation with row/column potentials.
#[derive(Debug, Clone)]
pub struct HungarianSolver {
    /// Square cost matrix: cost[i][j] is the cost of assigning row i to column j.
    /// NOTE(review): `solve` indexes cost[i][j] for all i, j < cost.len(), so
    /// the matrix must be square — confirm callers guarantee this.
    pub cost: Vec<Vec<i64>>,
}
impl HungarianSolver {
    /// Wraps the cost matrix without validation.
    pub fn new(cost: Vec<Vec<i64>>) -> Self {
        Self { cost }
    }
    /// Returns (minimum total cost, assignment) where assignment[i] is the
    /// column matched to row i.
    ///
    /// Internals use 1-based indexing with column 0 as a virtual start:
    /// `u`/`v` are row/column potentials, `p[j]` is the row matched to
    /// column j (0 = unmatched) and `way[j]` records the previous column on
    /// the alternating path used to augment the matching.
    pub fn solve(&self) -> (i64, Vec<usize>) {
        let n = self.cost.len();
        if n == 0 {
            return (0, vec![]);
        }
        // Half of i64::MAX so additions below cannot overflow.
        let inf = i64::MAX / 2;
        let mut u = vec![0_i64; n + 1];
        let mut v = vec![0_i64; n + 1];
        let mut p = vec![0_usize; n + 1];
        let mut way = vec![0_usize; n + 1];
        // Insert rows one at a time, each via one augmenting path.
        for i in 1..=n {
            p[0] = i;
            let mut j0 = 0_usize;
            // minv[j]: smallest reduced cost seen from the current tree to column j.
            let mut minv = vec![inf; n + 1];
            let mut used = vec![false; n + 1];
            loop {
                used[j0] = true;
                let i0 = p[j0];
                let mut delta = inf;
                let mut j1 = 0_usize;
                // Scan free columns: update reduced costs and pick the
                // closest column to grow the alternating tree into.
                for j in 1..=n {
                    if !used[j] {
                        let cur = self.cost[i0 - 1][j - 1] - u[i0] - v[j];
                        if cur < minv[j] {
                            minv[j] = cur;
                            way[j] = j0;
                        }
                        if minv[j] < delta {
                            delta = minv[j];
                            j1 = j;
                        }
                    }
                }
                // Shift potentials by delta to create at least one new
                // tight (zero reduced cost) edge while keeping feasibility.
                for j in 0..=n {
                    if used[j] {
                        u[p[j]] += delta;
                        v[j] -= delta;
                    } else {
                        minv[j] -= delta;
                    }
                }
                j0 = j1;
                // Stop when the tree reaches an unmatched column.
                if p[j0] == 0 {
                    break;
                }
            }
            // Augment: flip matched edges backwards along `way`.
            loop {
                p[j0] = p[way[j0]];
                j0 = way[j0];
                if j0 == 0 {
                    break;
                }
            }
        }
        // Convert the column->row matching into row->column form.
        let mut assignment = vec![0_usize; n];
        for j in 1..=n {
            if p[j] != 0 {
                assignment[p[j] - 1] = j - 1;
            }
        }
        let min_cost: i64 = (0..n).map(|i| self.cost[i][assignment[i]]).sum();
        (min_cost, assignment)
    }
}
/// Single-period newsvendor model with demand uniform on
/// [demand_lo, demand_hi].
#[derive(Debug, Clone)]
pub struct NewsvendorModel {
    /// Lower bound of uniform demand.
    pub demand_lo: f64,
    /// Upper bound of uniform demand.
    pub demand_hi: f64,
    /// Purchase cost per unit.
    pub unit_cost: f64,
    /// Revenue per unit sold.
    pub selling_price: f64,
    /// Recovery value per unsold unit.
    pub salvage_value: f64,
}
impl NewsvendorModel {
    /// Bundles the model parameters.
    pub fn new(lo: f64, hi: f64, cost: f64, price: f64, salvage: f64) -> Self {
        Self {
            demand_lo: lo,
            demand_hi: hi,
            unit_cost: cost,
            selling_price: price,
            salvage_value: salvage,
        }
    }
    /// Critical fractile c_u / (c_u + c_o), where the underage cost is
    /// c_u = price - cost and the overage cost is c_o = cost - salvage.
    pub fn critical_fractile(&self) -> f64 {
        let c_e = self.unit_cost - self.salvage_value;
        let c_r = self.selling_price - self.unit_cost;
        c_r / (c_r + c_e)
    }
    /// Optimal order quantity: the critical-fractile quantile of U[lo, hi].
    pub fn optimal_quantity(&self) -> f64 {
        let cf = self.critical_fractile();
        self.demand_lo + cf * (self.demand_hi - self.demand_lo)
    }
    /// Expected profit for order quantity `q`:
    /// price * E[min(D, q)] + salvage * E[(q - D)^+] - cost * q.
    pub fn expected_profit(&self, q: f64) -> f64 {
        let lo = self.demand_lo;
        let hi = self.demand_hi;
        let range = hi - lo;
        // E[min(D, q)] for D ~ U[lo, hi], split by where q falls.
        let e_min = if q <= lo {
            q
        } else if q >= hi {
            (lo + hi) / 2.0
        } else {
            // Integral of x * f(x) over [lo, q] plus q * P(D >= q).
            // BUGFIX: the previous `below` term, (q(q-lo) - (q^2-lo^2)/2)/range,
            // simplifies to (q-lo)^2 / (2*range) instead of the correct
            // (q^2 - lo^2) / (2*range), making e_min discontinuous at q = hi
            // whenever lo > 0.
            let below = (q * q - lo * lo) / (2.0 * range);
            let above = q * (hi - q) / range;
            below + above
        };
        // Expected leftover units.
        let e_over = q - e_min;
        self.selling_price * e_min + self.salvage_value * e_over - self.unit_cost * q
    }
}
/// Max-flow via Ford-Fulkerson with depth-first augmenting-path search.
#[derive(Debug, Clone)]
pub struct FordFulkerson {
    /// Number of vertices.
    pub n: usize,
    // Dense residual-capacity matrix; consumed/updated by `max_flow`.
    residual: Vec<Vec<i64>>,
}
impl FordFulkerson {
    /// Creates a graph with `n` vertices and no capacity.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            residual: vec![vec![0_i64; n]; n],
        }
    }
    /// Adds `cap` units of directed capacity from `u` to `v` (accumulates).
    pub fn add_edge(&mut self, u: usize, v: usize, cap: i64) {
        self.residual[u][v] += cap;
    }
    /// Tries to push up to `limit` units from `node` to `sink` along one
    /// residual path; returns the amount pushed (0 when no path exists).
    fn dfs(&mut self, node: usize, sink: usize, limit: i64, visited: &mut Vec<bool>) -> i64 {
        if node == sink {
            return limit;
        }
        visited[node] = true;
        for next in 0..self.n {
            if visited[next] || self.residual[node][next] <= 0 {
                continue;
            }
            let pushed = self.dfs(next, sink, limit.min(self.residual[node][next]), visited);
            if pushed > 0 {
                // Commit the flow along this edge and open the reverse edge.
                self.residual[node][next] -= pushed;
                self.residual[next][node] += pushed;
                return pushed;
            }
        }
        0
    }
    /// Repeatedly augments until no residual path remains. Mutates the
    /// residual matrix, so it is a one-shot computation per instance.
    pub fn max_flow(&mut self, source: usize, sink: usize) -> i64 {
        let mut total = 0_i64;
        loop {
            let mut visited = vec![false; self.n];
            let pushed = self.dfs(source, sink, i64::MAX, &mut visited);
            if pushed == 0 {
                break;
            }
            total += pushed;
        }
        total
    }
}
/// Bookkeeping for Lagrangian relaxation with subgradient multiplier updates.
#[allow(dead_code)]
pub struct LagrangianRelaxationSolver {
    /// Lagrange multipliers, kept non-negative.
    pub multipliers: Vec<f64>,
    /// Base step-size scale used by the Polyak rule.
    pub step_size: f64,
    /// Best dual (lower) bound observed so far.
    pub best_lower_bound: f64,
    /// Number of subgradient updates performed.
    pub iterations: usize,
}
impl LagrangianRelaxationSolver {
    /// Starts with zero multipliers and no lower bound.
    pub fn new(num_constraints: usize, initial_step: f64) -> Self {
        Self {
            multipliers: vec![0.0; num_constraints],
            step_size: initial_step,
            best_lower_bound: f64::NEG_INFINITY,
            iterations: 0,
        }
    }
    /// Projected subgradient step: lambda <- max(lambda + step * g, 0),
    /// component-wise, then bumps the iteration counter.
    pub fn subgradient_update(&mut self, subgradient: &[f64], step: f64) {
        for (multiplier, &g) in self.multipliers.iter_mut().zip(subgradient.iter()) {
            *multiplier = (*multiplier + step * g).max(0.0);
        }
        self.iterations += 1;
    }
    /// Polyak step length: step_size * (UB - L(lambda)) / ||g||^2,
    /// or 0 when the subgradient is (numerically) zero.
    pub fn polyak_step(&self, upper_bound: f64, lagrangian_value: f64, subgradient: &[f64]) -> f64 {
        let norm_sq: f64 = subgradient.iter().map(|&g| g * g).sum();
        if norm_sq < 1e-10 {
            return 0.0;
        }
        (upper_bound - lagrangian_value) / norm_sq * self.step_size
    }
    /// Keeps the running maximum of dual bounds.
    pub fn update_lower_bound(&mut self, lb: f64) {
        if lb > self.best_lower_bound {
            self.best_lower_bound = lb;
        }
    }
    /// Gap between a primal upper bound and the best dual bound.
    pub fn duality_gap(&self, upper_bound: f64) -> f64 {
        upper_bound - self.best_lower_bound
    }
}
/// Dense tableau simplex over constraints `A x <= b`, `x >= 0`.
///
/// NOTE(review): the objective row is seeded directly with `obj` and the
/// final value is returned negated, which traces out as *minimizing*
/// `obj . x` — confirm the intended optimization sense. The initial slack
/// basis is only feasible when every rhs entry is non-negative, and no
/// anti-cycling rule is applied (iterations are capped at 1000).
#[derive(Debug, Clone)]
pub struct SimplexSolver {
    /// Number of structural (decision) variables.
    pub num_vars: usize,
    /// Number of inequality constraints.
    pub num_constraints: usize,
    /// Objective coefficients, one per variable.
    pub obj: Vec<f64>,
    /// Constraint matrix A (num_constraints x num_vars).
    pub a_matrix: Vec<Vec<f64>>,
    /// Right-hand side b, one entry per constraint.
    pub rhs: Vec<f64>,
}
impl SimplexSolver {
    /// Derives the problem dimensions from `obj` and `rhs`.
    pub fn new(obj: Vec<f64>, a_matrix: Vec<Vec<f64>>, rhs: Vec<f64>) -> Self {
        let num_vars = obj.len();
        let num_constraints = rhs.len();
        Self {
            num_vars,
            num_constraints,
            obj,
            a_matrix,
            rhs,
        }
    }
    /// Runs primal simplex on the tableau. Returns `None` when an entering
    /// column has no positive pivot candidate (unbounded direction);
    /// otherwise the objective value reached when no improving column
    /// remains or the iteration cap is hit.
    pub fn solve(&self) -> Option<f64> {
        let m = self.num_constraints;
        let n = self.num_vars;
        // Column layout: n structural vars, m slack vars, then the rhs column.
        let total_cols = n + m + 1;
        let rhs_col = n + m;
        // Rows 0..m are constraints; row m is the objective row.
        let mut tab = vec![vec![0.0_f64; total_cols]; m + 1];
        for i in 0..m {
            for j in 0..n {
                tab[i][j] = self.a_matrix[i][j];
            }
            // Identity block: slack variables form the starting basis.
            tab[i][n + i] = 1.0;
            tab[i][rhs_col] = self.rhs[i];
        }
        for j in 0..n {
            tab[m][j] = self.obj[j];
        }
        let max_iter = 1000;
        for _ in 0..max_iter {
            // Entering column: most negative objective-row entry (Dantzig rule).
            let pivot_col = (0..n + m).filter(|&j| tab[m][j] < -1e-9).min_by(|&a, &b| {
                tab[m][a]
                    .partial_cmp(&tab[m][b])
                    .unwrap_or(std::cmp::Ordering::Equal)
            });
            let pivot_col = match pivot_col {
                Some(c) => c,
                // No improving column left: current tableau is optimal.
                None => break,
            };
            // Leaving row: minimum ratio test over positive column entries.
            let pivot_row = (0..m)
                .filter(|&i| tab[i][pivot_col] > 1e-9)
                .min_by(|&a, &b| {
                    let ra = tab[a][rhs_col] / tab[a][pivot_col];
                    let rb = tab[b][rhs_col] / tab[b][pivot_col];
                    ra.partial_cmp(&rb).unwrap_or(std::cmp::Ordering::Equal)
                });
            // No positive entry in the entering column: problem is unbounded.
            let pivot_row = pivot_row?;
            let pval = tab[pivot_row][pivot_col];
            // Normalise the pivot row, then eliminate the pivot column
            // from every other row (including the objective row).
            for j in 0..total_cols {
                tab[pivot_row][j] /= pval;
            }
            for i in 0..=m {
                if i != pivot_row {
                    let factor = tab[i][pivot_col];
                    if factor.abs() > 1e-15 {
                        for j in 0..total_cols {
                            let delta = factor * tab[pivot_row][j];
                            tab[i][j] -= delta;
                        }
                    }
                }
            }
        }
        Some(-tab[m][rhs_col])
    }
}
/// Dijkstra shortest paths over an adjacency list with non-negative
/// weights, driven by a binary min-heap.
#[derive(Debug, Clone)]
pub struct Dijkstra {
    /// Number of vertices.
    pub n: usize,
    /// adj[u] lists (neighbor, weight) pairs of outgoing edges.
    pub adj: Vec<Vec<(usize, u64)>>,
}
impl Dijkstra {
    /// Creates an edgeless graph on `n` vertices.
    pub fn new(n: usize) -> Self {
        Self {
            n,
            adj: vec![Vec::new(); n],
        }
    }
    /// Adds a directed edge `u -> v` of weight `w`.
    pub fn add_edge(&mut self, u: usize, v: usize, w: u64) {
        self.adj[u].push((v, w));
    }
    /// Distances from `source`; unreachable vertices keep `u64::MAX`.
    pub fn shortest_paths(&self, source: usize) -> Vec<u64> {
        let mut dist = vec![u64::MAX; self.n];
        dist[source] = 0;
        // `Reverse` turns the max-heap into a min-heap on (distance, vertex).
        let mut pending: BinaryHeap<Reverse<(u64, usize)>> = BinaryHeap::new();
        pending.push(Reverse((0, source)));
        while let Some(Reverse((d, u))) = pending.pop() {
            // Stale entry: a shorter route to `u` was already settled.
            if d > dist[u] {
                continue;
            }
            for &(v, w) in &self.adj[u] {
                let candidate = d + w;
                if candidate < dist[v] {
                    dist[v] = candidate;
                    pending.push(Reverse((candidate, v)));
                }
            }
        }
        dist
    }
}
1236}