1use super::functions::*;
5
/// A transportation problem: route goods from supply sources to demand
/// sinks at minimum cost, where `cost[i][j]` is the unit cost of shipping
/// from source `i` to sink `j`.
#[derive(Debug, Clone)]
pub struct TransportationProblem {
    pub supply: Vec<f64>,
    pub demand: Vec<f64>,
    pub cost: Vec<Vec<f64>>,
}

impl TransportationProblem {
    /// Builds a problem from supply, demand, and unit-cost data.
    pub fn new(supply: Vec<f64>, demand: Vec<f64>, cost: Vec<Vec<f64>>) -> Self {
        TransportationProblem {
            supply,
            demand,
            cost,
        }
    }

    /// True when total supply equals total demand within 1e-9.
    pub fn is_balanced(&self) -> bool {
        let ts: f64 = self.supply.iter().sum();
        let td: f64 = self.demand.iter().sum();
        (ts - td).abs() < 1e-9
    }

    /// Northwest-corner rule: builds an initial feasible allocation by
    /// repeatedly shipping as much as possible into the upper-left-most
    /// cell whose row still has supply and whose column still has demand.
    pub fn northwest_corner(&self) -> Vec<Vec<f64>> {
        let m = self.supply.len();
        let n = self.demand.len();
        let mut alloc = vec![vec![0.0f64; n]; m];
        let mut supply = self.supply.clone();
        let mut demand = self.demand.clone();
        let (mut i, mut j) = (0, 0);
        while i < m && j < n {
            let qty = supply[i].min(demand[j]);
            alloc[i][j] = qty;
            supply[i] -= qty;
            demand[j] -= qty;
            // Advance past exhausted rows/columns; on a degenerate step
            // (supply and demand run out together) both indices advance.
            if supply[i] < 1e-10 {
                i += 1;
            }
            if demand[j] < 1e-10 {
                j += 1;
            }
        }
        alloc
    }

    /// Total shipping cost of `alloc` under `self.cost`; cells outside the
    /// cost matrix are ignored.
    pub fn total_cost(&self, alloc: &[Vec<f64>]) -> f64 {
        let mut cost = 0.0;
        for (i, row) in alloc.iter().enumerate() {
            for (j, &qty) in row.iter().enumerate() {
                if i < self.cost.len() && j < self.cost[i].len() {
                    cost += qty * self.cost[i][j];
                }
            }
        }
        cost
    }

    /// Vogel approximation method: repeatedly allocates in the open row or
    /// column with the largest penalty (gap between its two cheapest open
    /// cells), which usually beats the northwest-corner start.
    ///
    /// Fix: penalties are now compared against `f64::NEG_INFINITY` with an
    /// explicit `found` flag, and the loop stops only when no open row or
    /// column remains. The previous sentinel of `-1.0` (and the
    /// `best_penalty < 0.0` break) silently aborted allocation whenever
    /// every remaining penalty was below -1 — possible with negative
    /// costs — leaving supply unshipped.
    pub fn vogel_approximation(&self) -> Vec<Vec<f64>> {
        let m = self.supply.len();
        let n = self.demand.len();
        let mut alloc = vec![vec![0.0f64; n]; m];
        let mut supply = self.supply.clone();
        let mut demand = self.demand.clone();
        let mut row_done = vec![false; m];
        let mut col_done = vec![false; n];
        for _ in 0..(m + n) {
            let mut best_penalty = f64::NEG_INFINITY;
            let mut found = false;
            let mut best_is_row = true;
            let mut best_idx = 0;
            for i in 0..m {
                if row_done[i] || supply[i] < 1e-10 {
                    continue;
                }
                let mut costs: Vec<f64> = (0..n)
                    .filter(|&j| !col_done[j] && demand[j] > 1e-10)
                    .map(|j| self.cost[i][j])
                    .collect();
                costs.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
                // Penalty: gap between the two cheapest open cells, or the
                // single remaining cost when only one column is open.
                let penalty = if costs.len() >= 2 {
                    costs[1] - costs[0]
                } else if costs.len() == 1 {
                    costs[0]
                } else {
                    continue;
                };
                if penalty > best_penalty {
                    best_penalty = penalty;
                    found = true;
                    best_is_row = true;
                    best_idx = i;
                }
            }
            for j in 0..n {
                if col_done[j] || demand[j] < 1e-10 {
                    continue;
                }
                let mut costs: Vec<f64> = (0..m)
                    .filter(|&i| !row_done[i] && supply[i] > 1e-10)
                    .map(|i| self.cost[i][j])
                    .collect();
                costs.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal));
                let penalty = if costs.len() >= 2 {
                    costs[1] - costs[0]
                } else if costs.len() == 1 {
                    costs[0]
                } else {
                    continue;
                };
                if penalty > best_penalty {
                    best_penalty = penalty;
                    found = true;
                    best_is_row = false;
                    best_idx = j;
                }
            }
            // No open row or column left: the allocation is complete.
            if !found {
                break;
            }
            if best_is_row {
                // Allocate into the cheapest open column of the chosen row.
                let i = best_idx;
                let j = (0..n)
                    .filter(|&j| !col_done[j] && demand[j] > 1e-10)
                    .min_by(|&a, &b| {
                        self.cost[i][a]
                            .partial_cmp(&self.cost[i][b])
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .unwrap_or(0);
                let qty = supply[i].min(demand[j]);
                alloc[i][j] = qty;
                supply[i] -= qty;
                demand[j] -= qty;
                if supply[i] < 1e-10 {
                    row_done[i] = true;
                }
                if demand[j] < 1e-10 {
                    col_done[j] = true;
                }
            } else {
                // Allocate into the cheapest open row of the chosen column.
                let j = best_idx;
                let i = (0..m)
                    .filter(|&i| !row_done[i] && supply[i] > 1e-10)
                    .min_by(|&a, &b| {
                        self.cost[a][j]
                            .partial_cmp(&self.cost[b][j])
                            .unwrap_or(std::cmp::Ordering::Equal)
                    })
                    .unwrap_or(0);
                let qty = supply[i].min(demand[j]);
                alloc[i][j] = qty;
                supply[i] -= qty;
                demand[j] -= qty;
                if supply[i] < 1e-10 {
                    row_done[i] = true;
                }
                if demand[j] < 1e-10 {
                    col_done[j] = true;
                }
            }
        }
        alloc
    }
}
/// A linear program `min c·x` over `x >= 0` with constraint data `a`, `b`.
///
/// NOTE(review): `solve()` appends one zero-cost slack column per row and
/// starts from the all-slack basis, which effectively treats the
/// constraints as `a·x <= b`, while `is_feasible()` checks `a·x == b`
/// exactly. Confirm which convention callers rely on (`InequalityLP`
/// pre-adds its own slacks before delegating here).
#[derive(Debug, Clone)]
pub struct LinearProgram {
    // Objective coefficients; length defines `n_vars`.
    pub c: Vec<f64>,
    // Constraint matrix rows (short rows are zero-padded by `solve`).
    pub a: Vec<Vec<f64>>,
    // Right-hand side; length defines `n_constraints`.
    pub b: Vec<f64>,
    pub n_vars: usize,
    pub n_constraints: usize,
}
impl LinearProgram {
    /// Builds a program; `n_vars`/`n_constraints` derive from `c`/`b`.
    pub fn new(c: Vec<f64>, a: Vec<Vec<f64>>, b: Vec<f64>) -> Self {
        let n_vars = c.len();
        let n_constraints = b.len();
        LinearProgram {
            c,
            a,
            b,
            n_vars,
            n_constraints,
        }
    }
    /// Primal simplex on the tableau `[A | I | b]`, starting from the
    /// all-slack basis. Requires `b >= 0` (returns `Infeasible` otherwise,
    /// since no Phase-I is implemented).
    pub fn solve(&self) -> LpResult {
        if self.n_vars == 0 || self.n_constraints == 0 {
            return LpResult::Optimal {
                objective: 0.0,
                solution: vec![0.0; self.n_vars],
            };
        }
        let m = self.n_constraints;
        let n = self.n_vars;
        // The slack basis is only feasible when every b_i >= 0.
        for &bi in &self.b {
            if bi < -1e-10 {
                return LpResult::Infeasible;
            }
        }
        // Initial basis: the m slack columns n..n+m.
        let mut basis: Vec<usize> = (n..n + m).collect();
        let total = n + m;
        // Row i of the tableau is [a_i | e_i | b_i].
        let mut tableau: Vec<Vec<f64>> = (0..m)
            .map(|i| {
                let mut row = vec![0.0_f64; total + 1];
                for j in 0..n {
                    row[j] = if j < self.a[i].len() {
                        self.a[i][j]
                    } else {
                        0.0
                    };
                }
                row[n + i] = 1.0;
                row[total] = self.b[i];
                row
            })
            .collect();
        // Reduced-cost row; slacks start with zero cost.
        let mut cost: Vec<f64> = self.c.clone();
        cost.extend(vec![0.0; m]);
        // Iteration cap guards against cycling (no Bland's rule is used).
        for _ in 0..4 * (n + m) {
            // Entering column: most negative reduced cost among nonbasic
            // columns (Dantzig rule).
            let (enter, rc) = cost
                .iter()
                .enumerate()
                .filter(|(j, _)| !basis.contains(j))
                .min_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
                .map(|(j, &v)| (j, v))
                .unwrap_or((0, 0.0));
            if rc >= -1e-10 {
                break;
            }
            // Ratio test picks the leaving row.
            let mut min_ratio = f64::INFINITY;
            let mut leave_row = None;
            for (i, row) in tableau.iter().enumerate() {
                let a_ij = row[enter];
                if a_ij > 1e-10 {
                    let ratio = row[total] / a_ij;
                    if ratio < min_ratio - 1e-14 {
                        min_ratio = ratio;
                        leave_row = Some(i);
                    }
                }
            }
            // No positive pivot entry: the entering column is unbounded.
            let lr = match leave_row {
                Some(r) => r,
                None => return LpResult::Unbounded,
            };
            // Gauss-Jordan pivot on (lr, enter).
            let pivot = tableau[lr][enter];
            for v in tableau[lr].iter_mut() {
                *v /= pivot;
            }
            for i in 0..m {
                if i != lr {
                    let factor = tableau[i][enter];
                    // Clone so the pivot row can be read while row i mutates.
                    let pivot_row: Vec<f64> = tableau[lr].clone();
                    for j in 0..=total {
                        tableau[i][j] -= factor * pivot_row[j];
                    }
                }
            }
            // Update the reduced-cost row; the objective value column is
            // skipped because the objective is recomputed from x at the end.
            let factor = cost[enter];
            for j in 0..=total {
                let delta = factor * tableau[lr][j];
                if j < total {
                    cost[j] -= delta;
                }
            }
            cost[enter] = 0.0;
            basis[lr] = enter;
        }
        // Read the primal solution off the basic rows (slack values are
        // dropped).
        let mut x = vec![0.0_f64; n];
        for (i, &b_var) in basis.iter().enumerate() {
            if b_var < n {
                x[b_var] = tableau[i][total];
            }
        }
        LpResult::Optimal {
            objective: self.objective(&x),
            solution: x,
        }
    }
    /// Checks nonnegativity (within 1e-9) and that every row satisfies
    /// `a_i·x == b_i` within 1e-9 (equality form; see struct-level note).
    pub fn is_feasible(&self, x: &[f64]) -> bool {
        if x.len() != self.n_vars {
            return false;
        }
        if x.iter().any(|&v| v < -1e-9) {
            return false;
        }
        for (i, row) in self.a.iter().enumerate() {
            let lhs: f64 = row.iter().zip(x.iter()).map(|(a, xv)| a * xv).sum();
            let rhs = if i < self.b.len() { self.b[i] } else { 0.0 };
            if (lhs - rhs).abs() > 1e-9 {
                return false;
            }
        }
        true
    }
    /// Objective value `c·x`.
    pub fn objective(&self, x: &[f64]) -> f64 {
        self.c.iter().zip(x.iter()).map(|(ci, xi)| ci * xi).sum()
    }
    /// LP dual: objective `-b` (negated to express the dual's maximization
    /// as a minimization), constraint matrix `A` transposed, RHS `c`.
    pub fn dual(&self) -> LinearProgram {
        let m = self.n_constraints;
        let n = self.n_vars;
        let dual_c: Vec<f64> = self.b.iter().map(|&bi| -bi).collect();
        let dual_b: Vec<f64> = self.c.clone();
        let dual_a: Vec<Vec<f64>> = (0..n)
            .map(|j| {
                (0..m)
                    .map(|i| {
                        if i < self.a.len() && j < self.a[i].len() {
                            self.a[i][j]
                        } else {
                            0.0
                        }
                    })
                    .collect()
            })
            .collect();
        LinearProgram::new(dual_c, dual_a, dual_b)
    }
    /// Shadow prices estimated by finite differences: each b_i is bumped
    /// by 1e-5 and the change in optimal objective is divided by the bump.
    /// Costs one full re-solve per constraint.
    pub fn shadow_prices(&self) -> Vec<f64> {
        match self.solve() {
            LpResult::Optimal { solution, .. } => {
                let base_obj = self.objective(&solution);
                (0..self.n_constraints)
                    .map(|i| {
                        let mut pb = self.b.clone();
                        pb[i] += 1e-5;
                        let plp = LinearProgram::new(self.c.clone(), self.a.clone(), pb);
                        match plp.solve() {
                            LpResult::Optimal { objective, .. } => (objective - base_obj) / 1e-5,
                            _ => 0.0,
                        }
                    })
                    .collect()
            }
            _ => vec![0.0; self.n_constraints],
        }
    }
    /// Reduced costs `c_j - y·A_j` using the dual optimum `y`; falls back
    /// to `c` unchanged when the dual does not solve to optimality.
    pub fn reduced_costs(&self) -> Vec<f64> {
        let dual = self.dual();
        match dual.solve() {
            LpResult::Optimal { solution: y, .. } => {
                let mut rc = self.c.clone();
                for j in 0..self.n_vars {
                    let mut dc = 0.0;
                    for (i, yi) in y.iter().enumerate() {
                        if i < self.a.len() && j < self.a[i].len() {
                            dc += yi * self.a[i][j];
                        }
                    }
                    rc[j] -= dc;
                }
                rc
            }
            _ => self.c.clone(),
        }
    }
}
/// A directed arc in a network flow problem.
#[derive(Debug, Clone)]
pub struct NetworkEdge {
    // Tail node index.
    pub from: usize,
    // Head node index.
    pub to: usize,
    // Upper bound on flow; may be `f64::INFINITY` for uncapacitated arcs.
    pub capacity: f64,
    // Cost per unit of flow.
    pub cost: f64,
}
360#[derive(Debug, Clone)]
361pub struct InequalityLP {
362 pub c: Vec<f64>,
363 pub a: Vec<Vec<f64>>,
364 pub b: Vec<f64>,
365}
366impl InequalityLP {
367 pub fn new(c: Vec<f64>, a: Vec<Vec<f64>>, b: Vec<f64>) -> Self {
368 InequalityLP { c, a, b }
369 }
370 pub fn to_standard_form(&self) -> LinearProgram {
371 let m = self.b.len();
372 let n = self.c.len();
373 let mut std_c = self.c.clone();
374 std_c.extend(vec![0.0; m]);
375 let std_a: Vec<Vec<f64>> = (0..m)
376 .map(|i| {
377 let mut row: Vec<f64> = if i < self.a.len() {
378 let mut r = self.a[i].clone();
379 r.resize(n, 0.0);
380 r
381 } else {
382 vec![0.0; n]
383 };
384 for k in 0..m {
385 row.push(if k == i { 1.0 } else { 0.0 });
386 }
387 row
388 })
389 .collect();
390 LinearProgram::new(std_c, std_a, self.b.clone())
391 }
392 pub fn solve(&self) -> LpResult {
393 self.to_standard_form().solve()
394 }
395}
396pub struct IntegerProgram {
397 pub lp: LinearProgram,
398 pub integer_vars: Vec<usize>,
399}
400impl IntegerProgram {
401 pub fn new(lp: LinearProgram, integer_vars: Vec<usize>) -> Self {
402 IntegerProgram { lp, integer_vars }
403 }
404 pub fn solve(&self) -> LpResult {
405 self.branch_and_bound(self.lp.clone(), 0)
406 }
407 fn branch_and_bound(&self, lp: LinearProgram, depth: usize) -> LpResult {
408 if depth > 8 {
409 return lp.solve();
410 }
411 match lp.solve() {
412 LpResult::Infeasible => LpResult::Infeasible,
413 LpResult::Unbounded => LpResult::Unbounded,
414 LpResult::Optimal {
415 objective,
416 solution,
417 } => {
418 let frac_var = self.integer_vars.iter().find(|&&j| {
419 j < solution.len() && (solution[j] - solution[j].round()).abs() > 1e-6
420 });
421 match frac_var {
422 None => LpResult::Optimal {
423 objective,
424 solution,
425 },
426 Some(&j) => {
427 let val = solution[j];
428 let r1 = self.branch_and_bound(
429 add_bound_constraint(&lp, j, val.floor(), true),
430 depth + 1,
431 );
432 let r2 = self.branch_and_bound(
433 add_bound_constraint(&lp, j, val.ceil(), false),
434 depth + 1,
435 );
436 best_result(r1, r2)
437 }
438 }
439 }
440 }
441 }
442}
/// Two-stage stochastic program solved with a Benders-style cutting loop.
///
/// NOTE(review): `solve()` pushes cuts of the form
/// `(pi, q) = (c_first, second_stage_cost(x))`, i.e. the first-stage cost
/// vector is used as the cut slope instead of subproblem dual values.
/// That makes the cuts heuristic rather than true Benders optimality
/// cuts — confirm whether this simplification is intended.
#[derive(Debug, Clone)]
pub struct BendersDecomposition {
    // First-stage objective coefficients.
    pub c_first: Vec<f64>,
    // First-stage constraint rows.
    pub a_first: Vec<Vec<f64>>,
    // First-stage right-hand side.
    pub b_first: Vec<f64>,
    // Second-stage scenarios with probabilities.
    pub scenarios: Vec<ScenarioData>,
    // Cap on cutting-loop iterations (and on stored cuts).
    pub max_iter: u32,
    // Improvement threshold for accepting a new incumbent.
    pub tolerance: f64,
}
impl BendersDecomposition {
    /// Creates a decomposition with defaults `max_iter = 50`,
    /// `tolerance = 1e-6`.
    pub fn new(
        c_first: Vec<f64>,
        a_first: Vec<Vec<f64>>,
        b_first: Vec<f64>,
        scenarios: Vec<ScenarioData>,
    ) -> Self {
        BendersDecomposition {
            c_first,
            a_first,
            b_first,
            scenarios,
            max_iter: 50,
            tolerance: 1e-6,
        }
    }
    /// Master problem: `min c_first·x + theta` subject to the first-stage
    /// rows plus one row `-pi·x - theta <= -q` (i.e. `pi·x + theta >= q`)
    /// per cut. The extra coefficient pushed onto `c` is theta's.
    pub fn solve_master(&self, cuts: &[(Vec<f64>, f64)]) -> LpResult {
        let n = self.c_first.len();
        let mut c = self.c_first.clone();
        c.push(1.0);
        let mut rows = self.a_first.clone();
        let mut rhs = self.b_first.clone();
        for (pi, q) in cuts {
            let mut row: Vec<f64> = pi.iter().map(|v| -v).collect();
            row.resize(n, 0.0);
            // Theta column (first-stage rows get 0 here when InequalityLP
            // pads them out to the enlarged variable count).
            row.push(-1.0);
            rows.push(row);
            rhs.push(-q);
        }
        let ilp = InequalityLP::new(c, rows, rhs);
        ilp.solve()
    }
    /// Expected recourse cost at first-stage decision `x`: per scenario,
    /// solves `min c_second·y` with `y_k <= b_second_k - a_first_k·x`,
    /// `y >= 0`, then sums probability-weighted objectives (scenarios that
    /// fail to solve contribute 0).
    ///
    /// NOTE(review): the residual indexes rows of `a_first` by the
    /// second-stage constraint index, coupling the two stages'
    /// dimensions — confirm `b_second.len() <= a_first.len()` is intended.
    pub fn second_stage_cost(&self, x: &[f64]) -> f64 {
        self.scenarios
            .iter()
            .map(|s| {
                let n2 = s.c_second.len();
                if n2 == 0 {
                    return 0.0;
                }
                let b_eff: Vec<f64> = s
                    .b_second
                    .iter()
                    .enumerate()
                    .map(|(i, bi)| {
                        let ax: f64 = if i < self.a_first.len() {
                            self.a_first[i]
                                .iter()
                                .zip(x.iter())
                                .map(|(a, xi)| a * xi)
                                .sum()
                        } else {
                            0.0
                        };
                        bi - ax
                    })
                    .collect();
                // Identity constraint matrix: each y_k bounded by b_eff_k.
                let a_eye: Vec<Vec<f64>> = (0..n2)
                    .map(|k| {
                        let mut row = vec![0.0; n2];
                        if k < row.len() {
                            row[k] = 1.0;
                        }
                        row
                    })
                    .collect();
                let lp = LinearProgram::new(s.c_second.clone(), a_eye, b_eff);
                match lp.solve() {
                    LpResult::Optimal { objective, .. } => s.probability * objective,
                    _ => 0.0,
                }
            })
            .sum()
    }
    /// Cutting loop: solve master, evaluate the true two-stage cost, keep
    /// the incumbent, append a cut, repeat. Returns the best `(x, cost)`
    /// found, or `None` if no master solve ever succeeded.
    pub fn solve(&self) -> Option<(Vec<f64>, f64)> {
        let mut cuts: Vec<(Vec<f64>, f64)> = Vec::new();
        let mut best_x = vec![0.0; self.c_first.len()];
        let mut best_obj = f64::INFINITY;
        for _iter in 0..self.max_iter {
            match self.solve_master(&cuts) {
                LpResult::Optimal { solution, .. } => {
                    // Drop the trailing theta component.
                    let x: Vec<f64> = solution[..self.c_first.len()].to_vec();
                    let first_cost: f64 = self
                        .c_first
                        .iter()
                        .zip(x.iter())
                        .map(|(c, xi)| c * xi)
                        .sum();
                    let second_cost = self.second_stage_cost(&x);
                    let total = first_cost + second_cost;
                    if total < best_obj - self.tolerance {
                        best_obj = total;
                        best_x = x.clone();
                    }
                    // See the struct-level review note on this cut choice.
                    let pi: Vec<f64> = self.c_first.clone();
                    let q = second_cost;
                    cuts.push((pi, q));
                    if cuts.len() as u32 >= self.max_iter {
                        break;
                    }
                }
                _ => break,
            }
        }
        if best_obj < f64::INFINITY {
            Some((best_x, best_obj))
        } else {
            None
        }
    }
}
/// Minimum-cost flow formulated as an LP and delegated to `InequalityLP`
/// (despite the name, this is not a network-simplex implementation).
///
/// NOTE(review): only two constraint families are generated — per-node
/// "outflow <= supply" rows (and only for nodes with positive supply; the
/// `sup < 1e-12` guard also skips demand nodes carrying negative supply)
/// and per-edge capacity rows. There is no flow-conservation or demand
/// constraint, so with nonnegative edge costs the zero flow is optimal.
/// Confirm whether demand/conservation rows were intended.
#[derive(Debug, Clone)]
pub struct NetworkSimplexSolver {
    pub nodes: usize,
    pub edges: Vec<NetworkEdge>,
    // Net supply per node (positive = source).
    pub supply: Vec<f64>,
}
impl NetworkSimplexSolver {
    /// Bundles the node count, edge list, and per-node supplies.
    pub fn new(nodes: usize, edges: Vec<NetworkEdge>, supply: Vec<f64>) -> Self {
        NetworkSimplexSolver {
            nodes,
            edges,
            supply,
        }
    }
    /// Builds the LP described above and returns `(flows, objective)` on
    /// success, `None` when the LP fails to solve.
    pub fn solve(&self) -> Option<(Vec<f64>, f64)> {
        let e = self.edges.len();
        let n = self.nodes;
        if e == 0 || n == 0 {
            return Some((vec![], 0.0));
        }
        // One flow variable per edge, priced at the edge cost.
        let c: Vec<f64> = self.edges.iter().map(|ed| ed.cost).collect();
        let mut rows: Vec<Vec<f64>> = Vec::new();
        let mut rhs: Vec<f64> = Vec::new();
        for node in 0..n {
            let sup = if node < self.supply.len() {
                self.supply[node]
            } else {
                0.0
            };
            if sup < 1e-12 {
                continue;
            }
            // Outflow of this supply node is capped by its supply.
            let mut row = vec![0.0; e];
            for (k, ed) in self.edges.iter().enumerate() {
                if ed.from == node {
                    row[k] = 1.0;
                }
            }
            rows.push(row);
            rhs.push(sup);
        }
        // Per-edge capacity rows (uncapacitated edges generate none).
        for (k, ed) in self.edges.iter().enumerate() {
            if ed.capacity < f64::INFINITY {
                let mut row = vec![0.0; e];
                row[k] = 1.0;
                rows.push(row);
                rhs.push(ed.capacity);
            }
        }
        let ilp = InequalityLP::new(c, rows, rhs);
        match ilp.solve() {
            LpResult::Optimal {
                objective,
                solution,
            } => Some((solution, objective)),
            _ => None,
        }
    }
    /// Total cost of a flow vector under the edge costs.
    pub fn total_cost(&self, flows: &[f64]) -> f64 {
        self.edges
            .iter()
            .zip(flows.iter())
            .map(|(ed, &f)| ed.cost * f)
            .sum()
    }
}
/// Log-barrier interior-point method for `min c·x` subject to
/// `a·x < b` and `x > 0`, using plain gradient descent with a
/// feasibility-preserving backtracking line search for centering.
pub struct InteriorPointSolver {
    // Barrier parameter growth factor per outer iteration.
    pub mu: f64,
    // Initial barrier weight t.
    pub t_init: f64,
    // Outer (barrier) iteration cap.
    pub max_outer: u32,
    // Inner (centering) iteration cap.
    pub max_inner: u32,
    // Convergence threshold for both the gap bound and the gradient norm.
    pub tolerance: f64,
}
impl InteriorPointSolver {
    /// Default parameters: mu = 10, t = 1, 50 outer / 100 inner steps.
    pub fn new() -> Self {
        InteriorPointSolver {
            mu: 10.0,
            t_init: 1.0,
            max_outer: 50,
            max_inner: 100,
            tolerance: 1e-8,
        }
    }
    /// Constructor with explicit parameters; tolerance stays 1e-8.
    pub fn with_params(mu: f64, t_init: f64, max_outer: u32, max_inner: u32) -> Self {
        InteriorPointSolver {
            mu,
            t_init,
            max_outer,
            max_inner,
            tolerance: 1e-8,
        }
    }
    /// Barrier loop: center at increasing t until the gap bound
    /// `num_ineq / t` drops below tolerance. Returns `Infeasible` only
    /// when no strictly interior start is found; never `Unbounded`.
    pub fn solve(&self, c: &[f64], a: &[Vec<f64>], b: &[f64]) -> LpResult {
        let n = c.len();
        let m = b.len();
        if n == 0 {
            return LpResult::Optimal {
                objective: 0.0,
                solution: vec![],
            };
        }
        // Try a fixed interior guess first, then search by shrinking.
        let mut x = vec![0.1_f64; n];
        if !self.is_strictly_feasible(&x, a, b) {
            x = self.find_initial_point(n, a, b);
            if !self.is_strictly_feasible(&x, a, b) {
                return LpResult::Infeasible;
            }
        }
        let mut t = self.t_init;
        // One inequality per constraint row plus one per x_j >= 0 bound.
        let num_ineq = m + n;
        for _ in 0..self.max_outer {
            x = self.centering_step(&x, c, a, b, t);
            if (num_ineq as f64 / t) < self.tolerance {
                break;
            }
            t *= self.mu;
        }
        let obj: f64 = c.iter().zip(x.iter()).map(|(ci, xi)| ci * xi).sum();
        LpResult::Optimal {
            objective: obj,
            solution: x,
        }
    }
    /// Strict interiority: x > 0 componentwise and a_i·x < b_i - 1e-10.
    fn is_strictly_feasible(&self, x: &[f64], a: &[Vec<f64>], b: &[f64]) -> bool {
        if x.iter().any(|&v| v <= 0.0) {
            return false;
        }
        for (i, row) in a.iter().enumerate() {
            let lhs: f64 = row.iter().zip(x.iter()).map(|(ai, xi)| ai * xi).sum();
            if lhs >= b[i] - 1e-10 {
                return false;
            }
        }
        true
    }
    /// Halves a uniform positive point up to 20 times looking for a
    /// strictly interior start; the caller re-validates the result.
    fn find_initial_point(&self, n: usize, a: &[Vec<f64>], b: &[f64]) -> Vec<f64> {
        let mut scale = 0.1;
        for _ in 0..20 {
            let x = vec![scale; n];
            if self.is_strictly_feasible(&x, a, b) {
                return x;
            }
            scale *= 0.5;
        }
        vec![scale; n]
    }
    /// Gradient descent on the barrier objective
    /// `t·c·x - Σ log x_j - Σ log(b_i - a_i·x)` with backtracking that
    /// only accepts strictly feasible iterates.
    fn centering_step(&self, x0: &[f64], c: &[f64], a: &[Vec<f64>], b: &[f64], t: f64) -> Vec<f64> {
        let n = x0.len();
        let mut x = x0.to_vec();
        let step_size = 0.01;
        for _ in 0..self.max_inner {
            // Gradient of the scaled objective term.
            let mut grad = vec![0.0f64; n];
            for j in 0..n {
                grad[j] = t * c[j];
            }
            // Gradient of -log(x_j) (guarded against tiny x_j).
            for j in 0..n {
                if x[j] > 1e-15 {
                    grad[j] -= 1.0 / x[j];
                }
            }
            // Gradient of -log(b_i - a_i·x): +a_ij / slack_i.
            for (i, row) in a.iter().enumerate() {
                let slack: f64 = b[i]
                    - row
                        .iter()
                        .zip(x.iter())
                        .map(|(ai, xi)| ai * xi)
                        .sum::<f64>();
                if slack > 1e-15 {
                    for j in 0..n.min(row.len()) {
                        grad[j] += row[j] / slack;
                    }
                }
            }
            let grad_norm: f64 = grad.iter().map(|g| g * g).sum::<f64>().sqrt();
            if grad_norm < self.tolerance {
                break;
            }
            // Backtracking line search: halve alpha until the step stays
            // strictly inside the feasible region.
            let mut alpha = step_size;
            for _ in 0..20 {
                let x_new: Vec<f64> = x
                    .iter()
                    .zip(grad.iter())
                    .map(|(&xi, &gi)| xi - alpha * gi)
                    .collect();
                if self.is_strictly_feasible(&x_new, a, b) {
                    x = x_new;
                    break;
                }
                alpha *= 0.5;
            }
        }
        x
    }
}
/// Outcome of an LP solve.
#[derive(Debug, Clone)]
pub enum LpResult {
    /// Finite optimum: objective value and primal solution vector.
    Optimal { objective: f64, solution: Vec<f64> },
    /// No point satisfies the constraints.
    Infeasible,
    /// The objective can decrease without bound.
    Unbounded,
}
/// Cutting-stock column generation: a master LP over cutting patterns plus
/// a knapsack pricing subproblem that proposes new patterns.
#[derive(Debug, Clone)]
pub struct ColumnGenerationSolver {
    // Length of each item type.
    pub item_lengths: Vec<f64>,
    // Required count of each item type.
    pub demands: Vec<u64>,
    // Length of one stock piece.
    pub stock_length: f64,
    // Cap on generation rounds.
    pub max_iter: u32,
}
impl ColumnGenerationSolver {
    /// Creates a solver with default `max_iter = 100`.
    pub fn new(item_lengths: Vec<f64>, demands: Vec<u64>, stock_length: f64) -> Self {
        ColumnGenerationSolver {
            item_lengths,
            demands,
            stock_length,
            max_iter: 100,
        }
    }
    /// One starting pattern per item type: as many copies of that single
    /// item as fit in the stock length (zero copies when the item is
    /// longer than the stock — such a pattern is all-zero).
    pub fn initial_patterns(&self) -> Vec<Vec<u64>> {
        self.item_lengths
            .iter()
            .map(|&len| {
                let count = (self.stock_length / len).floor() as u64;
                let mut pattern = vec![0u64; self.item_lengths.len()];
                if let Some(pos) = self
                    .item_lengths
                    .iter()
                    .position(|&l| (l - len).abs() < 1e-12)
                {
                    pattern[pos] = count;
                }
                pattern
            })
            .collect()
    }
    /// Master LP: minimize the number of stock pieces used subject to
    /// covering demand. Coverage `>=` is encoded for `InequalityLP` as
    /// `<=` by negating both the pattern counts and the demands.
    fn solve_master(&self, patterns: &[Vec<u64>]) -> Option<Vec<f64>> {
        let n_items = self.item_lengths.len();
        let n_patterns = patterns.len();
        if n_patterns == 0 {
            return None;
        }
        // Every pattern costs one stock piece.
        let c = vec![1.0; n_patterns];
        let rows: Vec<Vec<f64>> = (0..n_items)
            .map(|i| {
                patterns
                    .iter()
                    .map(|p| -(if i < p.len() { p[i] as f64 } else { 0.0 }))
                    .collect()
            })
            .collect();
        let rhs: Vec<f64> = self.demands.iter().map(|&d| -(d as f64)).collect();
        let ilp = InequalityLP::new(c, rows, rhs);
        match ilp.solve() {
            LpResult::Optimal { solution, .. } => Some(solution),
            _ => None,
        }
    }
    /// Pricing: pack the most dual value into one stock length. Lengths
    /// and duals are scaled and rounded to integers for `knapsack_dp`.
    /// Returns a new pattern only when its reduced cost is below -1e-6.
    ///
    /// NOTE(review): the selection vector suggests `knapsack_dp` is 0/1
    /// (each item appears at most once per pattern); classic cutting-stock
    /// pricing allows repeated items — confirm the helper's semantics.
    fn pricing_subproblem(&self, duals: &[f64]) -> Option<Vec<u64>> {
        let n = self.item_lengths.len();
        let scale = 100.0;
        let cap = (self.stock_length * scale) as u64;
        let weights: Vec<u64> = self
            .item_lengths
            .iter()
            .map(|&l| (l * scale).round() as u64)
            .collect();
        // Negative duals are clamped to zero before scaling.
        let values: Vec<u64> = duals
            .iter()
            .map(|&d| (d.max(0.0) * scale * scale).round() as u64)
            .collect();
        let (selected, _) = knapsack_dp(&weights, &values, cap);
        let mut pattern = vec![0u64; n];
        for (i, s) in selected.iter().enumerate() {
            if *s {
                pattern[i] += 1;
            }
        }
        // Reduced cost of the candidate column: 1 - duals·pattern.
        let rc: f64 = 1.0
            - duals
                .iter()
                .zip(pattern.iter())
                .map(|(&d, &p)| d * p as f64)
                .sum::<f64>();
        if rc < -1e-6 {
            Some(pattern)
        } else {
            None
        }
    }
    /// Generation loop: solve the master, estimate duals, price a column;
    /// stops when pricing finds no improving pattern or `max_iter` hits.
    /// Returns `(patterns, pattern_usages, total_pieces)`.
    ///
    /// NOTE(review): the "duals" here are per-item coverage ratios derived
    /// from the primal solution, not simplex dual values — confirm this
    /// approximation is intended.
    pub fn solve(&self) -> Option<(Vec<Vec<u64>>, Vec<f64>, f64)> {
        let mut patterns = self.initial_patterns();
        for _ in 0..self.max_iter {
            let x = self.solve_master(&patterns)?;
            let n_items = self.item_lengths.len();
            let n_pat = patterns.len();
            let duals: Vec<f64> = (0..n_items)
                .map(|i| {
                    let contrib: f64 = patterns
                        .iter()
                        .zip(x.iter())
                        .map(|(p, &xj)| {
                            let pij = if i < p.len() { p[i] as f64 } else { 0.0 };
                            pij * xj
                        })
                        .sum();
                    let demand = if i < self.demands.len() {
                        self.demands[i] as f64
                    } else {
                        0.0
                    };
                    if demand > 1e-10 {
                        contrib / demand
                    } else {
                        0.0
                    }
                })
                .collect();
            match self.pricing_subproblem(&duals) {
                Some(new_pat) => patterns.push(new_pat),
                None => {
                    // No improving column: current master solution is final.
                    let obj: f64 = x.iter().sum();
                    let _ = n_pat;
                    return Some((patterns, x, obj));
                }
            }
        }
        // Iteration budget exhausted: return the best master solution.
        let x = self.solve_master(&patterns)?;
        let obj: f64 = x.iter().sum();
        Some((patterns, x, obj))
    }
}
/// Diagonal (axis-aligned) approximation of the ellipsoid method for
/// finding a point satisfying `a·x <= b`. Only the diagonal of the shape
/// matrix is tracked, so this is a heuristic rather than the full method.
#[derive(Debug, Clone)]
pub struct EllipsoidMethodSolver {
    pub max_iter: u32,
    pub tolerance: f64,
    pub initial_radius: f64,
}
impl EllipsoidMethodSolver {
    /// Default configuration: 500 iterations, 1e-8 tolerance, radius 1e6.
    pub fn new() -> Self {
        EllipsoidMethodSolver {
            max_iter: 500,
            tolerance: 1e-8,
            initial_radius: 1e6,
        }
    }
    /// Fully parameterized constructor.
    pub fn with_params(max_iter: u32, tolerance: f64, initial_radius: f64) -> Self {
        EllipsoidMethodSolver {
            max_iter,
            tolerance,
            initial_radius,
        }
    }
    /// Searches for `x` with `a·x <= b` (within tolerance). Returns `None`
    /// when the iteration budget runs out or the ellipsoid collapses along
    /// a cut direction.
    pub fn find_feasible(&self, a: &[Vec<f64>], b: &[f64]) -> Option<Vec<f64>> {
        // Dimension comes from the first constraint row; no rows means
        // trivially feasible.
        let n = match a.first() {
            None => return Some(vec![]),
            Some(first_row) => first_row.len(),
        };
        if n == 0 {
            return Some(vec![]);
        }
        let mut x = vec![0.0; n];
        // Squared semi-axis lengths of the axis-aligned ellipsoid.
        let mut radii2 = vec![self.initial_radius * self.initial_radius; n];
        for _ in 0..self.max_iter {
            // Most violated constraint: maximize a_i·x - b_i.
            let worst = a
                .iter()
                .zip(b.iter())
                .enumerate()
                .map(|(i, (row, &bi))| {
                    let dot: f64 = row
                        .iter()
                        .zip(x.iter())
                        .map(|(aij, xj)| aij * xj)
                        .sum();
                    (i, dot - bi)
                })
                .max_by(|(_, v1), (_, v2)| v1.partial_cmp(v2).unwrap_or(std::cmp::Ordering::Equal));
            let (worst_idx, worst_val) = worst.unwrap_or((0, f64::NEG_INFINITY));
            if worst_val <= self.tolerance {
                return Some(x);
            }
            // Cut direction: the violated constraint's normal.
            let g: Vec<f64> = match a.get(worst_idx) {
                Some(row) => row.clone(),
                None => vec![0.0; n],
            };
            let pg: Vec<f64> = g.iter().zip(radii2.iter()).map(|(gi, pi)| gi * pi).collect();
            let gpg: f64 = g.iter().zip(pg.iter()).map(|(gi, pgi)| gi * pgi).sum();
            if gpg < 1e-30 {
                // Degenerate along the cut direction; give up.
                break;
            }
            let norm = gpg.sqrt();
            // Move the center away from the violated half-space.
            let step = 1.0 / ((n + 1) as f64);
            for j in 0..n {
                x[j] -= step * pg[j] / norm;
            }
            // Shrink-and-expand update of the diagonal, clamped to stay
            // positive.
            let grow = (n * n) as f64 / ((n * n - 1).max(1) as f64);
            let cut = 2.0 / ((n + 1) as f64);
            for j in 0..n {
                let updated = grow * (radii2[j] - cut * pg[j] * pg[j] / gpg);
                radii2[j] = if updated < 1e-30 { 1e-30 } else { updated };
            }
        }
        None
    }
    /// Feasibility-only check; the objective `c` is ignored.
    pub fn lp_feasible(&self, c: &[f64], a: &[Vec<f64>], b: &[f64]) -> bool {
        let _ = c;
        self.find_feasible(a, b).is_some()
    }
}
998#[derive(Debug, Clone)]
999pub struct GomoryCutGenerator {
1000 pub max_cuts: u32,
1001 pub tolerance: f64,
1002}
1003impl GomoryCutGenerator {
1004 pub fn new() -> Self {
1005 GomoryCutGenerator {
1006 max_cuts: 20,
1007 tolerance: 1e-6,
1008 }
1009 }
1010 pub fn with_params(max_cuts: u32, tolerance: f64) -> Self {
1011 GomoryCutGenerator {
1012 max_cuts,
1013 tolerance,
1014 }
1015 }
1016 pub fn generate_cuts(&self, solution: &[f64], _lp: &LinearProgram) -> Vec<GomoryCut> {
1020 let mut cuts = Vec::new();
1021 for (j, &xj) in solution.iter().enumerate() {
1022 if cuts.len() as u32 >= self.max_cuts {
1023 break;
1024 }
1025 let frac = xj - xj.floor();
1026 if frac > self.tolerance && frac < 1.0 - self.tolerance {
1027 let mut coeffs = vec![0.0; solution.len()];
1028 if j < coeffs.len() {
1029 coeffs[j] = 1.0;
1030 }
1031 cuts.push(GomoryCut {
1032 coefficients: coeffs,
1033 rhs: xj.floor(),
1034 });
1035 }
1036 }
1037 cuts
1038 }
1039 pub fn solve_with_cuts(&self, lp: &LinearProgram) -> LpResult {
1041 let mut current_lp = lp.clone();
1042 for _ in 0..self.max_cuts {
1043 match current_lp.solve() {
1044 LpResult::Optimal {
1045 objective,
1046 solution,
1047 } => {
1048 let cuts = self.generate_cuts(&solution, ¤t_lp);
1049 if cuts.is_empty() {
1050 return LpResult::Optimal {
1051 objective,
1052 solution,
1053 };
1054 }
1055 for cut in &cuts {
1056 let m = current_lp.n_constraints;
1057 let n = current_lp.n_vars;
1058 let mut new_a = current_lp.a.clone();
1059 let mut row = cut.coefficients.clone();
1060 row.resize(n, 0.0);
1061 new_a.push(row);
1062 let mut new_b = current_lp.b.clone();
1063 new_b.push(cut.rhs);
1064 current_lp = LinearProgram {
1065 c: current_lp.c.clone(),
1066 a: new_a,
1067 b: new_b,
1068 n_vars: n,
1069 n_constraints: m + 1,
1070 };
1071 }
1072 }
1073 other => return other,
1074 }
1075 }
1076 current_lp.solve()
1077 }
1078}
/// A single cutting plane `coefficients · x <= rhs`.
#[derive(Debug, Clone)]
pub struct GomoryCut {
    pub coefficients: Vec<f64>,
    pub rhs: f64,
}
/// One second-stage scenario for `BendersDecomposition`.
#[derive(Debug, Clone)]
pub struct ScenarioData {
    // Scenario probability; weights this scenario's recourse cost.
    pub probability: f64,
    // Second-stage right-hand side.
    pub b_second: Vec<f64>,
    // Second-stage objective coefficients.
    pub c_second: Vec<f64>,
}