1use crate::error::OptimizeError;
8use crate::sparse_numdiff::SparseFiniteDiffOptions;
9use crate::unconstrained::memory_efficient::MemoryOptions;
10use crate::unconstrained::result::OptimizeResult;
11use ndarray::{Array1, ArrayView1};
13use scirs2_sparse::csr_array::CsrArray;
14use std::fs::{File, OpenOptions};
15use std::io::{Read, Seek, SeekFrom, Write};
16use std::path::PathBuf;
17
/// Configuration for the advanced large-scale (block-coordinate) optimizer.
#[derive(Debug, Clone)]
pub struct AdvancedScaleOptions {
    /// Options forwarded to the memory-efficient L-BFGS sub-optimizer.
    pub memory_options: MemoryOptions,
    /// Options for sparse finite-difference gradient estimation.
    pub sparse_options: SparseFiniteDiffOptions,
    /// Above this variable count, the full problem is not solved in memory
    /// directly; block-coordinate refinement is used instead.
    pub max_variables_in_memory: usize,
    /// Number of variables optimized together in one coordinate block.
    pub block_size: usize,
    /// Number of progressive refinement passes over all blocks.
    pub refinement_passes: usize,
    /// Force variables to be stored in a temp file rather than in memory.
    pub use_disk_storage: bool,
    /// Problem sizes above this threshold also trigger disk-backed storage.
    pub mmap_threshold: usize,
    /// Compression level for on-disk data.
    /// NOTE(review): not referenced by any code visible in this file — confirm
    /// it is consumed elsewhere before relying on it.
    pub compression_level: u32,
}
38
39impl Default for AdvancedScaleOptions {
40 fn default() -> Self {
41 Self {
42 memory_options: MemoryOptions::default(),
43 sparse_options: SparseFiniteDiffOptions::default(),
44 max_variables_in_memory: 100_000,
45 block_size: 10_000,
46 refinement_passes: 3,
47 use_disk_storage: false,
48 mmap_threshold: 1_000_000,
49 compression_level: 3,
50 }
51 }
52}
53
/// Mutable optimizer state: variable storage plus block bookkeeping.
struct AdvancedScaleState {
    /// The variable vector, held either in memory or in a temp file on disk.
    variables: VariableStorage,
    /// Last known sparse gradient, used to prioritize blocks between passes.
    gradient: Option<CsrArray<f64>>,
    /// Coordinate blocks the variables are partitioned into.
    blocks: Vec<VariableBlock>,
    #[allow(dead_code)]
    // Currently unused; presumably reserved for active-set strategies.
    active_variables: Vec<usize>,
    #[allow(dead_code)]
    // Configured temp directory (if any); the disk file path itself lives in
    // `VariableStorage::Disk`.
    temp_dir: Option<PathBuf>,
}
69
/// Where the full variable vector lives.
enum VariableStorage {
    /// Entire vector resident in memory.
    Memory(Array1<f64>),
    /// Vector serialized as little-endian f64 values in a temp file.
    Disk {
        /// Path of the backing file (removed on drop of the owning state).
        file_path: PathBuf,
        /// Number of f64 elements stored in the file.
        size: usize,
        #[allow(dead_code)]
        // Pre-sized scratch buffer; not used by the code visible here.
        buffer: Vec<f64>,
        #[allow(dead_code)]
        // Indices of currently-loaded variables; not used by the code visible here.
        active_indices: Vec<usize>,
    },
}
84
/// One contiguous block of variables for block-coordinate optimization.
#[derive(Debug, Clone)]
struct VariableBlock {
    /// Index of the first variable in this block.
    start_idx: usize,
    /// Number of variables in the block (last block may be short).
    size: usize,
    /// Scheduling priority; higher-priority blocks are optimized first.
    priority: f64,
    /// Iteration counter value at the last time this block was optimized.
    last_updated: usize,
}
97
98impl AdvancedScaleState {
99 fn new(x0: Array1<f64>, options: &AdvancedScaleOptions) -> Result<Self, OptimizeError> {
100 let n = x0.len();
101
102 let variables = if options.use_disk_storage || n > options.mmap_threshold {
104 let temp_dir = options
106 .memory_options
107 .temp_dir
108 .clone()
109 .unwrap_or_else(std::env::temp_dir);
110
111 std::fs::create_dir_all(&temp_dir).map_err(|e| {
112 OptimizeError::ComputationError(format!("Failed to create temp directory: {}", e))
113 })?;
114
115 let file_path = temp_dir.join(format!("advancedscale_vars_{}.dat", std::process::id()));
116
117 let mut file = File::create(&file_path).map_err(|e| {
119 OptimizeError::ComputationError(format!("Failed to create temp file: {}", e))
120 })?;
121
122 let bytes: Vec<u8> = x0
123 .as_slice()
124 .unwrap()
125 .iter()
126 .flat_map(|&x| x.to_le_bytes())
127 .collect();
128 file.write_all(&bytes).map_err(|e| {
129 OptimizeError::ComputationError(format!("Failed to write to temp file: {}", e))
130 })?;
131
132 VariableStorage::Disk {
133 file_path,
134 size: n,
135 buffer: vec![0.0; options.max_variables_in_memory.min(n)],
136 active_indices: Vec::new(),
137 }
138 } else {
139 VariableStorage::Memory(x0)
140 };
141
142 let blocks = create_variable_blocks(n, options.block_size);
144
145 Ok(Self {
146 variables,
147 gradient: None,
148 blocks,
149 active_variables: Vec::new(),
150 temp_dir: options.memory_options.temp_dir.clone(),
151 })
152 }
153
154 fn get_variables(&mut self) -> Result<Array1<f64>, OptimizeError> {
156 match &mut self.variables {
157 VariableStorage::Memory(x) => Ok(x.clone()),
158 VariableStorage::Disk {
159 file_path,
160 size,
161 buffer: _,
162 active_indices: _,
163 } => {
164 let mut file = File::open(file_path).map_err(|e| {
166 OptimizeError::ComputationError(format!("Failed to open temp file: {}", e))
167 })?;
168
169 let mut bytes = vec![0u8; *size * 8];
170 file.read_exact(&mut bytes).map_err(|e| {
171 OptimizeError::ComputationError(format!("Failed to read from temp file: {}", e))
172 })?;
173
174 let values: Result<Vec<f64>, _> = bytes
175 .chunks_exact(8)
176 .map(|chunk| {
177 let array: [u8; 8] = chunk.try_into().unwrap();
178 Ok(f64::from_le_bytes(array))
179 })
180 .collect();
181
182 let values = values.map_err(|_: std::io::Error| {
183 OptimizeError::ComputationError("Failed to deserialize variables".to_string())
184 })?;
185
186 Ok(Array1::from_vec(values))
187 }
188 }
189 }
190
191 fn update_variables(&mut self, new_x: &Array1<f64>) -> Result<(), OptimizeError> {
193 match &mut self.variables {
194 VariableStorage::Memory(x) => {
195 x.assign(new_x);
196 Ok(())
197 }
198 VariableStorage::Disk {
199 file_path,
200 size: _,
201 buffer: _,
202 active_indices: _,
203 } => {
204 let mut file = OpenOptions::new()
206 .write(true)
207 .open(file_path)
208 .map_err(|e| {
209 OptimizeError::ComputationError(format!("Failed to open temp file: {}", e))
210 })?;
211
212 let bytes: Vec<u8> = new_x
213 .as_slice()
214 .unwrap()
215 .iter()
216 .flat_map(|&x| x.to_le_bytes())
217 .collect();
218
219 file.seek(SeekFrom::Start(0)).map_err(|e| {
220 OptimizeError::ComputationError(format!("Failed to seek in temp file: {}", e))
221 })?;
222
223 file.write_all(&bytes).map_err(|e| {
224 OptimizeError::ComputationError(format!("Failed to write to temp file: {}", e))
225 })?;
226
227 Ok(())
228 }
229 }
230 }
231}
232
233impl Drop for AdvancedScaleState {
234 fn drop(&mut self) {
235 if let VariableStorage::Disk { file_path, .. } = &self.variables {
237 let _ = std::fs::remove_file(file_path);
238 }
239 }
240}
241
242#[allow(dead_code)]
244fn create_variable_blocks(n: usize, block_size: usize) -> Vec<VariableBlock> {
245 let mut _blocks = Vec::new();
246 let num_blocks = n.div_ceil(block_size);
247
248 for i in 0..num_blocks {
249 let start_idx = i * block_size;
250 let end_idx = std::cmp::min((i + 1) * block_size, n);
251 let size = end_idx - start_idx;
252
253 _blocks.push(VariableBlock {
254 start_idx,
255 size,
256 priority: 1.0, last_updated: 0,
258 });
259 }
260
261 _blocks
262}
263
264#[allow(dead_code)]
266pub fn minimize_advanced_scale<F, S>(
267 fun: F,
268 x0: Array1<f64>,
269 options: &AdvancedScaleOptions,
270) -> Result<OptimizeResult<S>, OptimizeError>
271where
272 F: Fn(&ArrayView1<f64>) -> S + Clone + Sync,
273 S: Into<f64> + Clone + Send,
274{
275 let n = x0.len();
276
277 if n < options.max_variables_in_memory {
278 return super::memory_efficient::minimize_memory_efficient_lbfgs(
280 fun,
281 x0,
282 &options.memory_options,
283 );
284 }
285
286 println!(
287 "Starting advanced-large-scale optimization for {} variables",
288 n
289 );
290
291 let mut state = AdvancedScaleState::new(x0, options)?;
293 let mut iteration = 0;
294 let mut total_nfev = 0;
295
296 for pass in 0..options.refinement_passes {
298 println!(
299 "Progressive refinement pass {}/{}",
300 pass + 1,
301 options.refinement_passes
302 );
303
304 if let Some(ref gradient) = state.gradient {
306 update_block_priorities(&mut state.blocks, gradient);
307 }
308
309 state
311 .blocks
312 .sort_by(|a, b| b.priority.partial_cmp(&a.priority).unwrap());
313
314 let num_blocks = state.blocks.len();
316 for i in 0..num_blocks {
317 iteration += 1;
318
319 let current_x = state.get_variables()?;
321 let block_start = state.blocks[i].start_idx;
322 let block_size = state.blocks[i].size;
323 let block_x = current_x
324 .slice(ndarray::s![block_start..block_start + block_size])
325 .to_owned();
326
327 let block_result = optimize_block(
329 &fun,
330 ¤t_x,
331 block_start,
332 block_x,
333 &options.memory_options,
334 )?;
335
336 total_nfev += block_result.nfev;
337
338 let mut new_x = current_x.clone();
340 new_x
341 .slice_mut(ndarray::s![block_start..block_start + block_size])
342 .assign(&block_result.x);
343
344 state.update_variables(&new_x)?;
345 state.blocks[i].last_updated = iteration;
346
347 if iteration % 10 == 0 {
349 let f_val = fun(&new_x.view()).into();
350 let grad_norm = estimate_sparse_gradient_norm(&fun, &new_x.view(), options)?;
351
352 if grad_norm < options.memory_options.base_options.gtol {
353 println!("Converged at pass {}, iteration {}", pass + 1, iteration);
354
355 return Ok(OptimizeResult {
356 x: new_x.clone(),
357 fun: fun(&new_x.view()),
358 nit: iteration,
359 func_evals: total_nfev,
360 nfev: total_nfev,
361 success: true,
362 message: "Advanced-scale optimization converged successfully.".to_string(),
363 jacobian: None,
364 hessian: None,
365 });
366 }
367
368 println!(
369 "Iteration {}: f = {:.6e}, ||g|| ≈ {:.6e}",
370 iteration, f_val, grad_norm
371 );
372 }
373
374 if iteration >= options.memory_options.base_options.max_iter {
375 break;
376 }
377 }
378 }
379
380 let final_x = state.get_variables()?;
381
382 Ok(OptimizeResult {
383 x: final_x.clone(),
384 fun: fun(&final_x.view()),
385 nit: iteration,
386 func_evals: total_nfev,
387 nfev: total_nfev,
388 success: iteration < options.memory_options.base_options.max_iter,
389 message: if iteration < options.memory_options.base_options.max_iter {
390 "Advanced-scale optimization completed successfully.".to_string()
391 } else {
392 "Maximum iterations reached.".to_string()
393 },
394 jacobian: None,
395 hessian: None,
396 })
397}
398
399#[allow(dead_code)]
401fn optimize_block<F, S>(
402 fun: &F,
403 full_x: &Array1<f64>,
404 block_start: usize,
405 block_x0: Array1<f64>,
406 options: &MemoryOptions,
407) -> Result<OptimizeResult<f64>, OptimizeError>
408where
409 F: Fn(&ArrayView1<f64>) -> S,
410 S: Into<f64> + Clone,
411{
412 let block_fun = |block_vars: &ArrayView1<f64>| -> f64 {
414 let mut temp_x = full_x.clone();
415 temp_x
416 .slice_mut(ndarray::s![block_start..block_start + block_vars.len()])
417 .assign(block_vars);
418 fun(&temp_x.view()).into()
419 };
420
421 super::memory_efficient::minimize_memory_efficient_lbfgs(block_fun, block_x0, options)
423}
424
/// Recompute each block's priority as the mean absolute gradient magnitude
/// over the block's variable range; blocks with no entries decay toward zero.
#[allow(dead_code)]
fn update_block_priorities(blocks: &mut [VariableBlock], gradient: &CsrArray<f64>) {
    for block in blocks {
        let mut total_grad_mag = 0.0;
        let mut count = 0;

        for idx in block.start_idx..block.start_idx + block.size {
            // NOTE(review): `get_data()` presumably returns the CSR nonzero
            // value array, which is indexed by nonzero position, not by
            // variable/column index. Indexing it with the variable index
            // `idx` only lines up when the gradient row is fully dense —
            // confirm against the CsrArray API and use a column lookup
            // if that is not guaranteed.
            if let Some(&grad_val) = gradient.get_data().get(idx) {
                total_grad_mag += grad_val.abs();
                count += 1;
            }
        }

        if count > 0 {
            block.priority = total_grad_mag / count as f64;
        } else {
            // No gradient information for this block: gradually de-prioritize.
            block.priority *= 0.9;
        }
    }
}
448
449#[allow(dead_code)]
451fn estimate_sparse_gradient_norm<F, S>(
452 fun: &F,
453 x: &ArrayView1<f64>,
454 options: &AdvancedScaleOptions,
455) -> Result<f64, OptimizeError>
456where
457 F: Fn(&ArrayView1<f64>) -> S,
458 S: Into<f64>,
459{
460 let n = x.len();
461
462 if n > options.max_variables_in_memory {
464 let sample_size = (options.max_variables_in_memory / 10).min(1000).max(10);
466 let step_size = options.sparse_options.rel_step.unwrap_or(1e-8);
467
468 let mut gradient_norm_squared = 0.0;
469 let f0 = fun(x).into();
470
471 let step = n / sample_size;
473 for i in (0..n).step_by(step).take(sample_size) {
474 let mut x_pert = x.to_owned();
476 let h = step_size * (1.0 + x[i].abs());
477 x_pert[i] += h;
478
479 let f_pert = fun(&x_pert.view()).into();
480 let grad_i = (f_pert - f0) / h;
481 gradient_norm_squared += grad_i * grad_i;
482 }
483
484 let scaling_factor = n as f64 / sample_size as f64;
486 Ok((gradient_norm_squared * scaling_factor).sqrt())
487 } else {
488 compute_sparse_gradient_norm(fun, x, &options.sparse_options)
490 }
491}
492
493#[allow(dead_code)]
495fn compute_sparse_gradient_norm<F, S>(
496 fun: &F,
497 x: &ArrayView1<f64>,
498 sparse_options: &SparseFiniteDiffOptions,
499) -> Result<f64, OptimizeError>
500where
501 F: Fn(&ArrayView1<f64>) -> S,
502 S: Into<f64>,
503{
504 let n = x.len();
505 let step_size = sparse_options.rel_step.unwrap_or(1e-8);
506 let _f0 = fun(x).into();
507
508 let mut gradient_norm_squared = 0.0;
510
511 for i in 0..n {
512 let h = step_size * (1.0 + x[i].abs());
513
514 let mut x_forward = x.to_owned();
516 x_forward[i] += h;
517 let f_forward = fun(&x_forward.view()).into();
518
519 let mut x_backward = x.to_owned();
521 x_backward[i] -= h;
522 let f_backward = fun(&x_backward.view()).into();
523
524 let grad_i = (f_forward - f_backward) / (2.0 * h);
526 gradient_norm_squared += grad_i * grad_i;
527 }
528
529 Ok(gradient_norm_squared.sqrt())
530}
531
532#[allow(dead_code)]
534pub fn create_advanced_scale_optimizer(
535 problem_size: usize,
536 available_memory_mb: usize,
537 estimated_sparsity: f64, ) -> AdvancedScaleOptions {
539 let available_bytes = available_memory_mb * 1024 * 1024;
540
541 let bytes_per_var = std::mem::size_of::<f64>() * 8; let max_vars_in_memory = (available_bytes / bytes_per_var).min(problem_size);
544
545 let block_size = if estimated_sparsity < 0.1 {
547 (max_vars_in_memory / 4).max(1000)
549 } else {
550 (max_vars_in_memory / 10).max(100)
552 };
553
554 let use_disk = problem_size >= 1_000_000 || available_memory_mb < 512;
556
557 let refinement_passes = if problem_size > 10_000_000 {
559 5
560 } else if problem_size > 1_000_000 {
561 4
562 } else {
563 3
564 };
565
566 AdvancedScaleOptions {
567 memory_options: super::memory_efficient::create_memory_efficient_optimizer(
568 max_vars_in_memory,
569 available_memory_mb / 2,
570 ),
571 sparse_options: SparseFiniteDiffOptions {
572 max_group_size: (estimated_sparsity * 1000.0) as usize,
573 ..Default::default()
574 },
575 max_variables_in_memory: max_vars_in_memory,
576 block_size,
577 refinement_passes,
578 use_disk_storage: use_disk,
579 mmap_threshold: available_bytes / (2 * std::mem::size_of::<f64>()),
580 compression_level: if available_memory_mb < 256 { 6 } else { 3 },
581 }
582}
583
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;

    // A problem below `max_variables_in_memory` should be delegated to the
    // in-memory L-BFGS path and still drive a simple quadratic to its
    // minimum at the origin.
    #[test]
    fn test_advanced_scale_small_problem() {
        let quadratic = |x: &ArrayView1<f64>| -> f64 { x.mapv(|xi| xi.powi(2)).sum() };

        let n = 50;
        let x0 = Array1::ones(n);
        let options = AdvancedScaleOptions::default();

        let result = minimize_advanced_scale(quadratic, x0, &options).unwrap();

        assert!(result.success);
        // Loose tolerance: the sub-optimizer stops on its own gtol.
        for i in 0..n {
            assert_abs_diff_eq!(result.x[i], 0.0, epsilon = 1e-3);
        }
    }

    // 1000 variables in blocks of 100 must yield 10 full-size blocks with
    // contiguous, non-overlapping index ranges.
    #[test]
    fn test_variable_blocks() {
        let blocks = create_variable_blocks(1000, 100);
        assert_eq!(blocks.len(), 10);
        assert_eq!(blocks[0].start_idx, 0);
        assert_eq!(blocks[0].size, 100);
        assert_eq!(blocks[9].start_idx, 900);
        assert_eq!(blocks[9].size, 100);
    }

    // Sanity-check the auto-tuner: a million-variable problem should select
    // positive sizes, at least the minimum pass count, and disk storage.
    #[test]
    fn test_auto_parameter_selection() {
        let options = create_advanced_scale_optimizer(1_000_000, 1024, 0.05);

        assert!(options.max_variables_in_memory > 0);
        assert!(options.block_size > 0);
        assert!(options.refinement_passes >= 3);
        assert!(options.use_disk_storage);
    }
}