scirs2_linalg/parallel/mod.rs

//! Parallel processing utilities for scirs2_linalg: thread-pool management,
//! work-stealing schedulers, and global worker configuration.

use std::sync::Mutex;

pub mod thread_pools;
pub mod work_stealing;

pub mod adaptive;
pub mod advanced_work_stealing;
pub mod affinity;
pub mod algorithms;
pub mod iter;
pub mod numa;
pub mod scheduler;
pub mod thread_pool;

pub use thread_pools::{
    get_global_manager, AdvancedPerformanceStats, AdvancedPerformanceThreadPool,
    AdvancedThreadPoolConfig, AffinityStrategy, AnomalySeverity, AnomalyType,
    CacheAllocationPolicy, DecompositionType, DynamicSizingConfig, DynamicThreadManager,
    IterativeSolverType, MemoryMetrics, MonitoringConfig, OperationType, PerformanceAnomaly,
    PredictionModelParams, ProfileMetrics, ResourceIsolationConfig, ResourceUsagePattern,
    ScalingDecision, ScalingReason, ScopedThreadPool, ThreadPoolConfig, ThreadPoolManager,
    ThreadPoolProfile, ThreadPoolProfiler, ThreadPoolStats, WorkloadAdaptationConfig,
    WorkloadCharacteristics, WorkloadPattern, WorkloadPredictor,
};
pub use work_stealing::{
    AdaptiveChunking, AdaptiveChunkingStats, CacheAwareStrategy, CacheAwareWorkStealer,
    CacheLocalityOptimizer, CacheOptimizationRecommendations, ChunkPerformance,
    LoadBalancingParams, MatrixOperationType, MemoryAccessPattern, NumaTopology,
    OptimizedSchedulerStats, OptimizedWorkStealingScheduler, PerformanceMonitor, PerformanceStats,
    SchedulerStats, StealingStrategy, WorkComplexity, WorkItem, WorkPriority,
    WorkStealingScheduler,
};

pub use work_stealing::matrix_ops::{
    parallel_band_solve, parallel_block_gemm, parallel_cholesky_work_stealing,
    parallel_eigvalsh_work_stealing, parallel_gemm_work_stealing, parallel_hessenberg_reduction,
    parallel_lu_work_stealing, parallel_matvec_work_stealing, parallel_power_iteration,
    parallel_qr_work_stealing, parallel_svd_work_stealing,
};

pub use work_stealing::parallel_gemm_cache_aware;

/// Global worker-count override shared by parallel operations in this crate.
static GLOBAL_WORKERS: Mutex<Option<usize>> = Mutex::new(None);

/// Set the global worker count for parallel operations.
///
/// `Some(n)` stores the override and exports it through the `OMP_NUM_THREADS`
/// environment variable so that OpenMP-backed BLAS/LAPACK routines pick it up;
/// `None` clears both the override and the variable.
#[allow(dead_code)]
pub fn set_global_workers(workers: Option<usize>) {
    if let Ok(mut global) = GLOBAL_WORKERS.lock() {
        *global = workers;

        if let Some(num_workers) = workers {
            std::env::set_var("OMP_NUM_THREADS", num_workers.to_string());
        } else {
            std::env::remove_var("OMP_NUM_THREADS");
        }
    }
}

/// Return the current global worker count, or `None` when no override is set
/// (or the lock is poisoned).
#[allow(dead_code)]
pub fn get_global_workers() -> Option<usize> {
    GLOBAL_WORKERS.lock().ok().and_then(|global| *global)
}

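// Illustrative only, not part of the original file: a minimal sketch of how a
// caller might drive the set/get pair above. Because it mutates the
// process-wide OMP_NUM_THREADS variable, it assumes single-threaded test
// execution (e.g. `cargo test -- --test-threads=1`).
#[cfg(test)]
mod global_workers_sketch {
    use super::*;

    #[test]
    fn set_then_get_round_trips() {
        set_global_workers(Some(8));
        assert_eq!(get_global_workers(), Some(8));
        assert_eq!(std::env::var("OMP_NUM_THREADS").as_deref(), Ok("8"));

        // Clearing the override also removes the environment variable.
        set_global_workers(None);
        assert_eq!(get_global_workers(), None);
        assert!(std::env::var("OMP_NUM_THREADS").is_err());
    }
}
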
/// Resolve the worker count to use for an operation.
///
/// An explicit `workers` value takes precedence and is exported through
/// `OMP_NUM_THREADS`; otherwise the global override (if any) is applied and
/// returned.
#[allow(dead_code)]
pub fn configure_workers(workers: Option<usize>) -> Option<usize> {
    match workers {
        Some(count) => {
            std::env::set_var("OMP_NUM_THREADS", count.to_string());
            Some(count)
        }
        None => {
            let global_workers = get_global_workers();
            if let Some(count) = global_workers {
                std::env::set_var("OMP_NUM_THREADS", count.to_string());
            }
            global_workers
        }
    }
}

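// Illustrative only, not part of the original file: `configure_workers` gives
// an explicit count precedence over the global override and falls back to the
// global value when called with `None`. Same single-threaded-test caveat as
// above, since it touches OMP_NUM_THREADS.
#[cfg(test)]
mod configure_workers_sketch {
    use super::*;

    #[test]
    fn explicit_count_beats_global_fallback() {
        set_global_workers(Some(2));

        // An explicit request wins and is returned unchanged.
        assert_eq!(configure_workers(Some(6)), Some(6));

        // With no explicit request, the global override is used.
        assert_eq!(configure_workers(None), Some(2));

        set_global_workers(None);
    }
}
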
/// Configuration for worker-based parallel operations.
#[derive(Debug, Clone)]
pub struct WorkerConfig {
    /// Number of workers, or `None` to fall back to the global setting.
    pub workers: Option<usize>,
    /// Minimum problem size before an operation runs in parallel.
    pub parallel_threshold: usize,
    /// Chunk size used to divide work among workers.
    pub chunksize: usize,
}

impl Default for WorkerConfig {
    fn default() -> Self {
        Self {
            workers: None,
            parallel_threshold: 1000,
            chunksize: 64,
        }
    }
}

impl WorkerConfig {
    /// Create a configuration with default settings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Set the number of workers.
    pub fn with_workers(mut self, workers: usize) -> Self {
        self.workers = Some(workers);
        self
    }

    /// Set the size threshold above which operations run in parallel.
    pub fn with_threshold(mut self, threshold: usize) -> Self {
        self.parallel_threshold = threshold;
        self
    }

    /// Set the chunk size used to divide work.
    pub fn with_chunksize(mut self, chunksize: usize) -> Self {
        self.chunksize = chunksize;
        self
    }

    /// Apply this configuration to the process-wide worker settings.
    pub fn apply(&self) {
        configure_workers(self.workers);
    }
}

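// Illustrative only, not part of the original file: typical builder-style use
// of `WorkerConfig`. Note that `apply` only forwards the worker count; the
// threshold and chunk size are presumably consumed by the parallel kernels
// re-exported above.
#[cfg(test)]
mod worker_config_sketch {
    use super::*;

    #[test]
    fn builder_sets_all_fields() {
        let config = WorkerConfig::new()
            .with_workers(4)
            .with_threshold(10_000)
            .with_chunksize(128);

        assert_eq!(config.workers, Some(4));
        assert_eq!(config.parallel_threshold, 10_000);
        assert_eq!(config.chunksize, 128);

        // Exports OMP_NUM_THREADS=4 for downstream consumers.
        config.apply();
        set_global_workers(None);
    }
}
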
/// RAII guard that temporarily overrides the global worker count and restores
/// the previous value when dropped.
pub struct ScopedWorkers {
    previous_workers: Option<usize>,
}

impl ScopedWorkers {
    /// Override the global worker count for the lifetime of the guard.
    pub fn new(workers: Option<usize>) -> Self {
        let previous_workers = get_global_workers();
        set_global_workers(workers);
        Self { previous_workers }
    }
}

impl Drop for ScopedWorkers {
    fn drop(&mut self) {
        set_global_workers(self.previous_workers);
    }
}
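
// Illustrative only, not part of the original file: the guard restores
// whatever setting was active before it was created, because restoration
// happens in `Drop` (so it also runs on early return or panic unwind).
#[cfg(test)]
mod scoped_workers_sketch {
    use super::*;

    #[test]
    fn guard_restores_previous_setting() {
        set_global_workers(Some(2));
        {
            let _guard = ScopedWorkers::new(Some(8));
            assert_eq!(get_global_workers(), Some(8));
        } // `_guard` is dropped here and the previous value is restored.
        assert_eq!(get_global_workers(), Some(2));

        set_global_workers(None);
    }
}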