use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
use std::hash::{Hash, Hasher};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant, SystemTime};

use quantrs2_circuit::{optimization::analysis::CircuitAnalyzer, prelude::Circuit};
use quantrs2_core::{
    error::{QuantRS2Error, QuantRS2Result},
    qubit::QubitId,
};

use crate::{
    backend_traits::{query_backend_capabilities, BackendCapabilities, BackendFeatures},
    translation::HardwareBackend,
    CircuitExecutor, CircuitResult, DeviceError, DeviceResult, QuantumDevice,
};

#[cfg(feature = "scirs2")]
use scirs2_graph::{dijkstra_path, minimum_spanning_tree, Graph};
#[cfg(feature = "scirs2")]
use scirs2_optimize::{minimize, OptimizeResult};
#[cfg(feature = "scirs2")]
use scirs2_stats::{corrcoef, mean, std};

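/// Minimal stand-ins for the SciRS2 statistics and optimization helpers, used when the
/// `scirs2` feature is disabled, so callers compile and run with neutral results.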
#[cfg(not(feature = "scirs2"))]
mod fallback_scirs2 {
    pub fn mean(_data: &[f64]) -> f64 {
        0.0
    }
    pub fn std(_data: &[f64]) -> f64 {
        1.0
    }
    pub fn correlation(_x: &[f64], _y: &[f64]) -> f64 {
        0.0
    }

    pub struct OptimizeResult {
        pub x: Vec<f64>,
        pub success: bool,
    }

    pub fn minimize<F>(_func: F, _x0: Vec<f64>, _bounds: Option<Vec<(f64, f64)>>) -> OptimizeResult
    where
        F: Fn(&[f64]) -> f64,
    {
        OptimizeResult {
            x: vec![0.0],
            success: false,
        }
    }
}

#[cfg(not(feature = "scirs2"))]
use fallback_scirs2::*;

use serde::{Deserialize, Serialize};
use tokio::sync::{mpsc, Semaphore};
use uuid::Uuid;

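/// Priority levels for job scheduling; lower discriminants are served first.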
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default,
)]
pub enum JobPriority {
    Critical = 0,
    High = 1,
    #[default]
    Normal = 2,
    Low = 3,
    BestEffort = 4,
}

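/// Lifecycle states a job moves through from submission to completion.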
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum JobStatus {
    Pending,
    Validating,
    Scheduled,
    Running,
    Completed,
    Failed,
    Cancelled,
    TimedOut,
    Retrying,
    Paused,
}

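/// Strategy used to pick the next job(s) from the priority queues.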
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SchedulingStrategy {
    PriorityFIFO,
    ShortestJobFirst,
    ShortestRemainingTimeFirst,
    FairShare,
    PriorityRoundRobin,
    Backfill,
    EarliestDeadlineFirst,
    RateMonotonic,
    MLOptimized,
    MultiObjectiveOptimized,
    ReinforcementLearning,
    GeneticAlgorithm,
    GameTheoreticFair,
    EnergyAware,
    DeadlineAwareSLA,
    Custom(String),
}

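/// Strategy used to choose a backend for a job once it has been selected from the queue.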
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum AllocationStrategy {
    FirstFit,
    BestFit,
    WorstFit,
    LeastLoaded,
    MostLoaded,
    RoundRobin,
    WeightedRoundRobin,
    CostOptimized,
    PerformanceOptimized,
    EnergyEfficient,
    SciRS2Optimized,
    MultiObjectiveOptimized,
    LocalityAware,
    FaultTolerant,
    PredictiveAllocation,
}

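/// Per-job configuration: priority, time limits, retries, resource needs, and constraints.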
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct JobConfig {
    pub priority: JobPriority,
    pub max_execution_time: Duration,
    pub max_queue_time: Option<Duration>,
    pub retry_attempts: u32,
    pub retry_delay: Duration,
    pub resource_requirements: ResourceRequirements,
    pub preferred_backends: Vec<HardwareBackend>,
    pub tags: HashMap<String, String>,
    pub dependencies: Vec<JobId>,
    pub deadline: Option<SystemTime>,
    pub cost_limit: Option<f64>,
}

impl Default for JobConfig {
    fn default() -> Self {
        Self {
            priority: JobPriority::Normal,
            max_execution_time: Duration::from_secs(3600),
            max_queue_time: Some(Duration::from_secs(86400)),
            retry_attempts: 3,
            retry_delay: Duration::from_secs(60),
            resource_requirements: ResourceRequirements::default(),
            preferred_backends: vec![],
            tags: HashMap::new(),
            dependencies: vec![],
            deadline: None,
            cost_limit: None,
        }
    }
}

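/// Hardware and software resources a job requires from a backend.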
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceRequirements {
    pub min_qubits: usize,
    pub max_depth: Option<usize>,
    pub min_fidelity: Option<f64>,
    pub required_connectivity: Option<String>,
    pub memory_mb: Option<u64>,
    pub cpu_cores: Option<u32>,
    pub required_features: Vec<String>,
}

impl Default for ResourceRequirements {
    fn default() -> Self {
        Self {
            min_qubits: 1,
            max_depth: None,
            min_fidelity: None,
            required_connectivity: None,
            memory_mb: None,
            cpu_cores: None,
            required_features: vec![],
        }
    }
}

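/// Unique identifier for a submitted job (a UUID string by default).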
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct JobId(pub String);

impl Default for JobId {
    fn default() -> Self {
        Self::new()
    }
}

impl JobId {
    pub fn new() -> Self {
        Self(Uuid::new_v4().to_string())
    }

    pub const fn from_string(s: String) -> Self {
        Self(s)
    }
}

impl std::fmt::Display for JobId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

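/// A submitted quantum job: the circuit, its configuration, and its scheduling state.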
#[derive(Debug, Clone)]
pub struct QuantumJob<const N: usize> {
    pub id: JobId,
    pub config: JobConfig,
    pub circuit: Circuit<N>,
    pub shots: usize,
    pub submitted_at: SystemTime,
    pub status: JobStatus,
    pub execution_history: Vec<JobExecution>,
    pub metadata: HashMap<String, String>,
    pub user_id: String,
    pub group_id: Option<String>,
    pub estimated_duration: Option<Duration>,
    pub assigned_backend: Option<HardwareBackend>,
    pub estimated_cost: Option<f64>,
    pub actual_cost: Option<f64>,
}

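/// Record of a single execution attempt of a job on a backend.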
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct JobExecution {
    pub attempt: u32,
    pub backend: HardwareBackend,
    pub started_at: SystemTime,
    pub ended_at: Option<SystemTime>,
    pub result: Option<CircuitResult>,
    pub error: Option<String>,
    pub metrics: ExecutionMetrics,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutionMetrics {
    pub queue_time: Duration,
    pub execution_time: Option<Duration>,
    pub resource_utilization: f64,
    pub cost: Option<f64>,
    pub quality_metrics: HashMap<String, f64>,
}

impl Default for ExecutionMetrics {
    fn default() -> Self {
        Self {
            queue_time: Duration::from_secs(0),
            execution_time: None,
            resource_utilization: 0.0,
            cost: None,
            quality_metrics: HashMap::new(),
        }
    }
}

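/// Rolling performance statistics tracked per backend.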
#[derive(Debug, Clone)]
pub struct BackendPerformance {
    pub backend: HardwareBackend,
    pub queue_length: usize,
    pub avg_queue_time: Duration,
    pub avg_execution_time: Duration,
    pub success_rate: f64,
    pub utilization: f64,
    pub avg_cost: Option<f64>,
    pub last_updated: SystemTime,
    pub history: VecDeque<PerformanceSnapshot>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceSnapshot {
    pub timestamp: SystemTime,
    pub queue_length: usize,
    pub utilization: f64,
    pub avg_queue_time_secs: f64,
    pub success_rate: f64,
}

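/// Aggregated queue statistics across all priorities and backends.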
#[derive(Debug, Clone)]
pub struct QueueAnalytics {
    pub total_queue_length: usize,
    pub queue_by_priority: HashMap<JobPriority, usize>,
    pub queue_by_backend: HashMap<HardwareBackend, usize>,
    pub predicted_queue_times: HashMap<HardwareBackend, Duration>,
    pub system_load: f64,
    pub throughput: f64,
    pub avg_wait_time: Duration,
}

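/// Tunable parameters controlling scheduling and allocation behavior.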
#[derive(Debug, Clone)]
pub struct SchedulingParams {
    pub strategy: SchedulingStrategy,
    pub allocation_strategy: AllocationStrategy,
    pub time_slice: Duration,
    pub max_jobs_per_user: Option<usize>,
    pub fair_share_weights: HashMap<String, f64>,
    pub backfill_threshold: Duration,
    pub load_balance_factor: f64,
    pub scirs2_params: SciRS2SchedulingParams,
}

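/// Parameters for the SciRS2-powered (ML/statistical) scheduling optimizations.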
#[derive(Debug, Clone)]
pub struct SciRS2SchedulingParams {
    pub enabled: bool,
    pub objective_weights: HashMap<String, f64>,
    pub learning_window: Duration,
    pub optimization_frequency: Duration,
    pub model_params: HashMap<String, f64>,
    pub ml_algorithm: MLAlgorithm,
    pub multi_objective_weights: MultiObjectiveWeights,
    pub rl_params: RLParameters,
    pub ga_params: GAParameters,
    pub enable_prediction: bool,
    pub retrain_frequency: Duration,
    pub feature_params: FeatureParams,
}

impl Default for SciRS2SchedulingParams {
    fn default() -> Self {
        Self {
            enabled: true,
            objective_weights: [
                ("throughput".to_string(), 0.25),
                ("fairness".to_string(), 0.25),
                ("utilization".to_string(), 0.2),
                ("cost".to_string(), 0.15),
                ("energy".to_string(), 0.1),
                ("sla_compliance".to_string(), 0.05),
            ]
            .into_iter()
            .collect(),
            learning_window: Duration::from_secs(86400),
            optimization_frequency: Duration::from_secs(180),
            model_params: HashMap::new(),
            ml_algorithm: MLAlgorithm::EnsembleMethod,
            multi_objective_weights: MultiObjectiveWeights::default(),
            rl_params: RLParameters::default(),
            ga_params: GAParameters::default(),
            enable_prediction: true,
            retrain_frequency: Duration::from_secs(3600),
            feature_params: FeatureParams::default(),
        }
    }
}

impl Default for SchedulingParams {
    fn default() -> Self {
        Self {
            strategy: SchedulingStrategy::MLOptimized,
            allocation_strategy: AllocationStrategy::SciRS2Optimized,
            time_slice: Duration::from_secs(60),
            max_jobs_per_user: Some(100),
            fair_share_weights: HashMap::new(),
            backfill_threshold: Duration::from_secs(300),
            load_balance_factor: 0.8,
            scirs2_params: SciRS2SchedulingParams::default(),
        }
    }
}

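/// Central job scheduler: queues submitted jobs by priority, matches them to registered
/// hardware backends, and tracks running jobs, execution history, and backend performance.
///
/// A minimal usage sketch (illustrative, not a doc-test; assumes an async context, a
/// `Circuit<N>` built elsewhere, and some `backend: HardwareBackend` value):
///
/// ```ignore
/// let scheduler = QuantumJobScheduler::new(SchedulingParams::default());
/// scheduler.register_backend(backend).await?;
/// let job_id = scheduler
///     .submit_job(circuit, 1024, JobConfig::default(), "user-1".to_string())
///     .await?;
/// let analytics = scheduler.get_queue_analytics().await?;
/// ```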
pub struct QuantumJobScheduler {
    params: Arc<RwLock<SchedulingParams>>,
    job_queues: Arc<Mutex<BTreeMap<JobPriority, VecDeque<JobId>>>>,
    jobs: Arc<RwLock<HashMap<JobId, Box<dyn std::any::Any + Send + Sync>>>>,
    backend_performance: Arc<RwLock<HashMap<HardwareBackend, BackendPerformance>>>,
    backends: Arc<RwLock<HashSet<HardwareBackend>>>,
    running_jobs: Arc<RwLock<HashMap<JobId, (HardwareBackend, SystemTime)>>>,
    execution_history: Arc<RwLock<Vec<JobExecution>>>,
    user_shares: Arc<RwLock<HashMap<String, UserShare>>>,
    scheduler_running: Arc<Mutex<bool>>,
    event_sender: mpsc::UnboundedSender<SchedulerEvent>,
    performance_predictor: Arc<Mutex<PerformancePredictor>>,
    resource_manager: Arc<Mutex<ResourceManager>>,
}

#[derive(Debug, Clone)]
struct UserShare {
    user_id: String,
    allocated_share: f64,
    used_share: f64,
    jobs_running: usize,
    jobs_queued: usize,
    last_updated: SystemTime,
}

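/// Events emitted by the scheduler for observers (job lifecycle and backend status changes).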
#[derive(Debug, Clone)]
pub enum SchedulerEvent {
    JobSubmitted(JobId),
    JobScheduled(JobId, HardwareBackend),
    JobStarted(JobId),
    JobCompleted(JobId, CircuitResult),
    JobFailed(JobId, String),
    JobCancelled(JobId),
    BackendStatusChanged(HardwareBackend, BackendStatus),
    QueueAnalyticsUpdated(QueueAnalytics),
}

#[derive(Debug, Clone)]
pub enum BackendStatus {
    Available,
    Busy,
    Maintenance,
    Offline,
    Error(String),
}

struct PerformancePredictor {
    history: VecDeque<PredictionDataPoint>,
    model_params: HashMap<String, f64>,
    accuracy_metrics: HashMap<String, f64>,
}

#[derive(Debug, Clone)]
struct PredictionDataPoint {
    timestamp: SystemTime,
    backend: HardwareBackend,
    queue_length: usize,
    job_complexity: f64,
    execution_time: Duration,
    success: bool,
}

struct ResourceManager {
    available_resources: HashMap<HardwareBackend, ResourceCapacity>,
    reservations: HashMap<JobId, ResourceReservation>,
    utilization_history: VecDeque<ResourceSnapshot>,
}

#[derive(Debug, Clone)]
struct ResourceCapacity {
    qubits: usize,
    max_circuit_depth: Option<usize>,
    memory_mb: u64,
    cpu_cores: u32,
    concurrent_jobs: usize,
    features: HashSet<String>,
}

#[derive(Debug, Clone)]
struct ResourceReservation {
    job_id: JobId,
    backend: HardwareBackend,
    resources: ResourceRequirements,
    reserved_at: SystemTime,
    expires_at: SystemTime,
}

#[derive(Debug, Clone)]
struct ResourceSnapshot {
    timestamp: SystemTime,
    backend: HardwareBackend,
    utilization: f64,
    active_jobs: usize,
}

impl QuantumJobScheduler {
    pub fn new(params: SchedulingParams) -> Self {
        let (event_sender, _) = mpsc::unbounded_channel();

        Self {
            params: Arc::new(RwLock::new(params)),
            job_queues: Arc::new(Mutex::new(BTreeMap::new())),
            jobs: Arc::new(RwLock::new(HashMap::new())),
            backend_performance: Arc::new(RwLock::new(HashMap::new())),
            backends: Arc::new(RwLock::new(HashSet::new())),
            running_jobs: Arc::new(RwLock::new(HashMap::new())),
            execution_history: Arc::new(RwLock::new(Vec::new())),
            user_shares: Arc::new(RwLock::new(HashMap::new())),
            scheduler_running: Arc::new(Mutex::new(false)),
            event_sender,
            performance_predictor: Arc::new(Mutex::new(PerformancePredictor::new())),
            resource_manager: Arc::new(Mutex::new(ResourceManager::new())),
        }
    }

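    /// Registers a backend, seeding its performance record and resource capacity entry.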
    pub async fn register_backend(&self, backend: HardwareBackend) -> DeviceResult<()> {
        let mut backends = self
            .backends
            .write()
            .expect("Failed to acquire write lock on backends in register_backend");
        backends.insert(backend);

        let mut performance = self
            .backend_performance
            .write()
            .expect("Failed to acquire write lock on backend_performance in register_backend");
        performance.insert(
            backend,
            BackendPerformance {
                backend,
                queue_length: 0,
                avg_queue_time: Duration::from_secs(0),
                avg_execution_time: Duration::from_secs(0),
                success_rate: 1.0,
                utilization: 0.0,
                avg_cost: None,
                last_updated: SystemTime::now(),
                history: VecDeque::new(),
            },
        );

        let capabilities = query_backend_capabilities(backend);
        let mut resource_manager = self
            .resource_manager
            .lock()
            .expect("Failed to acquire lock on resource_manager in register_backend");
        resource_manager.available_resources.insert(
            backend,
            ResourceCapacity {
                qubits: capabilities.features.max_qubits,
                max_circuit_depth: capabilities.features.max_depth,
                memory_mb: 8192,
                cpu_cores: 4,
                concurrent_jobs: 10,
                features: capabilities
                    .features
                    .supported_measurement_bases
                    .into_iter()
                    .collect(),
            },
        );

        Ok(())
    }

    pub fn get_available_backends(&self) -> Vec<HardwareBackend> {
        let backends = self
            .backends
            .read()
            .expect("Failed to acquire read lock on backends in get_available_backends");
        backends.iter().copied().collect()
    }

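    /// Validates, cost/time-estimates, and enqueues a job; returns its new `JobId`.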
    pub async fn submit_job<const N: usize>(
        &self,
        circuit: Circuit<N>,
        shots: usize,
        config: JobConfig,
        user_id: String,
    ) -> DeviceResult<JobId> {
        let job_id = JobId::new();
        let now = SystemTime::now();

        self.validate_job_config(&config).await?;

        let estimated_duration = self
            .estimate_execution_time(&circuit, shots, &config)
            .await?;
        let estimated_cost = self.estimate_cost(&circuit, shots, &config).await?;

        let job = QuantumJob {
            id: job_id.clone(),
            config,
            circuit,
            shots,
            submitted_at: now,
            status: JobStatus::Pending,
            execution_history: vec![],
            metadata: HashMap::new(),
            user_id: user_id.clone(),
            group_id: None,
            estimated_duration: Some(estimated_duration),
            assigned_backend: None,
            estimated_cost: Some(estimated_cost),
            actual_cost: None,
        };

        let mut jobs = self
            .jobs
            .write()
            .expect("Failed to acquire write lock on jobs in submit_job");
        jobs.insert(job_id.clone(), Box::new(job.clone()));
        drop(jobs);

        let mut queues = self
            .job_queues
            .lock()
            .expect("Failed to acquire lock on job_queues in submit_job");
        let queue = queues.entry(job.config.priority).or_default();
        queue.push_back(job_id.clone());
        drop(queues);

        self.update_user_share(&user_id, 1, 0).await;

        let _ = self
            .event_sender
            .send(SchedulerEvent::JobSubmitted(job_id.clone()));

        self.ensure_scheduler_running().await;

        Ok(job_id)
    }

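    /// Cancels a queued or running job; returns `true` if the job was found and cancelled.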
    pub async fn cancel_job(&self, job_id: &JobId) -> DeviceResult<bool> {
        let mut queues = self
            .job_queues
            .lock()
            .expect("Failed to acquire lock on job_queues in cancel_job");
        for queue in queues.values_mut() {
            if let Some(pos) = queue.iter().position(|id| id == job_id) {
                queue.remove(pos);
                drop(queues);

                self.update_job_status(job_id, JobStatus::Cancelled).await?;

                let _ = self
                    .event_sender
                    .send(SchedulerEvent::JobCancelled(job_id.clone()));
                return Ok(true);
            }
        }
        drop(queues);

        let running_jobs = self
            .running_jobs
            .read()
            .expect("Failed to acquire read lock on running_jobs in cancel_job");
        if running_jobs.contains_key(job_id) {
            self.update_job_status(job_id, JobStatus::Cancelled).await?;
            return Ok(true);
        }

        Ok(false)
    }

    pub async fn get_job_status<const N: usize>(
        &self,
        job_id: &JobId,
    ) -> DeviceResult<Option<QuantumJob<N>>> {
        let jobs = self
            .jobs
            .read()
            .expect("Failed to acquire read lock on jobs in get_job_status");
        if let Some(job_any) = jobs.get(job_id) {
            if let Some(job) = job_any.downcast_ref::<QuantumJob<N>>() {
                return Ok(Some(job.clone()));
            }
        }
        Ok(None)
    }

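    /// Returns a snapshot of queue lengths, predicted wait times, load, and throughput.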
    pub async fn get_queue_analytics(&self) -> DeviceResult<QueueAnalytics> {
        let queues = self
            .job_queues
            .lock()
            .expect("Failed to acquire lock on job_queues in get_queue_analytics");
        let backend_performance = self
            .backend_performance
            .read()
            .expect("Failed to acquire read lock on backend_performance in get_queue_analytics");

        let total_queue_length = queues.values().map(|q| q.len()).sum();

        let queue_by_priority = queues
            .iter()
            .map(|(priority, queue)| (*priority, queue.len()))
            .collect();

        let queue_by_backend = backend_performance
            .iter()
            .map(|(backend, perf)| (*backend, perf.queue_length))
            .collect();

        let predicted_queue_times = self.predict_queue_times(&backend_performance).await;

        let system_load = self.calculate_system_load(&backend_performance).await;
        let throughput = self.calculate_throughput().await;
        let avg_wait_time = self.calculate_average_wait_time().await;

        Ok(QueueAnalytics {
            total_queue_length,
            queue_by_priority,
            queue_by_backend,
            predicted_queue_times,
            system_load,
            throughput,
            avg_wait_time,
        })
    }

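    /// Starts the background scheduling, monitoring, and (optionally) SciRS2 optimization loops.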
    pub async fn start_scheduler(&self) -> DeviceResult<()> {
        let mut running = self
            .scheduler_running
            .lock()
            .expect("Failed to acquire lock on scheduler_running in start_scheduler");
        if *running {
            return Err(DeviceError::APIError(
                "Scheduler already running".to_string(),
            ));
        }
        *running = true;
        drop(running);

        let scheduler = Arc::new(self.clone());
        tokio::spawn(async move {
            scheduler.scheduling_loop().await;
        });

        let scheduler = Arc::new(self.clone());
        tokio::spawn(async move {
            scheduler.performance_monitoring_loop().await;
        });

        let params = self
            .params
            .read()
            .expect("Failed to acquire read lock on params in start_scheduler");
        if params.scirs2_params.enabled {
            drop(params);
            let scheduler = Arc::new(self.clone());
            tokio::spawn(async move {
                scheduler.scirs2_optimization_loop().await;
            });
        }

        Ok(())
    }

    pub async fn stop_scheduler(&self) -> DeviceResult<()> {
        let mut running = self
            .scheduler_running
            .lock()
            .expect("Failed to acquire lock on scheduler_running in stop_scheduler");
        *running = false;
        Ok(())
    }

    async fn validate_job_config(&self, config: &JobConfig) -> DeviceResult<()> {
        let backends = self
            .backends
            .read()
            .expect("Failed to acquire read lock on backends in validate_job_config");
        if backends.is_empty() {
            return Err(DeviceError::APIError("No backends available".to_string()));
        }

        let resource_manager = self
            .resource_manager
            .lock()
            .expect("Failed to acquire lock on resource_manager in validate_job_config");
        let mut can_satisfy = false;

        for (backend, capacity) in &resource_manager.available_resources {
            if capacity.qubits >= config.resource_requirements.min_qubits {
                if let Some(max_depth) = config.resource_requirements.max_depth {
                    if let Some(backend_max_depth) = capacity.max_circuit_depth {
                        if max_depth > backend_max_depth {
                            continue;
                        }
                    }
                }
                can_satisfy = true;
                break;
            }
        }

        if !can_satisfy {
            return Err(DeviceError::APIError(
                "No backend can satisfy resource requirements".to_string(),
            ));
        }

        Ok(())
    }

    async fn estimate_execution_time<const N: usize>(
        &self,
        circuit: &Circuit<N>,
        shots: usize,
        config: &JobConfig,
    ) -> DeviceResult<Duration> {
        let analyzer = CircuitAnalyzer::new();
        let metrics = analyzer
            .analyze(circuit)
            .map_err(|e| DeviceError::APIError(format!("Circuit analysis error: {e:?}")))?;
        let circuit_complexity = (metrics.gate_count as f64).mul_add(0.1, metrics.depth as f64);
        let shots_factor = (shots as f64).log10();

        let base_time = Duration::from_secs((circuit_complexity * shots_factor) as u64);

        let backend_performance = self.backend_performance.read().expect(
            "Failed to acquire read lock on backend_performance in estimate_execution_time",
        );
        let avg_execution_time = if backend_performance.is_empty() {
            Duration::from_secs(60)
        } else {
            let total_time: Duration = backend_performance
                .values()
                .map(|p| p.avg_execution_time)
                .sum();
            total_time / backend_performance.len() as u32
        };

        let estimated = Duration::from_millis(
            u128::midpoint(base_time.as_millis(), avg_execution_time.as_millis())
                .try_into()
                .expect(
                    "Failed to convert estimated execution time to u64 in estimate_execution_time",
                ),
        );

        Ok(estimated)
    }

    async fn estimate_cost<const N: usize>(
        &self,
        circuit: &Circuit<N>,
        shots: usize,
        config: &JobConfig,
    ) -> DeviceResult<f64> {
        let analyzer = CircuitAnalyzer::new();
        let metrics = analyzer
            .analyze(circuit)
            .map_err(|e| DeviceError::APIError(format!("Circuit analysis error: {e:?}")))?;
        let circuit_complexity = metrics.depth as f64 + metrics.gate_count as f64;
        let base_cost = circuit_complexity * shots as f64 * 0.001;

        let priority_multiplier = match config.priority {
            JobPriority::Critical => 3.0,
            JobPriority::High => 2.0,
            JobPriority::Normal => 1.0,
            JobPriority::Low => 0.7,
            JobPriority::BestEffort => 0.5,
        };

        Ok(base_cost * priority_multiplier)
    }

    async fn update_user_share(&self, user_id: &str, queued_delta: i32, running_delta: i32) {
        let mut user_shares = self
            .user_shares
            .write()
            .expect("Failed to acquire write lock on user_shares in update_user_share");
        let share = user_shares
            .entry(user_id.to_string())
            .or_insert_with(|| UserShare {
                user_id: user_id.to_string(),
                allocated_share: 1.0,
                used_share: 0.0,
                jobs_running: 0,
                jobs_queued: 0,
                last_updated: SystemTime::now(),
            });

        share.jobs_queued = (share.jobs_queued as i32 + queued_delta).max(0) as usize;
        share.jobs_running = (share.jobs_running as i32 + running_delta).max(0) as usize;
        share.last_updated = SystemTime::now();
    }

    async fn update_job_status(&self, job_id: &JobId, status: JobStatus) -> DeviceResult<()> {
        let mut jobs = self
            .jobs
            .write()
            .expect("Failed to acquire write lock on jobs in update_job_status");
        if let Some(_job_any) = jobs.get_mut(job_id) {
            // Jobs are stored type-erased (Box<dyn Any>), so writing the status field would
            // require downcasting to the concrete QuantumJob<N>; N is not known here, so the
            // stored job is left untouched at this level.
            let _ = status;
        }
        Ok(())
    }

    async fn ensure_scheduler_running(&self) {
        let running = self
            .scheduler_running
            .lock()
            .expect("Failed to acquire lock on scheduler_running in ensure_scheduler_running");
        if !*running {
            drop(running);
            let _ = self.start_scheduler().await;
        }
    }

    async fn predict_queue_times(
        &self,
        backend_performance: &HashMap<HardwareBackend, BackendPerformance>,
    ) -> HashMap<HardwareBackend, Duration> {
        let mut predictions = HashMap::new();

        for (backend, perf) in backend_performance {
            // Scale the backlog by the average execution time and penalize low success rates
            // (a retry-prone backend drains its queue more slowly). Computed in f64 so that
            // fractional success rates are not truncated to zero.
            let predicted_secs = (perf.queue_length as f64 * perf.avg_execution_time.as_secs_f64())
                / perf.success_rate.max(0.1);
            predictions.insert(*backend, Duration::from_secs_f64(predicted_secs));
        }

        predictions
    }

    async fn calculate_system_load(
        &self,
        backend_performance: &HashMap<HardwareBackend, BackendPerformance>,
    ) -> f64 {
        if backend_performance.is_empty() {
            return 0.0;
        }

        let total_utilization: f64 = backend_performance.values().map(|p| p.utilization).sum();

        total_utilization / backend_performance.len() as f64
    }

    async fn calculate_throughput(&self) -> f64 {
        let history = self
            .execution_history
            .read()
            .expect("Failed to acquire read lock on execution_history in calculate_throughput");
        if history.is_empty() {
            return 0.0;
        }

        let one_hour_ago = SystemTime::now() - Duration::from_secs(3600);
        let recent_completions = history
            .iter()
            .filter(|exec| exec.started_at > one_hour_ago)
            .count();

        recent_completions as f64
    }

    async fn calculate_average_wait_time(&self) -> Duration {
        let history = self.execution_history.read().expect(
            "Failed to acquire read lock on execution_history in calculate_average_wait_time",
        );
        if history.is_empty() {
            return Duration::from_secs(0);
        }

        let total_wait: Duration = history.iter().map(|exec| exec.metrics.queue_time).sum();

        total_wait / history.len() as u32
    }

    async fn scheduling_loop(&self) {
        while *self
            .scheduler_running
            .lock()
            .expect("Failed to acquire lock on scheduler_running in scheduling_loop")
        {
            if let Err(e) = self.schedule_next_jobs().await {
                eprintln!("Scheduling error: {e}");
            }

            tokio::time::sleep(Duration::from_secs(1)).await;
        }
    }

    async fn schedule_next_jobs(&self) -> DeviceResult<()> {
        let params = self
            .params
            .read()
            .expect("Failed to acquire read lock on params in schedule_next_jobs")
            .clone();

        match params.strategy {
            SchedulingStrategy::PriorityFIFO => self.schedule_priority_fifo().await,
            SchedulingStrategy::ShortestJobFirst => self.schedule_shortest_job_first().await,
            SchedulingStrategy::FairShare => self.schedule_fair_share().await,
            SchedulingStrategy::Backfill => self.schedule_backfill().await,
            SchedulingStrategy::MLOptimized => self.schedule_ml_optimized().await,
            _ => {
                // Strategies without a dedicated implementation fall back to priority FIFO.
                self.schedule_priority_fifo().await
            }
        }
    }

    async fn schedule_priority_fifo(&self) -> DeviceResult<()> {
        for priority in [
            JobPriority::Critical,
            JobPriority::High,
            JobPriority::Normal,
            JobPriority::Low,
            JobPriority::BestEffort,
        ] {
            let job_id = {
                let mut queues = self
                    .job_queues
                    .lock()
                    .expect("Failed to acquire lock on job_queues in schedule_priority_fifo");
                queues
                    .get_mut(&priority)
                    .and_then(|queue| queue.pop_front())
            };

            if let Some(job_id) = job_id {
                if let Some(backend) = self.find_best_backend(&job_id).await? {
                    self.assign_job_to_backend(&job_id, backend).await?;
                    break;
                } else {
                    // No backend is currently available; put the job back at the head of its
                    // queue and retry on the next scheduling pass.
                    let mut queues = self.job_queues.lock().expect("Failed to acquire lock on job_queues to requeue job in schedule_priority_fifo");
                    if let Some(queue) = queues.get_mut(&priority) {
                        queue.push_front(job_id);
                    }
                    break;
                }
            }
        }

        Ok(())
    }

    async fn schedule_shortest_job_first(&self) -> DeviceResult<()> {
        // Duration-based ordering is not implemented yet; delegate to priority FIFO.
        self.schedule_priority_fifo().await
    }

    async fn schedule_fair_share(&self) -> DeviceResult<()> {
        // Fair-share accounting is not implemented yet; delegate to priority FIFO.
        self.schedule_priority_fifo().await
    }

    async fn schedule_backfill(&self) -> DeviceResult<()> {
        // Backfilling is not implemented yet; delegate to priority FIFO.
        self.schedule_priority_fifo().await
    }

    async fn schedule_ml_optimized(&self) -> DeviceResult<()> {
        #[cfg(feature = "scirs2")]
        {
            self.scirs2_optimize_schedule().await
        }

        #[cfg(not(feature = "scirs2"))]
        {
            self.schedule_priority_fifo().await
        }
    }

    #[cfg(feature = "scirs2")]
    async fn scirs2_optimize_schedule(&self) -> DeviceResult<()> {
        // SciRS2-driven schedule optimization is not wired in yet; delegate to priority FIFO.
        self.schedule_priority_fifo().await
    }

1266
1267 async fn find_best_backend(&self, job_id: &JobId) -> DeviceResult<Option<HardwareBackend>> {
1268 {
1270 let jobs = self
1271 .jobs
1272 .read()
1273 .expect("Failed to acquire read lock on jobs in find_best_backend");
1274 let _job_any = jobs
1275 .get(job_id)
1276 .ok_or_else(|| DeviceError::APIError("Job not found".to_string()))?;
1277 } let backends: Vec<_> = {
1283 let backends = self
1284 .backends
1285 .read()
1286 .expect("Failed to acquire read lock on backends in find_best_backend");
1287 backends.iter().copied().collect()
1288 };
1289
1290 let allocation_strategy = {
1291 let params = self
1292 .params
1293 .read()
1294 .expect("Failed to acquire read lock on params in find_best_backend");
1295 params.allocation_strategy.clone()
1296 };
1297
1298 let backend_performance_snapshot = {
1299 let backend_performance = self
1300 .backend_performance
1301 .read()
1302 .expect("Failed to acquire read lock on backend_performance in find_best_backend");
1303 backend_performance.clone()
1304 };
1305
1306 match allocation_strategy {
1307 AllocationStrategy::FirstFit => {
1308 for backend in backends {
1310 if self.is_backend_available(backend).await {
1311 return Ok(Some(backend));
1312 }
1313 }
1314 }
1315 AllocationStrategy::BestFit => {
1316 for &backend in &backends {
1319 if self.is_backend_available(backend).await {
1320 return Ok(Some(backend));
1321 }
1322 }
1323 }
1324 AllocationStrategy::LeastLoaded => {
1325 let mut best_backend = None;
1327 let mut lowest_utilization = f64::INFINITY;
1328
1329 for (&backend, perf) in &backend_performance_snapshot {
1330 if self.is_backend_available(backend).await
1331 && perf.utilization < lowest_utilization
1332 {
1333 lowest_utilization = perf.utilization;
1334 best_backend = Some(backend);
1335 }
1336 }
1337
1338 return Ok(best_backend);
1339 }
1340 _ => {
1341 for &backend in &backends {
1343 if self.is_backend_available(backend).await {
1344 return Ok(Some(backend));
1345 }
1346 }
1347 }
1348 }
1349
1350 Ok(None)
1351 }
1352
    async fn is_backend_available(&self, backend: HardwareBackend) -> bool {
        let running_jobs = self
            .running_jobs
            .read()
            .expect("Failed to acquire read lock on running_jobs in is_backend_available");
        let backend_jobs = running_jobs.values().filter(|(b, _)| *b == backend).count();

        let resource_manager = self
            .resource_manager
            .lock()
            .expect("Failed to acquire lock on resource_manager in is_backend_available");
        resource_manager
            .available_resources
            .get(&backend)
            .is_some_and(|capacity| backend_jobs < capacity.concurrent_jobs)
    }

    async fn assign_job_to_backend(
        &self,
        job_id: &JobId,
        backend: HardwareBackend,
    ) -> DeviceResult<()> {
        {
            let mut running_jobs = self
                .running_jobs
                .write()
                .expect("Failed to acquire write lock on running_jobs in assign_job_to_backend");
            running_jobs.insert(job_id.clone(), (backend, SystemTime::now()));
        }

        self.update_job_status(job_id, JobStatus::Scheduled).await?;

        let _ = self
            .event_sender
            .send(SchedulerEvent::JobScheduled(job_id.clone(), backend));

        let job_id_clone = job_id.clone();
        let scheduler = Arc::new(self.clone());
        tokio::spawn(async move {
            let _ = scheduler.execute_job(&job_id_clone, backend).await;
        });

        Ok(())
    }

    async fn execute_job(&self, job_id: &JobId, backend: HardwareBackend) -> DeviceResult<()> {
        self.update_job_status(job_id, JobStatus::Running).await?;
        let _ = self
            .event_sender
            .send(SchedulerEvent::JobStarted(job_id.clone()));

        let execution_start = SystemTime::now();

        {
            let backends = self
                .backends
                .read()
                .expect("Failed to acquire read lock on backends in execute_job");
            if !backends.contains(&backend) {
                return Err(DeviceError::APIError("Backend not found".to_string()));
            }
        }

        // Placeholder for dispatching the circuit to the backend; the fixed sleep
        // simulates a running job.
        tokio::time::sleep(Duration::from_secs(10)).await;

        {
            let mut running_jobs = self
                .running_jobs
                .write()
                .expect("Failed to acquire write lock on running_jobs in execute_job cleanup");
            running_jobs.remove(job_id);
        }

        self.update_job_status(job_id, JobStatus::Completed).await?;

        let _execution_time = SystemTime::now()
            .duration_since(execution_start)
            .expect("Failed to calculate execution time duration in execute_job");

        Ok(())
    }

    async fn performance_monitoring_loop(&self) {
        while *self
            .scheduler_running
            .lock()
            .expect("Failed to acquire lock on scheduler_running in performance_monitoring_loop")
        {
            self.update_backend_performance().await;
            tokio::time::sleep(Duration::from_secs(30)).await;
        }
    }

    async fn update_backend_performance(&self) {
        let mut backend_performance = self.backend_performance.write().expect(
            "Failed to acquire write lock on backend_performance in update_backend_performance",
        );
        let now = SystemTime::now();

        for (_backend, perf) in backend_performance.iter_mut() {
            perf.last_updated = now;

            let snapshot = PerformanceSnapshot {
                timestamp: now,
                queue_length: perf.queue_length,
                utilization: perf.utilization,
                avg_queue_time_secs: perf.avg_queue_time.as_secs_f64(),
                success_rate: perf.success_rate,
            };

            perf.history.push_back(snapshot);

            // Trim history to the last 24 hours.
            let cutoff = now - Duration::from_secs(86400);
            while let Some(front) = perf.history.front() {
                if front.timestamp < cutoff {
                    perf.history.pop_front();
                } else {
                    break;
                }
            }
        }
    }

    async fn scirs2_optimization_loop(&self) {
        let frequency = {
            let params = self
                .params
                .read()
                .expect("Failed to acquire read lock on params in scirs2_optimization_loop");
            params.scirs2_params.optimization_frequency
        };

        loop {
            let should_continue = *self
                .scheduler_running
                .lock()
                .expect("Failed to acquire lock on scheduler_running in scirs2_optimization_loop");
            if !should_continue {
                break;
            }

            if let Err(e) = self.run_scirs2_optimization().await {
                eprintln!("SciRS2 optimization error: {e}");
            }

            tokio::time::sleep(frequency).await;
        }
    }

    async fn run_scirs2_optimization(&self) -> DeviceResult<()> {
        #[cfg(feature = "scirs2")]
        {
            let backend_performance = self.backend_performance.read().expect(
                "Failed to acquire read lock on backend_performance in run_scirs2_optimization",
            );
            let performance_data: Vec<f64> = backend_performance
                .values()
                .map(|p| p.utilization)
                .collect();

            if performance_data.len() > 1 {
                use scirs2_core::ndarray::Array1;

                // Summary statistics over current backend utilization; these feed the
                // (not yet wired in) rebalancing of allocation weights.
                let data_array = Array1::from_vec(performance_data);
                let _avg_utilization = mean(&data_array.view());
                let _utilization_std = std(&data_array.view(), 1, None);
            }
        }

        Ok(())
    }
}

impl Clone for QuantumJobScheduler {
    fn clone(&self) -> Self {
        Self {
            params: Arc::clone(&self.params),
            job_queues: Arc::clone(&self.job_queues),
            jobs: Arc::clone(&self.jobs),
            backend_performance: Arc::clone(&self.backend_performance),
            backends: Arc::clone(&self.backends),
            running_jobs: Arc::clone(&self.running_jobs),
            execution_history: Arc::clone(&self.execution_history),
            user_shares: Arc::clone(&self.user_shares),
            scheduler_running: Arc::clone(&self.scheduler_running),
            event_sender: self.event_sender.clone(),
            performance_predictor: Arc::clone(&self.performance_predictor),
            resource_manager: Arc::clone(&self.resource_manager),
        }
    }
}

impl PerformancePredictor {
    fn new() -> Self {
        Self {
            history: VecDeque::new(),
            model_params: HashMap::new(),
            accuracy_metrics: HashMap::new(),
        }
    }
}

impl ResourceManager {
    fn new() -> Self {
        Self {
            available_resources: HashMap::new(),
            reservations: HashMap::new(),
            utilization_history: VecDeque::new(),
        }
    }
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum MLAlgorithm {
    LinearRegression,
    SVM,
    RandomForest,
    GradientBoosting,
    NeuralNetwork,
    EnsembleMethod,
    DeepRL,
    GraphNN,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MultiObjectiveWeights {
    pub throughput: f64,
    pub cost: f64,
    pub energy: f64,
    pub fairness: f64,
    pub sla_compliance: f64,
    pub qos: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RLParameters {
    pub learning_rate: f64,
    pub discount_factor: f64,
    pub exploration_rate: f64,
    pub episode_length: usize,
    pub reward_weights: HashMap<String, f64>,
    pub state_dimension: usize,
    pub action_space_size: usize,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GAParameters {
    pub population_size: usize,
    pub generations: usize,
    pub crossover_prob: f64,
    pub mutation_prob: f64,
    pub selection_strategy: String,
    pub elite_size: usize,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeatureParams {
    pub enable_temporal_features: bool,
    pub enable_complexity_features: bool,
    pub enable_user_features: bool,
    pub enable_platform_features: bool,
    pub enable_historical_features: bool,
    pub normalization_method: String,
    pub selection_threshold: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SLAConfig {
    pub max_queue_time: Duration,
    pub max_execution_time: Duration,
    pub min_availability: f64,
    pub violation_penalty: f64,
    pub tier: SLATier,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SLATier {
    Gold,
    Silver,
    Bronze,
    Basic,
}

impl Default for MultiObjectiveWeights {
    fn default() -> Self {
        Self {
            throughput: 0.3,
            cost: 0.2,
            energy: 0.15,
            fairness: 0.15,
            sla_compliance: 0.1,
            qos: 0.1,
        }
    }
}

impl Default for RLParameters {
    fn default() -> Self {
        Self {
            learning_rate: 0.001,
            discount_factor: 0.95,
            exploration_rate: 0.1,
            episode_length: 1000,
            reward_weights: [
                ("throughput".to_string(), 1.0),
                ("fairness".to_string(), 0.5),
                ("cost".to_string(), -0.3),
            ]
            .into_iter()
            .collect(),
            state_dimension: 64,
            action_space_size: 16,
        }
    }
}

impl Default for GAParameters {
    fn default() -> Self {
        Self {
            population_size: 50,
            generations: 100,
            crossover_prob: 0.8,
            mutation_prob: 0.1,
            selection_strategy: "tournament".to_string(),
            elite_size: 5,
        }
    }
}

impl Default for FeatureParams {
    fn default() -> Self {
        Self {
            enable_temporal_features: true,
            enable_complexity_features: true,
            enable_user_features: true,
            enable_platform_features: true,
            enable_historical_features: true,
            normalization_method: "z_score".to_string(),
            selection_threshold: 0.1,
        }
    }
}

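/// Convenience constructor: a high-priority job config with extra retry attempts.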
pub fn create_high_priority_config(max_execution_time: Duration) -> JobConfig {
    JobConfig {
        priority: JobPriority::High,
        max_execution_time,
        retry_attempts: 5,
        ..Default::default()
    }
}

pub fn create_batch_job_config() -> JobConfig {
    JobConfig {
        priority: JobPriority::BestEffort,
        max_execution_time: Duration::from_secs(3600 * 24),
        max_queue_time: None,
        retry_attempts: 1,
        ..Default::default()
    }
}

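/// Convenience constructor: a latency-sensitive, no-retry configuration for real-time workloads.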
pub fn create_realtime_config() -> JobConfig {
    JobConfig {
        priority: JobPriority::Critical,
        max_execution_time: Duration::from_secs(60),
        max_queue_time: Some(Duration::from_secs(30)),
        retry_attempts: 0,
        ..Default::default()
    }
}

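/// Convenience constructor: maps an SLA tier to priority, time limits, and retry attempts.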
pub fn create_sla_aware_config(tier: SLATier) -> JobConfig {
    let (priority, max_execution_time, max_queue_time, retry_attempts) = match tier {
        SLATier::Gold => (
            JobPriority::Critical,
            Duration::from_secs(300),
            Some(Duration::from_secs(60)),
            5,
        ),
        SLATier::Silver => (
            JobPriority::High,
            Duration::from_secs(600),
            Some(Duration::from_secs(300)),
            3,
        ),
        SLATier::Bronze => (
            JobPriority::Normal,
            Duration::from_secs(1800),
            Some(Duration::from_secs(900)),
            2,
        ),
        SLATier::Basic => (
            JobPriority::Low,
            Duration::from_secs(3600),
            Some(Duration::from_secs(1800)),
            1,
        ),
    };

    JobConfig {
        priority,
        max_execution_time,
        max_queue_time,
        retry_attempts,
        ..Default::default()
    }
}

pub fn create_cost_optimized_config(budget_limit: f64) -> JobConfig {
    JobConfig {
        priority: JobPriority::BestEffort,
        max_execution_time: Duration::from_secs(7200),
        max_queue_time: None,
        retry_attempts: 1,
        cost_limit: Some(budget_limit),
        preferred_backends: vec![],
        ..Default::default()
    }
}

pub fn create_energy_efficient_config() -> JobConfig {
    JobConfig {
        priority: JobPriority::Low,
        max_execution_time: Duration::from_secs(3600),
        max_queue_time: None,
        retry_attempts: 2,
        tags: std::iter::once(("energy_profile".to_string(), "green".to_string())).collect(),
        ..Default::default()
    }
}

pub fn create_research_config() -> JobConfig {
    JobConfig {
        priority: JobPriority::Normal,
        max_execution_time: Duration::from_secs(14400),
        max_queue_time: Some(Duration::from_secs(7200)),
        retry_attempts: 3,
        tags: [
            ("workload_type".to_string(), "research".to_string()),
            ("fault_tolerance".to_string(), "high".to_string()),
        ]
        .into_iter()
        .collect(),
        ..Default::default()
    }
}

pub fn create_deadline_config(deadline: SystemTime) -> JobConfig {
    JobConfig {
        priority: JobPriority::High,
        max_execution_time: Duration::from_secs(1800),
        max_queue_time: Some(Duration::from_secs(300)),
        retry_attempts: 2,
        deadline: Some(deadline),
        tags: std::iter::once((
            "scheduling_type".to_string(),
            "deadline_sensitive".to_string(),
        ))
        .collect(),
        ..Default::default()
    }
}

pub fn create_ml_training_config() -> JobConfig {
    JobConfig {
        priority: JobPriority::Normal,
        max_execution_time: Duration::from_secs(21600),
        max_queue_time: Some(Duration::from_secs(3600)),
        retry_attempts: 2,
        resource_requirements: ResourceRequirements {
            min_qubits: 20,
            max_depth: Some(1000),
            min_fidelity: Some(0.95),
            memory_mb: Some(16384),
            cpu_cores: Some(8),
            required_features: vec![
                "variational_circuits".to_string(),
                "parametric_gates".to_string(),
            ],
            ..Default::default()
        },
        tags: [
            ("workload_type".to_string(), "machine_learning".to_string()),
            ("resource_intensive".to_string(), "true".to_string()),
        ]
        .into_iter()
        .collect(),
        ..Default::default()
    }
}

pub fn create_optimization_config() -> JobConfig {
    JobConfig {
        priority: JobPriority::Normal,
        max_execution_time: Duration::from_secs(10800),
        max_queue_time: Some(Duration::from_secs(1800)),
        retry_attempts: 3,
        resource_requirements: ResourceRequirements {
            min_qubits: 10,
            max_depth: Some(500),
            min_fidelity: Some(0.90),
            required_features: vec!["qaoa".to_string(), "variational_algorithms".to_string()],
            ..Default::default()
        },
        tags: [
            ("workload_type".to_string(), "optimization".to_string()),
            ("algorithm_type".to_string(), "variational".to_string()),
        ]
        .into_iter()
        .collect(),
        ..Default::default()
    }
}

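/// Convenience constructor: sizes execution time and memory requirements by qubit count.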
pub fn create_simulation_config(qubit_count: usize) -> JobConfig {
    let (max_execution_time, memory_requirement) = match qubit_count {
        1..=20 => (Duration::from_secs(3600), 4096),
        21..=30 => (Duration::from_secs(7200), 8192),
        31..=40 => (Duration::from_secs(14400), 16384),
        _ => (Duration::from_secs(28800), 32768),
    };

    JobConfig {
        priority: JobPriority::Low,
        max_execution_time,
        max_queue_time: None,
        retry_attempts: 1,
        resource_requirements: ResourceRequirements {
            min_qubits: qubit_count,
            memory_mb: Some(memory_requirement),
            cpu_cores: Some(16),
            required_features: vec!["high_precision".to_string(), "large_circuits".to_string()],
            ..Default::default()
        },
        tags: [
            ("workload_type".to_string(), "simulation".to_string()),
            ("qubit_count".to_string(), qubit_count.to_string()),
        ]
        .into_iter()
        .collect(),
        ..Default::default()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use quantrs2_circuit::prelude::CircuitBuilder;

    #[tokio::test]
    async fn test_job_scheduler_creation() {
        let params = SchedulingParams::default();
        let scheduler = QuantumJobScheduler::new(params);
        assert!(!*scheduler
            .scheduler_running
            .lock()
            .expect("Failed to acquire lock on scheduler_running in test"));
    }

    #[tokio::test]
    async fn test_job_config_validation() {
        let config = JobConfig::default();
        assert_eq!(config.priority, JobPriority::Normal);
        assert_eq!(config.retry_attempts, 3);
        assert!(config.dependencies.is_empty());
    }

    #[tokio::test]
    async fn test_priority_ordering() {
        assert!(JobPriority::Critical < JobPriority::High);
        assert!(JobPriority::High < JobPriority::Normal);
        assert!(JobPriority::Normal < JobPriority::Low);
        assert!(JobPriority::Low < JobPriority::BestEffort);
    }

    #[test]
    fn test_job_id_creation() {
        let id1 = JobId::new();
        let id2 = JobId::new();
        assert_ne!(id1, id2);
        assert!(!id1.0.is_empty());
    }

    #[test]
    fn test_convenience_configs() {
        let high_priority = create_high_priority_config(Duration::from_secs(300));
        assert_eq!(high_priority.priority, JobPriority::High);
        assert_eq!(high_priority.retry_attempts, 5);

        let batch = create_batch_job_config();
        assert_eq!(batch.priority, JobPriority::BestEffort);
        assert!(batch.max_queue_time.is_none());

        let realtime = create_realtime_config();
        assert_eq!(realtime.priority, JobPriority::Critical);
        assert_eq!(realtime.retry_attempts, 0);
    }
}