use quantrs2_core::error::{QuantRS2Error, QuantRS2Result};
use scirs2_core::ndarray::Array1;
use scirs2_core::Complex64;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
/// Central coordinator for live quantum-hardware interaction: tracks
/// provider connections, monitors submitted jobs, caches calibration data,
/// and buffers hardware-level events.
#[derive(Debug)]
pub struct RealtimeHardwareManager {
    // Active connections keyed by connection id (see `connect`).
    connections: Arc<RwLock<HashMap<String, HardwareConnection>>>,
    // Per-job state, history, and registered event callbacks.
    job_monitor: JobMonitor,
    // Latest calibration per backend plus averaged snapshots over time.
    calibration_tracker: CalibrationTracker,
    // Bounded event buffer with subscriber callbacks.
    event_stream: EventStream,
    // Behavior tuning knobs (polling interval, buffer sizes, ...).
    config: RealtimeConfig,
    // Aggregate counters reported by `get_stats`.
    stats: Arc<Mutex<RealtimeStats>>,
}
/// Tuning parameters for the realtime hardware manager.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RealtimeConfig {
    /// Interval between status polls, in milliseconds.
    pub polling_interval_ms: u64,
    /// Whether streaming of events is enabled.
    pub enable_streaming: bool,
    /// Maximum number of events retained in the event buffer.
    pub max_event_buffer: usize,
    /// Interval between calibration refreshes (default 300 — presumably
    /// seconds; unit not established by this file, confirm at call sites).
    pub calibration_update_interval: u64,
    /// Whether adaptive error mitigation is enabled.
    pub enable_adaptive_mitigation: bool,
    /// Timeout for operations (default 3600 — presumably seconds; unit not
    /// established by this file, confirm at call sites).
    pub operation_timeout: u64,
    /// Whether backend-availability notifications are enabled.
    pub enable_availability_notifications: bool,
    /// Maximum number of concurrently tracked jobs. NOTE(review): not
    /// currently enforced by `submit_job` in this file.
    pub max_concurrent_jobs: usize,
}
impl Default for RealtimeConfig {
    /// Conservative defaults: 500 ms polling, streaming on, 1000-event
    /// buffer, calibration refresh every 300, 3600 operation timeout,
    /// adaptive mitigation and availability notifications on, at most
    /// 10 concurrent jobs.
    fn default() -> Self {
        Self {
            polling_interval_ms: 500,
            enable_streaming: true,
            max_event_buffer: 1000,
            calibration_update_interval: 300,
            enable_adaptive_mitigation: true,
            operation_timeout: 3600,
            enable_availability_notifications: true,
            max_concurrent_jobs: 10,
        }
    }
}
/// A live (or attempted) connection to one backend at one provider.
#[derive(Debug, Clone)]
pub struct HardwareConnection {
    /// Connection id, formatted as `"{provider:?}_{backend}"` by `connect`.
    pub id: String,
    /// Cloud/hardware provider this connection targets.
    pub provider: HardwareProvider,
    /// Current lifecycle state of the connection.
    pub status: ConnectionStatus,
    /// Provider-specific backend name.
    pub backend: String,
    /// Unix timestamp (seconds) when the connection was established.
    pub connected_at: u64,
    /// Unix timestamp (seconds) of the most recent heartbeat.
    pub last_heartbeat: u64,
    /// Most recent calibration data, if any has been fetched.
    pub calibration: Option<CalibrationData>,
}
/// Supported quantum-hardware providers. The `Debug` rendering of a variant
/// is used verbatim when building connection ids (see `connect`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum HardwareProvider {
    IBMQuantum,
    GoogleQuantumAI,
    AmazonBraket,
    AzureQuantum,
    IonQ,
    Rigetti,
    Xanadu,
    Pasqal,
}
/// Lifecycle state of a [`HardwareConnection`]. Only `Connected` counts as
/// available (see `is_backend_available`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConnectionStatus {
    Connected,
    Connecting,
    Disconnected,
    Error,
    Maintenance,
}
/// Tracks the state of submitted jobs and dispatches per-job event
/// callbacks. All fields are behind locks so the monitor can be shared.
pub struct JobMonitor {
    // Live jobs keyed by job id.
    active_jobs: Arc<RwLock<HashMap<String, JobState>>>,
    // Completed-job records. NOTE(review): appended nowhere in this file;
    // presumably populated elsewhere or reserved for future use.
    job_history: Arc<Mutex<VecDeque<JobRecord>>>,
    // Event callbacks keyed by job id; boxed closures, hence the manual
    // `Debug` impl below.
    callbacks: Arc<Mutex<HashMap<String, Vec<Box<dyn Fn(&JobEvent) + Send + Sync>>>>>,
}
impl std::fmt::Debug for JobMonitor {
    /// Manual `Debug`: the boxed callback closures are not `Debug`, so all
    /// fields are rendered as fixed placeholders.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = formatter.debug_struct("JobMonitor");
        builder.field("active_jobs", &"<jobs>");
        builder.field("job_history", &"<history>");
        builder.field("callbacks", &"<callbacks>");
        builder.finish()
    }
}
/// Mutable state of one in-flight job.
#[derive(Debug, Clone)]
pub struct JobState {
    /// Unique job identifier.
    pub job_id: String,
    /// Current lifecycle status.
    pub status: JobStatus,
    /// Completion fraction, 0.0..=1.0 by convention (not enforced here).
    pub progress: f64,
    /// When monitoring of this job began.
    pub start_time: Instant,
    /// Estimated time remaining, if known.
    pub estimated_completion: Option<Duration>,
    /// Partial measurement results received so far, in arrival order.
    pub partial_results: Vec<PartialResult>,
    /// Human-readable error details, set on failure.
    pub error_info: Option<String>,
    /// Position in the provider queue, if queued.
    pub queue_position: Option<usize>,
}
/// Lifecycle status of a submitted job.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum JobStatus {
    Queued,
    Running,
    Completed,
    Failed,
    Cancelled,
    TimedOut,
}
/// One incremental batch of measurement counts for a job.
#[derive(Debug, Clone)]
pub struct PartialResult {
    /// Zero-based arrival index within the job's result list.
    pub index: usize,
    /// Bitstring -> observation count.
    pub counts: HashMap<String, usize>,
    /// Unix timestamp (seconds) when the result was recorded.
    pub timestamp: u64,
}
/// A job-level event delivered to callbacks registered via
/// `register_job_callback`.
#[derive(Debug, Clone)]
pub struct JobEvent {
    /// What kind of change this event describes.
    pub event_type: JobEventType,
    /// Id of the job the event concerns.
    pub job_id: String,
    /// Payload matching `event_type`.
    pub data: JobEventData,
    /// Unix timestamp (seconds) when the event was emitted.
    pub timestamp: u64,
}
/// Discriminant for [`JobEvent`] payloads.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum JobEventType {
    StatusChanged,
    ProgressUpdate,
    PartialResult,
    Completed,
    Failed,
    QueuePositionChanged,
}
/// Payload carried by a [`JobEvent`]; the variant is expected to match the
/// event's [`JobEventType`].
#[derive(Debug, Clone)]
pub enum JobEventData {
    Status(JobStatus),
    Progress(f64),
    Result(PartialResult),
    Error(String),
    QueuePosition(usize),
    None,
}
/// Historical record of a finished job, kept in `JobMonitor::job_history`.
#[derive(Debug, Clone)]
pub struct JobRecord {
    /// Unique job identifier.
    pub job_id: String,
    /// Final status of the job.
    pub status: JobStatus,
    /// Unix timestamp (seconds) when the job started.
    pub start_time: u64,
    /// Unix timestamp (seconds) when the job finished.
    pub end_time: u64,
    /// Total number of measurement shots executed.
    pub total_shots: usize,
    /// Backend the job ran on.
    pub backend: String,
}
/// Caches the most recent calibration per backend and keeps a running
/// history of averaged error-rate snapshots.
#[derive(Debug)]
pub struct CalibrationTracker {
    // Latest full calibration, keyed by backend name.
    calibrations: Arc<RwLock<HashMap<String, CalibrationData>>>,
    // Per-backend snapshot history, appended on every update.
    history: Arc<Mutex<HashMap<String, VecDeque<CalibrationSnapshot>>>>,
    // When each backend was last updated (monotonic clock).
    last_updates: Arc<Mutex<HashMap<String, Instant>>>,
}
/// Full calibration snapshot for one backend.
///
/// NOTE(review): `two_qubit_errors` uses tuple keys; serde can derive
/// `Serialize`/`Deserialize` for it, but non-string map keys fail at
/// runtime with formats like JSON — confirm the intended wire format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CalibrationData {
    /// Backend this calibration describes.
    pub backend: String,
    /// Unix timestamp (seconds) of the calibration.
    pub timestamp: u64,
    /// Per-qubit single-qubit gate error rates.
    pub single_qubit_errors: HashMap<usize, f64>,
    /// Per-pair two-qubit gate error rates.
    pub two_qubit_errors: HashMap<(usize, usize), f64>,
    /// Per-qubit readout error rates.
    pub readout_errors: HashMap<usize, f64>,
    /// Per-qubit T1 relaxation times (units not established by this file).
    pub t1_times: HashMap<usize, f64>,
    /// Per-qubit T2 dephasing times (units not established by this file).
    pub t2_times: HashMap<usize, f64>,
    /// Gate name -> duration (units not established by this file).
    pub gate_durations: HashMap<String, f64>,
    /// Physical qubit coupling map as (qubit, qubit) pairs.
    pub connectivity: Vec<(usize, usize)>,
}
/// Compact summary of one calibration update: averages across all qubits,
/// computed in `CalibrationTracker::update_calibration`.
#[derive(Debug, Clone)]
pub struct CalibrationSnapshot {
    /// Unix timestamp (seconds) copied from the source calibration.
    pub timestamp: u64,
    /// Mean single-qubit error rate (0.0 when no data).
    pub avg_single_qubit_error: f64,
    /// Mean two-qubit error rate (0.0 when no data).
    pub avg_two_qubit_error: f64,
    /// Mean readout error rate (0.0 when no data).
    pub avg_readout_error: f64,
}
/// Buffered stream of hardware events with subscriber callbacks.
pub struct EventStream {
    // FIFO buffer of recent events, capped at `max_size`.
    buffer: Arc<Mutex<VecDeque<HardwareEvent>>>,
    // Maximum number of buffered events (from `RealtimeConfig::max_event_buffer`).
    max_size: usize,
    // Subscriber callbacks; boxed closures, hence the manual `Debug` below.
    subscribers: Arc<Mutex<Vec<Box<dyn Fn(&HardwareEvent) + Send + Sync>>>>,
}
impl std::fmt::Debug for EventStream {
    /// Manual `Debug`: subscriber closures are not `Debug`, so everything
    /// except `max_size` is rendered as a fixed placeholder.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = formatter.debug_struct("EventStream");
        builder.field("buffer", &"<buffer>");
        builder.field("max_size", &self.max_size);
        builder.field("subscribers", &"<subscribers>");
        builder.finish()
    }
}
/// A backend-level event delivered through the [`EventStream`].
#[derive(Debug, Clone)]
pub struct HardwareEvent {
    /// What kind of change this event describes.
    pub event_type: HardwareEventType,
    /// Backend the event concerns.
    pub backend: String,
    /// Payload matching `event_type`.
    pub data: HardwareEventData,
    /// Unix timestamp (seconds) when the event was emitted.
    pub timestamp: u64,
}
/// Discriminant for [`HardwareEvent`] payloads.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum HardwareEventType {
    CalibrationUpdated,
    AvailabilityChanged,
    ErrorRateAlert,
    MaintenanceScheduled,
    JobQueued,
    JobStarted,
    JobCompleted,
}
/// Payload carried by a [`HardwareEvent`]; the variant is expected to match
/// the event's [`HardwareEventType`].
#[derive(Debug, Clone)]
pub enum HardwareEventData {
    Calibration(CalibrationData),
    Availability(bool),
    ErrorRate(f64),
    /// Maintenance window as Unix timestamps (seconds).
    Maintenance { start: u64, end: u64 },
    JobInfo { job_id: String, shots: usize },
    None,
}
/// Aggregate counters maintained by [`RealtimeHardwareManager`] and
/// returned by `get_stats`.
#[derive(Debug, Clone, Default)]
pub struct RealtimeStats {
    /// Total jobs ever submitted for monitoring.
    pub jobs_monitored: u64,
    /// Jobs that reached `Completed`.
    pub jobs_completed: u64,
    /// Jobs that reached `Failed`.
    pub jobs_failed: u64,
    /// Hardware events processed.
    pub events_processed: u64,
    /// Calibration updates applied.
    pub calibration_updates: u64,
    /// Mean job completion time. NOTE(review): never updated in this file.
    pub avg_completion_time: Duration,
    /// Current number of live connections.
    pub active_connections: usize,
}
impl RealtimeHardwareManager {
    /// Creates a manager with no connections and zeroed statistics.
    #[must_use]
    pub fn new(config: RealtimeConfig) -> Self {
        Self {
            connections: Arc::new(RwLock::new(HashMap::new())),
            job_monitor: JobMonitor::new(),
            calibration_tracker: CalibrationTracker::new(),
            event_stream: EventStream::new(config.max_event_buffer),
            config,
            stats: Arc::new(Mutex::new(RealtimeStats::default())),
        }
    }

    /// Registers a connection to `backend` at `provider` and returns its id.
    ///
    /// The id is derived from the provider/backend pair, so connecting again
    /// to the same pair replaces the existing entry. `active_connections` is
    /// incremented only for genuinely new connections; the previous code
    /// incremented unconditionally, so a reconnect made the counter drift
    /// above the real connection count.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if an internal lock is poisoned.
    pub fn connect(&mut self, provider: HardwareProvider, backend: &str) -> QuantRS2Result<String> {
        let conn_id = format!("{provider:?}_{backend}");
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        let connection = HardwareConnection {
            id: conn_id.clone(),
            provider,
            status: ConnectionStatus::Connected,
            backend: backend.to_string(),
            connected_at: now,
            last_heartbeat: now,
            calibration: None,
        };
        let mut connections = self.connections.write().map_err(|_| {
            QuantRS2Error::InvalidInput("Failed to acquire connections lock".to_string())
        })?;
        let replaced = connections.insert(conn_id.clone(), connection);
        // Release the connections lock before taking the stats lock.
        drop(connections);
        if replaced.is_none() {
            let mut stats = self.stats.lock().map_err(|_| {
                QuantRS2Error::InvalidInput("Failed to acquire stats lock".to_string())
            })?;
            stats.active_connections += 1;
        }
        Ok(conn_id)
    }

    /// Removes the connection with `connection_id`, if present, and keeps
    /// `active_connections` in sync. Unknown ids are a no-op.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if an internal lock is poisoned.
    pub fn disconnect(&mut self, connection_id: &str) -> QuantRS2Result<()> {
        let mut connections = self.connections.write().map_err(|_| {
            QuantRS2Error::InvalidInput("Failed to acquire connections lock".to_string())
        })?;
        let removed = connections.remove(connection_id).is_some();
        drop(connections);
        if removed {
            let mut stats = self.stats.lock().map_err(|_| {
                QuantRS2Error::InvalidInput("Failed to acquire stats lock".to_string())
            })?;
            // Saturating decrement guards against a counter that somehow
            // fell out of sync (e.g. via historical double counting).
            stats.active_connections = stats.active_connections.saturating_sub(1);
        }
        Ok(())
    }

    /// Starts monitoring `job_id` on the connection `connection_id`.
    ///
    /// The job begins in `Queued` state with zero progress.
    /// NOTE(review): `config.max_concurrent_jobs` is not enforced here.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the connection does not
    /// exist or an internal lock is poisoned.
    pub fn submit_job(&mut self, job_id: &str, connection_id: &str) -> QuantRS2Result<()> {
        let connections = self.connections.read().map_err(|_| {
            QuantRS2Error::InvalidInput("Failed to acquire connections lock".to_string())
        })?;
        if !connections.contains_key(connection_id) {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Connection {connection_id} not found"
            )));
        }
        drop(connections);
        let job_state = JobState {
            job_id: job_id.to_string(),
            status: JobStatus::Queued,
            progress: 0.0,
            start_time: Instant::now(),
            estimated_completion: None,
            partial_results: Vec::new(),
            error_info: None,
            queue_position: Some(1),
        };
        self.job_monitor.add_job(job_state)?;
        let mut stats = self
            .stats
            .lock()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire stats lock".to_string()))?;
        stats.jobs_monitored += 1;
        Ok(())
    }

    /// Returns the current status of `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the job is unknown.
    pub fn get_job_status(&self, job_id: &str) -> QuantRS2Result<JobStatus> {
        self.job_monitor.get_status(job_id)
    }

    /// Returns the current progress fraction of `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the job is unknown.
    pub fn get_job_progress(&self, job_id: &str) -> QuantRS2Result<f64> {
        self.job_monitor.get_progress(job_id)
    }

    /// Updates the status/progress of `job_id` and bumps the completion or
    /// failure counters accordingly.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if an internal lock is poisoned.
    pub fn update_job_status(
        &mut self,
        job_id: &str,
        status: JobStatus,
        progress: f64,
    ) -> QuantRS2Result<()> {
        self.job_monitor.update_status(job_id, status, progress)?;
        if matches!(status, JobStatus::Completed | JobStatus::Failed) {
            let mut stats = self.stats.lock().map_err(|_| {
                QuantRS2Error::InvalidInput("Failed to acquire stats lock".to_string())
            })?;
            match status {
                JobStatus::Completed => stats.jobs_completed += 1,
                JobStatus::Failed => stats.jobs_failed += 1,
                _ => unreachable!("guarded by matches! above"),
            }
        }
        Ok(())
    }

    /// Appends a batch of measurement counts to `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if an internal lock is poisoned.
    pub fn add_partial_result(
        &mut self,
        job_id: &str,
        counts: HashMap<String, usize>,
    ) -> QuantRS2Result<()> {
        self.job_monitor.add_partial_result(job_id, counts)
    }

    /// Returns all partial results recorded for `job_id`, in arrival order.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the job is unknown.
    pub fn get_partial_results(&self, job_id: &str) -> QuantRS2Result<Vec<PartialResult>> {
        self.job_monitor.get_partial_results(job_id)
    }

    /// Stores new calibration data for `backend` and bumps the update counter.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if an internal lock is poisoned.
    pub fn update_calibration(
        &mut self,
        backend: &str,
        calibration: CalibrationData,
    ) -> QuantRS2Result<()> {
        self.calibration_tracker
            .update_calibration(backend, calibration)?;
        let mut stats = self
            .stats
            .lock()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire stats lock".to_string()))?;
        stats.calibration_updates += 1;
        Ok(())
    }

    /// Returns the latest calibration for `backend`, or `None` if never set.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if an internal lock is poisoned.
    pub fn get_calibration(&self, backend: &str) -> QuantRS2Result<Option<CalibrationData>> {
        self.calibration_tracker.get_calibration(backend)
    }

    /// Picks up to `num_qubits` qubit indices with the lowest single-qubit
    /// error rates on `backend`. Without calibration data it falls back to
    /// the identity assignment `0..num_qubits`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if an internal lock is poisoned.
    pub fn get_optimal_qubits(
        &self,
        backend: &str,
        num_qubits: usize,
    ) -> QuantRS2Result<Vec<usize>> {
        match self.get_calibration(backend)? {
            Some(cal) => {
                let mut qubits: Vec<(usize, f64)> = cal
                    .single_qubit_errors
                    .iter()
                    .map(|(&q, &e)| (q, e))
                    .collect();
                // `total_cmp` gives a total order on f64 (NaN sorts last),
                // avoiding the partial_cmp/unwrap_or dance.
                qubits.sort_unstable_by(|a, b| a.1.total_cmp(&b.1));
                Ok(qubits
                    .into_iter()
                    .take(num_qubits)
                    .map(|(q, _)| q)
                    .collect())
            }
            // No calibration available: assume all qubits are equivalent.
            None => Ok((0..num_qubits).collect()),
        }
    }

    /// Returns a snapshot of the aggregate statistics.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the stats lock is poisoned.
    pub fn get_stats(&self) -> QuantRS2Result<RealtimeStats> {
        let stats = self
            .stats
            .lock()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire stats lock".to_string()))?;
        Ok(stats.clone())
    }

    /// Returns a snapshot of all current connections (unspecified order).
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the connections lock is poisoned.
    pub fn get_connections(&self) -> QuantRS2Result<Vec<HardwareConnection>> {
        let connections = self.connections.read().map_err(|_| {
            QuantRS2Error::InvalidInput("Failed to acquire connections lock".to_string())
        })?;
        Ok(connections.values().cloned().collect())
    }

    /// Registers `callback` to be invoked on every event for `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the callbacks lock is poisoned.
    pub fn register_job_callback<F>(&mut self, job_id: &str, callback: F) -> QuantRS2Result<()>
    where
        F: Fn(&JobEvent) + Send + Sync + 'static,
    {
        self.job_monitor.register_callback(job_id, callback)
    }

    /// Returns `true` iff `connection_id` exists and its status is
    /// `Connected`; unknown ids yield `Ok(false)` rather than an error.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the connections lock is poisoned.
    pub fn is_backend_available(&self, connection_id: &str) -> QuantRS2Result<bool> {
        let connections = self.connections.read().map_err(|_| {
            QuantRS2Error::InvalidInput("Failed to acquire connections lock".to_string())
        })?;
        Ok(connections
            .get(connection_id)
            .map_or(false, |conn| conn.status == ConnectionStatus::Connected))
    }
}
impl JobMonitor {
    /// Creates an empty monitor: no jobs, no history, no callbacks.
    fn new() -> Self {
        Self {
            active_jobs: Arc::new(RwLock::new(HashMap::new())),
            job_history: Arc::new(Mutex::new(VecDeque::new())),
            callbacks: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Inserts (or replaces) a job keyed by its `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the jobs lock is poisoned.
    fn add_job(&self, job_state: JobState) -> QuantRS2Result<()> {
        let mut jobs = self
            .active_jobs
            .write()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire jobs lock".to_string()))?;
        jobs.insert(job_state.job_id.clone(), job_state);
        Ok(())
    }

    /// Returns the status of `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the job is unknown or the
    /// jobs lock is poisoned.
    fn get_status(&self, job_id: &str) -> QuantRS2Result<JobStatus> {
        let jobs = self
            .active_jobs
            .read()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire jobs lock".to_string()))?;
        jobs.get(job_id)
            .map(|j| j.status)
            .ok_or_else(|| QuantRS2Error::InvalidInput(format!("Job {job_id} not found")))
    }

    /// Returns the progress fraction of `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the job is unknown or the
    /// jobs lock is poisoned.
    fn get_progress(&self, job_id: &str) -> QuantRS2Result<f64> {
        let jobs = self
            .active_jobs
            .read()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire jobs lock".to_string()))?;
        jobs.get(job_id)
            .map(|j| j.progress)
            .ok_or_else(|| QuantRS2Error::InvalidInput(format!("Job {job_id} not found")))
    }

    /// Updates status/progress of `job_id` and fires the `StatusChanged`
    /// and `ProgressUpdate` callbacks. Unknown ids are a silent no-op.
    ///
    /// The jobs write lock is released BEFORE callbacks run (the previous
    /// code held it across `trigger_callback`, so any callback that called
    /// back into the monitor — e.g. `get_status` — would deadlock on the
    /// `RwLock`; `add_partial_result` already dropped the lock first).
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if a lock is poisoned.
    fn update_status(&self, job_id: &str, status: JobStatus, progress: f64) -> QuantRS2Result<()> {
        let mut jobs = self
            .active_jobs
            .write()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire jobs lock".to_string()))?;
        let updated = match jobs.get_mut(job_id) {
            Some(job) => {
                job.status = status;
                job.progress = progress;
                true
            }
            None => false,
        };
        drop(jobs);
        if updated {
            self.trigger_callback(
                job_id,
                JobEventType::StatusChanged,
                JobEventData::Status(status),
            )?;
            self.trigger_callback(
                job_id,
                JobEventType::ProgressUpdate,
                JobEventData::Progress(progress),
            )?;
        }
        Ok(())
    }

    /// Appends a timestamped batch of counts to `job_id` and fires the
    /// `PartialResult` callback (after releasing the jobs lock). Unknown
    /// ids are a silent no-op.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if a lock is poisoned.
    fn add_partial_result(
        &self,
        job_id: &str,
        counts: HashMap<String, usize>,
    ) -> QuantRS2Result<()> {
        let mut jobs = self
            .active_jobs
            .write()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire jobs lock".to_string()))?;
        if let Some(job) = jobs.get_mut(job_id) {
            let result = PartialResult {
                // Arrival index = number of results already stored.
                index: job.partial_results.len(),
                counts,
                timestamp: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs(),
            };
            job.partial_results.push(result.clone());
            // Release the jobs lock before invoking callbacks so they may
            // safely re-enter the monitor.
            drop(jobs);
            self.trigger_callback(
                job_id,
                JobEventType::PartialResult,
                JobEventData::Result(result),
            )?;
        }
        Ok(())
    }

    /// Returns a copy of all partial results recorded for `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the job is unknown or the
    /// jobs lock is poisoned.
    fn get_partial_results(&self, job_id: &str) -> QuantRS2Result<Vec<PartialResult>> {
        let jobs = self
            .active_jobs
            .read()
            .map_err(|_| QuantRS2Error::InvalidInput("Failed to acquire jobs lock".to_string()))?;
        jobs.get(job_id)
            .map(|j| j.partial_results.clone())
            .ok_or_else(|| QuantRS2Error::InvalidInput(format!("Job {job_id} not found")))
    }

    /// Appends `callback` to the handler list for `job_id`.
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the callbacks lock is poisoned.
    fn register_callback<F>(&self, job_id: &str, callback: F) -> QuantRS2Result<()>
    where
        F: Fn(&JobEvent) + Send + Sync + 'static,
    {
        let mut callbacks = self.callbacks.lock().map_err(|_| {
            QuantRS2Error::InvalidInput("Failed to acquire callbacks lock".to_string())
        })?;
        callbacks
            .entry(job_id.to_string())
            .or_default()
            .push(Box::new(callback));
        Ok(())
    }

    /// Builds a timestamped [`JobEvent`] and invokes every handler
    /// registered for `job_id`.
    ///
    /// The callbacks mutex is held while handlers run, so a handler must
    /// not call `register_callback` (it would deadlock on this mutex).
    ///
    /// # Errors
    /// Returns `QuantRS2Error::InvalidInput` if the callbacks lock is poisoned.
    fn trigger_callback(
        &self,
        job_id: &str,
        event_type: JobEventType,
        data: JobEventData,
    ) -> QuantRS2Result<()> {
        let callbacks = self.callbacks.lock().map_err(|_| {
            QuantRS2Error::InvalidInput("Failed to acquire callbacks lock".to_string())
        })?;
        if let Some(handlers) = callbacks.get(job_id) {
            let event = JobEvent {
                event_type,
                job_id: job_id.to_string(),
                data,
                timestamp: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs(),
            };
            for handler in handlers {
                handler(&event);
            }
        }
        Ok(())
    }
}
impl CalibrationTracker {
fn new() -> Self {
Self {
calibrations: Arc::new(RwLock::new(HashMap::new())),
history: Arc::new(Mutex::new(HashMap::new())),
last_updates: Arc::new(Mutex::new(HashMap::new())),
}
}
fn update_calibration(
&self,
backend: &str,
calibration: CalibrationData,
) -> QuantRS2Result<()> {
let avg_single = if calibration.single_qubit_errors.is_empty() {
0.0
} else {
calibration.single_qubit_errors.values().sum::<f64>()
/ calibration.single_qubit_errors.len() as f64
};
let avg_two = if calibration.two_qubit_errors.is_empty() {
0.0
} else {
calibration.two_qubit_errors.values().sum::<f64>()
/ calibration.two_qubit_errors.len() as f64
};
let avg_readout = if calibration.readout_errors.is_empty() {
0.0
} else {
calibration.readout_errors.values().sum::<f64>()
/ calibration.readout_errors.len() as f64
};
let snapshot = CalibrationSnapshot {
timestamp: calibration.timestamp,
avg_single_qubit_error: avg_single,
avg_two_qubit_error: avg_two,
avg_readout_error: avg_readout,
};
let mut calibrations = self.calibrations.write().map_err(|_| {
QuantRS2Error::InvalidInput("Failed to acquire calibrations lock".to_string())
})?;
calibrations.insert(backend.to_string(), calibration);
let mut history = self.history.lock().map_err(|_| {
QuantRS2Error::InvalidInput("Failed to acquire history lock".to_string())
})?;
history
.entry(backend.to_string())
.or_insert_with(VecDeque::new)
.push_back(snapshot);
let mut last_updates = self.last_updates.lock().map_err(|_| {
QuantRS2Error::InvalidInput("Failed to acquire last_updates lock".to_string())
})?;
last_updates.insert(backend.to_string(), Instant::now());
Ok(())
}
fn get_calibration(&self, backend: &str) -> QuantRS2Result<Option<CalibrationData>> {
let calibrations = self.calibrations.read().map_err(|_| {
QuantRS2Error::InvalidInput("Failed to acquire calibrations lock".to_string())
})?;
Ok(calibrations.get(backend).cloned())
}
}
impl EventStream {
    /// Creates an empty stream whose buffer will be capped at `max_size`
    /// events, with no subscribers yet.
    fn new(max_size: usize) -> Self {
        let buffer = Arc::new(Mutex::new(VecDeque::new()));
        let subscribers = Arc::new(Mutex::new(Vec::new()));
        Self {
            buffer,
            max_size,
            subscribers,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Manager construction yields usable (empty) statistics.
    #[test]
    fn test_realtime_manager_creation() {
        let config = RealtimeConfig::default();
        let manager = RealtimeHardwareManager::new(config);
        assert!(manager.get_stats().is_ok());
    }

    // Connect adds exactly one connection; disconnect removes it.
    #[test]
    fn test_connect_disconnect() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let conn_id = manager
            .connect(HardwareProvider::IBMQuantum, "ibm_qasm_simulator")
            .expect("Connection should succeed");
        assert!(!conn_id.is_empty());
        let connections = manager
            .get_connections()
            .expect("Get connections should succeed");
        assert_eq!(connections.len(), 1);
        manager
            .disconnect(&conn_id)
            .expect("Disconnect should succeed");
        let connections = manager
            .get_connections()
            .expect("Get connections should succeed");
        assert_eq!(connections.len(), 0);
    }

    // A freshly submitted job starts Queued with zero progress.
    #[test]
    fn test_job_monitoring() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let conn_id = manager
            .connect(HardwareProvider::IBMQuantum, "ibm_qasm_simulator")
            .expect("Connection should succeed");
        manager
            .submit_job("job_123", &conn_id)
            .expect("Job submission should succeed");
        let status = manager
            .get_job_status("job_123")
            .expect("Get job status should succeed");
        assert_eq!(status, JobStatus::Queued);
        let progress = manager
            .get_job_progress("job_123")
            .expect("Get job progress should succeed");
        assert_eq!(progress, 0.0);
    }

    // Status/progress updates are visible through the getters.
    #[test]
    fn test_job_status_update() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let conn_id = manager
            .connect(HardwareProvider::IBMQuantum, "backend")
            .expect("Connection should succeed");
        manager
            .submit_job("job_456", &conn_id)
            .expect("Job submission should succeed");
        manager
            .update_job_status("job_456", JobStatus::Running, 0.5)
            .expect("Status update should succeed");
        let status = manager
            .get_job_status("job_456")
            .expect("Get job status should succeed");
        assert_eq!(status, JobStatus::Running);
        let progress = manager
            .get_job_progress("job_456")
            .expect("Get job progress should succeed");
        assert_eq!(progress, 0.5);
    }

    // Partial results are stored in order and their counts preserved.
    #[test]
    fn test_partial_results() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let conn_id = manager
            .connect(HardwareProvider::GoogleQuantumAI, "backend")
            .expect("Connection should succeed");
        manager
            .submit_job("job_789", &conn_id)
            .expect("Job submission should succeed");
        let mut counts = HashMap::new();
        counts.insert("00".to_string(), 450);
        counts.insert("11".to_string(), 550);
        manager
            .add_partial_result("job_789", counts)
            .expect("Add partial result should succeed");
        let results = manager
            .get_partial_results("job_789")
            .expect("Get partial results should succeed");
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].counts.get("00"), Some(&450));
    }

    // Calibration data round-trips through update/get.
    #[test]
    fn test_calibration_tracking() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let mut single_qubit_errors = HashMap::new();
        single_qubit_errors.insert(0, 0.001);
        single_qubit_errors.insert(1, 0.002);
        single_qubit_errors.insert(2, 0.0015);
        let calibration = CalibrationData {
            backend: "test_backend".to_string(),
            timestamp: 12_345,
            single_qubit_errors,
            two_qubit_errors: HashMap::new(),
            readout_errors: HashMap::new(),
            t1_times: HashMap::new(),
            t2_times: HashMap::new(),
            gate_durations: HashMap::new(),
            connectivity: vec![(0, 1), (1, 2)],
        };
        manager
            .update_calibration("test_backend", calibration)
            .expect("Calibration update should succeed");
        let cal = manager
            .get_calibration("test_backend")
            .expect("Get calibration should succeed");
        assert!(cal.is_some());
        assert_eq!(
            cal.expect("Calibration data should exist")
                .single_qubit_errors
                .len(),
            3
        );
    }

    // Optimal-qubit selection picks the lowest-error qubits (1 and 3 here).
    #[test]
    fn test_optimal_qubits() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let mut single_qubit_errors = HashMap::new();
        single_qubit_errors.insert(0, 0.005);
        single_qubit_errors.insert(1, 0.001);
        single_qubit_errors.insert(2, 0.003);
        single_qubit_errors.insert(3, 0.002);
        let calibration = CalibrationData {
            backend: "backend".to_string(),
            timestamp: 12_345,
            single_qubit_errors,
            two_qubit_errors: HashMap::new(),
            readout_errors: HashMap::new(),
            t1_times: HashMap::new(),
            t2_times: HashMap::new(),
            gate_durations: HashMap::new(),
            connectivity: vec![],
        };
        manager
            .update_calibration("backend", calibration)
            .expect("Calibration update should succeed");
        let optimal = manager
            .get_optimal_qubits("backend", 2)
            .expect("Get optimal qubits should succeed");
        assert_eq!(optimal.len(), 2);
        assert!(optimal.contains(&1));
        assert!(optimal.contains(&3));
    }

    // Known connected backends report available; unknown ids report false.
    #[test]
    fn test_backend_availability() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let conn_id = manager
            .connect(HardwareProvider::AmazonBraket, "backend")
            .expect("Connection should succeed");
        assert!(manager
            .is_backend_available(&conn_id)
            .expect("Backend availability check should succeed"));
        assert!(!manager
            .is_backend_available("nonexistent")
            .expect("Backend availability check should succeed"));
    }

    // Completion/failure counters track terminal status updates.
    #[test]
    fn test_statistics() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let conn_id = manager
            .connect(HardwareProvider::IonQ, "backend")
            .expect("Connection should succeed");
        manager
            .submit_job("job_a", &conn_id)
            .expect("Job submission should succeed");
        manager
            .submit_job("job_b", &conn_id)
            .expect("Job submission should succeed");
        manager
            .update_job_status("job_a", JobStatus::Completed, 1.0)
            .expect("Status update should succeed");
        manager
            .update_job_status("job_b", JobStatus::Failed, 0.5)
            .expect("Status update should succeed");
        let stats = manager.get_stats().expect("Get stats should succeed");
        assert_eq!(stats.jobs_monitored, 2);
        assert_eq!(stats.jobs_completed, 1);
        assert_eq!(stats.jobs_failed, 1);
    }

    // Default config values match the documented constants.
    #[test]
    fn test_config_defaults() {
        let config = RealtimeConfig::default();
        assert_eq!(config.polling_interval_ms, 500);
        assert!(config.enable_streaming);
        assert_eq!(config.max_event_buffer, 1000);
        assert!(config.enable_adaptive_mitigation);
        assert_eq!(config.max_concurrent_jobs, 10);
    }

    // Distinct provider/backend pairs coexist as separate connections.
    #[test]
    fn test_multiple_providers() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        manager
            .connect(HardwareProvider::IBMQuantum, "ibm_backend")
            .expect("IBM connection should succeed");
        manager
            .connect(HardwareProvider::GoogleQuantumAI, "google_backend")
            .expect("Google connection should succeed");
        manager
            .connect(HardwareProvider::AzureQuantum, "azure_backend")
            .expect("Azure connection should succeed");
        let connections = manager
            .get_connections()
            .expect("Get connections should succeed");
        assert_eq!(connections.len(), 3);
    }

    // A job driven through Running -> Completed ends in Completed state.
    #[test]
    fn test_job_completion() {
        let config = RealtimeConfig::default();
        let mut manager = RealtimeHardwareManager::new(config);
        let conn_id = manager
            .connect(HardwareProvider::Rigetti, "backend")
            .expect("Connection should succeed");
        manager
            .submit_job("job_complete", &conn_id)
            .expect("Job submission should succeed");
        manager
            .update_job_status("job_complete", JobStatus::Running, 0.0)
            .expect("Status update should succeed");
        manager
            .update_job_status("job_complete", JobStatus::Running, 0.5)
            .expect("Status update should succeed");
        manager
            .update_job_status("job_complete", JobStatus::Completed, 1.0)
            .expect("Status update should succeed");
        let status = manager
            .get_job_status("job_complete")
            .expect("Get job status should succeed");
        assert_eq!(status, JobStatus::Completed);
    }
}