// quantrs2_device/distributed/types.rs

1//! Common types and data structures for distributed orchestration
2
3use serde::{Deserialize, Serialize};
4use std::collections::HashMap;
5use std::net::SocketAddr;
6use std::time::{Duration, Instant, SystemTime};
7
8// Main orchestrator type
/// Central coordinator for distributed quantum-circuit execution.
///
/// Placeholder: orchestration state and logic have not been implemented yet.
/// `Default` is derived so early integration code can construct the stub
/// uniformly (`DistributedQuantumOrchestrator::default()`).
#[derive(Debug, Default)]
pub struct DistributedQuantumOrchestrator {
    // Implementation will be added
}
13
14// Core execution types
/// Outcome of one distributed execution request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DistributedExecutionResult {
    /// Identifier of the execution this result belongs to.
    pub execution_id: String,
    /// Final (or most recently observed) lifecycle state of the execution.
    pub status: DistributedExecutionStatus,
    /// Result payload as name -> stringified value.
    // NOTE(review): both keys and values are plain strings — confirm the
    // intended schema (e.g. register name -> bitstring) with producers.
    pub results: HashMap<String, String>,
    /// Aggregated performance measurements for this execution.
    pub performance_metrics: DistributedPerformanceAnalytics,
    /// Total time spent on the execution.
    // assumes end-to-end duration — TODO confirm whether queueing is included
    pub execution_time: Duration,
}
23
24#[derive(Debug, Clone, Serialize, Deserialize)]
25pub enum DistributedExecutionStatus {
26    Pending,
27    Running,
28    Completed,
29    Failed,
30    Cancelled,
31}
32
33// Node information
/// Identity, network location, and runtime state of a node in the cluster.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeInfo {
    /// Cluster-unique identifier of the node.
    pub node_id: String,
    /// Network address the node is reachable at.
    pub address: SocketAddr,
    /// Capabilities advertised by the node.
    pub capabilities: NodeCapabilities,
    /// Current availability state of the node.
    pub status: NodeStatus,
    /// Time the last heartbeat was received from this node, if any.
    /// Skipped by serde because `Instant` is process-local and not
    /// serializable; deserialized values therefore start as `None`.
    #[serde(skip)]
    pub last_heartbeat: Option<Instant>,
}
43
44#[derive(Debug, Clone, Serialize, Deserialize)]
45pub struct NodeCapabilities {
46    pub max_qubits: u32,
47    pub supported_gates: Vec<String>,
48    pub connectivity: HashMap<u32, Vec<u32>>,
49    pub error_rates: HashMap<String, f64>,
50}
51
52#[derive(Debug, Clone, Serialize, Deserialize)]
53pub enum NodeStatus {
54    Available,
55    Busy,
56    Offline,
57    Maintenance,
58    Error,
59}
60
61// Workflow types
/// A multi-step workflow to be executed across the cluster.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DistributedWorkflow {
    /// Unique identifier of the workflow.
    pub workflow_id: String,
    /// Execution strategy for the workflow's steps.
    pub workflow_type: DistributedWorkflowType,
    /// Ordered list of step identifiers.
    // NOTE(review): steps are bare strings — presumably ids resolved
    // elsewhere; confirm against the scheduler.
    pub steps: Vec<String>,
    /// Dependency edges between steps: step id -> prerequisite step ids.
    // assumes the key depends on the listed values — TODO confirm direction
    pub dependencies: HashMap<String, Vec<String>>,
}
69
70#[derive(Debug, Clone, Serialize, Deserialize)]
71pub enum DistributedWorkflowType {
72    Sequential,
73    Parallel,
74    ConditionalBranching,
75    IterativeLoop,
76    EventDriven,
77}
78
79// Event and command types
80#[derive(Debug, Clone, Serialize, Deserialize)]
81pub struct DistributedEvent {
82    pub event_id: String,
83    pub event_type: String,
84    pub timestamp: SystemTime,
85    pub data: HashMap<String, String>,
86}
87
88#[derive(Debug, Clone, Serialize, Deserialize)]
89pub struct DistributedCommand {
90    pub command_id: String,
91    pub command_type: String,
92    pub target_node: String,
93    pub parameters: HashMap<String, String>,
94}
95
96// Circuit decomposition
97#[derive(Debug, Clone, Serialize, Deserialize)]
98pub struct CircuitDecompositionResult {
99    pub subcircuits: Vec<String>,
100    pub dependencies: HashMap<String, Vec<String>>,
101    pub resource_requirements: HashMap<String, u32>,
102}
103
104// Default implementations
105impl Default for DistributedExecutionResult {
106    fn default() -> Self {
107        Self {
108            execution_id: "default".to_string(),
109            status: DistributedExecutionStatus::Pending,
110            results: HashMap::new(),
111            performance_metrics: DistributedPerformanceAnalytics::default(),
112            execution_time: Duration::from_secs(0),
113        }
114    }
115}
116
117impl Default for NodeCapabilities {
118    fn default() -> Self {
119        Self {
120            max_qubits: 5,
121            supported_gates: vec![
122                "X".to_string(),
123                "Y".to_string(),
124                "Z".to_string(),
125                "CNOT".to_string(),
126            ],
127            connectivity: HashMap::new(),
128            error_rates: HashMap::new(),
129        }
130    }
131}
132
133// Placeholder for analytics type
134#[derive(Debug, Clone, Serialize, Deserialize, Default)]
135pub struct DistributedPerformanceAnalytics {
136    pub throughput: f64,
137    pub latency: Duration,
138    pub error_rate: f64,
139    pub resource_utilization: f64,
140}