#![allow(clippy::elided_lifetimes_in_paths)]

use super::{GpuBackend, GpuBuffer, GpuKernel};
use crate::{
    error::{QuantRS2Error, QuantRS2Result},
    qubit::QubitId,
};
use scirs2_core::ndarray::Array2;
use scirs2_core::Complex64;
use std::sync::{Arc, Mutex};

/// CPU-resident buffer used by the fallback backend in place of device memory.
pub struct CpuBuffer {
    data: Arc<Mutex<Vec<Complex64>>>,
}

impl CpuBuffer {
    /// Allocate a zero-initialized buffer holding `size` complex amplitudes.
    pub fn new(size: usize) -> Self {
        Self {
            data: Arc::new(Mutex::new(vec![Complex64::new(0.0, 0.0); size])),
        }
    }

    /// Lock the buffer and return a guard over the underlying amplitude vector.
    pub fn data(&self) -> std::sync::MutexGuard<'_, Vec<Complex64>> {
        self.data.lock().unwrap()
    }
}

impl GpuBuffer for CpuBuffer {
    fn size(&self) -> usize {
        // Size in bytes, mirroring what a device allocation would report.
        self.data.lock().unwrap().len() * std::mem::size_of::<Complex64>()
    }

    fn upload(&mut self, data: &[Complex64]) -> QuantRS2Result<()> {
        let mut buffer = self.data.lock().unwrap();
        if buffer.len() != data.len() {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Buffer size mismatch: {} != {}",
                buffer.len(),
                data.len()
            )));
        }
        buffer.copy_from_slice(data);
        Ok(())
    }

    fn download(&self, data: &mut [Complex64]) -> QuantRS2Result<()> {
        let buffer = self.data.lock().unwrap();
        if buffer.len() != data.len() {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Buffer size mismatch: {} != {}",
                buffer.len(),
                data.len()
            )));
        }
        data.copy_from_slice(&buffer);
        Ok(())
    }

    fn sync(&self) -> QuantRS2Result<()> {
        // Host memory is always in sync; nothing to do.
        Ok(())
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
}

/// CPU implementation of the GPU kernel interface.
pub struct CpuKernel;

impl CpuKernel {
    /// Apply a dense row-major `gate` of dimension `indices.len() x indices.len()`
    /// to the state amplitudes at the given `indices`.
    fn apply_gate_to_indices(state: &mut [Complex64], gate: &[Complex64], indices: &[usize]) {
        let gate_size = indices.len();
        let mut temp = vec![Complex64::new(0.0, 0.0); gate_size];

        // Gather the affected amplitudes.
        for (i, &idx) in indices.iter().enumerate() {
            temp[i] = state[idx];
        }

        // Matrix-vector multiply and scatter the results back.
        for (i, &idx) in indices.iter().enumerate() {
            let mut sum = Complex64::new(0.0, 0.0);
            for j in 0..gate_size {
                sum += gate[i * gate_size + j] * temp[j];
            }
            state[idx] = sum;
        }
    }
}

impl GpuKernel for CpuKernel {
    fn apply_single_qubit_gate(
        &self,
        state: &mut dyn GpuBuffer,
        gate_matrix: &[Complex64; 4],
        qubit: QubitId,
        n_qubits: usize,
    ) -> QuantRS2Result<()> {
        let cpu_buffer = state
            .as_any_mut()
            .downcast_mut::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let mut data = cpu_buffer.data();
        let qubit_idx = qubit.0 as usize;
        let stride = 1 << qubit_idx;
        let pairs = 1 << (n_qubits - 1);

        for i in 0..pairs {
            // Insert a zero bit at `qubit_idx` to get the index with the qubit in |0>;
            // its partner index has that bit set.
            let i0 = ((i >> qubit_idx) << (qubit_idx + 1)) | (i & ((1 << qubit_idx) - 1));
            let i1 = i0 | stride;

            let a = data[i0];
            let b = data[i1];

            data[i0] = gate_matrix[0] * a + gate_matrix[1] * b;
            data[i1] = gate_matrix[2] * a + gate_matrix[3] * b;
        }

        Ok(())
    }

    fn apply_two_qubit_gate(
        &self,
        state: &mut dyn GpuBuffer,
        gate_matrix: &[Complex64; 16],
        control: QubitId,
        target: QubitId,
        n_qubits: usize,
    ) -> QuantRS2Result<()> {
        let cpu_buffer = state
            .as_any_mut()
            .downcast_mut::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let mut data = cpu_buffer.data();
        let control_idx = control.0 as usize;
        let target_idx = target.0 as usize;

        // Iterate over blocks defined by the higher of the two qubit indices.
        let (high_idx, low_idx) = if control_idx > target_idx {
            (control_idx, target_idx)
        } else {
            (target_idx, control_idx)
        };

        let control_stride = 1 << control_idx;
        let target_stride = 1 << target_idx;

        let state_size = 1 << n_qubits;
        let block_size = 1 << (high_idx + 1);
        let num_blocks = state_size / block_size;

        for block in 0..num_blocks {
            let block_start = block * block_size;

            for i in 0..(block_size / 4) {
                // Build the base index with both qubit bits cleared by inserting a
                // zero bit at `low_idx` (the `high_idx` bit is zero by construction).
                let base = block_start
                    + (i & ((1 << low_idx) - 1))
                    + ((i >> low_idx) << (low_idx + 1));

                // Order the four indices as |control, target> = |00>, |01>, |10>, |11>,
                // assuming the row-major 4x4 gate matrix uses that basis ordering.
                let indices = [
                    base,
                    base + target_stride,
                    base + control_stride,
                    base + control_stride + target_stride,
                ];

                Self::apply_gate_to_indices(&mut data, gate_matrix, &indices);
            }
        }

        Ok(())
    }

    fn apply_multi_qubit_gate(
        &self,
        state: &mut dyn GpuBuffer,
        gate_matrix: &Array2<Complex64>,
        qubits: &[QubitId],
        n_qubits: usize,
    ) -> QuantRS2Result<()> {
        let cpu_buffer = state
            .as_any_mut()
            .downcast_mut::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let mut data = cpu_buffer.data();
        let gate_qubits = qubits.len();
        let gate_dim = 1 << gate_qubits;

        if gate_matrix.dim() != (gate_dim, gate_dim) {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Gate matrix dimension mismatch: {:?} != ({}, {})",
                gate_matrix.dim(),
                gate_dim,
                gate_dim
            )));
        }

        // Flatten the gate into row-major order for apply_gate_to_indices.
        let gate_flat: Vec<Complex64> = gate_matrix.iter().copied().collect();

        let affected_states = 1 << gate_qubits;
        let unaffected_qubits = n_qubits - gate_qubits;
        let iterations = 1 << unaffected_qubits;

        // Note: the gate qubits are sorted, so bit k of a gate-basis index maps to
        // the k-th smallest qubit index.
        let mut qubit_indices: Vec<usize> = qubits.iter().map(|q| q.0 as usize).collect();
        qubit_indices.sort_unstable();

        for i in 0..iterations {
            let mut indices = vec![0; affected_states];

            // Distribute the bits of `i` over the unaffected qubit positions to form
            // the base index (all gate-qubit bits cleared).
            let mut base = 0;
            let mut remaining = i;
            let mut qubit_pos = 0;

            for bit in 0..n_qubits {
                if qubit_pos < gate_qubits && bit == qubit_indices[qubit_pos] {
                    qubit_pos += 1;
                } else {
                    if remaining & 1 == 1 {
                        base |= 1 << bit;
                    }
                    remaining >>= 1;
                }
            }

            // Enumerate all basis states of the gate qubits on top of the base index.
            for j in 0..affected_states {
                indices[j] = base;
                for (k, &qubit_idx) in qubit_indices.iter().enumerate() {
                    if (j >> k) & 1 == 1 {
                        indices[j] |= 1 << qubit_idx;
                    }
                }
            }

            Self::apply_gate_to_indices(&mut data, &gate_flat, &indices);
        }

        Ok(())
    }

    fn measure_qubit(
        &self,
        state: &dyn GpuBuffer,
        qubit: QubitId,
        n_qubits: usize,
    ) -> QuantRS2Result<(bool, f64)> {
        let cpu_buffer = state
            .as_any()
            .downcast_ref::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let data = cpu_buffer.data();
        let qubit_idx = qubit.0 as usize;

        // Probability of measuring |1> on the target qubit.
        let mut prob_one = 0.0;
        for i in 0..(1 << n_qubits) {
            if (i >> qubit_idx) & 1 == 1 {
                prob_one += data[i].norm_sqr();
            }
        }

        // Sample the outcome; the state itself is not collapsed here.
        use scirs2_core::random::prelude::*;
        let outcome = thread_rng().gen::<f64>() < prob_one;

        Ok((outcome, if outcome { prob_one } else { 1.0 - prob_one }))
    }

    fn expectation_value(
        &self,
        state: &dyn GpuBuffer,
        observable: &Array2<Complex64>,
        qubits: &[QubitId],
        n_qubits: usize,
    ) -> QuantRS2Result<f64> {
        let cpu_buffer = state
            .as_any()
            .downcast_ref::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let data = cpu_buffer.data();

        // Only single-qubit observables are supported for now.
        if qubits.len() != 1 || observable.dim() != (2, 2) {
            return Err(QuantRS2Error::UnsupportedOperation(
                "Only single-qubit observables supported currently".to_string(),
            ));
        }

        let qubit_idx = qubits[0].0 as usize;
        let stride = 1 << qubit_idx;
        let pairs = 1 << (n_qubits - 1);

        // Accumulate <psi|O|psi> over amplitude pairs that differ in the target qubit.
        let mut expectation = Complex64::new(0.0, 0.0);

        for i in 0..pairs {
            let i0 = ((i >> qubit_idx) << (qubit_idx + 1)) | (i & ((1 << qubit_idx) - 1));
            let i1 = i0 | stride;

            let a = data[i0];
            let b = data[i1];

            expectation += a.conj() * (observable[(0, 0)] * a + observable[(0, 1)] * b);
            expectation += b.conj() * (observable[(1, 0)] * a + observable[(1, 1)] * b);
        }

        // A Hermitian observable should yield a real expectation value.
        if expectation.im.abs() > 1e-10 {
            return Err(QuantRS2Error::InvalidInput(
                "Observable expectation value is not real".to_string(),
            ));
        }

        Ok(expectation.re)
    }
}

/// CPU fallback backend implementing the GPU backend interface on the host.
pub struct CpuBackend {
    kernel: CpuKernel,
}

impl CpuBackend {
    pub fn new() -> Self {
        Self { kernel: CpuKernel }
    }
}

impl Default for CpuBackend {
    fn default() -> Self {
        Self::new()
    }
}

impl GpuBackend for CpuBackend {
    fn is_available() -> bool {
        true
    }

    fn name(&self) -> &str {
        "CPU"
    }

    fn device_info(&self) -> String {
        format!("CPU backend with {} threads", rayon::current_num_threads())
    }

    fn allocate_state_vector(&self, n_qubits: usize) -> QuantRS2Result<Box<dyn GpuBuffer>> {
        let size = 1 << n_qubits;
        Ok(Box::new(CpuBuffer::new(size)))
    }

    fn allocate_density_matrix(&self, n_qubits: usize) -> QuantRS2Result<Box<dyn GpuBuffer>> {
        let size = 1 << (2 * n_qubits);
        Ok(Box::new(CpuBuffer::new(size)))
    }

    fn kernel(&self) -> &dyn GpuKernel {
        &self.kernel
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cpu_buffer() {
        let mut buffer = CpuBuffer::new(4);
        let data = vec![
            Complex64::new(1.0, 0.0),
            Complex64::new(0.0, 1.0),
            Complex64::new(-1.0, 0.0),
            Complex64::new(0.0, -1.0),
        ];

        buffer.upload(&data).unwrap();

        let mut downloaded = vec![Complex64::new(0.0, 0.0); 4];
        buffer.download(&mut downloaded).unwrap();

        assert_eq!(data, downloaded);
    }

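    // A minimal usage sketch for the single-qubit kernel path: apply a Pauli-X
    // gate to |0> and expect |1>. It assumes `QubitId` can be constructed as a
    // tuple struct (`QubitId(0)`), consistent with the `qubit.0` accesses above.
    #[test]
    fn test_cpu_kernel_single_qubit_x() {
        let kernel = CpuKernel;
        let mut buffer = CpuBuffer::new(2);
        buffer
            .upload(&[Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)])
            .unwrap();

        // Pauli-X in row-major order.
        let x_gate = [
            Complex64::new(0.0, 0.0),
            Complex64::new(1.0, 0.0),
            Complex64::new(1.0, 0.0),
            Complex64::new(0.0, 0.0),
        ];

        kernel
            .apply_single_qubit_gate(&mut buffer, &x_gate, QubitId(0), 1)
            .unwrap();

        let mut result = vec![Complex64::new(0.0, 0.0); 2];
        buffer.download(&mut result).unwrap();
        assert!(result[0].norm() < 1e-12);
        assert!((result[1] - Complex64::new(1.0, 0.0)).norm() < 1e-12);
    }
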
    #[test]
    fn test_cpu_backend() {
        let backend = CpuBackend::new();
        assert!(CpuBackend::is_available());
        assert_eq!(backend.name(), "CPU");

        let buffer = backend.allocate_state_vector(3).unwrap();
        assert_eq!(buffer.size(), 8 * std::mem::size_of::<Complex64>());
    }
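
    // A sketch exercising the observable path: <0|Z|0> should be +1 for the
    // single-qubit case the kernel currently supports. The Pauli-Z matrix is built
    // with `Array2::from_shape_vec`; `QubitId(0)` is assumed constructible as above.
    #[test]
    fn test_cpu_kernel_expectation_z() {
        let kernel = CpuKernel;
        let mut buffer = CpuBuffer::new(2);
        buffer
            .upload(&[Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)])
            .unwrap();

        let z = Array2::from_shape_vec(
            (2, 2),
            vec![
                Complex64::new(1.0, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(0.0, 0.0),
                Complex64::new(-1.0, 0.0),
            ],
        )
        .unwrap();

        let value = kernel
            .expectation_value(&buffer, &z, &[QubitId(0)], 1)
            .unwrap();
        assert!((value - 1.0).abs() < 1e-12);
    }

    // A sketch for `measure_qubit` on a deterministic state: measuring qubit 0 of
    // |1> must return outcome `true` with probability 1, independent of the RNG draw.
    #[test]
    fn test_cpu_kernel_measure_deterministic() {
        let kernel = CpuKernel;
        let mut buffer = CpuBuffer::new(2);
        buffer
            .upload(&[Complex64::new(0.0, 0.0), Complex64::new(1.0, 0.0)])
            .unwrap();

        let (outcome, prob) = kernel.measure_qubit(&buffer, QubitId(0), 1).unwrap();
        assert!(outcome);
        assert!((prob - 1.0).abs() < 1e-12);
    }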
}