#![allow(clippy::elided_lifetimes_in_paths)]

use super::{GpuBackend, GpuBuffer, GpuKernel};
use crate::{
    error::{QuantRS2Error, QuantRS2Result},
    qubit::QubitId,
};
use ndarray::Array2;
use num_complex::Complex64;
use std::sync::{Arc, Mutex};

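/// Host-memory buffer used in place of a device allocation: the amplitudes live
/// in a mutex-guarded `Vec<Complex64>` behind an `Arc`, so the kernels can lock
/// and mutate them in place.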
pub struct CpuBuffer {
    data: Arc<Mutex<Vec<Complex64>>>,
}

impl CpuBuffer {
    pub fn new(size: usize) -> Self {
        Self {
            data: Arc::new(Mutex::new(vec![Complex64::new(0.0, 0.0); size])),
        }
    }

    pub fn data(&self) -> std::sync::MutexGuard<'_, Vec<Complex64>> {
        self.data.lock().unwrap()
    }
}

impl GpuBuffer for CpuBuffer {
    fn size(&self) -> usize {
        self.data.lock().unwrap().len() * std::mem::size_of::<Complex64>()
    }

    fn upload(&mut self, data: &[Complex64]) -> QuantRS2Result<()> {
        let mut buffer = self.data.lock().unwrap();
        if buffer.len() != data.len() {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Buffer size mismatch: {} != {}",
                buffer.len(),
                data.len()
            )));
        }
        buffer.copy_from_slice(data);
        Ok(())
    }

    fn download(&self, data: &mut [Complex64]) -> QuantRS2Result<()> {
        let buffer = self.data.lock().unwrap();
        if buffer.len() != data.len() {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Buffer size mismatch: {} != {}",
                buffer.len(),
                data.len()
            )));
        }
        data.copy_from_slice(&buffer);
        Ok(())
    }

    fn sync(&self) -> QuantRS2Result<()> {
        Ok(())
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
}

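/// Gate-application kernels executed directly on the host CPU.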
pub struct CpuKernel;

impl CpuKernel {
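    /// Apply a dense `d x d` gate to the amplitudes at `indices` (length `d`):
    /// gather them into a scratch vector, then write back the matrix-vector product.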
    fn apply_gate_to_indices(state: &mut [Complex64], gate: &[Complex64], indices: &[usize]) {
        let gate_size = indices.len();
        let mut temp = vec![Complex64::new(0.0, 0.0); gate_size];

        for (i, &idx) in indices.iter().enumerate() {
            temp[i] = state[idx];
        }

        for (i, &idx) in indices.iter().enumerate() {
            let mut sum = Complex64::new(0.0, 0.0);
            for j in 0..gate_size {
                sum += gate[i * gate_size + j] * temp[j];
            }
            state[idx] = sum;
        }
    }
}

impl GpuKernel for CpuKernel {
    fn apply_single_qubit_gate(
        &self,
        state: &mut dyn GpuBuffer,
        gate_matrix: &[Complex64; 4],
        qubit: QubitId,
        n_qubits: usize,
    ) -> QuantRS2Result<()> {
        let cpu_buffer = state
            .as_any_mut()
            .downcast_mut::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let mut data = cpu_buffer.data();
        let qubit_idx = qubit.0 as usize;
        let stride = 1 << qubit_idx;
        let pairs = 1 << (n_qubits - 1);

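        // Enumerate the 2^(n_qubits - 1) index pairs (i0, i1) that differ only in the
        // target qubit's bit and apply the 2x2 gate to each pair in place.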
        for i in 0..pairs {
            let i0 = ((i >> qubit_idx) << (qubit_idx + 1)) | (i & ((1 << qubit_idx) - 1));
            let i1 = i0 | stride;

            let a = data[i0];
            let b = data[i1];

            data[i0] = gate_matrix[0] * a + gate_matrix[1] * b;
            data[i1] = gate_matrix[2] * a + gate_matrix[3] * b;
        }

        Ok(())
    }

    fn apply_two_qubit_gate(
        &self,
        state: &mut dyn GpuBuffer,
        gate_matrix: &[Complex64; 16],
        control: QubitId,
        target: QubitId,
        n_qubits: usize,
    ) -> QuantRS2Result<()> {
        let cpu_buffer = state
            .as_any_mut()
            .downcast_mut::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let mut data = cpu_buffer.data();
        let control_idx = control.0 as usize;
        let target_idx = target.0 as usize;

        let (high_idx, low_idx) = if control_idx > target_idx {
            (control_idx, target_idx)
        } else {
            (target_idx, control_idx)
        };

        let high_stride = 1 << high_idx;
        let low_stride = 1 << low_idx;

        let state_size = 1 << n_qubits;
        let block_size = 1 << (high_idx + 1);
        let num_blocks = state_size / block_size;

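        // Walk the state vector in blocks of 2^(high_idx + 1) amplitudes. Within each
        // block, `base` enumerates the offsets whose low and high qubit bits are both
        // zero; the four basis states differing in those two bits are updated together.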
        for block in 0..num_blocks {
            let block_start = block * block_size;

            for i in 0..(block_size / 4) {
                let base = block_start
                    + (i & ((1 << low_idx) - 1))
                    + ((i >> low_idx) << (low_idx + 1))
                    + ((i >> (high_idx - 1)) << (high_idx + 1));

                let indices = [
                    base,
                    base + low_stride,
                    base + high_stride,
                    base + low_stride + high_stride,
                ];

                Self::apply_gate_to_indices(&mut data, gate_matrix, &indices);
            }
        }

        Ok(())
    }

    fn apply_multi_qubit_gate(
        &self,
        state: &mut dyn GpuBuffer,
        gate_matrix: &Array2<Complex64>,
        qubits: &[QubitId],
        n_qubits: usize,
    ) -> QuantRS2Result<()> {
        let cpu_buffer = state
            .as_any_mut()
            .downcast_mut::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let mut data = cpu_buffer.data();
        let gate_qubits = qubits.len();
        let gate_dim = 1 << gate_qubits;

        if gate_matrix.dim() != (gate_dim, gate_dim) {
            return Err(QuantRS2Error::InvalidInput(format!(
                "Gate matrix dimension mismatch: {:?} != ({}, {})",
                gate_matrix.dim(),
                gate_dim,
                gate_dim
            )));
        }

        let gate_flat: Vec<Complex64> = gate_matrix.iter().cloned().collect();

        let _total_states = 1 << n_qubits;
        let affected_states = 1 << gate_qubits;
        let unaffected_qubits = n_qubits - gate_qubits;
        let iterations = 1 << unaffected_qubits;

        let mut qubit_indices: Vec<usize> = qubits.iter().map(|q| q.0 as usize).collect();
        qubit_indices.sort_unstable();

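        // Each iteration fixes one assignment of the qubits the gate does not act on:
        // the bits of `i` are spread over those positions to form `base`, then the
        // 2^gate_qubits amplitudes addressed by the gate qubits are gathered and transformed.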
        for i in 0..iterations {
            let mut indices = vec![0; affected_states];

            let mut base = 0;
            let mut remaining = i;
            let mut qubit_pos = 0;

            for bit in 0..n_qubits {
                if qubit_pos < gate_qubits && bit == qubit_indices[qubit_pos] {
                    qubit_pos += 1;
                } else {
                    if remaining & 1 == 1 {
                        base |= 1 << bit;
                    }
                    remaining >>= 1;
                }
            }

            for j in 0..affected_states {
                indices[j] = base;
                for (k, &qubit_idx) in qubit_indices.iter().enumerate() {
                    if (j >> k) & 1 == 1 {
                        indices[j] |= 1 << qubit_idx;
                    }
                }
            }

            Self::apply_gate_to_indices(&mut data, &gate_flat, &indices);
        }

        Ok(())
    }

    fn measure_qubit(
        &self,
        state: &dyn GpuBuffer,
        qubit: QubitId,
        n_qubits: usize,
    ) -> QuantRS2Result<(bool, f64)> {
        let cpu_buffer = state
            .as_any()
            .downcast_ref::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let data = cpu_buffer.data();
        let qubit_idx = qubit.0 as usize;
        let _stride = 1 << qubit_idx;

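        // Probability of reading |1>: sum |amplitude|^2 over basis states with the qubit bit set.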
        let mut prob_one = 0.0;
        for i in 0..(1 << n_qubits) {
            if (i >> qubit_idx) & 1 == 1 {
                prob_one += data[i].norm_sqr();
            }
        }

        let outcome = rand::random::<f64>() < prob_one;

        Ok((outcome, if outcome { prob_one } else { 1.0 - prob_one }))
    }

    fn expectation_value(
        &self,
        state: &dyn GpuBuffer,
        observable: &Array2<Complex64>,
        qubits: &[QubitId],
        n_qubits: usize,
    ) -> QuantRS2Result<f64> {
        let cpu_buffer = state
            .as_any()
            .downcast_ref::<CpuBuffer>()
            .ok_or_else(|| QuantRS2Error::InvalidInput("Expected CpuBuffer".to_string()))?;

        let data = cpu_buffer.data();

        if qubits.len() != 1 || observable.dim() != (2, 2) {
            return Err(QuantRS2Error::UnsupportedOperation(
                "Only single-qubit observables are currently supported".to_string(),
            ));
        }

        let qubit_idx = qubits[0].0 as usize;
        let stride = 1 << qubit_idx;
        let pairs = 1 << (n_qubits - 1);

        let mut expectation = Complex64::new(0.0, 0.0);

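        // Accumulate <psi|O|psi> pairwise: each (i0, i1) pair spans the |0>/|1> components
        // of the measured qubit with all other qubits held fixed.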
        for i in 0..pairs {
            let i0 = ((i >> qubit_idx) << (qubit_idx + 1)) | (i & ((1 << qubit_idx) - 1));
            let i1 = i0 | stride;

            let a = data[i0];
            let b = data[i1];

            expectation += a.conj() * (observable[(0, 0)] * a + observable[(0, 1)] * b);
            expectation += b.conj() * (observable[(1, 0)] * a + observable[(1, 1)] * b);
        }

        if expectation.im.abs() > 1e-10 {
            return Err(QuantRS2Error::InvalidInput(
                "Observable expectation value is not real".to_string(),
            ));
        }

        Ok(expectation.re)
    }
}

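/// `GpuBackend` implementation that runs entirely on the host CPU and is therefore
/// always reported as available.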
pub struct CpuBackend {
    kernel: CpuKernel,
}

impl CpuBackend {
    pub fn new() -> Self {
        Self { kernel: CpuKernel }
    }
}

impl Default for CpuBackend {
    fn default() -> Self {
        Self::new()
    }
}

impl GpuBackend for CpuBackend {
    fn is_available() -> bool {
        true
    }

    fn name(&self) -> &str {
        "CPU"
    }

    fn device_info(&self) -> String {
        format!("CPU backend with {} threads", rayon::current_num_threads())
    }

    fn allocate_state_vector(&self, n_qubits: usize) -> QuantRS2Result<Box<dyn GpuBuffer>> {
        let size = 1 << n_qubits;
        Ok(Box::new(CpuBuffer::new(size)))
    }

    fn allocate_density_matrix(&self, n_qubits: usize) -> QuantRS2Result<Box<dyn GpuBuffer>> {
        let size = 1 << (2 * n_qubits);
        Ok(Box::new(CpuBuffer::new(size)))
    }

    fn kernel(&self) -> &dyn GpuKernel {
        &self.kernel
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cpu_buffer() {
        let mut buffer = CpuBuffer::new(4);
        let data = vec![
            Complex64::new(1.0, 0.0),
            Complex64::new(0.0, 1.0),
            Complex64::new(-1.0, 0.0),
            Complex64::new(0.0, -1.0),
        ];

        buffer.upload(&data).unwrap();

        let mut downloaded = vec![Complex64::new(0.0, 0.0); 4];
        buffer.download(&mut downloaded).unwrap();

        assert_eq!(data, downloaded);
    }

    #[test]
    fn test_cpu_backend() {
        let backend = CpuBackend::new();
        assert!(CpuBackend::is_available());
        assert_eq!(backend.name(), "CPU");

        let buffer = backend.allocate_state_vector(3).unwrap();
        assert_eq!(buffer.size(), 8 * std::mem::size_of::<Complex64>());
    }
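
    // Illustrative end-to-end check of the kernel API: Pauli-X on |0> should give |1>.
    // Assumes `QubitId` can be constructed from its raw index as `QubitId(0)`, since
    // this module already reads the `.0` field directly.
    #[test]
    fn test_apply_single_qubit_gate_x() {
        let backend = CpuBackend::new();
        let mut buffer = backend.allocate_state_vector(1).unwrap();

        // Prepare |0> explicitly; freshly allocated buffers are zero-filled.
        buffer
            .upload(&[Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)])
            .unwrap();

        // Pauli-X in row-major order [m00, m01, m10, m11].
        let x_gate = [
            Complex64::new(0.0, 0.0),
            Complex64::new(1.0, 0.0),
            Complex64::new(1.0, 0.0),
            Complex64::new(0.0, 0.0),
        ];
        backend
            .kernel()
            .apply_single_qubit_gate(&mut *buffer, &x_gate, QubitId(0), 1)
            .unwrap();

        // X|0> = |1>: all amplitude should have moved to the second basis state.
        let mut out = vec![Complex64::new(0.0, 0.0); 2];
        buffer.download(&mut out).unwrap();
        assert_eq!(out[0], Complex64::new(0.0, 0.0));
        assert_eq!(out[1], Complex64::new(1.0, 0.0));
    }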
}