use crate::{
error::{QuantRS2Error, QuantRS2Result},
qubit::QubitId,
};
use scirs2_core::ndarray::Array2;
use scirs2_core::Complex64;
use super::{GpuBackend, GpuBuffer, GpuKernel};
/// Build the standard error returned by every CUDA stub entry point.
///
/// `context` names the operation that was attempted so the caller can
/// tell which API call hit the missing-runtime path.
///
/// Marked `#[cold]` rather than `#[inline(always)]`: this is an
/// always-error path that allocates a formatted `String`, so forcing
/// inlining only bloats every call site, while a cold hint improves
/// branch layout on the (hot) success paths of real backends.
#[cold]
fn cuda_unavailable(context: &str) -> QuantRS2Error {
    QuantRS2Error::UnsupportedOperation(format!(
        "CUDA backend not available in this build: {context}. \
         Enable the `cuda` feature and ensure CUDA runtime is installed."
    ))
}
/// Placeholder device buffer for the CUDA backend stub.
///
/// Only the logical element count is stored; no device memory is ever
/// allocated because this build has no CUDA runtime.
pub struct CudaBuffer {
    // Number of Complex64 amplitudes the buffer would hold on device.
    size_elements: usize,
}
impl GpuBuffer for CudaBuffer {
    /// Buffer size in bytes: element count times the size of one amplitude.
    fn size(&self) -> usize {
        std::mem::size_of::<Complex64>() * self.size_elements
    }

    /// Host-to-device copy; always fails in this stub build.
    fn upload(&mut self, _data: &[Complex64]) -> QuantRS2Result<()> {
        Err(cuda_unavailable("upload to device buffer"))
    }

    /// Device-to-host copy; always fails in this stub build.
    fn download(&self, _data: &mut [Complex64]) -> QuantRS2Result<()> {
        Err(cuda_unavailable("download from device buffer"))
    }

    /// Stream synchronization; always fails in this stub build.
    fn sync(&self) -> QuantRS2Result<()> {
        Err(cuda_unavailable("device stream synchronization"))
    }

    // Downcasting hooks so callers can recover the concrete buffer type.
    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
}
pub struct CudaKernel;
impl GpuKernel for CudaKernel {
    /// Apply a 2x2 gate matrix to one qubit; always fails in this stub build.
    fn apply_single_qubit_gate(
        &self,
        _state: &mut dyn GpuBuffer,
        _gate_matrix: &[Complex64; 4],
        _qubit: QubitId,
        _n_qubits: usize,
    ) -> QuantRS2Result<()> {
        Err(cuda_unavailable("apply_single_qubit_gate"))
    }

    /// Apply a 4x4 gate matrix to a control/target pair; always fails here.
    fn apply_two_qubit_gate(
        &self,
        _state: &mut dyn GpuBuffer,
        _gate_matrix: &[Complex64; 16],
        _control: QubitId,
        _target: QubitId,
        _n_qubits: usize,
    ) -> QuantRS2Result<()> {
        Err(cuda_unavailable("apply_two_qubit_gate"))
    }

    /// Apply an arbitrary-size gate matrix to a qubit set; always fails here.
    fn apply_multi_qubit_gate(
        &self,
        _state: &mut dyn GpuBuffer,
        _gate_matrix: &Array2<Complex64>,
        _qubits: &[QubitId],
        _n_qubits: usize,
    ) -> QuantRS2Result<()> {
        Err(cuda_unavailable("apply_multi_qubit_gate"))
    }

    /// Measure a single qubit; always fails in this stub build.
    fn measure_qubit(
        &self,
        _state: &dyn GpuBuffer,
        _qubit: QubitId,
        _n_qubits: usize,
    ) -> QuantRS2Result<(bool, f64)> {
        Err(cuda_unavailable("measure_qubit"))
    }

    /// Compute an observable expectation value; always fails in this stub build.
    fn expectation_value(
        &self,
        _state: &dyn GpuBuffer,
        _observable: &Array2<Complex64>,
        _qubits: &[QubitId],
        _n_qubits: usize,
    ) -> QuantRS2Result<f64> {
        Err(cuda_unavailable("expectation_value"))
    }
}
/// Stub CUDA backend. Construction always fails in builds without CUDA
/// runtime support, so the contained kernel is never actually used.
pub struct CudaBackend {
    // Stub kernel returned by `GpuBackend::kernel`.
    kernel: CudaKernel,
}
impl CudaBackend {
pub fn new() -> QuantRS2Result<Self> {
eprintln!(
"[quantrs2-core] WARNING: CUDA backend requested but no CUDA runtime is available. \
Falling back is recommended. Enable the `cuda` feature and install CUDA ≥ 11.x."
);
Err(QuantRS2Error::UnsupportedOperation(
"CUDA backend not available in this build. \
Compile with the `cuda` feature and ensure a CUDA-capable NVIDIA GPU is present."
.to_string(),
))
}
}
impl GpuBackend for CudaBackend {
    /// This stub build never has a usable CUDA runtime.
    fn is_available() -> bool {
        false
    }

    /// Human-readable backend identifier.
    fn name(&self) -> &'static str {
        "CUDA"
    }

    /// Short description of the device; reflects the stub status.
    fn device_info(&self) -> String {
        "CUDA backend (stub — no runtime available)".to_string()
    }

    /// Would allocate a 2^n-element state vector on device; always fails here.
    fn allocate_state_vector(&self, n_qubits: usize) -> QuantRS2Result<Box<dyn GpuBuffer>> {
        eprintln!(
            "[quantrs2-core] WARNING: CudaBackend::allocate_state_vector called for {n_qubits} qubits \
             but CUDA runtime is not available."
        );
        let context = format!("allocate_state_vector for {n_qubits} qubits");
        Err(cuda_unavailable(&context))
    }

    /// Would allocate a density matrix on device; always fails here.
    fn allocate_density_matrix(&self, n_qubits: usize) -> QuantRS2Result<Box<dyn GpuBuffer>> {
        eprintln!(
            "[quantrs2-core] WARNING: CudaBackend::allocate_density_matrix called for {n_qubits} qubits \
             but CUDA runtime is not available."
        );
        let context = format!("allocate_density_matrix for {n_qubits} qubits");
        Err(cuda_unavailable(&context))
    }

    /// Access the (stub) kernel dispatcher.
    fn kernel(&self) -> &dyn GpuKernel {
        &self.kernel
    }
}