// runmat_accelerate_api/lib.rs

use anyhow::anyhow;
use once_cell::sync::{Lazy, OnceCell};
use serde::{Deserialize, Serialize};
#[cfg(not(target_arch = "wasm32"))]
use std::cell::Cell;
use std::collections::{HashMap, HashSet};
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicU32, Ordering};
#[cfg(feature = "wgpu")]
use std::sync::Arc;
#[cfg(target_arch = "wasm32")]
use std::sync::Mutex;
use std::sync::RwLock;

type ResidencyMarkFn = fn(&GpuTensorHandle);
type ResidencyClearFn = fn(&GpuTensorHandle);
type SequenceThresholdFn = fn() -> Option<usize>;
type WorkgroupSizeHintFn = fn() -> Option<u32>;

static RESIDENCY_MARK: OnceCell<ResidencyMarkFn> = OnceCell::new();
static RESIDENCY_CLEAR: OnceCell<ResidencyClearFn> = OnceCell::new();
static SEQUENCE_THRESHOLD_PROVIDER: OnceCell<SequenceThresholdFn> = OnceCell::new();
static WORKGROUP_SIZE_HINT_PROVIDER: OnceCell<WorkgroupSizeHintFn> = OnceCell::new();

static LOGICAL_HANDLES: Lazy<RwLock<HashSet<u64>>> = Lazy::new(|| RwLock::new(HashSet::new()));
static LOGICAL_HANDLE_HITS: Lazy<RwLock<HashMap<u64, u64>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));
static TRANSPOSED_HANDLES: Lazy<RwLock<HashMap<u64, TransposeInfo>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));

static HANDLE_PRECISIONS: Lazy<RwLock<HashMap<u64, ProviderPrecision>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));
static HANDLE_STORAGES: Lazy<RwLock<HashMap<u64, GpuTensorStorage>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct TransposeInfo {
    pub base_rows: usize,
    pub base_cols: usize,
}

/// Register a callback used to mark residency tracking when GPU tensors are
/// created or returned by device-side execution paths.
pub fn register_residency_mark(handler: ResidencyMarkFn) {
    let _ = RESIDENCY_MARK.set(handler);
}

/// Mark residency metadata for the provided GPU tensor handle, if a backend
/// has registered a handler via [`register_residency_mark`].
pub fn mark_residency(handle: &GpuTensorHandle) {
    if let Some(handler) = RESIDENCY_MARK.get() {
        handler(handle);
    }
}

/// Register a callback used to clear residency tracking when GPU tensors are
/// gathered back to the host. Backends that maintain residency metadata should
/// install this hook during initialization.
pub fn register_residency_clear(handler: ResidencyClearFn) {
    let _ = RESIDENCY_CLEAR.set(handler);
}

/// Clear residency metadata for the provided GPU tensor handle, if a backend
/// has registered a handler via [`register_residency_clear`].
pub fn clear_residency(handle: &GpuTensorHandle) {
    if let Some(handler) = RESIDENCY_CLEAR.get() {
        handler(handle);
    }
}
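
// Hedged sketch: a backend could install the residency hooks once during its
// initialization. The hook bodies below are placeholders; a real backend would
// record or drop `handle.buffer_id` in its own residency tables.
//
//     fn on_mark(handle: &GpuTensorHandle) {
//         // record handle.buffer_id as device-resident
//     }
//     fn on_clear(handle: &GpuTensorHandle) {
//         // drop handle.buffer_id from the residency table
//     }
//
//     register_residency_mark(on_mark);
//     register_residency_clear(on_clear);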

/// Register a callback that exposes the current sequence length threshold
/// derived from the auto-offload planner. Array constructors can use this hint
/// to decide when to prefer GPU residency automatically.
pub fn register_sequence_threshold_provider(provider: SequenceThresholdFn) {
    let _ = SEQUENCE_THRESHOLD_PROVIDER.set(provider);
}

/// Query the currently registered sequence threshold hint, if any.
pub fn sequence_threshold_hint() -> Option<usize> {
    SEQUENCE_THRESHOLD_PROVIDER
        .get()
        .and_then(|provider| provider())
}

/// Register a callback that reports the calibrated workgroup size selected by
/// the active acceleration provider (if any). Plotting kernels can reuse this
/// hint to match backend tuning.
pub fn register_workgroup_size_hint_provider(provider: WorkgroupSizeHintFn) {
    let _ = WORKGROUP_SIZE_HINT_PROVIDER.set(provider);
}

/// Query the current workgroup size hint exposed by the provider.
pub fn workgroup_size_hint() -> Option<u32> {
    WORKGROUP_SIZE_HINT_PROVIDER
        .get()
        .and_then(|provider| provider())
}
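
// Hedged example of the hint plumbing: the hook types are plain `fn` pointers,
// so providers must be non-capturing functions. The threshold value here is
// illustrative only, not a real default.
//
//     fn planner_threshold() -> Option<usize> {
//         Some(4096)
//     }
//
//     register_sequence_threshold_provider(planner_threshold);
//     assert_eq!(sequence_threshold_hint(), Some(4096));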

/// Export a shared acceleration context (e.g., the active WGPU device) when the
/// current provider exposes one.
pub fn export_context(kind: AccelContextKind) -> Option<AccelContextHandle> {
    provider().and_then(|p| p.export_context(kind))
}

/// Request a provider-owned WGPU buffer for zero-copy consumers. Returns `None`
/// when the active provider does not expose buffers or does not support the
/// supplied handle.
#[cfg(feature = "wgpu")]
pub fn export_wgpu_buffer(handle: &GpuTensorHandle) -> Option<WgpuBufferRef> {
    provider().and_then(|p| p.export_wgpu_buffer(handle))
}

/// Record the precision associated with a GPU tensor handle so host operations can
/// reconstruct the original dtype when gathering back to the CPU.
pub fn set_handle_precision(handle: &GpuTensorHandle, precision: ProviderPrecision) {
    if let Ok(mut guard) = HANDLE_PRECISIONS.write() {
        guard.insert(handle.buffer_id, precision);
    }
}

/// Look up the recorded precision for a GPU tensor handle, if any.
pub fn handle_precision(handle: &GpuTensorHandle) -> Option<ProviderPrecision> {
    HANDLE_PRECISIONS
        .read()
        .ok()
        .and_then(|guard| guard.get(&handle.buffer_id).copied())
}

/// Clear any recorded precision metadata for a GPU tensor handle.
pub fn clear_handle_precision(handle: &GpuTensorHandle) {
    if let Ok(mut guard) = HANDLE_PRECISIONS.write() {
        guard.remove(&handle.buffer_id);
    }
}
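
// Hedged round-trip sketch: record the precision when a handle is created,
// consult it before gathering, and clear it when the buffer is freed.
//
//     set_handle_precision(&handle, ProviderPrecision::F32);
//     assert_eq!(handle_precision(&handle), Some(ProviderPrecision::F32));
//     clear_handle_precision(&handle);
//     assert_eq!(handle_precision(&handle), None);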

/// Annotate a GPU tensor handle as logically-typed (`logical` in MATLAB terms)
/// or clear the logical flag when `logical` is `false`.
pub fn set_handle_logical(handle: &GpuTensorHandle, logical: bool) {
    if let Ok(mut guard) = LOGICAL_HANDLES.write() {
        if logical {
            guard.insert(handle.buffer_id);
            if let Ok(mut hits) = LOGICAL_HANDLE_HITS.write() {
                *hits.entry(handle.buffer_id).or_insert(0) += 1;
            }
        } else {
            guard.remove(&handle.buffer_id);
            if let Ok(mut hits) = LOGICAL_HANDLE_HITS.write() {
                hits.remove(&handle.buffer_id);
            }
        }
    }
}

/// Convenience helper for clearing logical annotations explicitly.
pub fn clear_handle_logical(handle: &GpuTensorHandle) {
    set_handle_logical(handle, false);
}

/// Returns true when the supplied handle has been marked as logical.
pub fn handle_is_logical(handle: &GpuTensorHandle) -> bool {
    LOGICAL_HANDLES
        .read()
        .map(|guard| guard.contains(&handle.buffer_id))
        .unwrap_or(false)
}

pub fn handle_logical_hits(buffer_id: u64) -> Option<u64> {
    LOGICAL_HANDLE_HITS
        .read()
        .ok()
        .and_then(|guard| guard.get(&buffer_id).copied())
}
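
// Hedged sketch of the logical-flag lifecycle: each call that marks a handle
// as logical also bumps its hit counter, and clearing the flag removes the
// counter too (assuming this is the first mark for the handle).
//
//     set_handle_logical(&handle, true);
//     assert!(handle_is_logical(&handle));
//     assert_eq!(handle_logical_hits(handle.buffer_id), Some(1));
//     clear_handle_logical(&handle);
//     assert!(!handle_is_logical(&handle));
//     assert_eq!(handle_logical_hits(handle.buffer_id), None);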

pub fn record_handle_transpose(handle: &GpuTensorHandle, base_rows: usize, base_cols: usize) {
    if let Ok(mut guard) = TRANSPOSED_HANDLES.write() {
        guard.insert(
            handle.buffer_id,
            TransposeInfo {
                base_rows,
                base_cols,
            },
        );
    }
}

pub fn clear_handle_transpose(handle: &GpuTensorHandle) {
    if let Ok(mut guard) = TRANSPOSED_HANDLES.write() {
        guard.remove(&handle.buffer_id);
    }
}

pub fn handle_transpose_info(handle: &GpuTensorHandle) -> Option<TransposeInfo> {
    TRANSPOSED_HANDLES
        .read()
        .ok()
        .and_then(|guard| guard.get(&handle.buffer_id).copied())
}

pub fn handle_is_transposed(handle: &GpuTensorHandle) -> bool {
    handle_transpose_info(handle).is_some()
}
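
// Hedged sketch: a kernel producing a lazily transposed view of a 3x4 base
// buffer would record the base extents so later consumers can undo the swap.
//
//     record_handle_transpose(&handle, 3, 4);
//     assert!(handle_is_transposed(&handle));
//     assert_eq!(
//         handle_transpose_info(&handle),
//         Some(TransposeInfo { base_rows: 3, base_cols: 4 })
//     );
//     clear_handle_transpose(&handle);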

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum GpuTensorStorage {
    Real,
    ComplexInterleaved,
}

impl Default for GpuTensorStorage {
    fn default() -> Self {
        Self::Real
    }
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct GpuTensorHandle {
    pub shape: Vec<usize>,
    pub device_id: u32,
    pub buffer_id: u64,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ApiDeviceInfo {
    pub device_id: u32,
    pub name: String,
    pub vendor: String,
    pub memory_bytes: Option<u64>,
    pub backend: Option<String>,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ReduceDimResult {
    pub values: GpuTensorHandle,
    pub indices: GpuTensorHandle,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderCumminResult {
    pub values: GpuTensorHandle,
    pub indices: GpuTensorHandle,
}

/// Result payload returned by provider-side `cummax` scans.
///
/// Alias of [`ProviderCumminResult`] because both operations return the same pair of tensors
/// (running values and MATLAB-compatible indices).
pub type ProviderCummaxResult = ProviderCumminResult;

/// Names a shared acceleration context that callers may request (e.g. plotting).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccelContextKind {
    Plotting,
}

/// Handle returned by [`export_context`] that describes a shared GPU context.
#[derive(Clone)]
pub enum AccelContextHandle {
    #[cfg(feature = "wgpu")]
    Wgpu(WgpuContextHandle),
}

impl AccelContextHandle {
    /// Returns the underlying WGPU context when available.
    #[cfg(feature = "wgpu")]
    pub fn as_wgpu(&self) -> Option<&WgpuContextHandle> {
        match self {
            AccelContextHandle::Wgpu(ctx) => Some(ctx),
        }
    }
}

/// Shared WGPU device/queue pair exported by the acceleration provider.
#[cfg(feature = "wgpu")]
#[derive(Clone)]
pub struct WgpuContextHandle {
    pub instance: Arc<wgpu::Instance>,
    pub device: Arc<wgpu::Device>,
    pub queue: Arc<wgpu::Queue>,
    pub adapter: Arc<wgpu::Adapter>,
    pub adapter_info: wgpu::AdapterInfo,
    pub limits: wgpu::Limits,
    pub features: wgpu::Features,
}

/// Borrowed reference to a provider-owned WGPU buffer corresponding to a `GpuTensorHandle`.
#[cfg(feature = "wgpu")]
#[derive(Clone)]
pub struct WgpuBufferRef {
    pub buffer: Arc<wgpu::Buffer>,
    pub len: usize,
    pub shape: Vec<usize>,
    pub element_size: usize,
    pub precision: ProviderPrecision,
}

pub fn set_handle_storage(handle: &GpuTensorHandle, storage: GpuTensorStorage) {
    if let Ok(mut guard) = HANDLE_STORAGES.write() {
        guard.insert(handle.buffer_id, storage);
    }
}

pub fn handle_storage(handle: &GpuTensorHandle) -> GpuTensorStorage {
    HANDLE_STORAGES
        .read()
        .ok()
        .and_then(|guard| guard.get(&handle.buffer_id).cloned())
        .unwrap_or(GpuTensorStorage::Real)
}

pub fn clear_handle_storage(handle: &GpuTensorHandle) {
    if let Ok(mut guard) = HANDLE_STORAGES.write() {
        guard.remove(&handle.buffer_id);
    }
}
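
// Hedged sketch of the storage tag: handles default to `Real`, so only
// complex-interleaved buffers need an explicit tag, and clearing the tag
// restores the default.
//
//     set_handle_storage(&handle, GpuTensorStorage::ComplexInterleaved);
//     assert_eq!(handle_storage(&handle), GpuTensorStorage::ComplexInterleaved);
//     clear_handle_storage(&handle);
//     assert_eq!(handle_storage(&handle), GpuTensorStorage::Real);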

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum PagefunOp {
    Mtimes,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct PagefunRequest {
    pub op: PagefunOp,
    pub inputs: Vec<GpuTensorHandle>,
    pub output_shape: Vec<usize>,
    pub page_dims: Vec<usize>,
    pub input_page_dims: Vec<Vec<usize>>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum FindDirection {
    First,
    Last,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderFindResult {
    pub linear: GpuTensorHandle,
    pub rows: GpuTensorHandle,
    pub cols: GpuTensorHandle,
    pub values: Option<GpuTensorHandle>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProviderBandwidth {
    pub lower: u32,
    pub upper: u32,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderSymmetryKind {
    Symmetric,
    Skew,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderHermitianKind {
    Hermitian,
    Skew,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderLuResult {
    pub combined: GpuTensorHandle,
    pub lower: GpuTensorHandle,
    pub upper: GpuTensorHandle,
    pub perm_matrix: GpuTensorHandle,
    pub perm_vector: GpuTensorHandle,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderCholResult {
    pub factor: GpuTensorHandle,
    /// MATLAB-compatible failure index (0 indicates success).
    pub info: u32,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderQrResult {
    pub q: GpuTensorHandle,
    pub r: GpuTensorHandle,
    pub perm_matrix: GpuTensorHandle,
    pub perm_vector: GpuTensorHandle,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderQrPowerIterResult {
    pub q: GpuTensorHandle,
    pub r: GpuTensorHandle,
    pub perm_matrix: GpuTensorHandle,
    pub perm_vector: GpuTensorHandle,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Default)]
pub struct ProviderLinsolveOptions {
    pub lower: bool,
    pub upper: bool,
    pub rectangular: bool,
    pub transposed: bool,
    pub conjugate: bool,
    pub symmetric: bool,
    pub posdef: bool,
    pub need_rcond: bool,
    pub rcond: Option<f64>,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderLinsolveResult {
    pub solution: GpuTensorHandle,
    pub reciprocal_condition: f64,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Default)]
pub struct ProviderPinvOptions {
    pub tolerance: Option<f64>,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub struct ProviderPolyvalMu {
    pub mean: f64,
    pub scale: f64,
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Default)]
pub struct ProviderPolyvalOptions {
    pub mu: Option<ProviderPolyvalMu>,
}
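
// Assuming MATLAB `polyval` semantics for `mu`: when present, providers are
// expected to evaluate the polynomial at the centred and scaled points
// x_hat = (x - mean) / scale rather than at `points` directly.
//
//     let opts = ProviderPolyvalOptions {
//         mu: Some(ProviderPolyvalMu { mean: 2.0, scale: 0.5 }),
//     };
//     // a point x = 2.5 is evaluated at x_hat = (2.5 - 2.0) / 0.5 = 1.0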

#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProviderInvOptions {}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderPolyfitResult {
    pub coefficients: Vec<f64>,
    pub r_matrix: Vec<f64>,
    pub normr: f64,
    pub df: f64,
    pub mu: [f64; 2],
}

/// Numerator/denominator payload returned by provider-backed `polyder` quotient rule.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderPolyderQuotient {
    pub numerator: GpuTensorHandle,
    pub denominator: GpuTensorHandle,
}

/// Supported norm specifications for the `cond` builtin.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderCondNorm {
    Two,
    One,
    Inf,
    Fro,
}

/// Supported norm orders for the `norm` builtin.
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum ProviderNormOrder {
    Two,
    One,
    Inf,
    NegInf,
    Zero,
    Fro,
    Nuc,
    P(f64),
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderEigResult {
    pub eigenvalues: GpuTensorHandle,
    pub diagonal: GpuTensorHandle,
    pub right: GpuTensorHandle,
    pub left: Option<GpuTensorHandle>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderQrPivot {
    Matrix,
    Vector,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProviderQrOptions {
    pub economy: bool,
    pub pivot: ProviderQrPivot,
}

impl Default for ProviderQrOptions {
    fn default() -> Self {
        Self {
            economy: false,
            pivot: ProviderQrPivot::Matrix,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderPrecision {
    F32,
    F64,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ReductionTwoPassMode {
    Auto,
    ForceOn,
    ForceOff,
}

impl ReductionTwoPassMode {
    pub fn as_str(self) -> &'static str {
        match self {
            ReductionTwoPassMode::Auto => "auto",
            ReductionTwoPassMode::ForceOn => "force_on",
            ReductionTwoPassMode::ForceOff => "force_off",
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum ReductionFlavor {
    Sum,
    Mean,
    CustomScale(f64),
}

impl ReductionFlavor {
    pub fn is_mean(self) -> bool {
        matches!(self, ReductionFlavor::Mean)
    }

    pub fn scale(self, reduce_len: usize) -> f64 {
        match self {
            ReductionFlavor::Sum => 1.0,
            ReductionFlavor::Mean => {
                if reduce_len == 0 {
                    1.0
                } else {
                    1.0 / reduce_len as f64
                }
            }
            ReductionFlavor::CustomScale(scale) => scale,
        }
    }
}
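
// Worked examples of `scale`: a mean over 4 elements multiplies the summed
// value by 1/4, while an empty reduction degenerates to the identity scale
// instead of dividing by zero.
//
//     assert_eq!(ReductionFlavor::Sum.scale(1000), 1.0);
//     assert_eq!(ReductionFlavor::Mean.scale(4), 0.25);
//     assert_eq!(ReductionFlavor::Mean.scale(0), 1.0);
//     assert_eq!(ReductionFlavor::CustomScale(0.5).scale(7), 0.5);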

/// Normalisation mode for correlation coefficients.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CorrcoefNormalization {
    Unbiased,
    Biased,
}

/// Row-selection strategy for correlation coefficients.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CorrcoefRows {
    All,
    Complete,
    Pairwise,
}

/// Options controlling provider-backed correlation coefficient computation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct CorrcoefOptions {
    pub normalization: CorrcoefNormalization,
    pub rows: CorrcoefRows,
}

impl Default for CorrcoefOptions {
    fn default() -> Self {
        Self {
            normalization: CorrcoefNormalization::Unbiased,
            rows: CorrcoefRows::All,
        }
    }
}

/// Normalisation mode used by covariance computations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CovNormalization {
    Unbiased,
    Biased,
}

/// Row handling strategy for covariance computations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CovRows {
    All,
    OmitRows,
    PartialRows,
}

/// Options controlling provider-backed covariance computation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct CovarianceOptions {
    pub normalization: CovNormalization,
    pub rows: CovRows,
    pub has_weight_vector: bool,
}

impl Default for CovarianceOptions {
    fn default() -> Self {
        Self {
            normalization: CovNormalization::Unbiased,
            rows: CovRows::All,
            has_weight_vector: false,
        }
    }
}

/// Normalization strategy used by provider-backed standard deviation reductions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderStdNormalization {
    Sample,
    Population,
}

/// NaN handling mode for provider-backed reductions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderNanMode {
    Include,
    Omit,
}

/// Direction used when computing prefix sums on the device.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderScanDirection {
    Forward,
    Reverse,
}

/// Sort direction used by acceleration providers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum SortOrder {
    Ascend,
    Descend,
}

/// Comparison strategy applied during sorting.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum SortComparison {
    Auto,
    Real,
    Abs,
}

/// Host-resident outputs returned by provider-backed sort operations.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct SortResult {
    pub values: HostTensorOwned,
    pub indices: HostTensorOwned,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct SortRowsColumnSpec {
    pub index: usize,
    pub order: SortOrder,
}

/// Ordering applied by provider-backed `unique` operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum UniqueOrder {
    Sorted,
    Stable,
}

/// Occurrence selection for provider-backed `unique` operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum UniqueOccurrence {
    First,
    Last,
}

/// Options controlling provider-backed `unique` operations.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct UniqueOptions {
    pub rows: bool,
    pub order: UniqueOrder,
    pub occurrence: UniqueOccurrence,
}

/// Host-resident outputs returned by provider-backed `unique` operations.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct UniqueResult {
    pub values: HostTensorOwned,
    pub ia: HostTensorOwned,
    pub ic: HostTensorOwned,
}

/// Ordering applied by provider-backed `union` operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum UnionOrder {
    Sorted,
    Stable,
}

/// Options controlling provider-backed `union` operations.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct UnionOptions {
    pub rows: bool,
    pub order: UnionOrder,
}

/// Host-resident outputs returned by provider-backed `union` operations.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct UnionResult {
    pub values: HostTensorOwned,
    pub ia: HostTensorOwned,
    pub ib: HostTensorOwned,
}

/// Parameterisation of 2-D filters generated by `fspecial`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum FspecialFilter {
    Average {
        rows: u32,
        cols: u32,
    },
    Disk {
        radius: f64,
        size: u32,
    },
    Gaussian {
        rows: u32,
        cols: u32,
        sigma: f64,
    },
    Laplacian {
        alpha: f64,
    },
    Log {
        rows: u32,
        cols: u32,
        sigma: f64,
    },
    Motion {
        length: u32,
        kernel_size: u32,
        angle_degrees: f64,
        oversample: u32,
    },
    Prewitt,
    Sobel,
    Unsharp {
        alpha: f64,
    },
}

/// Request dispatched to acceleration providers for `fspecial` kernels.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct FspecialRequest {
    pub filter: FspecialFilter,
}

/// Padding strategy used by `imfilter`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ImfilterPadding {
    Constant,
    Replicate,
    Symmetric,
    Circular,
}

/// Output sizing mode used by `imfilter`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ImfilterShape {
    Same,
    Full,
    Valid,
}

/// Correlation vs convolution behaviour for `imfilter`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ImfilterMode {
    Correlation,
    Convolution,
}

/// Options supplied to acceleration providers for `imfilter`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ImfilterOptions {
    pub padding: ImfilterPadding,
    pub constant_value: f64,
    pub shape: ImfilterShape,
    pub mode: ImfilterMode,
}

impl Default for ImfilterOptions {
    fn default() -> Self {
        Self {
            padding: ImfilterPadding::Constant,
            constant_value: 0.0,
            shape: ImfilterShape::Same,
            mode: ImfilterMode::Correlation,
        }
    }
}
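
// Hedged example: the options corresponding to MATLAB's
// `imfilter(A, h, 'replicate', 'same', 'conv')` would be expressed as:
//
//     let opts = ImfilterOptions {
//         padding: ImfilterPadding::Replicate,
//         constant_value: 0.0, // ignored for non-constant padding
//         shape: ImfilterShape::Same,
//         mode: ImfilterMode::Convolution,
//     };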

/// Ordering applied by provider-backed `setdiff` operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum SetdiffOrder {
    Sorted,
    Stable,
}

/// Options controlling provider-backed `setdiff` operations.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SetdiffOptions {
    pub rows: bool,
    pub order: SetdiffOrder,
}

/// Host-resident outputs returned by provider-backed `setdiff` operations.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct SetdiffResult {
    pub values: HostTensorOwned,
    pub ia: HostTensorOwned,
}

/// Options controlling provider-backed `ismember` operations.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct IsMemberOptions {
    pub rows: bool,
}

/// Host-resident logical output returned by providers.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HostLogicalOwned {
    pub data: Vec<u8>,
    pub shape: Vec<usize>,
}

/// Host-resident outputs returned by provider-backed `ismember` operations.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct IsMemberResult {
    pub mask: HostLogicalOwned,
    pub loc: HostTensorOwned,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderConvMode {
    Full,
    Same,
    Valid,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProviderConvOrientation {
    Row,
    Column,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProviderConv1dOptions {
    pub mode: ProviderConvMode,
    pub orientation: ProviderConvOrientation,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderIirFilterOptions {
    /// Zero-based dimension along which filtering should be applied.
    pub dim: usize,
    /// Optional initial conditions (state vector) residing on the device.
    pub zi: Option<GpuTensorHandle>,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderIirFilterResult {
    /// Filtered output tensor, matching the input signal shape.
    pub output: GpuTensorHandle,
    /// Final conditions for the filter state (same shape as the requested `zi` layout).
    pub final_state: Option<GpuTensorHandle>,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProviderMoments2 {
    pub mean: GpuTensorHandle,
    pub ex2: GpuTensorHandle,
}

#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProviderDispatchStats {
    /// Number of GPU dispatches recorded for this category.
    pub count: u64,
    /// Accumulated wall-clock time of dispatches in nanoseconds (host measured).
    pub total_wall_time_ns: u64,
}

#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProviderFallbackStat {
    pub reason: String,
    pub count: u64,
}

#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ProviderTelemetry {
    pub fused_elementwise: ProviderDispatchStats,
    pub fused_reduction: ProviderDispatchStats,
    pub matmul: ProviderDispatchStats,
    pub linsolve: ProviderDispatchStats,
    pub mldivide: ProviderDispatchStats,
    pub mrdivide: ProviderDispatchStats,
    pub upload_bytes: u64,
    pub download_bytes: u64,
    pub solve_fallbacks: Vec<ProviderFallbackStat>,
    pub fusion_cache_hits: u64,
    pub fusion_cache_misses: u64,
    pub bind_group_cache_hits: u64,
    pub bind_group_cache_misses: u64,
    /// Optional per-layout bind group cache counters (layout tags and their hit/miss counts)
    pub bind_group_cache_by_layout: Option<Vec<BindGroupLayoutTelemetry>>,
    /// Recent kernel launch metadata (bounded log; newest last)
    pub kernel_launches: Vec<KernelLaunchTelemetry>,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct BindGroupLayoutTelemetry {
    pub tag: String,
    pub hits: u64,
    pub misses: u64,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct KernelAttrTelemetry {
    pub key: String,
    pub value: u64,
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KernelLaunchTelemetry {
    pub kernel: String,
    pub precision: Option<String>,
    pub shape: Vec<KernelAttrTelemetry>,
    pub tuning: Vec<KernelAttrTelemetry>,
}

pub type AccelProviderFuture<'a, T> = Pin<Box<dyn Future<Output = anyhow::Result<T>> + 'a>>;
pub type AccelDownloadFuture<'a> = AccelProviderFuture<'a, crate::HostTensorOwned>;

fn unsupported_future<T>(message: &'static str) -> AccelProviderFuture<'static, T> {
    Box::pin(async move { Err(anyhow::anyhow!(message)) })
}
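
// Hedged sketch: a provider overriding one of the future-returning hooks below
// wraps its result the same way `unsupported_future` wraps an error.
// `dispatch_binary` is a hypothetical backend helper, not part of this API.
//
//     fn elem_add<'a>(
//         &'a self,
//         a: &'a GpuTensorHandle,
//         b: &'a GpuTensorHandle,
//     ) -> AccelProviderFuture<'a, GpuTensorHandle> {
//         Box::pin(async move { self.dispatch_binary("add", a, b) })
//     }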

/// Device/provider interface that backends implement and register into the runtime layer.
pub trait AccelProvider: Send + Sync {
    fn upload(&self, host: &crate::HostTensorView) -> anyhow::Result<GpuTensorHandle>;
    fn download<'a>(&'a self, h: &'a GpuTensorHandle) -> AccelDownloadFuture<'a>;
    fn free(&self, h: &GpuTensorHandle) -> anyhow::Result<()>;
    fn device_info(&self) -> String;
    fn device_id(&self) -> u32 {
        0
    }

    /// Export a shared GPU context handle, allowing downstream systems (plotting, visualization)
    /// to reuse the same device/queue without copying tensor data back to the host.
    fn export_context(&self, _kind: AccelContextKind) -> Option<AccelContextHandle> {
        None
    }

    /// Export a provider-owned WGPU buffer for zero-copy integrations.
    #[cfg(feature = "wgpu")]
    fn export_wgpu_buffer(&self, _handle: &GpuTensorHandle) -> Option<WgpuBufferRef> {
        let _ = _handle;
        None
    }

    /// Gather elements from `source` at the provided zero-based linear `indices`, materialising
    /// a dense tensor with the specified `output_shape`.
    fn gather_linear(
        &self,
        _source: &GpuTensorHandle,
        _indices: &[u32],
        _output_shape: &[usize],
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("gather_linear not supported by provider"))
    }

    /// Scatter the contents of `values` into `target` at the provided zero-based linear `indices`.
    ///
    /// The provider must ensure `values.len() == indices.len()` and update `target` in place.
    fn scatter_linear(
        &self,
        _target: &GpuTensorHandle,
        _indices: &[u32],
        _values: &GpuTensorHandle,
    ) -> anyhow::Result<()> {
        Err(anyhow::anyhow!("scatter_linear not supported by provider"))
    }

    /// Structured device information (optional to override). Default adapts from `device_info()`.
    fn device_info_struct(&self) -> ApiDeviceInfo {
        ApiDeviceInfo {
            device_id: 0,
            name: self.device_info(),
            vendor: String::new(),
            memory_bytes: None,
            backend: None,
        }
    }

    fn precision(&self) -> ProviderPrecision {
        ProviderPrecision::F64
    }

    /// Read a single scalar at linear index from a device tensor, returning it as f64.
    fn read_scalar(&self, _h: &GpuTensorHandle, _linear_index: usize) -> anyhow::Result<f64> {
        Err(anyhow::anyhow!("read_scalar not supported by provider"))
    }

    /// Allocate a zero-initialised tensor with the provided shape on the device.
    fn zeros(&self, _shape: &[usize]) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("zeros not supported by provider"))
    }

    /// Allocate a one-initialised tensor with the provided shape on the device.
    fn ones(&self, _shape: &[usize]) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("ones not supported by provider"))
    }

    /// Allocate a zero-initialised tensor matching the prototype tensor.
    fn zeros_like(&self, prototype: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        self.zeros(&prototype.shape)
    }

    /// Allocate a tensor filled with a constant value on the device.
    fn fill(&self, shape: &[usize], value: f64) -> anyhow::Result<GpuTensorHandle> {
        if value == 0.0 {
            return self.zeros(shape);
        }
        if let Ok(base) = self.zeros(shape) {
            match self.scalar_add(&base, value) {
                Ok(out) => {
                    let _ = self.free(&base);
                    return Ok(out);
                }
                Err(_) => {
                    let _ = self.free(&base);
                }
            }
        }
        let len: usize = shape.iter().copied().product();
        let data = vec![value; len];
        let view = HostTensorView { data: &data, shape };
        self.upload(&view)
    }
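
    // Note: the default `fill` above tries progressively cheaper device paths
    // before falling back to a host upload: `zeros` plus `scalar_add` stays on
    // the device, while the final branch materialises the constant host-side.
    // For example, fill(&[2, 3], 1.5) on a provider without those hooks uploads
    // vec![1.5; 6] with shape [2, 3].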

    /// Allocate a tensor filled with a constant value, matching a prototype's residency.
    fn fill_like(
        &self,
        prototype: &GpuTensorHandle,
        value: f64,
    ) -> anyhow::Result<GpuTensorHandle> {
        if value == 0.0 {
            return self.zeros_like(prototype);
        }
        if let Ok(base) = self.zeros_like(prototype) {
            match self.scalar_add(&base, value) {
                Ok(out) => {
                    let _ = self.free(&base);
                    return Ok(out);
                }
                Err(_) => {
                    let _ = self.free(&base);
                }
            }
        }
        self.fill(&prototype.shape, value)
    }

    /// Allocate a one-initialised tensor matching the prototype tensor.
    fn ones_like(&self, prototype: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        self.ones(&prototype.shape)
    }

    /// Allocate an identity tensor with ones along the leading diagonal of the first two axes.
    fn eye(&self, _shape: &[usize]) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("eye not supported by provider"))
    }

    /// Allocate an identity tensor matching the prototype tensor's shape.
    fn eye_like(&self, prototype: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        self.eye(&prototype.shape)
    }

    /// Construct MATLAB-style coordinate grids from axis vectors.
    fn meshgrid(&self, _axes: &[MeshgridAxisView<'_>]) -> anyhow::Result<ProviderMeshgridResult> {
        Err(anyhow::anyhow!("meshgrid not supported by provider"))
    }

    /// Construct a diagonal matrix from a vector-like tensor. `offset` matches MATLAB semantics.
    fn diag_from_vector(
        &self,
        _vector: &GpuTensorHandle,
        _offset: isize,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!(
            "diag_from_vector not supported by provider"
        ))
    }

    /// Extract a diagonal from a matrix-like tensor. The result is always a column vector.
    fn diag_extract(
        &self,
        _matrix: &GpuTensorHandle,
        _offset: isize,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("diag_extract not supported by provider"))
    }

    /// Apply a lower-triangular mask to the first two dimensions of a tensor.
    fn tril<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _offset: isize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow!("tril not supported by provider")) })
    }

    /// Apply an upper-triangular mask to the first two dimensions of a tensor.
    fn triu<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _offset: isize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow!("triu not supported by provider")) })
    }

    /// Evaluate a polynomial expressed by `coefficients` at each element in `points`.
    fn polyval(
        &self,
        _coefficients: &GpuTensorHandle,
        _points: &GpuTensorHandle,
        _options: &ProviderPolyvalOptions,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("polyval not supported by provider"))
    }

    /// Fit a polynomial of degree `degree` to `(x, y)` samples. Optional weights must match `x`.
    fn polyfit<'a>(
        &'a self,
        _x: &'a GpuTensorHandle,
        _y: &'a GpuTensorHandle,
        _degree: usize,
        _weights: Option<&'a GpuTensorHandle>,
    ) -> AccelProviderFuture<'a, ProviderPolyfitResult> {
        Box::pin(async move { Err(anyhow::anyhow!("polyfit not supported by provider")) })
    }

    /// Differentiate a polynomial represented as a vector of coefficients.
    fn polyder_single<'a>(
        &'a self,
        _polynomial: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow::anyhow!("polyder_single not supported by provider")) })
    }

    /// Apply the product rule to polynomials `p` and `q`.
    fn polyder_product<'a>(
        &'a self,
        _p: &'a GpuTensorHandle,
        _q: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow::anyhow!("polyder_product not supported by provider")) })
    }

    /// Apply the quotient rule to polynomials `u` and `v`.
    fn polyder_quotient<'a>(
        &'a self,
        _u: &'a GpuTensorHandle,
        _v: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, ProviderPolyderQuotient> {
        Box::pin(async move {
            Err(anyhow::anyhow!(
                "polyder_quotient not supported by provider"
            ))
        })
    }

    /// Integrate a polynomial represented as a vector of coefficients and append a constant term.
    fn polyint(
        &self,
        _polynomial: &GpuTensorHandle,
        _constant: f64,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("polyint not supported by provider"))
    }

    /// Allocate a tensor filled with random values drawn from U(0, 1).
    fn random_uniform(&self, _shape: &[usize]) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("random_uniform not supported by provider"))
    }

    /// Allocate a tensor filled with random values matching the prototype shape.
    fn random_uniform_like(&self, prototype: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        self.random_uniform(&prototype.shape)
    }

    /// Allocate a tensor filled with standard normal (mean 0, stddev 1) random values.
    fn random_normal(&self, _shape: &[usize]) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("random_normal not supported by provider"))
    }

    /// Allocate a tensor of standard normal values matching a prototype's shape.
    fn random_normal_like(&self, prototype: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        self.random_normal(&prototype.shape)
    }

    /// Exponentially-distributed random values with mean `mu`.
    fn random_exponential(&self, _mu: f64, _shape: &[usize]) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!(
            "random_exponential not supported by provider"
        ))
    }

    /// Normal random values with mean `mu` and standard deviation `sigma`.
    fn random_normrnd(
        &self,
        _mu: f64,
        _sigma: f64,
        _shape: &[usize],
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("random_normrnd not supported by provider"))
    }

    /// Uniform random values on the interval `[a, b)`.
    fn random_unifrnd(
        &self,
        _a: f64,
        _b: f64,
        _shape: &[usize],
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("random_unifrnd not supported by provider"))
    }

    fn stochastic_evolution(
        &self,
        _state: &GpuTensorHandle,
        _drift: f64,
        _scale: f64,
        _steps: u32,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!(
            "stochastic_evolution not supported by provider"
        ))
    }

    /// Set the provider RNG state to align with the host RNG.
    fn set_rng_state(&self, _state: u64) -> anyhow::Result<()> {
        Err(anyhow::anyhow!("set_rng_state not supported by provider"))
    }

    /// Generate a 2-D correlation kernel matching MATLAB's `fspecial` builtin.
    fn fspecial(&self, _request: &FspecialRequest) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("fspecial not supported by provider"))
    }

    /// Evaluate the `peaks` test surface on an n×n grid spanning [-3,3]×[-3,3].
    /// Returns the Z matrix (n×n) as a GPU tensor.
    fn peaks(&self, _n: usize) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("peaks not supported by provider"))
    }

    /// Evaluate the `peaks` formula element-wise on caller-supplied GPU coordinate tensors.
    /// X and Y must have the same shape. Returns a Z tensor of the same shape.
    fn peaks_xy(
        &self,
        _x: &GpuTensorHandle,
        _y: &GpuTensorHandle,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("peaks_xy not supported by provider"))
    }

    fn hann_window(&self, _len: usize, _periodic: bool) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("hann_window not supported by provider"))
    }

    fn hamming_window(&self, _len: usize, _periodic: bool) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("hamming_window not supported by provider"))
    }

    fn blackman_window(&self, _len: usize, _periodic: bool) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("blackman_window not supported by provider"))
    }

    /// Apply an N-D correlation/convolution with padding semantics matching MATLAB's `imfilter`.
    fn imfilter<'a>(
        &'a self,
        _image: &'a GpuTensorHandle,
        _kernel: &'a GpuTensorHandle,
        _options: &'a ImfilterOptions,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("imfilter not supported by provider")
    }

    /// Allocate a tensor filled with random integers over an inclusive range.
    fn random_integer_range(
        &self,
        _lower: i64,
        _upper: i64,
        _shape: &[usize],
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!(
            "random_integer_range not supported by provider"
        ))
    }

    /// Allocate a random integer tensor matching the prototype shape.
    fn random_integer_like(
        &self,
        prototype: &GpuTensorHandle,
        lower: i64,
        upper: i64,
    ) -> anyhow::Result<GpuTensorHandle> {
        self.random_integer_range(lower, upper, &prototype.shape)
    }

    /// Allocate a random permutation of 1..=n, returning the first k elements.
    fn random_permutation(&self, _n: usize, _k: usize) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow!("random_permutation not supported by provider"))
    }

    /// Allocate a random permutation matching the prototype residency.
    fn random_permutation_like(
        &self,
        _prototype: &GpuTensorHandle,
        n: usize,
        k: usize,
    ) -> anyhow::Result<GpuTensorHandle> {
        self.random_permutation(n, k)
    }

    /// Compute a covariance matrix across the columns of `matrix`.
    fn covariance<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _second: Option<&'a GpuTensorHandle>,
        _weights: Option<&'a GpuTensorHandle>,
        _options: &'a CovarianceOptions,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("covariance not supported by provider")
    }

    /// Compute a correlation coefficient matrix across the columns of `matrix`.
    fn corrcoef<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _options: &'a CorrcoefOptions,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("corrcoef not supported by provider")
    }

    // Optional operator hooks (default to unsupported)
    fn linspace(&self, _start: f64, _stop: f64, _count: usize) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("linspace not supported by provider"))
    }
    fn elem_add<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_add not supported by provider")
    }
    fn elem_mul<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_mul not supported by provider")
    }
    fn elem_max<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_max not supported by provider")
    }
    fn elem_min<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_min not supported by provider")
    }
    fn elem_sub<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_sub not supported by provider")
    }
    fn elem_div<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_div not supported by provider")
    }
    fn elem_pow<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_pow not supported by provider")
    }

    fn elem_hypot<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_hypot not supported by provider")
    }
    fn elem_ge<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_ge not supported by provider")
    }
    fn elem_le<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_le not supported by provider")
    }
    fn elem_lt<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_lt not supported by provider")
    }
    fn elem_gt<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_gt not supported by provider")
    }
    fn elem_eq<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_eq not supported by provider")
    }
    fn elem_ne<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_ne not supported by provider")
    }
    fn logical_and(
        &self,
        _a: &GpuTensorHandle,
        _b: &GpuTensorHandle,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("logical_and not supported by provider"))
    }
    fn logical_or(
        &self,
        _a: &GpuTensorHandle,
        _b: &GpuTensorHandle,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("logical_or not supported by provider"))
    }
    fn logical_xor(
        &self,
        _a: &GpuTensorHandle,
        _b: &GpuTensorHandle,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("logical_xor not supported by provider"))
    }
    fn logical_not(&self, _a: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("logical_not not supported by provider"))
    }
    fn logical_islogical(&self, a: &GpuTensorHandle) -> anyhow::Result<bool> {
        Ok(handle_is_logical(a))
    }
    fn logical_isreal(&self, _a: &GpuTensorHandle) -> anyhow::Result<bool> {
        Err(anyhow::anyhow!("logical_isreal not supported by provider"))
    }
    fn logical_isfinite(&self, _a: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!(
            "logical_isfinite not supported by provider"
        ))
    }
    fn logical_isnan(&self, _a: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("logical_isnan not supported by provider"))
    }
    fn logical_isinf(&self, _a: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("logical_isinf not supported by provider"))
    }
    fn elem_atan2<'a>(
        &'a self,
        _y: &'a GpuTensorHandle,
        _x: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("elem_atan2 not supported by provider")
    }
    // Unary elementwise operations (optional)
    fn unary_sin<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_sin not supported by provider")
    }
    fn unary_gamma<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_gamma not supported by provider")
    }
    fn unary_factorial<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_factorial not supported by provider")
    }
    fn unary_asinh<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_asinh not supported by provider")
    }
    fn unary_sinh<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_sinh not supported by provider")
    }
    fn unary_cosh<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_cosh not supported by provider")
    }
    fn unary_asin<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_asin not supported by provider")
    }
    fn unary_acos<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_acos not supported by provider")
    }
    fn unary_acosh<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_acosh not supported by provider")
    }
    fn unary_tan<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_tan not supported by provider")
    }
    fn unary_tanh<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_tanh not supported by provider")
    }
    fn unary_atan<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_atan not supported by provider")
    }
    fn unary_atanh<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_atanh not supported by provider")
    }
    fn unary_ceil<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_ceil not supported by provider")
    }
    fn unary_floor<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_floor not supported by provider")
    }
    fn unary_round<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_round not supported by provider")
    }
    fn unary_fix<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_fix not supported by provider")
    }
    fn unary_cos<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_cos not supported by provider")
    }
    fn unary_angle<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_angle not supported by provider")
    }
    fn unary_imag<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_imag not supported by provider")
    }
    fn unary_real<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_real not supported by provider")
    }
    fn unary_conj<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_conj not supported by provider")
    }
    fn unary_abs<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_abs not supported by provider")
    }
    fn unary_sign<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_sign not supported by provider")
    }
    fn unary_exp<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_exp not supported by provider")
    }
    fn unary_expm1<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_expm1 not supported by provider")
    }
    fn unary_log<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_log not supported by provider")
    }
    fn unary_log2<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_log2 not supported by provider")
    }
    fn unary_log10<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_log10 not supported by provider")
    }
    fn unary_log1p<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_log1p not supported by provider")
    }
    fn unary_sqrt<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_sqrt not supported by provider")
    }
    fn unary_double<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_double not supported by provider")
    }
    fn unary_single<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_single not supported by provider")
    }
    fn unary_pow2<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_pow2 not supported by provider")
    }
    fn unary_nextpow2<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("unary_nextpow2 not supported by provider")
    }
    fn pow2_scale(
        &self,
        _mantissa: &GpuTensorHandle,
1720        _exponent: &GpuTensorHandle,
1721    ) -> anyhow::Result<GpuTensorHandle> {
1722        Err(anyhow::anyhow!("pow2_scale not supported by provider"))
1723    }
1724    // Left-scalar operations (broadcast with scalar on the left)
1725    fn scalar_rsub(&self, _a: &GpuTensorHandle, _scalar: f64) -> anyhow::Result<GpuTensorHandle> {
1726        Err(anyhow::anyhow!("scalar_rsub not supported by provider"))
1727    }
1728    fn scalar_rdiv(&self, _a: &GpuTensorHandle, _scalar: f64) -> anyhow::Result<GpuTensorHandle> {
1729        Err(anyhow::anyhow!("scalar_rdiv not supported by provider"))
1730    }
1731    // Scalar operations: apply op with scalar right-hand side (broadcast over a)
1732    fn scalar_add(&self, _a: &GpuTensorHandle, _scalar: f64) -> anyhow::Result<GpuTensorHandle> {
1733        Err(anyhow::anyhow!("scalar_add not supported by provider"))
1734    }
1735    fn scalar_sub(&self, _a: &GpuTensorHandle, _scalar: f64) -> anyhow::Result<GpuTensorHandle> {
1736        Err(anyhow::anyhow!("scalar_sub not supported by provider"))
1737    }
1738    fn scalar_mul(&self, _a: &GpuTensorHandle, _scalar: f64) -> anyhow::Result<GpuTensorHandle> {
1739        Err(anyhow::anyhow!("scalar_mul not supported by provider"))
1740    }
1741    fn scalar_max(&self, _a: &GpuTensorHandle, _scalar: f64) -> anyhow::Result<GpuTensorHandle> {
1742        Err(anyhow::anyhow!("scalar_max not supported by provider"))
1743    }
1744    fn scalar_min(&self, _a: &GpuTensorHandle, _scalar: f64) -> anyhow::Result<GpuTensorHandle> {
1745        Err(anyhow::anyhow!("scalar_min not supported by provider"))
1746    }
1747    fn scalar_div(&self, _a: &GpuTensorHandle, _scalar: f64) -> anyhow::Result<GpuTensorHandle> {
1748        Err(anyhow::anyhow!("scalar_div not supported by provider"))
1749    }
1750    fn sort_dim<'a>(
1751        &'a self,
1752        _a: &'a GpuTensorHandle,
1753        _dim: usize,
1754        _order: SortOrder,
1755        _comparison: SortComparison,
1756    ) -> AccelProviderFuture<'a, SortResult> {
1757        unsupported_future("sort_dim not supported by provider")
1758    }
1759    fn sort_rows<'a>(
1760        &'a self,
1761        _a: &'a GpuTensorHandle,
1762        _columns: &'a [SortRowsColumnSpec],
1763        _comparison: SortComparison,
1764    ) -> AccelProviderFuture<'a, SortResult> {
1765        unsupported_future("sort_rows not supported by provider")
1766    }
1767    fn matmul<'a>(
1768        &'a self,
1769        _a: &'a GpuTensorHandle,
1770        _b: &'a GpuTensorHandle,
1771    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
1772        unsupported_future("matmul not supported by provider")
1773    }
1774
1775    fn syrk(&self, _a: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
1776        Err(anyhow::anyhow!("syrk not supported by provider"))
1777    }
1778    fn pagefun(&self, _request: &PagefunRequest) -> anyhow::Result<GpuTensorHandle> {
1779        Err(anyhow::anyhow!("pagefun not supported by provider"))
1780    }
1781
    /// Optional: matrix multiplication with an epilogue applied before store.
    ///
    /// The default implementation falls back to `matmul` when the epilogue is effectively a no-op
    /// (see [`MatmulEpilogue::is_noop`]), and otherwise returns `Err`; a caller-side sketch
    /// follows the trait definition below.
    fn matmul_epilogue<'a>(
        &'a self,
        a: &'a GpuTensorHandle,
        b: &'a GpuTensorHandle,
        epilogue: &'a MatmulEpilogue,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move {
            if epilogue.is_noop() {
                return self.matmul(a, b).await;
            }
            Err(anyhow::anyhow!("matmul_epilogue not supported by provider"))
        })
    }
    fn image_normalize<'a>(
        &'a self,
        _input: &'a GpuTensorHandle,
        _desc: &'a ImageNormalizeDescriptor,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("image_normalize fusion not supported by provider")
    }
    fn matmul_power_step<'a>(
        &'a self,
        _lhs: &'a GpuTensorHandle,
        _rhs: &'a GpuTensorHandle,
        _epilogue: &'a PowerStepEpilogue,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("matmul_power_step normalization not supported by provider")
    }
    fn linsolve<'a>(
        &'a self,
        _lhs: &'a GpuTensorHandle,
        _rhs: &'a GpuTensorHandle,
        _options: &'a ProviderLinsolveOptions,
    ) -> AccelProviderFuture<'a, ProviderLinsolveResult> {
        unsupported_future("linsolve not supported by provider")
    }
    fn inv<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _options: ProviderInvOptions,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("inv not supported by provider")
    }
    fn pinv<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _options: ProviderPinvOptions,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("pinv not supported by provider")
    }
    fn cond<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _norm: ProviderCondNorm,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow::anyhow!("cond not supported by provider")) })
    }
    fn norm<'a>(
        &'a self,
        _tensor: &'a GpuTensorHandle,
        _order: ProviderNormOrder,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow::anyhow!("norm not supported by provider")) })
    }
    fn rank<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _tolerance: Option<f64>,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow::anyhow!("rank not supported by provider")) })
    }
    fn rcond<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow::anyhow!("rcond not supported by provider")) })
    }
    fn mldivide<'a>(
        &'a self,
        _lhs: &'a GpuTensorHandle,
        _rhs: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow::anyhow!("mldivide not supported by provider")) })
    }
    fn mrdivide<'a>(
        &'a self,
        _lhs: &'a GpuTensorHandle,
        _rhs: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        Box::pin(async move { Err(anyhow::anyhow!("mrdivide not supported by provider")) })
    }
    fn eig<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _compute_left: bool,
    ) -> AccelProviderFuture<'a, ProviderEigResult> {
        Box::pin(async move { Err(anyhow::anyhow!("eig not supported by provider")) })
    }
    fn lu<'a>(&'a self, _a: &'a GpuTensorHandle) -> AccelProviderFuture<'a, ProviderLuResult> {
        Box::pin(async move { Err(anyhow::anyhow!("lu not supported by provider")) })
    }

    fn chol<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _lower: bool,
    ) -> AccelProviderFuture<'a, ProviderCholResult> {
        Box::pin(async move { Err(anyhow::anyhow!("chol not supported by provider")) })
    }
    fn qr<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _options: ProviderQrOptions,
    ) -> AccelProviderFuture<'a, ProviderQrResult> {
        Box::pin(async move { Err(anyhow::anyhow!("qr not supported by provider")) })
    }
    fn take_matmul_sources(
        &self,
        _product: &GpuTensorHandle,
    ) -> Option<(GpuTensorHandle, GpuTensorHandle)> {
        None
    }
    fn qr_power_iter<'a>(
        &'a self,
        product: &'a GpuTensorHandle,
        _product_lhs: Option<&'a GpuTensorHandle>,
        q_handle: &'a GpuTensorHandle,
        options: &'a ProviderQrOptions,
    ) -> AccelProviderFuture<'a, Option<ProviderQrPowerIterResult>> {
        let _ = (product, q_handle, options);
        Box::pin(async move { Ok(None) })
    }
    fn transpose(&self, _a: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("transpose not supported by provider"))
    }
    fn conv1d(
        &self,
        _signal: &GpuTensorHandle,
        _kernel: &GpuTensorHandle,
        _options: ProviderConv1dOptions,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("conv1d not supported by provider"))
    }
    fn conv2d(
        &self,
        _signal: &GpuTensorHandle,
        _kernel: &GpuTensorHandle,
        _mode: ProviderConvMode,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("conv2d not supported by provider"))
    }
    fn iir_filter<'a>(
        &'a self,
        _b: &'a GpuTensorHandle,
        _a: &'a GpuTensorHandle,
        _x: &'a GpuTensorHandle,
        _options: ProviderIirFilterOptions,
    ) -> AccelProviderFuture<'a, ProviderIirFilterResult> {
        Box::pin(async move { Err(anyhow::anyhow!("iir_filter not supported by provider")) })
    }
    /// Reorder tensor dimensions according to `order`, expressed as zero-based indices.
    fn permute(
        &self,
        _handle: &GpuTensorHandle,
        _order: &[usize],
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("permute not supported by provider"))
    }
    fn flip(&self, _handle: &GpuTensorHandle, _axes: &[usize]) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("flip not supported by provider"))
    }
    fn circshift(
        &self,
        _handle: &GpuTensorHandle,
        _shifts: &[isize],
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("circshift not supported by provider"))
    }
    fn diff_dim(
        &self,
        _handle: &GpuTensorHandle,
        _order: usize,
        _dim: usize,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("diff_dim not supported by provider"))
    }
    fn gradient_dim(
        &self,
        _handle: &GpuTensorHandle,
        _dim: usize,
        _spacing: f64,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("gradient_dim not supported by provider"))
    }
    /// Perform an in-place FFT along a zero-based dimension, optionally padding/truncating to `len`.
    fn fft_dim<'a>(
        &'a self,
        _handle: &'a GpuTensorHandle,
        _len: Option<usize>,
        _dim: usize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("fft_dim not supported by provider")
    }
    fn ifft_dim<'a>(
        &'a self,
        _handle: &'a GpuTensorHandle,
        _len: Option<usize>,
        _dim: usize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("ifft_dim not supported by provider")
    }
    fn fft_extract_real<'a>(
        &'a self,
        _handle: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("fft_extract_real not supported by provider")
    }
    fn unique<'a>(
        &'a self,
        _handle: &'a GpuTensorHandle,
        _options: &'a UniqueOptions,
    ) -> AccelProviderFuture<'a, UniqueResult> {
        Box::pin(async move { Err(anyhow::anyhow!("unique not supported by provider")) })
    }
    fn union<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
        _options: &'a UnionOptions,
    ) -> AccelProviderFuture<'a, UnionResult> {
        Box::pin(async move { Err(anyhow::anyhow!("union not supported by provider")) })
    }
    fn setdiff<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
        _options: &'a SetdiffOptions,
    ) -> AccelProviderFuture<'a, SetdiffResult> {
        Box::pin(async move { Err(anyhow::anyhow!("setdiff not supported by provider")) })
    }
    fn ismember<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _b: &'a GpuTensorHandle,
        _options: &'a IsMemberOptions,
    ) -> AccelProviderFuture<'a, IsMemberResult> {
        Box::pin(async move { Err(anyhow::anyhow!("ismember not supported by provider")) })
    }
    fn reshape(
        &self,
        handle: &GpuTensorHandle,
        new_shape: &[usize],
    ) -> anyhow::Result<GpuTensorHandle> {
        // The default implementation only relabels shape metadata on the same buffer,
        // so the total element count must be preserved.
        let old_len: usize = handle.shape.iter().product();
        let new_len: usize = new_shape.iter().product();
        if old_len != new_len {
            return Err(anyhow::anyhow!(
                "reshape: element count mismatch ({old_len} vs {new_len})"
            ));
        }
        let mut updated = handle.clone();
        updated.shape = new_shape.to_vec();
        Ok(updated)
    }
    /// Concatenate the provided tensors along the 1-based dimension `dim`.
    fn cat(&self, _dim: usize, _inputs: &[GpuTensorHandle]) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("cat not supported by provider"))
    }
    fn repmat(
        &self,
        _handle: &GpuTensorHandle,
        _reps: &[usize],
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("repmat not supported by provider"))
    }
    /// Compute the Kronecker product of two tensors, matching MATLAB semantics.
    fn kron(&self, _a: &GpuTensorHandle, _b: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("kron not supported by provider"))
    }
    /// Compute the cross product of 3-element vectors along a matching dimension.
    fn cross(
        &self,
        _lhs: &GpuTensorHandle,
        _rhs: &GpuTensorHandle,
        _dim: Option<usize>,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("cross not supported by provider"))
    }
    fn reduce_sum<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_sum not supported by provider")
    }
    fn reduce_sum_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_sum_dim not supported by provider")
    }
    fn dot<'a>(
        &'a self,
        _lhs: &'a GpuTensorHandle,
        _rhs: &'a GpuTensorHandle,
        _dim: Option<usize>,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("dot not supported by provider")
    }
    fn reduce_nnz<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_nnz not supported by provider")
    }
    fn reduce_nnz_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_nnz_dim not supported by provider")
    }
    fn reduce_prod<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_prod not supported by provider")
    }
    fn reduce_prod_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_prod_dim not supported by provider")
    }
    fn reduce_mean<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_mean not supported by provider")
    }
    /// Reduce mean across multiple zero-based dimensions in one device pass.
    fn reduce_mean_nd<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dims_zero_based: &'a [usize],
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_mean_nd not supported by provider")
    }
    /// Reduce moments across multiple zero-based dimensions in one device pass.
    /// Returns mean (E[x]) and mean of squares (E[x^2]).
    fn reduce_moments_nd<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dims_zero_based: &'a [usize],
    ) -> AccelProviderFuture<'a, ProviderMoments2> {
        unsupported_future("reduce_moments_nd not supported by provider")
    }
    fn reduce_mean_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_mean_dim not supported by provider")
    }
    fn reduce_std<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _normalization: ProviderStdNormalization,
        _nan_mode: ProviderNanMode,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_std not supported by provider")
    }
    fn reduce_std_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
        _normalization: ProviderStdNormalization,
        _nan_mode: ProviderNanMode,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_std_dim not supported by provider")
    }
    fn reduce_any<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _omit_nan: bool,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_any not supported by provider")
    }
    fn reduce_any_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
        _omit_nan: bool,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_any_dim not supported by provider")
    }
    fn reduce_all<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _omit_nan: bool,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_all not supported by provider")
    }
    fn reduce_all_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
        _omit_nan: bool,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_all_dim not supported by provider")
    }
    fn reduce_median<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_median not supported by provider")
    }
    fn reduce_median_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_median_dim not supported by provider")
    }
    fn reduce_min<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_min not supported by provider")
    }
    fn reduce_min_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
    ) -> AccelProviderFuture<'a, ReduceDimResult> {
        unsupported_future("reduce_min_dim not supported by provider")
    }
    fn reduce_max<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
    ) -> AccelProviderFuture<'a, GpuTensorHandle> {
        unsupported_future("reduce_max not supported by provider")
    }
    fn reduce_max_dim<'a>(
        &'a self,
        _a: &'a GpuTensorHandle,
        _dim: usize,
    ) -> AccelProviderFuture<'a, ReduceDimResult> {
        unsupported_future("reduce_max_dim not supported by provider")
    }
    fn cumsum_scan(
        &self,
        _input: &GpuTensorHandle,
        _dim: usize,
        _direction: ProviderScanDirection,
        _nan_mode: ProviderNanMode,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("cumsum_scan not supported by provider"))
    }
    fn cumprod_scan(
        &self,
        _input: &GpuTensorHandle,
        _dim: usize,
        _direction: ProviderScanDirection,
        _nan_mode: ProviderNanMode,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("cumprod_scan not supported by provider"))
    }
    fn cummin_scan(
        &self,
        _input: &GpuTensorHandle,
        _dim: usize,
        _direction: ProviderScanDirection,
        _nan_mode: ProviderNanMode,
    ) -> anyhow::Result<ProviderCumminResult> {
        Err(anyhow::anyhow!("cummin_scan not supported by provider"))
    }
    fn cummax_scan(
        &self,
        _input: &GpuTensorHandle,
        _dim: usize,
        _direction: ProviderScanDirection,
        _nan_mode: ProviderNanMode,
    ) -> anyhow::Result<ProviderCummaxResult> {
        Err(anyhow::anyhow!("cummax_scan not supported by provider"))
    }

    fn find(
        &self,
        _a: &GpuTensorHandle,
        _limit: Option<usize>,
        _direction: FindDirection,
    ) -> anyhow::Result<ProviderFindResult> {
        Err(anyhow::anyhow!("find not supported by provider"))
    }

    fn fused_elementwise(
        &self,
        _shader: &str,
        _inputs: &[GpuTensorHandle],
        _output_shape: &[usize],
        _len: usize,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!(
            "fused_elementwise not supported by provider"
        ))
    }

    /// Execute a single fused elementwise kernel that writes `num_outputs` output buffers in one
    /// dispatch. The shader is expected to declare `output0`, `output1`, … `output{N-1}` storage
    /// bindings (at binding indices `inputs.len()` through `inputs.len() + num_outputs - 1`) and a
    /// uniform `params` binding at `inputs.len() + num_outputs`.
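    ///
    /// For example, with three inputs and two outputs the expected layout is: bindings
    /// 0-2 for the inputs, bindings 3-4 for `output0`/`output1`, and binding 5 for the
    /// `params` uniform.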
    ///
    /// Providers that do not override this method fall back to calling `fused_elementwise` once
    /// per output, which preserves correctness at the cost of the O(N²) dispatch overhead this
    /// method is designed to eliminate.
    fn fused_elementwise_multi(
        &self,
        _shader: &str,
        _inputs: &[GpuTensorHandle],
        _output_shape: &[usize],
        _len: usize,
        _num_outputs: usize,
    ) -> anyhow::Result<Vec<GpuTensorHandle>> {
        Err(anyhow::anyhow!(
            "fused_elementwise_multi not supported by provider"
        ))
    }

    /// Build a numeric tensor where NaNs in `a` are replaced with 0.0 (device side).
    fn map_nan_to_zero(&self, _a: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("map_nan_to_zero not supported by provider"))
    }

    /// Build a numeric mask tensor with 1.0 where value is not NaN and 0.0 where value is NaN.
    fn not_nan_mask(&self, _a: &GpuTensorHandle) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("not_nan_mask not supported by provider"))
    }

    /// Generic fused reduction entrypoint.
    ///
    /// The shader is expected to implement a column-major reduction across `reduce_len` with
    /// `num_slices` independent slices (e.g., columns). Providers should create a uniform buffer
    /// compatible with the expected `Params/MParams` struct in the shader and dispatch
    /// `num_slices` workgroups with `workgroup_size` threads, or an equivalent strategy.
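    ///
    /// For example, summing each column of a 1024 x 64 column-major matrix corresponds
    /// to `reduce_len = 1024` and `num_slices = 64`, i.e. one workgroup per column under
    /// the default dispatch strategy.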
    #[allow(clippy::too_many_arguments)]
    fn fused_reduction(
        &self,
        _shader: &str,
        _inputs: &[GpuTensorHandle],
        _output_shape: &[usize],
        _reduce_len: usize,
        _num_slices: usize,
        _workgroup_size: u32,
        _flavor: ReductionFlavor,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("fused_reduction not supported by provider"))
    }

    /// Optionally pre-compile commonly used pipelines to amortize first-dispatch costs.
    fn warmup(&self) {}

    /// Returns (cache_hits, cache_misses) for fused pipeline cache, if supported.
    fn fused_cache_counters(&self) -> (u64, u64) {
        (0, 0)
    }

    /// Returns the duration of the last provider warmup in milliseconds, if known.
    fn last_warmup_millis(&self) -> Option<u64> {
        None
    }

    /// Returns a snapshot of provider telemetry counters if supported.
    fn telemetry_snapshot(&self) -> ProviderTelemetry {
        let (hits, misses) = self.fused_cache_counters();
        ProviderTelemetry {
            fused_elementwise: ProviderDispatchStats::default(),
            fused_reduction: ProviderDispatchStats::default(),
            matmul: ProviderDispatchStats::default(),
            linsolve: ProviderDispatchStats::default(),
            mldivide: ProviderDispatchStats::default(),
            mrdivide: ProviderDispatchStats::default(),
            upload_bytes: 0,
            download_bytes: 0,
            solve_fallbacks: Vec::new(),
            fusion_cache_hits: hits,
            fusion_cache_misses: misses,
            bind_group_cache_hits: 0,
            bind_group_cache_misses: 0,
            bind_group_cache_by_layout: None,
            kernel_launches: Vec::new(),
        }
    }

    /// Reset all telemetry counters maintained by the provider, if supported.
    fn reset_telemetry(&self) {}

    /// Default reduction workgroup size the provider prefers.
    fn default_reduction_workgroup_size(&self) -> u32 {
        256
    }

    /// Threshold above which the provider prefers a two-pass reduction.
    fn two_pass_threshold(&self) -> usize {
        1024
    }

    /// Current two-pass mode preference (auto/forced on/off).
    fn reduction_two_pass_mode(&self) -> ReductionTwoPassMode {
        ReductionTwoPassMode::Auto
    }

    /// Fast-path: write a GPU column in a matrix from a GPU vector, returning a new handle.
    /// Expected: `values.shape == [rows, 1]` (or `[rows]`) and `col_index < cols`.
    fn scatter_column(
        &self,
        _matrix: &GpuTensorHandle,
        _col_index: usize,
        _values: &GpuTensorHandle,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("scatter_column not supported by provider"))
    }

    /// Fast-path: write a GPU row in a matrix from a GPU vector, returning a new handle.
    /// Expected: `values.shape == [1, cols]` (or `[cols]`) and `row_index < rows`.
    fn scatter_row(
        &self,
        _matrix: &GpuTensorHandle,
        _row_index: usize,
        _values: &GpuTensorHandle,
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("scatter_row not supported by provider"))
    }

    fn sub2ind(
        &self,
        _dims: &[usize],
        _strides: &[usize],
        _inputs: &[&GpuTensorHandle],
        _scalar_mask: &[bool],
        _len: usize,
        _output_shape: &[usize],
    ) -> anyhow::Result<GpuTensorHandle> {
        Err(anyhow::anyhow!("sub2ind not supported by provider"))
    }

    /// Returns true if the provider offers a device-side `ind2sub` implementation.
    fn supports_ind2sub(&self) -> bool {
        false
    }

    /// Convert linear indices into per-dimension subscripts on the device.
    fn ind2sub(
        &self,
        _dims: &[usize],
        _strides: &[usize],
        _indices: &GpuTensorHandle,
        _total: usize,
        _len: usize,
        _output_shape: &[usize],
    ) -> anyhow::Result<Vec<GpuTensorHandle>> {
        Err(anyhow::anyhow!("ind2sub not supported by provider"))
    }

    /// Determine if a matrix is symmetric (or skew-symmetric) without gathering it to the host.
    fn issymmetric(
        &self,
        _matrix: &GpuTensorHandle,
        _kind: ProviderSymmetryKind,
        _tolerance: f64,
    ) -> anyhow::Result<bool> {
        Err(anyhow::anyhow!(
            "issymmetric predicate not supported by provider"
        ))
    }

    /// Determine if a matrix is Hermitian (or skew-Hermitian) without gathering it to the host.
    fn ishermitian<'a>(
        &'a self,
        _matrix: &'a GpuTensorHandle,
        _kind: ProviderHermitianKind,
        _tolerance: f64,
    ) -> AccelProviderFuture<'a, bool> {
        Box::pin(async move {
            Err(anyhow::anyhow!(
                "ishermitian predicate not supported by provider"
            ))
        })
    }

    /// Inspect the bandwidth of a matrix without gathering it back to the host.
    fn bandwidth(&self, _matrix: &GpuTensorHandle) -> anyhow::Result<ProviderBandwidth> {
        Err(anyhow::anyhow!("bandwidth not supported by provider"))
    }

    /// Compute the symmetric reverse Cuthill-McKee permutation for the matrix.
    ///
    /// Implementations may execute on the device or gather to the host. The permutation should be
    /// returned as zero-based indices.
    fn sym_rcm<'a>(&'a self, _matrix: &'a GpuTensorHandle) -> AccelProviderFuture<'a, Vec<usize>> {
        Box::pin(async move { Err(anyhow::anyhow!("sym_rcm not supported by provider")) })
    }
}
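
// A caller-side sketch (illustrative only, not part of this crate's API): because the
// default `matmul_epilogue` above degrades to plain `matmul` for no-op epilogues, a
// caller can pass an epilogue unconditionally; only a genuinely fused epilogue can fail
// on providers that do not override the method.
async fn _matmul_epilogue_sketch(
    p: &dyn AccelProvider,
    a: &GpuTensorHandle,
    b: &GpuTensorHandle,
    epilogue: &MatmulEpilogue,
) -> anyhow::Result<GpuTensorHandle> {
    p.matmul_epilogue(a, b, epilogue).await
}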

static GLOBAL_PROVIDER: Lazy<RwLock<Option<&'static dyn AccelProvider>>> =
    Lazy::new(|| RwLock::new(None));
static PROVIDER_REGISTRY: Lazy<RwLock<HashMap<u32, &'static dyn AccelProvider>>> =
    Lazy::new(|| RwLock::new(HashMap::new()));
static DEVICE_ID_COUNTER: AtomicU32 = AtomicU32::new(1);

#[cfg(not(target_arch = "wasm32"))]
thread_local! {
    static THREAD_PROVIDER: Cell<Option<&'static dyn AccelProvider>> = Cell::new(None);
}

#[cfg(target_arch = "wasm32")]
static WASM_THREAD_PROVIDER: Lazy<Mutex<Option<&'static dyn AccelProvider>>> =
    Lazy::new(|| Mutex::new(None));

#[cfg(not(target_arch = "wasm32"))]
fn replace_thread_provider(
    provider: Option<&'static dyn AccelProvider>,
) -> Option<&'static dyn AccelProvider> {
    THREAD_PROVIDER.with(|cell| {
        let prev = cell.get();
        cell.set(provider);
        prev
    })
}

#[cfg(target_arch = "wasm32")]
fn replace_thread_provider(
    provider: Option<&'static dyn AccelProvider>,
) -> Option<&'static dyn AccelProvider> {
    let mut slot = WASM_THREAD_PROVIDER
        .lock()
        .expect("wasm provider mutex poisoned");
    let prev = *slot;
    *slot = provider;
    prev
}

#[cfg(not(target_arch = "wasm32"))]
fn current_thread_provider() -> Option<&'static dyn AccelProvider> {
    THREAD_PROVIDER.with(|cell| cell.get())
}

#[cfg(target_arch = "wasm32")]
fn current_thread_provider() -> Option<&'static dyn AccelProvider> {
    WASM_THREAD_PROVIDER
        .lock()
        .expect("wasm provider mutex poisoned")
        .as_ref()
        .copied()
}

/// Register a global acceleration provider.
///
/// # Safety
/// - The caller must guarantee that `p` is valid for the entire program lifetime
///   (e.g., a `'static` singleton), as the runtime stores a raw reference globally.
/// - Concurrent callers must ensure registration happens once or is properly
///   synchronized; this function does not enforce thread-safety for re-registration.
pub unsafe fn register_provider(p: &'static dyn AccelProvider) {
    if let Ok(mut guard) = GLOBAL_PROVIDER.write() {
        *guard = Some(p);
    }
    register_provider_for_device(p.device_id(), p);
}
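
// A minimal sketch of satisfying the `'static` requirement documented above: leak a
// heap-allocated provider once at startup so the reference lives for the remainder of
// the program. Illustrative only; backends may hold their singleton differently.
fn _install_provider_sketch<P: AccelProvider + 'static>(backend: P) {
    let leaked: &'static P = Box::leak(Box::new(backend));
    // SAFETY: the leaked reference is never freed, which is exactly the program-lifetime
    // guarantee `register_provider` requires of its argument.
    unsafe { register_provider(leaked) };
}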

unsafe fn register_provider_for_device(device_id: u32, provider: &'static dyn AccelProvider) {
    if let Ok(mut guard) = PROVIDER_REGISTRY.write() {
        guard.insert(device_id, provider);
    }
}

pub fn provider() -> Option<&'static dyn AccelProvider> {
    if let Some(p) = current_thread_provider() {
        return Some(p);
    }
    GLOBAL_PROVIDER
        .read()
        .ok()
        .and_then(|guard| guard.as_ref().copied())
}

/// Clear the globally registered provider. Intended for tests to ensure deterministic behaviour.
pub fn clear_provider() {
    replace_thread_provider(None);
    if let Ok(mut guard) = GLOBAL_PROVIDER.write() {
        *guard = None;
    }
    if let Ok(mut map) = PROVIDER_REGISTRY.write() {
        map.clear();
    }
}

pub fn provider_for_device(device_id: u32) -> Option<&'static dyn AccelProvider> {
    PROVIDER_REGISTRY
        .read()
        .ok()
        .and_then(|guard| guard.get(&device_id).copied())
        .or_else(|| provider())
}

pub fn provider_for_handle(handle: &GpuTensorHandle) -> Option<&'static dyn AccelProvider> {
    provider_for_device(handle.device_id)
}

pub fn next_device_id() -> u32 {
    DEVICE_ID_COUNTER.fetch_add(1, Ordering::Relaxed)
}

pub struct ThreadProviderGuard {
    prev: Option<&'static dyn AccelProvider>,
}

impl ThreadProviderGuard {
    pub fn set(provider: Option<&'static dyn AccelProvider>) -> Self {
        let prev = replace_thread_provider(provider);
        ThreadProviderGuard { prev }
    }
}

impl Drop for ThreadProviderGuard {
    fn drop(&mut self) {
        let prev = self.prev.take();
        replace_thread_provider(prev);
    }
}
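
// Sketch: scope a thread-local provider override with RAII. While `_guard` is alive,
// `provider()` resolves to `p` on this thread; the previous binding is restored when the
// guard drops, including on early return or unwind. Illustrative only.
fn _with_thread_provider_sketch<R>(
    p: &'static dyn AccelProvider,
    f: impl FnOnce() -> R,
) -> R {
    let _guard = ThreadProviderGuard::set(Some(p));
    f()
}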

pub fn set_thread_provider(provider: Option<&'static dyn AccelProvider>) {
    replace_thread_provider(provider);
}

/// Convenience: perform elementwise add via provider if possible; otherwise return None
pub async fn try_elem_add(a: &GpuTensorHandle, b: &GpuTensorHandle) -> Option<GpuTensorHandle> {
    if let Some(p) = provider() {
        if let Ok(h) = p.elem_add(a, b).await {
            return Some(h);
        }
    }
    None
}

/// Convenience: perform elementwise hypot via provider if possible; otherwise return None
pub async fn try_elem_hypot(a: &GpuTensorHandle, b: &GpuTensorHandle) -> Option<GpuTensorHandle> {
    if let Some(p) = provider() {
        if let Ok(h) = p.elem_hypot(a, b).await {
            return Some(h);
        }
    }
    None
}

/// Convenience: perform elementwise max via provider if possible; otherwise return None
pub async fn try_elem_max(a: &GpuTensorHandle, b: &GpuTensorHandle) -> Option<GpuTensorHandle> {
    if let Some(p) = provider() {
        if let Ok(h) = p.elem_max(a, b).await {
            return Some(h);
        }
    }
    None
}

/// Convenience: perform elementwise min via provider if possible; otherwise return None
pub async fn try_elem_min(a: &GpuTensorHandle, b: &GpuTensorHandle) -> Option<GpuTensorHandle> {
    if let Some(p) = provider() {
        if let Ok(h) = p.elem_min(a, b).await {
            return Some(h);
        }
    }
    None
}

/// Convenience: perform elementwise atan2 via provider if possible; otherwise return None
pub async fn try_elem_atan2(y: &GpuTensorHandle, x: &GpuTensorHandle) -> Option<GpuTensorHandle> {
    if let Some(p) = provider() {
        if let Ok(h) = p.elem_atan2(y, x).await {
            return Some(h);
        }
    }
    None
}
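
// Sketch of the calling pattern the `try_*` helpers above enable: attempt the device
// path and surface `None` as an error. Real callers would typically fall back to a host
// implementation at that point instead. Illustrative only.
async fn _elem_add_strict_sketch(
    a: &GpuTensorHandle,
    b: &GpuTensorHandle,
) -> anyhow::Result<GpuTensorHandle> {
    try_elem_add(a, b)
        .await
        .ok_or_else(|| anyhow!("no provider kernel available for elementwise add"))
}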

// Minimal host tensor views to avoid depending on runmat-builtins and cycles
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct HostTensorOwned {
    pub data: Vec<f64>,
    pub shape: Vec<usize>,
    pub storage: GpuTensorStorage,
}

#[derive(Debug)]
pub struct HostTensorView<'a> {
    pub data: &'a [f64],
    pub shape: &'a [usize],
}

/// Lightweight 1-D axis view used by provider meshgrid hooks.
#[derive(Debug)]
pub struct MeshgridAxisView<'a> {
    pub data: &'a [f64],
}

/// Provider-side meshgrid result containing coordinate tensor handles.
#[derive(Debug, Clone)]
pub struct ProviderMeshgridResult {
    pub outputs: Vec<GpuTensorHandle>,
}

/// Whether a per-row/per-column scale vector multiplies or divides the output.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ScaleOp {
    Multiply,
    Divide,
}

/// Descriptor for GEMM epilogues applied to `C = A * B` before storing to `C`.
///
/// Supported operations:
/// - Scale by `alpha` and add scalar `beta`.
/// - Multiply or divide the output by per-row and/or per-column scale vectors (broadcast).
/// - Clamp to optional bounds, raise to an optional power, and capture the diagonal.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct MatmulEpilogue {
    /// Scalar multiply applied to each output element.
    pub alpha: f64,
    /// Scalar add applied to each output element after scaling.
    pub beta: f64,
    /// Optional per-row scale (length m). When present, output[row, col] *= row_scale[row].
    pub row_scale: Option<GpuTensorHandle>,
    /// Optional per-column scale (length n). When present, output[row, col] *= col_scale[col].
    pub col_scale: Option<GpuTensorHandle>,
    /// Row scale operation (multiply or divide). Ignored when `row_scale` is None.
    pub row_op: ScaleOp,
    /// Column scale operation (multiply or divide). Ignored when `col_scale` is None.
    pub col_op: ScaleOp,
    /// Optional lower clamp bound applied after scale/bias.
    #[serde(default)]
    pub clamp_min: Option<f64>,
    /// Optional upper clamp bound applied after scale/bias.
    #[serde(default)]
    pub clamp_max: Option<f64>,
    /// Optional power exponent applied after clamp (final operation in the epilogue).
    #[serde(default)]
    pub pow_exponent: Option<f64>,
    /// Optional output buffer for the diagonal of the result (length min(m, n)).
    #[serde(default)]
    pub diag_output: Option<GpuTensorHandle>,
}

impl MatmulEpilogue {
    pub fn noop() -> Self {
        Self {
            alpha: 1.0,
            beta: 0.0,
            row_scale: None,
            col_scale: None,
            row_op: ScaleOp::Multiply,
            col_op: ScaleOp::Multiply,
            clamp_min: None,
            clamp_max: None,
            pow_exponent: None,
            diag_output: None,
        }
    }
    pub fn is_noop(&self) -> bool {
        self.alpha == 1.0
            && self.beta == 0.0
            && self.row_scale.is_none()
            && self.col_scale.is_none()
            && self.clamp_min.is_none()
            && self.clamp_max.is_none()
            && self.pow_exponent.is_none()
            && self.diag_output.is_none()
    }
}
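
// Sketch: an epilogue computing `max(alpha * (A * B) + beta, 0.0)`, i.e. a scaled and
// biased product passed through a ReLU-style lower clamp. Struct-update syntax keeps
// every other field at its identity value from `noop()`. Illustrative only.
fn _relu_epilogue_sketch() -> MatmulEpilogue {
    let epilogue = MatmulEpilogue {
        alpha: 2.0,
        beta: -1.0,
        clamp_min: Some(0.0),
        ..MatmulEpilogue::noop()
    };
    debug_assert!(!epilogue.is_noop());
    epilogue
}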

#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub struct PowerStepEpilogue {
    pub epsilon: f64,
}

impl Default for PowerStepEpilogue {
    fn default() -> Self {
        Self { epsilon: 0.0 }
    }
}

#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ImageNormalizeDescriptor {
    pub batch: usize,
    pub height: usize,
    pub width: usize,
    pub epsilon: f64,
    #[serde(default)]
    pub gain: Option<f64>,
    #[serde(default)]
    pub bias: Option<f64>,
    #[serde(default)]
    pub gamma: Option<f64>,
}
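
// Sketch: build a descriptor for normalizing a batch of `height` x `width` images,
// leaving the optional affine terms at their serde-default `None`. The epsilon chosen
// here is illustrative, not a recommended value.
fn _image_normalize_sketch(batch: usize, height: usize, width: usize) -> ImageNormalizeDescriptor {
    ImageNormalizeDescriptor {
        batch,
        height,
        width,
        epsilon: 1e-6,
        gain: None,
        bias: None,
        gamma: None,
    }
}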