oxirs_embed/acceleration/gpu.rs

//! GPU Acceleration for Embedding Computations
//!
//! This module provides GPU-accelerated implementations of embedding operations,
//! built around the scirs2-linalg GPU features for CUDA, OpenCL, ROCm, and Metal
//! backends. Until the `scirs2_linalg::gpu` module is available, every operation
//! falls back to an equivalent CPU implementation behind the same API.

use crate::models::common::*;
use anyhow::Result;
use scirs2_core::ndarray_ext::{Array1, Array2};
#[cfg(feature = "gpu")]
use std::collections::VecDeque;
#[cfg(feature = "gpu")]
use std::sync::atomic::{AtomicU64, Ordering};
#[cfg(feature = "gpu")]
use std::sync::{Arc, Mutex, RwLock};
#[cfg(feature = "gpu")]
use std::time::{Duration, Instant};

// TODO: scirs2_linalg::gpu module is not yet available
// Enable this when the GPU module is implemented in scirs2_linalg
// use scirs2_linalg::gpu::{GpuArray, GpuContext, GpuError};
// Placeholder types until scirs2_linalg::gpu is available
#[cfg(feature = "gpu")]
pub type GpuArray<T> = Vec<T>;
#[cfg(feature = "gpu")]
pub type GpuContext = ();
#[cfg(feature = "gpu")]
#[derive(Debug)]
pub struct GpuError(String);

#[cfg(feature = "gpu")]
impl std::fmt::Display for GpuError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[cfg(feature = "gpu")]
impl std::error::Error for GpuError {}

/// Memory pool for GPU buffers
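///
/// A minimal usage sketch (illustrative only; the buffer length is a placeholder, and
/// `GpuArray<f32>` is currently just a `Vec<f32>` until the real GPU backend lands):
///
/// ```ignore
/// let mut pool = GpuMemoryPool::new(1024, 4);
/// // Reuse a pooled buffer if one is available, otherwise allocate a fresh one.
/// let buffer = pool.get_buffer().unwrap_or_else(|| vec![0.0f32; 1024]);
/// // ... fill the buffer / hand it to a kernel ...
/// pool.return_buffer(buffer);
/// ```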
#[cfg(feature = "gpu")]
#[derive(Debug)]
pub struct GpuMemoryPool {
    available_buffers: VecDeque<GpuArray<f32>>,
    buffer_size: usize,
    total_allocated: AtomicU64,
    peak_usage: AtomicU64,
}

/// Adaptive batch sizing configuration
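///
/// Batches are clamped to `[min_batch_size, max_batch_size]`; the utilization and
/// memory fields express targets for the adaptive heuristics. A hedged example with
/// hand-picked (not tuned) values:
///
/// ```ignore
/// let config = AdaptiveBatchConfig {
///     min_batch_size: 32,
///     max_batch_size: 8192,
///     target_gpu_utilization: 0.85,
///     memory_usage_threshold: 0.8,
/// };
/// ```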
#[cfg(feature = "gpu")]
#[derive(Debug, Clone)]
pub struct AdaptiveBatchConfig {
    pub min_batch_size: usize,
    pub max_batch_size: usize,
    pub target_gpu_utilization: f32,
    pub memory_usage_threshold: f32,
}

/// Enhanced GPU-accelerated embedding computations with memory pooling and adaptive batching
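///
/// A hedged end-to-end sketch (assumes the `gpu` feature, an async runtime, and that
/// `query: Array1<f32>` / `database: Vec<Array1<f32>>` already exist; device 0 is an
/// arbitrary choice):
///
/// ```ignore
/// let accel = GpuEmbeddingAccelerator::new(0)?;
/// println!("{}", accel.device_info());
/// let top = accel.gpu_embedding_search(&query, &database, 10).await?;
/// for (idx, score) in top {
///     println!("candidate {idx}: cosine similarity {score:.4}");
/// }
/// ```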
#[cfg(feature = "gpu")]
pub struct GpuEmbeddingAccelerator {
    context: GpuContext,
    device_id: u32,
    memory_pool: Arc<Mutex<GpuMemoryPool>>,
    batch_config: AdaptiveBatchConfig,
    performance_stats: Arc<RwLock<GpuPerformanceStats>>,
    optimal_batch_size: Arc<AtomicU64>,
}

/// GPU performance statistics
#[cfg(feature = "gpu")]
#[derive(Debug, Default)]
pub struct GpuPerformanceStats {
    pub total_operations: u64,
    pub total_compute_time: Duration,
    pub memory_transfers: u64,
    pub cache_hits: u64,
    pub cache_misses: u64,
    pub average_batch_size: f32,
    pub gpu_utilization_percentage: f32,
}

/// Comprehensive GPU performance report
#[cfg(feature = "gpu")]
#[derive(Debug)]
pub struct GpuPerformanceReport {
    pub device_id: u32,
    pub total_operations: u64,
    pub average_compute_time: Duration,
    pub gpu_utilization: f32,
    pub memory_allocated_mb: f64,
    pub memory_peak_mb: f64,
    pub cache_hit_rate: f32,
    pub optimal_batch_size: usize,
}

#[cfg(feature = "gpu")]
impl GpuMemoryPool {
    pub fn new(buffer_size: usize, initial_pool_size: usize) -> Self {
        Self {
            available_buffers: VecDeque::with_capacity(initial_pool_size),
            buffer_size,
            total_allocated: AtomicU64::new(0),
            peak_usage: AtomicU64::new(0),
        }
    }

    pub fn get_buffer(&mut self) -> Option<GpuArray<f32>> {
        self.available_buffers.pop_front()
    }

    pub fn return_buffer(&mut self, buffer: GpuArray<f32>) {
        if buffer.len() == self.buffer_size {
            self.available_buffers.push_back(buffer);
        }
        // If buffer size doesn't match, let it drop (auto-deallocate)
    }

    pub fn get_memory_stats(&self) -> (u64, u64) {
        (
            self.total_allocated.load(Ordering::Relaxed),
            self.peak_usage.load(Ordering::Relaxed),
        )
    }
}

#[cfg(feature = "gpu")]
impl GpuEmbeddingAccelerator {
    /// Create a new enhanced GPU accelerator with memory pooling and adaptive batching
    /// Note: Currently using placeholder until scirs2_linalg::gpu is available
    pub fn new(device_id: u32) -> Result<Self, GpuError> {
        let context = (); // Placeholder GpuContext

        let memory_pool = Arc::new(Mutex::new(GpuMemoryPool::new(1024 * 1024, 10))); // 1M-element f32 buffers (~4 MB each), room for 10 pooled

        let batch_config = AdaptiveBatchConfig {
            min_batch_size: 32,
            max_batch_size: 8192,
            target_gpu_utilization: 0.85,
            memory_usage_threshold: 0.8,
        };

        Ok(Self {
            context,
            device_id,
            memory_pool,
            batch_config,
            performance_stats: Arc::new(RwLock::new(GpuPerformanceStats::default())),
            optimal_batch_size: Arc::new(AtomicU64::new(512)), // Start with a reasonable default
        })
    }

    /// Get optimal batch size based on recent performance
    pub async fn get_optimal_batch_size(&self, data_size: usize) -> usize {
        let optimal = self.optimal_batch_size.load(Ordering::Relaxed) as usize;
        let config_min = self.batch_config.min_batch_size;
        let config_max = self.batch_config.max_batch_size;

        // Clamp to configuration bounds and data size, guarding the upper bound so
        // `clamp` cannot panic when data_size is smaller than min_batch_size
        let upper = config_max.min(data_size).max(config_min);
        optimal.clamp(config_min, upper)
    }

    /// Update optimal batch size based on performance feedback
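    ///
    /// A hedged sketch of the intended feedback loop (assumes an async runtime and an
    /// existing `accel: GpuEmbeddingAccelerator`; the elapsed-vs-budget `score`
    /// heuristic is illustrative, not part of this API):
    ///
    /// ```ignore
    /// let batch = accel.get_optimal_batch_size(items.len()).await;
    /// let started = Instant::now();
    /// // ... run one batch of `batch` items on the device ...
    /// let score = (budget.as_secs_f32() / started.elapsed().as_secs_f32()).min(1.0);
    /// accel.update_batch_size_feedback(batch, score).await;
    /// ```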
    pub async fn update_batch_size_feedback(&self, _batch_size: usize, performance_score: f32) {
        let current_optimal = self.optimal_batch_size.load(Ordering::Relaxed) as usize;

        // Simple adaptive algorithm: increase if performance is good, decrease if poor
        let new_optimal = if performance_score > 0.8 {
            // Good performance, try larger batches
            (current_optimal as f32 * 1.1).round() as usize
        } else if performance_score < 0.5 {
            // Poor performance, try smaller batches
            (current_optimal as f32 * 0.9).round() as usize
        } else {
            current_optimal
        };

        let clamped_optimal = new_optimal.clamp(
            self.batch_config.min_batch_size,
            self.batch_config.max_batch_size,
        );

        self.optimal_batch_size
            .store(clamped_optimal as u64, Ordering::Relaxed);
    }

    /// GPU-accelerated batch distance computation
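    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
    ///
    /// Returns one distance per (a, b) combination of the two slices. A small hedged
    /// example (assumes an existing `accel: GpuEmbeddingAccelerator`):
    ///
    /// ```ignore
    /// let a = vec![Array1::from_vec(vec![0.0, 0.0])];
    /// let b = vec![Array1::from_vec(vec![3.0, 4.0])];
    /// let d = accel.batch_l2_distances_gpu(&a, &b)?;
    /// assert_eq!(d, vec![5.0]); // sqrt(3^2 + 4^2)
    /// ```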
    pub fn batch_l2_distances_gpu(
        &self,
        vectors_a: &[Array1<f64>],
        vectors_b: &[Array1<f64>],
    ) -> Result<Vec<f64>, GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        // For now, use a CPU fallback that computes the distance for every (a, b)
        // combination, matching the shared CPU helper used by the non-GPU build.
        let mut distances = Vec::with_capacity(vectors_a.len() * vectors_b.len());
        for a in vectors_a {
            for b in vectors_b {
                let dist: f64 = a
                    .iter()
                    .zip(b.iter())
                    .map(|(x, y)| (x - y).powi(2))
                    .sum::<f64>()
                    .sqrt();
                distances.push(dist);
            }
        }
        Ok(distances)
    }

    /// GPU-accelerated cosine similarity matrix
    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
    pub fn cosine_similarity_matrix_gpu(
        &self,
        vectors: &[Array1<f64>],
    ) -> Result<Array2<f64>, GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        // For now, use CPU implementation as fallback
        use scirs2_core::ndarray_ext::Array2;

        let n = vectors.len();
        let mut similarity_matrix = Array2::zeros((n, n));

        for i in 0..n {
            for j in 0..n {
                let dot: f64 = vectors[i]
                    .iter()
                    .zip(vectors[j].iter())
                    .map(|(a, b)| a * b)
                    .sum();
                let norm_i: f64 = vectors[i].iter().map(|x| x * x).sum::<f64>().sqrt();
                let norm_j: f64 = vectors[j].iter().map(|x| x * x).sum::<f64>().sqrt();
                similarity_matrix[[i, j]] = dot / (norm_i * norm_j + 1e-8);
            }
        }
        Ok(similarity_matrix)
    }

    /// GPU-accelerated gradient updates for large embedding matrices
    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
    pub fn batch_gradient_update_gpu(
        &self,
        embeddings: &mut [Array2<f64>],
        gradients: &[Array2<f64>],
        learning_rate: f64,
        l2_reg: f64,
    ) -> Result<(), GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        // For now, use CPU implementation as fallback
        for (embedding, gradient) in embeddings.iter_mut().zip(gradients.iter()) {
            // Apply gradient update with L2 regularization
            for (emb, grad) in embedding.iter_mut().zip(gradient.iter()) {
                *emb -= learning_rate * (grad + l2_reg * *emb);
            }
        }
        Ok(())
    }

    /// Advanced GPU-accelerated adaptive batch processing with memory pooling
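    ///
    /// A hedged usage sketch (assumes an async runtime and an existing
    /// `accel: GpuEmbeddingAccelerator`; the closure is whatever batch kernel the
    /// caller wants to run, here an identity pass-through):
    ///
    /// ```ignore
    /// let results = accel
    ///     .adaptive_batch_processing(&items, |chunk| Ok(chunk.to_vec()))
    ///     .await?;
    /// assert_eq!(results.len(), items.len());
    /// ```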
    pub async fn adaptive_batch_processing<T, R>(
        &self,
        data: &[T],
        mut process_fn: impl FnMut(&[T]) -> Result<Vec<R>, GpuError>,
    ) -> Result<Vec<R>, GpuError> {
        let start_time = Instant::now();
        let batch_size = self.get_optimal_batch_size(data.len()).await;

        let mut results = Vec::with_capacity(data.len());
        let mut total_processing_time = Duration::ZERO;

        for chunk in data.chunks(batch_size) {
            let chunk_start = Instant::now();
            let chunk_results = process_fn(chunk)?;
            let chunk_time = chunk_start.elapsed();

            results.extend(chunk_results);
            total_processing_time += chunk_time;
        }

        // Calculate performance score and update batch size
        let total_time = start_time.elapsed();
        let gpu_utilization = total_processing_time.as_secs_f32() / total_time.as_secs_f32();
        let performance_score = gpu_utilization.min(1.0);

        self.update_batch_size_feedback(batch_size, performance_score)
            .await;

        // Update performance statistics
        let mut stats = self.performance_stats.write().unwrap();
        stats.total_operations += 1;
        stats.total_compute_time += total_time;
        stats.gpu_utilization_percentage = gpu_utilization * 100.0;
        stats.average_batch_size = (stats.average_batch_size + batch_size as f32) / 2.0;

        Ok(results)
    }

    /// GPU-accelerated matrix multiplication with memory reuse
    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
    pub async fn optimized_matrix_multiply(
        &self,
        a: &Array2<f32>,
        b: &Array2<f32>,
    ) -> Result<Array2<f32>, GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        // For now, use CPU implementation as fallback
        let result = a.dot(b);

        Ok(result)
    }

    /// High-performance embedding search with GPU acceleration
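    ///
    /// A hedged sketch of a top-k lookup (assumes an async runtime and an existing
    /// `accel: GpuEmbeddingAccelerator`; the 2-dimensional embeddings are illustrative):
    ///
    /// ```ignore
    /// let query = Array1::from_vec(vec![1.0f32, 0.0]);
    /// let database = vec![
    ///     Array1::from_vec(vec![1.0f32, 0.0]),
    ///     Array1::from_vec(vec![0.0f32, 1.0]),
    /// ];
    /// let top1 = accel.gpu_embedding_search(&query, &database, 1).await?;
    /// assert_eq!(top1[0].0, 0); // the identical vector ranks first
    /// ```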
    pub async fn gpu_embedding_search(
        &self,
        query_embedding: &Array1<f32>,
        database_embeddings: &[Array1<f32>],
        top_k: usize,
    ) -> Result<Vec<(usize, f32)>, GpuError> {
        // Use adaptive batching for large databases
        let batch_size = self.get_optimal_batch_size(database_embeddings.len()).await;
        let mut all_similarities = Vec::with_capacity(database_embeddings.len());

        // Process in adaptive batches
        for (batch_idx, batch) in database_embeddings.chunks(batch_size).enumerate() {
            let similarities = self
                .compute_batch_similarities(query_embedding, batch)
                .await?;

            for (local_idx, similarity) in similarities.iter().enumerate() {
                let global_idx = batch_idx * batch_size + local_idx;
                all_similarities.push((global_idx, *similarity));
            }
        }

        // Sort and return top-k
        all_similarities.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
        all_similarities.truncate(top_k);

        Ok(all_similarities)
    }

    /// Compute similarities for a batch with GPU acceleration
    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
    async fn compute_batch_similarities(
        &self,
        query: &Array1<f32>,
        batch: &[Array1<f32>],
    ) -> Result<Vec<f32>, GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        // For now, use CPU implementation as fallback
        let mut similarities = Vec::with_capacity(batch.len());

        for emb in batch {
            // Compute cosine similarity: (a · b) / (||a|| * ||b||)
            let dot_product: f32 = query.iter().zip(emb.iter()).map(|(a, b)| a * b).sum();
            let norm_query: f32 = query.iter().map(|x| x * x).sum::<f32>().sqrt();
            let norm_emb: f32 = emb.iter().map(|x| x * x).sum::<f32>().sqrt();
            let similarity = dot_product / (norm_query * norm_emb + 1e-8);
            similarities.push(similarity);
        }

        Ok(similarities)
    }

    /// GPU-accelerated Xavier initialization for large embedding matrices
    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
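    ///
    /// Values are drawn from the Xavier/Glorot uniform range `[-limit, limit]` with
    /// `limit = sqrt(6 / (fan_in + fan_out))`. A hedged sketch (shapes and fan sizes
    /// are arbitrary; assumes an existing `accel: GpuEmbeddingAccelerator`):
    ///
    /// ```ignore
    /// let mats = accel.xavier_init_gpu(&[(128, 64), (64, 32)], 128, 64, 42)?;
    /// let limit = (6.0f64 / (128 + 64) as f64).sqrt();
    /// assert_eq!(mats.len(), 2);
    /// assert!(mats[0].iter().all(|v| v.abs() <= limit));
    /// ```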
    pub fn xavier_init_gpu(
        &self,
        shapes: &[(usize, usize)],
        fan_in: usize,
        fan_out: usize,
        seed: u64,
    ) -> Result<Vec<Array2<f64>>, GpuError> {
        use scirs2_core::random::Random;

        let limit = (6.0 / (fan_in + fan_out) as f64).sqrt();
        let mut rng = Random::seed(seed);
        let scale = 2.0 * limit;

        let mut results = Vec::with_capacity(shapes.len());
        for &(rows, cols) in shapes {
            // Generate uniform random numbers in [-limit, limit]
            let data: Vec<f64> = (0..rows * cols)
                .map(|_| rng.random_f64() * scale - limit)
                .collect();
            let array = Array2::from_shape_vec((rows, cols), data)
                .map_err(|e| GpuError(format!("Failed to create array: {}", e)))?;
            results.push(array);
        }
        Ok(results)
    }

    /// GPU-accelerated contrastive learning updates
    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
    pub fn contrastive_learning_gpu(
        &self,
        _entity_embeddings: &mut [Array1<f32>],
        _similarity_pairs: &[(usize, usize)],
        _negative_samples: &[(usize, usize)],
        _temperature: f32,
        _learning_rate: f32,
    ) -> Result<f32, GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        // For now, return placeholder loss
        Ok(0.0)
    }

    /// Helper function to upload vectors to GPU
    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
    fn upload_vectors_to_gpu(&self, _vectors: &[Array1<f64>]) -> Result<GpuArray<f64>, GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        Ok(Vec::new())
    }

    /// Helper function to upload f32 vectors to GPU
    /// Note: Currently using CPU fallback until scirs2_linalg::gpu is available
    fn upload_f32_vectors_to_gpu(
        &self,
        _vectors: &[Array1<f32>],
    ) -> Result<GpuArray<f32>, GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        Ok(Vec::new())
    }

    /// Get GPU device info
    pub fn device_info(&self) -> String {
        format!(
            "GPU Device {} (placeholder - scirs2_linalg::gpu not yet available)",
            self.device_id
        )
    }

    /// Get available GPU memory
    /// Note: Currently using placeholder until scirs2_linalg::gpu is available
    pub fn available_memory(&self) -> Result<u64, GpuError> {
        // TODO: Use actual GPU operations when scirs2_linalg::gpu is available
        Ok(0)
    }

    /// GPU memory and performance monitoring
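    ///
    /// A hedged sketch of reading the report (assumes an async runtime and an existing
    /// `accel: GpuEmbeddingAccelerator`):
    ///
    /// ```ignore
    /// let report = accel.get_performance_report().await;
    /// println!(
    ///     "ops={} avg={:?} util={:.1}% cache-hit={:.1}%",
    ///     report.total_operations,
    ///     report.average_compute_time,
    ///     report.gpu_utilization,
    ///     report.cache_hit_rate * 100.0,
    /// );
    /// ```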
    pub async fn get_performance_report(&self) -> GpuPerformanceReport {
        let stats = self.performance_stats.read().unwrap();
        let (allocated, peak) = {
            let pool = self.memory_pool.lock().unwrap();
            pool.get_memory_stats()
        };

        GpuPerformanceReport {
            device_id: self.device_id,
            total_operations: stats.total_operations,
            average_compute_time: if stats.total_operations > 0 {
                stats.total_compute_time / stats.total_operations as u32
            } else {
                Duration::ZERO
            },
            gpu_utilization: stats.gpu_utilization_percentage,
            memory_allocated_mb: allocated as f64 / (1024.0 * 1024.0),
            memory_peak_mb: peak as f64 / (1024.0 * 1024.0),
            cache_hit_rate: if stats.cache_hits + stats.cache_misses > 0 {
                stats.cache_hits as f32 / (stats.cache_hits + stats.cache_misses) as f32
            } else {
                0.0
            },
            optimal_batch_size: self.optimal_batch_size.load(Ordering::Relaxed) as usize,
        }
    }

    /// Reset performance statistics
    pub fn reset_performance_stats(&self) {
        let mut stats = self.performance_stats.write().unwrap();
        *stats = GpuPerformanceStats::default();
        self.optimal_batch_size.store(512, Ordering::Relaxed);
    }

    /// Get current memory pool status
    pub fn get_memory_pool_status(&self) -> (usize, u64, u64) {
        let pool = self.memory_pool.lock().unwrap();
        let (allocated, peak) = pool.get_memory_stats();
        (pool.available_buffers.len(), allocated, peak)
    }
}

/// CPU fallback implementations when GPU is not available
#[cfg(not(feature = "gpu"))]
use scirs2_core::random::Random;

#[cfg(not(feature = "gpu"))]
pub struct GpuEmbeddingAccelerator;

#[cfg(not(feature = "gpu"))]
impl GpuEmbeddingAccelerator {
    pub fn new(_device_id: u32) -> Result<Self> {
        Ok(Self)
    }

    /// Fallback to CPU implementation
    pub fn batch_l2_distances_gpu(
        &self,
        vectors_a: &[Array1<f64>],
        vectors_b: &[Array1<f64>],
    ) -> Result<Vec<f64>> {
        Ok(batch_l2_distances(vectors_a, vectors_b))
    }

    /// Fallback to CPU implementation
    pub fn cosine_similarity_matrix_gpu(&self, vectors: &[Array1<f64>]) -> Result<Array2<f64>> {
        Ok(pairwise_distances(vectors))
    }

    /// Fallback to CPU implementation
    pub fn batch_gradient_update_gpu(
        &self,
        embeddings: &mut [Array2<f64>],
        gradients: &[Array2<f64>],
        learning_rate: f64,
        l2_reg: f64,
    ) -> Result<()> {
        batch_gradient_update(embeddings, gradients, learning_rate, l2_reg);
        Ok(())
    }

    /// Fallback to CPU implementation
    pub fn xavier_init_gpu(
        &self,
        shapes: &[(usize, usize)],
        fan_in: usize,
        fan_out: usize,
        _seed: u64,
    ) -> Result<Vec<Array2<f64>>> {
        let mut rng = Random::default();
        Ok(batch_xavier_init(shapes, fan_in, fan_out, &mut rng))
    }

    pub fn device_info(&self) -> String {
        "CPU (GPU acceleration not available)".to_string()
    }

    pub fn available_memory(&self) -> Result<u64> {
        // Placeholder: report a nominal 8 GiB as the available system RAM
        Ok(8 * 1024 * 1024 * 1024) // 8 GiB default
    }
}

/// Adaptive acceleration that chooses between GPU and CPU based on problem size
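///
/// A hedged sketch (no GPU device requested, so every call takes the CPU path; the
/// 1000-element threshold and the input slices `vectors_a` / `vectors_b` are
/// illustrative):
///
/// ```ignore
/// let accel = AdaptiveEmbeddingAccelerator::new(None, 1000)?;
/// let distances = accel.adaptive_batch_distances(&vectors_a, &vectors_b)?;
/// println!("{}", accel.info());
/// ```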
pub struct AdaptiveEmbeddingAccelerator {
    gpu_accelerator: Option<GpuEmbeddingAccelerator>,
    gpu_threshold: usize,
}

impl AdaptiveEmbeddingAccelerator {
    /// Create adaptive accelerator with optional GPU support
    pub fn new(device_id: Option<u32>, gpu_threshold: usize) -> Result<Self> {
        #[allow(unused_variables)]
        let gpu_accelerator = if let Some(id) = device_id {
            #[cfg(feature = "gpu")]
            {
                GpuEmbeddingAccelerator::new(id).ok()
            }
            #[cfg(not(feature = "gpu"))]
            {
                None
            }
        } else {
            None
        };

        Ok(Self {
            gpu_accelerator,
            gpu_threshold,
        })
    }

    /// Intelligently choose between GPU and CPU for distance computation
    pub fn adaptive_batch_distances(
        &self,
        vectors_a: &[Array1<f64>],
        vectors_b: &[Array1<f64>],
    ) -> Result<Vec<f64>> {
        if self.should_use_gpu(vectors_a.len() * vectors_b.len()) {
            if let Some(ref gpu) = self.gpu_accelerator {
                return gpu
                    .batch_l2_distances_gpu(vectors_a, vectors_b)
                    .map_err(|e| anyhow::anyhow!("GPU error: {:?}", e));
            }
        }

        // Fallback to optimized CPU implementation
        Ok(batch_l2_distances(vectors_a, vectors_b))
    }

    /// Intelligently choose between GPU and CPU for gradient updates
    pub fn adaptive_gradient_update(
        &self,
        embeddings: &mut [Array2<f64>],
        gradients: &[Array2<f64>],
        learning_rate: f64,
        l2_reg: f64,
    ) -> Result<()> {
        let total_elements: usize = embeddings.iter().map(|e| e.len()).sum();

        if self.should_use_gpu(total_elements) {
            if let Some(ref gpu) = self.gpu_accelerator {
                return gpu
                    .batch_gradient_update_gpu(embeddings, gradients, learning_rate, l2_reg)
                    .map_err(|e| anyhow::anyhow!("GPU error: {:?}", e));
            }
        }

        // Fallback to optimized CPU implementation
        batch_gradient_update(embeddings, gradients, learning_rate, l2_reg);
        Ok(())
    }

    /// Check if GPU should be used based on problem size
    fn should_use_gpu(&self, problem_size: usize) -> bool {
        self.gpu_accelerator.is_some() && problem_size >= self.gpu_threshold
    }

    /// Get acceleration info
    pub fn info(&self) -> String {
        match &self.gpu_accelerator {
            Some(gpu) => format!(
                "Adaptive: {} (threshold: {})",
                gpu.device_info(),
                self.gpu_threshold
            ),
            None => format!("Adaptive: CPU only (threshold: {})", self.gpu_threshold),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_adaptive_accelerator_creation() {
        let accelerator = AdaptiveEmbeddingAccelerator::new(None, 1000).unwrap();
        assert!(accelerator.info().contains("CPU only"));
    }

    #[test]
    fn test_fallback_distance_computation() {
        let accelerator = AdaptiveEmbeddingAccelerator::new(None, 1000).unwrap();

        let vectors_a = vec![
            Array1::from_vec(vec![1.0, 2.0, 3.0]),
            Array1::from_vec(vec![4.0, 5.0, 6.0]),
        ];
        let vectors_b = vec![
            Array1::from_vec(vec![7.0, 8.0, 9.0]),
            Array1::from_vec(vec![10.0, 11.0, 12.0]),
        ];

        let distances = accelerator
            .adaptive_batch_distances(&vectors_a, &vectors_b)
            .unwrap();
        assert_eq!(distances.len(), 4); // 2x2 combinations
    }

    #[test]
    fn test_fallback_gradient_update() {
        let accelerator = AdaptiveEmbeddingAccelerator::new(None, 1000).unwrap();

        let mut embeddings = vec![Array2::zeros((2, 3))];
        let gradients = vec![Array2::ones((2, 3))];

        accelerator
            .adaptive_gradient_update(&mut embeddings, &gradients, 0.01, 0.001)
            .unwrap();

        // Check that gradients were applied
        assert!(embeddings[0][[0, 0]] != 0.0);
    }
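
    /// Sanity check for the Xavier-initialization fallback: only shapes and count are
    /// asserted, since the exact values come from the shared CPU helper.
    #[test]
    fn test_fallback_xavier_init_shapes() {
        if let Ok(accelerator) = GpuEmbeddingAccelerator::new(0) {
            let matrices = accelerator
                .xavier_init_gpu(&[(4, 6), (2, 2)], 16, 8, 42)
                .unwrap();
            assert_eq!(matrices.len(), 2);
            assert_eq!(matrices[0].dim(), (4, 6));
            assert_eq!(matrices[1].dim(), (2, 2));
        }
    }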

    #[cfg(feature = "gpu")]
    #[test]
    fn test_gpu_accelerator_creation() {
        // This test will only run when GPU features are enabled
        match GpuEmbeddingAccelerator::new(0) {
            Ok(gpu) => {
                println!("GPU Accelerator: {}", gpu.device_info());
                let memory = gpu.available_memory().unwrap_or(0);
                println!("Available GPU Memory: {} MB", memory / (1024 * 1024));
            }
            Err(_) => {
                println!("GPU not available for testing");
            }
        }
    }
}