// wsi_streamer/slide/registry.rs

//! Slide Registry for managing slide lifecycle and caching.
//!
//! The registry provides:
//! - LRU caching of opened slide readers to avoid re-parsing metadata
//! - Singleflight pattern to prevent duplicate opens for the same slide
//! - Format auto-detection when opening slides
//! - Block caching for efficient I/O
//!
//! # Example
//!
//! ```ignore
//! use wsi_streamer::slide::{SlideRegistry, S3SlideSource};
//! use wsi_streamer::io::create_s3_client;
//!
//! // Create S3 source
//! let client = create_s3_client(None).await;
//! let source = S3SlideSource::new(client, "my-bucket".to_string());
//!
//! // Create registry
//! let registry = SlideRegistry::new(source);
//!
//! // Get a slide (opens and caches on first access)
//! let slide = registry.get_slide("path/to/slide.svs").await?;
//!
//! // Read a tile
//! let tile = slide.read_tile(0, 0, 0).await?;
//! ```

29use std::collections::HashMap;
30use std::sync::Arc;
31
32use async_trait::async_trait;
33use bytes::Bytes;
34use lru::LruCache;
35use tokio::sync::{Mutex, Notify, RwLock};
36
37use crate::error::{FormatError, IoError, TiffError};
38use crate::format::{detect_format, GenericTiffReader, SlideFormat, SvsReader};
39use crate::io::{BlockCache, RangeReader, DEFAULT_BLOCK_SIZE};
40
41use super::reader::{LevelInfo, SlideReader};
42
43// =============================================================================
44// Configuration
45// =============================================================================
46
/// Default capacity for the slide cache (number of open slides kept, LRU-evicted).
const DEFAULT_SLIDE_CACHE_CAPACITY: usize = 100;

/// Default capacity for the block cache per slide (number of blocks).
/// Per-slide memory is roughly `block_size * DEFAULT_BLOCK_CACHE_CAPACITY`.
const DEFAULT_BLOCK_CACHE_CAPACITY: usize = 100;
52
53// =============================================================================
54// SlideSource Trait
55// =============================================================================
56
/// Result of listing slides from storage.
///
/// Returned by [`SlideSource::list_slides`]; supports cursor-based pagination.
#[derive(Debug, Clone)]
pub struct SlideListResult {
    /// List of slide paths/keys usable with [`SlideSource::create_reader`].
    pub slides: Vec<String>,
    /// Continuation token for pagination (`None` if no more results).
    pub next_cursor: Option<String>,
}
65
/// Trait for creating range readers from slide identifiers.
///
/// This abstraction allows the registry to work with different storage backends
/// (S3, local files, etc.) without being tied to a specific implementation.
#[async_trait]
pub trait SlideSource: Send + Sync {
    /// The type of range reader this source creates.
    type Reader: RangeReader + 'static;

    /// Create a range reader for the given slide identifier.
    ///
    /// # Arguments
    /// * `slide_id` - Unique identifier for the slide (e.g., S3 key)
    ///
    /// # Returns
    /// A range reader for accessing the slide's bytes.
    ///
    /// # Errors
    /// Returns an [`IoError`] if the reader cannot be created.
    async fn create_reader(&self, slide_id: &str) -> Result<Self::Reader, IoError>;

    /// List available slides from the storage backend.
    ///
    /// This method returns slide paths/keys that can be used to access slides.
    /// The default implementation returns an empty list, so sources that do not
    /// support enumeration need not override it.
    ///
    /// # Arguments
    /// * `limit` - Maximum number of slides to return
    /// * `cursor` - Continuation token for pagination (from previous response)
    /// * `prefix` - Optional path prefix to filter results (e.g., "folder/")
    ///
    /// # Returns
    /// A list of slide paths and optional continuation token.
    async fn list_slides(
        &self,
        _limit: u32,
        _cursor: Option<&str>,
        _prefix: Option<&str>,
    ) -> Result<SlideListResult, IoError> {
        Ok(SlideListResult {
            slides: vec![],
            next_cursor: None,
        })
    }
}
108
109// =============================================================================
110// CachedSlide
111// =============================================================================
112
/// A slide that has been opened and cached.
///
/// This holds both the parsed slide structure and the underlying reader
/// (wrapped in a BlockCache for efficient I/O). Cloning the containing `Arc`
/// is how the registry hands out shared access.
pub struct CachedSlide<R: RangeReader + 'static> {
    /// The detected format of this slide.
    format: SlideFormat,

    /// The underlying reader with block caching; shared with tile reads.
    reader: Arc<BlockCache<R>>,

    /// The slide reader (either SVS or generic TIFF).
    inner: SlideReaderInner,
}
127
/// Internal enum to hold format-specific readers.
///
/// We use an enum instead of trait objects because `SlideReader::read_tile`
/// is generic over the reader type, making the trait not object-safe.
enum SlideReaderInner {
    /// Aperio SVS slide reader.
    Svs(SvsReader),
    /// Generic tiled-TIFF slide reader.
    GenericTiff(GenericTiffReader),
}
136
137impl<R: RangeReader + 'static> CachedSlide<R> {
138    /// Get the detected format of this slide.
139    pub fn format(&self) -> SlideFormat {
140        self.format
141    }
142
143    /// Get the number of pyramid levels.
144    pub fn level_count(&self) -> usize {
145        match &self.inner {
146            SlideReaderInner::Svs(r) => r.level_count(),
147            SlideReaderInner::GenericTiff(r) => r.level_count(),
148        }
149    }
150
151    /// Get dimensions of the full-resolution (level 0) image.
152    pub fn dimensions(&self) -> Option<(u32, u32)> {
153        match &self.inner {
154            SlideReaderInner::Svs(r) => r.dimensions(),
155            SlideReaderInner::GenericTiff(r) => r.dimensions(),
156        }
157    }
158
159    /// Get dimensions of a specific level.
160    pub fn level_dimensions(&self, level: usize) -> Option<(u32, u32)> {
161        match &self.inner {
162            SlideReaderInner::Svs(r) => r.level_dimensions(level),
163            SlideReaderInner::GenericTiff(r) => r.level_dimensions(level),
164        }
165    }
166
167    /// Get the downsample factor for a level.
168    pub fn level_downsample(&self, level: usize) -> Option<f64> {
169        match &self.inner {
170            SlideReaderInner::Svs(r) => r.level_downsample(level),
171            SlideReaderInner::GenericTiff(r) => r.level_downsample(level),
172        }
173    }
174
175    /// Get tile size for a level.
176    pub fn tile_size(&self, level: usize) -> Option<(u32, u32)> {
177        match &self.inner {
178            SlideReaderInner::Svs(r) => r.tile_size(level),
179            SlideReaderInner::GenericTiff(r) => r.tile_size(level),
180        }
181    }
182
183    /// Get the number of tiles in X and Y directions for a level.
184    pub fn tile_count(&self, level: usize) -> Option<(u32, u32)> {
185        match &self.inner {
186            SlideReaderInner::Svs(r) => r.tile_count(level),
187            SlideReaderInner::GenericTiff(r) => r.tile_count(level),
188        }
189    }
190
191    /// Get complete information about a level.
192    pub fn level_info(&self, level: usize) -> Option<LevelInfo> {
193        match &self.inner {
194            SlideReaderInner::Svs(r) => r.level_info(level),
195            SlideReaderInner::GenericTiff(r) => r.level_info(level),
196        }
197    }
198
199    /// Find the best level for a given downsample factor.
200    pub fn best_level_for_downsample(&self, downsample: f64) -> Option<usize> {
201        match &self.inner {
202            SlideReaderInner::Svs(r) => SlideReader::best_level_for_downsample(r, downsample),
203            SlideReaderInner::GenericTiff(r) => {
204                SlideReader::best_level_for_downsample(r, downsample)
205            }
206        }
207    }
208
209    /// Read a tile and prepare it for JPEG decoding.
210    ///
211    /// # Arguments
212    /// * `level` - Pyramid level index (0 = highest resolution)
213    /// * `tile_x` - Tile X coordinate (0-indexed from left)
214    /// * `tile_y` - Tile Y coordinate (0-indexed from top)
215    ///
216    /// # Returns
217    /// Complete JPEG data ready for decoding.
218    pub async fn read_tile(
219        &self,
220        level: usize,
221        tile_x: u32,
222        tile_y: u32,
223    ) -> Result<Bytes, TiffError> {
224        match &self.inner {
225            SlideReaderInner::Svs(r) => {
226                r.read_tile(self.reader.as_ref(), level, tile_x, tile_y)
227                    .await
228            }
229            SlideReaderInner::GenericTiff(r) => {
230                r.read_tile(self.reader.as_ref(), level, tile_x, tile_y)
231                    .await
232            }
233        }
234    }
235}
236
237// =============================================================================
238// SlideRegistry
239// =============================================================================
240
/// Registry for managing slide lifecycle and caching.
///
/// The registry:
/// - Caches opened slide readers with LRU eviction
/// - Creates readers on-demand with format auto-detection
/// - Wraps readers in BlockCache for efficient I/O
/// - Uses singleflight to prevent duplicate opens for the same slide
pub struct SlideRegistry<S: SlideSource> {
    /// The source for creating range readers.
    source: S,

    /// Cached slides indexed by slide ID (LRU-evicted).
    cache: RwLock<LruCache<String, Arc<CachedSlide<S::Reader>>>>,

    /// In-flight opens for the singleflight pattern: at most one entry per
    /// slide ID; concurrent callers wait on the entry instead of re-opening.
    in_flight: Mutex<HashMap<String, Arc<InFlightState<S::Reader>>>>,

    /// Block size for BlockCache (bytes).
    block_size: usize,

    /// Block cache capacity per slide (number of blocks).
    block_cache_capacity: usize,
}
264
/// State for an in-flight slide open operation (singleflight rendezvous).
struct InFlightState<R: RangeReader + 'static> {
    /// Notification for waiters; fired by the leader after `result` is set.
    notify: Notify,
    /// Result of the open operation (set by the leader when complete).
    result: Mutex<Option<Result<Arc<CachedSlide<R>>, FormatError>>>,
}
272
273impl<S: SlideSource> SlideRegistry<S> {
274    /// Create a new SlideRegistry with default settings.
275    ///
276    /// Uses default cache capacities:
277    /// - Slide cache: 100 slides
278    /// - Block cache per slide: 100 blocks (25.6 MB per slide)
279    pub fn new(source: S) -> Self {
280        Self::with_capacity(
281            source,
282            DEFAULT_SLIDE_CACHE_CAPACITY,
283            DEFAULT_BLOCK_SIZE,
284            DEFAULT_BLOCK_CACHE_CAPACITY,
285        )
286    }
287
288    /// Create a new SlideRegistry with custom capacity settings.
289    ///
290    /// # Arguments
291    /// * `source` - The slide source for creating readers
292    /// * `slide_cache_capacity` - Maximum number of slides to cache
293    /// * `block_size` - Block size for the block cache (bytes)
294    /// * `block_cache_capacity` - Number of blocks to cache per slide
295    pub fn with_capacity(
296        source: S,
297        slide_cache_capacity: usize,
298        block_size: usize,
299        block_cache_capacity: usize,
300    ) -> Self {
301        Self {
302            source,
303            cache: RwLock::new(LruCache::new(
304                std::num::NonZeroUsize::new(slide_cache_capacity).unwrap(),
305            )),
306            in_flight: Mutex::new(HashMap::new()),
307            block_size,
308            block_cache_capacity,
309        }
310    }
311
312    /// Get a slide, opening it if not already cached.
313    ///
314    /// This method:
315    /// 1. Checks the cache for an existing slide
316    /// 2. If not cached, opens the slide with format auto-detection
317    /// 3. Uses singleflight to prevent duplicate opens for concurrent requests
318    ///
319    /// # Arguments
320    /// * `slide_id` - Unique identifier for the slide
321    ///
322    /// # Returns
323    /// An Arc-wrapped CachedSlide that can be used to read tiles.
324    pub async fn get_slide(
325        &self,
326        slide_id: &str,
327    ) -> Result<Arc<CachedSlide<S::Reader>>, FormatError> {
328        // Fast path: check cache
329        {
330            let mut cache = self.cache.write().await;
331            if let Some(slide) = cache.get(slide_id) {
332                return Ok(slide.clone());
333            }
334        }
335
336        // Slow path: check in_flight or become leader
337        loop {
338            let state = {
339                let mut in_flight = self.in_flight.lock().await;
340
341                if let Some(state) = in_flight.get(slide_id) {
342                    // Another task is opening this slide
343                    state.clone()
344                } else {
345                    // We're the leader for opening this slide
346                    let state = Arc::new(InFlightState {
347                        notify: Notify::new(),
348                        result: Mutex::new(None),
349                    });
350                    in_flight.insert(slide_id.to_string(), state.clone());
351                    drop(in_flight);
352
353                    // Perform the open
354                    let result = self.open_slide_internal(slide_id).await;
355
356                    // Store result and update cache
357                    {
358                        let mut result_guard = state.result.lock().await;
359                        *result_guard = Some(result.clone());
360                    }
361
362                    if let Ok(ref slide) = result {
363                        let mut cache = self.cache.write().await;
364                        cache.put(slide_id.to_string(), slide.clone());
365                    }
366
367                    // Clean up in_flight and notify waiters
368                    {
369                        let mut in_flight = self.in_flight.lock().await;
370                        in_flight.remove(slide_id);
371                    }
372                    state.notify.notify_waiters();
373
374                    return result;
375                }
376            };
377
378            // Wait for the leader to finish
379            state.notify.notified().await;
380
381            // Check if result is available
382            let result_guard = state.result.lock().await;
383            if let Some(ref result) = *result_guard {
384                return result.clone();
385            }
386
387            // Result not yet available, loop back (shouldn't normally happen)
388        }
389    }
390
391    /// Open a slide without caching (internal implementation).
392    async fn open_slide_internal(
393        &self,
394        slide_id: &str,
395    ) -> Result<Arc<CachedSlide<S::Reader>>, FormatError> {
396        // Create the underlying reader
397        let reader = self.source.create_reader(slide_id).await?;
398
399        // Wrap in block cache
400        let cached_reader = Arc::new(BlockCache::with_capacity(
401            reader,
402            self.block_size,
403            self.block_cache_capacity,
404        ));
405
406        // Detect format
407        let format = detect_format(cached_reader.as_ref()).await?;
408
409        // Open the appropriate reader
410        let inner = match format {
411            SlideFormat::AperioSvs => {
412                let svs = SvsReader::open(cached_reader.as_ref()).await?;
413                SlideReaderInner::Svs(svs)
414            }
415            SlideFormat::GenericTiff => {
416                let tiff = GenericTiffReader::open(cached_reader.as_ref()).await?;
417                SlideReaderInner::GenericTiff(tiff)
418            }
419        };
420
421        Ok(Arc::new(CachedSlide {
422            format,
423            reader: cached_reader,
424            inner,
425        }))
426    }
427
428    /// Remove a slide from the cache.
429    ///
430    /// This can be useful for forcing a reload of a slide's metadata.
431    pub async fn invalidate(&self, slide_id: &str) {
432        let mut cache = self.cache.write().await;
433        cache.pop(slide_id);
434    }
435
436    /// Clear all cached slides.
437    pub async fn clear(&self) {
438        let mut cache = self.cache.write().await;
439        cache.clear();
440    }
441
442    /// Get the number of cached slides.
443    pub async fn cached_count(&self) -> usize {
444        let cache = self.cache.read().await;
445        cache.len()
446    }
447
448    /// Get a reference to the underlying slide source.
449    ///
450    /// This can be used to access source-specific functionality like listing slides.
451    pub fn source(&self) -> &S {
452        &self.source
453    }
454}
455
456// =============================================================================
457// Tests
458// =============================================================================
459
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicUsize, Ordering};

    /// Mock slide source for testing.
    struct MockSlideSource {
        /// Number of times create_reader was called
        create_count: AtomicUsize,
        /// Data returned by every reader this source creates
        data: Bytes,
    }

    impl MockSlideSource {
        fn new(data: Vec<u8>) -> Self {
            Self {
                create_count: AtomicUsize::new(0),
                data: Bytes::from(data),
            }
        }

        fn create_count(&self) -> usize {
            self.create_count.load(Ordering::SeqCst)
        }
    }

    /// Mock range reader backed by an in-memory buffer.
    struct MockReader {
        data: Bytes,
        identifier: String,
    }

    #[async_trait]
    impl RangeReader for MockReader {
        async fn read_exact_at(&self, offset: u64, len: usize) -> Result<Bytes, IoError> {
            let start = offset as usize;
            let end = start + len;
            if end > self.data.len() {
                return Err(IoError::RangeOutOfBounds {
                    offset,
                    requested: len as u64,
                    size: self.data.len() as u64,
                });
            }
            Ok(self.data.slice(start..end))
        }

        fn size(&self) -> u64 {
            self.data.len() as u64
        }

        fn identifier(&self) -> &str {
            &self.identifier
        }
    }

    #[async_trait]
    impl SlideSource for MockSlideSource {
        type Reader = MockReader;

        async fn create_reader(&self, slide_id: &str) -> Result<Self::Reader, IoError> {
            // Count every open so tests can assert cache hits vs. misses.
            self.create_count.fetch_add(1, Ordering::SeqCst);
            Ok(MockReader {
                data: self.data.clone(),
                identifier: format!("mock://{}", slide_id),
            })
        }
    }

    /// Create a minimal valid TIFF file for testing.
    ///
    /// The TIFF must have dimensions > 1000x1000 to avoid being classified
    /// as a label image by the pyramid detection logic.
    fn create_minimal_tiff() -> Vec<u8> {
        let mut data = vec![0u8; 8192];

        // Little-endian TIFF header
        data[0] = 0x49; // 'I'
        data[1] = 0x49; // 'I'
        data[2] = 0x2A; // Version 42
        data[3] = 0x00;
        data[4] = 0x08; // First IFD at offset 8
        data[5] = 0x00;
        data[6] = 0x00;
        data[7] = 0x00;

        // IFD at offset 8
        // Entry count = 8
        data[8] = 0x08;
        data[9] = 0x00;

        let mut offset = 10;

        // Helper to write one 12-byte IFD entry (tag, type, count, value)
        let write_entry =
            |data: &mut [u8], offset: &mut usize, tag: u16, typ: u16, count: u32, value: u32| {
                data[*offset..*offset + 2].copy_from_slice(&tag.to_le_bytes());
                data[*offset + 2..*offset + 4].copy_from_slice(&typ.to_le_bytes());
                data[*offset + 4..*offset + 8].copy_from_slice(&count.to_le_bytes());
                data[*offset + 8..*offset + 12].copy_from_slice(&value.to_le_bytes());
                *offset += 12;
            };

        // ImageWidth (2048) - tag 256, type LONG (4), count 1, value 2048
        // Using LONG type to accommodate larger values
        write_entry(&mut data, &mut offset, 256, 4, 1, 2048);

        // ImageLength (1536) - tag 257, type LONG (4), count 1, value 1536
        write_entry(&mut data, &mut offset, 257, 4, 1, 1536);

        // Compression (7 = JPEG) - tag 259, type SHORT (3), count 1, value 7
        write_entry(&mut data, &mut offset, 259, 3, 1, 7);

        // TileWidth (256) - tag 322, type SHORT (3), count 1, value 256
        write_entry(&mut data, &mut offset, 322, 3, 1, 256);

        // TileLength (256) - tag 323, type SHORT (3), count 1, value 256
        write_entry(&mut data, &mut offset, 323, 3, 1, 256);

        // TileOffsets - tag 324, type LONG (4), count 48 (8x6 tiles), value at offset 200
        // 2048/256 = 8 tiles in X, 1536/256 = 6 tiles in Y = 48 tiles
        write_entry(&mut data, &mut offset, 324, 4, 48, 200);

        // TileByteCounts - tag 325, type LONG (4), count 48, value at offset 400
        write_entry(&mut data, &mut offset, 325, 4, 48, 400);

        // BitsPerSample - tag 258, type SHORT (3), count 1, value 8
        write_entry(&mut data, &mut offset, 258, 3, 1, 8);

        // Next IFD offset (0 = no more IFDs)
        data[offset..offset + 4].copy_from_slice(&0u32.to_le_bytes());

        // Write tile offsets array at offset 200 (48 LONG values)
        // Each tile starts at offset 1000 + (index * 100)
        for i in 0..48u32 {
            let tile_offset = 1000 + i * 100;
            let arr_offset = 200 + (i as usize) * 4;
            data[arr_offset..arr_offset + 4].copy_from_slice(&tile_offset.to_le_bytes());
        }

        // Write tile byte counts array at offset 400 (48 LONG values)
        // Each tile is 90 bytes
        for i in 0..48u32 {
            let arr_offset = 400 + (i as usize) * 4;
            data[arr_offset..arr_offset + 4].copy_from_slice(&90u32.to_le_bytes());
        }

        // Put some JPEG-like data for each tile (starting at offset 1000)
        for i in 0..48 {
            let tile_start = 1000 + i * 100;
            data[tile_start] = 0xFF;
            data[tile_start + 1] = 0xD8; // SOI
            data[tile_start + 2] = 0xFF;
            data[tile_start + 3] = 0xDB; // DQT marker (indicates complete JPEG)
            data[tile_start + 88] = 0xFF;
            data[tile_start + 89] = 0xD9; // EOI
        }

        data
    }

    #[tokio::test]
    async fn test_registry_caches_slides() {
        let tiff_data = create_minimal_tiff();
        let source = MockSlideSource::new(tiff_data);
        let registry = SlideRegistry::with_capacity(source, 10, 256, 10);

        // First access should open the slide
        let result = registry.get_slide("test.tif").await;
        assert!(result.is_ok());
        assert_eq!(registry.source.create_count(), 1);

        // Second access should hit cache
        let result2 = registry.get_slide("test.tif").await;
        assert!(result2.is_ok());
        assert_eq!(registry.source.create_count(), 1); // Still 1

        // Different slide should create new reader
        let result3 = registry.get_slide("test2.tif").await;
        assert!(result3.is_ok());
        assert_eq!(registry.source.create_count(), 2);
    }

    #[tokio::test]
    async fn test_registry_cache_eviction() {
        let tiff_data = create_minimal_tiff();
        let source = MockSlideSource::new(tiff_data);
        // Cache capacity of 2
        let registry = SlideRegistry::with_capacity(source, 2, 256, 10);

        // Open 3 slides (cache can only hold 2)
        registry.get_slide("slide1.tif").await.unwrap();
        registry.get_slide("slide2.tif").await.unwrap();
        registry.get_slide("slide3.tif").await.unwrap();

        assert_eq!(registry.source.create_count(), 3);
        assert_eq!(registry.cached_count().await, 2);

        // slide1 was the LRU entry and got evicted, so accessing it again
        // requires a fresh open
        registry.get_slide("slide1.tif").await.unwrap();
        assert_eq!(registry.source.create_count(), 4);
    }

    #[tokio::test]
    async fn test_registry_invalidate() {
        let tiff_data = create_minimal_tiff();
        let source = MockSlideSource::new(tiff_data);
        let registry = SlideRegistry::new(source);

        // Open slide
        registry.get_slide("test.tif").await.unwrap();
        assert_eq!(registry.source.create_count(), 1);

        // Invalidate
        registry.invalidate("test.tif").await;
        assert_eq!(registry.cached_count().await, 0);

        // Reopen should create new reader
        registry.get_slide("test.tif").await.unwrap();
        assert_eq!(registry.source.create_count(), 2);
    }

    #[tokio::test]
    async fn test_registry_clear() {
        let tiff_data = create_minimal_tiff();
        let source = MockSlideSource::new(tiff_data);
        let registry = SlideRegistry::new(source);

        // Open multiple slides
        registry.get_slide("slide1.tif").await.unwrap();
        registry.get_slide("slide2.tif").await.unwrap();
        assert_eq!(registry.cached_count().await, 2);

        // Clear cache
        registry.clear().await;
        assert_eq!(registry.cached_count().await, 0);
    }

    #[tokio::test]
    async fn test_cached_slide_metadata() {
        let tiff_data = create_minimal_tiff();
        let source = MockSlideSource::new(tiff_data);
        let registry = SlideRegistry::new(source);

        let slide = registry.get_slide("test.tif").await.unwrap();

        // Metadata should match the synthetic TIFF built by create_minimal_tiff
        assert_eq!(slide.format(), SlideFormat::GenericTiff);
        assert_eq!(slide.level_count(), 1);
        assert_eq!(slide.dimensions(), Some((2048, 1536)));
        assert_eq!(slide.tile_size(0), Some((256, 256)));
        assert_eq!(slide.tile_count(0), Some((8, 6)));
    }

    #[tokio::test]
    async fn test_concurrent_opens_singleflight() {
        use std::sync::atomic::AtomicBool;
        use tokio::time::{sleep, Duration};

        /// Slow source that takes time to create readers, widening the window
        /// in which concurrent opens would be observable.
        struct SlowMockSource {
            data: Bytes,
            create_count: AtomicUsize,
            is_creating: AtomicBool,
        }

        impl SlowMockSource {
            fn new(data: Vec<u8>) -> Self {
                Self {
                    data: Bytes::from(data),
                    create_count: AtomicUsize::new(0),
                    is_creating: AtomicBool::new(false),
                }
            }
        }

        #[async_trait]
        impl SlideSource for SlowMockSource {
            type Reader = MockReader;

            async fn create_reader(&self, slide_id: &str) -> Result<Self::Reader, IoError> {
                // Check if another create is in progress
                let was_creating = self.is_creating.swap(true, Ordering::SeqCst);
                assert!(
                    !was_creating,
                    "Concurrent creates detected - singleflight failed!"
                );

                self.create_count.fetch_add(1, Ordering::SeqCst);
                sleep(Duration::from_millis(50)).await;

                self.is_creating.store(false, Ordering::SeqCst);

                Ok(MockReader {
                    data: self.data.clone(),
                    identifier: format!("mock://{}", slide_id),
                })
            }
        }

        let tiff_data = create_minimal_tiff();
        let source = SlowMockSource::new(tiff_data);
        let registry = Arc::new(SlideRegistry::new(source));

        // Spawn multiple concurrent requests for the same slide
        let mut handles = Vec::new();
        for _ in 0..5 {
            let registry = registry.clone();
            handles.push(tokio::spawn(
                async move { registry.get_slide("test.tif").await },
            ));
        }

        // Wait for all to complete
        for handle in handles {
            let result = handle.await.unwrap();
            assert!(result.is_ok());
        }

        // Should have only created one reader due to singleflight
        assert_eq!(registry.source.create_count.load(Ordering::SeqCst), 1);
    }
}