// foxstash_core/storage/file.rs

//! File-based storage for native platforms
//!
//! Provides persistent storage with compression, atomic writes,
//! and metadata management.
//!
//! # Features
//!
//! - **Atomic Writes**: Write to temporary files and rename to prevent corruption
//! - **Compression**: Configurable compression codecs for space efficiency
//! - **Metadata Tracking**: Store creation time, update time, and compression stats
//! - **Type Safety**: Separate methods for documents and indices
//!
//! # Examples
//!
//! ```no_run
//! use foxstash_core::storage::file::FileStorage;
//! use foxstash_core::storage::compression::Codec;
//! use foxstash_core::Document;
//!
//! # fn main() -> foxstash_core::Result<()> {
//! // Create storage with default codec (None)
//! let storage = FileStorage::new("/tmp/rag_storage")?;
//!
//! // Or with compression
//! let storage = FileStorage::with_codec("/tmp/rag_storage", Codec::Gzip)?;
//!
//! // Save a document
//! let doc = Document {
//!     id: "doc1".to_string(),
//!     content: "Hello world".to_string(),
//!     embedding: vec![0.1; 384],
//!     metadata: None,
//! };
//! let stats = storage.save_document("doc1", &doc)?;
//! println!("Compression ratio: {:.2}", stats.ratio);
//!
//! // Load it back
//! let loaded = storage.load_document("doc1")?;
//! assert_eq!(loaded.id, "doc1");
//!
//! // List all stored items
//! let items = storage.list()?;
//! println!("Stored items: {:?}", items);
//! # Ok(())
//! # }
//! ```
47
#![cfg(not(target_arch = "wasm32"))]

use crate::storage::compression::{self, Codec, CompressionStats};
use crate::{Document, RagError, Result};
use serde::{Deserialize, Serialize};
use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::{Component, Path, PathBuf};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};

/// On-disk format version; loads reject metadata with a different version.
const STORAGE_VERSION: u32 = 2;
/// Extension for serialized (possibly compressed) payload files.
const DATA_EXTENSION: &str = "data";
/// Extension for the JSON metadata sidecar files.
const META_EXTENSION: &str = "meta";
/// Extension for in-flight temporary files used by atomic writes.
const TMP_EXTENSION: &str = "tmp";
/// Process-wide counter that keeps concurrent temp-file names unique.
static TMP_FILE_COUNTER: AtomicU64 = AtomicU64::new(0);
64
/// Metadata for stored items
///
/// Contains information about the stored item including version,
/// timestamps, and compression statistics.
///
/// Persisted as a JSON sidecar (`<name>.meta`) next to each data file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageMetadata {
    /// Storage format version (see `STORAGE_VERSION`)
    pub version: u32,
    /// Unix timestamp (seconds) when item was created
    pub created_at: u64,
    /// Unix timestamp (seconds) when item was last updated
    pub updated_at: u64,
    /// Type of stored item ("document", "flat_index", "hnsw_index")
    pub item_type: String,
    /// Compression codec used
    pub compression: Codec,
    /// Original size before compression (bytes)
    pub original_size: usize,
    /// Compressed size after compression (bytes)
    pub compressed_size: usize,
}
86
87impl StorageMetadata {
88    /// Create new metadata
89    fn new(
90        item_type: String,
91        compression: Codec,
92        original_size: usize,
93        compressed_size: usize,
94    ) -> Self {
95        let now = SystemTime::now()
96            .duration_since(UNIX_EPOCH)
97            .unwrap()
98            .as_secs();
99
100        Self {
101            version: STORAGE_VERSION,
102            created_at: now,
103            updated_at: now,
104            item_type,
105            compression,
106            original_size,
107            compressed_size,
108        }
109    }
110
111    /// Update the updated_at timestamp
112    fn touch(&mut self) {
113        self.updated_at = SystemTime::now()
114            .duration_since(UNIX_EPOCH)
115            .unwrap()
116            .as_secs();
117    }
118}
119
/// File-based storage manager
///
/// Manages persistent storage of documents and indices on the filesystem.
/// Uses atomic writes to prevent corruption and supports configurable compression.
///
/// # Directory Structure
///
/// ```text
/// base_path/
/// ├── doc1.data       # Serialized and compressed document
/// ├── doc1.meta       # Metadata for document
/// ├── index1.data     # Serialized and compressed index
/// └── index1.meta     # Metadata for index
/// ```
#[derive(Debug)]
pub struct FileStorage {
    // Root directory containing all `.data`/`.meta` files.
    base_path: PathBuf,
    // Codec applied on writes; reads detect the codec from the data header.
    codec: Codec,
}
139
140impl FileStorage {
141    /// Create new file storage at the specified path
142    ///
143    /// Creates the directory if it doesn't exist. Uses no compression by default.
144    ///
145    /// # Arguments
146    ///
147    /// * `base_path` - Directory path for storage
148    ///
149    /// # Returns
150    ///
151    /// * `Result<Self>` - New FileStorage instance
152    ///
153    /// # Errors
154    ///
155    /// Returns error if directory creation fails or path is invalid.
156    ///
157    /// # Examples
158    ///
159    /// ```no_run
160    /// # use foxstash_core::storage::file::FileStorage;
161    /// let storage = FileStorage::new("/tmp/my_storage").unwrap();
162    /// ```
163    pub fn new(base_path: impl AsRef<Path>) -> Result<Self> {
164        Self::with_codec(base_path, Codec::None)
165    }
166
167    /// Create file storage with specific compression codec
168    ///
169    /// # Arguments
170    ///
171    /// * `base_path` - Directory path for storage
172    /// * `codec` - Compression codec to use
173    ///
174    /// # Returns
175    ///
176    /// * `Result<Self>` - New FileStorage instance
177    ///
178    /// # Errors
179    ///
180    /// Returns error if directory creation fails or path is invalid.
181    ///
182    /// # Examples
183    ///
184    /// ```no_run
185    /// # use foxstash_core::storage::file::FileStorage;
186    /// # use foxstash_core::storage::compression::Codec;
187    /// let storage = FileStorage::with_codec("/tmp/my_storage", Codec::Gzip).unwrap();
188    /// ```
189    pub fn with_codec(base_path: impl AsRef<Path>, codec: Codec) -> Result<Self> {
190        let base_path = base_path.as_ref().to_path_buf();
191
192        // Create directory if it doesn't exist
193        if !base_path.exists() {
194            fs::create_dir_all(&base_path).map_err(|e| {
195                RagError::StorageError(format!("Failed to create storage directory: {}", e))
196            })?;
197        }
198
199        // Verify it's a directory
200        if !base_path.is_dir() {
201            return Err(RagError::StorageError(format!(
202                "Storage path is not a directory: {}",
203                base_path.display()
204            )));
205        }
206
207        Ok(Self { base_path, codec })
208    }
209
210    /// Save document with compression
211    ///
212    /// # Arguments
213    ///
214    /// * `id` - Unique identifier for the document
215    /// * `document` - Document to save
216    ///
217    /// # Returns
218    ///
219    /// * `Result<CompressionStats>` - Compression statistics
220    ///
221    /// # Errors
222    ///
223    /// Returns error if serialization or writing fails.
224    ///
225    /// # Examples
226    ///
227    /// ```no_run
228    /// # use foxstash_core::storage::file::FileStorage;
229    /// # use foxstash_core::Document;
230    /// # fn main() -> foxstash_core::Result<()> {
231    /// let storage = FileStorage::new("/tmp/storage")?;
232    /// let doc = Document {
233    ///     id: "doc1".to_string(),
234    ///     content: "Test".to_string(),
235    ///     embedding: vec![0.1; 384],
236    ///     metadata: None,
237    /// };
238    /// let stats = storage.save_document("doc1", &doc)?;
239    /// println!("Saved with ratio: {:.2}", stats.ratio);
240    /// # Ok(())
241    /// # }
242    /// ```
243    pub fn save_document(&self, id: &str, document: &Document) -> Result<CompressionStats> {
244        Self::validate_item_name(id)?;
245
246        // Use JSON serialization for documents because they contain serde_json::Value metadata
247        let serialized = serde_json::to_vec(document)
248            .map_err(|e| RagError::StorageError(format!("JSON serialization failed: {}", e)))?;
249
250        // Compress the data
251        let (compressed, stats) = compression::compress_with(&serialized, self.codec)
252            .map_err(|e| RagError::StorageError(format!("Compression failed: {}", e)))?;
253
254        // Create or update metadata
255        let metadata = if self.exists(id) {
256            let mut meta = self.get_metadata(id)?;
257            meta.touch();
258            meta.original_size = stats.original_size;
259            meta.compressed_size = stats.compressed_size;
260            meta.compression = stats.codec;
261            meta
262        } else {
263            StorageMetadata::new(
264                "document".to_string(),
265                stats.codec,
266                stats.original_size,
267                stats.compressed_size,
268            )
269        };
270
271        // Save data file atomically
272        let data_path = self.item_path(id);
273        self.write_atomic(&data_path, &compressed)?;
274
275        // Save metadata file atomically
276        let meta_path = self.metadata_path(id);
277        let meta_bytes = serde_json::to_vec(&metadata)
278            .map_err(|e| RagError::StorageError(format!("metadata serialize failed: {}", e)))?;
279        self.write_atomic(&meta_path, &meta_bytes)?;
280
281        Ok(stats)
282    }
283
284    /// Load document
285    ///
286    /// # Arguments
287    ///
288    /// * `id` - Unique identifier for the document
289    ///
290    /// # Returns
291    ///
292    /// * `Result<Document>` - Loaded document
293    ///
294    /// # Errors
295    ///
296    /// Returns error if document doesn't exist or deserialization fails.
297    ///
298    /// # Examples
299    ///
300    /// ```no_run
301    /// # use foxstash_core::storage::file::FileStorage;
302    /// # fn main() -> foxstash_core::Result<()> {
303    /// let storage = FileStorage::new("/tmp/storage")?;
304    /// let doc = storage.load_document("doc1")?;
305    /// println!("Loaded: {}", doc.id);
306    /// # Ok(())
307    /// # }
308    /// ```
309    pub fn load_document(&self, id: &str) -> Result<Document> {
310        Self::validate_item_name(id)?;
311
312        // Check if item exists
313        if !self.exists(id) {
314            return Err(RagError::StorageError(format!(
315                "Document not found: {}",
316                id
317            )));
318        }
319
320        // Load metadata
321        let metadata = self.get_metadata(id)?;
322
323        // Check version compatibility
324        if metadata.version != STORAGE_VERSION {
325            return Err(RagError::StorageError(format!(
326                "Incompatible storage version: expected {}, got {}",
327                STORAGE_VERSION, metadata.version
328            )));
329        }
330
331        // Load data file
332        let data_path = self.item_path(id);
333        let mut file = File::open(&data_path)?;
334        let mut compressed = Vec::new();
335        file.read_to_end(&mut compressed)?;
336
337        // Verify size matches metadata
338        if compressed.len() != metadata.compressed_size {
339            return Err(RagError::StorageError(format!(
340                "Data corruption detected: size mismatch for {}",
341                id
342            )));
343        }
344
345        // Decompress (codec detected automatically from header)
346        let decompressed = compression::decompress(&compressed)
347            .map_err(|e| RagError::StorageError(format!("Decompression failed: {}", e)))?;
348
349        // Deserialize using JSON
350        let document: Document = serde_json::from_slice(&decompressed)
351            .map_err(|e| RagError::StorageError(format!("JSON deserialization failed: {}", e)))?;
352
353        Ok(document)
354    }
355
356    /// Save FlatIndex
357    ///
358    /// # Arguments
359    ///
360    /// * `name` - Name for the index
361    /// * `index` - FlatIndex to save
362    ///
363    /// # Returns
364    ///
365    /// * `Result<CompressionStats>` - Compression statistics
366    ///
367    /// # Errors
368    ///
369    /// Returns error if serialization or writing fails.
370    pub fn save_flat_index(
371        &self,
372        name: &str,
373        index: &FlatIndexWrapper,
374    ) -> Result<CompressionStats> {
375        Self::validate_item_name(name)?;
376        self.save_with_metadata(name, index, "flat_index")
377    }
378
379    /// Load FlatIndex
380    ///
381    /// # Arguments
382    ///
383    /// * `name` - Name of the index
384    ///
385    /// # Returns
386    ///
387    /// * `Result<FlatIndex>` - Loaded index
388    ///
389    /// # Errors
390    ///
391    /// Returns error if index doesn't exist or deserialization fails.
392    pub fn load_flat_index(&self, name: &str) -> Result<FlatIndexWrapper> {
393        Self::validate_item_name(name)?;
394        self.load_with_metadata(name)
395    }
396
397    /// Save HNSWIndex
398    ///
399    /// # Arguments
400    ///
401    /// * `name` - Name for the index
402    /// * `index` - HNSWIndex to save
403    ///
404    /// # Returns
405    ///
406    /// * `Result<CompressionStats>` - Compression statistics
407    ///
408    /// # Errors
409    ///
410    /// Returns error if serialization or writing fails.
411    pub fn save_hnsw_index(
412        &self,
413        name: &str,
414        index: &HNSWIndexWrapper,
415    ) -> Result<CompressionStats> {
416        Self::validate_item_name(name)?;
417        self.save_with_metadata(name, index, "hnsw_index")
418    }
419
420    /// Load HNSWIndex
421    ///
422    /// # Arguments
423    ///
424    /// * `name` - Name of the index
425    ///
426    /// # Returns
427    ///
428    /// * `Result<HNSWIndex>` - Loaded index
429    ///
430    /// # Errors
431    ///
432    /// Returns error if index doesn't exist or deserialization fails.
433    pub fn load_hnsw_index(&self, name: &str) -> Result<HNSWIndexWrapper> {
434        Self::validate_item_name(name)?;
435        self.load_with_metadata(name)
436    }
437
438    /// Delete item from storage
439    ///
440    /// Removes both the data and metadata files.
441    ///
442    /// # Arguments
443    ///
444    /// * `name` - Name of the item to delete
445    ///
446    /// # Returns
447    ///
448    /// * `Result<()>` - Ok if successful
449    ///
450    /// # Errors
451    ///
452    /// Returns error if deletion fails. Does not error if item doesn't exist.
453    ///
454    /// # Examples
455    ///
456    /// ```no_run
457    /// # use foxstash_core::storage::file::FileStorage;
458    /// # fn main() -> foxstash_core::Result<()> {
459    /// let storage = FileStorage::new("/tmp/storage")?;
460    /// storage.delete("doc1")?;
461    /// # Ok(())
462    /// # }
463    /// ```
464    pub fn delete(&self, name: &str) -> Result<()> {
465        Self::validate_item_name(name)?;
466
467        let data_path = self.item_path(name);
468        let meta_path = self.metadata_path(name);
469
470        // Delete data file if it exists
471        if data_path.exists() {
472            fs::remove_file(&data_path).map_err(|e| {
473                RagError::StorageError(format!("Failed to delete data file: {}", e))
474            })?;
475        }
476
477        // Delete metadata file if it exists
478        if meta_path.exists() {
479            fs::remove_file(&meta_path).map_err(|e| {
480                RagError::StorageError(format!("Failed to delete metadata file: {}", e))
481            })?;
482        }
483
484        Ok(())
485    }
486
487    /// List all items in storage
488    ///
489    /// Returns names of all stored items (without extensions).
490    ///
491    /// # Returns
492    ///
493    /// * `Result<Vec<String>>` - List of item names
494    ///
495    /// # Errors
496    ///
497    /// Returns error if directory reading fails.
498    ///
499    /// # Examples
500    ///
501    /// ```no_run
502    /// # use foxstash_core::storage::file::FileStorage;
503    /// # fn main() -> foxstash_core::Result<()> {
504    /// let storage = FileStorage::new("/tmp/storage")?;
505    /// let items = storage.list()?;
506    /// for item in items {
507    ///     println!("Found: {}", item);
508    /// }
509    /// # Ok(())
510    /// # }
511    /// ```
512    pub fn list(&self) -> Result<Vec<String>> {
513        let entries = fs::read_dir(&self.base_path).map_err(|e| {
514            RagError::StorageError(format!("Failed to read storage directory: {}", e))
515        })?;
516
517        let mut names = std::collections::HashSet::new();
518
519        for entry in entries {
520            let entry = entry.map_err(|e| {
521                RagError::StorageError(format!("Failed to read directory entry: {}", e))
522            })?;
523
524            let path = entry.path();
525            if path.is_file() {
526                if let Some(ext) = path.extension() {
527                    if ext == DATA_EXTENSION || ext == META_EXTENSION {
528                        if let Some(stem) = path.file_stem() {
529                            if let Some(name) = stem.to_str() {
530                                names.insert(name.to_string());
531                            }
532                        }
533                    }
534                }
535            }
536        }
537
538        let mut result: Vec<String> = names.into_iter().collect();
539        result.sort();
540        Ok(result)
541    }
542
543    /// Get metadata for an item
544    ///
545    /// # Arguments
546    ///
547    /// * `name` - Name of the item
548    ///
549    /// # Returns
550    ///
551    /// * `Result<StorageMetadata>` - Item metadata
552    ///
553    /// # Errors
554    ///
555    /// Returns error if metadata doesn't exist or can't be read.
556    ///
557    /// # Examples
558    ///
559    /// ```no_run
560    /// # use foxstash_core::storage::file::FileStorage;
561    /// # fn main() -> foxstash_core::Result<()> {
562    /// let storage = FileStorage::new("/tmp/storage")?;
563    /// let meta = storage.get_metadata("doc1")?;
564    /// println!("Type: {}, Size: {} bytes", meta.item_type, meta.compressed_size);
565    /// # Ok(())
566    /// # }
567    /// ```
568    pub fn get_metadata(&self, name: &str) -> Result<StorageMetadata> {
569        Self::validate_item_name(name)?;
570
571        let meta_path = self.metadata_path(name);
572
573        if !meta_path.exists() {
574            return Err(RagError::StorageError(format!(
575                "Metadata not found for item: {}",
576                name
577            )));
578        }
579
580        let mut file = File::open(&meta_path)?;
581        let mut contents = Vec::new();
582        file.read_to_end(&mut contents)?;
583
584        // Try JSON first (v2+), fall back to bincode for v1 metadata files.
585        let metadata: StorageMetadata = serde_json::from_slice(&contents)
586            .or_else(|_| bincode::deserialize::<StorageMetadata>(&contents))
587            .map_err(|e| RagError::StorageError(format!("metadata deserialize failed: {}", e)))?;
588        Ok(metadata)
589    }
590
591    /// Get total storage size in bytes
592    ///
593    /// Calculates the sum of all data and metadata files.
594    ///
595    /// # Returns
596    ///
597    /// * `Result<u64>` - Total size in bytes
598    ///
599    /// # Errors
600    ///
601    /// Returns error if directory reading fails.
602    ///
603    /// # Examples
604    ///
605    /// ```no_run
606    /// # use foxstash_core::storage::file::FileStorage;
607    /// # fn main() -> foxstash_core::Result<()> {
608    /// let storage = FileStorage::new("/tmp/storage")?;
609    /// let size = storage.total_size()?;
610    /// println!("Storage uses {} bytes", size);
611    /// # Ok(())
612    /// # }
613    /// ```
614    pub fn total_size(&self) -> Result<u64> {
615        let entries = fs::read_dir(&self.base_path).map_err(|e| {
616            RagError::StorageError(format!("Failed to read storage directory: {}", e))
617        })?;
618
619        let mut total = 0u64;
620
621        for entry in entries {
622            let entry = entry.map_err(|e| {
623                RagError::StorageError(format!("Failed to read directory entry: {}", e))
624            })?;
625
626            let metadata = entry.metadata()?;
627            if metadata.is_file() {
628                total += metadata.len();
629            }
630        }
631
632        Ok(total)
633    }
634
635    /// Clear all storage
636    ///
637    /// Removes all data and metadata files from storage.
638    ///
639    /// # Returns
640    ///
641    /// * `Result<()>` - Ok if successful
642    ///
643    /// # Errors
644    ///
645    /// Returns error if file deletion fails.
646    ///
647    /// # Examples
648    ///
649    /// ```no_run
650    /// # use foxstash_core::storage::file::FileStorage;
651    /// # fn main() -> foxstash_core::Result<()> {
652    /// let storage = FileStorage::new("/tmp/storage")?;
653    /// storage.clear()?;
654    /// # Ok(())
655    /// # }
656    /// ```
657    pub fn clear(&self) -> Result<()> {
658        let entries = fs::read_dir(&self.base_path).map_err(|e| {
659            RagError::StorageError(format!("Failed to read storage directory: {}", e))
660        })?;
661
662        for entry in entries {
663            let entry = entry.map_err(|e| {
664                RagError::StorageError(format!("Failed to read directory entry: {}", e))
665            })?;
666
667            let path = entry.path();
668            if path.is_file() {
669                fs::remove_file(&path)
670                    .map_err(|e| RagError::StorageError(format!("Failed to delete file: {}", e)))?;
671            }
672        }
673
674        Ok(())
675    }
676
677    /// Check if item exists in storage
678    ///
679    /// # Arguments
680    ///
681    /// * `name` - Name of the item
682    ///
683    /// # Returns
684    ///
685    /// * `bool` - true if item exists
686    ///
687    /// # Examples
688    ///
689    /// ```no_run
690    /// # use foxstash_core::storage::file::FileStorage;
691    /// # fn main() -> foxstash_core::Result<()> {
692    /// let storage = FileStorage::new("/tmp/storage")?;
693    /// if storage.exists("doc1") {
694    ///     println!("Document exists!");
695    /// }
696    /// # Ok(())
697    /// # }
698    /// ```
699    pub fn exists(&self, name: &str) -> bool {
700        if Self::is_invalid_item_name(name) {
701            return false;
702        }
703        self.item_path(name).exists() && self.metadata_path(name).exists()
704    }
705
706    // Internal helper methods
707
708    fn is_invalid_item_name(name: &str) -> bool {
709        if name.is_empty() {
710            return true;
711        }
712
713        // Reject null bytes (could truncate paths in C-based syscalls)
714        if name.contains('\0') {
715            return true;
716        }
717
718        // Reject path separators on all platforms (storage files may be portable).
719        // On Unix, `\` is a valid filename char but we reject it for cross-platform safety.
720        if name.contains('/') || name.contains('\\') {
721            return true;
722        }
723
724        let path = Path::new(name);
725        if path.is_absolute() {
726            return true;
727        }
728
729        // Must be exactly one normal component (no separators, no .., no .)
730        let mut components = path.components();
731        match components.next() {
732            Some(Component::Normal(_)) => {
733                if components.next().is_some() {
734                    return true;
735                }
736            }
737            _ => return true,
738        }
739
740        // Reject Windows reserved device names (CON, PRN, AUX, NUL, COM1-9, LPT1-9).
741        // These are reserved with or without an extension (e.g. "CON.txt" is also invalid).
742        // Use the part before the first dot as the base name, since Windows treats
743        // "NUL.tar.gz" the same as "NUL".
744        let base_name = name.split('.').next().unwrap_or(name);
745        let stem_upper = base_name.to_ascii_uppercase();
746        let is_reserved = matches!(
747            stem_upper.as_str(),
748            "CON"
749                | "PRN"
750                | "AUX"
751                | "NUL"
752                | "COM1"
753                | "COM2"
754                | "COM3"
755                | "COM4"
756                | "COM5"
757                | "COM6"
758                | "COM7"
759                | "COM8"
760                | "COM9"
761                | "LPT1"
762                | "LPT2"
763                | "LPT3"
764                | "LPT4"
765                | "LPT5"
766                | "LPT6"
767                | "LPT7"
768                | "LPT8"
769                | "LPT9"
770        );
771        if is_reserved {
772            return true;
773        }
774
775        false
776    }
777
778    fn validate_item_name(name: &str) -> Result<()> {
779        if Self::is_invalid_item_name(name) {
780            return Err(RagError::StorageError(format!(
781                "Invalid item name: '{}'. Names must be a single path segment",
782                name
783            )));
784        }
785        Ok(())
786    }
787
788    /// Get path for item data file
789    fn item_path(&self, name: &str) -> PathBuf {
790        self.base_path.join(format!("{}.{}", name, DATA_EXTENSION))
791    }
792
793    /// Get path for item metadata file
794    fn metadata_path(&self, name: &str) -> PathBuf {
795        self.base_path.join(format!("{}.{}", name, META_EXTENSION))
796    }
797
798    /// Atomic write: write to temp file, then rename
799    ///
800    /// This ensures that even if the process crashes during write,
801    /// the original file is not corrupted.
802    ///
803    /// # Arguments
804    ///
805    /// * `path` - Target file path
806    /// * `data` - Data to write
807    ///
808    /// # Returns
809    ///
810    /// * `Result<()>` - Ok if successful
811    ///
812    /// # Errors
813    ///
814    /// Returns error if write or rename fails.
815    fn write_atomic(&self, path: &Path, data: &[u8]) -> Result<()> {
816        // Create temp file path
817        let filename = path.file_name().and_then(|f| f.to_str()).unwrap_or("item");
818        let counter = TMP_FILE_COUNTER.fetch_add(1, Ordering::Relaxed);
819        let tmp_path = path.with_file_name(format!(
820            "{}.{}.{}.{}",
821            filename,
822            std::process::id(),
823            counter,
824            TMP_EXTENSION
825        ));
826
827        // Write to temp file
828        {
829            let mut file = File::create(&tmp_path)?;
830            file.write_all(data)?;
831            file.sync_all()?; // Ensure data is flushed to disk
832        }
833
834        // Atomically rename temp to final
835        fs::rename(&tmp_path, path).map_err(|e| {
836            // Try to clean up temp file if rename fails
837            let _ = fs::remove_file(&tmp_path);
838            RagError::IoError(e)
839        })?;
840
841        Ok(())
842    }
843
844    /// Save item with metadata
845    ///
846    /// Generic method for saving any serializable item with metadata tracking.
847    fn save_with_metadata<T: Serialize>(
848        &self,
849        name: &str,
850        item: &T,
851        item_type: &str,
852    ) -> Result<CompressionStats> {
853        // Serialize the item
854        let serialized = serde_json::to_vec(item)
855            .map_err(|e| RagError::StorageError(format!("JSON serialization failed: {}", e)))?;
856
857        // Compress the data
858        let (compressed, stats) = compression::compress_with(&serialized, self.codec)
859            .map_err(|e| RagError::StorageError(format!("Compression failed: {}", e)))?;
860
861        // Create or update metadata
862        let metadata = if self.exists(name) {
863            let mut meta = self.get_metadata(name)?;
864            meta.touch();
865            meta.original_size = stats.original_size;
866            meta.compressed_size = stats.compressed_size;
867            meta.compression = stats.codec;
868            meta
869        } else {
870            StorageMetadata::new(
871                item_type.to_string(),
872                stats.codec,
873                stats.original_size,
874                stats.compressed_size,
875            )
876        };
877
878        // Save data file atomically
879        let data_path = self.item_path(name);
880        self.write_atomic(&data_path, &compressed)?;
881
882        // Save metadata file atomically
883        let meta_path = self.metadata_path(name);
884        let meta_bytes = serde_json::to_vec(&metadata)
885            .map_err(|e| RagError::StorageError(format!("metadata serialize failed: {}", e)))?;
886        self.write_atomic(&meta_path, &meta_bytes)?;
887
888        Ok(stats)
889    }
890
891    /// Load item with metadata check
892    ///
893    /// Generic method for loading any deserializable item with metadata verification.
894    fn load_with_metadata<T: for<'de> Deserialize<'de>>(&self, name: &str) -> Result<T> {
895        // Check if item exists
896        if !self.exists(name) {
897            return Err(RagError::StorageError(format!("Item not found: {}", name)));
898        }
899
900        // Load metadata
901        let metadata = self.get_metadata(name)?;
902
903        // Check version compatibility
904        if metadata.version != STORAGE_VERSION {
905            return Err(RagError::StorageError(format!(
906                "Incompatible storage version: expected {}, got {}",
907                STORAGE_VERSION, metadata.version
908            )));
909        }
910
911        // Load data file
912        let data_path = self.item_path(name);
913        let mut file = File::open(&data_path)?;
914        let mut compressed = Vec::new();
915        file.read_to_end(&mut compressed)?;
916
917        // Verify size matches metadata
918        if compressed.len() != metadata.compressed_size {
919            return Err(RagError::StorageError(format!(
920                "Data corruption detected: size mismatch for {}",
921                name
922            )));
923        }
924
925        // Decompress (codec detected automatically from header)
926        let decompressed = compression::decompress(&compressed)
927            .map_err(|e| RagError::StorageError(format!("Decompression failed: {}", e)))?;
928
929        // Deserialize
930        let item: T = serde_json::from_slice::<T>(&decompressed)
931            .map_err(|e| RagError::StorageError(format!("JSON deserialization failed: {}", e)))?;
932
933        Ok(item)
934    }
935}
936
/// Wrapper for FlatIndex to enable serialization
///
/// Since FlatIndex uses HashMap internally, we need to ensure it's serializable.
/// This wrapper provides serialization support.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlatIndexWrapper {
    /// Dimensionality of every stored embedding vector
    pub embedding_dim: usize,
    /// Full document set; the index is rebuilt from these on load
    pub documents: Vec<Document>,
}
946
947impl FlatIndexWrapper {
948    /// Create wrapper from FlatIndex
949    pub fn from_index(index: &crate::index::FlatIndex) -> Self {
950        Self {
951            embedding_dim: index.embedding_dim(),
952            documents: index.get_all_documents(),
953        }
954    }
955
956    /// Convert wrapper to FlatIndex
957    pub fn to_index(&self) -> Result<crate::index::FlatIndex> {
958        let mut index = crate::index::FlatIndex::new(self.embedding_dim);
959        index.add_batch(self.documents.clone())?;
960        Ok(index)
961    }
962}
963
/// Wrapper for HNSWIndex to enable serialization
///
/// HNSWIndex has complex internal structures, so we serialize it as a flat list
/// of documents and rebuild the index on load.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HNSWIndexWrapper {
    /// Dimensionality of every stored embedding vector
    pub embedding_dim: usize,
    /// Full document set; the graph is rebuilt from these on load
    pub documents: Vec<Document>,
    /// HNSW tuning parameters used when rebuilding the index
    pub config: HNSWConfigWrapper,
}
974
/// Serializable wrapper for HNSWConfig
///
/// Each field mirrors the same-named field on `crate::index::HNSWConfig`.
/// `build_strategy` and `seed` are intentionally not persisted; they are
/// restored to defaults when converting back.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HNSWConfigWrapper {
    pub m: usize,
    pub m0: usize,
    pub ef_construction: usize,
    pub ef_search: usize,
    pub ml: f32,
    // serde defaults below let data serialized before these fields existed
    // still deserialize (missing field -> default value).
    #[serde(default = "default_use_heuristic")]
    pub use_heuristic: bool,
    #[serde(default)]
    pub extend_candidates: bool,
    #[serde(default = "default_keep_pruned")]
    pub keep_pruned_connections: bool,
}
990
/// Serde default for `HNSWConfigWrapper::use_heuristic` when the field is
/// absent from older serialized data (presumably mirrors the library's
/// default — confirm against `HNSWConfig::default`).
fn default_use_heuristic() -> bool {
    true
}
/// Serde default for `HNSWConfigWrapper::keep_pruned_connections` when the
/// field is absent from older serialized data.
fn default_keep_pruned() -> bool {
    true
}
997
998impl From<&crate::index::HNSWConfig> for HNSWConfigWrapper {
999    fn from(config: &crate::index::HNSWConfig) -> Self {
1000        Self {
1001            m: config.m,
1002            m0: config.m0,
1003            ef_construction: config.ef_construction,
1004            ef_search: config.ef_search,
1005            ml: config.ml,
1006            use_heuristic: config.use_heuristic,
1007            extend_candidates: config.extend_candidates,
1008            keep_pruned_connections: config.keep_pruned_connections,
1009        }
1010    }
1011}
1012
1013impl From<HNSWConfigWrapper> for crate::index::HNSWConfig {
1014    fn from(wrapper: HNSWConfigWrapper) -> Self {
1015        Self {
1016            m: wrapper.m,
1017            m0: wrapper.m0,
1018            ef_construction: wrapper.ef_construction,
1019            ef_search: wrapper.ef_search,
1020            ml: wrapper.ml,
1021            use_heuristic: wrapper.use_heuristic,
1022            extend_candidates: wrapper.extend_candidates,
1023            keep_pruned_connections: wrapper.keep_pruned_connections,
1024            build_strategy: crate::index::BuildStrategy::default(),
1025            seed: None,
1026        }
1027    }
1028}
1029
1030impl HNSWIndexWrapper {
1031    /// Create wrapper from HNSWIndex
1032    pub fn from_index(index: &crate::index::HNSWIndex) -> Self {
1033        Self {
1034            embedding_dim: index.embedding_dim(),
1035            documents: index.get_all_documents(),
1036            config: HNSWConfigWrapper::from(index.config()),
1037        }
1038    }
1039
1040    /// Convert wrapper to HNSWIndex
1041    pub fn to_index(&self) -> Result<crate::index::HNSWIndex> {
1042        let config: crate::index::HNSWConfig = self.config.clone().into();
1043        let mut index = crate::index::HNSWIndex::new(self.embedding_dim, config);
1044        for doc in &self.documents {
1045            index.add(doc.clone())?;
1046        }
1047        Ok(index)
1048    }
1049}
1050
1051#[cfg(test)]
1052mod tests {
1053    use super::*;
1054    use std::sync::{Arc, Barrier};
1055    use std::thread;
1056    use tempfile::tempdir;
1057
    /// Build a small 5-dimensional document with JSON metadata for tests.
    fn create_test_document(id: &str) -> Document {
        Document {
            id: id.to_string(),
            content: format!("Test content for {}", id),
            embedding: vec![0.1, 0.2, 0.3, 0.4, 0.5],
            metadata: Some(serde_json::json!({"test": true})),
        }
    }

    /// Flat index with two 5-dim test documents.
    fn create_test_flat_index() -> crate::index::FlatIndex {
        let mut index = crate::index::FlatIndex::new(5);
        index.add(create_test_document("doc1")).unwrap();
        index.add(create_test_document("doc2")).unwrap();
        index
    }

    /// HNSW index (default parameters) with two 5-dim test documents.
    fn create_test_hnsw_index() -> crate::index::HNSWIndex {
        let mut index = crate::index::HNSWIndex::with_defaults(5);
        index.add(create_test_document("doc1")).unwrap();
        index.add(create_test_document("doc2")).unwrap();
        index
    }
1080
    // Creating storage on an existing directory succeeds and keeps it a dir.
    #[test]
    fn test_new_storage() {
        let dir = tempdir().unwrap();
        let _storage = FileStorage::new(dir.path()).unwrap();
        assert!(dir.path().exists());
        assert!(dir.path().is_dir());
    }

    // The codec-selecting constructor also initializes the directory.
    #[test]
    fn test_new_storage_with_codec() {
        let dir = tempdir().unwrap();
        let _storage = FileStorage::with_codec(dir.path(), Codec::Gzip).unwrap();
        assert!(dir.path().exists());
    }

    // Pointing storage at an existing regular file (not a directory) must fail.
    #[test]
    fn test_invalid_storage_path() {
        let dir = tempdir().unwrap();
        let file_path = dir.path().join("file.txt");
        std::fs::write(&file_path, b"test").unwrap();

        let result = FileStorage::new(&file_path);
        assert!(result.is_err());
    }
1105
    // Round-trip a document through save/load with the default (None) codec
    // and check the payload fields survive unchanged.
    #[test]
    fn test_document_save_load() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        let doc = create_test_document("doc1");
        let stats = storage.save_document("doc1", &doc).unwrap();

        assert!(stats.original_size > 0);
        assert_eq!(stats.codec, Codec::None);

        let loaded = storage.load_document("doc1").unwrap();
        assert_eq!(loaded.id, doc.id);
        assert_eq!(loaded.content, doc.content);
        assert_eq!(loaded.embedding, doc.embedding);
    }

    // Loading a name that was never saved is an error, not a panic.
    #[test]
    fn test_document_not_found() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        let result = storage.load_document("nonexistent");
        assert!(result.is_err());
    }
1131
    // Flat index survives a save/load cycle via its wrapper: same length
    // and embedding dimension after rebuilding.
    #[test]
    fn test_flat_index_persistence() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        let index = create_test_flat_index();
        let wrapper = FlatIndexWrapper::from_index(&index);

        let stats = storage.save_flat_index("index1", &wrapper).unwrap();
        assert!(stats.original_size > 0);

        let loaded_wrapper = storage.load_flat_index("index1").unwrap();
        let loaded_index = loaded_wrapper.to_index().unwrap();

        assert_eq!(loaded_index.len(), index.len());
        assert_eq!(loaded_index.embedding_dim(), index.embedding_dim());
    }

    // Same persistence round-trip for the HNSW index wrapper.
    #[test]
    fn test_hnsw_index_persistence() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        let index = create_test_hnsw_index();
        let wrapper = HNSWIndexWrapper::from_index(&index);

        let stats = storage.save_hnsw_index("index1", &wrapper).unwrap();
        assert!(stats.original_size > 0);

        let loaded_wrapper = storage.load_hnsw_index("index1").unwrap();
        let loaded_index = loaded_wrapper.to_index().unwrap();

        assert_eq!(loaded_index.len(), index.len());
        assert_eq!(loaded_index.embedding_dim(), index.embedding_dim());
    }
1167
    // write_atomic lands the exact bytes at the target path and cleans up
    // its intermediate temp file.
    #[test]
    fn test_atomic_write() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        let path = dir.path().join("test.data");
        let data = b"test data";

        storage.write_atomic(&path, data).unwrap();

        assert!(path.exists());
        let read_data = std::fs::read(&path).unwrap();
        assert_eq!(read_data, data);

        // Verify no temp files left behind
        // (assumes the implementation's temp files use a ".tmp" suffix)
        let has_tmp = std::fs::read_dir(dir.path())
            .unwrap()
            .filter_map(|entry| entry.ok())
            .map(|entry| entry.file_name().to_string_lossy().to_string())
            .any(|name| name.ends_with(".tmp"));
        assert!(!has_tmp);
    }

    // Two threads writing to *different* sibling paths at the same moment
    // must each land their own bytes — a regression guard against temp-file
    // name collisions. Repeated 128 times to make the race likely to fire.
    #[test]
    fn concurrent_atomic_writes_to_sibling_paths_do_not_cross_contaminate() {
        let dir = tempdir().unwrap();
        let storage = Arc::new(FileStorage::new(dir.path()).unwrap());
        let data_path = dir.path().join("doc.data");
        let meta_path = dir.path().join("doc.meta");

        for _ in 0..128 {
            // Barrier of 3: both writer threads plus the main thread, so
            // the writes are released as close to simultaneously as possible.
            let barrier = Arc::new(Barrier::new(3));
            let s1 = Arc::clone(&storage);
            let b1 = Arc::clone(&barrier);
            let data_path_1 = data_path.clone();
            let t1 = thread::spawn(move || {
                b1.wait();
                s1.write_atomic(&data_path_1, b"DATA").unwrap();
            });

            let s2 = Arc::clone(&storage);
            let b2 = Arc::clone(&barrier);
            let meta_path_1 = meta_path.clone();
            let t2 = thread::spawn(move || {
                b2.wait();
                s2.write_atomic(&meta_path_1, b"META").unwrap();
            });

            barrier.wait();
            t1.join().unwrap();
            t2.join().unwrap();

            assert_eq!(std::fs::read(&data_path).unwrap(), b"DATA");
            assert_eq!(std::fs::read(&meta_path).unwrap(), b"META");
        }
    }
1224
    // Freshly saved items carry the current storage version, correct item
    // type, equal create/update timestamps, and the configured codec.
    #[test]
    fn test_metadata() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        let doc = create_test_document("doc1");
        storage.save_document("doc1", &doc).unwrap();

        let metadata = storage.get_metadata("doc1").unwrap();
        assert_eq!(metadata.version, STORAGE_VERSION);
        assert_eq!(metadata.item_type, "document");
        assert!(metadata.created_at > 0);
        assert_eq!(metadata.created_at, metadata.updated_at);
        assert_eq!(metadata.compression, Codec::None);
        assert!(metadata.original_size > 0);
    }

    // Re-saving keeps created_at stable while updated_at moves forward.
    #[test]
    fn test_metadata_update() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        let doc = create_test_document("doc1");
        storage.save_document("doc1", &doc).unwrap();

        let meta1 = storage.get_metadata("doc1").unwrap();

        // Wait a bit to ensure timestamp changes
        std::thread::sleep(std::time::Duration::from_millis(10));

        // Save again
        storage.save_document("doc1", &doc).unwrap();

        let meta2 = storage.get_metadata("doc1").unwrap();
        assert_eq!(meta2.created_at, meta1.created_at);
        // ">=" not ">": the timestamp resolution may be coarser than the
        // 10 ms sleep above.
        assert!(meta2.updated_at >= meta1.updated_at);
    }
1262
    // list() reflects exactly the saved item names, regardless of order.
    #[test]
    fn test_list_storage() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        assert_eq!(storage.list().unwrap().len(), 0);

        storage
            .save_document("doc1", &create_test_document("doc1"))
            .unwrap();
        storage
            .save_document("doc2", &create_test_document("doc2"))
            .unwrap();
        storage
            .save_document("doc3", &create_test_document("doc3"))
            .unwrap();

        let items = storage.list().unwrap();
        assert_eq!(items.len(), 3);
        assert!(items.contains(&"doc1".to_string()));
        assert!(items.contains(&"doc2".to_string()));
        assert!(items.contains(&"doc3".to_string()));
    }

    // delete() removes the item from both exists() and list().
    #[test]
    fn test_delete() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        let doc = create_test_document("doc1");
        storage.save_document("doc1", &doc).unwrap();

        assert!(storage.exists("doc1"));
        assert_eq!(storage.list().unwrap().len(), 1);

        storage.delete("doc1").unwrap();

        assert!(!storage.exists("doc1"));
        assert_eq!(storage.list().unwrap().len(), 0);
    }

    // Deleting something that was never stored is a no-op, not an error.
    #[test]
    fn test_delete_nonexistent() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        // Should not error when deleting non-existent item
        let result = storage.delete("nonexistent");
        assert!(result.is_ok());
    }
1313
    // clear() wipes every stored item in one call.
    #[test]
    fn test_clear() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        storage
            .save_document("doc1", &create_test_document("doc1"))
            .unwrap();
        storage
            .save_document("doc2", &create_test_document("doc2"))
            .unwrap();
        storage
            .save_document("doc3", &create_test_document("doc3"))
            .unwrap();

        assert_eq!(storage.list().unwrap().len(), 3);

        storage.clear().unwrap();

        assert_eq!(storage.list().unwrap().len(), 0);
    }

    // total_size() starts at zero and grows monotonically as items are added.
    #[test]
    fn test_storage_size() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        assert_eq!(storage.total_size().unwrap(), 0);

        storage
            .save_document("doc1", &create_test_document("doc1"))
            .unwrap();

        let size = storage.total_size().unwrap();
        assert!(size > 0);

        storage
            .save_document("doc2", &create_test_document("doc2"))
            .unwrap();

        let size2 = storage.total_size().unwrap();
        assert!(size2 > size);
    }
1357
    // Every available codec must round-trip a document losslessly. Optional
    // codecs are appended only when their cargo feature is enabled.
    #[test]
    fn test_compression_codecs() {
        let dir = tempdir().unwrap();

        // Test with different codecs
        // (`mut` is only used when a feature below pushes extra codecs,
        // hence the allow)
        #[allow(unused_mut)]
        let mut codecs = vec![Codec::None, Codec::Gzip];

        #[cfg(feature = "zstd")]
        codecs.push(Codec::Zstd);

        #[cfg(feature = "lz4")]
        codecs.push(Codec::Lz4);

        for codec in codecs {
            let storage = FileStorage::with_codec(dir.path(), codec).unwrap();
            let doc = create_test_document("doc1");

            let stats = storage.save_document("test", &doc).unwrap();
            assert!(stats.original_size > 0);

            let loaded = storage.load_document("test").unwrap();
            assert_eq!(loaded.id, doc.id);
            assert_eq!(loaded.content, doc.content);

            // Clean up so the next codec iteration starts fresh.
            storage.delete("test").unwrap();
        }
    }

    // exists() tracks only what was actually saved.
    #[test]
    fn test_exists() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        assert!(!storage.exists("doc1"));

        storage
            .save_document("doc1", &create_test_document("doc1"))
            .unwrap();

        assert!(storage.exists("doc1"));
        assert!(!storage.exists("doc2"));
    }
1401
    // In-memory wrapper round-trip (no storage involved): the rebuilt flat
    // index keeps its size/dimension and still answers searches.
    #[test]
    fn test_flat_index_wrapper_roundtrip() {
        let index = create_test_flat_index();
        let wrapper = FlatIndexWrapper::from_index(&index);
        let restored = wrapper.to_index().unwrap();

        assert_eq!(restored.len(), index.len());
        assert_eq!(restored.embedding_dim(), index.embedding_dim());

        // Test search works
        let query = vec![0.1, 0.2, 0.3, 0.4, 0.5];
        let results = restored.search(&query, 2).unwrap();
        assert_eq!(results.len(), 2);
    }

    // Same round-trip guarantee for the HNSW wrapper, whose graph is
    // rebuilt from scratch on to_index().
    #[test]
    fn test_hnsw_index_wrapper_roundtrip() {
        let index = create_test_hnsw_index();
        let wrapper = HNSWIndexWrapper::from_index(&index);
        let restored = wrapper.to_index().unwrap();

        assert_eq!(restored.len(), index.len());
        assert_eq!(restored.embedding_dim(), index.embedding_dim());

        // Test search works
        let query = vec![0.1, 0.2, 0.3, 0.4, 0.5];
        let results = restored.search(&query, 2).unwrap();
        assert_eq!(results.len(), 2);
    }
1431
    // NOTE(review): despite the name, this test is sequential — it checks
    // that repeated overwrites of the same key stay readable, not true
    // concurrency (that is covered by the sibling-paths test above).
    #[test]
    fn test_concurrent_writes() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        // Write same document multiple times to test atomicity
        let doc = create_test_document("doc1");

        for _ in 0..10 {
            storage.save_document("doc1", &doc).unwrap();
            let loaded = storage.load_document("doc1").unwrap();
            assert_eq!(loaded.id, doc.id);
        }
    }

    // A ~100 KB document (large content + 10k-dim embedding) round-trips
    // without truncation.
    #[test]
    fn test_large_document() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();

        // Create a large document
        let mut large_doc = create_test_document("large");
        large_doc.embedding = vec![0.5; 10000];
        large_doc.content = "x".repeat(100000);

        let stats = storage.save_document("large", &large_doc).unwrap();
        assert!(stats.original_size > 100000);

        let loaded = storage.load_document("large").unwrap();
        assert_eq!(loaded.id, large_doc.id);
        assert_eq!(loaded.embedding.len(), 10000);
        assert_eq!(loaded.content.len(), 100000);
    }
1465
    // Item names containing ".." must be rejected before touching the disk,
    // so callers cannot escape the storage directory.
    #[test]
    fn test_rejects_path_traversal_item_names() {
        let dir = tempdir().unwrap();
        let storage = FileStorage::new(dir.path()).unwrap();
        let doc = create_test_document("doc1");

        let result = storage.save_document("../outside", &doc);
        assert!(result.is_err(), "path traversal names should be rejected");
    }

    // Exhaustive check of the name validator: accepts plain file-like names,
    // rejects traversal, separators, absolute paths, NUL bytes, and Windows
    // reserved device names (on every platform, for portable stores).
    #[test]
    fn test_is_invalid_item_name_comprehensive() {
        // Valid names
        assert!(!FileStorage::is_invalid_item_name("hello"));
        assert!(!FileStorage::is_invalid_item_name("my_index"));
        assert!(!FileStorage::is_invalid_item_name("data-2024"));
        assert!(!FileStorage::is_invalid_item_name("file.txt"));

        // Empty
        assert!(FileStorage::is_invalid_item_name(""));

        // Path traversal / multi-component
        assert!(FileStorage::is_invalid_item_name(".."));
        assert!(FileStorage::is_invalid_item_name("."));
        assert!(FileStorage::is_invalid_item_name("foo/bar"));
        assert!(FileStorage::is_invalid_item_name("foo\\bar"));
        assert!(FileStorage::is_invalid_item_name("../outside"));

        // Absolute paths
        assert!(FileStorage::is_invalid_item_name("/absolute"));
        #[cfg(target_os = "windows")]
        assert!(FileStorage::is_invalid_item_name("C:\\Windows\\System32"));

        // Null bytes
        assert!(FileStorage::is_invalid_item_name("hello\0world"));
        assert!(FileStorage::is_invalid_item_name("\0"));

        // Windows reserved device names (case-insensitive)
        assert!(FileStorage::is_invalid_item_name("CON"));
        assert!(FileStorage::is_invalid_item_name("con"));
        assert!(FileStorage::is_invalid_item_name("Con"));
        assert!(FileStorage::is_invalid_item_name("PRN"));
        assert!(FileStorage::is_invalid_item_name("AUX"));
        assert!(FileStorage::is_invalid_item_name("NUL"));
        assert!(FileStorage::is_invalid_item_name("nul"));
        assert!(FileStorage::is_invalid_item_name("COM1"));
        assert!(FileStorage::is_invalid_item_name("com1"));
        assert!(FileStorage::is_invalid_item_name("COM9"));
        assert!(FileStorage::is_invalid_item_name("LPT1"));
        assert!(FileStorage::is_invalid_item_name("lpt1"));
        assert!(FileStorage::is_invalid_item_name("LPT9"));

        // Reserved names with extension (stem is still reserved)
        assert!(FileStorage::is_invalid_item_name("CON.txt"));
        assert!(FileStorage::is_invalid_item_name("NUL.tar.gz"));
        assert!(FileStorage::is_invalid_item_name("com1.data"));
        assert!(FileStorage::is_invalid_item_name("lpt3.log"));
    }
1524}