// optirs_bench/ci_cd_automation/artifact_management.rs
//! Artifact Management and Storage
//!
//! This module provides comprehensive artifact management capabilities for CI/CD
//! automation, including multiple storage providers, upload/download functionality,
//! retention policies, and artifact lifecycle management.

7use crate::error::{OptimError, Result};
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10use std::fs;
11use std::path::{Path, PathBuf};
12use std::time::{Duration, SystemTime};
13
14use super::config::{
15    ArtifactDownloadConfig, ArtifactRetentionPolicy, ArtifactStorageConfig,
16    ArtifactStorageProvider, ArtifactUploadConfig, CompressionAlgorithm, EncryptionConfig,
17    ParallelUploadConfig,
18};
19
/// Artifact manager coordinating storage and retrieval.
///
/// Bundles a pluggable storage backend with the registry, upload, download,
/// and retention subsystems that together cover the artifact lifecycle.
#[derive(Debug)]
pub struct ArtifactManager {
    /// Storage provider implementation (boxed trait object so backends are swappable)
    pub storage_provider: Box<dyn ArtifactStorage>,
    /// Configuration settings this manager was built from
    pub config: ArtifactStorageConfig,
    /// Artifact registry for tracking uploaded artifacts
    pub registry: ArtifactRegistry,
    /// Upload manager (task tracking and history)
    pub upload_manager: UploadManager,
    /// Download manager (task tracking and local cache)
    pub download_manager: DownloadManager,
    /// Retention manager (cleanup rules and scheduling)
    pub retention_manager: RetentionManager,
}
36
/// Trait implemented by every artifact storage backend.
///
/// `Send + Sync` so a backend can be shared behind the manager across
/// threads; `Debug` so `ArtifactManager` can derive `Debug`.
pub trait ArtifactStorage: std::fmt::Debug + Send + Sync {
    /// Upload an artifact to storage; returns the remote URL/identifier.
    fn upload(&self, local_path: &Path, remote_key: &str) -> Result<String>;

    /// Download an artifact from storage into `local_path`.
    fn download(&self, remote_key: &str, local_path: &Path) -> Result<()>;

    /// Delete an artifact from storage.
    fn delete(&self, remote_key: &str) -> Result<()>;

    /// List artifacts, optionally filtered by key prefix.
    fn list(&self, prefix: Option<&str>) -> Result<Vec<ArtifactInfo>>;

    /// Check if an artifact exists under `remote_key`.
    fn exists(&self, remote_key: &str) -> Result<bool>;

    /// Get metadata for a stored artifact.
    fn get_metadata(&self, remote_key: &str) -> Result<ArtifactMetadata>;

    /// Get aggregate storage statistics (size, object count, …).
    fn get_storage_stats(&self) -> Result<StorageStatistics>;

    /// Validate connection and permissions (e.g. write access).
    fn validate_connection(&self) -> Result<()>;
}
63
/// Artifact registry for tracking uploaded artifacts.
#[derive(Debug, Clone)]
pub struct ArtifactRegistry {
    /// Tracked artifacts, keyed by artifact key (see `ArtifactRecord::key`)
    pub artifacts: HashMap<String, ArtifactRecord>,
    /// Registry-level bookkeeping (version, timestamps, statistics)
    pub metadata: RegistryMetadata,
}
72
/// Individual artifact record in the registry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactRecord {
    /// Unique artifact ID (UUID assigned at upload time)
    pub id: String,
    /// Artifact name/key
    pub key: String,
    /// Local file path the artifact was uploaded from
    pub local_path: PathBuf,
    /// Remote storage path/key
    pub remote_key: String,
    /// Artifact metadata (size, checksum, content type, …)
    pub metadata: ArtifactMetadata,
    /// Upload timestamp
    pub uploaded_at: SystemTime,
    /// Last accessed timestamp (`None` if never accessed since upload)
    pub last_accessed: Option<SystemTime>,
    /// Artifact tags (free-form labels)
    pub tags: Vec<String>,
    /// Name of the retention policy applied to this artifact
    pub retention_policy: String,
    /// Current lifecycle status
    pub status: ArtifactStatus,
}
97
/// Artifact lifecycle status in the system.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum ArtifactStatus {
    /// Pending upload
    Pending,
    /// Currently uploading
    Uploading,
    /// Successfully uploaded
    Uploaded,
    /// Upload failed
    Failed,
    /// Marked for deletion (not yet removed from storage)
    MarkedForDeletion,
    /// Deleted from storage (record retained for history)
    Deleted,
    /// Archived
    Archived,
}
116
/// Registry-level metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegistryMetadata {
    /// Registry schema/format version
    pub version: String,
    /// Creation timestamp
    pub created_at: SystemTime,
    /// Last updated timestamp
    pub updated_at: SystemTime,
    /// Total artifacts tracked
    pub total_artifacts: usize,
    /// Aggregate registry statistics
    pub statistics: RegistryStatistics,
}
131
/// Aggregate statistics over the artifact registry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegistryStatistics {
    /// Total storage used, in bytes
    pub total_size_bytes: u64,
    /// Number of active artifacts
    pub active_artifacts: usize,
    /// Number of failed uploads
    pub failed_uploads: usize,
    /// Number of deleted artifacts
    pub deleted_artifacts: usize,
    /// Average artifact size, in bytes
    pub average_size_bytes: f64,
}
146
/// Artifact information as reported by a storage backend listing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactInfo {
    /// Artifact key/name (relative to the storage root)
    pub key: String,
    /// Artifact size in bytes
    pub size: u64,
    /// Last modified timestamp
    pub last_modified: SystemTime,
    /// Content type/MIME type, when the backend provides one
    pub content_type: Option<String>,
    /// Storage class, when the backend provides one
    pub storage_class: Option<String>,
    /// ETag or checksum, when the backend provides one
    pub etag: Option<String>,
}
163
/// Metadata describing a single artifact's contents.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactMetadata {
    /// File name (without directory components)
    pub filename: String,
    /// File size in bytes
    pub size_bytes: u64,
    /// Content type (MIME)
    pub content_type: String,
    /// Checksum/hash of the file contents (hex-encoded)
    pub checksum: String,
    /// Algorithm used to compute `checksum`
    pub checksum_algorithm: ChecksumAlgorithm,
    /// Compression information, if the artifact was compressed
    pub compression: Option<CompressionInfo>,
    /// Encryption information, if the artifact was encrypted
    pub encryption: Option<EncryptionInfo>,
    /// Custom user-defined metadata fields
    pub custom_metadata: HashMap<String, String>,
}
184
/// Supported checksum algorithms.
///
/// NOTE(review): `compute_file_checksum` only fully implements SHA256 and
/// MD5; the remaining variants currently fall back to a placeholder value.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum ChecksumAlgorithm {
    /// MD5 hash
    MD5,
    /// SHA-1 hash
    SHA1,
    /// SHA-256 hash
    SHA256,
    /// SHA-512 hash
    SHA512,
    /// CRC32 checksum
    CRC32,
    /// Blake2b hash
    Blake2b,
}
201
/// Compression information attached to an artifact.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompressionInfo {
    /// Compression algorithm used
    pub algorithm: CompressionAlgorithm,
    /// Compression level (1-9)
    pub level: u8,
    /// Original size before compression, in bytes
    pub original_size: u64,
    /// Compressed size, in bytes
    pub compressed_size: u64,
    /// Compression ratio (presumably compressed/original — confirm with producer)
    pub compression_ratio: f64,
}
216
/// Encryption information attached to an artifact.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptionInfo {
    /// Encryption algorithm used (free-form identifier)
    pub algorithm: String,
    /// Identifier of the key used to encrypt
    pub key_id: String,
    /// Initialization vector, when the algorithm requires one
    pub iv: Option<String>,
    /// Additional encryption metadata
    pub metadata: HashMap<String, String>,
}
229
/// Aggregate statistics reported by a storage backend.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageStatistics {
    /// Total storage used, in bytes
    pub total_size_bytes: u64,
    /// Number of objects stored
    pub object_count: usize,
    /// Available storage space in bytes, if the backend can report it
    pub available_space_bytes: Option<u64>,
    /// Storage utilization percentage, if the backend can report it
    pub utilization_percent: Option<f64>,
    /// Average object size, in bytes
    pub average_object_size: f64,
}
244
/// Upload manager for handling file uploads.
#[derive(Debug, Clone)]
pub struct UploadManager {
    /// Upload configuration
    pub config: ArtifactUploadConfig,
    /// Active uploads, keyed by task ID
    pub active_uploads: HashMap<String, UploadTask>,
    /// History of completed/failed uploads
    pub upload_history: Vec<UploadResult>,
}
255
/// An individual tracked upload task.
#[derive(Debug, Clone)]
pub struct UploadTask {
    /// Task ID (used as the key in `UploadManager::active_uploads`)
    pub id: String,
    /// Local file path being uploaded
    pub local_path: PathBuf,
    /// Destination remote key
    pub remote_key: String,
    /// Upload progress snapshot
    pub progress: UploadProgress,
    /// Current task status
    pub status: UploadStatus,
    /// Timestamp when the task was started
    pub started_at: SystemTime,
    /// Per-task configuration (compression, chunking, retries)
    pub config: UploadTaskConfig,
}
274
/// Upload progress tracking.
#[derive(Debug, Clone)]
pub struct UploadProgress {
    /// Bytes uploaded so far
    pub bytes_uploaded: u64,
    /// Total bytes to upload
    pub total_bytes: u64,
    /// Progress percentage (0.0 to 100.0)
    pub percentage: f64,
    /// Upload speed, in bytes per second
    pub speed_bps: f64,
    /// Estimated time remaining in seconds, when computable
    pub eta_seconds: Option<u64>,
}
289
/// Status of an upload task.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum UploadStatus {
    /// Upload queued
    Queued,
    /// Upload in progress
    InProgress,
    /// Upload completed successfully
    Completed,
    /// Upload failed
    Failed,
    /// Upload cancelled
    Cancelled,
    /// Upload paused
    Paused,
}
306
/// Per-task upload configuration.
#[derive(Debug, Clone)]
pub struct UploadTaskConfig {
    /// Enable compression before upload
    pub compress: bool,
    /// Compression level (1-9; only meaningful when `compress` is set)
    pub compression_level: u8,
    /// Enable encryption before upload
    pub encrypt: bool,
    /// Chunk size in bytes for multipart uploads
    pub chunk_size_bytes: usize,
    /// Number of retry attempts on failure
    pub retry_attempts: u32,
}
321
/// Result of a finished (or failed) upload, kept in the upload history.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadResult {
    /// ID of the task that produced this result
    pub task_id: String,
    /// Remote key the artifact was uploaded under
    pub remote_key: String,
    /// Final upload status
    pub status: UploadStatus,
    /// Total bytes uploaded
    pub bytes_uploaded: u64,
    /// Wall-clock upload duration
    pub duration: Duration,
    /// Average upload speed, in bytes per second
    pub average_speed_bps: f64,
    /// Error message when `status` is `Failed`
    pub error_message: Option<String>,
    /// Timestamp when the result was recorded
    pub timestamp: SystemTime,
}
342
/// Download manager for handling file downloads.
#[derive(Debug, Clone)]
pub struct DownloadManager {
    /// Download configuration
    pub config: ArtifactDownloadConfig,
    /// Local download cache (checked before hitting the storage provider)
    pub cache: DownloadCache,
    /// Active downloads, keyed by task ID
    pub active_downloads: HashMap<String, DownloadTask>,
}
353
/// Local cache of downloaded artifacts.
#[derive(Debug, Clone)]
pub struct DownloadCache {
    /// Directory where cached files live
    pub cache_dir: PathBuf,
    /// Cache size limit, in bytes
    pub size_limit_bytes: u64,
    /// Current cache size, in bytes
    pub current_size_bytes: u64,
    /// Cache entries, keyed by remote key
    pub entries: HashMap<String, CacheEntry>,
    /// Hit/miss/eviction statistics
    pub statistics: CacheStatistics,
}
368
/// A single download-cache entry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheEntry {
    /// Cache key (remote key of the cached artifact)
    pub key: String,
    /// Local path of the cached file
    pub local_path: PathBuf,
    /// Cached file size, in bytes
    pub size_bytes: u64,
    /// Creation timestamp
    pub created_at: SystemTime,
    /// Last accessed timestamp
    pub last_accessed: SystemTime,
    /// Number of times this entry has been served
    pub access_count: u64,
    /// Cache hit score (scoring scheme defined by the eviction logic — not visible here)
    pub hit_score: f64,
}
387
/// Download-cache statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheStatistics {
    /// Total cache hits
    pub hits: u64,
    /// Total cache misses
    pub misses: u64,
    /// Cache hit ratio (hits / (hits + misses))
    pub hit_ratio: f64,
    /// Total cache evictions
    pub evictions: u64,
    /// Average cached file size, in bytes
    pub average_file_size: f64,
}
402
/// An individual tracked download task.
#[derive(Debug, Clone)]
pub struct DownloadTask {
    /// Task ID (used as the key in `DownloadManager::active_downloads`)
    pub id: String,
    /// Remote key being downloaded
    pub remote_key: String,
    /// Local destination path
    pub local_path: PathBuf,
    /// Download progress snapshot
    pub progress: DownloadProgress,
    /// Current task status
    pub status: DownloadStatus,
    /// Timestamp when the task was started
    pub started_at: SystemTime,
}
419
/// Download progress tracking.
#[derive(Debug, Clone)]
pub struct DownloadProgress {
    /// Bytes downloaded so far
    pub bytes_downloaded: u64,
    /// Total bytes to download
    pub total_bytes: u64,
    /// Progress percentage (0.0 to 100.0)
    pub percentage: f64,
    /// Download speed, in bytes per second
    pub speed_bps: f64,
    /// Estimated time remaining in seconds, when computable
    pub eta_seconds: Option<u64>,
}
434
/// Status of a download task.
///
/// NOTE(review): unlike the parallel `UploadStatus`, this enum does not
/// derive `Serialize`/`Deserialize`. Confirm whether download statuses are
/// ever persisted; if so, the derives should be added for consistency.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DownloadStatus {
    /// Download queued
    Queued,
    /// Download in progress
    InProgress,
    /// Download completed successfully
    Completed,
    /// Download failed
    Failed,
    /// Download cancelled
    Cancelled,
    /// Served from the local cache (no provider round-trip)
    FromCache,
}
451
/// Retention manager driving the artifact lifecycle (cleanup, archiving).
#[derive(Debug, Clone)]
pub struct RetentionManager {
    /// Retention policy in effect
    pub policy: ArtifactRetentionPolicy,
    /// Scheduler deciding when cleanups run and which rules apply
    pub scheduler: CleanupScheduler,
    /// History of past cleanup runs
    pub cleanup_history: Vec<CleanupResult>,
}
462
/// Scheduler for periodic cleanup runs.
#[derive(Debug, Clone)]
pub struct CleanupScheduler {
    /// Time of the next scheduled cleanup
    pub next_cleanup: SystemTime,
    /// Interval between cleanup runs
    pub interval: Duration,
    /// Cleanup rules evaluated on each run
    pub rules: Vec<CleanupRule>,
    /// Whether scheduled cleanups are enabled
    pub enabled: bool,
}
475
/// A single cleanup rule: when `condition` matches, apply `action`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CleanupRule {
    /// Rule name (for logs and diagnostics)
    pub name: String,
    /// Condition under which the rule fires
    pub condition: CleanupCondition,
    /// Action to take when the condition matches
    pub action: CleanupAction,
    /// Rule priority (higher number = higher priority)
    pub priority: u32,
    /// Whether this rule is enabled
    pub enabled: bool,
}
490
/// Conditions a cleanup rule can match on.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CleanupCondition {
    /// Artifact is older than the given number of days
    Age { days: u32 },
    /// Total size exceeds the given limit, in gigabytes
    Size { max_size_gb: f64 },
    /// Artifact count exceeds the given limit
    Count { max_count: usize },
    /// Artifact carries the given tag
    Tag { tag: String },
    /// Artifact has the given lifecycle status
    Status { status: ArtifactStatus },
    /// Custom condition expression (evaluation semantics defined by the rule engine)
    Custom { expression: String },
}
507
/// Actions a cleanup rule can take on a matching artifact.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum CleanupAction {
    /// Delete the artifact
    Delete,
    /// Archive the artifact
    Archive,
    /// Move to a different storage class (provider-specific name)
    MoveToStorageClass { class: String },
    /// Compress the artifact
    Compress,
    /// Tag the artifact for manual review
    TagForReview,
    /// No action (just log the match)
    Log,
}
524
/// Outcome of one cleanup run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CleanupResult {
    /// Unique ID of this cleanup run
    pub run_id: String,
    /// Timestamp of the run
    pub timestamp: SystemTime,
    /// Number of artifacts examined
    pub artifacts_processed: usize,
    /// Number of artifacts deleted
    pub artifacts_deleted: usize,
    /// Number of artifacts archived
    pub artifacts_archived: usize,
    /// Storage space freed, in bytes
    pub space_freed_bytes: u64,
    /// Wall-clock duration of the run
    pub duration: Duration,
    /// Human-readable summary of the run
    pub summary: String,
}
545
/// Local filesystem storage backend: artifacts are stored as plain files
/// under `base_path`, with the remote key used as the relative path.
#[derive(Debug, Clone)]
pub struct LocalArtifactStorage {
    /// Root directory all artifacts are stored beneath
    pub base_path: PathBuf,
    /// Backend configuration (directory creation, permissions)
    pub config: LocalStorageConfig,
}
554
/// Configuration for the local filesystem backend.
///
/// NOTE(review): `LocalArtifactStorage::new` calls
/// `LocalStorageConfig::default()`, but no `Default` impl is visible in this
/// part of the file — confirm it exists (and what its defaults are) before
/// relying on it.
#[derive(Debug, Clone)]
pub struct LocalStorageConfig {
    /// Create directories if they don't exist
    pub create_dirs: bool,
    /// Unix mode bits for created files (applied only on Unix)
    pub file_permissions: Option<u32>,
    /// Unix mode bits for created directories (applied only on Unix)
    pub dir_permissions: Option<u32>,
    /// Allow following/creating symbolic links
    pub allow_symlinks: bool,
}
567
568impl ArtifactManager {
569    /// Create a new artifact manager
570    pub fn new(config: ArtifactStorageConfig) -> Result<Self> {
571        let storage_provider: Box<dyn ArtifactStorage> = match &config.provider {
572            ArtifactStorageProvider::Local(path) => {
573                Box::new(LocalArtifactStorage::new(path.clone()))
574            }
575            ArtifactStorageProvider::S3 {
576                bucket,
577                region,
578                prefix,
579            } => {
580                return Err(OptimError::InvalidConfig(
581                    "S3 storage not yet implemented".to_string(),
582                ));
583            }
584            ArtifactStorageProvider::GCS { bucket, prefix } => {
585                return Err(OptimError::InvalidConfig(
586                    "GCS storage not yet implemented".to_string(),
587                ));
588            }
589            ArtifactStorageProvider::AzureBlob {
590                account,
591                container,
592                prefix,
593            } => {
594                return Err(OptimError::InvalidConfig(
595                    "Azure Blob storage not yet implemented".to_string(),
596                ));
597            }
598            ArtifactStorageProvider::FTP {
599                host,
600                port,
601                path,
602                secure,
603            } => {
604                return Err(OptimError::InvalidConfig(
605                    "FTP storage not yet implemented".to_string(),
606                ));
607            }
608            ArtifactStorageProvider::HTTP { base_url, auth } => {
609                return Err(OptimError::InvalidConfig(
610                    "HTTP storage not yet implemented".to_string(),
611                ));
612            }
613        };
614
615        Ok(Self {
616            storage_provider,
617            config: config.clone(),
618            registry: ArtifactRegistry::new(),
619            upload_manager: UploadManager::new(config.upload.clone()),
620            download_manager: DownloadManager::new(config.download.clone()),
621            retention_manager: RetentionManager::new(config.retention.clone()),
622        })
623    }
624
625    /// Upload an artifact
626    pub fn upload_artifact(
627        &mut self,
628        local_path: &Path,
629        remote_key: &str,
630        tags: Vec<String>,
631    ) -> Result<String> {
632        // Validate file exists
633        if !local_path.exists() {
634            return Err(OptimError::IO(std::io::Error::new(
635                std::io::ErrorKind::NotFound,
636                format!("File not found: {:?}", local_path),
637            )));
638        }
639
640        // Create artifact metadata
641        let metadata = self.create_artifact_metadata(local_path)?;
642
643        // Create upload task
644        let task_id = self
645            .upload_manager
646            .create_upload_task(local_path, remote_key)?;
647
648        // Perform upload
649        let remote_url = self.storage_provider.upload(local_path, remote_key)?;
650
651        // Record in registry
652        let artifact_record = ArtifactRecord {
653            id: uuid::Uuid::new_v4().to_string(),
654            key: remote_key.to_string(),
655            local_path: local_path.to_path_buf(),
656            remote_key: remote_key.to_string(),
657            metadata,
658            uploaded_at: SystemTime::now(),
659            last_accessed: None,
660            tags,
661            retention_policy: "default".to_string(),
662            status: ArtifactStatus::Uploaded,
663        };
664
665        self.registry.add_artifact(artifact_record)?;
666
667        // Update upload manager
668        self.upload_manager.complete_upload(&task_id)?;
669
670        Ok(remote_url)
671    }
672
673    /// Download an artifact
674    pub fn download_artifact(&mut self, remote_key: &str, local_path: &Path) -> Result<()> {
675        // Check cache first
676        if let Some(cached_path) = self.download_manager.check_cache(remote_key)? {
677            if cached_path != local_path {
678                fs::copy(&cached_path, local_path).map_err(OptimError::IO)?;
679            }
680            return Ok(());
681        }
682
683        // Create download task
684        let task_id = self
685            .download_manager
686            .create_download_task(remote_key, local_path)?;
687
688        // Perform download
689        self.storage_provider.download(remote_key, local_path)?;
690
691        // Update cache
692        self.download_manager.update_cache(remote_key, local_path)?;
693
694        // Complete download task
695        self.download_manager.complete_download(&task_id)?;
696
697        Ok(())
698    }
699
700    /// Delete an artifact
701    pub fn delete_artifact(&mut self, remote_key: &str) -> Result<()> {
702        // Delete from storage
703        self.storage_provider.delete(remote_key)?;
704
705        // Update registry
706        if let Some(artifact) = self.registry.get_artifact_mut(remote_key) {
707            artifact.status = ArtifactStatus::Deleted;
708        }
709
710        // Remove from cache
711        self.download_manager.remove_from_cache(remote_key)?;
712
713        Ok(())
714    }
715
716    /// List artifacts
717    pub fn list_artifacts(&self, prefix: Option<&str>) -> Result<Vec<ArtifactInfo>> {
718        self.storage_provider.list(prefix)
719    }
720
721    /// Get artifact metadata
722    pub fn get_artifact_metadata(&self, remote_key: &str) -> Result<ArtifactMetadata> {
723        if let Some(artifact) = self.registry.get_artifact(remote_key) {
724            Ok(artifact.metadata.clone())
725        } else {
726            self.storage_provider.get_metadata(remote_key)
727        }
728    }
729
730    /// Run cleanup based on retention policies
731    pub fn run_cleanup(&mut self) -> Result<CleanupResult> {
732        self.retention_manager
733            .run_cleanup(&mut self.registry, &*self.storage_provider)
734    }
735
736    /// Get storage statistics
737    pub fn get_storage_statistics(&self) -> Result<StorageStatistics> {
738        self.storage_provider.get_storage_stats()
739    }
740
741    /// Create artifact metadata from file
742    fn create_artifact_metadata(&self, path: &Path) -> Result<ArtifactMetadata> {
743        let file_size = fs::metadata(path)?.len();
744        let filename = path
745            .file_name()
746            .and_then(|n| n.to_str())
747            .unwrap_or("unknown")
748            .to_string();
749
750        // Compute checksum
751        let checksum = self.compute_file_checksum(path, ChecksumAlgorithm::SHA256)?;
752
753        // Determine content type
754        let content_type = self.determine_content_type(path);
755
756        Ok(ArtifactMetadata {
757            filename,
758            size_bytes: file_size,
759            content_type,
760            checksum,
761            checksum_algorithm: ChecksumAlgorithm::SHA256,
762            compression: None,
763            encryption: None,
764            custom_metadata: HashMap::new(),
765        })
766    }
767
768    /// Compute file checksum
769    fn compute_file_checksum(&self, path: &Path, algorithm: ChecksumAlgorithm) -> Result<String> {
770        use std::io::Read;
771
772        let mut file = fs::File::open(path).map_err(OptimError::IO)?;
773
774        let mut buffer = Vec::new();
775        file.read_to_end(&mut buffer).map_err(OptimError::IO)?;
776
777        let checksum = match algorithm {
778            ChecksumAlgorithm::SHA256 => {
779                use sha2::{Digest, Sha256};
780                let mut hasher = Sha256::new();
781                hasher.update(&buffer);
782                format!("{:x}", hasher.finalize())
783            }
784            ChecksumAlgorithm::MD5 => {
785                let mut hasher = md5::Context::new();
786                hasher.consume(&buffer);
787                format!("{:x}", hasher.finalize())
788            }
789            _ => {
790                // Simplified implementation for other algorithms
791                format!("checksum_{}", buffer.len())
792            }
793        };
794
795        Ok(checksum)
796    }
797
798    /// Determine content type from file extension
799    fn determine_content_type(&self, path: &Path) -> String {
800        match path.extension().and_then(|ext| ext.to_str()) {
801            Some("json") => "application/json".to_string(),
802            Some("xml") => "application/xml".to_string(),
803            Some("html") | Some("htm") => "text/html".to_string(),
804            Some("txt") => "text/plain".to_string(),
805            Some("pdf") => "application/pdf".to_string(),
806            Some("zip") => "application/zip".to_string(),
807            Some("tar") => "application/x-tar".to_string(),
808            Some("gz") => "application/gzip".to_string(),
809            _ => "application/octet-stream".to_string(),
810        }
811    }
812}
813
814impl LocalArtifactStorage {
815    /// Create a new local storage instance
816    pub fn new(base_path: PathBuf) -> Self {
817        Self {
818            base_path,
819            config: LocalStorageConfig::default(),
820        }
821    }
822
823    /// Create a new local storage instance with configuration
824    pub fn new_with_config(base_path: PathBuf, config: LocalStorageConfig) -> Self {
825        Self { base_path, config }
826    }
827}
828
829impl ArtifactStorage for LocalArtifactStorage {
830    fn upload(&self, local_path: &Path, remote_key: &str) -> Result<String> {
831        let dest_path = self.base_path.join(remote_key);
832
833        // Create parent directories if needed
834        if let Some(parent) = dest_path.parent() {
835            if self.config.create_dirs {
836                fs::create_dir_all(parent).map_err(OptimError::IO)?;
837            }
838        }
839
840        // Copy file
841        fs::copy(local_path, &dest_path).map_err(OptimError::IO)?;
842
843        // Set permissions if configured
844        if let Some(permissions) = self.config.file_permissions {
845            #[cfg(unix)]
846            {
847                use std::os::unix::fs::PermissionsExt;
848                let mut perms = fs::metadata(&dest_path)?.permissions();
849                perms.set_mode(permissions);
850                fs::set_permissions(&dest_path, perms)?;
851            }
852        }
853
854        Ok(dest_path.to_string_lossy().to_string())
855    }
856
857    fn download(&self, remote_key: &str, local_path: &Path) -> Result<()> {
858        let source_path = self.base_path.join(remote_key);
859
860        if !source_path.exists() {
861            return Err(OptimError::IO(std::io::Error::new(
862                std::io::ErrorKind::NotFound,
863                format!("Remote file not found: {}", remote_key),
864            )));
865        }
866
867        // Create parent directories for local path
868        if let Some(parent) = local_path.parent() {
869            fs::create_dir_all(parent).map_err(OptimError::IO)?;
870        }
871
872        fs::copy(&source_path, local_path).map_err(OptimError::IO)?;
873
874        Ok(())
875    }
876
877    fn delete(&self, remote_key: &str) -> Result<()> {
878        let file_path = self.base_path.join(remote_key);
879
880        if file_path.exists() {
881            fs::remove_file(&file_path).map_err(OptimError::IO)?;
882        }
883
884        Ok(())
885    }
886
887    fn list(&self, prefix: Option<&str>) -> Result<Vec<ArtifactInfo>> {
888        let mut artifacts = Vec::new();
889        let search_path = if let Some(prefix) = prefix {
890            self.base_path.join(prefix)
891        } else {
892            self.base_path.clone()
893        };
894
895        self.collect_artifacts(&search_path, &mut artifacts, prefix)?;
896        Ok(artifacts)
897    }
898
899    fn exists(&self, remote_key: &str) -> Result<bool> {
900        let file_path = self.base_path.join(remote_key);
901        Ok(file_path.exists())
902    }
903
904    fn get_metadata(&self, remote_key: &str) -> Result<ArtifactMetadata> {
905        let file_path = self.base_path.join(remote_key);
906        let metadata = fs::metadata(&file_path).map_err(OptimError::IO)?;
907
908        let filename = file_path
909            .file_name()
910            .and_then(|n| n.to_str())
911            .unwrap_or("unknown")
912            .to_string();
913
914        Ok(ArtifactMetadata {
915            filename,
916            size_bytes: metadata.len(),
917            content_type: "application/octet-stream".to_string(), // Simplified
918            checksum: "unknown".to_string(), // Would compute in real implementation
919            checksum_algorithm: ChecksumAlgorithm::SHA256,
920            compression: None,
921            encryption: None,
922            custom_metadata: HashMap::new(),
923        })
924    }
925
926    fn get_storage_stats(&self) -> Result<StorageStatistics> {
927        let mut total_size = 0u64;
928        let mut object_count = 0usize;
929
930        // Walk directory tree to calculate statistics
931        fn walk_dir(dir: &Path, total_size: &mut u64, count: &mut usize) -> Result<()> {
932            for entry in fs::read_dir(dir)? {
933                let entry = entry?;
934                let path = entry.path();
935                if path.is_file() {
936                    *total_size += fs::metadata(&path)?.len();
937                    *count += 1;
938                } else if path.is_dir() {
939                    walk_dir(&path, total_size, count)?;
940                }
941            }
942            Ok(())
943        }
944
945        if self.base_path.exists() && self.base_path.is_dir() {
946            walk_dir(&self.base_path, &mut total_size, &mut object_count)?;
947        }
948
949        let average_object_size = if object_count > 0 {
950            total_size as f64 / object_count as f64
951        } else {
952            0.0
953        };
954
955        Ok(StorageStatistics {
956            total_size_bytes: total_size,
957            object_count,
958            available_space_bytes: None, // Could implement disk space check
959            utilization_percent: None,
960            average_object_size,
961        })
962    }
963
964    fn validate_connection(&self) -> Result<()> {
965        // For local storage, just check if base path exists and is accessible
966        if !self.base_path.exists() {
967            if self.config.create_dirs {
968                fs::create_dir_all(&self.base_path).map_err(OptimError::IO)?;
969            } else {
970                return Err(OptimError::IO(std::io::Error::new(
971                    std::io::ErrorKind::NotFound,
972                    format!("Base path does not exist: {:?}", self.base_path),
973                )));
974            }
975        }
976
977        // Test write access
978        let test_file = self.base_path.join(".write_test");
979        fs::write(&test_file, "test").map_err(OptimError::IO)?;
980        fs::remove_file(&test_file).map_err(OptimError::IO)?;
981
982        Ok(())
983    }
984}
985
986impl LocalArtifactStorage {
987    /// Recursively collect artifacts from directory
988    fn collect_artifacts(
989        &self,
990        dir: &Path,
991        artifacts: &mut Vec<ArtifactInfo>,
992        prefix: Option<&str>,
993    ) -> Result<()> {
994        if !dir.is_dir() {
995            return Ok(());
996        }
997
998        for entry in fs::read_dir(dir)? {
999            let entry = entry?;
1000            let path = entry.path();
1001
1002            if path.is_file() {
1003                let relative_path = path.strip_prefix(&self.base_path).map_err(|e| {
1004                    OptimError::IO(std::io::Error::new(
1005                        std::io::ErrorKind::InvalidInput,
1006                        format!("Failed to get relative path: {}", e),
1007                    ))
1008                })?;
1009
1010                let key = relative_path.to_string_lossy().to_string();
1011
1012                // Filter by prefix if specified
1013                if let Some(prefix) = prefix {
1014                    if !key.starts_with(prefix) {
1015                        continue;
1016                    }
1017                }
1018
1019                let metadata = fs::metadata(&path)?;
1020                let modified = metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH);
1021
1022                artifacts.push(ArtifactInfo {
1023                    key,
1024                    size: metadata.len(),
1025                    last_modified: modified,
1026                    content_type: None,
1027                    storage_class: None,
1028                    etag: None,
1029                });
1030            } else if path.is_dir() {
1031                self.collect_artifacts(&path, artifacts, prefix)?;
1032            }
1033        }
1034
1035        Ok(())
1036    }
1037}
1038
1039// Implementation of supporting structures
1040
1041impl Default for ArtifactRegistry {
1042    fn default() -> Self {
1043        Self::new()
1044    }
1045}
1046
1047impl ArtifactRegistry {
1048    /// Create a new artifact registry
1049    pub fn new() -> Self {
1050        Self {
1051            artifacts: HashMap::new(),
1052            metadata: RegistryMetadata {
1053                version: "1.0.0".to_string(),
1054                created_at: SystemTime::now(),
1055                updated_at: SystemTime::now(),
1056                total_artifacts: 0,
1057                statistics: RegistryStatistics::default(),
1058            },
1059        }
1060    }
1061
1062    /// Add an artifact to the registry
1063    pub fn add_artifact(&mut self, artifact: ArtifactRecord) -> Result<()> {
1064        self.artifacts.insert(artifact.key.clone(), artifact);
1065        self.metadata.total_artifacts = self.artifacts.len();
1066        self.metadata.updated_at = SystemTime::now();
1067        self.update_statistics();
1068        Ok(())
1069    }
1070
1071    /// Get an artifact from the registry
1072    pub fn get_artifact(&self, key: &str) -> Option<&ArtifactRecord> {
1073        self.artifacts.get(key)
1074    }
1075
1076    /// Get a mutable reference to an artifact
1077    pub fn get_artifact_mut(&mut self, key: &str) -> Option<&mut ArtifactRecord> {
1078        self.artifacts.get_mut(key)
1079    }
1080
1081    /// Remove an artifact from the registry
1082    pub fn remove_artifact(&mut self, key: &str) -> Option<ArtifactRecord> {
1083        let result = self.artifacts.remove(key);
1084        if result.is_some() {
1085            self.metadata.total_artifacts = self.artifacts.len();
1086            self.metadata.updated_at = SystemTime::now();
1087            self.update_statistics();
1088        }
1089        result
1090    }
1091
1092    /// Update registry statistics
1093    fn update_statistics(&mut self) {
1094        let mut total_size = 0u64;
1095        let mut active_count = 0;
1096        let mut failed_count = 0;
1097        let mut deleted_count = 0;
1098
1099        for artifact in self.artifacts.values() {
1100            total_size += artifact.metadata.size_bytes;
1101            match artifact.status {
1102                ArtifactStatus::Uploaded => active_count += 1,
1103                ArtifactStatus::Failed => failed_count += 1,
1104                ArtifactStatus::Deleted => deleted_count += 1,
1105                _ => {}
1106            }
1107        }
1108
1109        let average_size = if !self.artifacts.is_empty() {
1110            total_size as f64 / self.artifacts.len() as f64
1111        } else {
1112            0.0
1113        };
1114
1115        self.metadata.statistics = RegistryStatistics {
1116            total_size_bytes: total_size,
1117            active_artifacts: active_count,
1118            failed_uploads: failed_count,
1119            deleted_artifacts: deleted_count,
1120            average_size_bytes: average_size,
1121        };
1122    }
1123}
1124
1125impl UploadManager {
1126    /// Create a new upload manager
1127    pub fn new(config: ArtifactUploadConfig) -> Self {
1128        Self {
1129            config,
1130            active_uploads: HashMap::new(),
1131            upload_history: Vec::new(),
1132        }
1133    }
1134
1135    /// Create a new upload task
1136    pub fn create_upload_task(&mut self, local_path: &Path, remote_key: &str) -> Result<String> {
1137        let task_id = uuid::Uuid::new_v4().to_string();
1138        let file_size = fs::metadata(local_path)?.len();
1139
1140        let task = UploadTask {
1141            id: task_id.clone(),
1142            local_path: local_path.to_path_buf(),
1143            remote_key: remote_key.to_string(),
1144            progress: UploadProgress {
1145                bytes_uploaded: 0,
1146                total_bytes: file_size,
1147                percentage: 0.0,
1148                speed_bps: 0.0,
1149                eta_seconds: None,
1150            },
1151            status: UploadStatus::Queued,
1152            started_at: SystemTime::now(),
1153            config: UploadTaskConfig {
1154                compress: self.config.compress,
1155                compression_level: self.config.compression_level,
1156                encrypt: self.config.encrypt,
1157                chunk_size_bytes: self.config.parallel_uploads.chunk_size_mb * 1024 * 1024,
1158                retry_attempts: 3,
1159            },
1160        };
1161
1162        self.active_uploads.insert(task_id.clone(), task);
1163        Ok(task_id)
1164    }
1165
1166    /// Complete an upload task
1167    pub fn complete_upload(&mut self, task_id: &str) -> Result<()> {
1168        if let Some(mut task) = self.active_uploads.remove(task_id) {
1169            task.status = UploadStatus::Completed;
1170            let duration = SystemTime::now()
1171                .duration_since(task.started_at)
1172                .unwrap_or(Duration::from_secs(0));
1173
1174            let result = UploadResult {
1175                task_id: task_id.to_string(),
1176                remote_key: task.remote_key.clone(),
1177                status: UploadStatus::Completed,
1178                bytes_uploaded: task.progress.total_bytes,
1179                duration,
1180                average_speed_bps: if duration.as_secs() > 0 {
1181                    task.progress.total_bytes as f64 / duration.as_secs_f64()
1182                } else {
1183                    0.0
1184                },
1185                error_message: None,
1186                timestamp: SystemTime::now(),
1187            };
1188
1189            self.upload_history.push(result);
1190        }
1191
1192        Ok(())
1193    }
1194}
1195
1196impl DownloadManager {
1197    /// Create a new download manager
1198    pub fn new(config: ArtifactDownloadConfig) -> Self {
1199        let cache_dir = config
1200            .cache_directory
1201            .clone()
1202            .unwrap_or_else(|| PathBuf::from("./cache"));
1203
1204        Self {
1205            config: config.clone(),
1206            cache: DownloadCache {
1207                cache_dir,
1208                size_limit_bytes: config.cache_size_limit_mb.unwrap_or(1024) as u64 * 1024 * 1024,
1209                current_size_bytes: 0,
1210                entries: HashMap::new(),
1211                statistics: CacheStatistics::default(),
1212            },
1213            active_downloads: HashMap::new(),
1214        }
1215    }
1216
1217    /// Check if artifact is in cache
1218    pub fn check_cache(&mut self, remote_key: &str) -> Result<Option<PathBuf>> {
1219        if !self.config.enable_caching {
1220            return Ok(None);
1221        }
1222
1223        if let Some(entry) = self.cache.entries.get_mut(remote_key) {
1224            entry.last_accessed = SystemTime::now();
1225            entry.access_count += 1;
1226            self.cache.statistics.hits += 1;
1227
1228            if entry.local_path.exists() {
1229                return Ok(Some(entry.local_path.clone()));
1230            } else {
1231                // File was deleted, remove from cache
1232                self.cache.entries.remove(remote_key);
1233            }
1234        }
1235
1236        self.cache.statistics.misses += 1;
1237        Ok(None)
1238    }
1239
1240    /// Update cache with downloaded file
1241    pub fn update_cache(&mut self, remote_key: &str, local_path: &Path) -> Result<()> {
1242        if !self.config.enable_caching {
1243            return Ok(());
1244        }
1245
1246        let file_size = fs::metadata(local_path)?.len();
1247
1248        // Check if we need to evict entries
1249        while self.cache.current_size_bytes + file_size > self.cache.size_limit_bytes {
1250            self.evict_cache_entry()?;
1251        }
1252
1253        // Add to cache
1254        let cache_path = self.cache.cache_dir.join(remote_key);
1255        if let Some(parent) = cache_path.parent() {
1256            fs::create_dir_all(parent)?;
1257        }
1258
1259        fs::copy(local_path, &cache_path)?;
1260
1261        let entry = CacheEntry {
1262            key: remote_key.to_string(),
1263            local_path: cache_path,
1264            size_bytes: file_size,
1265            created_at: SystemTime::now(),
1266            last_accessed: SystemTime::now(),
1267            access_count: 1,
1268            hit_score: 1.0,
1269        };
1270
1271        self.cache.entries.insert(remote_key.to_string(), entry);
1272        self.cache.current_size_bytes += file_size;
1273
1274        Ok(())
1275    }
1276
1277    /// Remove artifact from cache
1278    pub fn remove_from_cache(&mut self, remote_key: &str) -> Result<()> {
1279        if let Some(entry) = self.cache.entries.remove(remote_key) {
1280            if entry.local_path.exists() {
1281                fs::remove_file(&entry.local_path)?;
1282            }
1283            self.cache.current_size_bytes -= entry.size_bytes;
1284        }
1285        Ok(())
1286    }
1287
1288    /// Create a download task
1289    pub fn create_download_task(&mut self, remote_key: &str, local_path: &Path) -> Result<String> {
1290        let task_id = uuid::Uuid::new_v4().to_string();
1291
1292        let task = DownloadTask {
1293            id: task_id.clone(),
1294            remote_key: remote_key.to_string(),
1295            local_path: local_path.to_path_buf(),
1296            progress: DownloadProgress {
1297                bytes_downloaded: 0,
1298                total_bytes: 0, // Will be updated when download starts
1299                percentage: 0.0,
1300                speed_bps: 0.0,
1301                eta_seconds: None,
1302            },
1303            status: DownloadStatus::Queued,
1304            started_at: SystemTime::now(),
1305        };
1306
1307        self.active_downloads.insert(task_id.clone(), task);
1308        Ok(task_id)
1309    }
1310
1311    /// Complete a download task
1312    pub fn complete_download(&mut self, task_id: &str) -> Result<()> {
1313        if let Some(mut task) = self.active_downloads.remove(task_id) {
1314            task.status = DownloadStatus::Completed;
1315        }
1316        Ok(())
1317    }
1318
1319    /// Evict least recently used cache entry
1320    fn evict_cache_entry(&mut self) -> Result<()> {
1321        let mut oldest_key: Option<String> = None;
1322        let mut oldest_time = SystemTime::now();
1323
1324        for (key, entry) in &self.cache.entries {
1325            if entry.last_accessed < oldest_time {
1326                oldest_time = entry.last_accessed;
1327                oldest_key = Some(key.clone());
1328            }
1329        }
1330
1331        if let Some(key) = oldest_key {
1332            self.remove_from_cache(&key)?;
1333            self.cache.statistics.evictions += 1;
1334        }
1335
1336        Ok(())
1337    }
1338}
1339
1340impl RetentionManager {
1341    /// Create a new retention manager
1342    pub fn new(policy: ArtifactRetentionPolicy) -> Self {
1343        Self {
1344            policy,
1345            scheduler: CleanupScheduler::new(),
1346            cleanup_history: Vec::new(),
1347        }
1348    }
1349
1350    /// Run cleanup operation
1351    pub fn run_cleanup(
1352        &mut self,
1353        registry: &mut ArtifactRegistry,
1354        storage: &dyn ArtifactStorage,
1355    ) -> Result<CleanupResult> {
1356        let run_id = uuid::Uuid::new_v4().to_string();
1357        let start_time = SystemTime::now();
1358
1359        let mut artifacts_processed = 0;
1360        let mut artifacts_deleted = 0;
1361        let mut artifacts_archived = 0;
1362        let mut space_freed = 0u64;
1363
1364        // Apply cleanup rules
1365        for rule in &self.scheduler.rules {
1366            if !rule.enabled {
1367                continue;
1368            }
1369
1370            let artifacts_to_process: Vec<String> = registry
1371                .artifacts
1372                .keys()
1373                .filter(|key| {
1374                    self.should_apply_rule(
1375                        registry.artifacts.get(*key).expect("unwrap failed"),
1376                        rule,
1377                    )
1378                })
1379                .cloned()
1380                .collect();
1381
1382            for artifact_key in artifacts_to_process {
1383                artifacts_processed += 1;
1384
1385                match rule.action {
1386                    CleanupAction::Delete => {
1387                        if let Some(artifact) = registry.get_artifact(&artifact_key) {
1388                            space_freed += artifact.metadata.size_bytes;
1389                            storage.delete(&artifact_key)?;
1390                            registry.remove_artifact(&artifact_key);
1391                            artifacts_deleted += 1;
1392                        }
1393                    }
1394                    CleanupAction::Archive => {
1395                        if let Some(artifact) = registry.get_artifact_mut(&artifact_key) {
1396                            artifact.status = ArtifactStatus::Archived;
1397                            artifacts_archived += 1;
1398                        }
1399                    }
1400                    _ => {
1401                        // Other actions not implemented in this simplified version
1402                    }
1403                }
1404            }
1405        }
1406
1407        let duration = SystemTime::now()
1408            .duration_since(start_time)
1409            .unwrap_or(Duration::from_secs(0));
1410
1411        let result = CleanupResult {
1412            run_id,
1413            timestamp: start_time,
1414            artifacts_processed,
1415            artifacts_deleted,
1416            artifacts_archived,
1417            space_freed_bytes: space_freed,
1418            duration,
1419            summary: format!(
1420                "Processed {} artifacts, deleted {}, archived {}, freed {} bytes",
1421                artifacts_processed, artifacts_deleted, artifacts_archived, space_freed
1422            ),
1423        };
1424
1425        self.cleanup_history.push(result.clone());
1426        Ok(result)
1427    }
1428
1429    /// Check if cleanup rule should be applied to artifact
1430    fn should_apply_rule(&self, artifact: &ArtifactRecord, rule: &CleanupRule) -> bool {
1431        match &rule.condition {
1432            CleanupCondition::Age { days } => {
1433                if let Ok(duration) = SystemTime::now().duration_since(artifact.uploaded_at) {
1434                    duration.as_secs() > (*days as u64 * 24 * 3600)
1435                } else {
1436                    false
1437                }
1438            }
1439            CleanupCondition::Status { status } => artifact.status == *status,
1440            CleanupCondition::Tag { tag } => artifact.tags.contains(tag),
1441            _ => false, // Other conditions not implemented in this simplified version
1442        }
1443    }
1444}
1445
1446impl Default for CleanupScheduler {
1447    fn default() -> Self {
1448        Self::new()
1449    }
1450}
1451
1452impl CleanupScheduler {
1453    /// Create a new cleanup scheduler
1454    pub fn new() -> Self {
1455        Self {
1456            next_cleanup: SystemTime::now() + Duration::from_secs(24 * 3600), // Tomorrow
1457            interval: Duration::from_secs(24 * 3600),                         // Daily
1458            rules: Vec::new(),
1459            enabled: true,
1460        }
1461    }
1462}
1463
1464// Default implementations
1465
1466impl Default for LocalStorageConfig {
1467    fn default() -> Self {
1468        Self {
1469            create_dirs: true,
1470            file_permissions: Some(0o644),
1471            dir_permissions: Some(0o755),
1472            allow_symlinks: false,
1473        }
1474    }
1475}
1476
1477impl Default for RegistryStatistics {
1478    fn default() -> Self {
1479        Self {
1480            total_size_bytes: 0,
1481            active_artifacts: 0,
1482            failed_uploads: 0,
1483            deleted_artifacts: 0,
1484            average_size_bytes: 0.0,
1485        }
1486    }
1487}
1488
1489impl Default for CacheStatistics {
1490    fn default() -> Self {
1491        Self {
1492            hits: 0,
1493            misses: 0,
1494            hit_ratio: 0.0,
1495            evictions: 0,
1496            average_file_size: 0.0,
1497        }
1498    }
1499}
1500
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Local storage validates successfully against a writable temp dir.
    #[test]
    fn test_local_storage_creation() {
        let temp_dir = TempDir::new().expect("failed to create temp dir");
        let storage = LocalArtifactStorage::new(temp_dir.path().to_path_buf());
        assert!(storage.validate_connection().is_ok());
    }

    /// Adding an artifact updates the registry index, metadata, and
    /// aggregate statistics.
    #[test]
    fn test_artifact_registry() {
        let mut registry = ArtifactRegistry::new();
        assert_eq!(registry.artifacts.len(), 0);

        let artifact = ArtifactRecord {
            id: "test-id".to_string(),
            key: "test-key".to_string(),
            local_path: PathBuf::from("/tmp/test"),
            remote_key: "remote/test".to_string(),
            metadata: ArtifactMetadata {
                filename: "test.txt".to_string(),
                size_bytes: 100,
                content_type: "text/plain".to_string(),
                checksum: "abc123".to_string(),
                checksum_algorithm: ChecksumAlgorithm::SHA256,
                compression: None,
                encryption: None,
                custom_metadata: HashMap::new(),
            },
            uploaded_at: SystemTime::now(),
            last_accessed: None,
            tags: vec!["test".to_string()],
            retention_policy: "default".to_string(),
            status: ArtifactStatus::Uploaded,
        };

        registry
            .add_artifact(artifact)
            .expect("adding an artifact should succeed");
        assert_eq!(registry.artifacts.len(), 1);
        assert!(registry.get_artifact("test-key").is_some());
        // Bookkeeping must reflect the single uploaded artifact.
        assert_eq!(registry.metadata.total_artifacts, 1);
        assert_eq!(registry.metadata.statistics.total_size_bytes, 100);
        assert_eq!(registry.metadata.statistics.active_artifacts, 1);
    }

    /// A fresh upload manager starts with no active uploads and no history.
    #[test]
    fn test_upload_manager() {
        let config = ArtifactUploadConfig {
            compress: false,
            compression_level: 6,
            encrypt: false,
            timeout_sec: 300,
            max_file_size_mb: 1024,
            parallel_uploads: ParallelUploadConfig {
                enabled: false,
                max_concurrent: 1,
                chunk_size_mb: 100,
            },
        };

        let manager = UploadManager::new(config);
        assert_eq!(manager.active_uploads.len(), 0);
        assert!(manager.upload_history.is_empty());
    }

    /// Checksum algorithm variants compare by identity.
    #[test]
    fn test_checksum_algorithms() {
        assert_ne!(ChecksumAlgorithm::SHA256, ChecksumAlgorithm::MD5);
        assert_eq!(ChecksumAlgorithm::SHA256, ChecksumAlgorithm::SHA256);
    }

    /// Artifact status variants compare by identity.
    #[test]
    fn test_artifact_status() {
        assert_eq!(ArtifactStatus::Pending, ArtifactStatus::Pending);
        assert_ne!(ArtifactStatus::Pending, ArtifactStatus::Uploaded);
    }
}