1use crate::error::{OptimError, Result};
8use serde::{Deserialize, Serialize};
9use std::collections::HashMap;
10use std::fs;
11use std::path::{Path, PathBuf};
12use std::time::{Duration, SystemTime};
13
14use super::config::{
15 ArtifactDownloadConfig, ArtifactRetentionPolicy, ArtifactStorageConfig,
16 ArtifactStorageProvider, ArtifactUploadConfig, CompressionAlgorithm, EncryptionConfig,
17 ParallelUploadConfig,
18};
19
/// Central coordinator for artifact storage.
///
/// Wires a pluggable storage backend to the in-memory registry plus the
/// upload, download, and retention sub-managers.
#[derive(Debug)]
pub struct ArtifactManager {
    /// Backend implementation (only local disk is implemented today).
    pub storage_provider: Box<dyn ArtifactStorage>,
    /// Full storage configuration this manager was built from.
    pub config: ArtifactStorageConfig,
    /// In-memory index of known artifacts, keyed by artifact key.
    pub registry: ArtifactRegistry,
    /// Tracks in-flight uploads and upload history.
    pub upload_manager: UploadManager,
    /// Tracks in-flight downloads and the local download cache.
    pub download_manager: DownloadManager,
    /// Applies retention rules (delete/archive) to registered artifacts.
    pub retention_manager: RetentionManager,
}
36
/// Abstraction over an artifact storage backend (local disk, object store, …).
///
/// Implementations must be `Send + Sync` so a manager can be shared across
/// threads, and `Debug` so the owning manager can derive `Debug`.
pub trait ArtifactStorage: std::fmt::Debug + Send + Sync {
    /// Upload the file at `local_path` under `remote_key`; returns a
    /// backend-specific URL or path of the stored object.
    fn upload(&self, local_path: &Path, remote_key: &str) -> Result<String>;

    /// Download the object stored under `remote_key` into `local_path`.
    fn download(&self, remote_key: &str, local_path: &Path) -> Result<()>;

    /// Remove the object stored under `remote_key`.
    fn delete(&self, remote_key: &str) -> Result<()>;

    /// List stored objects, optionally restricted to keys starting with `prefix`.
    fn list(&self, prefix: Option<&str>) -> Result<Vec<ArtifactInfo>>;

    /// Whether an object exists under `remote_key`.
    fn exists(&self, remote_key: &str) -> Result<bool>;

    /// Fetch backend metadata for the object under `remote_key`.
    fn get_metadata(&self, remote_key: &str) -> Result<ArtifactMetadata>;

    /// Aggregate statistics (total size, object count, …) for the store.
    fn get_storage_stats(&self) -> Result<StorageStatistics>;

    /// Verify the backend is reachable and writable.
    fn validate_connection(&self) -> Result<()>;
}
63
/// In-memory index of known artifacts plus registry-level bookkeeping.
#[derive(Debug, Clone)]
pub struct ArtifactRegistry {
    /// Artifact records keyed by their `key` field.
    pub artifacts: HashMap<String, ArtifactRecord>,
    /// Version, timestamps, and aggregate statistics for the registry.
    pub metadata: RegistryMetadata,
}
72
/// One tracked artifact: where it came from, where it is stored, and its state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactRecord {
    /// Unique record id (UUID string).
    pub id: String,
    /// Registry key; in practice this equals `remote_key` (see `upload_artifact`).
    pub key: String,
    /// Path of the original local file the artifact was uploaded from.
    pub local_path: PathBuf,
    /// Key the artifact is stored under in the backend.
    pub remote_key: String,
    /// Size, checksum, content type, and related metadata.
    pub metadata: ArtifactMetadata,
    /// When the upload was recorded.
    pub uploaded_at: SystemTime,
    /// Last access time, if ever accessed after upload.
    pub last_accessed: Option<SystemTime>,
    /// Free-form tags used e.g. by cleanup rules.
    pub tags: Vec<String>,
    /// Name of the retention policy applied to this artifact.
    pub retention_policy: String,
    /// Lifecycle state of the artifact.
    pub status: ArtifactStatus,
}
97
/// Lifecycle state of an artifact record.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum ArtifactStatus {
    /// Registered but not yet uploaded.
    Pending,
    /// Upload in progress.
    Uploading,
    /// Successfully stored in the backend.
    Uploaded,
    /// Upload failed.
    Failed,
    /// Selected for deletion by a retention rule, not yet removed.
    MarkedForDeletion,
    /// Removed from the backend.
    Deleted,
    /// Retained long-term by an archive cleanup action.
    Archived,
}
116
/// Registry-level bookkeeping: versioning, timestamps, and aggregates.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegistryMetadata {
    /// Registry schema version (currently "1.0.0").
    pub version: String,
    /// When the registry was created.
    pub created_at: SystemTime,
    /// Last time the artifact map was mutated.
    pub updated_at: SystemTime,
    /// Number of records currently in the registry.
    pub total_artifacts: usize,
    /// Aggregate counters recomputed on every mutation.
    pub statistics: RegistryStatistics,
}
131
/// Aggregate counters over all records in the registry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RegistryStatistics {
    /// Sum of `metadata.size_bytes` over all records.
    pub total_size_bytes: u64,
    /// Records with status `Uploaded`.
    pub active_artifacts: usize,
    /// Records with status `Failed`.
    pub failed_uploads: usize,
    /// Records with status `Deleted`.
    pub deleted_artifacts: usize,
    /// Mean record size in bytes (0.0 when the registry is empty).
    pub average_size_bytes: f64,
}
146
/// Lightweight listing entry returned by `ArtifactStorage::list`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactInfo {
    /// Object key relative to the storage root.
    pub key: String,
    /// Object size in bytes.
    pub size: u64,
    /// Last modification time reported by the backend.
    pub last_modified: SystemTime,
    /// MIME type if the backend knows it.
    pub content_type: Option<String>,
    /// Backend storage class (e.g. cloud tiers), if applicable.
    pub storage_class: Option<String>,
    /// Backend entity tag, if applicable.
    pub etag: Option<String>,
}
163
/// Descriptive metadata stored with each artifact.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ArtifactMetadata {
    /// Original file name (without directory components).
    pub filename: String,
    /// File size in bytes.
    pub size_bytes: u64,
    /// MIME type inferred from the file extension.
    pub content_type: String,
    /// Hex-encoded checksum of the file contents.
    pub checksum: String,
    /// Algorithm used to compute `checksum`.
    pub checksum_algorithm: ChecksumAlgorithm,
    /// Present when the stored artifact is compressed.
    pub compression: Option<CompressionInfo>,
    /// Present when the stored artifact is encrypted.
    pub encryption: Option<EncryptionInfo>,
    /// Arbitrary user-supplied key/value metadata.
    pub custom_metadata: HashMap<String, String>,
}
184
/// Supported checksum algorithms.
///
/// NOTE(review): only SHA256 and MD5 are actually computed by
/// `ArtifactManager::compute_file_checksum`; the rest fall back to a
/// length-based placeholder.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum ChecksumAlgorithm {
    MD5,
    SHA1,
    SHA256,
    SHA512,
    CRC32,
    Blake2b,
}
201
/// Compression details for a stored artifact.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CompressionInfo {
    /// Compression algorithm applied.
    pub algorithm: CompressionAlgorithm,
    /// Algorithm-specific compression level.
    pub level: u8,
    /// Size before compression, in bytes.
    pub original_size: u64,
    /// Size after compression, in bytes.
    pub compressed_size: u64,
    /// Ratio of compressed to original size.
    pub compression_ratio: f64,
}
216
/// Encryption details for a stored artifact.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptionInfo {
    /// Name of the encryption algorithm used.
    pub algorithm: String,
    /// Identifier of the key that encrypted the artifact.
    pub key_id: String,
    /// Initialization vector, if the algorithm requires one.
    pub iv: Option<String>,
    /// Additional algorithm/provider-specific parameters.
    pub metadata: HashMap<String, String>,
}
229
/// Aggregate statistics for a whole storage backend.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageStatistics {
    /// Total bytes stored.
    pub total_size_bytes: u64,
    /// Number of stored objects.
    pub object_count: usize,
    /// Remaining capacity, when the backend can report it.
    pub available_space_bytes: Option<u64>,
    /// Fraction of capacity in use, when the backend can report it.
    pub utilization_percent: Option<f64>,
    /// Mean object size in bytes (0.0 for an empty store).
    pub average_object_size: f64,
}
244
/// Tracks in-flight upload tasks and keeps a history of completed uploads.
#[derive(Debug, Clone)]
pub struct UploadManager {
    /// Upload behavior configuration (compression, encryption, parallelism).
    pub config: ArtifactUploadConfig,
    /// Currently running/queued uploads, keyed by task id.
    pub active_uploads: HashMap<String, UploadTask>,
    /// Results of finished uploads, in completion order.
    pub upload_history: Vec<UploadResult>,
}
255
/// One queued or in-flight upload.
#[derive(Debug, Clone)]
pub struct UploadTask {
    /// Unique task id (UUID string).
    pub id: String,
    /// Source file on local disk.
    pub local_path: PathBuf,
    /// Destination key in the storage backend.
    pub remote_key: String,
    /// Live progress counters.
    pub progress: UploadProgress,
    /// Current task state.
    pub status: UploadStatus,
    /// When the task was created.
    pub started_at: SystemTime,
    /// Per-task settings derived from the manager configuration.
    pub config: UploadTaskConfig,
}
274
/// Live progress counters for an upload.
#[derive(Debug, Clone)]
pub struct UploadProgress {
    /// Bytes transferred so far.
    pub bytes_uploaded: u64,
    /// Total bytes to transfer.
    pub total_bytes: u64,
    /// Completion percentage (0.0–100.0).
    pub percentage: f64,
    /// Current transfer speed in bytes per second.
    pub speed_bps: f64,
    /// Estimated seconds remaining, when computable.
    pub eta_seconds: Option<u64>,
}
289
/// State of an upload task.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum UploadStatus {
    /// Created but not yet started.
    Queued,
    /// Actively transferring.
    InProgress,
    /// Finished successfully.
    Completed,
    /// Finished with an error.
    Failed,
    /// Cancelled before completion.
    Cancelled,
    /// Temporarily suspended.
    Paused,
}
306
/// Per-task upload settings, snapshotted from the manager config at creation.
#[derive(Debug, Clone)]
pub struct UploadTaskConfig {
    /// Compress the payload before upload.
    pub compress: bool,
    /// Compression level to use when `compress` is set.
    pub compression_level: u8,
    /// Encrypt the payload before upload.
    pub encrypt: bool,
    /// Chunk size for chunked/parallel transfers.
    pub chunk_size_bytes: usize,
    /// Number of retries on transient failure.
    pub retry_attempts: u32,
}
321
/// Outcome of a finished upload, kept in `UploadManager::upload_history`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadResult {
    /// Id of the task this result belongs to.
    pub task_id: String,
    /// Destination key in the storage backend.
    pub remote_key: String,
    /// Terminal status of the upload.
    pub status: UploadStatus,
    /// Bytes transferred.
    pub bytes_uploaded: u64,
    /// Wall-clock duration of the upload.
    pub duration: Duration,
    /// Mean transfer speed in bytes per second.
    pub average_speed_bps: f64,
    /// Error description when the upload failed.
    pub error_message: Option<String>,
    /// When the result was recorded.
    pub timestamp: SystemTime,
}
342
/// Tracks in-flight downloads and maintains a local download cache.
#[derive(Debug, Clone)]
pub struct DownloadManager {
    /// Download behavior configuration (caching, cache limits).
    pub config: ArtifactDownloadConfig,
    /// LRU-style cache of previously downloaded artifacts.
    pub cache: DownloadCache,
    /// Currently running/queued downloads, keyed by task id.
    pub active_downloads: HashMap<String, DownloadTask>,
}
353
/// On-disk cache of downloaded artifacts with size-bounded LRU eviction.
#[derive(Debug, Clone)]
pub struct DownloadCache {
    /// Directory cached files are copied into.
    pub cache_dir: PathBuf,
    /// Maximum total cache size in bytes.
    pub size_limit_bytes: u64,
    /// Current total size of cached files in bytes.
    pub current_size_bytes: u64,
    /// Cache entries keyed by remote key.
    pub entries: HashMap<String, CacheEntry>,
    /// Hit/miss/eviction counters.
    pub statistics: CacheStatistics,
}
368
/// A single cached artifact on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheEntry {
    /// Remote key the cached file corresponds to.
    pub key: String,
    /// Location of the cached copy inside `cache_dir`.
    pub local_path: PathBuf,
    /// Size of the cached file in bytes.
    pub size_bytes: u64,
    /// When the entry was first cached.
    pub created_at: SystemTime,
    /// Last cache-hit time; used for LRU eviction.
    pub last_accessed: SystemTime,
    /// Number of cache hits on this entry.
    pub access_count: u64,
    /// Relative usefulness score of the entry.
    pub hit_score: f64,
}
387
/// Running counters for the download cache.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheStatistics {
    /// Number of cache hits.
    pub hits: u64,
    /// Number of cache misses.
    pub misses: u64,
    /// hits / (hits + misses).
    pub hit_ratio: f64,
    /// Number of entries evicted to make room.
    pub evictions: u64,
    /// Mean cached file size in bytes.
    pub average_file_size: f64,
}
402
/// One queued or in-flight download.
#[derive(Debug, Clone)]
pub struct DownloadTask {
    /// Unique task id (UUID string).
    pub id: String,
    /// Source key in the storage backend.
    pub remote_key: String,
    /// Destination file on local disk.
    pub local_path: PathBuf,
    /// Live progress counters.
    pub progress: DownloadProgress,
    /// Current task state.
    pub status: DownloadStatus,
    /// When the task was created.
    pub started_at: SystemTime,
}
419
/// Live progress counters for a download.
#[derive(Debug, Clone)]
pub struct DownloadProgress {
    /// Bytes transferred so far.
    pub bytes_downloaded: u64,
    /// Total bytes to transfer (0 when unknown at task creation).
    pub total_bytes: u64,
    /// Completion percentage (0.0–100.0).
    pub percentage: f64,
    /// Current transfer speed in bytes per second.
    pub speed_bps: f64,
    /// Estimated seconds remaining, when computable.
    pub eta_seconds: Option<u64>,
}
434
/// State of a download task.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DownloadStatus {
    /// Created but not yet started.
    Queued,
    /// Actively transferring.
    InProgress,
    /// Finished successfully.
    Completed,
    /// Finished with an error.
    Failed,
    /// Cancelled before completion.
    Cancelled,
    /// Served from the local download cache without hitting the backend.
    FromCache,
}
451
/// Applies retention rules to the registry and records cleanup runs.
#[derive(Debug, Clone)]
pub struct RetentionManager {
    /// Retention policy this manager enforces.
    pub policy: ArtifactRetentionPolicy,
    /// Schedule and rule set for periodic cleanup runs.
    pub scheduler: CleanupScheduler,
    /// Results of past cleanup runs, in execution order.
    pub cleanup_history: Vec<CleanupResult>,
}
462
/// Schedule and rule set for periodic cleanup runs.
#[derive(Debug, Clone)]
pub struct CleanupScheduler {
    /// When the next cleanup run is due.
    pub next_cleanup: SystemTime,
    /// Interval between cleanup runs.
    pub interval: Duration,
    /// Rules evaluated against every artifact on each run.
    pub rules: Vec<CleanupRule>,
    /// Master switch for scheduled cleanup.
    pub enabled: bool,
}
475
/// A single cleanup rule: when `condition` matches, apply `action`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CleanupRule {
    /// Human-readable rule name.
    pub name: String,
    /// Predicate selecting artifacts this rule applies to.
    pub condition: CleanupCondition,
    /// What to do with matching artifacts.
    pub action: CleanupAction,
    /// Relative priority among rules (higher values take precedence).
    pub priority: u32,
    /// Disabled rules are skipped during cleanup.
    pub enabled: bool,
}
490
/// Predicate forms for cleanup rules.
///
/// NOTE(review): only `Age`, `Status`, and `Tag` are evaluated by
/// `RetentionManager::should_apply_rule`; the remaining variants currently
/// never match.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CleanupCondition {
    /// Artifact is older than the given number of days.
    Age { days: u32 },
    /// Total size budget in gigabytes (not yet implemented).
    Size { max_size_gb: f64 },
    /// Maximum artifact count (not yet implemented).
    Count { max_count: usize },
    /// Artifact carries the given tag.
    Tag { tag: String },
    /// Artifact is in the given lifecycle state.
    Status { status: ArtifactStatus },
    /// Free-form expression (not yet implemented).
    Custom { expression: String },
}
507
/// Actions a cleanup rule may take on matching artifacts.
///
/// NOTE(review): only `Delete` and `Archive` are acted on by
/// `RetentionManager::run_cleanup`; the rest are counted but otherwise ignored.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum CleanupAction {
    /// Remove the artifact from storage and the registry.
    Delete,
    /// Mark the artifact `Archived` in the registry.
    Archive,
    /// Move the object to another storage class (not yet implemented).
    MoveToStorageClass { class: String },
    /// Compress the stored object (not yet implemented).
    Compress,
    /// Flag the artifact for manual review (not yet implemented).
    TagForReview,
    /// Log the match only (not yet implemented).
    Log,
}
524
/// Summary of one cleanup run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CleanupResult {
    /// Unique id of the cleanup run (UUID string).
    pub run_id: String,
    /// When the run started.
    pub timestamp: SystemTime,
    /// Number of artifacts matched by enabled rules.
    pub artifacts_processed: usize,
    /// Number of artifacts deleted.
    pub artifacts_deleted: usize,
    /// Number of artifacts archived.
    pub artifacts_archived: usize,
    /// Bytes reclaimed by deletions.
    pub space_freed_bytes: u64,
    /// Wall-clock duration of the run.
    pub duration: Duration,
    /// Human-readable one-line summary.
    pub summary: String,
}
545
/// `ArtifactStorage` backend that stores objects as files under a base directory.
#[derive(Debug, Clone)]
pub struct LocalArtifactStorage {
    /// Root directory; remote keys are resolved relative to it.
    pub base_path: PathBuf,
    /// Filesystem behavior settings.
    pub config: LocalStorageConfig,
}
554
/// Filesystem behavior settings for `LocalArtifactStorage`.
#[derive(Debug, Clone)]
pub struct LocalStorageConfig {
    /// Create missing directories on upload / connection validation.
    pub create_dirs: bool,
    /// Unix mode bits applied to stored files (ignored on non-Unix).
    pub file_permissions: Option<u32>,
    /// Unix mode bits intended for created directories.
    /// NOTE(review): currently not applied anywhere — confirm intent.
    pub dir_permissions: Option<u32>,
    /// Whether symlinks are permitted inside the storage tree.
    pub allow_symlinks: bool,
}
567
568impl ArtifactManager {
569 pub fn new(config: ArtifactStorageConfig) -> Result<Self> {
571 let storage_provider: Box<dyn ArtifactStorage> = match &config.provider {
572 ArtifactStorageProvider::Local(path) => {
573 Box::new(LocalArtifactStorage::new(path.clone()))
574 }
575 ArtifactStorageProvider::S3 {
576 bucket,
577 region,
578 prefix,
579 } => {
580 return Err(OptimError::InvalidConfig(
581 "S3 storage not yet implemented".to_string(),
582 ));
583 }
584 ArtifactStorageProvider::GCS { bucket, prefix } => {
585 return Err(OptimError::InvalidConfig(
586 "GCS storage not yet implemented".to_string(),
587 ));
588 }
589 ArtifactStorageProvider::AzureBlob {
590 account,
591 container,
592 prefix,
593 } => {
594 return Err(OptimError::InvalidConfig(
595 "Azure Blob storage not yet implemented".to_string(),
596 ));
597 }
598 ArtifactStorageProvider::FTP {
599 host,
600 port,
601 path,
602 secure,
603 } => {
604 return Err(OptimError::InvalidConfig(
605 "FTP storage not yet implemented".to_string(),
606 ));
607 }
608 ArtifactStorageProvider::HTTP { base_url, auth } => {
609 return Err(OptimError::InvalidConfig(
610 "HTTP storage not yet implemented".to_string(),
611 ));
612 }
613 };
614
615 Ok(Self {
616 storage_provider,
617 config: config.clone(),
618 registry: ArtifactRegistry::new(),
619 upload_manager: UploadManager::new(config.upload.clone()),
620 download_manager: DownloadManager::new(config.download.clone()),
621 retention_manager: RetentionManager::new(config.retention.clone()),
622 })
623 }
624
625 pub fn upload_artifact(
627 &mut self,
628 local_path: &Path,
629 remote_key: &str,
630 tags: Vec<String>,
631 ) -> Result<String> {
632 if !local_path.exists() {
634 return Err(OptimError::IO(std::io::Error::new(
635 std::io::ErrorKind::NotFound,
636 format!("File not found: {:?}", local_path),
637 )));
638 }
639
640 let metadata = self.create_artifact_metadata(local_path)?;
642
643 let task_id = self
645 .upload_manager
646 .create_upload_task(local_path, remote_key)?;
647
648 let remote_url = self.storage_provider.upload(local_path, remote_key)?;
650
651 let artifact_record = ArtifactRecord {
653 id: uuid::Uuid::new_v4().to_string(),
654 key: remote_key.to_string(),
655 local_path: local_path.to_path_buf(),
656 remote_key: remote_key.to_string(),
657 metadata,
658 uploaded_at: SystemTime::now(),
659 last_accessed: None,
660 tags,
661 retention_policy: "default".to_string(),
662 status: ArtifactStatus::Uploaded,
663 };
664
665 self.registry.add_artifact(artifact_record)?;
666
667 self.upload_manager.complete_upload(&task_id)?;
669
670 Ok(remote_url)
671 }
672
673 pub fn download_artifact(&mut self, remote_key: &str, local_path: &Path) -> Result<()> {
675 if let Some(cached_path) = self.download_manager.check_cache(remote_key)? {
677 if cached_path != local_path {
678 fs::copy(&cached_path, local_path).map_err(OptimError::IO)?;
679 }
680 return Ok(());
681 }
682
683 let task_id = self
685 .download_manager
686 .create_download_task(remote_key, local_path)?;
687
688 self.storage_provider.download(remote_key, local_path)?;
690
691 self.download_manager.update_cache(remote_key, local_path)?;
693
694 self.download_manager.complete_download(&task_id)?;
696
697 Ok(())
698 }
699
700 pub fn delete_artifact(&mut self, remote_key: &str) -> Result<()> {
702 self.storage_provider.delete(remote_key)?;
704
705 if let Some(artifact) = self.registry.get_artifact_mut(remote_key) {
707 artifact.status = ArtifactStatus::Deleted;
708 }
709
710 self.download_manager.remove_from_cache(remote_key)?;
712
713 Ok(())
714 }
715
716 pub fn list_artifacts(&self, prefix: Option<&str>) -> Result<Vec<ArtifactInfo>> {
718 self.storage_provider.list(prefix)
719 }
720
721 pub fn get_artifact_metadata(&self, remote_key: &str) -> Result<ArtifactMetadata> {
723 if let Some(artifact) = self.registry.get_artifact(remote_key) {
724 Ok(artifact.metadata.clone())
725 } else {
726 self.storage_provider.get_metadata(remote_key)
727 }
728 }
729
730 pub fn run_cleanup(&mut self) -> Result<CleanupResult> {
732 self.retention_manager
733 .run_cleanup(&mut self.registry, &*self.storage_provider)
734 }
735
736 pub fn get_storage_statistics(&self) -> Result<StorageStatistics> {
738 self.storage_provider.get_storage_stats()
739 }
740
741 fn create_artifact_metadata(&self, path: &Path) -> Result<ArtifactMetadata> {
743 let file_size = fs::metadata(path)?.len();
744 let filename = path
745 .file_name()
746 .and_then(|n| n.to_str())
747 .unwrap_or("unknown")
748 .to_string();
749
750 let checksum = self.compute_file_checksum(path, ChecksumAlgorithm::SHA256)?;
752
753 let content_type = self.determine_content_type(path);
755
756 Ok(ArtifactMetadata {
757 filename,
758 size_bytes: file_size,
759 content_type,
760 checksum,
761 checksum_algorithm: ChecksumAlgorithm::SHA256,
762 compression: None,
763 encryption: None,
764 custom_metadata: HashMap::new(),
765 })
766 }
767
768 fn compute_file_checksum(&self, path: &Path, algorithm: ChecksumAlgorithm) -> Result<String> {
770 use std::io::Read;
771
772 let mut file = fs::File::open(path).map_err(OptimError::IO)?;
773
774 let mut buffer = Vec::new();
775 file.read_to_end(&mut buffer).map_err(OptimError::IO)?;
776
777 let checksum = match algorithm {
778 ChecksumAlgorithm::SHA256 => {
779 use sha2::{Digest, Sha256};
780 let mut hasher = Sha256::new();
781 hasher.update(&buffer);
782 format!("{:x}", hasher.finalize())
783 }
784 ChecksumAlgorithm::MD5 => {
785 let mut hasher = md5::Context::new();
786 hasher.consume(&buffer);
787 format!("{:x}", hasher.finalize())
788 }
789 _ => {
790 format!("checksum_{}", buffer.len())
792 }
793 };
794
795 Ok(checksum)
796 }
797
798 fn determine_content_type(&self, path: &Path) -> String {
800 match path.extension().and_then(|ext| ext.to_str()) {
801 Some("json") => "application/json".to_string(),
802 Some("xml") => "application/xml".to_string(),
803 Some("html") | Some("htm") => "text/html".to_string(),
804 Some("txt") => "text/plain".to_string(),
805 Some("pdf") => "application/pdf".to_string(),
806 Some("zip") => "application/zip".to_string(),
807 Some("tar") => "application/x-tar".to_string(),
808 Some("gz") => "application/gzip".to_string(),
809 _ => "application/octet-stream".to_string(),
810 }
811 }
812}
813
814impl LocalArtifactStorage {
815 pub fn new(base_path: PathBuf) -> Self {
817 Self {
818 base_path,
819 config: LocalStorageConfig::default(),
820 }
821 }
822
823 pub fn new_with_config(base_path: PathBuf, config: LocalStorageConfig) -> Self {
825 Self { base_path, config }
826 }
827}
828
829impl ArtifactStorage for LocalArtifactStorage {
830 fn upload(&self, local_path: &Path, remote_key: &str) -> Result<String> {
831 let dest_path = self.base_path.join(remote_key);
832
833 if let Some(parent) = dest_path.parent() {
835 if self.config.create_dirs {
836 fs::create_dir_all(parent).map_err(OptimError::IO)?;
837 }
838 }
839
840 fs::copy(local_path, &dest_path).map_err(OptimError::IO)?;
842
843 if let Some(permissions) = self.config.file_permissions {
845 #[cfg(unix)]
846 {
847 use std::os::unix::fs::PermissionsExt;
848 let mut perms = fs::metadata(&dest_path)?.permissions();
849 perms.set_mode(permissions);
850 fs::set_permissions(&dest_path, perms)?;
851 }
852 }
853
854 Ok(dest_path.to_string_lossy().to_string())
855 }
856
857 fn download(&self, remote_key: &str, local_path: &Path) -> Result<()> {
858 let source_path = self.base_path.join(remote_key);
859
860 if !source_path.exists() {
861 return Err(OptimError::IO(std::io::Error::new(
862 std::io::ErrorKind::NotFound,
863 format!("Remote file not found: {}", remote_key),
864 )));
865 }
866
867 if let Some(parent) = local_path.parent() {
869 fs::create_dir_all(parent).map_err(OptimError::IO)?;
870 }
871
872 fs::copy(&source_path, local_path).map_err(OptimError::IO)?;
873
874 Ok(())
875 }
876
877 fn delete(&self, remote_key: &str) -> Result<()> {
878 let file_path = self.base_path.join(remote_key);
879
880 if file_path.exists() {
881 fs::remove_file(&file_path).map_err(OptimError::IO)?;
882 }
883
884 Ok(())
885 }
886
887 fn list(&self, prefix: Option<&str>) -> Result<Vec<ArtifactInfo>> {
888 let mut artifacts = Vec::new();
889 let search_path = if let Some(prefix) = prefix {
890 self.base_path.join(prefix)
891 } else {
892 self.base_path.clone()
893 };
894
895 self.collect_artifacts(&search_path, &mut artifacts, prefix)?;
896 Ok(artifacts)
897 }
898
899 fn exists(&self, remote_key: &str) -> Result<bool> {
900 let file_path = self.base_path.join(remote_key);
901 Ok(file_path.exists())
902 }
903
904 fn get_metadata(&self, remote_key: &str) -> Result<ArtifactMetadata> {
905 let file_path = self.base_path.join(remote_key);
906 let metadata = fs::metadata(&file_path).map_err(OptimError::IO)?;
907
908 let filename = file_path
909 .file_name()
910 .and_then(|n| n.to_str())
911 .unwrap_or("unknown")
912 .to_string();
913
914 Ok(ArtifactMetadata {
915 filename,
916 size_bytes: metadata.len(),
917 content_type: "application/octet-stream".to_string(), checksum: "unknown".to_string(), checksum_algorithm: ChecksumAlgorithm::SHA256,
920 compression: None,
921 encryption: None,
922 custom_metadata: HashMap::new(),
923 })
924 }
925
926 fn get_storage_stats(&self) -> Result<StorageStatistics> {
927 let mut total_size = 0u64;
928 let mut object_count = 0usize;
929
930 fn walk_dir(dir: &Path, total_size: &mut u64, count: &mut usize) -> Result<()> {
932 for entry in fs::read_dir(dir)? {
933 let entry = entry?;
934 let path = entry.path();
935 if path.is_file() {
936 *total_size += fs::metadata(&path)?.len();
937 *count += 1;
938 } else if path.is_dir() {
939 walk_dir(&path, total_size, count)?;
940 }
941 }
942 Ok(())
943 }
944
945 if self.base_path.exists() && self.base_path.is_dir() {
946 walk_dir(&self.base_path, &mut total_size, &mut object_count)?;
947 }
948
949 let average_object_size = if object_count > 0 {
950 total_size as f64 / object_count as f64
951 } else {
952 0.0
953 };
954
955 Ok(StorageStatistics {
956 total_size_bytes: total_size,
957 object_count,
958 available_space_bytes: None, utilization_percent: None,
960 average_object_size,
961 })
962 }
963
964 fn validate_connection(&self) -> Result<()> {
965 if !self.base_path.exists() {
967 if self.config.create_dirs {
968 fs::create_dir_all(&self.base_path).map_err(OptimError::IO)?;
969 } else {
970 return Err(OptimError::IO(std::io::Error::new(
971 std::io::ErrorKind::NotFound,
972 format!("Base path does not exist: {:?}", self.base_path),
973 )));
974 }
975 }
976
977 let test_file = self.base_path.join(".write_test");
979 fs::write(&test_file, "test").map_err(OptimError::IO)?;
980 fs::remove_file(&test_file).map_err(OptimError::IO)?;
981
982 Ok(())
983 }
984}
985
986impl LocalArtifactStorage {
987 fn collect_artifacts(
989 &self,
990 dir: &Path,
991 artifacts: &mut Vec<ArtifactInfo>,
992 prefix: Option<&str>,
993 ) -> Result<()> {
994 if !dir.is_dir() {
995 return Ok(());
996 }
997
998 for entry in fs::read_dir(dir)? {
999 let entry = entry?;
1000 let path = entry.path();
1001
1002 if path.is_file() {
1003 let relative_path = path.strip_prefix(&self.base_path).map_err(|e| {
1004 OptimError::IO(std::io::Error::new(
1005 std::io::ErrorKind::InvalidInput,
1006 format!("Failed to get relative path: {}", e),
1007 ))
1008 })?;
1009
1010 let key = relative_path.to_string_lossy().to_string();
1011
1012 if let Some(prefix) = prefix {
1014 if !key.starts_with(prefix) {
1015 continue;
1016 }
1017 }
1018
1019 let metadata = fs::metadata(&path)?;
1020 let modified = metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH);
1021
1022 artifacts.push(ArtifactInfo {
1023 key,
1024 size: metadata.len(),
1025 last_modified: modified,
1026 content_type: None,
1027 storage_class: None,
1028 etag: None,
1029 });
1030 } else if path.is_dir() {
1031 self.collect_artifacts(&path, artifacts, prefix)?;
1032 }
1033 }
1034
1035 Ok(())
1036 }
1037}
1038
1039impl Default for ArtifactRegistry {
1042 fn default() -> Self {
1043 Self::new()
1044 }
1045}
1046
1047impl ArtifactRegistry {
1048 pub fn new() -> Self {
1050 Self {
1051 artifacts: HashMap::new(),
1052 metadata: RegistryMetadata {
1053 version: "1.0.0".to_string(),
1054 created_at: SystemTime::now(),
1055 updated_at: SystemTime::now(),
1056 total_artifacts: 0,
1057 statistics: RegistryStatistics::default(),
1058 },
1059 }
1060 }
1061
1062 pub fn add_artifact(&mut self, artifact: ArtifactRecord) -> Result<()> {
1064 self.artifacts.insert(artifact.key.clone(), artifact);
1065 self.metadata.total_artifacts = self.artifacts.len();
1066 self.metadata.updated_at = SystemTime::now();
1067 self.update_statistics();
1068 Ok(())
1069 }
1070
1071 pub fn get_artifact(&self, key: &str) -> Option<&ArtifactRecord> {
1073 self.artifacts.get(key)
1074 }
1075
1076 pub fn get_artifact_mut(&mut self, key: &str) -> Option<&mut ArtifactRecord> {
1078 self.artifacts.get_mut(key)
1079 }
1080
1081 pub fn remove_artifact(&mut self, key: &str) -> Option<ArtifactRecord> {
1083 let result = self.artifacts.remove(key);
1084 if result.is_some() {
1085 self.metadata.total_artifacts = self.artifacts.len();
1086 self.metadata.updated_at = SystemTime::now();
1087 self.update_statistics();
1088 }
1089 result
1090 }
1091
1092 fn update_statistics(&mut self) {
1094 let mut total_size = 0u64;
1095 let mut active_count = 0;
1096 let mut failed_count = 0;
1097 let mut deleted_count = 0;
1098
1099 for artifact in self.artifacts.values() {
1100 total_size += artifact.metadata.size_bytes;
1101 match artifact.status {
1102 ArtifactStatus::Uploaded => active_count += 1,
1103 ArtifactStatus::Failed => failed_count += 1,
1104 ArtifactStatus::Deleted => deleted_count += 1,
1105 _ => {}
1106 }
1107 }
1108
1109 let average_size = if !self.artifacts.is_empty() {
1110 total_size as f64 / self.artifacts.len() as f64
1111 } else {
1112 0.0
1113 };
1114
1115 self.metadata.statistics = RegistryStatistics {
1116 total_size_bytes: total_size,
1117 active_artifacts: active_count,
1118 failed_uploads: failed_count,
1119 deleted_artifacts: deleted_count,
1120 average_size_bytes: average_size,
1121 };
1122 }
1123}
1124
1125impl UploadManager {
1126 pub fn new(config: ArtifactUploadConfig) -> Self {
1128 Self {
1129 config,
1130 active_uploads: HashMap::new(),
1131 upload_history: Vec::new(),
1132 }
1133 }
1134
1135 pub fn create_upload_task(&mut self, local_path: &Path, remote_key: &str) -> Result<String> {
1137 let task_id = uuid::Uuid::new_v4().to_string();
1138 let file_size = fs::metadata(local_path)?.len();
1139
1140 let task = UploadTask {
1141 id: task_id.clone(),
1142 local_path: local_path.to_path_buf(),
1143 remote_key: remote_key.to_string(),
1144 progress: UploadProgress {
1145 bytes_uploaded: 0,
1146 total_bytes: file_size,
1147 percentage: 0.0,
1148 speed_bps: 0.0,
1149 eta_seconds: None,
1150 },
1151 status: UploadStatus::Queued,
1152 started_at: SystemTime::now(),
1153 config: UploadTaskConfig {
1154 compress: self.config.compress,
1155 compression_level: self.config.compression_level,
1156 encrypt: self.config.encrypt,
1157 chunk_size_bytes: self.config.parallel_uploads.chunk_size_mb * 1024 * 1024,
1158 retry_attempts: 3,
1159 },
1160 };
1161
1162 self.active_uploads.insert(task_id.clone(), task);
1163 Ok(task_id)
1164 }
1165
1166 pub fn complete_upload(&mut self, task_id: &str) -> Result<()> {
1168 if let Some(mut task) = self.active_uploads.remove(task_id) {
1169 task.status = UploadStatus::Completed;
1170 let duration = SystemTime::now()
1171 .duration_since(task.started_at)
1172 .unwrap_or(Duration::from_secs(0));
1173
1174 let result = UploadResult {
1175 task_id: task_id.to_string(),
1176 remote_key: task.remote_key.clone(),
1177 status: UploadStatus::Completed,
1178 bytes_uploaded: task.progress.total_bytes,
1179 duration,
1180 average_speed_bps: if duration.as_secs() > 0 {
1181 task.progress.total_bytes as f64 / duration.as_secs_f64()
1182 } else {
1183 0.0
1184 },
1185 error_message: None,
1186 timestamp: SystemTime::now(),
1187 };
1188
1189 self.upload_history.push(result);
1190 }
1191
1192 Ok(())
1193 }
1194}
1195
1196impl DownloadManager {
1197 pub fn new(config: ArtifactDownloadConfig) -> Self {
1199 let cache_dir = config
1200 .cache_directory
1201 .clone()
1202 .unwrap_or_else(|| PathBuf::from("./cache"));
1203
1204 Self {
1205 config: config.clone(),
1206 cache: DownloadCache {
1207 cache_dir,
1208 size_limit_bytes: config.cache_size_limit_mb.unwrap_or(1024) as u64 * 1024 * 1024,
1209 current_size_bytes: 0,
1210 entries: HashMap::new(),
1211 statistics: CacheStatistics::default(),
1212 },
1213 active_downloads: HashMap::new(),
1214 }
1215 }
1216
1217 pub fn check_cache(&mut self, remote_key: &str) -> Result<Option<PathBuf>> {
1219 if !self.config.enable_caching {
1220 return Ok(None);
1221 }
1222
1223 if let Some(entry) = self.cache.entries.get_mut(remote_key) {
1224 entry.last_accessed = SystemTime::now();
1225 entry.access_count += 1;
1226 self.cache.statistics.hits += 1;
1227
1228 if entry.local_path.exists() {
1229 return Ok(Some(entry.local_path.clone()));
1230 } else {
1231 self.cache.entries.remove(remote_key);
1233 }
1234 }
1235
1236 self.cache.statistics.misses += 1;
1237 Ok(None)
1238 }
1239
1240 pub fn update_cache(&mut self, remote_key: &str, local_path: &Path) -> Result<()> {
1242 if !self.config.enable_caching {
1243 return Ok(());
1244 }
1245
1246 let file_size = fs::metadata(local_path)?.len();
1247
1248 while self.cache.current_size_bytes + file_size > self.cache.size_limit_bytes {
1250 self.evict_cache_entry()?;
1251 }
1252
1253 let cache_path = self.cache.cache_dir.join(remote_key);
1255 if let Some(parent) = cache_path.parent() {
1256 fs::create_dir_all(parent)?;
1257 }
1258
1259 fs::copy(local_path, &cache_path)?;
1260
1261 let entry = CacheEntry {
1262 key: remote_key.to_string(),
1263 local_path: cache_path,
1264 size_bytes: file_size,
1265 created_at: SystemTime::now(),
1266 last_accessed: SystemTime::now(),
1267 access_count: 1,
1268 hit_score: 1.0,
1269 };
1270
1271 self.cache.entries.insert(remote_key.to_string(), entry);
1272 self.cache.current_size_bytes += file_size;
1273
1274 Ok(())
1275 }
1276
1277 pub fn remove_from_cache(&mut self, remote_key: &str) -> Result<()> {
1279 if let Some(entry) = self.cache.entries.remove(remote_key) {
1280 if entry.local_path.exists() {
1281 fs::remove_file(&entry.local_path)?;
1282 }
1283 self.cache.current_size_bytes -= entry.size_bytes;
1284 }
1285 Ok(())
1286 }
1287
1288 pub fn create_download_task(&mut self, remote_key: &str, local_path: &Path) -> Result<String> {
1290 let task_id = uuid::Uuid::new_v4().to_string();
1291
1292 let task = DownloadTask {
1293 id: task_id.clone(),
1294 remote_key: remote_key.to_string(),
1295 local_path: local_path.to_path_buf(),
1296 progress: DownloadProgress {
1297 bytes_downloaded: 0,
1298 total_bytes: 0, percentage: 0.0,
1300 speed_bps: 0.0,
1301 eta_seconds: None,
1302 },
1303 status: DownloadStatus::Queued,
1304 started_at: SystemTime::now(),
1305 };
1306
1307 self.active_downloads.insert(task_id.clone(), task);
1308 Ok(task_id)
1309 }
1310
1311 pub fn complete_download(&mut self, task_id: &str) -> Result<()> {
1313 if let Some(mut task) = self.active_downloads.remove(task_id) {
1314 task.status = DownloadStatus::Completed;
1315 }
1316 Ok(())
1317 }
1318
1319 fn evict_cache_entry(&mut self) -> Result<()> {
1321 let mut oldest_key: Option<String> = None;
1322 let mut oldest_time = SystemTime::now();
1323
1324 for (key, entry) in &self.cache.entries {
1325 if entry.last_accessed < oldest_time {
1326 oldest_time = entry.last_accessed;
1327 oldest_key = Some(key.clone());
1328 }
1329 }
1330
1331 if let Some(key) = oldest_key {
1332 self.remove_from_cache(&key)?;
1333 self.cache.statistics.evictions += 1;
1334 }
1335
1336 Ok(())
1337 }
1338}
1339
1340impl RetentionManager {
1341 pub fn new(policy: ArtifactRetentionPolicy) -> Self {
1343 Self {
1344 policy,
1345 scheduler: CleanupScheduler::new(),
1346 cleanup_history: Vec::new(),
1347 }
1348 }
1349
1350 pub fn run_cleanup(
1352 &mut self,
1353 registry: &mut ArtifactRegistry,
1354 storage: &dyn ArtifactStorage,
1355 ) -> Result<CleanupResult> {
1356 let run_id = uuid::Uuid::new_v4().to_string();
1357 let start_time = SystemTime::now();
1358
1359 let mut artifacts_processed = 0;
1360 let mut artifacts_deleted = 0;
1361 let mut artifacts_archived = 0;
1362 let mut space_freed = 0u64;
1363
1364 for rule in &self.scheduler.rules {
1366 if !rule.enabled {
1367 continue;
1368 }
1369
1370 let artifacts_to_process: Vec<String> = registry
1371 .artifacts
1372 .keys()
1373 .filter(|key| {
1374 self.should_apply_rule(
1375 registry.artifacts.get(*key).expect("unwrap failed"),
1376 rule,
1377 )
1378 })
1379 .cloned()
1380 .collect();
1381
1382 for artifact_key in artifacts_to_process {
1383 artifacts_processed += 1;
1384
1385 match rule.action {
1386 CleanupAction::Delete => {
1387 if let Some(artifact) = registry.get_artifact(&artifact_key) {
1388 space_freed += artifact.metadata.size_bytes;
1389 storage.delete(&artifact_key)?;
1390 registry.remove_artifact(&artifact_key);
1391 artifacts_deleted += 1;
1392 }
1393 }
1394 CleanupAction::Archive => {
1395 if let Some(artifact) = registry.get_artifact_mut(&artifact_key) {
1396 artifact.status = ArtifactStatus::Archived;
1397 artifacts_archived += 1;
1398 }
1399 }
1400 _ => {
1401 }
1403 }
1404 }
1405 }
1406
1407 let duration = SystemTime::now()
1408 .duration_since(start_time)
1409 .unwrap_or(Duration::from_secs(0));
1410
1411 let result = CleanupResult {
1412 run_id,
1413 timestamp: start_time,
1414 artifacts_processed,
1415 artifacts_deleted,
1416 artifacts_archived,
1417 space_freed_bytes: space_freed,
1418 duration,
1419 summary: format!(
1420 "Processed {} artifacts, deleted {}, archived {}, freed {} bytes",
1421 artifacts_processed, artifacts_deleted, artifacts_archived, space_freed
1422 ),
1423 };
1424
1425 self.cleanup_history.push(result.clone());
1426 Ok(result)
1427 }
1428
1429 fn should_apply_rule(&self, artifact: &ArtifactRecord, rule: &CleanupRule) -> bool {
1431 match &rule.condition {
1432 CleanupCondition::Age { days } => {
1433 if let Ok(duration) = SystemTime::now().duration_since(artifact.uploaded_at) {
1434 duration.as_secs() > (*days as u64 * 24 * 3600)
1435 } else {
1436 false
1437 }
1438 }
1439 CleanupCondition::Status { status } => artifact.status == *status,
1440 CleanupCondition::Tag { tag } => artifact.tags.contains(tag),
1441 _ => false, }
1443 }
1444}
1445
1446impl Default for CleanupScheduler {
1447 fn default() -> Self {
1448 Self::new()
1449 }
1450}
1451
1452impl CleanupScheduler {
1453 pub fn new() -> Self {
1455 Self {
1456 next_cleanup: SystemTime::now() + Duration::from_secs(24 * 3600), interval: Duration::from_secs(24 * 3600), rules: Vec::new(),
1459 enabled: true,
1460 }
1461 }
1462}
1463
impl Default for LocalStorageConfig {
    /// Defaults: create missing directories, `0o644` files, `0o755`
    /// directories, symlinks disallowed.
    fn default() -> Self {
        Self {
            create_dirs: true,
            file_permissions: Some(0o644),
            dir_permissions: Some(0o755),
            allow_symlinks: false,
        }
    }
}
1476
1477impl Default for RegistryStatistics {
1478 fn default() -> Self {
1479 Self {
1480 total_size_bytes: 0,
1481 active_artifacts: 0,
1482 failed_uploads: 0,
1483 deleted_artifacts: 0,
1484 average_size_bytes: 0.0,
1485 }
1486 }
1487}
1488
1489impl Default for CacheStatistics {
1490 fn default() -> Self {
1491 Self {
1492 hits: 0,
1493 misses: 0,
1494 hit_ratio: 0.0,
1495 evictions: 0,
1496 average_file_size: 0.0,
1497 }
1498 }
1499}
1500
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    // A fresh temp directory should pass the connection (write-probe) check.
    #[test]
    fn test_local_storage_creation() {
        let temp_dir = TempDir::new().expect("unwrap failed");
        let storage = LocalArtifactStorage::new(temp_dir.path().to_path_buf());
        assert!(storage.validate_connection().is_ok());
    }

    // Adding a record makes it retrievable by its `key` field.
    #[test]
    fn test_artifact_registry() {
        let mut registry = ArtifactRegistry::new();
        assert_eq!(registry.artifacts.len(), 0);

        let artifact = ArtifactRecord {
            id: "test-id".to_string(),
            key: "test-key".to_string(),
            local_path: PathBuf::from("/tmp/test"),
            remote_key: "remote/test".to_string(),
            metadata: ArtifactMetadata {
                filename: "test.txt".to_string(),
                size_bytes: 100,
                content_type: "text/plain".to_string(),
                checksum: "abc123".to_string(),
                checksum_algorithm: ChecksumAlgorithm::SHA256,
                compression: None,
                encryption: None,
                custom_metadata: HashMap::new(),
            },
            uploaded_at: SystemTime::now(),
            last_accessed: None,
            tags: vec!["test".to_string()],
            retention_policy: "default".to_string(),
            status: ArtifactStatus::Uploaded,
        };

        registry.add_artifact(artifact).expect("unwrap failed");
        assert_eq!(registry.artifacts.len(), 1);
        assert!(registry.get_artifact("test-key").is_some());
    }

    // A new manager starts with no active uploads.
    #[test]
    fn test_upload_manager() {
        let config = ArtifactUploadConfig {
            compress: false,
            compression_level: 6,
            encrypt: false,
            timeout_sec: 300,
            max_file_size_mb: 1024,
            parallel_uploads: ParallelUploadConfig {
                enabled: false,
                max_concurrent: 1,
                chunk_size_mb: 100,
            },
        };

        let manager = UploadManager::new(config);
        assert_eq!(manager.active_uploads.len(), 0);
    }

    // Sanity-check the derived equality on the checksum enum.
    #[test]
    fn test_checksum_algorithms() {
        assert_ne!(ChecksumAlgorithm::SHA256, ChecksumAlgorithm::MD5);
        assert_eq!(ChecksumAlgorithm::SHA256, ChecksumAlgorithm::SHA256);
    }

    // Sanity-check the derived equality on the status enum.
    #[test]
    fn test_artifact_status() {
        assert_eq!(ArtifactStatus::Pending, ArtifactStatus::Pending);
        assert_ne!(ArtifactStatus::Pending, ArtifactStatus::Uploaded);
    }
}