1use crate::core_bridge::CoreBridge;
4use crate::error::{CollabError, Result};
5use crate::history::VersionControl;
6use crate::workspace::WorkspaceService;
7use chrono::{DateTime, Utc};
8use serde::{Deserialize, Serialize};
9use sqlx::{Pool, Sqlite};
10use std::path::Path;
11use std::sync::Arc;
12use uuid::Uuid;
13
/// Storage backend that holds a workspace backup's payload.
///
/// Encoded in lowercase both by serde (JSON/YAML) and by the database
/// column mapping via `sqlx::Type`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, sqlx::Type)]
#[sqlx(type_name = "storage_backend", rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
pub enum StorageBackend {
    /// Local filesystem — the only backend currently implemented end-to-end
    /// (see `backup_workspace` / `restore_workspace` below).
    Local,
    /// Amazon S3; deletion is implemented behind the `s3` feature, creation
    /// is not yet supported.
    S3,
    /// Azure Blob Storage; not yet implemented.
    Azure,
    /// Google Cloud Storage; not yet implemented.
    Gcs,
    /// User-supplied backend; not yet implemented.
    Custom,
}
30
/// Database record describing a single workspace backup.
#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct WorkspaceBackup {
    /// Primary key of the backup record.
    pub id: Uuid,
    /// Workspace this backup was taken from.
    pub workspace_id: Uuid,
    /// Location of the payload (filesystem path, `s3://bucket/key` URL, …).
    pub backup_url: String,
    /// Backend where the payload is stored.
    pub storage_backend: StorageBackend,
    /// Optional backend-specific settings (e.g. S3 credentials/region — see
    /// `delete_from_s3`).
    pub storage_config: Option<serde_json::Value>,
    /// Size of the serialized payload in bytes.
    pub size_bytes: i64,
    /// Serialization format of the payload ("yaml" or "json").
    pub backup_format: String,
    /// Whether the payload is encrypted; defaults to `false` in `new`.
    pub encrypted: bool,
    /// Optional version-control commit this backup is pinned to.
    pub commit_id: Option<Uuid>,
    /// Creation timestamp (UTC).
    pub created_at: DateTime<Utc>,
    /// User who created the backup.
    pub created_by: Uuid,
    /// Optional expiry timestamp; `None` means the backup never expires.
    pub expires_at: Option<DateTime<Utc>>,
}
59
60impl WorkspaceBackup {
61 #[must_use]
63 pub fn new(
64 workspace_id: Uuid,
65 backup_url: String,
66 storage_backend: StorageBackend,
67 size_bytes: i64,
68 created_by: Uuid,
69 ) -> Self {
70 Self {
71 id: Uuid::new_v4(),
72 workspace_id,
73 backup_url,
74 storage_backend,
75 storage_config: None,
76 size_bytes,
77 backup_format: "yaml".to_string(),
78 encrypted: false,
79 commit_id: None,
80 created_at: Utc::now(),
81 created_by,
82 expires_at: None,
83 }
84 }
85}
86
/// Service for creating, restoring, listing, and deleting workspace backups.
pub struct BackupService {
    /// SQLite connection pool backing the `workspace_backups` table.
    db: Pool<Sqlite>,
    /// Version-control handle, used to roll a restored workspace back to a
    /// pinned commit.
    version_control: VersionControl,
    /// Directory for `StorageBackend::Local` payloads; local backup/restore
    /// fails if this is `None`.
    local_backup_dir: Option<String>,
    /// Bridge to core workspace export/import/save operations.
    core_bridge: Arc<CoreBridge>,
    /// Workspace lookup service.
    workspace_service: Arc<WorkspaceService>,
}
95
96impl BackupService {
97 #[must_use]
99 pub fn new(
100 db: Pool<Sqlite>,
101 local_backup_dir: Option<String>,
102 core_bridge: Arc<CoreBridge>,
103 workspace_service: Arc<WorkspaceService>,
104 ) -> Self {
105 Self {
106 db: db.clone(),
107 version_control: VersionControl::new(db),
108 local_backup_dir,
109 core_bridge,
110 workspace_service,
111 }
112 }
113
    /// Creates a backup of a workspace and records it in the database.
    ///
    /// Exports the workspace state through the core bridge, serializes it to
    /// the requested `format` ("yaml" by default; "json" also supported),
    /// stores the payload on `storage_backend` (only `Local` is implemented),
    /// and inserts a `workspace_backups` row describing the result.
    ///
    /// # Errors
    /// Returns `CollabError::InvalidInput` for an unknown format, and
    /// `CollabError::Internal` for export/serialization/storage/database
    /// failures or not-yet-implemented backends.
    pub async fn backup_workspace(
        &self,
        workspace_id: Uuid,
        user_id: Uuid,
        storage_backend: StorageBackend,
        format: Option<String>,
        commit_id: Option<Uuid>,
    ) -> Result<WorkspaceBackup> {
        let workspace = self
            .workspace_service
            .get_workspace(workspace_id)
            .await
            .map_err(|e| CollabError::Internal(format!("Failed to get workspace: {e}")))?;

        // Full workspace state, exported via the core bridge.
        let workspace_data = self
            .core_bridge
            .export_workspace_for_backup(&workspace)
            .await
            .map_err(|e| CollabError::Internal(format!("Failed to export workspace: {e}")))?;

        let backup_format = format.unwrap_or_else(|| "yaml".to_string());
        let serialized = match backup_format.as_str() {
            "yaml" => serde_yaml::to_string(&workspace_data)
                .map_err(|e| CollabError::Internal(format!("Failed to serialize to YAML: {e}")))?,
            "json" => serde_json::to_string_pretty(&workspace_data)
                .map_err(|e| CollabError::Internal(format!("Failed to serialize to JSON: {e}")))?,
            _ => {
                return Err(CollabError::InvalidInput(format!(
                    "Unsupported backup format: {backup_format}"
                )));
            }
        };

        // Recorded size is that of the serialized payload, not the on-disk
        // file metadata.
        let size_bytes = serialized.len() as i64;

        // Persist the payload; every backend other than Local is still a stub.
        let backup_url = match storage_backend {
            StorageBackend::Local => {
                self.save_to_local(workspace_id, &serialized, &backup_format).await?
            }
            StorageBackend::S3 => {
                return Err(CollabError::Internal("S3 backup not yet implemented".to_string()));
            }
            StorageBackend::Azure => {
                return Err(CollabError::Internal("Azure backup not yet implemented".to_string()));
            }
            StorageBackend::Gcs => {
                return Err(CollabError::Internal("GCS backup not yet implemented".to_string()));
            }
            StorageBackend::Custom => {
                return Err(CollabError::Internal(
                    "Custom storage backend not yet implemented".to_string(),
                ));
            }
        };

        // Build the record, then set the fields the constructor defaults:
        // actual format used and the optional commit pin.
        let mut backup =
            WorkspaceBackup::new(workspace_id, backup_url, storage_backend, size_bytes, user_id);
        backup.backup_format = backup_format;
        backup.commit_id = commit_id;

        sqlx::query!(
            r#"
            INSERT INTO workspace_backups (
                id, workspace_id, backup_url, storage_backend, storage_config,
                size_bytes, backup_format, encrypted, commit_id, created_at, created_by, expires_at
            )
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            "#,
            backup.id,
            backup.workspace_id,
            backup.backup_url,
            backup.storage_backend,
            backup.storage_config,
            backup.size_bytes,
            backup.backup_format,
            backup.encrypted,
            backup.commit_id,
            backup.created_at,
            backup.created_by,
            backup.expires_at
        )
        .execute(&self.db)
        .await?;

        Ok(backup)
    }
213
214 pub async fn restore_workspace(
216 &self,
217 backup_id: Uuid,
218 target_workspace_id: Option<Uuid>,
219 user_id: Uuid,
220 ) -> Result<Uuid> {
221 let backup = self.get_backup(backup_id).await?;
223
224 let backup_data = match backup.storage_backend {
226 StorageBackend::Local => self.load_from_local(&backup.backup_url).await?,
227 _ => {
228 return Err(CollabError::Internal(
229 "Only local backups are supported for restore".to_string(),
230 ));
231 }
232 };
233
234 let workspace_data: serde_json::Value = match backup.backup_format.as_str() {
236 "yaml" => serde_yaml::from_str(&backup_data)
237 .map_err(|e| CollabError::Internal(format!("Failed to deserialize YAML: {e}")))?,
238 "json" => serde_json::from_str(&backup_data)
239 .map_err(|e| CollabError::Internal(format!("Failed to deserialize JSON: {e}")))?,
240 _ => {
241 return Err(CollabError::Internal(format!(
242 "Unsupported backup format: {}",
243 backup.backup_format
244 )));
245 }
246 };
247
248 let backup_record = self.get_backup(backup_id).await?;
251 let owner_id = backup_record.created_by;
252
253 let restored_team_workspace = self
255 .core_bridge
256 .import_workspace_from_backup(&workspace_data, owner_id, None)
257 .await?;
258
259 let restored_workspace_id = target_workspace_id.unwrap_or(backup.workspace_id);
261
262 let team_workspace = if restored_workspace_id == backup.workspace_id {
264 restored_team_workspace
266 } else {
267 let mut new_workspace = restored_team_workspace;
269 new_workspace.id = restored_workspace_id;
270 new_workspace
271 };
272
273 self.core_bridge.save_workspace_to_disk(&team_workspace).await?;
277
278 if let Some(commit_id) = backup.commit_id {
280 let _ =
282 self.version_control.restore_to_commit(restored_workspace_id, commit_id).await?;
283 }
284
285 Ok(restored_workspace_id)
286 }
287
288 pub async fn list_backups(
290 &self,
291 workspace_id: Uuid,
292 limit: Option<i32>,
293 ) -> Result<Vec<WorkspaceBackup>> {
294 let limit = limit.unwrap_or(100);
295 let workspace_id_str = workspace_id.to_string();
296
297 let rows = sqlx::query!(
298 r#"
299 SELECT
300 id,
301 workspace_id,
302 backup_url,
303 storage_backend,
304 storage_config,
305 size_bytes,
306 backup_format,
307 encrypted,
308 commit_id,
309 created_at,
310 created_by,
311 expires_at
312 FROM workspace_backups
313 WHERE workspace_id = ?
314 ORDER BY created_at DESC
315 LIMIT ?
316 "#,
317 workspace_id_str,
318 limit
319 )
320 .fetch_all(&self.db)
321 .await?;
322
323 let backups: Result<Vec<WorkspaceBackup>> = rows
324 .into_iter()
325 .map(|row| {
326 Ok(WorkspaceBackup {
327 id: Uuid::parse_str(&row.id)
328 .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
329 workspace_id: Uuid::parse_str(&row.workspace_id)
330 .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
331 backup_url: row.backup_url,
332 storage_backend: serde_json::from_str(&row.storage_backend).map_err(|e| {
333 CollabError::Internal(format!("Invalid storage_backend: {e}"))
334 })?,
335 storage_config: row
336 .storage_config
337 .as_ref()
338 .and_then(|s| serde_json::from_str(s).ok()),
339 size_bytes: row.size_bytes,
340 backup_format: row.backup_format,
341 encrypted: row.encrypted != 0,
342 commit_id: row.commit_id.as_ref().and_then(|s| Uuid::parse_str(s).ok()),
343 created_at: DateTime::parse_from_rfc3339(&row.created_at)
344 .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))?
345 .with_timezone(&Utc),
346 created_by: Uuid::parse_str(&row.created_by)
347 .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
348 expires_at: row
349 .expires_at
350 .as_ref()
351 .map(|s| {
352 DateTime::parse_from_rfc3339(s)
353 .map(|dt| dt.with_timezone(&Utc))
354 .map_err(|e| {
355 CollabError::Internal(format!("Invalid timestamp: {e}"))
356 })
357 })
358 .transpose()?,
359 })
360 })
361 .collect();
362 let backups = backups?;
363
364 Ok(backups)
365 }
366
367 pub async fn get_backup(&self, backup_id: Uuid) -> Result<WorkspaceBackup> {
369 let backup_id_str = backup_id.to_string();
370 let row = sqlx::query!(
371 r#"
372 SELECT
373 id,
374 workspace_id,
375 backup_url,
376 storage_backend,
377 storage_config,
378 size_bytes,
379 backup_format,
380 encrypted,
381 commit_id,
382 created_at,
383 created_by,
384 expires_at
385 FROM workspace_backups
386 WHERE id = ?
387 "#,
388 backup_id_str
389 )
390 .fetch_optional(&self.db)
391 .await?
392 .ok_or_else(|| CollabError::Internal(format!("Backup not found: {backup_id}")))?;
393
394 Ok(WorkspaceBackup {
395 id: Uuid::parse_str(&row.id)
396 .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
397 workspace_id: Uuid::parse_str(&row.workspace_id)
398 .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
399 backup_url: row.backup_url,
400 storage_backend: serde_json::from_str(&row.storage_backend)
401 .map_err(|e| CollabError::Internal(format!("Invalid storage_backend: {e}")))?,
402 storage_config: row.storage_config.as_ref().and_then(|s| serde_json::from_str(s).ok()),
403 size_bytes: row.size_bytes,
404 backup_format: row.backup_format,
405 encrypted: row.encrypted != 0,
406 commit_id: row.commit_id.as_ref().and_then(|s| Uuid::parse_str(s).ok()),
407 created_at: DateTime::parse_from_rfc3339(&row.created_at)
408 .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))?
409 .with_timezone(&Utc),
410 created_by: Uuid::parse_str(&row.created_by)
411 .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
412 expires_at: row
413 .expires_at
414 .as_ref()
415 .map(|s| {
416 DateTime::parse_from_rfc3339(s)
417 .map(|dt| dt.with_timezone(&Utc))
418 .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))
419 })
420 .transpose()?,
421 })
422 }
423
424 pub async fn delete_backup(&self, backup_id: Uuid) -> Result<()> {
426 let backup = self.get_backup(backup_id).await?;
428
429 match backup.storage_backend {
431 StorageBackend::Local => {
432 if Path::new(&backup.backup_url).exists() {
433 tokio::fs::remove_file(&backup.backup_url).await.map_err(|e| {
434 CollabError::Internal(format!("Failed to delete backup file: {e}"))
435 })?;
436 }
437 }
438 StorageBackend::S3 => {
439 self.delete_from_s3(&backup.backup_url, backup.storage_config.as_ref()).await?;
440 }
441 StorageBackend::Azure => {
442 self.delete_from_azure(&backup.backup_url, backup.storage_config.as_ref())
443 .await?;
444 }
445 StorageBackend::Gcs => {
446 self.delete_from_gcs(&backup.backup_url, backup.storage_config.as_ref()).await?;
447 }
448 StorageBackend::Custom => {
449 return Err(CollabError::Internal(
450 "Custom storage backend deletion not implemented".to_string(),
451 ));
452 }
453 }
454
455 let backup_id_str = backup_id.to_string();
457 sqlx::query!(
458 r#"
459 DELETE FROM workspace_backups
460 WHERE id = ?
461 "#,
462 backup_id_str
463 )
464 .execute(&self.db)
465 .await?;
466
467 Ok(())
468 }
469
470 async fn save_to_local(&self, workspace_id: Uuid, data: &str, format: &str) -> Result<String> {
472 let backup_dir = self.local_backup_dir.as_ref().ok_or_else(|| {
473 CollabError::Internal("Local backup directory not configured".to_string())
474 })?;
475
476 tokio::fs::create_dir_all(backup_dir).await.map_err(|e| {
478 CollabError::Internal(format!("Failed to create backup directory: {e}"))
479 })?;
480
481 let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
483 let filename = format!("workspace_{workspace_id}_{timestamp}.{format}");
484 let backup_path = Path::new(backup_dir).join(&filename);
485
486 tokio::fs::write(&backup_path, data)
488 .await
489 .map_err(|e| CollabError::Internal(format!("Failed to write backup file: {e}")))?;
490
491 Ok(backup_path.to_string_lossy().to_string())
492 }
493
494 async fn load_from_local(&self, backup_url: &str) -> Result<String> {
496 tokio::fs::read_to_string(backup_url)
497 .await
498 .map_err(|e| CollabError::Internal(format!("Failed to read backup file: {e}")))
499 }
500
    /// Deletes a backup object addressed by an `s3://bucket/key` URL.
    ///
    /// With the `s3` feature enabled, credentials are taken from
    /// `storage_config` (`access_key_id`, `secret_access_key`, optional
    /// `region` defaulting to us-east-1) or, if no config is given, from the
    /// ambient AWS environment. Without the feature this always errors.
    async fn delete_from_s3(
        &self,
        backup_url: &str,
        storage_config: Option<&serde_json::Value>,
    ) -> Result<()> {
        #[cfg(feature = "s3")]
        {
            use aws_config::SdkConfig;
            use aws_sdk_s3::config::{Credentials, Region};
            use aws_sdk_s3::Client as S3Client;

            if !backup_url.starts_with("s3://") {
                return Err(CollabError::Internal(format!(
                    "Invalid S3 URL format: {}",
                    backup_url
                )));
            }

            // Split "s3://bucket/key/with/slashes" into bucket and key;
            // splitn(2, ...) keeps slashes inside the key intact.
            let url_parts: Vec<&str> =
                backup_url.strip_prefix("s3://").unwrap().splitn(2, '/').collect();
            if url_parts.len() != 2 {
                return Err(CollabError::Internal(format!(
                    "Invalid S3 URL format: {}",
                    backup_url
                )));
            }

            let bucket = url_parts[0];
            let key = url_parts[1];

            // Explicit credentials from storage_config take priority;
            // otherwise fall back to the standard AWS environment chain.
            let aws_config: SdkConfig = if let Some(config) = storage_config {
                let access_key_id =
                    config.get("access_key_id").and_then(|v| v.as_str()).ok_or_else(|| {
                        CollabError::Internal(
                            "S3 access_key_id not found in storage_config".to_string(),
                        )
                    })?;

                let secret_access_key =
                    config.get("secret_access_key").and_then(|v| v.as_str()).ok_or_else(|| {
                        CollabError::Internal(
                            "S3 secret_access_key not found in storage_config".to_string(),
                        )
                    })?;

                let region_str =
                    config.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1");

                // No session token or expiry; "mockforge" is the provider name.
                let credentials = Credentials::new(
                    access_key_id,
                    secret_access_key,
                    None, None, "mockforge",
                );

                aws_config::ConfigLoader::default()
                    .credentials_provider(credentials)
                    .region(Region::new(region_str.to_string()))
                    .load()
                    .await
            } else {
                aws_config::load_from_env().await
            };

            let client = S3Client::new(&aws_config);

            client
                .delete_object()
                .bucket(bucket)
                .key(key)
                .send()
                .await
                .map_err(|e| CollabError::Internal(format!("Failed to delete S3 object: {}", e)))?;

            tracing::info!("Successfully deleted S3 object: {}", backup_url);
            Ok(())
        }

        #[cfg(not(feature = "s3"))]
        {
            Err(CollabError::Internal(
                "S3 deletion requires 's3' feature to be enabled. Add 's3' feature to mockforge-collab in Cargo.toml.".to_string(),
            ))
        }
    }
597
    /// Deletes a backup blob from Azure Blob Storage.
    ///
    /// Currently non-functional: with the `azure` feature enabled the URL is
    /// parsed and validated, but the delete call itself is not implemented
    /// for the azure_storage_blobs 0.19 API and an error is always returned.
    /// Without the feature it errors immediately.
    async fn delete_from_azure(
        &self,
        backup_url: &str,
        storage_config: Option<&serde_json::Value>,
    ) -> Result<()> {
        #[cfg(feature = "azure")]
        {
            use azure_identity::DefaultAzureCredential;
            use azure_storage_blobs::prelude::*;
            use std::sync::Arc;

            // Cheap sanity check before a full URL parse.
            if !backup_url.contains("blob.core.windows.net") {
                return Err(CollabError::Internal(format!(
                    "Invalid Azure Blob URL format: {}",
                    backup_url
                )));
            }

            let url = url::Url::parse(backup_url)
                .map_err(|e| CollabError::Internal(format!("Invalid Azure URL: {}", e)))?;

            // The account is the first label of the hostname:
            // https://<account>.blob.core.windows.net/<container>/<blob>
            let hostname = url
                .host_str()
                .ok_or_else(|| CollabError::Internal("Invalid Azure hostname".to_string()))?;
            let account_name = hostname.split('.').next().ok_or_else(|| {
                CollabError::Internal("Invalid Azure hostname format".to_string())
            })?;

            // Path is "/<container>/<blob...>"; require at least both parts.
            let path = url.path();
            let path_parts: Vec<&str> = path.splitn(3, '/').filter(|s| !s.is_empty()).collect();
            if path_parts.len() < 2 {
                return Err(CollabError::Internal(format!("Invalid Azure blob path: {}", path)));
            }

            let container_name = path_parts[0];
            let blob_name = path_parts[1..].join("/");

            // NOTE(review): account_name, container_name, blob_name,
            // storage_config, and the credential imports above are currently
            // unused — the actual delete is unimplemented and this branch
            // always returns the error below.
            return Err(CollabError::Internal(
                "Azure deletion implementation needs to be updated for azure_storage_blobs 0.19 API. \
                The API structure has changed and requires review of the 0.19 documentation."
                    .to_string(),
            ));
        }

        #[cfg(not(feature = "azure"))]
        {
            Err(CollabError::Internal(
                "Azure deletion requires 'azure' feature to be enabled. Add 'azure' feature to mockforge-collab in Cargo.toml.".to_string(),
            ))
        }
    }
666
667 async fn delete_from_gcs(
669 &self,
670 backup_url: &str,
671 storage_config: Option<&serde_json::Value>,
672 ) -> Result<()> {
673 #[cfg(feature = "gcs")]
674 {
675 return Err(CollabError::Internal(
679 "GCS deletion implementation needs to be updated for google-cloud-storage 1.4.0 API. \
680 The API structure has changed significantly and requires review of the 1.4.0 documentation."
681 .to_string(),
682 ));
683
684 }
745
746 #[cfg(not(feature = "gcs"))]
747 {
748 Err(CollabError::Internal(
749 "GCS deletion requires 'gcs' feature to be enabled. Add 'gcs' feature to mockforge-collab in Cargo.toml.".to_string(),
750 ))
751 }
752 }
753
    /// Fetches the current state of `workspace_id` as a JSON value by going
    /// through the workspace service and the core bridge.
    ///
    /// NOTE(review): private and not referenced anywhere in this file —
    /// presumably kept for future use or called via a sibling module; confirm
    /// before removing (it will otherwise trigger a dead_code warning).
    async fn get_workspace_data(&self, workspace_id: Uuid) -> Result<serde_json::Value> {
        let team_workspace = self.workspace_service.get_workspace(workspace_id).await?;

        self.core_bridge.get_workspace_state_json(&team_workspace)
    }
764}
765
#[cfg(test)]
mod tests {
    use super::*;

    /// Variants compare by identity via the derived PartialEq/Eq.
    #[test]
    fn test_storage_backend_equality() {
        assert_eq!(StorageBackend::Local, StorageBackend::Local);
        assert_eq!(StorageBackend::S3, StorageBackend::S3);
        assert_eq!(StorageBackend::Azure, StorageBackend::Azure);
        assert_eq!(StorageBackend::Gcs, StorageBackend::Gcs);
        assert_eq!(StorageBackend::Custom, StorageBackend::Custom);

        assert_ne!(StorageBackend::Local, StorageBackend::S3);
    }

    /// A single variant survives a serde JSON round-trip.
    #[test]
    fn test_storage_backend_serialization() {
        let backend = StorageBackend::S3;
        let json = serde_json::to_string(&backend).unwrap();
        let deserialized: StorageBackend = serde_json::from_str(&json).unwrap();

        assert_eq!(backend, deserialized);
    }

    /// Every variant survives a serde JSON round-trip.
    #[test]
    fn test_storage_backend_all_variants() {
        let backends = vec![
            StorageBackend::Local,
            StorageBackend::S3,
            StorageBackend::Azure,
            StorageBackend::Gcs,
            StorageBackend::Custom,
        ];

        for backend in backends {
            let json = serde_json::to_string(&backend).unwrap();
            let deserialized: StorageBackend = serde_json::from_str(&json).unwrap();
            assert_eq!(backend, deserialized);
        }
    }

    /// `new` copies caller fields verbatim and applies the documented
    /// defaults for everything else.
    #[test]
    fn test_workspace_backup_new() {
        let workspace_id = Uuid::new_v4();
        let created_by = Uuid::new_v4();
        let backup_url = "s3://bucket/backup.yaml".to_string();
        let size_bytes = 1024;

        let backup = WorkspaceBackup::new(
            workspace_id,
            backup_url.clone(),
            StorageBackend::S3,
            size_bytes,
            created_by,
        );

        assert_eq!(backup.workspace_id, workspace_id);
        assert_eq!(backup.backup_url, backup_url);
        assert_eq!(backup.storage_backend, StorageBackend::S3);
        assert_eq!(backup.size_bytes, size_bytes);
        assert_eq!(backup.created_by, created_by);
        assert_eq!(backup.backup_format, "yaml");
        assert!(!backup.encrypted);
        assert!(backup.commit_id.is_none());
        assert!(backup.expires_at.is_none());
        assert!(backup.storage_config.is_none());
    }

    #[test]
    fn test_workspace_backup_clone() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            512,
            Uuid::new_v4(),
        );

        let cloned = backup.clone();

        assert_eq!(backup.id, cloned.id);
        assert_eq!(backup.workspace_id, cloned.workspace_id);
        assert_eq!(backup.backup_url, cloned.backup_url);
        assert_eq!(backup.size_bytes, cloned.size_bytes);
    }

    /// Full record survives a serde JSON round-trip.
    #[test]
    fn test_workspace_backup_serialization() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            256,
            Uuid::new_v4(),
        );

        let json = serde_json::to_string(&backup).unwrap();
        let deserialized: WorkspaceBackup = serde_json::from_str(&json).unwrap();

        assert_eq!(backup.id, deserialized.id);
        assert_eq!(backup.workspace_id, deserialized.workspace_id);
        assert_eq!(backup.storage_backend, deserialized.storage_backend);
    }

    #[test]
    fn test_workspace_backup_with_commit() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            128,
            Uuid::new_v4(),
        );

        let commit_id = Uuid::new_v4();
        backup.commit_id = Some(commit_id);

        assert_eq!(backup.commit_id, Some(commit_id));
    }

    #[test]
    fn test_workspace_backup_with_encryption() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::S3,
            2048,
            Uuid::new_v4(),
        );

        backup.encrypted = true;

        assert!(backup.encrypted);
    }

    #[test]
    fn test_workspace_backup_with_expiration() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Azure,
            512,
            Uuid::new_v4(),
        );

        let expires_at = Utc::now() + chrono::Duration::days(30);
        backup.expires_at = Some(expires_at);

        assert!(backup.expires_at.is_some());
    }

    #[test]
    fn test_workspace_backup_with_storage_config() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::S3,
            1024,
            Uuid::new_v4(),
        );

        let config = serde_json::json!({
            "region": "us-east-1",
            "bucket": "my-bucket"
        });
        backup.storage_config = Some(config.clone());

        assert_eq!(backup.storage_config, Some(config));
    }

    /// The constructor always defaults the format to "yaml" regardless of
    /// the URL's extension; callers must set the real format explicitly.
    /// (Fix: the assert and the mutation were fused onto one line.)
    #[test]
    fn test_workspace_backup_different_formats() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.json".to_string(),
            StorageBackend::Local,
            256,
            Uuid::new_v4(),
        );

        assert_eq!(backup.backup_format, "yaml");
        backup.backup_format = "json".to_string();
        assert_eq!(backup.backup_format, "json");
    }

    #[test]
    fn test_storage_backend_debug() {
        let backend = StorageBackend::S3;
        let debug_str = format!("{:?}", backend);
        assert!(debug_str.contains("S3"));
    }

    #[test]
    fn test_workspace_backup_debug() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            100,
            Uuid::new_v4(),
        );

        let debug_str = format!("{:?}", backup);
        assert!(debug_str.contains("WorkspaceBackup"));
    }
}