mockforge_collab/
backup.rs

1//! Cloud backup and restore for workspaces
2
3use crate::core_bridge::CoreBridge;
4use crate::error::{CollabError, Result};
5use crate::history::VersionControl;
6use crate::workspace::WorkspaceService;
7use chrono::{DateTime, Utc};
8use serde::{Deserialize, Serialize};
9use sqlx::{Pool, Sqlite};
10use std::path::Path;
11use std::sync::Arc;
12use uuid::Uuid;
13
/// Storage backend type
///
/// Persisted in SQLite as the bare lowercase variant name (via `sqlx::Type`
/// with `rename_all = "lowercase"`); serde uses the same lowercase names for
/// JSON/YAML (de)serialization.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, sqlx::Type)]
#[sqlx(type_name = "storage_backend", rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
pub enum StorageBackend {
    /// Local filesystem
    Local,
    /// Amazon S3
    S3,
    /// Azure Blob Storage
    Azure,
    /// Google Cloud Storage
    Gcs,
    /// Custom storage backend
    Custom,
}
30
/// Backup record
///
/// One row of the `workspace_backups` table: metadata describing a single
/// backup of a workspace. The payload itself lives at `backup_url` on the
/// chosen storage backend.
#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct WorkspaceBackup {
    /// Unique backup ID
    pub id: Uuid,
    /// Workspace ID this backup was taken from
    pub workspace_id: Uuid,
    /// Backup URL or path (filesystem path for `StorageBackend::Local`)
    pub backup_url: String,
    /// Storage backend the payload was written to
    pub storage_backend: StorageBackend,
    /// Storage configuration (JSON), e.g. cloud credentials/region
    pub storage_config: Option<serde_json::Value>,
    /// Size of the serialized payload in bytes
    pub size_bytes: i64,
    /// Backup format ("yaml" or "json")
    pub backup_format: String,
    /// Whether backup is encrypted
    pub encrypted: bool,
    /// Commit ID this backup represents, if taken against a specific commit
    pub commit_id: Option<Uuid>,
    /// Created timestamp
    pub created_at: DateTime<Utc>,
    /// User who created the backup
    pub created_by: Uuid,
    /// Optional expiration date
    pub expires_at: Option<DateTime<Utc>>,
}
59
60impl WorkspaceBackup {
61    /// Create a new backup record
62    #[must_use]
63    pub fn new(
64        workspace_id: Uuid,
65        backup_url: String,
66        storage_backend: StorageBackend,
67        size_bytes: i64,
68        created_by: Uuid,
69    ) -> Self {
70        Self {
71            id: Uuid::new_v4(),
72            workspace_id,
73            backup_url,
74            storage_backend,
75            storage_config: None,
76            size_bytes,
77            backup_format: "yaml".to_string(),
78            encrypted: false,
79            commit_id: None,
80            created_at: Utc::now(),
81            created_by,
82            expires_at: None,
83        }
84    }
85}
86
/// Backup service for managing workspace backups
///
/// Creates, lists, restores, and deletes workspace backups. Metadata lives in
/// the `workspace_backups` SQLite table; payloads are written to the chosen
/// storage backend (currently only the local filesystem is implemented).
pub struct BackupService {
    // SQLite pool holding the `workspace_backups` table.
    db: Pool<Sqlite>,
    // Used to roll a restored workspace back to a specific commit.
    version_control: VersionControl,
    // Target directory for `StorageBackend::Local`; local backups fail if unset.
    local_backup_dir: Option<String>,
    // Bridge into mockforge-core for exporting/importing full workspace state.
    core_bridge: Arc<CoreBridge>,
    // Source of workspace records.
    workspace_service: Arc<WorkspaceService>,
}
95
96impl BackupService {
97    /// Create a new backup service
98    #[must_use]
99    pub fn new(
100        db: Pool<Sqlite>,
101        local_backup_dir: Option<String>,
102        core_bridge: Arc<CoreBridge>,
103        workspace_service: Arc<WorkspaceService>,
104    ) -> Self {
105        Self {
106            db: db.clone(),
107            version_control: VersionControl::new(db),
108            local_backup_dir,
109            core_bridge,
110            workspace_service,
111        }
112    }
113
114    /// Create a backup of a workspace
115    ///
116    /// Exports the workspace to the specified storage backend.
117    /// For now, we support local filesystem backups. Cloud storage
118    /// backends (S3, Azure, GCS) can be added later.
119    pub async fn backup_workspace(
120        &self,
121        workspace_id: Uuid,
122        user_id: Uuid,
123        storage_backend: StorageBackend,
124        format: Option<String>,
125        commit_id: Option<Uuid>,
126    ) -> Result<WorkspaceBackup> {
127        // Get workspace data using CoreBridge to get full workspace state
128        let workspace = self
129            .workspace_service
130            .get_workspace(workspace_id)
131            .await
132            .map_err(|e| CollabError::Internal(format!("Failed to get workspace: {e}")))?;
133
134        // Use CoreBridge to get full workspace state from mockforge-core
135        // This integrates with mockforge-core to get the complete workspace state
136        // including all mocks, folders, and configuration
137        let workspace_data = self
138            .core_bridge
139            .export_workspace_for_backup(&workspace)
140            .await
141            .map_err(|e| CollabError::Internal(format!("Failed to export workspace: {e}")))?;
142
143        // Serialize workspace data
144        let backup_format = format.unwrap_or_else(|| "yaml".to_string());
145        let serialized = match backup_format.as_str() {
146            "yaml" => serde_yaml::to_string(&workspace_data)
147                .map_err(|e| CollabError::Internal(format!("Failed to serialize to YAML: {e}")))?,
148            "json" => serde_json::to_string_pretty(&workspace_data)
149                .map_err(|e| CollabError::Internal(format!("Failed to serialize to JSON: {e}")))?,
150            _ => {
151                return Err(CollabError::InvalidInput(format!(
152                    "Unsupported backup format: {backup_format}"
153                )));
154            }
155        };
156
157        let size_bytes = serialized.len() as i64;
158
159        // Save to storage backend
160        let backup_url = match storage_backend {
161            StorageBackend::Local => {
162                self.save_to_local(workspace_id, &serialized, &backup_format).await?
163            }
164            StorageBackend::S3 => {
165                return Err(CollabError::Internal("S3 backup not yet implemented".to_string()));
166            }
167            StorageBackend::Azure => {
168                return Err(CollabError::Internal("Azure backup not yet implemented".to_string()));
169            }
170            StorageBackend::Gcs => {
171                return Err(CollabError::Internal("GCS backup not yet implemented".to_string()));
172            }
173            StorageBackend::Custom => {
174                return Err(CollabError::Internal(
175                    "Custom storage backend not yet implemented".to_string(),
176                ));
177            }
178        };
179
180        // Create backup record
181        let mut backup =
182            WorkspaceBackup::new(workspace_id, backup_url, storage_backend, size_bytes, user_id);
183        backup.backup_format = backup_format;
184        backup.commit_id = commit_id;
185
186        // Save to database
187        sqlx::query!(
188            r#"
189            INSERT INTO workspace_backups (
190                id, workspace_id, backup_url, storage_backend, storage_config,
191                size_bytes, backup_format, encrypted, commit_id, created_at, created_by, expires_at
192            )
193            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
194            "#,
195            backup.id,
196            backup.workspace_id,
197            backup.backup_url,
198            backup.storage_backend,
199            backup.storage_config,
200            backup.size_bytes,
201            backup.backup_format,
202            backup.encrypted,
203            backup.commit_id,
204            backup.created_at,
205            backup.created_by,
206            backup.expires_at
207        )
208        .execute(&self.db)
209        .await?;
210
211        Ok(backup)
212    }
213
214    /// Restore a workspace from a backup
215    pub async fn restore_workspace(
216        &self,
217        backup_id: Uuid,
218        target_workspace_id: Option<Uuid>,
219        user_id: Uuid,
220    ) -> Result<Uuid> {
221        // Get backup record
222        let backup = self.get_backup(backup_id).await?;
223
224        // Load backup data
225        let backup_data = match backup.storage_backend {
226            StorageBackend::Local => self.load_from_local(&backup.backup_url).await?,
227            _ => {
228                return Err(CollabError::Internal(
229                    "Only local backups are supported for restore".to_string(),
230                ));
231            }
232        };
233
234        // Deserialize workspace data
235        let workspace_data: serde_json::Value = match backup.backup_format.as_str() {
236            "yaml" => serde_yaml::from_str(&backup_data)
237                .map_err(|e| CollabError::Internal(format!("Failed to deserialize YAML: {e}")))?,
238            "json" => serde_json::from_str(&backup_data)
239                .map_err(|e| CollabError::Internal(format!("Failed to deserialize JSON: {e}")))?,
240            _ => {
241                return Err(CollabError::Internal(format!(
242                    "Unsupported backup format: {}",
243                    backup.backup_format
244                )));
245            }
246        };
247
248        // Get the user who created the backup (or use a default - this should be passed in)
249        // For now, we'll need to get it from the backup record
250        let backup_record = self.get_backup(backup_id).await?;
251        let owner_id = backup_record.created_by;
252
253        // Import workspace from backup using CoreBridge
254        let restored_team_workspace = self
255            .core_bridge
256            .import_workspace_from_backup(&workspace_data, owner_id, None)
257            .await?;
258
259        // Determine target workspace ID
260        let restored_workspace_id = target_workspace_id.unwrap_or(backup.workspace_id);
261
262        // If restoring to a different workspace, update the ID
263        let team_workspace = if restored_workspace_id == backup.workspace_id {
264            // Update existing workspace
265            restored_team_workspace
266        } else {
267            // Create new workspace with the restored data
268            let mut new_workspace = restored_team_workspace;
269            new_workspace.id = restored_workspace_id;
270            new_workspace
271        };
272
273        // Update the workspace in the database
274        // This is a simplified version - in production, you'd want to use WorkspaceService
275        // For now, we'll save it to disk and let the system pick it up
276        self.core_bridge.save_workspace_to_disk(&team_workspace).await?;
277
278        // Create restore commit if specified
279        if let Some(commit_id) = backup.commit_id {
280            // Restore to specific commit
281            let _ =
282                self.version_control.restore_to_commit(restored_workspace_id, commit_id).await?;
283        }
284
285        Ok(restored_workspace_id)
286    }
287
288    /// List all backups for a workspace
289    pub async fn list_backups(
290        &self,
291        workspace_id: Uuid,
292        limit: Option<i32>,
293    ) -> Result<Vec<WorkspaceBackup>> {
294        let limit = limit.unwrap_or(100);
295        let workspace_id_str = workspace_id.to_string();
296
297        let rows = sqlx::query!(
298            r#"
299            SELECT
300                id,
301                workspace_id,
302                backup_url,
303                storage_backend,
304                storage_config,
305                size_bytes,
306                backup_format,
307                encrypted,
308                commit_id,
309                created_at,
310                created_by,
311                expires_at
312            FROM workspace_backups
313            WHERE workspace_id = ?
314            ORDER BY created_at DESC
315            LIMIT ?
316            "#,
317            workspace_id_str,
318            limit
319        )
320        .fetch_all(&self.db)
321        .await?;
322
323        let backups: Result<Vec<WorkspaceBackup>> = rows
324            .into_iter()
325            .map(|row| {
326                Ok(WorkspaceBackup {
327                    id: Uuid::parse_str(&row.id)
328                        .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
329                    workspace_id: Uuid::parse_str(&row.workspace_id)
330                        .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
331                    backup_url: row.backup_url,
332                    storage_backend: serde_json::from_str(&row.storage_backend).map_err(|e| {
333                        CollabError::Internal(format!("Invalid storage_backend: {e}"))
334                    })?,
335                    storage_config: row
336                        .storage_config
337                        .as_ref()
338                        .and_then(|s| serde_json::from_str(s).ok()),
339                    size_bytes: row.size_bytes,
340                    backup_format: row.backup_format,
341                    encrypted: row.encrypted != 0,
342                    commit_id: row.commit_id.as_ref().and_then(|s| Uuid::parse_str(s).ok()),
343                    created_at: DateTime::parse_from_rfc3339(&row.created_at)
344                        .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))?
345                        .with_timezone(&Utc),
346                    created_by: Uuid::parse_str(&row.created_by)
347                        .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
348                    expires_at: row
349                        .expires_at
350                        .as_ref()
351                        .map(|s| {
352                            DateTime::parse_from_rfc3339(s)
353                                .map(|dt| dt.with_timezone(&Utc))
354                                .map_err(|e| {
355                                    CollabError::Internal(format!("Invalid timestamp: {e}"))
356                                })
357                        })
358                        .transpose()?,
359                })
360            })
361            .collect();
362        let backups = backups?;
363
364        Ok(backups)
365    }
366
367    /// Get a backup by ID
368    pub async fn get_backup(&self, backup_id: Uuid) -> Result<WorkspaceBackup> {
369        let backup_id_str = backup_id.to_string();
370        let row = sqlx::query!(
371            r#"
372            SELECT
373                id,
374                workspace_id,
375                backup_url,
376                storage_backend,
377                storage_config,
378                size_bytes,
379                backup_format,
380                encrypted,
381                commit_id,
382                created_at,
383                created_by,
384                expires_at
385            FROM workspace_backups
386            WHERE id = ?
387            "#,
388            backup_id_str
389        )
390        .fetch_optional(&self.db)
391        .await?
392        .ok_or_else(|| CollabError::Internal(format!("Backup not found: {backup_id}")))?;
393
394        Ok(WorkspaceBackup {
395            id: Uuid::parse_str(&row.id)
396                .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
397            workspace_id: Uuid::parse_str(&row.workspace_id)
398                .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
399            backup_url: row.backup_url,
400            storage_backend: serde_json::from_str(&row.storage_backend)
401                .map_err(|e| CollabError::Internal(format!("Invalid storage_backend: {e}")))?,
402            storage_config: row.storage_config.as_ref().and_then(|s| serde_json::from_str(s).ok()),
403            size_bytes: row.size_bytes,
404            backup_format: row.backup_format,
405            encrypted: row.encrypted != 0,
406            commit_id: row.commit_id.as_ref().and_then(|s| Uuid::parse_str(s).ok()),
407            created_at: DateTime::parse_from_rfc3339(&row.created_at)
408                .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))?
409                .with_timezone(&Utc),
410            created_by: Uuid::parse_str(&row.created_by)
411                .map_err(|e| CollabError::Internal(format!("Invalid UUID: {e}")))?,
412            expires_at: row
413                .expires_at
414                .as_ref()
415                .map(|s| {
416                    DateTime::parse_from_rfc3339(s)
417                        .map(|dt| dt.with_timezone(&Utc))
418                        .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))
419                })
420                .transpose()?,
421        })
422    }
423
424    /// Delete a backup
425    pub async fn delete_backup(&self, backup_id: Uuid) -> Result<()> {
426        // Get backup record to get the URL
427        let backup = self.get_backup(backup_id).await?;
428
429        // Delete from storage
430        match backup.storage_backend {
431            StorageBackend::Local => {
432                if Path::new(&backup.backup_url).exists() {
433                    tokio::fs::remove_file(&backup.backup_url).await.map_err(|e| {
434                        CollabError::Internal(format!("Failed to delete backup file: {e}"))
435                    })?;
436                }
437            }
438            StorageBackend::S3 => {
439                self.delete_from_s3(&backup.backup_url, backup.storage_config.as_ref()).await?;
440            }
441            StorageBackend::Azure => {
442                self.delete_from_azure(&backup.backup_url, backup.storage_config.as_ref())
443                    .await?;
444            }
445            StorageBackend::Gcs => {
446                self.delete_from_gcs(&backup.backup_url, backup.storage_config.as_ref()).await?;
447            }
448            StorageBackend::Custom => {
449                return Err(CollabError::Internal(
450                    "Custom storage backend deletion not implemented".to_string(),
451                ));
452            }
453        }
454
455        // Delete from database
456        let backup_id_str = backup_id.to_string();
457        sqlx::query!(
458            r#"
459            DELETE FROM workspace_backups
460            WHERE id = ?
461            "#,
462            backup_id_str
463        )
464        .execute(&self.db)
465        .await?;
466
467        Ok(())
468    }
469
470    /// Save backup to local filesystem
471    async fn save_to_local(&self, workspace_id: Uuid, data: &str, format: &str) -> Result<String> {
472        let backup_dir = self.local_backup_dir.as_ref().ok_or_else(|| {
473            CollabError::Internal("Local backup directory not configured".to_string())
474        })?;
475
476        // Ensure backup directory exists
477        tokio::fs::create_dir_all(backup_dir).await.map_err(|e| {
478            CollabError::Internal(format!("Failed to create backup directory: {e}"))
479        })?;
480
481        // Create backup filename with timestamp
482        let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
483        let filename = format!("workspace_{workspace_id}_{timestamp}.{format}");
484        let backup_path = Path::new(backup_dir).join(&filename);
485
486        // Write backup file
487        tokio::fs::write(&backup_path, data)
488            .await
489            .map_err(|e| CollabError::Internal(format!("Failed to write backup file: {e}")))?;
490
491        Ok(backup_path.to_string_lossy().to_string())
492    }
493
494    /// Load backup from local filesystem
495    async fn load_from_local(&self, backup_url: &str) -> Result<String> {
496        tokio::fs::read_to_string(backup_url)
497            .await
498            .map_err(|e| CollabError::Internal(format!("Failed to read backup file: {e}")))
499    }
500
501    /// Delete backup from S3
502    async fn delete_from_s3(
503        &self,
504        backup_url: &str,
505        storage_config: Option<&serde_json::Value>,
506    ) -> Result<()> {
507        #[cfg(feature = "s3")]
508        {
509            use aws_config::SdkConfig;
510            use aws_sdk_s3::config::{Credentials, Region};
511            use aws_sdk_s3::Client as S3Client;
512
513            // Parse S3 URL (format: s3://bucket-name/path/to/file)
514            if !backup_url.starts_with("s3://") {
515                return Err(CollabError::Internal(format!(
516                    "Invalid S3 URL format: {}",
517                    backup_url
518                )));
519            }
520
521            let url_parts: Vec<&str> =
522                backup_url.strip_prefix("s3://").unwrap().splitn(2, '/').collect();
523            if url_parts.len() != 2 {
524                return Err(CollabError::Internal(format!(
525                    "Invalid S3 URL format: {}",
526                    backup_url
527                )));
528            }
529
530            let bucket = url_parts[0];
531            let key = url_parts[1];
532
533            // Build AWS config with credentials from storage_config or environment
534            let aws_config: SdkConfig = if let Some(config) = storage_config {
535                // Extract S3 credentials from storage_config
536                // Expected format: {"access_key_id": "...", "secret_access_key": "...", "region": "..."}
537                let access_key_id =
538                    config.get("access_key_id").and_then(|v| v.as_str()).ok_or_else(|| {
539                        CollabError::Internal(
540                            "S3 access_key_id not found in storage_config".to_string(),
541                        )
542                    })?;
543
544                let secret_access_key =
545                    config.get("secret_access_key").and_then(|v| v.as_str()).ok_or_else(|| {
546                        CollabError::Internal(
547                            "S3 secret_access_key not found in storage_config".to_string(),
548                        )
549                    })?;
550
551                let region_str =
552                    config.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1");
553
554                // Create credentials provider
555                let credentials = Credentials::new(
556                    access_key_id,
557                    secret_access_key,
558                    None, // session token
559                    None, // expiration
560                    "mockforge",
561                );
562
563                // Build AWS config with custom credentials and region
564                aws_config::ConfigLoader::default()
565                    .credentials_provider(credentials)
566                    .region(Region::new(region_str.to_string()))
567                    .load()
568                    .await
569            } else {
570                // Use default AWS config (from environment variables, IAM role, etc.)
571                aws_config::load_from_env().await
572            };
573
574            // Create S3 client
575            let client = S3Client::new(&aws_config);
576
577            // Delete object from S3
578            client
579                .delete_object()
580                .bucket(bucket)
581                .key(key)
582                .send()
583                .await
584                .map_err(|e| CollabError::Internal(format!("Failed to delete S3 object: {}", e)))?;
585
586            tracing::info!("Successfully deleted S3 object: {}", backup_url);
587            Ok(())
588        }
589
590        #[cfg(not(feature = "s3"))]
591        {
592            Err(CollabError::Internal(
593                "S3 deletion requires 's3' feature to be enabled. Add 's3' feature to mockforge-collab in Cargo.toml.".to_string(),
594            ))
595        }
596    }
597
598    /// Delete backup from Azure Blob Storage
599    async fn delete_from_azure(
600        &self,
601        backup_url: &str,
602        storage_config: Option<&serde_json::Value>,
603    ) -> Result<()> {
604        #[cfg(feature = "azure")]
605        {
606            use azure_identity::DefaultAzureCredential;
607            use azure_storage_blobs::prelude::*;
608            use std::sync::Arc;
609
610            // Parse Azure URL (format: https://account.blob.core.windows.net/container/path)
611            if !backup_url.contains("blob.core.windows.net") {
612                return Err(CollabError::Internal(format!(
613                    "Invalid Azure Blob URL format: {}",
614                    backup_url
615                )));
616            }
617
618            // Parse URL properly
619            let url = url::Url::parse(backup_url)
620                .map_err(|e| CollabError::Internal(format!("Invalid Azure URL: {}", e)))?;
621
622            // Extract account name from hostname (e.g., "account.blob.core.windows.net" -> "account")
623            let hostname = url
624                .host_str()
625                .ok_or_else(|| CollabError::Internal("Invalid Azure hostname".to_string()))?;
626            let account_name = hostname.split('.').next().ok_or_else(|| {
627                CollabError::Internal("Invalid Azure hostname format".to_string())
628            })?;
629
630            // Extract container and blob name from path
631            let path = url.path();
632            let path_parts: Vec<&str> = path.splitn(3, '/').filter(|s| !s.is_empty()).collect();
633            if path_parts.len() < 2 {
634                return Err(CollabError::Internal(format!("Invalid Azure blob path: {}", path)));
635            }
636
637            let container_name = path_parts[0];
638            let blob_name = path_parts[1..].join("/");
639
640            // Extract Azure credentials from storage_config
641            // Expected format: {"account_name": "...", "account_key": "..."} or use DefaultAzureCredential
642            //
643            // NOTE: azure_storage_blobs 0.19 API has changed from previous versions.
644            // The API structure requires review of the 0.19 documentation to properly implement
645            // credential handling and client creation. The previous implementation used a different
646            // API structure that is no longer compatible.
647            //
648            // TODO: Review azure_storage_blobs 0.19 API documentation and update implementation:
649            // - StorageCredentials import path and usage
650            // - BlobServiceClient::new() signature and credential types
651            // - DefaultAzureCredential integration with BlobServiceClient
652            return Err(CollabError::Internal(
653                "Azure deletion implementation needs to be updated for azure_storage_blobs 0.19 API. \
654                 The API structure has changed and requires review of the 0.19 documentation."
655                    .to_string(),
656            ));
657        }
658
659        #[cfg(not(feature = "azure"))]
660        {
661            Err(CollabError::Internal(
662                "Azure deletion requires 'azure' feature to be enabled. Add 'azure' feature to mockforge-collab in Cargo.toml.".to_string(),
663            ))
664        }
665    }
666
667    /// Delete backup from Google Cloud Storage
668    async fn delete_from_gcs(
669        &self,
670        backup_url: &str,
671        storage_config: Option<&serde_json::Value>,
672    ) -> Result<()> {
673        #[cfg(feature = "gcs")]
674        {
675            // Note: google-cloud-storage 1.4.0 API has changed significantly
676            // The API structure is different from previous versions
677            // This implementation may need adjustment based on actual 1.4.0 API documentation
678            return Err(CollabError::Internal(
679                "GCS deletion implementation needs to be updated for google-cloud-storage 1.4.0 API. \
680                 The API structure has changed significantly and requires review of the 1.4.0 documentation."
681                    .to_string(),
682            ));
683
684            /* TODO: Update to google-cloud-storage 1.4.0 API
685            // The 1.4.0 API uses a different structure. Example implementation:
686            use google_cloud_storage::client::Client;
687            use google_cloud_storage::http::objects::delete::DeleteObjectRequest;
688
689            // Parse GCS URL (format: gs://bucket-name/path/to/file)
690            if !backup_url.starts_with("gs://") {
691                return Err(CollabError::Internal(format!(
692                    "Invalid GCS URL format: {}",
693                    backup_url
694                )));
695            }
696
697            let url_parts: Vec<&str> =
698                backup_url.strip_prefix("gs://").unwrap().splitn(2, '/').collect();
699            if url_parts.len() != 2 {
700                return Err(CollabError::Internal(format!(
701                    "Invalid GCS URL format: {}",
702                    backup_url
703                )));
704            }
705
706            let bucket_name = url_parts[0];
707            let object_name = url_parts[1];
708
709            // Extract GCS credentials from storage_config
710            // Expected format: {"service_account_key": "...", "project_id": "..."}
711            let project_id = storage_config
712                .and_then(|c| c.get("project_id"))
713                .and_then(|v| v.as_str())
714                .ok_or_else(|| {
715                    CollabError::Internal("GCS project_id not found in storage_config".to_string())
716                })?;
717
718            // Initialize GCS client with google-cloud-storage 1.4.0 API
719            // Note: The 1.4.0 API uses a different structure. For now, we'll use default credentials
720            // and handle service account keys through environment variables or metadata server
721            let client = Client::default()
722                .await
723                .map_err(|e| {
724                    CollabError::Internal(format!("Failed to initialize GCS client: {}", e))
725                })?;
726
727            // Delete object using google-cloud-storage 1.4.0 API
728            let request = DeleteObjectRequest {
729                bucket: bucket_name.to_string(),
730                object: object_name.to_string(),
731                ..Default::default()
732            };
733
734            client
735                .delete_object(&request)
736                .await
737                .map_err(|e| {
738                    CollabError::Internal(format!("Failed to delete GCS object: {}", e))
739                })?;
740
741            tracing::info!("Successfully deleted GCS object: {}", backup_url);
742            Ok(())
743            */
744        }
745
746        #[cfg(not(feature = "gcs"))]
747        {
748            Err(CollabError::Internal(
749                "GCS deletion requires 'gcs' feature to be enabled. Add 'gcs' feature to mockforge-collab in Cargo.toml.".to_string(),
750            ))
751        }
752    }
753
    /// Get workspace data for backup
    ///
    /// Gets the full workspace state from the `TeamWorkspace` and converts it to JSON.
    ///
    /// NOTE(review): no method visible in this file calls this —
    /// `backup_workspace` uses `export_workspace_for_backup` directly. Confirm
    /// it has external callers or is kept intentionally for future use.
    async fn get_workspace_data(&self, workspace_id: Uuid) -> Result<serde_json::Value> {
        // Get the TeamWorkspace
        let team_workspace = self.workspace_service.get_workspace(workspace_id).await?;

        // Use CoreBridge to get the full workspace state as JSON
        self.core_bridge.get_workspace_state_json(&team_workspace)
    }
764}
765
#[cfg(test)]
mod tests {
    use super::*;

    /// `StorageBackend` derives `PartialEq`/`Eq`: same variants compare
    /// equal, different variants compare unequal.
    #[test]
    fn test_storage_backend_equality() {
        assert_eq!(StorageBackend::Local, StorageBackend::Local);
        assert_eq!(StorageBackend::S3, StorageBackend::S3);
        assert_eq!(StorageBackend::Azure, StorageBackend::Azure);
        assert_eq!(StorageBackend::Gcs, StorageBackend::Gcs);
        assert_eq!(StorageBackend::Custom, StorageBackend::Custom);

        assert_ne!(StorageBackend::Local, StorageBackend::S3);
    }

    /// A single variant survives a serde JSON round-trip unchanged.
    #[test]
    fn test_storage_backend_serialization() {
        let backend = StorageBackend::S3;
        let json = serde_json::to_string(&backend).unwrap();
        let deserialized: StorageBackend = serde_json::from_str(&json).unwrap();

        assert_eq!(backend, deserialized);
    }

    /// Every `StorageBackend` variant round-trips through serde JSON.
    #[test]
    fn test_storage_backend_all_variants() {
        // Plain array instead of `vec!`: no heap allocation needed, and
        // `StorageBackend` is `Copy` so by-value iteration is identical
        // (clippy::useless_vec).
        let backends = [
            StorageBackend::Local,
            StorageBackend::S3,
            StorageBackend::Azure,
            StorageBackend::Gcs,
            StorageBackend::Custom,
        ];

        for backend in backends {
            let json = serde_json::to_string(&backend).unwrap();
            let deserialized: StorageBackend = serde_json::from_str(&json).unwrap();
            assert_eq!(backend, deserialized);
        }
    }

    /// `WorkspaceBackup::new` stores the provided values and applies the
    /// documented defaults: "yaml" format, unencrypted, no commit / expiry /
    /// storage config.
    #[test]
    fn test_workspace_backup_new() {
        let workspace_id = Uuid::new_v4();
        let created_by = Uuid::new_v4();
        let backup_url = "s3://bucket/backup.yaml".to_string();
        let size_bytes = 1024;

        let backup = WorkspaceBackup::new(
            workspace_id,
            backup_url.clone(),
            StorageBackend::S3,
            size_bytes,
            created_by,
        );

        assert_eq!(backup.workspace_id, workspace_id);
        assert_eq!(backup.backup_url, backup_url);
        assert_eq!(backup.storage_backend, StorageBackend::S3);
        assert_eq!(backup.size_bytes, size_bytes);
        assert_eq!(backup.created_by, created_by);
        assert_eq!(backup.backup_format, "yaml");
        assert!(!backup.encrypted);
        assert!(backup.commit_id.is_none());
        assert!(backup.expires_at.is_none());
        assert!(backup.storage_config.is_none());
    }

    /// `Clone` produces a field-for-field copy of the record.
    #[test]
    fn test_workspace_backup_clone() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            512,
            Uuid::new_v4(),
        );

        let cloned = backup.clone();

        assert_eq!(backup.id, cloned.id);
        assert_eq!(backup.workspace_id, cloned.workspace_id);
        assert_eq!(backup.backup_url, cloned.backup_url);
        assert_eq!(backup.size_bytes, cloned.size_bytes);
    }

    /// A full `WorkspaceBackup` record survives a serde JSON round-trip.
    #[test]
    fn test_workspace_backup_serialization() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            256,
            Uuid::new_v4(),
        );

        let json = serde_json::to_string(&backup).unwrap();
        let deserialized: WorkspaceBackup = serde_json::from_str(&json).unwrap();

        assert_eq!(backup.id, deserialized.id);
        assert_eq!(backup.workspace_id, deserialized.workspace_id);
        assert_eq!(backup.storage_backend, deserialized.storage_backend);
    }

    /// `commit_id` defaults to `None` and can be set after construction.
    #[test]
    fn test_workspace_backup_with_commit() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            128,
            Uuid::new_v4(),
        );

        let commit_id = Uuid::new_v4();
        backup.commit_id = Some(commit_id);

        assert_eq!(backup.commit_id, Some(commit_id));
    }

    /// The `encrypted` flag can be toggled on after construction.
    #[test]
    fn test_workspace_backup_with_encryption() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::S3,
            2048,
            Uuid::new_v4(),
        );

        backup.encrypted = true;

        assert!(backup.encrypted);
    }

    /// An expiration timestamp can be attached after construction.
    #[test]
    fn test_workspace_backup_with_expiration() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Azure,
            512,
            Uuid::new_v4(),
        );

        let expires_at = Utc::now() + chrono::Duration::days(30);
        backup.expires_at = Some(expires_at);

        assert!(backup.expires_at.is_some());
    }

    /// Arbitrary JSON storage configuration can be attached and read back.
    #[test]
    fn test_workspace_backup_with_storage_config() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::S3,
            1024,
            Uuid::new_v4(),
        );

        let config = serde_json::json!({
            "region": "us-east-1",
            "bucket": "my-bucket"
        });
        backup.storage_config = Some(config.clone());

        assert_eq!(backup.storage_config, Some(config));
    }

    /// `backup_format` defaults to "yaml" regardless of the URL's extension
    /// and can be overridden to "json".
    #[test]
    fn test_workspace_backup_different_formats() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.json".to_string(),
            StorageBackend::Local,
            256,
            Uuid::new_v4(),
        );

        assert_eq!(backup.backup_format, "yaml"); // Default

        backup.backup_format = "json".to_string();
        assert_eq!(backup.backup_format, "json");
    }

    /// `Debug` output of a variant includes the variant name.
    #[test]
    fn test_storage_backend_debug() {
        let backend = StorageBackend::S3;
        let debug_str = format!("{:?}", backend);
        assert!(debug_str.contains("S3"));
    }

    /// `Debug` output of a backup record includes the struct name.
    #[test]
    fn test_workspace_backup_debug() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            100,
            Uuid::new_v4(),
        );

        let debug_str = format!("{:?}", backup);
        assert!(debug_str.contains("WorkspaceBackup"));
    }
}