use crate::core_bridge::CoreBridge;
use crate::error::{CollabError, Result};
use crate::history::VersionControl;
use crate::workspace::WorkspaceService;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use sqlx::{Pool, Sqlite};
use std::path::Path;
use std::sync::Arc;
use uuid::Uuid;

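/// Storage backend used to persist a workspace backup.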
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, sqlx::Type)]
#[sqlx(type_name = "storage_backend", rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
pub enum StorageBackend {
    Local,
    S3,
    Azure,
    Gcs,
    Custom,
}

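/// Metadata describing a single workspace backup, as stored in the
/// `workspace_backups` table.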
#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct WorkspaceBackup {
    pub id: Uuid,
    pub workspace_id: Uuid,
    pub backup_url: String,
    pub storage_backend: StorageBackend,
    pub storage_config: Option<serde_json::Value>,
    pub size_bytes: i64,
    pub backup_format: String,
    pub encrypted: bool,
    pub commit_id: Option<Uuid>,
    pub created_at: DateTime<Utc>,
    pub created_by: Uuid,
    pub expires_at: Option<DateTime<Utc>>,
}

impl WorkspaceBackup {
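    /// Creates a new backup record with a generated id, `yaml` format, and no
    /// encryption, commit, expiry, or storage config.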
    #[must_use]
    pub fn new(
        workspace_id: Uuid,
        backup_url: String,
        storage_backend: StorageBackend,
        size_bytes: i64,
        created_by: Uuid,
    ) -> Self {
        Self {
            id: Uuid::new_v4(),
            workspace_id,
            backup_url,
            storage_backend,
            storage_config: None,
            size_bytes,
            backup_format: "yaml".to_string(),
            encrypted: false,
            commit_id: None,
            created_at: Utc::now(),
            created_by,
            expires_at: None,
        }
    }
}

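/// Service for creating, restoring, listing, and deleting workspace backups
/// across local and cloud storage backends.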
pub struct BackupService {
    db: Pool<Sqlite>,
    version_control: VersionControl,
    local_backup_dir: Option<String>,
    core_bridge: Arc<CoreBridge>,
    workspace_service: Arc<WorkspaceService>,
}

impl BackupService {
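    /// Creates a new backup service. `local_backup_dir` is only required when
    /// using [`StorageBackend::Local`].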
    #[must_use]
    pub fn new(
        db: Pool<Sqlite>,
        local_backup_dir: Option<String>,
        core_bridge: Arc<CoreBridge>,
        workspace_service: Arc<WorkspaceService>,
    ) -> Self {
        Self {
            db: db.clone(),
            version_control: VersionControl::new(db),
            local_backup_dir,
            core_bridge,
            workspace_service,
        }
    }

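    /// Creates a backup of a workspace without backend-specific configuration.
    ///
    /// Delegates to [`Self::backup_workspace_with_config`] with `storage_config`
    /// set to `None`.
    ///
    /// A minimal call-site sketch (illustrative only; assumes an already
    /// configured `service` and existing `workspace_id` and `user_id` values):
    ///
    /// ```ignore
    /// let backup = service
    ///     .backup_workspace(workspace_id, user_id, StorageBackend::Local, None, None)
    ///     .await?;
    /// println!("backup written to {}", backup.backup_url);
    /// ```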
    pub async fn backup_workspace(
        &self,
        workspace_id: Uuid,
        user_id: Uuid,
        storage_backend: StorageBackend,
        format: Option<String>,
        commit_id: Option<Uuid>,
    ) -> Result<WorkspaceBackup> {
        self.backup_workspace_with_config(
            workspace_id,
            user_id,
            storage_backend,
            format,
            commit_id,
            None,
        )
        .await
    }

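    /// Exports the workspace via the core bridge, serializes it to the requested
    /// format (`yaml` by default, or `json`), writes it to the chosen storage
    /// backend, and records the backup in the `workspace_backups` table.
    ///
    /// # Errors
    ///
    /// Returns an error if the workspace cannot be exported, the format is
    /// unsupported, or the backend is not implemented (`S3`, `Custom`).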
    pub async fn backup_workspace_with_config(
        &self,
        workspace_id: Uuid,
        user_id: Uuid,
        storage_backend: StorageBackend,
        format: Option<String>,
        commit_id: Option<Uuid>,
        storage_config: Option<serde_json::Value>,
    ) -> Result<WorkspaceBackup> {
        let workspace = self
            .workspace_service
            .get_workspace(workspace_id)
            .await
            .map_err(|e| CollabError::Internal(format!("Failed to get workspace: {e}")))?;

        let workspace_data = self
            .core_bridge
            .export_workspace_for_backup(&workspace)
            .await
            .map_err(|e| CollabError::Internal(format!("Failed to export workspace: {e}")))?;

        let backup_format = format.unwrap_or_else(|| "yaml".to_string());
        let serialized = match backup_format.as_str() {
            "yaml" => serde_yaml::to_string(&workspace_data)
                .map_err(|e| CollabError::Internal(format!("Failed to serialize to YAML: {e}")))?,
            "json" => serde_json::to_string_pretty(&workspace_data)
                .map_err(|e| CollabError::Internal(format!("Failed to serialize to JSON: {e}")))?,
            _ => {
                return Err(CollabError::InvalidInput(format!(
                    "Unsupported backup format: {backup_format}"
                )));
            }
        };

        let size_bytes = serialized.len() as i64;

        let backup_url = match storage_backend {
            StorageBackend::Local => {
                self.save_to_local(workspace_id, &serialized, &backup_format).await?
            }
            StorageBackend::S3 => {
                return Err(CollabError::Internal("S3 backup not yet implemented".to_string()));
            }
            StorageBackend::Azure => {
                self.save_to_azure(
                    workspace_id,
                    &serialized,
                    &backup_format,
                    storage_config.as_ref(),
                )
                .await?
            }
            StorageBackend::Gcs => {
                self.save_to_gcs(workspace_id, &serialized, &backup_format, storage_config.as_ref())
                    .await?
            }
            StorageBackend::Custom => {
                return Err(CollabError::Internal(
                    "Custom storage backend not yet implemented".to_string(),
                ));
            }
        };

        let mut backup =
            WorkspaceBackup::new(workspace_id, backup_url, storage_backend, size_bytes, user_id);
        backup.backup_format = backup_format;
        backup.storage_config = storage_config;
        backup.commit_id = commit_id;

        let storage_backend_str = match backup.storage_backend {
            StorageBackend::Local => "local",
            StorageBackend::S3 => "s3",
            StorageBackend::Azure => "azure",
            StorageBackend::Gcs => "gcs",
            StorageBackend::Custom => "custom",
        };
        let storage_config_str = backup.storage_config.as_ref().map(|v| v.to_string());
        let created_at_str = backup.created_at.to_rfc3339();
        let expires_at_str = backup.expires_at.map(|dt| dt.to_rfc3339());

        sqlx::query!(
            r#"
            INSERT INTO workspace_backups (
                id, workspace_id, backup_url, storage_backend, storage_config,
                size_bytes, backup_format, encrypted, commit_id, created_at, created_by, expires_at
            )
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            "#,
            backup.id,
            backup.workspace_id,
            backup.backup_url,
            storage_backend_str,
            storage_config_str,
            backup.size_bytes,
            backup.backup_format,
            backup.encrypted,
            backup.commit_id,
            created_at_str,
            backup.created_by,
            expires_at_str
        )
        .execute(&self.db)
        .await?;

        Ok(backup)
    }

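    /// Restores a workspace from a backup. Only local backups are currently
    /// supported. If `target_workspace_id` is `None`, the original workspace id
    /// is reused; otherwise the restored workspace is re-keyed to the target id.
    /// When the backup references a commit, the workspace is also rolled back to
    /// that commit. Returns the id of the restored workspace.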
    pub async fn restore_workspace(
        &self,
        backup_id: Uuid,
        target_workspace_id: Option<Uuid>,
        user_id: Uuid,
    ) -> Result<Uuid> {
        let backup = self.get_backup(backup_id).await?;

        let backup_data = match backup.storage_backend {
            StorageBackend::Local => self.load_from_local(&backup.backup_url).await?,
            _ => {
                return Err(CollabError::Internal(
                    "Only local backups are supported for restore".to_string(),
                ));
            }
        };

        let workspace_data: serde_json::Value = match backup.backup_format.as_str() {
            "yaml" => serde_yaml::from_str(&backup_data)
                .map_err(|e| CollabError::Internal(format!("Failed to deserialize YAML: {e}")))?,
            "json" => serde_json::from_str(&backup_data)
                .map_err(|e| CollabError::Internal(format!("Failed to deserialize JSON: {e}")))?,
            _ => {
                return Err(CollabError::Internal(format!(
                    "Unsupported backup format: {}",
                    backup.backup_format
                )));
            }
        };

        // The backup record fetched above already carries its creator; no need
        // to fetch it a second time.
        let owner_id = backup.created_by;

        let restored_team_workspace = self
            .core_bridge
            .import_workspace_from_backup(&workspace_data, owner_id, None)
            .await?;

        let restored_workspace_id = target_workspace_id.unwrap_or(backup.workspace_id);

        let team_workspace = if restored_workspace_id == backup.workspace_id {
            restored_team_workspace
        } else {
            let mut new_workspace = restored_team_workspace;
            new_workspace.id = restored_workspace_id;
            new_workspace
        };

        self.core_bridge.save_workspace_to_disk(&team_workspace).await?;

        if let Some(commit_id) = backup.commit_id {
            let _ = self.version_control.restore_to_commit(restored_workspace_id, commit_id).await?;
        }

        Ok(restored_workspace_id)
    }

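    /// Lists backups for a workspace, newest first, capped at `limit`
    /// (default 100).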
    pub async fn list_backups(
        &self,
        workspace_id: Uuid,
        limit: Option<i32>,
    ) -> Result<Vec<WorkspaceBackup>> {
        let limit = limit.unwrap_or(100);

        let rows = sqlx::query!(
            r#"
            SELECT
                id as "id: Uuid",
                workspace_id as "workspace_id: Uuid",
                backup_url,
                storage_backend,
                storage_config,
                size_bytes,
                backup_format,
                encrypted,
                commit_id as "commit_id: Uuid",
                created_at,
                created_by as "created_by: Uuid",
                expires_at
            FROM workspace_backups
            WHERE workspace_id = ?
            ORDER BY created_at DESC
            LIMIT ?
            "#,
            workspace_id,
            limit
        )
        .fetch_all(&self.db)
        .await?;

        let backups: Result<Vec<WorkspaceBackup>> = rows
            .into_iter()
            .map(|row| {
                let storage_backend = match row.storage_backend.as_str() {
                    "local" => StorageBackend::Local,
                    "s3" => StorageBackend::S3,
                    "azure" => StorageBackend::Azure,
                    "gcs" => StorageBackend::Gcs,
                    "custom" => StorageBackend::Custom,
                    other => {
                        return Err(CollabError::Internal(format!(
                            "Invalid storage_backend: {other}"
                        )))
                    }
                };
                Ok(WorkspaceBackup {
                    id: row.id,
                    workspace_id: row.workspace_id,
                    backup_url: row.backup_url,
                    storage_backend,
                    storage_config: row
                        .storage_config
                        .as_ref()
                        .and_then(|s| serde_json::from_str(s).ok()),
                    size_bytes: row.size_bytes,
                    backup_format: row.backup_format,
                    encrypted: row.encrypted != 0,
                    commit_id: row.commit_id,
                    created_at: DateTime::parse_from_rfc3339(&row.created_at)
                        .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))?
                        .with_timezone(&Utc),
                    created_by: row.created_by,
                    expires_at: row
                        .expires_at
                        .as_ref()
                        .map(|s| {
                            DateTime::parse_from_rfc3339(s)
                                .map(|dt| dt.with_timezone(&Utc))
                                .map_err(|e| {
                                    CollabError::Internal(format!("Invalid timestamp: {e}"))
                                })
                        })
                        .transpose()?,
                })
            })
            .collect();

        backups
    }

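    /// Fetches a single backup by id, returning an error if it does not exist
    /// or its stored fields cannot be decoded.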
    pub async fn get_backup(&self, backup_id: Uuid) -> Result<WorkspaceBackup> {
        let row = sqlx::query!(
            r#"
            SELECT
                id as "id: Uuid",
                workspace_id as "workspace_id: Uuid",
                backup_url,
                storage_backend,
                storage_config,
                size_bytes,
                backup_format,
                encrypted,
                commit_id as "commit_id: Uuid",
                created_at,
                created_by as "created_by: Uuid",
                expires_at
            FROM workspace_backups
            WHERE id = ?
            "#,
            backup_id
        )
        .fetch_optional(&self.db)
        .await?
        .ok_or_else(|| CollabError::Internal(format!("Backup not found: {backup_id}")))?;

        let storage_backend = match row.storage_backend.as_str() {
            "local" => StorageBackend::Local,
            "s3" => StorageBackend::S3,
            "azure" => StorageBackend::Azure,
            "gcs" => StorageBackend::Gcs,
            "custom" => StorageBackend::Custom,
            other => {
                return Err(CollabError::Internal(format!("Invalid storage_backend: {other}")))
            }
        };

        Ok(WorkspaceBackup {
            id: row.id,
            workspace_id: row.workspace_id,
            backup_url: row.backup_url,
            storage_backend,
            storage_config: row.storage_config.as_ref().and_then(|s| serde_json::from_str(s).ok()),
            size_bytes: row.size_bytes,
            backup_format: row.backup_format,
            encrypted: row.encrypted != 0,
            commit_id: row.commit_id,
            created_at: DateTime::parse_from_rfc3339(&row.created_at)
                .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))?
                .with_timezone(&Utc),
            created_by: row.created_by,
            expires_at: row
                .expires_at
                .as_ref()
                .map(|s| {
                    DateTime::parse_from_rfc3339(s)
                        .map(|dt| dt.with_timezone(&Utc))
                        .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {e}")))
                })
                .transpose()?,
        })
    }

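    /// Deletes a backup from its storage backend, then removes its database
    /// record. A missing local file is not treated as an error.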
    pub async fn delete_backup(&self, backup_id: Uuid) -> Result<()> {
        let backup = self.get_backup(backup_id).await?;

        match backup.storage_backend {
            StorageBackend::Local => {
                if Path::new(&backup.backup_url).exists() {
                    tokio::fs::remove_file(&backup.backup_url).await.map_err(|e| {
                        CollabError::Internal(format!("Failed to delete backup file: {e}"))
                    })?;
                }
            }
            StorageBackend::S3 => {
                self.delete_from_s3(&backup.backup_url, backup.storage_config.as_ref()).await?;
            }
            StorageBackend::Azure => {
                self.delete_from_azure(&backup.backup_url, backup.storage_config.as_ref())
                    .await?;
            }
            StorageBackend::Gcs => {
                self.delete_from_gcs(&backup.backup_url, backup.storage_config.as_ref()).await?;
            }
            StorageBackend::Custom => {
                return Err(CollabError::Internal(
                    "Custom storage backend deletion not implemented".to_string(),
                ));
            }
        }

        sqlx::query!(
            r#"
            DELETE FROM workspace_backups
            WHERE id = ?
            "#,
            backup_id
        )
        .execute(&self.db)
        .await?;

        Ok(())
    }

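    /// Writes backup data under `local_backup_dir` as
    /// `workspace_{workspace_id}_{timestamp}.{format}` and returns the file path.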
    async fn save_to_local(&self, workspace_id: Uuid, data: &str, format: &str) -> Result<String> {
        let backup_dir = self.local_backup_dir.as_ref().ok_or_else(|| {
            CollabError::Internal("Local backup directory not configured".to_string())
        })?;

        tokio::fs::create_dir_all(backup_dir).await.map_err(|e| {
            CollabError::Internal(format!("Failed to create backup directory: {e}"))
        })?;

        let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
        let filename = format!("workspace_{workspace_id}_{timestamp}.{format}");
        let backup_path = Path::new(backup_dir).join(&filename);

        tokio::fs::write(&backup_path, data)
            .await
            .map_err(|e| CollabError::Internal(format!("Failed to write backup file: {e}")))?;

        Ok(backup_path.to_string_lossy().to_string())
    }

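    /// Reads a local backup file into a string.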
    async fn load_from_local(&self, backup_url: &str) -> Result<String> {
        tokio::fs::read_to_string(backup_url)
            .await
            .map_err(|e| CollabError::Internal(format!("Failed to read backup file: {e}")))
    }

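    /// Uploads backup data to Azure Blob Storage. Requires the `azure` feature;
    /// authenticates with an account key or SAS token from `storage_config`,
    /// falling back to `DefaultAzureCredential`.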
    #[allow(unused_variables)]
    async fn save_to_azure(
        &self,
        workspace_id: Uuid,
        data: &str,
        format: &str,
        storage_config: Option<&serde_json::Value>,
    ) -> Result<String> {
        #[cfg(feature = "azure")]
        {
            use azure_identity::{DefaultAzureCredential, TokenCredentialOptions};
            use azure_storage::StorageCredentials;
            use azure_storage_blobs::prelude::*;
            use std::sync::Arc;

            let config = storage_config.ok_or_else(|| {
                CollabError::Internal("Azure storage configuration required".to_string())
            })?;

            let account_name = config
                .get("account_name")
                .and_then(|v| v.as_str())
                .map(|s| s.to_string())
                .ok_or_else(|| {
                    CollabError::Internal(
                        "Azure account_name required in storage config".to_string(),
                    )
                })?;

            let container_name = config
                .get("container_name")
                .and_then(|v| v.as_str())
                .map(|s| s.to_string())
                .unwrap_or_else(|| "mockforge-backups".to_string());

            let storage_credentials = if let Some(account_key) =
                config.get("account_key").and_then(|v| v.as_str()).map(|s| s.to_string())
            {
                StorageCredentials::access_key(account_name.clone(), account_key)
            } else if let Some(sas_token) =
                config.get("sas_token").and_then(|v| v.as_str()).map(|s| s.to_string())
            {
                StorageCredentials::sas_token(sas_token)
                    .map_err(|e| CollabError::Internal(format!("Invalid SAS token: {e}")))?
            } else {
                let credential = Arc::new(
                    DefaultAzureCredential::create(TokenCredentialOptions::default()).map_err(
                        |e| {
                            CollabError::Internal(format!(
                                "Failed to create Azure credentials: {e}"
                            ))
                        },
                    )?,
                );
                StorageCredentials::token_credential(credential)
            };

            let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
            let blob_name = format!("workspace_{workspace_id}_{timestamp}.{format}");

            let blob_client = ClientBuilder::new(account_name.clone(), storage_credentials)
                .blob_client(&container_name, &blob_name);

            blob_client
                .put_block_blob(data.as_bytes().to_vec())
                .content_type(match format {
                    "yaml" => "application/x-yaml",
                    "json" => "application/json",
                    _ => "application/octet-stream",
                })
                .await
                .map_err(|e| CollabError::Internal(format!("Failed to upload to Azure: {e}")))?;

            let backup_url = format!(
                "https://{}.blob.core.windows.net/{}/{}",
                account_name, container_name, blob_name
            );
            tracing::info!("Successfully uploaded backup to Azure: {}", backup_url);
            Ok(backup_url)
        }

        #[cfg(not(feature = "azure"))]
        {
            Err(CollabError::Internal(
                "Azure backup requires 'azure' feature to be enabled. Add 'azure' feature to mockforge-collab in Cargo.toml.".to_string(),
            ))
        }
    }

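    /// Uploads backup data to Google Cloud Storage. Requires the `gcs` feature;
    /// the bucket name comes from `storage_config` (default `mockforge-backups`).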
    #[allow(unused_variables)]
    async fn save_to_gcs(
        &self,
        workspace_id: Uuid,
        data: &str,
        format: &str,
        storage_config: Option<&serde_json::Value>,
    ) -> Result<String> {
        #[cfg(feature = "gcs")]
        {
            use bytes::Bytes;
            use google_cloud_storage::client::Storage;

            let config = storage_config.ok_or_else(|| {
                CollabError::Internal("GCS storage configuration required".to_string())
            })?;

            let bucket_name = config
                .get("bucket_name")
                .and_then(|v| v.as_str())
                .unwrap_or("mockforge-backups");

            let client = Storage::builder().build().await.map_err(|e| {
                CollabError::Internal(format!("Failed to create GCS client: {e}"))
            })?;

            let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
            let object_name = format!("workspace_{workspace_id}_{timestamp}.{format}");

            let payload = Bytes::from(data.as_bytes().to_vec());
            client
                .write_object(bucket_name, &object_name, payload)
                .send_unbuffered()
                .await
                .map_err(|e| CollabError::Internal(format!("Failed to upload to GCS: {e}")))?;

            let backup_url = format!("gs://{}/{}", bucket_name, object_name);
            tracing::info!("Successfully uploaded backup to GCS: {}", backup_url);
            Ok(backup_url)
        }

        #[cfg(not(feature = "gcs"))]
        {
            Err(CollabError::Internal(
                "GCS backup requires 'gcs' feature to be enabled. Add 'gcs' feature to mockforge-collab in Cargo.toml.".to_string(),
            ))
        }
    }

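    /// Deletes an object referenced by an `s3://bucket/key` URL. Requires the
    /// `s3` feature; credentials come from `storage_config` or the environment.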
    async fn delete_from_s3(
        &self,
        backup_url: &str,
        storage_config: Option<&serde_json::Value>,
    ) -> Result<()> {
        #[cfg(feature = "s3")]
        {
            use aws_config::SdkConfig;
            use aws_sdk_s3::config::{Credentials, Region};
            use aws_sdk_s3::Client as S3Client;

            if !backup_url.starts_with("s3://") {
                return Err(CollabError::Internal(format!(
                    "Invalid S3 URL format: {backup_url}"
                )));
            }

            let url_parts: Vec<&str> = backup_url
                .strip_prefix("s3://")
                .ok_or_else(|| {
                    CollabError::Internal(format!("Invalid S3 URL format: {backup_url}"))
                })?
                .splitn(2, '/')
                .collect();
            if url_parts.len() != 2 {
                return Err(CollabError::Internal(format!(
                    "Invalid S3 URL format: {backup_url}"
                )));
            }

            let bucket = url_parts[0];
            let key = url_parts[1];

            let aws_config: SdkConfig = if let Some(config) = storage_config {
                let access_key_id =
                    config.get("access_key_id").and_then(|v| v.as_str()).ok_or_else(|| {
                        CollabError::Internal(
                            "S3 access_key_id not found in storage_config".to_string(),
                        )
                    })?;

                let secret_access_key =
                    config.get("secret_access_key").and_then(|v| v.as_str()).ok_or_else(|| {
                        CollabError::Internal(
                            "S3 secret_access_key not found in storage_config".to_string(),
                        )
                    })?;

                let region_str =
                    config.get("region").and_then(|v| v.as_str()).unwrap_or("us-east-1");

                let credentials =
                    Credentials::new(access_key_id, secret_access_key, None, None, "mockforge");

                aws_config::ConfigLoader::default()
                    .credentials_provider(credentials)
                    .region(Region::new(region_str.to_string()))
                    .load()
                    .await
            } else {
                aws_config::load_from_env().await
            };

            let client = S3Client::new(&aws_config);

            client
                .delete_object()
                .bucket(bucket)
                .key(key)
                .send()
                .await
                .map_err(|e| CollabError::Internal(format!("Failed to delete S3 object: {e}")))?;

            tracing::info!("Successfully deleted S3 object: {}", backup_url);
            Ok(())
        }

        #[cfg(not(feature = "s3"))]
        {
            // Parameters are only used when the `s3` feature is enabled.
            let _ = (backup_url, storage_config);
            Err(CollabError::Internal(
                "S3 deletion requires 's3' feature to be enabled. Add 's3' feature to mockforge-collab in Cargo.toml.".to_string(),
            ))
        }
    }

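    /// Deletes a blob referenced by an
    /// `https://{account}.blob.core.windows.net/{container}/{blob}` URL.
    /// Requires the `azure` feature.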
    async fn delete_from_azure(
        &self,
        backup_url: &str,
        storage_config: Option<&serde_json::Value>,
    ) -> Result<()> {
        #[cfg(feature = "azure")]
        {
            use azure_identity::{DefaultAzureCredential, TokenCredentialOptions};
            use azure_storage::StorageCredentials;
            use azure_storage_blobs::prelude::*;
            use std::sync::Arc;

            if !backup_url.contains("blob.core.windows.net") {
                return Err(CollabError::Internal(format!(
                    "Invalid Azure Blob URL format: {backup_url}"
                )));
            }

            let url = url::Url::parse(backup_url)
                .map_err(|e| CollabError::Internal(format!("Invalid Azure URL: {e}")))?;

            let hostname = url
                .host_str()
                .ok_or_else(|| CollabError::Internal("Invalid Azure hostname".to_string()))?;
            let account_name = hostname.split('.').next().ok_or_else(|| {
                CollabError::Internal("Invalid Azure hostname format".to_string())
            })?;

            let path = url.path();
            let path_parts: Vec<&str> = path.splitn(3, '/').filter(|s| !s.is_empty()).collect();
            if path_parts.len() < 2 {
                return Err(CollabError::Internal(format!("Invalid Azure blob path: {path}")));
            }

            let container_name = path_parts[0].to_string();
            let blob_name = path_parts[1..].join("/");
            let account_name = account_name.to_string();

            let create_default_creds = || -> Result<StorageCredentials> {
                let credential = Arc::new(
                    DefaultAzureCredential::create(TokenCredentialOptions::default()).map_err(
                        |e| {
                            CollabError::Internal(format!(
                                "Failed to create Azure credentials: {e}"
                            ))
                        },
                    )?,
                );
                Ok(StorageCredentials::token_credential(credential))
            };

            let storage_credentials = if let Some(config) = storage_config {
                if let Some(account_key) =
                    config.get("account_key").and_then(|v| v.as_str()).map(|s| s.to_string())
                {
                    StorageCredentials::access_key(account_name.clone(), account_key)
                } else if let Some(sas_token) =
                    config.get("sas_token").and_then(|v| v.as_str()).map(|s| s.to_string())
                {
                    StorageCredentials::sas_token(sas_token)
                        .map_err(|e| CollabError::Internal(format!("Invalid SAS token: {e}")))?
                } else {
                    create_default_creds()?
                }
            } else {
                create_default_creds()?
            };

            let blob_client = ClientBuilder::new(account_name, storage_credentials)
                .blob_client(&container_name, &blob_name);

            blob_client.delete().await.map_err(|e| {
                CollabError::Internal(format!("Failed to delete Azure blob: {e}"))
            })?;

            tracing::info!("Successfully deleted Azure blob: {}", backup_url);
            Ok(())
        }

        #[cfg(not(feature = "azure"))]
        {
            // Parameters are only used when the `azure` feature is enabled.
            let _ = (backup_url, storage_config);
            Err(CollabError::Internal(
                "Azure deletion requires 'azure' feature to be enabled. Add 'azure' feature to mockforge-collab in Cargo.toml.".to_string(),
            ))
        }
    }

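    /// Deletes an object referenced by a `gs://bucket/object` URL. Requires the
    /// `gcs` feature.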
    async fn delete_from_gcs(
        &self,
        backup_url: &str,
        _storage_config: Option<&serde_json::Value>,
    ) -> Result<()> {
        #[cfg(feature = "gcs")]
        {
            use google_cloud_storage::client::StorageControl;

            if !backup_url.starts_with("gs://") {
                return Err(CollabError::Internal(format!(
                    "Invalid GCS URL format: {backup_url}"
                )));
            }

            let url_parts: Vec<&str> = backup_url
                .strip_prefix("gs://")
                .ok_or_else(|| {
                    CollabError::Internal(format!("Invalid GCS URL format: {backup_url}"))
                })?
                .splitn(2, '/')
                .collect();
            if url_parts.len() != 2 {
                return Err(CollabError::Internal(format!(
                    "Invalid GCS URL format (expected gs://bucket/object): {backup_url}"
                )));
            }

            let bucket_name = url_parts[0];
            let object_name = url_parts[1];

            let client = StorageControl::builder().build().await.map_err(|e| {
                CollabError::Internal(format!("Failed to create GCS client: {e}"))
            })?;

            client
                .delete_object()
                .set_bucket(format!("projects/_/buckets/{bucket_name}"))
                .set_object(object_name)
                .send()
                .await
                .map_err(|e| {
                    CollabError::Internal(format!("Failed to delete GCS object: {e}"))
                })?;

            tracing::info!("Successfully deleted GCS object: {}", backup_url);
            Ok(())
        }

        #[cfg(not(feature = "gcs"))]
        {
            // `backup_url` is only used when the `gcs` feature is enabled.
            let _ = backup_url;
            Err(CollabError::Internal(
                "GCS deletion requires 'gcs' feature to be enabled. Add 'gcs' feature to mockforge-collab in Cargo.toml.".to_string(),
            ))
        }
    }

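    /// Returns the current state of a workspace as JSON via the core bridge.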
    async fn get_workspace_data(&self, workspace_id: Uuid) -> Result<serde_json::Value> {
        let team_workspace = self.workspace_service.get_workspace(workspace_id).await?;

        self.core_bridge.get_workspace_state_json(&team_workspace)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_storage_backend_equality() {
        assert_eq!(StorageBackend::Local, StorageBackend::Local);
        assert_eq!(StorageBackend::S3, StorageBackend::S3);
        assert_eq!(StorageBackend::Azure, StorageBackend::Azure);
        assert_eq!(StorageBackend::Gcs, StorageBackend::Gcs);
        assert_eq!(StorageBackend::Custom, StorageBackend::Custom);

        assert_ne!(StorageBackend::Local, StorageBackend::S3);
    }

    #[test]
    fn test_storage_backend_serialization() {
        let backend = StorageBackend::S3;
        let json = serde_json::to_string(&backend).unwrap();
        let deserialized: StorageBackend = serde_json::from_str(&json).unwrap();

        assert_eq!(backend, deserialized);
    }

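    // A small additional check, grounded in the `#[serde(rename_all =
    // "lowercase")]` attribute on the enum: variants serialize to lowercase
    // strings, matching the `storage_backend` values that `list_backups` and
    // `get_backup` read back from the database.
    #[test]
    fn test_storage_backend_lowercase_json() {
        assert_eq!(serde_json::to_string(&StorageBackend::S3).unwrap(), "\"s3\"");
        assert_eq!(serde_json::to_string(&StorageBackend::Local).unwrap(), "\"local\"");
        assert_eq!(serde_json::to_string(&StorageBackend::Gcs).unwrap(), "\"gcs\"");
    }
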
    #[test]
    fn test_storage_backend_all_variants() {
        let backends = vec![
            StorageBackend::Local,
            StorageBackend::S3,
            StorageBackend::Azure,
            StorageBackend::Gcs,
            StorageBackend::Custom,
        ];

        for backend in backends {
            let json = serde_json::to_string(&backend).unwrap();
            let deserialized: StorageBackend = serde_json::from_str(&json).unwrap();
            assert_eq!(backend, deserialized);
        }
    }

    #[test]
    fn test_workspace_backup_new() {
        let workspace_id = Uuid::new_v4();
        let created_by = Uuid::new_v4();
        let backup_url = "s3://bucket/backup.yaml".to_string();
        let size_bytes = 1024;

        let backup = WorkspaceBackup::new(
            workspace_id,
            backup_url.clone(),
            StorageBackend::S3,
            size_bytes,
            created_by,
        );

        assert_eq!(backup.workspace_id, workspace_id);
        assert_eq!(backup.backup_url, backup_url);
        assert_eq!(backup.storage_backend, StorageBackend::S3);
        assert_eq!(backup.size_bytes, size_bytes);
        assert_eq!(backup.created_by, created_by);
        assert_eq!(backup.backup_format, "yaml");
        assert!(!backup.encrypted);
        assert!(backup.commit_id.is_none());
        assert!(backup.expires_at.is_none());
        assert!(backup.storage_config.is_none());
    }

    #[test]
    fn test_workspace_backup_clone() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            512,
            Uuid::new_v4(),
        );

        let cloned = backup.clone();

        assert_eq!(backup.id, cloned.id);
        assert_eq!(backup.workspace_id, cloned.workspace_id);
        assert_eq!(backup.backup_url, cloned.backup_url);
        assert_eq!(backup.size_bytes, cloned.size_bytes);
    }

    #[test]
    fn test_workspace_backup_serialization() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            256,
            Uuid::new_v4(),
        );

        let json = serde_json::to_string(&backup).unwrap();
        let deserialized: WorkspaceBackup = serde_json::from_str(&json).unwrap();

        assert_eq!(backup.id, deserialized.id);
        assert_eq!(backup.workspace_id, deserialized.workspace_id);
        assert_eq!(backup.storage_backend, deserialized.storage_backend);
    }

    #[test]
    fn test_workspace_backup_with_commit() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            128,
            Uuid::new_v4(),
        );

        let commit_id = Uuid::new_v4();
        backup.commit_id = Some(commit_id);

        assert_eq!(backup.commit_id, Some(commit_id));
    }

    #[test]
    fn test_workspace_backup_with_encryption() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::S3,
            2048,
            Uuid::new_v4(),
        );

        backup.encrypted = true;

        assert!(backup.encrypted);
    }

    #[test]
    fn test_workspace_backup_with_expiration() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Azure,
            512,
            Uuid::new_v4(),
        );

        let expires_at = Utc::now() + chrono::Duration::days(30);
        backup.expires_at = Some(expires_at);

        assert!(backup.expires_at.is_some());
    }

    #[test]
    fn test_workspace_backup_with_storage_config() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::S3,
            1024,
            Uuid::new_v4(),
        );

        let config = serde_json::json!({
            "region": "us-east-1",
            "bucket": "my-bucket"
        });
        backup.storage_config = Some(config.clone());

        assert_eq!(backup.storage_config, Some(config));
    }

    #[test]
    fn test_workspace_backup_different_formats() {
        let mut backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.json".to_string(),
            StorageBackend::Local,
            256,
            Uuid::new_v4(),
        );

        // The constructor defaults to yaml; callers override the format afterwards.
        assert_eq!(backup.backup_format, "yaml");
        backup.backup_format = "json".to_string();
        assert_eq!(backup.backup_format, "json");
    }

    #[test]
    fn test_storage_backend_debug() {
        let backend = StorageBackend::S3;
        let debug_str = format!("{:?}", backend);
        assert!(debug_str.contains("S3"));
    }

    #[test]
    fn test_workspace_backup_debug() {
        let backup = WorkspaceBackup::new(
            Uuid::new_v4(),
            "backup.yaml".to_string(),
            StorageBackend::Local,
            100,
            Uuid::new_v4(),
        );

        let debug_str = format!("{:?}", backup);
        assert!(debug_str.contains("WorkspaceBackup"));
    }
}