use crate::core_bridge::CoreBridge;
use crate::error::{CollabError, Result};
use crate::history::VersionControl;
use crate::workspace::WorkspaceService;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use sqlx::{Pool, Sqlite};
use std::path::Path;
use std::sync::Arc;
use uuid::Uuid;
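
/// Where a backup artifact is stored. Only `Local` is implemented today;
/// the remaining variants are declared ahead of time and rejected at
/// runtime by `BackupService::backup_workspace`.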
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, sqlx::Type)]
#[sqlx(type_name = "storage_backend", rename_all = "lowercase")]
#[serde(rename_all = "lowercase")]
pub enum StorageBackend {
    Local,
    S3,
    Azure,
    Gcs,
    Custom,
}
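
/// A single backup of a workspace, as recorded in the `workspace_backups`
/// table. For the `Local` backend, `backup_url` is a filesystem path.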
#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct WorkspaceBackup {
    pub id: Uuid,
    pub workspace_id: Uuid,
    pub backup_url: String,
    pub storage_backend: StorageBackend,
    pub storage_config: Option<serde_json::Value>,
    pub size_bytes: i64,
    pub backup_format: String,
    pub encrypted: bool,
    pub commit_id: Option<Uuid>,
    pub created_at: DateTime<Utc>,
    pub created_by: Uuid,
    pub expires_at: Option<DateTime<Utc>>,
}

impl WorkspaceBackup {
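    /// Creates a backup record with a generated id and `yaml`/unencrypted
    /// defaults; callers may override individual fields afterwards.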
    pub fn new(
        workspace_id: Uuid,
        backup_url: String,
        storage_backend: StorageBackend,
        size_bytes: i64,
        created_by: Uuid,
    ) -> Self {
        Self {
            id: Uuid::new_v4(),
            workspace_id,
            backup_url,
            storage_backend,
            storage_config: None,
            size_bytes,
            backup_format: "yaml".to_string(),
            encrypted: false,
            commit_id: None,
            created_at: Utc::now(),
            created_by,
            expires_at: None,
        }
    }
}
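
/// Creates, restores, lists, and deletes workspace backups. Local disk is
/// the only storage backend currently wired up.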
pub struct BackupService {
    db: Pool<Sqlite>,
    version_control: VersionControl,
    local_backup_dir: Option<String>,
    core_bridge: Arc<CoreBridge>,
    workspace_service: Arc<WorkspaceService>,
}

impl BackupService {
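    /// Builds the service. `local_backup_dir` must be set for the `Local`
    /// backend to work; the pool is shared with an owned `VersionControl`.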
    pub fn new(
        db: Pool<Sqlite>,
        local_backup_dir: Option<String>,
        core_bridge: Arc<CoreBridge>,
        workspace_service: Arc<WorkspaceService>,
    ) -> Self {
        Self {
            db: db.clone(),
            version_control: VersionControl::new(db),
            local_backup_dir,
            core_bridge,
            workspace_service,
        }
    }
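
    /// Serializes the workspace state to YAML or JSON, writes it to the
    /// selected storage backend, and records the result in
    /// `workspace_backups`. Non-local backends currently return an error.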
    pub async fn backup_workspace(
        &self,
        workspace_id: Uuid,
        user_id: Uuid,
        storage_backend: StorageBackend,
        format: Option<String>,
        commit_id: Option<Uuid>,
    ) -> Result<WorkspaceBackup> {
        let workspace_data = self.get_workspace_data(workspace_id).await?;

        let backup_format = format.unwrap_or_else(|| "yaml".to_string());
        let serialized = match backup_format.as_str() {
            "yaml" => serde_yaml::to_string(&workspace_data).map_err(|e| {
                CollabError::Internal(format!("Failed to serialize to YAML: {}", e))
            })?,
            "json" => serde_json::to_string_pretty(&workspace_data).map_err(|e| {
                CollabError::Internal(format!("Failed to serialize to JSON: {}", e))
            })?,
            _ => {
                return Err(CollabError::InvalidInput(format!(
                    "Unsupported backup format: {}",
                    backup_format
                )));
            }
        };

        let size_bytes = serialized.len() as i64;

        let backup_url = match storage_backend {
            StorageBackend::Local => {
                self.save_to_local(workspace_id, &serialized, &backup_format).await?
            }
            StorageBackend::S3 => {
                return Err(CollabError::Internal("S3 backup not yet implemented".to_string()));
            }
            StorageBackend::Azure => {
                return Err(CollabError::Internal("Azure backup not yet implemented".to_string()));
            }
            StorageBackend::Gcs => {
                return Err(CollabError::Internal("GCS backup not yet implemented".to_string()));
            }
            StorageBackend::Custom => {
                return Err(CollabError::Internal(
                    "Custom storage backend not yet implemented".to_string(),
                ));
            }
        };

        let mut backup =
            WorkspaceBackup::new(workspace_id, backup_url, storage_backend, size_bytes, user_id);
        backup.backup_format = backup_format;
        backup.commit_id = commit_id;

        // Store UUIDs and timestamps as TEXT so the readers below, which
        // parse them back from strings, round-trip cleanly.
        let id = backup.id.to_string();
        let workspace_id_str = backup.workspace_id.to_string();
        let storage_config = backup.storage_config.as_ref().map(|v| v.to_string());
        let commit_id_str = backup.commit_id.map(|c| c.to_string());
        let created_at = backup.created_at.to_rfc3339();
        let created_by = backup.created_by.to_string();
        let expires_at = backup.expires_at.map(|t| t.to_rfc3339());

        sqlx::query!(
            r#"
            INSERT INTO workspace_backups (
                id, workspace_id, backup_url, storage_backend, storage_config,
                size_bytes, backup_format, encrypted, commit_id, created_at, created_by, expires_at
            )
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            "#,
            id,
            workspace_id_str,
            backup.backup_url,
            backup.storage_backend,
            storage_config,
            backup.size_bytes,
            backup.backup_format,
            backup.encrypted,
            commit_id_str,
            created_at,
            created_by,
            expires_at
        )
        .execute(&self.db)
        .await?;

        Ok(backup)
    }
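
    /// Loads a backup (local storage only), deserializes it, re-imports the
    /// workspace through the core bridge, writes it to disk, and, when the
    /// backup points at a commit, rolls version history back to that commit.
    /// Returns the id of the restored workspace. The `user_id` argument is
    /// accepted for API symmetry but is not currently used.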
    pub async fn restore_workspace(
        &self,
        backup_id: Uuid,
        target_workspace_id: Option<Uuid>,
        _user_id: Uuid,
    ) -> Result<Uuid> {
        let backup = self.get_backup(backup_id).await?;

        let backup_data = match backup.storage_backend {
            StorageBackend::Local => self.load_from_local(&backup.backup_url).await?,
            _ => {
                return Err(CollabError::Internal(
                    "Only local backups are supported for restore".to_string(),
                ));
            }
        };

        let workspace_data: serde_json::Value = match backup.backup_format.as_str() {
            "yaml" => serde_yaml::from_str(&backup_data)
                .map_err(|e| CollabError::Internal(format!("Failed to deserialize YAML: {}", e)))?,
            "json" => serde_json::from_str(&backup_data)
                .map_err(|e| CollabError::Internal(format!("Failed to deserialize JSON: {}", e)))?,
            _ => {
                return Err(CollabError::Internal(format!(
                    "Unsupported backup format: {}",
                    backup.backup_format
                )));
            }
        };

        // The backup row already carries the original owner; no need to
        // fetch it a second time.
        let owner_id = backup.created_by;

        let restored_team_workspace = self
            .core_bridge
            .import_workspace_from_backup(&workspace_data, owner_id, None)
            .await?;

        let restored_workspace_id = target_workspace_id.unwrap_or(backup.workspace_id);

        // Re-home the imported workspace when restoring into a different id.
        let team_workspace = if restored_workspace_id != backup.workspace_id {
            let mut new_workspace = restored_team_workspace;
            new_workspace.id = restored_workspace_id;
            new_workspace
        } else {
            restored_team_workspace
        };

        self.core_bridge.save_workspace_to_disk(&team_workspace).await?;

        if let Some(commit_id) = backup.commit_id {
            self.version_control.restore_to_commit(restored_workspace_id, commit_id).await?;
        }

        Ok(restored_workspace_id)
    }
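
    /// Returns up to `limit` (default 100) backups for a workspace, newest
    /// first.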
    pub async fn list_backups(
        &self,
        workspace_id: Uuid,
        limit: Option<i32>,
    ) -> Result<Vec<WorkspaceBackup>> {
        let limit = i64::from(limit.unwrap_or(100));
        let workspace_id_str = workspace_id.to_string();

        let rows = sqlx::query!(
            r#"
            SELECT
                id,
                workspace_id,
                backup_url,
                storage_backend as "storage_backend: StorageBackend",
                storage_config,
                size_bytes,
                backup_format,
                encrypted,
                commit_id,
                created_at,
                created_by,
                expires_at
            FROM workspace_backups
            WHERE workspace_id = ?
            ORDER BY created_at DESC
            LIMIT ?
            "#,
            workspace_id_str,
            limit
        )
        .fetch_all(&self.db)
        .await?;

        rows.into_iter()
            .map(|row| {
                Ok(WorkspaceBackup {
                    id: Uuid::parse_str(&row.id)
                        .map_err(|e| CollabError::Internal(format!("Invalid UUID: {}", e)))?,
                    workspace_id: Uuid::parse_str(&row.workspace_id)
                        .map_err(|e| CollabError::Internal(format!("Invalid UUID: {}", e)))?,
                    backup_url: row.backup_url,
                    storage_backend: row.storage_backend,
                    storage_config: row.storage_config.and_then(|s| serde_json::from_str(&s).ok()),
                    size_bytes: row.size_bytes,
                    backup_format: row.backup_format,
                    encrypted: row.encrypted != 0,
                    commit_id: row.commit_id.and_then(|s| Uuid::parse_str(&s).ok()),
                    created_at: chrono::DateTime::parse_from_rfc3339(&row.created_at)
                        .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {}", e)))?
                        .with_timezone(&chrono::Utc),
                    created_by: Uuid::parse_str(&row.created_by)
                        .map_err(|e| CollabError::Internal(format!("Invalid UUID: {}", e)))?,
                    expires_at: row
                        .expires_at
                        .map(|s| {
                            chrono::DateTime::parse_from_rfc3339(&s)
                                .map(|dt| dt.with_timezone(&chrono::Utc))
                                .map_err(|e| {
                                    CollabError::Internal(format!("Invalid timestamp: {}", e))
                                })
                        })
                        .transpose()?,
                })
            })
            .collect()
    }
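
    /// Fetches one backup row by id, or errors if it does not exist.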
    pub async fn get_backup(&self, backup_id: Uuid) -> Result<WorkspaceBackup> {
        let backup_id_str = backup_id.to_string();
        let row = sqlx::query!(
            r#"
            SELECT
                id,
                workspace_id,
                backup_url,
                storage_backend as "storage_backend: StorageBackend",
                storage_config,
                size_bytes,
                backup_format,
                encrypted,
                commit_id,
                created_at,
                created_by,
                expires_at
            FROM workspace_backups
            WHERE id = ?
            "#,
            backup_id_str
        )
        .fetch_optional(&self.db)
        .await?
        .ok_or_else(|| CollabError::Internal(format!("Backup not found: {}", backup_id)))?;

        Ok(WorkspaceBackup {
            id: Uuid::parse_str(&row.id)
                .map_err(|e| CollabError::Internal(format!("Invalid UUID: {}", e)))?,
            workspace_id: Uuid::parse_str(&row.workspace_id)
                .map_err(|e| CollabError::Internal(format!("Invalid UUID: {}", e)))?,
            backup_url: row.backup_url,
            storage_backend: row.storage_backend,
            storage_config: row.storage_config.and_then(|s| serde_json::from_str(&s).ok()),
            size_bytes: row.size_bytes,
            backup_format: row.backup_format,
            encrypted: row.encrypted != 0,
            commit_id: row.commit_id.and_then(|s| Uuid::parse_str(&s).ok()),
            created_at: chrono::DateTime::parse_from_rfc3339(&row.created_at)
                .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {}", e)))?
                .with_timezone(&chrono::Utc),
            created_by: Uuid::parse_str(&row.created_by)
                .map_err(|e| CollabError::Internal(format!("Invalid UUID: {}", e)))?,
            expires_at: row
                .expires_at
                .map(|s| {
                    chrono::DateTime::parse_from_rfc3339(&s)
                        .map(|dt| dt.with_timezone(&chrono::Utc))
                        .map_err(|e| CollabError::Internal(format!("Invalid timestamp: {}", e)))
                })
                .transpose()?,
        })
    }
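
    /// Deletes the backup artifact (for local storage) and its metadata row.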
    pub async fn delete_backup(&self, backup_id: Uuid) -> Result<()> {
        let backup = self.get_backup(backup_id).await?;

        match backup.storage_backend {
            StorageBackend::Local => {
                if Path::new(&backup.backup_url).exists() {
                    tokio::fs::remove_file(&backup.backup_url).await.map_err(|e| {
                        CollabError::Internal(format!("Failed to delete backup file: {}", e))
                    })?;
                }
            }
            // Remote backends are not implemented; for them only the
            // metadata row below is removed.
            _ => {}
        }

        let backup_id_str = backup_id.to_string();
        sqlx::query!(
            r#"
            DELETE FROM workspace_backups
            WHERE id = ?
            "#,
            backup_id_str
        )
        .execute(&self.db)
        .await?;

        Ok(())
    }
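
    /// Writes serialized backup data to a timestamped file under the
    /// configured local backup directory and returns the file path.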
    async fn save_to_local(&self, workspace_id: Uuid, data: &str, format: &str) -> Result<String> {
        let backup_dir = self.local_backup_dir.as_ref().ok_or_else(|| {
            CollabError::Internal("Local backup directory not configured".to_string())
        })?;

        tokio::fs::create_dir_all(backup_dir).await.map_err(|e| {
            CollabError::Internal(format!("Failed to create backup directory: {}", e))
        })?;

        let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
        let filename = format!("workspace_{}_{}.{}", workspace_id, timestamp, format);
        let backup_path = Path::new(backup_dir).join(&filename);

        tokio::fs::write(&backup_path, data)
            .await
            .map_err(|e| CollabError::Internal(format!("Failed to write backup file: {}", e)))?;

        Ok(backup_path.to_string_lossy().to_string())
    }
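
    /// Reads a local backup file into memory.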
    async fn load_from_local(&self, backup_url: &str) -> Result<String> {
        tokio::fs::read_to_string(backup_url)
            .await
            .map_err(|e| CollabError::Internal(format!("Failed to read backup file: {}", e)))
    }
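
    /// Resolves the workspace and asks the core bridge for its full state
    /// as JSON, which is what gets serialized into a backup.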
    async fn get_workspace_data(&self, workspace_id: Uuid) -> Result<serde_json::Value> {
        let team_workspace = self.workspace_service.get_workspace(workspace_id).await?;

        self.core_bridge.get_workspace_state_json(&team_workspace)
    }
}
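
// A minimal sketch of how `WorkspaceBackup::new` is expected to behave; it
// exercises only the constructor defaults defined above, not the database
// or any storage backend. The path below is arbitrary example data.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn new_backup_uses_yaml_unencrypted_defaults() {
        let workspace_id = Uuid::new_v4();
        let created_by = Uuid::new_v4();
        let backup = WorkspaceBackup::new(
            workspace_id,
            "/tmp/backups/example.yaml".to_string(),
            StorageBackend::Local,
            42,
            created_by,
        );

        // Defaults set in `WorkspaceBackup::new`.
        assert_eq!(backup.workspace_id, workspace_id);
        assert_eq!(backup.created_by, created_by);
        assert_eq!(backup.backup_format, "yaml");
        assert!(!backup.encrypted);
        assert!(backup.commit_id.is_none());
        assert!(backup.expires_at.is_none());
    }
}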