use actix_web::{web, HttpResponse};
use chrono::{DateTime, Duration, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use uuid::Uuid;
/// A recurring backup job: when it runs (cron), what it covers, where the
/// output goes, and how long results are retained.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupSchedule {
/// Server-assigned UUID; any client-supplied value is overwritten on create.
pub id: String,
/// Human-readable label.
pub name: String,
/// When false the schedule is paused; re-enabling recomputes `next_run`.
pub enabled: bool,
/// Cron expression controlling run times.
/// NOTE(review): `calculate_next_run` currently ignores it — confirm intent.
pub cron_expression: String,
pub backup_type: BackupType,
pub scope: BackupScope,
pub destination: BackupDestination,
pub retention: BackupRetention,
/// Set by the server on create.
pub created_at: DateTime<Utc>,
/// Refreshed by the server on every mutation.
pub updated_at: DateTime<Utc>,
/// Timestamp of the most recent execution, if any.
pub last_run: Option<DateTime<Utc>>,
/// Next planned execution; `None` when disabled or not yet computed.
pub next_run: Option<DateTime<Utc>>,
/// Optional owner id; used as a filter by `list_schedules`.
pub owner_id: Option<String>,
}
/// Kind of backup a run produces; serialized as snake_case strings.
/// NOTE(review): the service in this module records the type but does not
/// visibly branch on it — confirm downstream handling before relying on it.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)]
#[serde(rename_all = "snake_case")]
pub enum BackupType {
// `Full` is used when a trigger request omits the type (`unwrap_or_default`).
#[default]
Full,
Incremental,
Differential,
Snapshot,
}
/// Selects which data categories a backup covers. The boolean flags pick
/// whole categories; the id lists narrow to specific items.
/// NOTE(review): `execute_backup` is a stub and currently ignores the scope.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupScope {
pub sessions: bool,
pub workspaces: bool,
pub settings: bool,
pub audit_logs: bool,
pub analytics: bool,
// Empty list presumably means "all sessions" — confirm against executor.
pub session_ids: Vec<String>,
// Empty list presumably means "all workspaces" — confirm against executor.
pub workspace_ids: Vec<String>,
}
impl Default for BackupScope {
fn default() -> Self {
Self {
sessions: true,
workspaces: true,
settings: true,
audit_logs: false,
analytics: false,
session_ids: Vec::new(),
workspace_ids: Vec::new(),
}
}
}
/// Where backup archives are written. Internally tagged for JSON as
/// `{"type": "s3", ...}` etc. (snake_case variant names).
///
/// Credential fields are marked `skip_serializing`, so they are accepted on
/// input (deserialize) but never echoed back in API responses — schedules
/// returned to clients have these fields omitted.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum BackupDestination {
/// Local filesystem directory.
Local { path: String },
/// Amazon S3 (or compatible) bucket.
S3 {
bucket: String,
prefix: String,
region: String,
// Write-only secret: deserialized from requests, never serialized out.
#[serde(skip_serializing)]
access_key_id: Option<String>,
#[serde(skip_serializing)]
secret_access_key: Option<String>,
},
/// Azure Blob Storage container.
AzureBlob {
container: String,
prefix: String,
#[serde(skip_serializing)]
connection_string: Option<String>,
},
/// Google Cloud Storage bucket.
Gcs {
bucket: String,
prefix: String,
#[serde(skip_serializing)]
credentials_json: Option<String>,
},
/// SFTP server target.
Sftp {
host: String,
port: u16,
path: String,
username: String,
#[serde(skip_serializing)]
private_key: Option<String>,
},
/// Generic WebDAV endpoint.
WebDav {
url: String,
username: Option<String>,
#[serde(skip_serializing)]
password: Option<String>,
},
}
/// How many backups to keep per tier and overall limits.
/// NOTE(review): nothing in this module enforces retention yet — confirm
/// where (or whether) pruning happens.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupRetention {
pub keep_daily: u32,
pub keep_weekly: u32,
pub keep_monthly: u32,
/// Floor below which pruning must never delete, regardless of age.
pub min_backups: u32,
/// Total storage budget in bytes; 0 presumably means "no cap" — confirm.
pub max_storage_bytes: u64,
}
impl Default for BackupRetention {
fn default() -> Self {
Self {
keep_daily: 7,
keep_weekly: 4,
keep_monthly: 12,
min_backups: 3,
max_storage_bytes: 0,
}
}
}
/// Record of a single backup run (scheduled or manually triggered).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BackupRecord {
pub id: String,
/// Originating schedule, or `None` for ad-hoc backups.
pub schedule_id: Option<String>,
pub backup_type: BackupType,
pub status: BackupStatus,
pub started_at: DateTime<Utc>,
/// Set when the run finishes (success or failure).
pub completed_at: Option<DateTime<Utc>>,
/// Wall-clock duration; populated alongside `completed_at`.
pub duration_secs: Option<u64>,
pub size_bytes: u64,
pub item_count: usize,
/// Full destination URI/path, e.g. `s3://bucket/prefix/backup_<ts>.tar.gz`.
pub destination_path: String,
/// Archive checksum, e.g. `sha256:<hex>`, when available.
pub checksum: Option<String>,
/// Failure description when `status` is `Failed`.
pub error: Option<String>,
/// Free-form extra data attached to the run.
pub metadata: HashMap<String, serde_json::Value>,
}
/// Lifecycle state of a single backup run; serialized as snake_case strings.
///
/// `Eq` and `Hash` are derived alongside `PartialEq` (all variants are unit
/// variants, so equality is total) — this also allows use as a map/set key.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
#[serde(rename_all = "snake_case")]
pub enum BackupStatus {
    /// Created but not yet started.
    Pending,
    /// Currently executing.
    Running,
    /// Finished successfully.
    Completed,
    /// Finished with an error (see `BackupRecord::error`).
    Failed,
    /// Aborted before completion.
    Cancelled,
    /// Integrity verification in progress.
    Verifying,
    /// Integrity verification passed.
    Verified,
}
/// Request body for `POST /backup/restore`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestoreRequest {
/// Id of the backup to restore from; must be Completed or Verified.
pub backup_id: String,
pub scope: RestoreScope,
pub options: RestoreOptions,
}
/// Which data categories of a backup to restore.
/// NOTE(review): `restore` is a stub and currently ignores the scope.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestoreScope {
pub sessions: bool,
pub workspaces: bool,
pub settings: bool,
// Presumably narrows to specific sessions when non-empty — confirm.
pub session_ids: Vec<String>,
}
/// Behavioral flags for a restore operation.
/// NOTE(review): `restore` is a stub and currently ignores these options.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestoreOptions {
/// Overwrite existing items with the restored copies.
pub overwrite: bool,
/// Assign fresh ids to restored items instead of reusing originals.
pub create_new_ids: bool,
/// Restore into this workspace instead of the items' original one.
pub target_workspace_id: Option<String>,
}
/// Outcome summary returned by a restore operation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RestoreResult {
pub success: bool,
pub items_restored: usize,
pub items_skipped: usize,
/// Per-item error messages for partial failures.
pub errors: Vec<String>,
pub duration_secs: u64,
}
/// In-memory store of schedules and backup records, keyed by id and guarded
/// by `std::sync::RwLock`s.
/// NOTE(review): state is not persisted anywhere — everything is lost on
/// process restart; confirm whether durable storage is planned.
pub struct BackupService {
schedules: std::sync::RwLock<HashMap<String, BackupSchedule>>,
backups: std::sync::RwLock<HashMap<String, BackupRecord>>,
}
impl BackupService {
pub fn new() -> Self {
Self {
schedules: std::sync::RwLock::new(HashMap::new()),
backups: std::sync::RwLock::new(HashMap::new()),
}
}
pub async fn create_schedule(
&self,
schedule: BackupSchedule,
) -> Result<BackupSchedule, String> {
let mut schedule = schedule;
schedule.id = Uuid::new_v4().to_string();
schedule.created_at = Utc::now();
schedule.updated_at = Utc::now();
schedule.next_run = self.calculate_next_run(&schedule.cron_expression);
let mut schedules = self.schedules.write().map_err(|e| e.to_string())?;
schedules.insert(schedule.id.clone(), schedule.clone());
Ok(schedule)
}
pub async fn list_schedules(
&self,
owner_id: Option<&str>,
) -> Result<Vec<BackupSchedule>, String> {
let schedules = self.schedules.read().map_err(|e| e.to_string())?;
let result: Vec<_> = schedules
.values()
.filter(|s| owner_id.map_or(true, |id| s.owner_id.as_deref() == Some(id)))
.cloned()
.collect();
Ok(result)
}
pub async fn get_schedule(&self, id: &str) -> Result<BackupSchedule, String> {
let schedules = self.schedules.read().map_err(|e| e.to_string())?;
schedules
.get(id)
.cloned()
.ok_or_else(|| format!("Schedule not found: {}", id))
}
pub async fn update_schedule(
&self,
id: &str,
updates: ScheduleUpdate,
) -> Result<BackupSchedule, String> {
let mut schedules = self.schedules.write().map_err(|e| e.to_string())?;
let schedule = schedules
.get_mut(id)
.ok_or_else(|| format!("Schedule not found: {}", id))?;
if let Some(name) = updates.name {
schedule.name = name;
}
if let Some(enabled) = updates.enabled {
schedule.enabled = enabled;
}
if let Some(cron) = updates.cron_expression {
schedule.cron_expression = cron.clone();
schedule.next_run = self.calculate_next_run(&cron);
}
if let Some(backup_type) = updates.backup_type {
schedule.backup_type = backup_type;
}
if let Some(scope) = updates.scope {
schedule.scope = scope;
}
if let Some(destination) = updates.destination {
schedule.destination = destination;
}
if let Some(retention) = updates.retention {
schedule.retention = retention;
}
schedule.updated_at = Utc::now();
Ok(schedule.clone())
}
pub async fn delete_schedule(&self, id: &str) -> Result<(), String> {
let mut schedules = self.schedules.write().map_err(|e| e.to_string())?;
schedules
.remove(id)
.map(|_| ())
.ok_or_else(|| format!("Schedule not found: {}", id))
}
pub async fn set_schedule_enabled(
&self,
id: &str,
enabled: bool,
) -> Result<BackupSchedule, String> {
let mut schedules = self.schedules.write().map_err(|e| e.to_string())?;
let schedule = schedules
.get_mut(id)
.ok_or_else(|| format!("Schedule not found: {}", id))?;
schedule.enabled = enabled;
schedule.updated_at = Utc::now();
if enabled {
schedule.next_run = self.calculate_next_run(&schedule.cron_expression);
}
Ok(schedule.clone())
}
pub async fn trigger_backup(
&self,
schedule_id: Option<&str>,
backup_type: BackupType,
scope: BackupScope,
destination: BackupDestination,
) -> Result<BackupRecord, String> {
let backup = BackupRecord {
id: Uuid::new_v4().to_string(),
schedule_id: schedule_id.map(String::from),
backup_type,
status: BackupStatus::Pending,
started_at: Utc::now(),
completed_at: None,
duration_secs: None,
size_bytes: 0,
item_count: 0,
destination_path: self.generate_backup_path(&destination),
checksum: None,
error: None,
metadata: HashMap::new(),
};
let backup_id = backup.id.clone();
{
let mut backups = self.backups.write().map_err(|e| e.to_string())?;
backups.insert(backup_id.clone(), backup.clone());
}
self.execute_backup(&backup_id, &scope, &destination)
.await?;
let backups = self.backups.read().map_err(|e| e.to_string())?;
backups
.get(&backup_id)
.cloned()
.ok_or_else(|| "Backup not found".to_string())
}
async fn execute_backup(
&self,
backup_id: &str,
_scope: &BackupScope,
_destination: &BackupDestination,
) -> Result<(), String> {
{
let mut backups = self.backups.write().map_err(|e| e.to_string())?;
if let Some(backup) = backups.get_mut(backup_id) {
backup.status = BackupStatus::Running;
}
}
let completed_at = Utc::now();
let started_at;
{
let mut backups = self.backups.write().map_err(|e| e.to_string())?;
if let Some(backup) = backups.get_mut(backup_id) {
started_at = backup.started_at;
backup.status = BackupStatus::Completed;
backup.completed_at = Some(completed_at);
backup.duration_secs =
Some((completed_at - backup.started_at).num_seconds() as u64);
backup.size_bytes = 1024 * 1024; backup.item_count = 100; backup.checksum = Some(format!("sha256:{}", Uuid::new_v4()));
}
}
Ok(())
}
pub async fn list_backups(
&self,
schedule_id: Option<&str>,
status: Option<BackupStatus>,
limit: Option<usize>,
) -> Result<Vec<BackupRecord>, String> {
let backups = self.backups.read().map_err(|e| e.to_string())?;
let mut result: Vec<_> = backups
.values()
.filter(|b| {
schedule_id.map_or(true, |id| b.schedule_id.as_deref() == Some(id))
&& status.map_or(true, |s| b.status == s)
})
.cloned()
.collect();
result.sort_by(|a, b| b.started_at.cmp(&a.started_at));
if let Some(limit) = limit {
result.truncate(limit);
}
Ok(result)
}
pub async fn get_backup(&self, id: &str) -> Result<BackupRecord, String> {
let backups = self.backups.read().map_err(|e| e.to_string())?;
backups
.get(id)
.cloned()
.ok_or_else(|| format!("Backup not found: {}", id))
}
pub async fn delete_backup(&self, id: &str) -> Result<(), String> {
let mut backups = self.backups.write().map_err(|e| e.to_string())?;
backups
.remove(id)
.map(|_| ())
.ok_or_else(|| format!("Backup not found: {}", id))
}
pub async fn verify_backup(&self, id: &str) -> Result<bool, String> {
let mut backups = self.backups.write().map_err(|e| e.to_string())?;
let backup = backups
.get_mut(id)
.ok_or_else(|| format!("Backup not found: {}", id))?;
backup.status = BackupStatus::Verifying;
backup.status = BackupStatus::Verified;
Ok(true)
}
pub async fn restore(&self, request: RestoreRequest) -> Result<RestoreResult, String> {
let backup = self.get_backup(&request.backup_id).await?;
if backup.status != BackupStatus::Completed && backup.status != BackupStatus::Verified {
return Err("Cannot restore from incomplete or failed backup".to_string());
}
Ok(RestoreResult {
success: true,
items_restored: backup.item_count,
items_skipped: 0,
errors: Vec::new(),
duration_secs: 5,
})
}
fn calculate_next_run(&self, _cron_expression: &str) -> Option<DateTime<Utc>> {
Some(Utc::now() + Duration::hours(1))
}
fn generate_backup_path(&self, destination: &BackupDestination) -> String {
let timestamp = Utc::now().format("%Y%m%d_%H%M%S");
match destination {
BackupDestination::Local { path } => {
format!("{}/backup_{}.tar.gz", path, timestamp)
}
BackupDestination::S3 { bucket, prefix, .. } => {
format!("s3://{}/{}/backup_{}.tar.gz", bucket, prefix, timestamp)
}
BackupDestination::AzureBlob {
container, prefix, ..
} => {
format!(
"azure://{}/{}/backup_{}.tar.gz",
container, prefix, timestamp
)
}
BackupDestination::Gcs { bucket, prefix, .. } => {
format!("gs://{}/{}/backup_{}.tar.gz", bucket, prefix, timestamp)
}
BackupDestination::Sftp { host, path, .. } => {
format!("sftp://{}{}/backup_{}.tar.gz", host, path, timestamp)
}
BackupDestination::WebDav { url, .. } => {
format!("{}/backup_{}.tar.gz", url, timestamp)
}
}
}
}
impl Default for BackupService {
fn default() -> Self {
Self::new()
}
}
/// Partial-update body for `PATCH /backup/schedules/{id}`.
/// Every field is optional; `None` leaves the corresponding schedule field
/// unchanged (see `BackupService::update_schedule`).
#[derive(Debug, Deserialize)]
pub struct ScheduleUpdate {
pub name: Option<String>,
pub enabled: Option<bool>,
// Setting this also recomputes the schedule's `next_run`.
pub cron_expression: Option<String>,
pub backup_type: Option<BackupType>,
pub scope: Option<BackupScope>,
pub destination: Option<BackupDestination>,
pub retention: Option<BackupRetention>,
}
/// Request body for `POST /backup/trigger` (ad-hoc backup).
#[derive(Debug, Deserialize)]
pub struct TriggerBackupRequest {
/// Optional schedule to attribute the run to.
pub schedule_id: Option<String>,
/// Defaults to `BackupType::Full` when omitted.
pub backup_type: Option<BackupType>,
/// Defaults to `BackupScope::default()` when omitted.
pub scope: Option<BackupScope>,
pub destination: BackupDestination,
}
/// Query parameters for `GET /backup/backups`.
/// All filters are optional; results are sorted newest-first and `limit`
/// (if given) is applied after sorting.
#[derive(Debug, Deserialize)]
pub struct ListBackupsQuery {
pub schedule_id: Option<String>,
pub status: Option<BackupStatus>,
pub limit: Option<usize>,
}
pub async fn create_schedule(
service: web::Data<BackupService>,
body: web::Json<BackupSchedule>,
) -> HttpResponse {
match service.create_schedule(body.into_inner()).await {
Ok(schedule) => HttpResponse::Created().json(schedule),
Err(e) => HttpResponse::BadRequest().json(serde_json::json!({ "error": e })),
}
}
pub async fn list_schedules(
service: web::Data<BackupService>,
query: web::Query<HashMap<String, String>>,
) -> HttpResponse {
let owner_id = query.get("owner_id").map(String::as_str);
match service.list_schedules(owner_id).await {
Ok(schedules) => HttpResponse::Ok().json(schedules),
Err(e) => HttpResponse::InternalServerError().json(serde_json::json!({ "error": e })),
}
}
pub async fn get_schedule(
service: web::Data<BackupService>,
path: web::Path<String>,
) -> HttpResponse {
match service.get_schedule(&path.into_inner()).await {
Ok(schedule) => HttpResponse::Ok().json(schedule),
Err(e) => HttpResponse::NotFound().json(serde_json::json!({ "error": e })),
}
}
pub async fn update_schedule(
service: web::Data<BackupService>,
path: web::Path<String>,
body: web::Json<ScheduleUpdate>,
) -> HttpResponse {
match service
.update_schedule(&path.into_inner(), body.into_inner())
.await
{
Ok(schedule) => HttpResponse::Ok().json(schedule),
Err(e) => HttpResponse::BadRequest().json(serde_json::json!({ "error": e })),
}
}
pub async fn delete_schedule(
service: web::Data<BackupService>,
path: web::Path<String>,
) -> HttpResponse {
match service.delete_schedule(&path.into_inner()).await {
Ok(()) => HttpResponse::NoContent().finish(),
Err(e) => HttpResponse::NotFound().json(serde_json::json!({ "error": e })),
}
}
pub async fn enable_schedule(
service: web::Data<BackupService>,
path: web::Path<String>,
) -> HttpResponse {
match service.set_schedule_enabled(&path.into_inner(), true).await {
Ok(schedule) => HttpResponse::Ok().json(schedule),
Err(e) => HttpResponse::BadRequest().json(serde_json::json!({ "error": e })),
}
}
pub async fn disable_schedule(
service: web::Data<BackupService>,
path: web::Path<String>,
) -> HttpResponse {
match service
.set_schedule_enabled(&path.into_inner(), false)
.await
{
Ok(schedule) => HttpResponse::Ok().json(schedule),
Err(e) => HttpResponse::BadRequest().json(serde_json::json!({ "error": e })),
}
}
pub async fn trigger_backup(
service: web::Data<BackupService>,
body: web::Json<TriggerBackupRequest>,
) -> HttpResponse {
let request = body.into_inner();
let backup_type = request.backup_type.unwrap_or_default();
let scope = request.scope.unwrap_or_default();
match service
.trigger_backup(
request.schedule_id.as_deref(),
backup_type,
scope,
request.destination,
)
.await
{
Ok(backup) => HttpResponse::Accepted().json(backup),
Err(e) => HttpResponse::BadRequest().json(serde_json::json!({ "error": e })),
}
}
pub async fn list_backups(
service: web::Data<BackupService>,
query: web::Query<ListBackupsQuery>,
) -> HttpResponse {
match service
.list_backups(query.schedule_id.as_deref(), query.status, query.limit)
.await
{
Ok(backups) => HttpResponse::Ok().json(backups),
Err(e) => HttpResponse::InternalServerError().json(serde_json::json!({ "error": e })),
}
}
pub async fn get_backup(
service: web::Data<BackupService>,
path: web::Path<String>,
) -> HttpResponse {
match service.get_backup(&path.into_inner()).await {
Ok(backup) => HttpResponse::Ok().json(backup),
Err(e) => HttpResponse::NotFound().json(serde_json::json!({ "error": e })),
}
}
pub async fn delete_backup(
service: web::Data<BackupService>,
path: web::Path<String>,
) -> HttpResponse {
match service.delete_backup(&path.into_inner()).await {
Ok(()) => HttpResponse::NoContent().finish(),
Err(e) => HttpResponse::NotFound().json(serde_json::json!({ "error": e })),
}
}
pub async fn verify_backup(
service: web::Data<BackupService>,
path: web::Path<String>,
) -> HttpResponse {
match service.verify_backup(&path.into_inner()).await {
Ok(valid) => HttpResponse::Ok().json(serde_json::json!({ "valid": valid })),
Err(e) => HttpResponse::BadRequest().json(serde_json::json!({ "error": e })),
}
}
pub async fn restore_backup(
service: web::Data<BackupService>,
body: web::Json<RestoreRequest>,
) -> HttpResponse {
match service.restore(body.into_inner()).await {
Ok(result) => HttpResponse::Ok().json(result),
Err(e) => HttpResponse::BadRequest().json(serde_json::json!({ "error": e })),
}
}
/// Registers every backup endpoint under the `/backup` scope.
/// Call from the app builder: `App::new().configure(configure_backup_routes)`.
pub fn configure_backup_routes(cfg: &mut web::ServiceConfig) {
cfg.service(
web::scope("/backup")
// Schedule CRUD.
.route("/schedules", web::post().to(create_schedule))
.route("/schedules", web::get().to(list_schedules))
.route("/schedules/{id}", web::get().to(get_schedule))
.route("/schedules/{id}", web::patch().to(update_schedule))
.route("/schedules/{id}", web::delete().to(delete_schedule))
// Pause/resume.
.route("/schedules/{id}/enable", web::post().to(enable_schedule))
.route("/schedules/{id}/disable", web::post().to(disable_schedule))
// Ad-hoc backup execution and record management.
.route("/trigger", web::post().to(trigger_backup))
.route("/backups", web::get().to(list_backups))
.route("/backups/{id}", web::get().to(get_backup))
.route("/backups/{id}", web::delete().to(delete_backup))
.route("/backups/{id}/verify", web::post().to(verify_backup))
// Restore from an existing backup.
.route("/restore", web::post().to(restore_backup)),
);
}